Example 1
def training(args):
    logs_dir = os.path.join(settings.TRAINING_DIR, 'logs')
    ckpts_dir = os.path.join(settings.TRAINING_DIR, 'ckpts')
    os.makedirs(logs_dir, exist_ok=True)
    os.makedirs(ckpts_dir, exist_ok=True)

    args = dotdict(args)
    weight_id = args.weight_id
    weight = dj_models.ModelWeights.objects.get(id=weight_id)
    pretrained = None
    if weight.pretrained_on:
        pretrained = weight.pretrained_on.location
    size = [args.input_h, args.input_w]  # Height, width
    try:
        model = bindings.models_binding[args.model_id]
    except KeyError:
        return -1
    try:
        dataset_path = str(
            dj_models.Dataset.objects.get(id=args.dataset_id).path)
        dataset = bindings.dataset_binding[args.dataset_id]
    except (KeyError, dj_models.Dataset.DoesNotExist):
        return -1

    dataset = dataset(dataset_path, args.batch_size, size)
    d = dataset.d
    num_classes = dataset.num_classes
    in_ = eddl.Input([d.n_channels_, size[0], size[1]])
    out = model(in_, num_classes)
    net = eddl.Model([in_], [out])

    with open(f'{logs_dir}/{weight_id}.log', 'w') as f:
        with redirect_stdout(f):
            eddl.build(net, eddl.sgd(args.lr, 0.9),
                       [bindings.losses_binding.get(args.loss)],
                       [bindings.metrics_binding.get(args.metric)],
                       eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU())
            eddl.summary(net)

            if pretrained and os.path.exists(pretrained):
                eddl.load(net, pretrained)
                logging.info('Weights loaded')

            logging.info('Reading dataset')

            images = eddlT.create(
                [args.batch_size, d.n_channels_, size[0], size[1]])
            labels = eddlT.create([args.batch_size, len(d.classes_)])
            d.SetSplit('training')
            num_samples = len(d.GetSplit())
            num_batches = num_samples // args.batch_size
            indices = list(range(args.batch_size))

            for e in range(args.epochs):
                eddl.reset_loss(net)
                d.SetSplit('training')
                s = d.GetSplit()
                num_samples = len(s)
                random.shuffle(s)
                d.split_.training_ = s
                d.ResetCurrentBatch()
                # total_loss = 0.
                # total_metric = 0.
                for i in range(num_batches):
                    d.LoadBatch(images, labels)
                    images.div_(255.0)
                    tx, ty = [images], [labels]
                    eddl.train_batch(net, tx, ty, indices)
                    total_loss = net.fiterr[0]
                    total_metric = net.fiterr[1]
                    print(
                        f'Batch {i + 1}/{num_batches} {net.lout[0].name}({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                        f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})'
                    )

                    logging.info(
                        f'Batch {i + 1}/{num_batches} {net.lout[0].name}({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                        f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})'
                    )

            eddl.save(net, f'{ckpts_dir}/{weight_id}.bin')
            logging.info('Weights saved')

            logging.info('Evaluation')
            d.SetSplit('test')
            num_samples = len(d.GetSplit())
            num_batches = num_samples // args.batch_size

            d.ResetCurrentBatch()

            for i in range(num_batches):
                d.LoadBatch(images, labels)
                images.div_(255.0)
                eddl.eval_batch(net, [images], [labels], indices)
                # eddl.evaluate(net, [images], [labels])

                total_loss = net.fiterr[0]
                total_metric = net.fiterr[1]
                print(
                    f'Evaluation {i + 1}/{num_batches} {net.lout[0].name}({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                    f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})'
                )
                logging.info(
                    f'Evaluation {i + 1}/{num_batches} {net.lout[0].name}({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                    f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})'
                )
            print('<done>')
    del net
    del out
    del in_
    return 0
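All five examples receive their settings as a plain dict and wrap it in `dotdict` for attribute access. Neither the helper nor the exact schema is part of this listing, so the sketch below is an assumption: `dotdict` as the usual attribute-access dict pattern, with field names inferred from the attributes that `training` reads above (the `loss` and `metric` values are hypothetical binding keys).

# Minimal sketch (assumption): 'dotdict' as the common attribute-access dict.
class dotdict(dict):
    """dict subclass whose keys can also be read as attributes."""
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__


# Hypothetical invocation; keys mirror the attributes training() reads.
args = {
    'weight_id': 1,                # dj_models.ModelWeights primary key
    'model_id': 1,                 # key into bindings.models_binding
    'dataset_id': 1,               # key into bindings.dataset_binding
    'input_h': 224,                # input height
    'input_w': 224,                # input width
    'batch_size': 32,
    'epochs': 10,
    'lr': 1e-3,
    'loss': 'cross_entropy',       # hypothetical losses_binding key
    'metric': 'categorical_accuracy',  # hypothetical metrics_binding key
    'gpu': False,
}
training(args)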
Example 2
def inference(args):
    logs_dir = os.path.join(settings.INFERENCE_DIR, 'logs')
    preds_dir = os.path.join(settings.INFERENCE_DIR, 'predictions')
    os.makedirs(logs_dir, exist_ok=True)
    os.makedirs(preds_dir, exist_ok=True)

    args = dotdict(args)
    weight_id = args.weight_id
    weight = dj_models.ModelWeights.objects.get(id=weight_id)
    pretrained = weight.location
    size = [args.input_h, args.input_w]  # Height, width
    try:
        model = bindings.models_binding[args.model_id]
    except KeyError:
        return -1
    try:
        dataset_path = str(
            dj_models.Dataset.objects.get(id=args.dataset_id).path)
        dataset = bindings.dataset_binding[args.dataset_id]
    except (KeyError, dj_models.Dataset.DoesNotExist):
        return -1

    dataset = dataset(dataset_path, args.batch_size, size)
    d = dataset.d
    num_classes = dataset.num_classes
    in_ = eddl.Input([d.n_channels_, size[0], size[1]])
    out = model(in_, num_classes)  # out is already softmaxed
    net = eddl.Model([in_], [out])

    with open(f'{logs_dir}/{weight_id}.log', 'w') as f:
        with redirect_stdout(f):
            eddl.build(
                net,
                eddl.sgd(),
                [bindings.losses_binding.get(args.loss)],
                [bindings.metrics_binding.get(args.metric)],
            )

            eddl.summary(net)

            if args.gpu:
                eddl.toGPU(net, [1])

            if os.path.exists(pretrained):
                eddl.load(net, pretrained)
                logging.info('Weights loaded')
            else:
                return -1
            logging.info('Reading dataset')

            images = eddlT.create(
                [args.batch_size, d.n_channels_, size[0], size[1]])
            labels = eddlT.create([args.batch_size, len(d.classes_)])
            indices = list(range(args.batch_size))

            logging.info('Starting inference')
            d.SetSplit('test')
            num_samples = len(d.GetSplit())
            num_batches = num_samples // args.batch_size
            preds = np.empty((0, num_classes + 1), np.float64)

            d.ResetCurrentBatch()

            # start_index = d.current_batch_[d.current_split_] * d.batch_size_
            # samples = d.GetSplit()[start_index:start_index + d.batch_size_]
            # names = [d.samples_[s].location_ for s in samples]

            for b in range(num_batches):
                d.LoadBatch(images, labels)
                images.div_(255.0)
                eddl.eval_batch(net, [images], [labels], indices)
                total_loss = net.fiterr[0]
                total_metric = net.fiterr[1]
                print(
                    f'Evaluation {b + 1}/{num_batches} {net.lout[0].name}({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                    f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})'
                )
                logging.info(
                    f'Evaluation {b + 1}/{num_batches} {net.lout[0].name}({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                    f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})'
                )
                # Save network predictions
                for i in range(args.batch_size):
                    pred = np.array(eddlT.select(eddl.getTensor(out), i),
                                    dtype=np.float64)
                    gt = np.argmax(np.array(labels, copy=False)[i])
                    pred = np.append(pred, gt).reshape((1, num_classes + 1))
                    preds = np.append(preds, pred, axis=0)
            print('<done>')
            np.save(f'{preds_dir}/{weight_id}.npy', preds.astype(np.float64))
    del net
    del out
    del in_
    return 0
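`inference` persists its results as a NumPy array with one row per test sample: the `num_classes` class probabilities followed by the ground-truth class index in the last column. A minimal consumer sketch (the file path is hypothetical, mirroring the `np.save` call above):

import numpy as np

# Hypothetical path, mirroring np.save(f'{preds_dir}/{weight_id}.npy', ...).
preds = np.load('predictions/1.npy')      # shape: (N, num_classes + 1)
probs, gt = preds[:, :-1], preds[:, -1].astype(int)
accuracy = (probs.argmax(axis=1) == gt).mean()
print(f'Top-1 accuracy on the test split: {accuracy:.4f}')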
Example 3
def training(args):
    logs_dir = os.path.join(settings.TRAINING_DIR, 'logs')
    ckpts_dir = os.path.join(settings.TRAINING_DIR, 'ckpts')
    os.makedirs(logs_dir, exist_ok=True)
    os.makedirs(ckpts_dir, exist_ok=True)

    args = dotdict(args)
    weight_id = args.weight_id
    weight = dj_models.ModelWeights.objects.get(id=weight_id)
    pretrained = None
    if weight.pretrained_on:
        pretrained = weight.pretrained_on.location
    size = [args.input_h, args.input_w]  # Height, width
    # ctype = ecvl.ColorType.GRAY
    try:
        model = bindings.models_binding[args.model_id]
    except KeyError:
        return 1
    try:
        dataset_path = str(
            dj_models.Dataset.objects.get(id=args.dataset_id).path)
        dataset = bindings.dataset_binding[args.dataset_id]
    except (KeyError, dj_models.Dataset.DoesNotExist):
        return 1

    # dataset = dataset(dataset_path, args.batch_size, args.split)
    dataset = dataset(dataset_path, args.batch_size, size)
    d = dataset.d
    num_classes = dataset.num_classes
    in_ = eddl.Input([d.n_channels_, size[0], size[1]])
    out = model(in_, num_classes)
    out_sigm = eddl.Sigmoid(out)
    net = eddl.Model([in_], [out_sigm])

    with open(f'{logs_dir}/{weight_id}.log', 'w') as f:
        with redirect_stdout(f):

            eddl.build(
                net,
                # eddl.sgd(args.lr, 0.9),
                eddl.adam(args.lr),
                [bindings.losses_binding.get(args.loss)],
                [bindings.metrics_binding.get(args.metric)],
                eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU())
            eddl.summary(net)

            if pretrained and os.path.exists(pretrained):
                eddl.load(net, pretrained)
                logging.info('Weights loaded')

            logging.info('Reading dataset')

            images = eddlT.create(
                [args.batch_size, d.n_channels_, size[0], size[1]])
            gts = eddlT.create([args.batch_size, 1, size[0], size[1]])
            d.SetSplit('training')
            num_samples = len(d.GetSplit())
            num_batches = num_samples // args.batch_size
            indices = list(range(args.batch_size))

            d.SetSplit('validation')
            num_samples_validation = len(d.GetSplit())
            num_batches_validation = num_samples_validation // args.batch_size
            evaluator = Evaluator()
            miou = -1
            for e in range(args.epochs):
                d.SetSplit('training')
                eddl.reset_loss(net)
                s = d.GetSplit()
                random.shuffle(s)
                d.split_.training_ = s
                d.ResetAllBatches()
                for i in range(num_batches):
                    d.LoadBatch(images, gts)
                    images.div_(255.0)
                    gts.div_(255.0)
                    eddl.train_batch(net, [images], [gts], indices)
                    total_loss = net.fiterr[0]
                    total_metric = net.fiterr[1]
                    print(
                        f'Batch {i + 1}/{num_batches} {net.lout[0].name}({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                        f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})'
                    )

                    logging.info(
                        f'Batch {i + 1}/{num_batches} {net.lout[0].name}({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                        f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})'
                    )

                logging.info('Evaluation')
                d.SetSplit('validation')
                evaluator.ResetEval()
                for j in range(num_batches_validation):
                    print('Validation - Epoch %d/%d (batch %d/%d) ' %
                          (e + 1, args.epochs, j + 1, num_batches_validation),
                          end='',
                          flush=True)
                    d.LoadBatch(images, gts)
                    images.div_(255.0)
                    gts.div_(255.0)
                    eddl.forward(net, [images])
                    output = eddl.getTensor(out_sigm)
                    for k in range(args.batch_size):
                        img_ = eddlT.select(output, k)
                        gts_ = eddlT.select(gts, k)
                        a = np.array(img_, copy=False)
                        b = np.array(gts_, copy=False)
                        iou = evaluator.BinaryIoU(a, b)
                        print('- IoU: %.6g ' % iou, flush=True)

                last_miou = evaluator.MIoU()
                print(f'Validation MIoU: {last_miou:.6f}', flush=True)

                if last_miou > miou:
                    miou = last_miou
                    eddl.save(net, f'{ckpts_dir}/{weight_id}.bin', 'bin')
                    logging.info('Weights saved')

            # d.SetSplit('test')
            # num_samples = len(d.GetSplit())
            # num_batches = num_samples // args.batch_size
            #
            # d.ResetCurrentBatch()
            #
            # for i in range(num_batches):
            #     d.LoadBatch(images, gts)
            #     images.div_(255.0)
            #     eddl.eval_batch(net, [images], [gts], indices)
            #     # eddl.evaluate(net, [images], [labels])
            #
            #     total_loss = net.fiterr[0]
            #     total_metric = net.fiterr[1]
            #     print(
            #         f'Evaluation {i + 1}/{num_batches} {net.lout[0].name}({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
            #         f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})')
            #     logging.info(
            #         f'Evaluation {i + 1}/{num_batches} {net.lout[0].name}({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
            #         f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})')
            # print('<done>')
    del net
    del out
    del in_
    return 0
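Examples 3 and 5 depend on an `Evaluator` helper exposing `ResetEval`, `BinaryIoU`, and `MIoU`; its implementation is not shown in this listing. Below is a plausible minimal stand-in, assuming `BinaryIoU` thresholds both prediction and ground truth at 0.5 and `MIoU` averages the accumulated per-image scores:

import numpy as np


class Evaluator:
    """Hypothetical stand-in: accumulates per-image binary IoU scores."""

    def __init__(self):
        self.buf = []

    def ResetEval(self):
        self.buf = []

    def BinaryIoU(self, pred, gt, thresh=0.5):
        # Binarize both masks, then compute intersection over union.
        p = np.asarray(pred) >= thresh
        g = np.asarray(gt) >= thresh
        union = np.logical_or(p, g).sum()
        if union == 0:
            iou = 1.0  # both masks empty: treat as a perfect match
        else:
            iou = np.logical_and(p, g).sum() / union
        self.buf.append(iou)
        return iou

    def MIoU(self):
        return float(np.mean(self.buf)) if self.buf else 0.0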
Example 4
def classificate(args):
    args = dotdict(args)

    ckpts_dir = opjoin(settings.TRAINING_DIR, 'ckpts')
    outputfile = None
    inference = None

    train = args.mode == 'training'
    batch_size = args.batch_size if args.mode == 'training' else args.test_batch_size
    weight_id = args.weight_id
    weight = dj_models.ModelWeights.objects.get(id=weight_id)
    if train:
        pretrained = None
        if weight.pretrained_on:
            pretrained = weight.pretrained_on.location
    else:
        inference_id = args.inference_id
        inference = dj_models.Inference.objects.get(id=inference_id)
        pretrained = weight.location
    size = [args.input_h, args.input_w]  # Height, width
    try:
        model = bindings.models_binding[args.model_id]
    except KeyError:
        raise Exception(
            f'Model with id: {args.model_id} not found in bindings.py')
    try:
        dataset_path = str(
            dj_models.Dataset.objects.get(id=args.dataset_id).path)
    except dj_models.Dataset.DoesNotExist:
        raise Exception(
            f'Dataset with id: {args.dataset_id} not found')
    dataset = bindings.dataset_binding.get(args.dataset_id)

    if dataset is None and not train:
        # Binding does not exist: it's a single-image dataset.
        # Use the dataset the model was trained on as a stub.
        dataset = bindings.dataset_binding.get(weight.dataset_id.id)
    elif dataset is None and train:
        raise Exception(
            f'Dataset with id: {args.dataset_id} not found in bindings.py')

    basic_augs = ecvl.SequentialAugmentationContainer(
        [ecvl.AugResizeDim(size)])
    train_augs = basic_augs
    val_augs = basic_augs
    test_augs = basic_augs
    if args.train_augs:
        train_augs = ecvl.SequentialAugmentationContainer([
            ecvl.AugResizeDim(size),
            ecvl.AugmentationFactory.create(args.train_augs)
        ])
    if args.val_augs:
        val_augs = ecvl.SequentialAugmentationContainer([
            ecvl.AugResizeDim(size),
            ecvl.AugmentationFactory.create(args.val_augs)
        ])
    if args.test_augs:
        test_augs = ecvl.SequentialAugmentationContainer([
            ecvl.AugResizeDim(size),
            ecvl.AugmentationFactory.create(args.test_augs)
        ])
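    # Note (assumption, not verified against the installed pyecvl version):
    # ecvl.AugmentationFactory.create() parses a textual augmentation
    # description, so args.train_augs / args.val_augs / args.test_augs are
    # expected to be strings such as 'AugFlip p=0.5' or a multi-line
    # 'SequentialAugmentationContainer ... end' block.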

    logging.info('Reading dataset')
    print('Reading dataset', flush=True)

    dataset = dataset(
        dataset_path, batch_size,
        ecvl.DatasetAugmentations([train_augs, val_augs, test_augs]))
    d = dataset.d
    num_classes = dataset.num_classes
    in_ = eddl.Input([d.n_channels_, size[0], size[1]])
    # out is already softmaxed in classification models
    out = model(in_, num_classes)
    net = eddl.Model([in_], [out])

    if train:
        logfile = open(Path(weight.logfile), 'w')
    else:
        logfile = open(inference.logfile, 'w')
        outputfile = open(inference.outputfile, 'w')
    with redirect_stdout(logfile):
        # Save args to file
        print('args: ' + json.dumps(args, indent=2, sort_keys=True),
              flush=True)
        logging.info('args: ' + json.dumps(args, indent=2, sort_keys=True))

        eddl.build(
            net,
            eddl.sgd(args.lr, 0.9),
            [bindings.losses_binding.get(args.loss)],
            [bindings.metrics_binding.get(args.metric)],
            eddl.CS_GPU([1], mem='low_mem') if args.gpu else eddl.CS_CPU())
        eddl.summary(net)

        if pretrained and os.path.exists(pretrained):
            eddl.load(net, pretrained)
            logging.info('Weights loaded')

        # Create tensor for images and labels
        images = eddlT.create([batch_size, d.n_channels_, size[0], size[1]])
        labels = eddlT.create([batch_size, num_classes])

        logging.info(f'Starting {args.mode}')
        print(f'Starting {args.mode}', flush=True)
        if train:
            num_samples_train = len(d.GetSplit(ecvl.SplitType.training))
            num_batches_train = num_samples_train // batch_size
            num_samples_val = len(d.GetSplit(ecvl.SplitType.validation))
            num_batches_val = num_samples_val // batch_size

            indices = list(range(batch_size))

            for e in range(args.epochs):
                eddl.reset_loss(net)
                d.SetSplit(ecvl.SplitType.training)
                s = d.GetSplit()
                random.shuffle(s)
                d.split_.training_ = s
                d.ResetCurrentBatch()
                # total_loss = 0.
                # total_metric = 0.
                for i in range(num_batches_train):
                    d.LoadBatch(images, labels)
                    images.div_(255.0)
                    eddl.train_batch(net, [images], [labels], indices)
                    total_loss = net.fiterr[0]
                    total_metric = net.fiterr[1]
                    print(
                        f'Train Epoch: {e + 1}/{args.epochs} [{i + 1}/{num_batches_train}] {net.lout[0].name}'
                        f'({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                        f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})',
                        flush=True)

                    logging.info(
                        f'Train Epoch: {e + 1}/{args.epochs} [{i + 1}/{num_batches_train}] {net.lout[0].name}'
                        f'({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                        f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})'
                    )

                eddl.save(net, opjoin(ckpts_dir, f'{weight_id}.bin'))
                logging.info('Weights saved')
                print('Weights saved', flush=True)

                if len(d.split_.validation_) > 0:

                    logging.info(f'Validation {e + 1}/{args.epochs}')
                    print(f'Validation {e + 1}/{args.epochs}', flush=True)

                    d.SetSplit(ecvl.SplitType.validation)
                    d.ResetCurrentBatch()

                    for i in range(num_batches_val):
                        d.LoadBatch(images, labels)
                        images.div_(255.0)
                        eddl.eval_batch(net, [images], [labels], indices)
                        # eddl.evaluate(net, [images], [labels])

                        total_loss = net.fiterr[0]
                        total_metric = net.fiterr[1]
                        print(
                            f'Val Epoch: {e + 1}/{args.epochs}  [{i + 1}/{num_batches_val}] {net.lout[0].name}'
                            f'({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                            f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})',
                            flush=True)
                        logging.info(
                            f'Val Epoch: {e + 1}/{args.epochs}  [{i + 1}/{num_batches_val}] {net.lout[0].name}'
                            f'({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                            f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})'
                        )
        else:
            d.SetSplit(ecvl.SplitType.test)
            num_samples_test = len(d.GetSplit())
            num_batches_test = num_samples_test // batch_size
            preds = np.empty((0, num_classes), np.float64)

            for b in range(num_batches_test):
                d.LoadBatch(images)
                images.div_(255.0)
                eddl.forward(net, [images])

                print(f'Infer Batch {b + 1}/{num_batches_test}', flush=True)
                logging.info(f'Infer Batch {b + 1}/{num_batches_test}')

                # print(
                #     f'Evaluation {b + 1}/{num_batches} {net.lout[0].name}({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                #     f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})')
                # logging.info(
                #     f'Evaluation {b + 1}/{num_batches} {net.lout[0].name}({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                #     f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})')
                # Save network predictions
                for i in range(batch_size):
                    pred = np.array(eddlT.select(eddl.getTensor(out), i),
                                    copy=False)
                    # gt = np.argmax(np.array(labels)[indices])
                    # pred = np.append(pred, gt).reshape((1, num_classes + 1))
                    preds = np.append(preds, pred, axis=0)
                    pred_name = d.samples_[
                        d.GetSplit()[b * batch_size + i]].location_
                    # print(f'{pred_name};{pred}')
                    outputfile.write(f'{pred_name};{pred.tolist()}\n')
            outputfile.close()
        print('<done>')
    logfile.close()
    del net
    del out
    del in_
    return
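In inference mode, `classificate` writes one line per sample to `inference.outputfile`, in the form `<sample location>;<list of class probabilities>`. A small reader sketch for that format (the file path is hypothetical):

import ast

import numpy as np

# Hypothetical path for inference.outputfile.
with open('inference_output.txt') as f:
    for line in f:
        name, probs_str = line.rstrip('\n').split(';', 1)
        # pred.tolist() may be nested (the selected tensor keeps a leading
        # batch axis of 1), so flatten before taking the argmax.
        probs = np.array(ast.literal_eval(probs_str)).ravel()
        print(name, int(probs.argmax()))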
Example 5
def inference(args):
    logs_dir = os.path.join(settings.INFERENCE_DIR, 'logs')
    preds_dir = os.path.join(settings.INFERENCE_DIR, 'predictions')
    imgs_dir = os.path.join(settings.INFERENCE_DIR, 'predictions/images')
    os.makedirs(logs_dir, exist_ok=True)
    os.makedirs(preds_dir, exist_ok=True)
    os.makedirs(imgs_dir, exist_ok=True)

    args = dotdict(args)
    weight_id = args.weight_id
    weight = dj_models.ModelWeights.objects.get(id=weight_id)
    pretrained = weight.location
    size = [args.input_h, args.input_w]  # Height, width
    # ctype = ecvl.ColorType.GRAY
    try:
        model = bindings.models_binding[args.model_id]
    except KeyError:
        return 1
    try:
        dataset_path = str(
            dj_models.Dataset.objects.get(id=args.dataset_id).path)
        dataset = bindings.dataset_binding[args.dataset_id]
    except (KeyError, dj_models.Dataset.DoesNotExist):
        return 1

    dataset = dataset(dataset_path, args.batch_size, size)
    d = dataset.d
    num_classes = dataset.num_classes
    in_ = eddl.Input([d.n_channels_, size[0], size[1]])
    out = model(in_, num_classes)
    out_sigm = eddl.Sigmoid(out)
    net = eddl.Model([in_], [out_sigm])

    with open(f'{logs_dir}/{weight_id}.log', 'w') as f:
        with redirect_stdout(f):
            eddl.build(
                net,
                # eddl.sgd(args.lr, 0.9),
                eddl.adam(args.lr),
                [bindings.losses_binding.get(args.loss)],
                [bindings.metrics_binding.get(args.metric)],
            )
            eddl.summary(net)

            if args.gpu:
                eddl.toGPU(net, [1])

            if os.path.exists(pretrained):
                eddl.load(net, pretrained)
                logging.info('Weights loaded')
            else:
                return -1
            logging.info('Reading dataset')

            images = eddlT.create(
                [args.batch_size, d.n_channels_, size[0], size[1]])
            gts = eddlT.create([args.batch_size, 1, size[0], size[1]])
            # indices = list(range(args.batch_size))

            evaluator = Evaluator()
            d.SetSplit('test')
            num_samples = len(d.GetSplit())
            num_batches = num_samples // args.batch_size

            logging.info('test')
            evaluator.ResetEval()
            for j in range(num_batches):
                d.LoadBatch(images, gts)
                images.div_(255.0)
                gts.div_(255.0)
                eddl.forward(net, [images])
                output = eddl.getTensor(out_sigm)
                for k in range(args.batch_size):
                    img_ = eddlT.select(output, k)
                    gts_ = eddlT.select(gts, k)
                    a = np.array(img_, copy=False)
                    b = np.array(gts_, copy=False)
                    concat = np.concatenate([a, b], axis=-1).squeeze()
                    concat[concat >= 0.5] = 255
                    concat[concat < 0.5] = 0
                    # cv2.imwrite(f'{imgs_dir}/{j}_{k}.png', concat)
                    iou = evaluator.BinaryIoU(a, b)
                    print(f'IoU: {iou:.6f}', flush=True)
                    logging.info(f'IoU: {iou:.6f}')

            print(f'Test MIoU: {evaluator.MIoU():.6f}', flush=True)
            logging.info(f'Test MIoU: {evaluator.MIoU():.6f}')
            print('<done>')
    del net
    del out
    del in_
    return 0
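The commented-out `cv2.imwrite` call above suggests how the thresholded masks could be inspected visually. A sketch of that step, assuming OpenCV is available; `concat` is the side-by-side prediction/ground-truth pair built in the inner loop, already mapped to {0, 255}:

import cv2
import numpy as np


def save_mask_pair(concat, path):
    """Write one thresholded prediction/ground-truth pair as a PNG.

    'concat' is expected to hold the two masks side by side (as built
    with np.concatenate(..., axis=-1) above), with values in {0, 255}.
    """
    cv2.imwrite(path, np.asarray(concat, dtype=np.uint8))


# Hypothetical call from inside the (j, k) loop of inference():
# save_mask_pair(concat, f'{imgs_dir}/{j}_{k}.png')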