Example #1
0
    def __init__(self, fold_indices: dict):
        """Build the model, fold datasets, data producers, train/validation
        stages, loss and optimizer, then hand them to the base trainer.

        :param fold_indices: mapping with keys ``'train'`` (iterable of
            indices-file stems) and ``'val'`` (a single stem); each stem is
            resolved to ``<stem>.npy`` inside ``INDICES_DIR``
        """
        model = self.create_model(pretrained=False).cuda()

        # One augmented training dataset per fold indices file.
        train_dts = [
            create_augmented_dataset(
                is_train=True,
                indices_path=os.path.join(INDICES_DIR, indices + '.npy'))
            for indices in fold_indices['train']
        ]

        val_dts = create_augmented_dataset(
            is_train=False,
            indices_path=os.path.join(INDICES_DIR,
                                      fold_indices['val'] + '.npy'))

        workers_num = 4
        # NOTE(review): `batch_size` is a bare name here while the sibling
        # config uses `self.batch_size` — confirm it resolves (module global
        # or class attribute). pin_memory is deliberately left disabled.
        self._train_data_producer = DataProducer(
            DatasetsContainer(train_dts), batch_size=batch_size,
            num_workers=workers_num).global_shuffle(True)  # .pin_memory(True)
        self._val_data_producer = DataProducer(
            val_dts, batch_size=batch_size,
            num_workers=workers_num).global_shuffle(True)  # .pin_memory(True)

        self.train_stage = TrainStage(self._train_data_producer)
        self.val_stage = ValidationStage(self._val_data_producer)

        loss = RMSELoss().cuda()
        optimizer = Adam(params=model.parameters(), lr=1e-4)

        super().__init__(model, [self.train_stage, self.val_stage], loss,
                         optimizer)
Example #2
0
    def __init__(self, fold_indices: dict):
        """Build the model, fold datasets, data producers, train/validation
        stages (with segmentation metrics), loss and optimizer, then hand
        them to the base trainer.

        :param fold_indices: mapping with keys ``'train'`` (iterable of
            indices-file stems) and ``'val'`` (a single stem); each stem is
            resolved to ``<stem>.npy`` under ``data/indices``
        """
        model = self.create_model().cuda()

        # Renamed from `dir`, which shadowed the builtin of the same name.
        indices_dir = os.path.join('data', 'indices')

        # One augmented training dataset per fold indices file.
        train_dts = [
            create_augmented_dataset(
                is_train=True,
                indices_path=os.path.join(indices_dir, indices + '.npy'))
            for indices in fold_indices['train']
        ]

        val_dts = create_augmented_dataset(
            is_train=False,
            indices_path=os.path.join(indices_dir,
                                      fold_indices['val'] + '.npy'))

        # NOTE(review): `self.batch_size` is read before super().__init__
        # runs, so it must be a class attribute — confirm on the class.
        self._train_data_producer = DataProducer(
            DatasetsContainer(train_dts), batch_size=self.batch_size,
            num_workers=8).global_shuffle(True).pin_memory(True)
        self._val_data_producer = DataProducer(
            val_dts, batch_size=self.batch_size,
            num_workers=8).global_shuffle(True).pin_memory(True)

        self.train_stage = TrainStage(self._train_data_producer,
                                      SegmentationMetricsProcessor('train'))
        self.val_stage = ValidationStage(
            self._val_data_producer,
            SegmentationMetricsProcessor('validation'))

        loss = BCEDiceLoss(0.5, 0.5, reduction=Reduction('mean')).cuda()
        optimizer = Adam(params=model.parameters(), lr=1e-4)

        super().__init__(model, [self.train_stage, self.val_stage], loss,
                         optimizer)
def run(config_type: object, out: str):
    """Evaluate each trained fold on the held-out test split and print its
    segmentation metrics.

    :param config_type: config class providing ``experiment_dir`` and
        ``create_model``
    :param out: output location (NOTE(review): currently unused)
    """
    test_dataset = create_augmented_dataset(
        is_train=False, to_pytorch=True,
        indices_path='data/indices/test.npy')

    def _prep_data(x):
        # Add a leading batch dimension and move the sample to the GPU.
        return torch.from_numpy(np.expand_dims(x, 0)).cuda()

    def _prep_target(x):
        # Reshape target to (1, C, H, W) without copying.
        return torch.reshape(x, (1, x.shape[0], x.shape[1], x.shape[2]))

    for fold in generate_folds_names(TrainConfig.folds_num):
        experiment_path = os.path.join(config_type.experiment_dir,
                                       os.path.splitext(fold['val'])[0])
        fsm = FileStructManager(base_dir=experiment_path, is_continue=True)
        predictor = Predictor(config_type.create_model(False).cuda(), fsm=fsm)

        metrics = MetricsEval(test_dataset, predictor,
                              SegmentationMetricsProcessor('eval')) \
            .set_data_preprocess(_prep_data) \
            .set_target_preprocess(_prep_target) \
            .run().get_metrics()
        print(metrics)
import cv2
import numpy as np
from pietoolbelt.viz import ColormapVisualizer

from train_config.dataset import create_augmented_dataset

if __name__ == '__main__':
    # Visual sanity check of the augmented training data: overlay each mask
    # on its image and show original / overlay / mask in separate windows.
    dataset = create_augmented_dataset(is_train=True, to_pytorch=False)
    visualizer = ColormapVisualizer([0.5, 0.5])

    print(len(dataset))

    for sample_idx, sample in enumerate(dataset):
        source_img = sample['data'].copy()
        print(sample['target'].max(), sample['target'].shape)

        # Only build the colormap overlay when the mask is non-empty;
        # otherwise show the plain image.
        overlay = (visualizer.process_img(
                       sample['data'],
                       (sample['target'] * 255).astype(np.uint8))
                   if sample['target'].max() > 0 else sample['data'])

        # To dump frames to disk instead of displaying them, write
        # source_img / overlay / mask via cv2.imwrite, keyed by sample_idx.

        cv2.imshow('original', source_img)
        cv2.imshow('img', overlay)
        cv2.imshow('mask', sample['target'].astype(np.float32))
        cv2.waitKey()