Ejemplo n.º 1
0
 def manual_check(self):
     """ Visually compare mask pooling of torchvision's RoIAlign vs. the custom extension.

     Draws a toy batch, pools each sample's ground-truth RoI masks with both
     implementations, and writes the raw masks plus both pooled variants as
     PNGs to a hard-coded debug directory for manual inspection.
     :return: None (all output is written to disk)
     """
     self.ra_ext = utils.import_module("ra_ext", 'custom_extensions/roi_align/roi_align.py')
     # actual mrcnn mask input
     from datasets.toy import configs
     cf = configs.Configs()
     cf.exp_dir = "datasets/toy/experiments/dev/"
     cf.plot_dir = cf.exp_dir + "plots"
     os.makedirs(cf.exp_dir, exist_ok=True)
     cf.fold = 0
     cf.n_workers = 1
     logger = utils.get_logger(cf.exp_dir)
     data_loader = utils.import_module('data_loader', os.path.join("datasets", "toy", 'data_loader.py'))
     batch_gen = data_loader.get_train_generators(cf, logger=logger)
     batch = next(batch_gen['train'])
     # hand-built dummy mask/box; only referenced by the commented-out synthetic batch below
     roi_mask = np.zeros((1, 320, 200))
     bb_target = (np.array([50, 40, 90, 120])).astype("int")
     roi_mask[:, bb_target[0]+1:bb_target[2]+1, bb_target[1]+1:bb_target[3]+1] = 1.
     #batch = {"roi_masks": np.array([np.array([roi_mask, roi_mask]), np.array([roi_mask])]), "bb_target": [[bb_target, bb_target + 25], [bb_target-20]]}
     #batch_boxes_cor = [torch.tensor(batch_el_boxes).cuda().float() for batch_el_boxes in batch_cor["bb_target"]]
     # per batch element: ground-truth boxes as float tensors on the GPU
     batch_boxes = [torch.tensor(batch_el_boxes).cuda().float() for batch_el_boxes in batch["bb_target"]]
     #import IPython; IPython.embed()
     for b in range(len(batch_boxes)):
         roi_masks = batch["roi_masks"][b]
         #roi_masks_cor = batch_cor["roi_masks"][b]
         if roi_masks.sum()>0:  # skip samples without any foreground mask
             boxes = batch_boxes[b]
             roi_masks = torch.tensor(roi_masks).cuda().type(dtype=torch.float32)
             # one row index per mask, prepended to the boxes as the custom
             # extension's (id, coords) box format (cf. the torch.cat below)
             box_ids = torch.arange(roi_masks.shape[0]).cuda().unsqueeze(1).type(dtype=torch.float32)
             # reference pooling via torchvision
             masks = tv.ops.roi_align(roi_masks, [boxes], cf.mask_shape)
             masks = masks.squeeze(1)
             masks = torch.round(masks)  # binarize pooled masks for comparison
             # pooling via the custom extension
             masks_own = self.ra_ext.roi_align_2d(roi_masks, torch.cat((box_ids, boxes), dim=1), cf.mask_shape)
             boxes = boxes.type(torch.int)
             #print("check roi mask", roi_masks[0, 0, boxes[0][0]:boxes[0][2], boxes[0][1]:boxes[0][3]].sum(), (boxes[0][2]-boxes[0][0]) * (boxes[0][3]-boxes[0][1]))
             #print("batch masks", batch["roi_masks"])
             masks_own = masks_own.squeeze(1)
             masks_own = torch.round(masks_own)
             #import IPython; IPython.embed()
             for mix, mask in enumerate(masks):
                 fig = plg.plt.figure()
                 ax = fig.add_subplot()
                 ax.imshow(roi_masks[mix][0].cpu().numpy(), cmap="gray", vmin=0.)
                 ax.axis("off")
                 # draw the same box twice, once read as (y1, x1, y2, x2) (cyan) and
                 # once as (x1, y1, x2, y2) (red), to visualize the coordinate convention
                 y1, x1, y2, x2 = boxes[mix]
                 bbox = plg.mpatches.Rectangle((x1, y1), x2-x1, y2-y1, linewidth=0.9, edgecolor="c", facecolor='none')
                 ax.add_patch(bbox)
                 x1, y1, x2, y2 = boxes[mix]
                 bbox = plg.mpatches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=0.9, edgecolor="r",
                                               facecolor='none')
                 ax.add_patch(bbox)
                 # NOTE(review): hard-coded user-specific debug path
                 debug_dir = Path("/home/gregor/Documents/regrcnn/datasets/toy/experiments/debugroial")
                 os.makedirs(debug_dir, exist_ok=True)
                 plg.plt.savefig(debug_dir/"mask_b{}_{}.png".format(b, mix))
                 plg.plt.imsave(debug_dir/"mask_b{}_{}_pooled_tv.png".format(b, mix), mask.cpu().numpy(), cmap="gray", vmin=0.)
                 plg.plt.imsave(debug_dir/"mask_b{}_{}_pooled_own.png".format(b, mix), masks_own[mix].cpu().numpy(), cmap="gray", vmin=0.)
     return
Ejemplo n.º 2
0
    def build(self):
        """Build the Retina Net: anchors, FPN backbone and prediction heads."""
        cf = self.cf

        # The FPN downsamples five times, so both spatial dims must be
        # multiples of 2**5 to avoid fractional feature-map sizes.
        stride = 2 ** 5
        h, w = cf.patch_size[:2]
        if h / stride != int(h / stride) or w / stride != int(w / stride):
            raise Exception("Image size must be divisible by 2 at least 5 times "
                            "to avoid fractions when downscaling and upscaling."
                            "For example, use 256, 320, 384, 448, 512, ... etc. ")

        backbone = utils.import_module('bbone', cf.backbone_path)
        self.logger.info("loaded backbone from {}".format(cf.backbone_path))
        conv = backbone.ConvGenerator(cf.dim)

        # anchors, feature pyramid, and the classification / box-regression heads
        self.np_anchors = mutils.generate_pyramid_anchors(self.logger, cf)
        self.anchors = torch.from_numpy(self.np_anchors).float().cuda()
        self.fpn = backbone.FPN(cf, conv, operate_stride1=cf.operate_stride1).cuda()
        self.classifier = Classifier(cf, conv).cuda()
        self.bb_regressor = BBRegressor(cf, conv).cuda()

        # optional RoI regression head: continuous features, binned values, or a no-op
        if 'regression' in cf.prediction_tasks:
            self.roi_regressor = RoIRegressor(cf, conv, cf.regression_n_features).cuda()
        elif 'regression_bin' in cf.prediction_tasks:
            # classify into bins of regression values
            self.roi_regressor = RoIRegressor(cf, conv, len(cf.bin_labels)).cuda()
        else:
            self.roi_regressor = lambda x: [torch.tensor([]).cuda()]

        # retina_unet additionally predicts a pixel-wise segmentation
        if cf.model == 'retina_unet':
            self.final_conv = conv(cf.end_filts, cf.num_seg_classes, ks=1, pad=0, norm=None, relu=None)
Ejemplo n.º 3
0
    def __init__(self, cf, logger):
        """Segmentation network: U-Net-like FPN plus a 1x1 output convolution.

        :param cf: configs object with architecture settings.
        :param logger: logger used to report backbone loading and weight init.
        """
        super(net, self).__init__()
        self.cf = cf
        self.logger = logger

        bbone = utils.import_module('bbone', cf.backbone_path)
        self.logger.info("loaded backbone from {}".format(self.cf.backbone_path))
        make_conv = bbone.ConvGenerator(cf.dim)

        # operate_stride1=True makes the FPN behave like a U-Net (full-res output)
        self.fpn = bbone.FPN(cf, conv=make_conv, relu_enc=cf.relu, operate_stride1=True)
        # 1x1 conv mapping FPN features to per-class segmentation logits
        self.conv_final = make_conv(cf.end_filts, cf.num_seg_classes, ks=1, pad=0, norm=cf.norm, relu=None)

        # parameter initialization
        if self.cf.weight_init == "custom":
            logger.info("Tried to use custom weight init which is not defined. Using pytorch default.")
        elif self.cf.weight_init:
            mutils.initialize_weights(self)
        else:
            logger.info("using default pytorch weight init")
Ejemplo n.º 4
0
    def test(self, n_cases=200, box_count=30, threshold=0.5):
        """Exercise the custom NMS extension on random box configurations in 2D and 3D.

        :param n_cases: number of random box configurations per dimensionality.
        :param box_count: number of boxes per configuration.
        :param threshold: IoU suppression threshold passed to each case.
        """
        # dynamically import module so that it doesn't affect other tests if import fails
        self.nms_ext = utils.import_module("nms_ext",
                                           'custom_extensions/nms/nms.py')

        self.manual_example()

        # change seed to something fix if you want exactly reproducible test
        seed0 = np.random.randint(50)
        for dim in (2, 3):
            print("NMS test progress (done/total box configurations) {}D:".format(dim), end="\n")
            for case_ix in tqdm.tqdm(range(n_cases)):
                self.single_case(count=box_count, dim=dim, threshold=threshold,
                                 seed=seed0 + case_ix)

        return
    def build(self):
        """Construct the Retina Net: anchors, feature pyramid and detection heads."""
        cf = self.cf

        # five 2x downsampling steps -> both spatial dims must be multiples of 32
        h, w = cf.patch_size[:2]
        if h / 2**5 != int(h / 2**5) or w / 2**5 != int(w / 2**5):
            raise Exception(
                "Image size must be dividable by 2 at least 5 times "
                "to avoid fractions when downscaling and upscaling."
                "For example, use 256, 320, 384, 448, 512, ... etc. ")

        # instanciate abstract multi dimensional conv class and backbone model.
        conv = mutils.NDConvGenerator(cf.dim)
        backbone = utils.import_module('bbone', cf.backbone_path)

        # anchors, FPN, and the classification / box-regression heads
        self.np_anchors = mutils.generate_pyramid_anchors(self.logger, cf)
        self.anchors = torch.from_numpy(self.np_anchors).float().cuda()
        self.Fpn = backbone.FPN(cf, conv, operate_stride1=cf.operate_stride1)
        self.Classifier = Classifier(cf, conv)
        self.BBRegressor = BBRegressor(cf, conv)
        # 1x1 conv producing per-class segmentation logits
        self.final_conv = conv(cf.end_filts, cf.num_seg_classes, ks=1, pad=0,
                               norm=cf.norm, relu=None)
Ejemplo n.º 6
0
    def specific_example_check(self):
        """Check the custom RoIAlign extension (2D and 3D) against a hand-computed example."""
        # dummy input
        self.ra_ext = utils.import_module("ra_ext", 'custom_extensions/roi_align/roi_align.py')
        exp = 6
        pool_size = (2,2)
        # feature map holding the values 0..35 in a 6x6 grid, shape (N=1, C=1, 6, 6)
        fmap = torch.arange(exp**2).view(exp,exp).unsqueeze(0).unsqueeze(0).cuda().type(dtype=torch.float32)

        # one box in normalized coordinates, scaled back to absolute coords below
        boxes = torch.tensor([[1., 1., 5., 5.]]).cuda()/exp
        ind = torch.tensor([0.]*len(boxes)).cuda().type(torch.float32)
        y_exp, x_exp = fmap.shape[2:]  # exp = expansion
        boxes.mul_(torch.tensor([y_exp, x_exp, y_exp, x_exp], dtype=torch.float32).cuda())
        # prepend the batch index expected in column 0 by both roi_align implementations
        boxes = torch.cat((ind.unsqueeze(1), boxes), dim=1)
        # NOTE(review): aligned_tv (torchvision reference) is computed but never asserted
        aligned_tv = tv.ops.roi_align(fmap, boxes, output_size=pool_size, sampling_ratio=-1)
        aligned = self.ra_ext.roi_align_2d(fmap, boxes, output_size=pool_size, sampling_ratio=-1)

        # extend the same example to 3D with a singleton z-dimension
        # (appended [-1., 1.] presumably are the z1/z2 box bounds — TODO confirm)
        boxes_3d = torch.cat((boxes, torch.tensor([[-1.,1.]]*len(boxes)).cuda()), dim=1)
        fmap_3d = fmap.unsqueeze(dim=-1)
        pool_size = (*pool_size,1)
        ra_object = self.ra_ext.RoIAlign(output_size=pool_size, spatial_scale=1.,)
        aligned_3d = ra_object(fmap_3d, boxes_3d)

        # hand-computed expected pooling results for the example above
        expected_res = torch.tensor([[[[10.5000, 12.5000],
                                       [22.5000, 24.5000]]]]).cuda()
        expected_res_3d = torch.tensor([[[[[10.5000],[12.5000]],
                                          [[22.5000],[24.5000]]]]]).cuda()
        assert torch.all(aligned==expected_res), "2D RoIAlign check vs. specific example failed. res: {}\n expected: {}\n".format(aligned, expected_res)
        assert torch.all(aligned_3d==expected_res_3d), "3D RoIAlign check vs. specific example failed. res: {}\n expected: {}\n".format(aligned_3d, expected_res_3d)
Ejemplo n.º 7
0
    def build(self):
        """Build the Retina Net with an I3D backbone instead of an FPN."""
        cf = self.cf

        # Image size must be dividable by 2 multiple times (5 downsampling steps).
        h, w = cf.patch_size[:2]
        if h / 2 ** 5 != int(h / 2 ** 5) or w / 2 ** 5 != int(w / 2 ** 5):
            raise Exception("Image size must be dividable by 2 at least 5 times "
                            "to avoid fractions when downscaling and upscaling."
                            "For example, use 256, 320, 384, 448, 512, ... etc. ")

        # instanciate abstract multi dimensional conv class and backbone model.
        conv = mutils.NDConvGenerator(cf.dim)
        backbone = utils.import_module('bbone', cf.backbone_path)

        # anchors covering all pyramid levels
        self.np_anchors = mutils.generate_pyramid_anchors(self.logger, cf)
        self.anchors = torch.from_numpy(self.np_anchors).float().cuda()
        self.num_classes = cf.head_classes

        # NOTE(review): pretrained RGB-weight transfer into the I3D backbone
        # ('weight/model_rgb.pth') is currently disabled; the backbone starts
        # from scratch.
        self.Fpn = backbone.I3D(num_classes=self.num_classes)
        self.Classifier = Classifier(cf, conv)
        self.BBRegressor = BBRegressor(cf, conv)
Ejemplo n.º 8
0
 def test(self):
     """Smoke-test a full train_test run of the configured toy model via exec.py."""
     cf = utils.import_module("toy_cf", 'datasets/toy/configs.py').Configs()
     exp_dir = "./unittesting/"
     # todo change structure of configs-handling with exec.py so that its dynamically parseable instead of needing to
     # todo be changed in the file all the time.
     checks = {cf.model: False}
     cmd = "python exec.py --dev --dataset_name toy -m train_test --exp_dir {}".format(exp_dir)
     completed_process = subprocess.run(cmd, shell=True, capture_output=True, text=True)
     if completed_process.returncode == 0:
         checks[cf.model] = True
     else:
         print("Runtime test of model {} failed due to\n{}".format(
             cf.model, completed_process.stderr))
     # clean up the temporary experiment directory regardless of outcome
     subprocess.call("rm -rf {}".format(exp_dir), shell=True)
     assert all(checks.values()), "A runtime test crashed."
Ejemplo n.º 9
0
    def test(self):
        """Run the 2D and 3D checks against the custom RoIAlign extension."""
        # dynamically import module so that it doesn't affect other tests if import fails
        self.ra_ext = utils.import_module("ra_ext", 'custom_extensions/roi_align/roi_align.py')

        self.check_2d()  # 2d test
        self.check_3d()  # 3d test
        return
Ejemplo n.º 10
0
    def __init__(self, cf, logger):
        """U-Net-like encoder/decoder segmentation network.

        Builds a stem conv, five down-sampling and five up-sampling stages with
        filter counts derived from cf.init_filts, and a final 1x1 conv producing
        per-class segmentation logits.

        :param cf: configs object providing architecture settings (filters,
            kernel sizes, padding, norm/relu choices, channels, classes).
        :param logger: logger used to report the weight-init strategy.
        """
        super(net, self).__init__()

        self.cf = cf
        self.dim = cf.dim
        self.norm = cf.norm
        self.logger = logger
        backbone = utils.import_module('bbone', cf.backbone_path)
        self.c_gen = backbone.ConvGenerator(cf.dim)
        self.Interpolator = backbone.Interpolate

        #down = DownBlockGen(cf.dim)
        #up = UpBlockGen(cf.dim, backbone.Interpolate)
        down = self.down
        up = self.up

        # "same" padding: keep spatial size constant for odd kernel sizes
        pad = cf.pad
        if pad=="same":
            pad = (cf.kernel_size-1)//2

        # bookkeeping attributes, filled in later
        self.dims = "not yet recorded"
        self.is_cuda = False

        # stem: maps the input channels to the initial filter count
        self.init = horiz_conv(len(cf.channels), cf.init_filts, cf.kernel_size, self.c_gen, self.norm, pad=pad,
                               relu=cf.relu)

        # encoder (maintain_z presumably preserves z-resolution in 3D — TODO confirm)
        self.down1 = down(cf.init_filts,    cf.init_filts*2,  cf.kernel_size, cf.kernel_size_m, pad=pad, relu=cf.relu)
        self.down2 = down(cf.init_filts*2,  cf.init_filts*4,  cf.kernel_size, cf.kernel_size_m, pad=pad, relu=cf.relu)
        self.down3 = down(cf.init_filts*4,  cf.init_filts*6,  cf.kernel_size, cf.kernel_size_m, pad=pad, relu=cf.relu)
        self.down4 = down(cf.init_filts*6,  cf.init_filts*8,  cf.kernel_size, cf.kernel_size_m, pad=pad, relu=cf.relu,
                          maintain_z=True)
        self.down5 = down(cf.init_filts*8,  cf.init_filts*12, cf.kernel_size, cf.kernel_size_m, pad=pad, relu=cf.relu,
                          maintain_z=True)
        #self.down6 = down(cf.init_filts*10, cf.init_filts*14, cf.kernel_size, cf.kernel_size_m, pad=pad, relu=cf.relu)

        # decoder, mirroring the encoder's filter progression
        #self.up1 = up(cf.init_filts*14, cf.init_filts*10, cf.kernel_size, pad=pad, relu=cf.relu)
        self.up2 = up(cf.init_filts*12, cf.init_filts*8,  cf.kernel_size, pad=pad, relu=cf.relu, maintain_z=True)
        self.up3 = up(cf.init_filts*8,  cf.init_filts*6,  cf.kernel_size, pad=pad, relu=cf.relu, maintain_z=True)
        self.up4 = up(cf.init_filts*6,  cf.init_filts*4,  cf.kernel_size, pad=pad, relu=cf.relu)
        self.up5 = up(cf.init_filts*4,  cf.init_filts*2,  cf.kernel_size, pad=pad, relu=cf.relu)
        self.up6 = up(cf.init_filts*2,  cf.init_filts,    cf.kernel_size, pad=pad, relu=cf.relu)

        # output head: 1x1 conv, no norm / activation (raw logits)
        self.seg = self.c_gen(cf.init_filts, cf.num_seg_classes, 1, norm=None, relu=None)

        # initialize parameters
        if self.cf.weight_init == "custom":
            logger.info("Tried to use custom weight init which is not defined. Using pytorch default.")
        elif self.cf.weight_init:
            mutils.initialize_weights(self)
        else:
            logger.info("using default pytorch weight init")
Ejemplo n.º 11
0
    def __init__(self, cf, logger):
        """U-Net-style segmentation net built from an FPN operating at stride 1.

        :param cf: configs object with architecture settings.
        :param logger: logger used to report the weight-init strategy.
        """
        super(net, self).__init__()
        self.cf = cf
        self.logger = logger

        bbone_module = utils.import_module('bbone', cf.backbone_path)
        conv = mutils.NDConvGenerator(cf.dim)

        # operate_stride1=True yields a unet-like FPN with full-resolution output
        self.fpn = bbone_module.FPN(cf, conv, operate_stride1=True).cuda()
        # 1x1 conv mapping FPN features to per-class segmentation logits
        self.conv_final = conv(cf.end_filts, cf.num_seg_classes, ks=1, pad=0, norm=cf.norm, relu=None)

        if self.cf.weight_init is None:
            logger.info("using default pytorch weight init")
        else:
            logger.info("using pytorch weight init of type {}".format(self.cf.weight_init))
            mutils.initialize_weights(self)
from skimage.morphology import convex_hull_image
from scipy.ndimage.interpolation import zoom
from scipy.ndimage.morphology import binary_dilation, generate_binary_structure

LS_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                       'lung-segmentation')
sys.path.append(LS_PATH)
import predict
from data import utils as data_utils

PROJECT_ROOT = Path(__file__).absolute().parent.parent.parent
sys.path.append(str(PROJECT_ROOT))
import utils.exp_utils as utils

# resolve configs.py relative to this file so the import works from any cwd
dir_path = os.path.dirname(os.path.realpath(__file__))
cf_file = utils.import_module("cf", os.path.join(dir_path, "configs.py"))
cf = cf_file.configs()
# Load lung segmentation model from MLFlow
remote_server_uri = "http://mlflow.10.7.13.202.nip.io/"
mlflow.set_tracking_uri(remote_server_uri)
model_name = "2-lungs-segmentation"
# NOTE(review): fetches the "production"-stage registered model at import time,
# which requires network access to the tracking server
unet = mlflow.pytorch.load_model(f"models:/{model_name}/production")


def resample_array(src_imgs, src_spacing, target_spacing):

    src_spacing = np.round(src_spacing, 3)
    target_shape = [
        int(src_imgs.shape[ix] * src_spacing[::-1][ix] /
            target_spacing[::-1][ix]) for ix in range(len(src_imgs.shape))
    ]
Ejemplo n.º 13
0
        os.path.join(fold_dir, 'file_list.txt'), source_dir, target_dir),
                    shell=True)
    n_threads = 8
    dutils.unpack_dataset(target_dir, threads=n_threads)
    copied_files = os.listdir(target_dir)
    t = utils.get_formatted_duration(time.time() - start_time)
    logger.info(
        "\ncopying and unpacking data set finished using {} threads.\n{} files in target dir: {}. Took {}\n"
        .format(n_threads, len(copied_files), target_dir, t))


if __name__ == "__main__":

    # start time for overall-duration reporting (presumably logged further
    # below, past this visible chunk)
    total_stime = time.time()

    # load dataset configs from the current working directory
    cf_file = utils.import_module("cf", "configs.py")
    cf = cf_file.configs()

    # minimal dev experiment setup
    cf.created_fold_id_pickle = False
    cf.exp_dir = "dev/"
    cf.plot_dir = cf.exp_dir + "plots"
    os.makedirs(cf.exp_dir, exist_ok=True)
    cf.fold = 0
    logger = utils.get_logger(cf.exp_dir)

    #batch_gen = get_train_generators(cf, logger)
    #train_batch = next(batch_gen["train"])

    # smoke-test the test-set generator by drawing a single batch
    test_gen = get_test_generator(cf, logger)
    test_batch = next(test_gen["test"])
Ejemplo n.º 14
0
        type=str,
        default='experiments/toy_exp',
        help=
        'specifies, from which source experiment to load configs and data_loader.'
    )

    args = parser.parse_args()
    folds = args.folds
    resume_to_checkpoint = args.resume_to_checkpoint

    if args.mode == 'train' or args.mode == 'train_test':

        cf = utils.prep_exp(args.exp_source, args.exp_dir, args.server_env,
                            args.use_stored_settings)
        cf.slurm_job_id = args.slurm_job_id
        model = utils.import_module('model', cf.model_path)
        data_loader = utils.import_module(
            'dl', os.path.join(args.exp_source, 'data_loader.py'))
        if folds is None:
            folds = range(cf.n_cv_splits)

        for fold in folds:
            cf.fold_dir = os.path.join(cf.exp_dir, 'fold_{}'.format(fold))
            cf.fold = fold
            cf.resume_to_checkpoint = resume_to_checkpoint
            if not os.path.exists(cf.fold_dir):
                os.mkdir(cf.fold_dir)
            logger = utils.get_logger(cf.fold_dir)
            train(logger)
            cf.resume_to_checkpoint = None
            if args.mode == 'train_test':
Ejemplo n.º 15
0
            #self.dataset_name = "datasets/prostate"
            self.dataset_name = "datasets/lidc"
            #self.exp_dir = "datasets/toy/experiments/mrcnnal2d_clkengal"  # detunet2d_di_bs16_ps512"
            #self.exp_dir = "/home/gregor/networkdrives/E132-Cluster-Projects/prostate/experiments/gs6071_retinau3d_cl_bs6"
            #self.exp_dir = "/home/gregor/networkdrives/E132-Cluster-Projects/prostate/experiments/gs6071_frcnn3d_cl_bs6"
            #self.exp_dir = "/home/gregor/networkdrives/E132-Cluster-Projects/prostate/experiments_t2/gs6071_mrcnn3d_cl_bs6_lessaug"
            #self.exp_dir = "/home/gregor/networkdrives/E132-Cluster-Projects/prostate/experiments/gs6071_detfpn3d_cl_bs6"
            #self.exp_dir = "/home/gregor/networkdrives/E132-Cluster-Projects/lidc_sa/experiments/ms12345_mrcnn3d_rgbin_bs8"
            self.exp_dir = '/home/gregor/Documents/medicaldetectiontoolkit/datasets/lidc/experiments/ms12345_mrcnn3d_rg_bs8'
            #self.exp_dir = '/home/gregor/Documents/medicaldetectiontoolkit/datasets/lidc/experiments/ms12345_mrcnn3d_rgbin_bs8'

            self.server_env = False
    args = Args()

    # data loader and configs come from the selected dataset / experiment dirs
    data_loader = utils.import_module('dl', os.path.join(args.dataset_name, "data_loader.py"))

    config_file = utils.import_module('cf', os.path.join(args.exp_dir, "configs.py"))
    cf = config_file.Configs()
    cf.exp_dir = args.exp_dir
    cf.test_dir = cf.exp_dir

    # analyse a single patient: pick the fold whose split contains this pid
    pid = '0811a'
    cf.fold = find_pid_in_splits(pid)
    #cf.fold = 0
    cf.merge_2D_to_3D_preds = False
    if cf.merge_2D_to_3D_preds:
        # BUGFIX: was `cf.dim==3`, a no-op comparison whose result was discarded;
        # merging 2D predictions into 3D requires the config to actually be set
        # to 3 dimensions.
        cf.dim = 3
    cf.fold_dir = os.path.join(cf.exp_dir, 'fold_{}'.format(cf.fold))
    anal_dir = os.path.join(cf.exp_dir, "inference_analysis")
Ejemplo n.º 16
0
        cf = utils.prep_exp(args.dataset_name, args.exp_dir, args.server_env,
                            args.use_stored_settings)
        if args.dev:
            folds = [0, 1]
            cf.batch_size, cf.num_epochs, cf.min_save_thresh, cf.save_n_models = 3 if cf.dim == 2 else 1, 2, 0, 2
            cf.num_train_batches, cf.num_val_batches, cf.max_val_patients = 5, 1, 1
            cf.test_n_epochs, cf.max_test_patients = cf.save_n_models, 2
            torch.backends.cudnn.benchmark = cf.dim == 3
        else:
            torch.backends.cudnn.benchmark = cf.cuda_benchmark
        if args.data_dest is not None:
            cf.data_dest = args.data_dest

        logger = utils.get_logger(cf.exp_dir, cf.server_env,
                                  cf.sysmetrics_interval)
        data_loader = utils.import_module(
            'data_loader', os.path.join(args.dataset_name, 'data_loader.py'))
        model = utils.import_module('model', cf.model_path)
        logger.info("loaded model from {}".format(cf.model_path))
        if folds is None:
            folds = range(cf.n_cv_splits)

        for fold in folds:
            """k-fold cross-validation: the dataset is split into k equally-sized folds, one used for validation,
            one for testing, the rest for training. This loop iterates k-times over the dataset, cyclically moving the
            splits. k==folds, fold in [0,folds) says which split is used for testing.
            """
            cf.fold_dir = os.path.join(cf.exp_dir, 'fold_{}'.format(fold))
            cf.fold = fold
            logger.set_logfile(fold=fold)
            cf.resume = resume
            if not os.path.exists(cf.fold_dir):
Ejemplo n.º 17
0
                            args.use_stored_settings)
        if args.dev:
            # shrink the run to a quick dev configuration
            folds = [0, 1]
            cf.batch_size, cf.num_epochs, cf.min_save_thresh, cf.save_n_models = 3 if cf.dim == 2 else 1, 1, 0, 2
            cf.num_train_batches, cf.num_val_batches, cf.max_val_patients = 5, 1, 1
            cf.test_n_epochs = cf.save_n_models
            cf.max_test_patients = 2

        cf.data_dest = args.data_dest
        logger = utils.get_logger(cf.exp_dir, cf.server_env)
        logger.info("cudnn benchmark: {}, deterministic: {}.".format(
            torch.backends.cudnn.benchmark,
            torch.backends.cudnn.deterministic))
        logger.info("sending tensors to CUDA device: {}.".format(
            torch.cuda.get_device_name(args.cuda_device)))
        data_loader = utils.import_module(
            'dl', os.path.join(args.exp_source, 'data_loader.py'))
        model = utils.import_module('mdt_model', cf.model_path)
        logger.info("loaded model from {}".format(cf.model_path))
        if folds is None:
            folds = range(cf.n_cv_splits)

        # MLFlow new experiment
        try:
            if args.mlflow_artifacts_uri is not None:
                exp_id = mlflow.create_experiment(
                    args.mlflow_experiment_id,
                    artifact_location=args.mlflow_artifacts_uri)
            else:
                exp_id = mlflow.create_experiment(args.mlflow_experiment_id)
        except Exception:
            # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; create_experiment raises when the experiment already
            # exists, in which case we fall back to reusing it.
            exp_id = mlflow.set_experiment(args.mlflow_experiment_id)
Ejemplo n.º 18
0
    def test(self):
        """Check the multithreaded val data loader yields each pid the expected
        number of times and only pids from the val set."""
        print("Testing multithreaded iterator.")

        dataset = "toy"
        exp_dir = Path("datasets/{}/experiments/dev".format(dataset))
        cf_file = utils.import_module("cf_file", exp_dir / "configs.py")
        cf = cf_file.Configs()
        dloader = utils.import_module(
            'data_loader', 'datasets/{}/data_loader.py'.format(dataset))
        cf.exp_dir = Path(exp_dir)
        cf.n_workers = 5

        cf.batch_size = 3
        cf.fold = 0
        cf.plot_dir = cf.exp_dir / "plots"
        logger = utils.get_logger(cf.exp_dir, cf.server_env,
                                  cf.sysmetrics_interval)
        # draw all val batches per epoch via sampling
        cf.num_val_batches = "all"
        cf.val_mode = "val_sampling"
        cf.n_workers = 8  # overrides the n_workers=5 set above
        batch_gens = dloader.get_train_generators(cf,
                                                  logger,
                                                  data_statistics=False)
        val_loader = batch_gens["val_sampling"]

        # phase 1: sampling-based val loader — pids may repeat, but at most twice
        for epoch in range(4):
            produced_ids = []
            for i in range(batch_gens['n_val']):
                batch = next(val_loader)
                produced_ids.append(batch["pid"])
            uni, cts = np.unique(np.concatenate(produced_ids),
                                 return_counts=True)
            # NOTE(review): condition allows counts of 1 or 2, but the message
            # claims "exactly once" — message and condition disagree.
            assert np.all(
                cts < 3
            ), "with batch size one: every item should occur exactly once.\n uni {}, cts {}".format(
                uni[cts > 2], cts[cts > 2])
            #assert len(np.setdiff1d(val_loader.generator.dataset_pids, uni))==0, "not all val pids were shown."
            assert len(np.setdiff1d(uni, val_loader.generator.dataset_pids)
                       ) == 0, "pids shown that are not val set. impossible?"

        # phase 2: exhaustive, non-replacement pipeline — every pid exactly once
        cf.n_workers = os.cpu_count()
        cf.batch_size = int(
            val_loader.generator.dataset_length / cf.n_workers) + 2
        val_loader = dloader.create_data_gen_pipeline(
            cf,
            val_loader.generator._data,
            do_aug=False,
            sample_pids_w_replace=False,
            max_batches=None,
            raise_stop_iteration=True)
        for epoch in range(2):
            produced_ids = []
            for b, batch in enumerate(val_loader):
                produced_ids.append(batch["pid"])
            uni, cts = np.unique(np.concatenate(produced_ids),
                                 return_counts=True)
            assert np.all(
                cts == 1
            ), "with batch size one: every item should occur exactly once.\n uni {}, cts {}".format(
                uni[cts > 1], cts[cts > 1])
            assert len(np.setdiff1d(val_loader.generator.dataset_pids,
                                    uni)) == 0, "not all val pids were shown."
            assert len(np.setdiff1d(uni, val_loader.generator.dataset_pids)
                       ) == 0, "pids shown that are not val set. impossible?"

        pass