    'n_epochs': 300,
    'lr': 1e-4,
    'device': 'cuda',
}
# an optional JSON config passed on the command line overrides the defaults
try:
    CONFIG.update(load_json(sys.argv[5]))
except (IndexError, FileNotFoundError):
    pass

PATCH_SIZE = np.array([64, 64, 32])

# dataset: crop each scan to its brain bounding box, min-max scale the intensities,
# then interpolate to the source inter-slice spacing and cache the results
raw_dataset = BraTS2013(BRATS_PATH)
dataset = apply(CropToBrain(raw_dataset), load_image=partial(min_max_scale, axes=0))
train_dataset = cache_methods(ChangeSliceSpacing(dataset, new_slice_spacing=CONFIG['source_slice_spacing']))

# cross validation
split = load_json(SPLIT_PATH)
train_ids, val_ids, test_ids = split[int(FOLD)]

# batch iterator: 3D patches centered on a tumor voxel with probability .5,
# plus random mirroring along one spatial axis (axis 1 of the image matches
# axis 0 of the gt, since the image carries a leading channel axis)
batch_iter = Infinite(
    load_by_random_id(train_dataset.load_image, train_dataset.load_gt, ids=train_ids),
    unpack_args(tumor_sampling, patch_size=PATCH_SIZE, tumor_p=.5),
    random_apply(.5, unpack_args(lambda image, gt: (np.flip(image, 1), np.flip(gt, 0)))),
    batch_size=CONFIG['batch_size'],
    batches_per_epoch=CONFIG['batches_per_epoch']
)

# model
model = nn.Sequential(
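# NOTE: `tumor_sampling` is defined elsewhere in the project and is not part of
# this fragment. Below is a minimal sketch of the strategy its call site suggests:
# with probability `tumor_p`, crop a patch centered on a random tumor voxel,
# otherwise crop a uniformly random patch. Only the signature is taken from the
# call above; the body is an assumption, not the project's actual code.
import numpy as np

def tumor_sampling(image, gt, patch_size, tumor_p=.5):
    # `image` is (channels, *spatial), `gt` is (*spatial)
    spatial = np.array(gt.shape)
    patch_size = np.minimum(np.asarray(patch_size), spatial)

    tumor_voxels = np.argwhere(gt > 0)
    if len(tumor_voxels) and np.random.rand() < tumor_p:
        center = tumor_voxels[np.random.randint(len(tumor_voxels))]
        # clamp the patch so that it stays inside the volume
        start = np.clip(center - patch_size // 2, 0, spatial - patch_size)
    else:
        start = np.array([np.random.randint(s + 1) for s in spatial - patch_size])

    idx = tuple(slice(b, b + s) for b, s in zip(start, patch_size))
    return image[(slice(None), *idx)], gt[idx]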
    'batch_size': 30,
    'batches_per_epoch': 100,
    'n_epochs': 100,
    'lr': 1e-3,
    'device': 'cuda',
}
# an optional JSON config passed on the command line overrides the defaults
try:
    CONFIG.update(load_json(sys.argv[5]))
except (IndexError, FileNotFoundError):
    pass

# dataset: binarize the ground truth, crop to the brain bounding box,
# min-max scale the intensities, and interpolate to several inter-slice spacings
raw_dataset = BinaryGT(BraTS2013(BRATS_PATH), positive_classes=CONFIG['positive_classes'])
dataset = cache_methods(ZooOfSpacings(
    apply(CropToBrain(raw_dataset), load_image=partial(min_max_scale, axes=0)),
    slice_spacings=CONFIG['slice_spacings']
))

# cross validation
split = load_json(SPLIT_PATH)
train_ids, val_ids, test_ids = split[int(FOLD)]

# batch iterator: random 2D slices with random mirroring; the ground truth
# gets a channel axis and a float dtype, and samples of different shapes
# are padded to a common shape by `combine_pad`
batch_iter = Infinite(
    load_by_random_id(dataset.load_image, dataset.load_gt, ids=train_ids),
    unpack_args(get_random_slice),
    random_apply(.5, unpack_args(lambda image, gt: (np.flip(image, 1), np.flip(gt, 0)))),
    apply_at(1, prepend_dims),
    apply_at(1, np.float32),
    batch_size=CONFIG['batch_size'],
    batches_per_epoch=CONFIG['batches_per_epoch'],
    combiner=combine_pad
)
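# NOTE: `get_random_slice` is likewise defined elsewhere in the project. Its name
# and usage suggest that it picks one axial slice at random from an (image, gt)
# pair; a minimal sketch under that assumption:
import numpy as np

def get_random_slice(image, gt):
    # `image` is (channels, x, y, z), `gt` is (x, y, z); slice along the last axis
    idx = np.random.randint(image.shape[-1])
    return image[..., idx], gt[..., idx]

# Because `CropToBrain` gives every scan its own bounding box, the sampled 2D
# slices differ in shape, which is why `Infinite` is given `combine_pad` as its
# combiner: it pads the samples of a batch to a common shape before stacking.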