def cli_main(args):
    """Entry point: build the fastMRI data module, U-Net model, and
    Lightning trainer from parsed CLI args, then train or test."""
    pl.seed_everything(args.seed)

    # ------------
    # data
    # ------------
    # k-space subsampling mask shared by the train/val transforms
    subsample_mask = create_mask_for_mask_type(
        args.mask_type, args.center_fractions, args.accelerations
    )

    # use random masks for train transform, fixed masks for val transform
    train_transform = UnetDataTransform(
        args.challenge, mask_func=subsample_mask, use_seed=False
    )
    val_transform = UnetDataTransform(args.challenge, mask_func=subsample_mask)
    test_transform = UnetDataTransform(args.challenge)

    # ptl data module - this handles data loaders
    data_module = FastMriDataModule(
        data_path=args.data_path,
        challenge=args.challenge,
        train_transform=train_transform,
        val_transform=val_transform,
        test_transform=test_transform,
        test_split=args.test_split,
        test_path=args.test_path,
        sample_rate=args.sample_rate,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        # a distributed sampler is only needed for the DDP backends
        distributed_sampler=(args.accelerator in ("ddp", "ddp_cpu")),
        proportion=args.proportion,
    )

    # ------------
    # model
    # ------------
    model = UnetModule(
        in_chans=args.in_chans,
        out_chans=args.out_chans,
        chans=args.chans,
        num_pool_layers=args.num_pool_layers,
        drop_prob=args.drop_prob,
        lr=args.lr,
        lr_step_size=args.lr_step_size,
        lr_gamma=args.lr_gamma,
        weight_decay=args.weight_decay,
    )

    # ------------
    # trainer
    # ------------
    trainer = pl.Trainer.from_argparse_args(args)

    # ------------
    # run
    # ------------
    if args.mode == "train":
        trainer.fit(model, datamodule=data_module)
    elif args.mode == "test":
        trainer.test(model, datamodule=data_module)
    else:
        raise ValueError(f"unrecognized mode {args.mode}")
def __init__(self, in_chans, out_chans, dropout, decoder_channels, lr,
             lr_step_size, lr_gamma, weight_decay, data_path, batch_size,
             mask_type, center_fractions, accelerations, optim_eps):
    """Build the ENet backbone and the singlecoil fastMRI data module.

    Optimizer-related arguments (lr, lr_step_size, lr_gamma, weight_decay,
    optim_eps) are stored for later use by the optimizer configuration.
    """
    super().__init__()
    self.save_hyperparameters()

    # mirror constructor arguments as attributes
    self.in_chans = in_chans
    self.out_chans = out_chans
    self.decoder_channels = decoder_channels
    self.lr = lr
    self.lr_step_size = lr_step_size
    self.lr_gamma = lr_gamma
    self.weight_decay = weight_decay
    self.optim_eps = optim_eps

    # backbone network
    self.net = ENet(
        in_channels=in_chans,
        out_channels=out_chans,
        decoder_channels=decoder_channels,
        dropout=dropout,
    )

    # k-space subsampling mask: random per slice for training,
    # seeded (reproducible) for validation
    subsample_mask = create_mask_for_mask_type(
        mask_type, center_fractions, accelerations
    )
    transform_train = UnetDataTransform(
        'singlecoil', mask_func=subsample_mask, use_seed=False
    )
    transform_val = UnetDataTransform('singlecoil', mask_func=subsample_mask)
    transform_test = UnetDataTransform('singlecoil')

    self.data_module = FastMriDataModule(
        data_path=pathlib.Path(data_path),
        challenge='singlecoil',
        train_transform=transform_train,
        val_transform=transform_val,
        test_transform=transform_test,
        test_split='test',
        test_path=None,
        sample_rate=1.0,
        batch_size=batch_size,
        num_workers=4,
        distributed_sampler=False,
    )
def test_unet_trainer(fastmri_mock_dataset, backend, tmp_path, monkeypatch):
    """Smoke-test one fast_dev_run of the U-Net trainer on the mock dataset."""
    knee_path, _, metadata = fastmri_mock_dataset

    # serve metadata from the mock dict instead of reading real files
    def fake_retrieve_metadata(a, fname):
        return metadata[str(fname)]

    monkeypatch.setattr(SliceDataset, "_retrieve_metadata", fake_retrieve_metadata)

    params = build_unet_args(knee_path, tmp_path, backend)
    params.fast_dev_run = True
    params.backend = backend

    # k-space subsampling mask shared by train/val transforms
    subsample_mask = create_mask_for_mask_type(
        params.mask_type, params.center_fractions, params.accelerations
    )
    train_transform = UnetDataTransform(
        params.challenge, mask_func=subsample_mask, use_seed=False
    )
    val_transform = UnetDataTransform(params.challenge, mask_func=subsample_mask)
    test_transform = UnetDataTransform(params.challenge)

    data_module = FastMriDataModule(
        data_path=params.data_path,
        challenge=params.challenge,
        train_transform=train_transform,
        val_transform=val_transform,
        test_transform=test_transform,
        test_split=params.test_split,
        sample_rate=params.sample_rate,
        batch_size=params.batch_size,
        num_workers=params.num_workers,
        distributed_sampler=(params.accelerator == "ddp"),
        use_dataset_cache_file=False,
    )

    model = UnetModule(
        in_chans=params.in_chans,
        out_chans=params.out_chans,
        chans=params.chans,
        num_pool_layers=params.num_pool_layers,
        drop_prob=params.drop_prob,
        lr=params.lr,
        lr_step_size=params.lr_step_size,
        lr_gamma=params.lr_gamma,
        weight_decay=params.weight_decay,
    )

    trainer = Trainer.from_argparse_args(params)
    trainer.fit(model, data_module)
def get_dataloaders_fastmri(mask_type='random', center_fractions=None,
                            accelerations=None, challenge='singlecoil',
                            batch_size=8, num_workers=4,
                            distributed_bool=False, dataset_dir=dataset_dir,
                            mri_dir='fastmri/knee/', worker_init_fn=None,
                            include_test=False, **kwargs):
    """Build fastMRI dataloaders keyed by split name.

    Returns a dict with 'train' and 'validation' loaders, plus 'test' when
    ``include_test`` is True.

    FIX: ``center_fractions`` and ``accelerations`` previously used mutable
    list defaults ([0.08] and [4]); they now default to None and fall back
    to the same values, so the lists are no longer shared across calls.
    Callers passing explicit values are unaffected.

    NOTE(review): ``worker_init_fn`` and ``**kwargs`` are accepted but never
    used in this body — kept for interface compatibility; confirm whether
    they were meant to be forwarded to the data module.
    """
    if center_fractions is None:
        center_fractions = [0.08]
    if accelerations is None:
        accelerations = [4]

    data_path = Path(os.path.join(dataset_dir, mri_dir))

    # k-space subsampling mask shared by train/val transforms
    mask = create_mask_for_mask_type(
        mask_type_str=mask_type,
        center_fractions=center_fractions,
        accelerations=accelerations,
    )

    # use random masks for train transform, fixed masks for val transform
    train_transform = UnetDataTransform(challenge, mask_func=mask, use_seed=False)
    val_transform = UnetDataTransform(challenge, mask_func=mask)
    test_transform = UnetDataTransform(challenge)

    # ptl data module - this handles data loaders
    data_module = FastMriDataModule(
        data_path=data_path,
        challenge=challenge,
        train_transform=train_transform,
        val_transform=val_transform,
        test_transform=test_transform,
        batch_size=batch_size,
        num_workers=num_workers,
        distributed_sampler=distributed_bool,
    )

    dataloaders = {
        'train': data_module.train_dataloader(),
        'validation': data_module.val_dataloader(),
    }
    if include_test:
        dataloaders['test'] = data_module.test_dataloader()
    return dataloaders
def init_model(args):
    """Select a torch device and build the fastMRI train/val/test dataloaders.

    Returns (train_loader, val_loader, test_loader, device).

    FIX: ``distributed_sampler`` previously received the string "ddp", which
    only worked because any non-empty string is truthy; the parameter is a
    boolean flag, so pass True explicitly (identical runtime behavior).
    """
    # pick GPU when available, otherwise fall back to CPU
    if torch.cuda.is_available():
        device = torch.device("cuda")
        print(f'There are {torch.cuda.device_count()} GPU(s) available.')
        print('Device name:', torch.cuda.get_device_name(0))
    else:
        print('No GPU available, using the CPU instead.')
        device = torch.device("cpu")

    # this creates a k-space mask for transforming input data
    mask = create_mask_for_mask_type(
        args.mask_type, args.center_fractions, args.accelerations
    )

    # use random masks for train transform, fixed masks for val transform
    train_transform = UnetDataTransform('singlecoil', mask_func=mask, use_seed=False)
    val_transform = UnetDataTransform('singlecoil', mask_func=mask)
    test_transform = UnetDataTransform('singlecoil')

    # Initialize Process Group (single-process "gloo" group, file rendezvous)
    dist.init_process_group(
        'gloo', init_method='file:///tmp/somefile', rank=0, world_size=1
    )

    # define the data loaders
    batch_size = args.batch_size

    # create object for data module
    data_module = FastMriDataModule(
        data_path=args.data_path,
        challenge='singlecoil',
        train_transform=train_transform,
        val_transform=val_transform,
        test_transform=test_transform,
        test_split='test',
        # NOTE(review): assumes args.data_path is a str; '+' fails on a
        # pathlib.Path — confirm against the caller
        test_path=args.data_path + '/singlecoil_test',
        sample_rate=1,
        batch_size=batch_size,
        # may can use multiple workers here with linux?
        num_workers=0,
        distributed_sampler=True,  # was the truthy string "ddp"
    )

    # save data to dataloader
    dataloader_tr = data_module.train_dataloader()
    dataloader_val = data_module.val_dataloader()
    dataloader_test = data_module.test_dataloader()
    return dataloader_tr, dataloader_val, dataloader_test, device
def cli_main(args):
    """Fine-tune a U-Net (or nested U-Net) from a pretrained checkpoint.

    Loads trunk weights from a classy-vision style state dict, remaps the
    keys into the plain module namespace, then trains or tests.

    FIX: an unrecognized ``args.unet_module`` previously left ``model`` as
    None and crashed later with an opaque AttributeError on ``model.unet``;
    it now raises a clear ValueError up front.
    """
    pl.seed_everything(args.seed)

    # ------------
    # data
    # ------------
    # this creates a k-space mask for transforming input data
    mask = create_mask_for_mask_type(
        args.mask_type, args.center_fractions, args.accelerations
    )

    # use random masks for train transform, fixed masks for val transform
    train_transform = UnetDataTransform(args.challenge, mask_func=mask, use_seed=False)
    val_transform = UnetDataTransform(args.challenge, mask_func=mask)
    test_transform = UnetDataTransform(args.challenge)

    # ptl data module - this handles data loaders
    data_module = FastMriDataModule(
        data_path=args.data_path,
        challenge=args.challenge,
        train_transform=train_transform,
        val_transform=val_transform,
        test_transform=test_transform,
        test_split=args.test_split,
        test_path=args.test_path,
        sample_rate=args.sample_rate,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        distributed_sampler=(args.accelerator in ("ddp", "ddp_cpu")),
    )

    # ------------
    # model
    # ------------
    if args.unet_module == "unet":
        model = UnetModule(
            in_chans=args.in_chans,
            out_chans=args.out_chans,
            chans=int(args.chans),
            num_pool_layers=args.num_pool_layers,
            drop_prob=args.drop_prob,
            lr=args.lr,
            lr_step_size=args.lr_step_size,
            lr_gamma=args.lr_gamma,
            weight_decay=args.weight_decay,
            # NOTE(review): attribute is spelled "optmizer" — presumably the
            # argparse flag is misspelled upstream; confirm before renaming
            optimizer=args.optmizer,
        )
    elif args.unet_module == "nestedunet":
        model = NestedUnetModule(
            in_chans=args.in_chans,
            out_chans=args.out_chans,
            chans=args.chans,
            num_pool_layers=args.num_pool_layers,
            drop_prob=args.drop_prob,
            lr=args.lr,
            lr_step_size=args.lr_step_size,
            lr_gamma=args.lr_gamma,
            weight_decay=args.weight_decay,
            optimizer=args.optmizer,
        )
    else:
        # fail fast instead of crashing later on model.unet with model=None
        raise ValueError(f"unrecognized unet_module {args.unet_module}")

    if args.device == "cuda" and not torch.cuda.is_available():
        raise ValueError(
            "The requested cuda device isn't available please set --device cpu"
        )

    # load the pretrained trunk and remap its keys to the plain namespace
    pretrained_dict = torch.load(args.state_dict_file, map_location=args.device)
    model_dict = model.unet.state_dict()
    trunk = pretrained_dict["classy_state_dict"]["base_model"]["model"]["trunk"]
    if args.unet_module == "unet":
        prefix = "_feature_blocks.unetblock."
    else:
        prefix = "_feature_blocks.nublock."
    model_dict = {k: trunk[prefix + k] for k in model_dict}
    model.unet.load_state_dict(model_dict)

    # ------------
    # trainer
    # ------------
    trainer = pl.Trainer.from_argparse_args(args)

    # ------------
    # run
    # ------------
    output_filename = f"fine_tuned_{args.unet_module}.torch"
    output_model_filepath = f"{args.output_path}/{output_filename}"
    if args.mode == "train":
        trainer.fit(model, datamodule=data_module)
        print(f"Saving model: {output_model_filepath}")
        torch.save(model.state_dict(), output_model_filepath)
        print("DONE!")
    elif args.mode == "test":
        trainer.test(model, datamodule=data_module)
    else:
        raise ValueError(f"unrecognized mode {args.mode}")