Code Example #1
def create_arg_parser():
    parser = Args()
    parser.add_argument('--out-path',
                        type=pathlib.Path,
                        required=True,
                        help='Path to save the reconstructions to')
    return parser
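Throughout this page, Args comes from common.args (imported explicitly in Code Examples #9 and #22) and is used like an argparse.ArgumentParser subclass: it exposes add_argument, parse_args, and parse_known_args. A minimal usage sketch under that assumption, with a hypothetical output path:

# Minimal usage sketch; assumes Args behaves like argparse.ArgumentParser
parser = create_arg_parser()
args = parser.parse_args(['--out-path', 'reconstructions'])  # hypothetical path
print(args.out_path)  # a pathlib.Path, thanks to type=pathlib.Path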
Code Example #2
def delegate(self, args, callback):
    print('delegating to the callback method')
    # For example, for Kafka: start the consumer here that listens on a certain topic
    newArgs = Args()
    newArgs.timestamp = time.time()
    newArgs.requestid = 'abc'
    newArgs.data = 'data'
    callback(newArgs)
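delegate above simply fills an Args instance with request attributes and hands it to the caller-supplied callback. A hedged sketch of a compatible callback, assuming only the attributes set above (the handler and source names are hypothetical):

# Hypothetical handler; reads only the attributes delegate() sets
def handle_request(request_args):
    print(request_args.requestid, request_args.timestamp, request_args.data)

# with some delegating source object (hypothetical name):
# source.delegate(cli_args, handle_request)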
Code Example #3
File: train_epoch.py  Project: idan94/mri_project
def train():
    # Args and output stuff
    args = Args().parse_args()
    writer = SummaryWriter(log_dir=args.output_dir)
    pathlib.Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    with open(args.output_dir + '/args.txt', "w") as text_file:
        for arg in vars(args):
            print(str(arg) + ': ' + str(getattr(args, arg)), file=text_file)
    # Define models:
    sampler = Sampler(args.resolution, args.decimation_rate,
                      args.decision_levels, args.sampler_convolution_channels,
                      args.sampler_convolution_layers,
                      args.sampler_linear_layers)
    adversarial = Adversarial(args.resolution,
                              args.adversarial_convolution_channels,
                              args.adversarial_convolution_layers,
                              args.adversarial_linear_layers)
    reconstructor = UnetModel(
        in_chans=2,
        out_chans=1,
        chans=args.reconstruction_unet_chans,
        num_pool_layers=args.reconstruction_unet_num_pool_layers,
        drop_prob=args.reconstruction_unet_drop_prob)
    # Define optimizer:
    adversarial_optimizer = torch.optim.Adam(
        adversarial.parameters(),
        lr=args.adversarial_lr,
    )
    sampler_optimizer = torch.optim.Adam(sampler.parameters(),
                                         lr=args.sampler_lr)
    reconstructor_optimizer = torch.optim.Adam(reconstructor.parameters(),
                                               lr=args.reconstructor_lr)

    # TODO: check this
    # this will be used to reset the gradients of the entire model
    over_all_optimizer = torch.optim.Adam(
        list(adversarial.parameters()) + list(sampler.parameters()) +
        list(reconstructor.parameters()))

    # TODO: remove this line, each NN needs its own data loader with its own sample rate
    args.sample_rate = 0.2
    train_data_loader, val_data_loader, display_data_loader = load_data(args)

    if args.loss_fn == "MSE":
        loss_function = nn.MSELoss()
    else:
        loss_function = nn.L1Loss()
    print('~~~Starting Training~~~')
    print('We will now run ' + str(args.num_epochs) + ' epochs')
    for epoch_number in range(args.num_epochs):
        train_epoch(sampler, adversarial, reconstructor, train_data_loader,
                    display_data_loader, loss_function,
                    args.reconstructor_sub_epochs, adversarial_optimizer,
                    sampler_optimizer, reconstructor_optimizer,
                    over_all_optimizer, epoch_number + 1, writer)
    print('~~~Finished Training~~~')
    writer.close()
Code Example #4
def delegate(self, args, callback):
    print('delegating to the callback method')
    # Sends a message to the callback whenever one is received from the consumer
    for message in self.consumer:
        # print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition, message.offset, message.key, message.value))
        newArgs = Args()  # other args
        newArgs.name = self.name
        newArgs.timestamp = time.time()
        newArgs.requestid = 'abc'
        newArgs.topic = message.topic
        newArgs.partition = message.partition
        newArgs.offset = message.offset
        newArgs.key = message.key
        newArgs.value = message.value

        nnDataBase = NNImageData(message.key, message.value, newArgs)  # key and value are standard data for this
        callback(nnDataBase)
Code Example #5
def main():
    torch.manual_seed(0)
    args = Args().parse_args()
    args.data_path = "../" + args.data_path
    index = 3
    train_data_loader, val_data_loader, display_data_loader = load_data(args)
    for k_space, target, f_name, slice in display_data_loader:
        sampling_vector = [[[i, j] for i in range(k_space.shape[1])]
                           for j in range(k_space.shape[2])]
        sampling_vector = torch.tensor(sampling_vector).float()
        sampling_vector = sampling_vector - 0.5 * k_space.shape[1]
        sampling_vector = sampling_vector.reshape(-1, 2)
        sampling_vector = sampling_vector.expand(k_space.shape[0], -1, -1)
        images = sample_vector(k_space, sampling_vector)
        break

    for i in range(images.shape[0]):
        show(ifft2(images[i]))
Code Example #6
File: subsampeling_NN.py  Project: idan94/mri_project
def main():
    args = Args().parse_args()
    train_data_loader, val_data_loader, display_data_loader = load_data(args)
    model = subsampeling_NN(args.resolution, 6, 3, 3, 3, 3)
    for k_space, target, f_name, slice in display_data_loader:
        window_index = (k_space.shape[1] // 2, k_space.shape[1] // 2)
        a = model(k_space, 4, window_index, -1)
        a = torch.norm(a)
        a.backward()
        break
Code Example #7
def main():
    args = Args().parse_args()
    train_data_loader, val_data_loader, display_data_loader = load_data(args)
    model = sampeling_pattern_intersection(args.resolution, 5, 2, 0.01)
    for k_space, target, f_name, slice in display_data_loader:
        indexes = torch.randint(0, 320, (50, 2))

        a = model(k_space, indexes)
        a = torch.norm(a)
        a.backward()
        break
Code Example #8
def create_arg_parser():
    parser = Args()

    parser.add_argument('--data-path', type=str, help='Path to the dataset')
    parser.add_argument('--recons-path',
                        type=str,
                        help='Path where reconstructions are to be saved')
    parser.add_argument(
        '--acceleration',
        type=int,
        help='Ratio of k-space columns to be sampled. 5x or 10x masks provided'
    )

    return parser
Code Example #9
__version__ = "1.0.1"
__maintainer__ = "Hammad"
__email__ = "*****@*****.**"
__status__ = "NA"

# AutoGenerated by VX ML Pipeline #
from source.sourcetemplate import SourceTemplate
from target.targettemplate import TargetTemplate
from source.kafkasource import KafkaSource
from target.kafkatarget import KafkaTarget
from nn.nntemplate import NNTemplate
from nn.nnResNet50v2 import NNResNet50v2
from common.args import Args

# BEGIN Section: Based on Templates
sourceArgs = Args()
sourceArgs.ip = '192.168.1.144'
sourceArgs.port = '9092'
sourceArgs.user = '******'
sourceArgs.password = '******'

targetArgs = Args()
targetArgs.ip = '192.168.1.144'
targetArgs.port = '9092'
targetArgs.user = '******'
targetArgs.password = '******'

nnArgs = Args()
nnArgs.ip = '192.168.1.144'
nnArgs.port = '9092'
nnArgs.user = '******'
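In this auto-generated pipeline stub, Args is used purely as an attribute bag for connection settings (the credential values are masked in the source). Any object that accepts arbitrary attribute assignment would work the same way; a sketch of the equivalent pattern with types.SimpleNamespace and placeholder values:

from types import SimpleNamespace

# Same attribute-bag pattern with placeholder connection settings
sourceArgs = SimpleNamespace(ip='192.168.1.144', port='9092',
                             user='<user>', password='<password>')
print(sourceArgs.ip, sourceArgs.port)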
Code Example #10
def create_arg_parser():
    parser = Args()
    parser.add_argument('--test-name',
                        type=str,
                        default='gaussiantsp-d24-a1e-3-v1e-3',
                        help='name for the output dir')
    parser.add_argument('--exp-dir',
                        type=pathlib.Path,
                        default='summary/testepi',
                        help='Path where model and results should be saved')
    parser.add_argument(
        '--resume',
        action='store_true',
        help='If set, resume the training from a previous model checkpoint. '
        '"--checkpoint" should be set with this')
    parser.add_argument(
        '--checkpoint',
        type=str,
        default='summary/test/model.pt',
        help='Path to an existing checkpoint. Used along with "--resume"')
    parser.add_argument('--report-interval',
                        type=int,
                        default=100,
                        help='Period of loss reporting')

    # model parameters
    #parser.add_argument('--num-pools', type=int, default=4, help='Number of U-Net pooling layers')
    #parser.add_argument('--drop-prob', type=float, default=0.0, help='Dropout probability')
    #parser.add_argument('--num-chans', type=int, default=32, help='Number of U-Net channels')

    parser.add_argument('--f_maps',
                        type=int,
                        default=32,
                        help='Number of U-Net feature maps')
    parser.add_argument(
        '--data-parallel',
        action='store_true',
        default=False,
        help='If set, use multiple GPUs using data parallelism')
    parser.add_argument(
        '--device',
        type=str,
        default='cuda',
        help='Which device to train on. Set to "cuda" to use the GPU')
    parser.add_argument(
        '--acceleration-factor',
        default=20,
        type=int,
        help='Number of shots in the multishot trajectory, '
        'calculated as res*depth/acceleration_factor')

    # optimization parameters
    parser.add_argument('--batch-size',
                        default=16,
                        type=int,
                        help='Mini batch size')
    parser.add_argument('--num-epochs',
                        type=int,
                        default=40,
                        help='Number of training epochs')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        help='Learning rate')
    parser.add_argument('--lr-step-size',
                        type=int,
                        default=30,
                        help='Period of learning rate decay')
    parser.add_argument('--lr-gamma',
                        type=float,
                        default=0.01,
                        help='Multiplicative factor of learning rate decay')
    parser.add_argument('--weight-decay',
                        type=float,
                        default=0.,
                        help='Strength of weight decay regularization')
    parser.add_argument('--sub-lr',
                        type=float,
                        default=1e-1,
                        help='learning rate of the sub-sampling layer')

    # trajectory learning parameters
    parser.add_argument(
        '--trajectory-learning',
        default=True,
        action='store_false',
        help='Learn the trajectory; if the flag is passed, the trajectory is '
        'fixed and only the reconstruction is learned.')
    parser.add_argument('--acc-weight',
                        type=float,
                        default=1e-2,
                        help='weight of the acceleration loss')
    parser.add_argument('--vel-weight',
                        type=float,
                        default=1e-1,
                        help='weight of the velocity loss')
    parser.add_argument('--rec-weight',
                        type=float,
                        default=1,
                        help='weight of the reconstruction loss')
    parser.add_argument('--gamma',
                        type=float,
                        default=42576,
                        help='gyro magnetic ratio - kHz/T')
    parser.add_argument('--G-max',
                        type=float,
                        default=40,
                        help='maximum gradient (peak current) - mT/m')
    parser.add_argument('--S-max',
                        type=float,
                        default=200,
                        help='maximum slew-rate - T/m/s')
    parser.add_argument('--FOV',
                        type=float,
                        default=0.2,
                        help='Field Of View - in m')
    parser.add_argument('--dt',
                        type=float,
                        default=1e-5,
                        help='sampling time - sec')
    parser.add_argument('--a-max',
                        type=float,
                        default=0.17,
                        help='maximum acceleration')
    parser.add_argument('--v-max',
                        type=float,
                        default=3.4,
                        help='maximum velocity')
    parser.add_argument(
        '--TSP',
        action='store_true',
        default=False,
        help='Use the PILOT-TSP algorithm; if not set, use PILOT.')
    parser.add_argument('--KMEANS',
                        action='store_true',
                        default=False,
                        help='Use the PILOT-KMEANS-TSP algorithm; '
                        'if not set, use PILOT-TSP.')

    parser.add_argument('--TSP-epoch',
                        default=1,
                        type=int,
                        help='Epoch to perform the TSP reorder at')
    parser.add_argument(
        '--initialization',
        type=str,
        default='spiral',
        help=
        'Trajectory initialization when using PILOT (spiral, EPI, rosette, uniform, gaussian).'
    )
    return parser
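One subtlety in the parser above: --trajectory-learning pairs default=True with action='store_false', so trajectory learning is on by default and passing the flag turns it off. A quick standalone check of that argparse behavior, with a plain ArgumentParser standing in for Args:

import argparse

# store_false flag: the value is True unless the flag is passed
p = argparse.ArgumentParser()
p.add_argument('--trajectory-learning', default=True, action='store_false')
print(p.parse_args([]).trajectory_learning)                         # True
print(p.parse_args(['--trajectory-learning']).trajectory_learning)  # False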
Code Example #11
def create_arg_parser():
    parser = Args()
    parser.add_argument('--test-name',
                        type=str,
                        default='new0.2/3/random_0.0001',
                        help='name for the output dir')
    parser.add_argument('--checkpoint',
                        type=pathlib.Path,
                        default='summary/test/checkpoint/best_model.pt',
                        help='Path to the U-Net model')
    parser.add_argument('--out-dir',
                        type=pathlib.Path,
                        default='summary/test/rec',
                        help='Path to save the reconstructions to')
    parser.add_argument('--batch-size',
                        default=16,
                        type=int,
                        help='Mini-batch size')
    parser.add_argument('--device',
                        type=str,
                        default='cuda',
                        help='Which device to run on')
    parser.add_argument('--num_vol',
                        type=int,
                        default=2,
                        help='Number of volumes to reconstruct')

    return parser
Code Example #12
def create_arg_parser():
    parser = Args()
    parser.add_argument('--num-pools', type=int, default=4, help='Number of U-Net pooling layers')
    parser.add_argument('--drop-prob', type=float, default=0.0, help='Dropout probability')
    parser.add_argument('--num-chans', type=int, default=32, help='Number of U-Net channels')

    parser.add_argument('--batch-size', default=16, type=int, help='Mini batch size')
    parser.add_argument('--num-epochs', type=int, default=50, help='Number of training epochs')
    parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
    parser.add_argument('--lr-step-size', type=int, default=40,
                        help='Period of learning rate decay')
    parser.add_argument('--lr-gamma', type=float, default=0.1,
                        help='Multiplicative factor of learning rate decay')
    parser.add_argument('--weight-decay', type=float, default=0.,
                        help='Strength of weight decay regularization')

    parser.add_argument('--report-interval', type=int, default=100, help='Period of loss reporting')
    parser.add_argument('--data-parallel', action='store_true',
                        help='If set, use multiple GPUs using data parallelism')
    parser.add_argument('--device', type=str, default='cuda',
                        help='Which device to train on. Set to "cuda" to use the GPU')
    parser.add_argument('--exp-name', type=str, default='gan',
                        help='Path where model and results should be saved')
    parser.add_argument('--resume', action='store_true',
                        help='If set, resume the training from a previous model checkpoint. '
                             '"--checkpoint" should be set with this')
    parser.add_argument('--checkpoint', type=str,
                        help='Path to an existing checkpoint. Used along with "--resume"')
    parser.add_argument('--overfit', action='store_true',
                        help='If set, it will use the same dataset for training and val')
    parser.add_argument('--start-gan', type=int, default=0, help='Number of epochs of generator pretraining')
    parser.add_argument('--num-workers', type=int, default=8, help='Number of PyTorch workers')
    parser.add_argument('--use-dicom', action='store_true', help='Use DICOM images as fake reconstruction')
    return parser
Code Example #13
def create_arg_parser():
    parser = Args()
    parser.add_argument('--num-pools', type=int, default=4, help='Number of U-Net pooling layers')
    parser.add_argument('--drop-prob', type=float, default=0.0, help='Dropout probability')
    parser.add_argument('--num-chans', type=int, default=128, help='Number of U-Net channels')

    parser.add_argument('--batch-size', default=16, type=int, help='Mini batch size')
    parser.add_argument('--num-epochs', type=int, default=50, help='Number of training epochs')
    parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
    parser.add_argument('--lr-step-size', type=int, default=15,
                        help='Period of learning rate decay')
    parser.add_argument('--lr-gamma', type=float, default=0.1,
                        help='Multiplicative factor of learning rate decay')
    parser.add_argument('--weight-decay', type=float, default=0.,
                        help='Strength of weight decay regularization')

    parser.add_argument('--report-interval', type=int, default=100, help='Period of loss reporting')
    parser.add_argument('--data-parallel', action='store_true',
                        help='If set, use multiple GPUs using data parallelism')
    parser.add_argument('--device', type=str, default='cuda',
                        help='Which device to train on. Set to "cuda" to use the GPU')
    parser.add_argument('--exp-dir', type=pathlib.Path, default='checkpoints',
                        help='Path where model and results should be saved')
    parser.add_argument('--resume', action='store_true',
                        help='If set, resume the training from a previous model checkpoint. '
                             '"--checkpoint" should be set with this')
    parser.add_argument('--checkpoint', type=str,
                        help='Path to an existing checkpoint. Used along with "--resume"')
    parser.add_argument('--aug', type=bool, default=False,
                        help='Use data augmentation')
    parser.add_argument('--netG', type=str, default='unet_transpose',
                        help='Name of the generator network')
    return parser
Code Example #14
def main(args=None):
    parser = Args()
    parser.add_argument('--mode', choices=['train', 'test'], default='train')
    parser.add_argument('--num-epochs',
                        type=int,
                        default=50,
                        help='Number of training epochs')
    parser.add_argument('--gpus', type=int, default=1)
    parser.add_argument('--nodes', type=int, default=1)
    parser.add_argument('--exp-dir',
                        type=pathlib.Path,
                        default='experiments',
                        help='Path where model and results should be saved')
    parser.add_argument('--exp', type=str, help='Name of the experiment')
    parser.add_argument('--checkpoint',
                        type=pathlib.Path,
                        help='Path to pre-trained model. Use with --mode test')
    parser = VariationalNetworkModel.add_model_specific_args(parser)
    if args is not None:
        parser.set_defaults(**args)

    args, _ = parser.parse_known_args()
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    run(args)
Code Example #15
def create_arg_parser():
    parser = Args()
    parser.add_argument('--test-name',
                        type=str,
                        default='test',
                        help='name for the output dir')
    parser.add_argument('--data-split',
                        choices=['val', 'test'],
                        default='val',
                        help='Which data partition to run on: "val" or "test"')
    parser.add_argument('--checkpoint',
                        type=pathlib.Path,
                        default='summary/test/checkpoint/best_model.pt',
                        help='Path to the U-Net model')
    parser.add_argument('--out-dir',
                        type=pathlib.Path,
                        default='summary/test/rec',
                        help='Path to save the reconstructions to')
    parser.add_argument('--batch-size',
                        default=24,
                        type=int,
                        help='Mini-batch size')
    parser.add_argument('--device',
                        type=str,
                        default='cuda',
                        help='Which device to run on')
    parser.add_argument('--SNR',
                        action='store_true',
                        default=False,
                        help='add SNR decay')

    return parser
Code Example #16
def create_arg_parser():
    parser = Args()
    parser.add_argument('--num-pools',
                        type=int,
                        default=4,
                        help='Number of U-Net pooling layers')
    parser.add_argument('--drop-prob',
                        type=float,
                        default=0.0,
                        help='Dropout probability')
    parser.add_argument('--num-chans',
                        type=int,
                        default=32,
                        help='Number of U-Net channels')

    parser.add_argument('--batch-size',
                        default=1,
                        type=int,
                        help='Mini batch size')
    parser.add_argument(
        '--device',
        type=str,
        default='cuda',
        help='Which device to train on. Set to "cuda" to use the GPU')
    parser.add_argument('--model-path',
                        type=str,
                        help='Path where model is present')
    parser.add_argument('--data-path',
                        type=pathlib.Path,
                        help='Path to the dataset')
    parser.add_argument('--recons-path',
                        type=str,
                        help='Path where reconstructions are to be saved')
    parser.add_argument(
        '--acceleration',
        type=int,
        help='Ratio of k-space columns to be sampled. 5x or 10x masks provided'
    )

    return parser
Code Example #17
def create_arg_parser():
    parser = Args()
    parser.add_argument('--num-pools', type=int, default=4, help='Number of U-Net pooling layers')
    parser.add_argument('--drop-prob', type=float, default=0.0, help='Dropout probability')
    parser.add_argument('--num-chans', type=int, default=32, help='Number of U-Net channels')

    parser.add_argument('--batch-size', default=16, type=int, help='Mini batch size')
    parser.add_argument('--num-epochs', type=int, default=50, help='Number of training epochs')
    parser.add_argument('--lr', type=float, default=0.00001, help='Learning rate')
    parser.add_argument('--lr-step-size', type=int, default=40,
                        help='Period of learning rate decay')
    parser.add_argument('--lr-gamma', type=float, default=0.1,
                        help='Multiplicative factor of learning rate decay')
    parser.add_argument('--weight-decay', type=float, default=0.,
                        help='Strength of weight decay regularization')

    parser.add_argument('--report-interval', type=int, default=5000, help='Period of loss reporting')
    parser.add_argument('--data-parallel', action='store_true',
                        help='If set, use multiple GPUs using data parallelism')
    parser.add_argument('--device', type=str, default='cuda',
                        help='Which device to train on. Set to "cuda" to use the GPU')
    parser.add_argument('--exp-dir', type=pathlib.Path, default='checkpoints',
                        help='Path where model and results should be saved')
    parser.add_argument('--resume', type=str, default='False',
                        help='If set, resume the training from a previous model checkpoint. '
                             '"--checkpoint" should be set with this')
    parser.add_argument('--data-path', type=pathlib.Path, default='/media/student1/RemovableVolume/calgary/',
                        help='Path to the dataset')
    parser.add_argument('--checkpoint', type=str,
                        help='Path to an existing checkpoint. Used along with "--resume"')
    parser.add_argument('--pretrained', type=str,
                        help='Path to an existing checkpoint. Used along with "--resume"')

    parser.add_argument('--residual', type=str, default='False')
    parser.add_argument('--acceleration', type=int, help='Ratio of k-space columns to be sampled. 5x or 10x masks provided')

    return parser
Code Example #18
File: train_unet.py  Project: fbbradheintz/fastMRI
        logger = TensorBoardLogger(save_dir=args.exp_dir,
                                   name=args.exp,
                                   version=load_version)
        trainer = create_trainer(args, logger)
        model = UnetMRIModel(args)
        trainer.fit(model)
    else:  # args.mode == 'test'
        assert args.checkpoint is not None
        model = UnetMRIModel.load_from_checkpoint(str(args.checkpoint))
        model.hparams.sample_rate = 1.
        trainer = create_trainer(args, logger=False)
        trainer.test(model)


if __name__ == '__main__':
    parser = Args()
    parser.add_argument('--mode', choices=['train', 'test'], default='train')
    parser.add_argument('--num-epochs',
                        type=int,
                        default=50,
                        help='Number of training epochs')
    parser.add_argument('--gpus', type=int, default=1)
    parser.add_argument('--exp-dir',
                        type=pathlib.Path,
                        default='experiments',
                        help='Path where model and results should be saved')
    parser.add_argument('--exp', type=str, help='Name of the experiment')
    parser.add_argument('--checkpoint',
                        type=pathlib.Path,
                        help='Path to pre-trained model. Use with --mode test')
    parser.add_argument(
Code Example #19
def create_arg_parser():
    parser = Args()
    parser.add_argument('--num-pools', type=int, default=4, help='Number of U-Net pooling layers')
    parser.add_argument('--drop-prob', type=float, default=0.0, help='Dropout probability')
    parser.add_argument('--num-chans', type=int, default=32, help='Number of U-Net channels')

    parser.add_argument('--batch-size', default=16, type=int, help='Mini batch size')
    parser.add_argument('--num-epochs', type=int, default=50, help='Number of training epochs')
    parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
    parser.add_argument('--lr-step-size', type=int, default=40,
                        help='Period of learning rate decay')
    parser.add_argument('--lr-gamma', type=float, default=0.1,
                        help='Multiplicative factor of learning rate decay')
    parser.add_argument('--weight-decay', type=float, default=0.,
                        help='Strength of weight decay regularization')

    parser.add_argument('--report-interval', type=int, default=100, help='Period of loss reporting')
    parser.add_argument('--data-parallel', action='store_true',
                        help='If set, use multiple GPUs using data parallelism')
    parser.add_argument('--device', type=str, default='cuda',
                        help='Which device to train on. Set to "cuda" to use the GPU')
    parser.add_argument('--exp-name', type=str, required=True,
                        help='Path where model and results should be saved')
    parser.add_argument('--resume', action='store_true',
                        help='If set, resume the training from a previous model checkpoint. '
                             '"--checkpoint" should be set with this')
    parser.add_argument('--eval', action='store_true',
                        help='If set, evaluate the model on validation and test data and dump results. '
                             '"--checkpoint" should be set with this')
    parser.add_argument('--out_dir', type=str,
                        help='Directory to save validation and test dump')
    parser.add_argument('--checkpoint', type=str,
                        help='Path to an existing checkpoint. Used along with "--resume"')
    parser.add_argument('--overfit', action='store_true',
                        help='If set, it will use the same dataset for training and val')
    parser.add_argument('--num-workers', type=int, default=8, help='Number of PyTorch workers')
    parser.add_argument('--batches-per-volume', type=int, default=1,
                        help='Number of batches to break up a volume into when evaluating. Set higher if you run OOM.')
    parser.add_argument('--model', type=str, required=True,
                        help='Model directory.')
    parser.add_argument('--num-volumes', type=int, default=3,
                        help='Number of input volumes - only relevant for model_volumes.')
    parser.add_argument('--exp-dir', type=str, default='experiments/v2v/', help='Directory to save')
    parser.add_argument('--evaluate-only', action='store_true')
    return parser
Code Example #20
File: run_bart_test.py  Project: jaragumura/MRI-code
        start_time = time.perf_counter()
        outputs = []
        for i in range(len(data)):
            outputs.append(run_model(i))
        time_taken = time.perf_counter() - start_time
    else:
        with multiprocessing.Pool(args.num_procs) as pool:
            start_time = time.perf_counter()
            outputs = pool.map(run_model, range(len(data)))
            time_taken = time.perf_counter() - start_time
    logging.info(f'Run Time = {time_taken:}s')
    save_outputs(outputs, args.output_path)


if __name__ == '__main__':
    parser = Args()
    parser.add_argument('--output-path',
                        type=pathlib.Path,
                        default=None,
                        help='Path to save the reconstructions to')
    parser.add_argument(
        '--num-iters',
        type=int,
        default=200,
        help='Number of iterations to run the reconstruction algorithm')
    parser.add_argument(
        '--num-procs',
        type=int,
        default=20,
        help='Number of processes. Set to 0 to disable multiprocessing.')
    args = parser.parse_args()
Code Example #21
def create_arg_parser():
    parser = Args()
    exp_dir = "/home/aditomer/baseline-wp/4_1_False/"
    parser.add_argument('--origin_file',
                        type=pathlib.Path,
                        default=f'/home/aditomer/Datasets/2',
                        help='Path to the U-Net model')
    parser.add_argument('--checkpoint',
                        type=pathlib.Path,
                        default=f'{exp_dir}best_model.pt',
                        help='Path to the U-Net model')
    parser.add_argument('--out-dir',
                        type=pathlib.Path,
                        default=f'{exp_dir}/rec',
                        help='Path to save the reconstructions to')
    parser.add_argument('--batch-size',
                        default=16,
                        type=int,
                        help='Mini-batch size')
    parser.add_argument('--device',
                        type=str,
                        default='cuda',
                        help='Which device to run on')

    return parser
Code Example #22
import torch
from SSIM import ssim
from train import load_data
from common.args import Args
import numpy as np
import matplotlib.pyplot as plt

if __name__ == '__main__':
    args = Args().parse_args()
    torch.random.manual_seed(0)
    data = load_data(args)
    images = [target.squeeze() for (k_space, target, f_name, slice) in data[1]]
    image1 = images[3]
    image_to_compare = image1.unsqueeze(0).unsqueeze(0)
    image_to_find = torch.zeros_like(image_to_compare)
    image_to_compare.requires_grad = True
    image_to_find.requires_grad = True
    optimizer = torch.optim.Adam([image_to_find], lr=0.001)
    for i in range(100):
        optimizer.zero_grad()
        loss = 1 - ssim(image_to_compare, image_to_find, window_size=11)
        loss.backward()
        optimizer.step()
        if i % 10 == 9:
            print('iteration number: ' + str(i + 1))
            print('loss is: ' + str(loss.detach().item()))
    SSIM = ssim(image_to_compare, image_to_find)
    print('the SSIM between the images: ' + str(SSIM.item()))
    plt.figure()
    plt.imshow(image1.numpy(), cmap='gray')
    plt.title('the image')
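The snippet above builds a figure but never renders it or shows the optimized image; a small hedged continuation of the same block, assuming standard matplotlib and PyTorch behavior:

    # Continuation sketch: also display the image recovered by the SSIM optimization
    plt.figure()
    plt.imshow(image_to_find.detach().squeeze().numpy(), cmap='gray')
    plt.title('the recovered image')
    plt.show()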
Code Example #23
File: run_bart_test.py  Project: yanbo007/fastMRI
    prediction = cs_total_variation(args, masked_kspace, acquisition,
                                    acceleration, num_low_freqs)
    return fname, slice, prediction


def main():
    with multiprocessing.Pool(20) as pool:
        start_time = time.perf_counter()
        outputs = pool.map(run_model, range(len(data)))
        time_taken = time.perf_counter() - start_time
        logging.info(f'Run Time = {time_taken:}s')
        save_outputs(outputs, args.output_path)


if __name__ == '__main__':
    parser = Args()
    parser.add_argument('--output-path',
                        type=pathlib.Path,
                        default=None,
                        help='Path to save the reconstructions to')
    parser.add_argument(
        '--num-iters',
        type=int,
        default=200,
        help='Number of iterations to run the reconstruction algorithm')
    args = parser.parse_args()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
Code Example #24
File: train_unet.py  Project: divelab/mri
def create_arg_parser():
    parser = Args()
    parser.add_argument('--num-pools',
                        type=int,
                        default=4,
                        help='Number of U-Net pooling layers')
    parser.add_argument('--drop-prob',
                        type=float,
                        default=0.0,
                        help='Dropout probability')
    parser.add_argument('--num-chans',
                        type=int,
                        default=32,
                        help='Number of U-Net channels')

    parser.add_argument('--batch-size',
                        default=16,
                        type=int,
                        help='Mini batch size')
    parser.add_argument('--num-epochs',
                        type=int,
                        default=2000,
                        help='Number of training epochs')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        help='Learning rate')
    parser.add_argument('--lr-step-size',
                        type=int,
                        default=40,
                        help='Period of learning rate decay')
    parser.add_argument('--lr-gamma',
                        type=float,
                        default=0.1,
                        help='Multiplicative factor of learning rate decay')
    parser.add_argument('--weight-decay',
                        type=float,
                        default=0.,
                        help='Strength of weight decay regularization')

    parser.add_argument('--report-interval',
                        type=int,
                        default=10,
                        help='Period of loss reporting')
    parser.add_argument(
        '--data-parallel',
        action='store_true',
        help='If set, use multiple GPUs using data parallelism')
    parser.add_argument(
        '--device',
        type=str,
        default='cuda',
        help='Which device to train on. Set to "cuda" to use the GPU')
    parser.add_argument('--exp-dir',
                        type=pathlib.Path,
                        default='./models/unet/checkpoints',
                        help='Path where model and results should be saved')
    parser.add_argument(
        '--resume',
        action='store_true',
        help='If set, resume the training from a previous model checkpoint. '
        '"--checkpoint" should be set with this')
    parser.add_argument(
        '--checkpoint',
        type=str,
        default='./models/unet/checkpoints/model.pt',
        help='Path to an existing checkpoint. Used along with "--resume"')

    # get metrics
    parser.add_argument('--metric-interval',
                        type=int,
                        default=1,
                        help='Period of report metrics')
    parser.add_argument('--checkpoint-metrics',
                        type=pathlib.Path,
                        default='./models/unet/checkpoints/model.pt',
                        help='Which model to use, best model or recent model.')
    parser.add_argument('--out-dir',
                        type=pathlib.Path,
                        default='./models/unet/reconstructions_val',
                        help='Path to save the reconstructions to')
    parser.add_argument(
        '--acquisition',
        type=str,
        default='CORPDFS_FBK',
        help=
        'PD or PDFS, if set, only volumes of the specified acquisition type are used for evaluation. By default, all volumes are included.'
    )
    parser.add_argument(
        '--accelerations-metrics',
        nargs='+',
        default=[4],
        type=int,
        help=
        'If set, only volumes of the specified acceleration rate are used for evaluation. By default, all volumes are included.'
    )
    parser.add_argument(
        '--center-fractions-metrics',
        nargs='+',
        default=[0.08],
        type=float,
        help=
        'fraction of low-frequency k-space columns to be sampled. Should have the same length as accelerations.'
    )

    return parser
Code Example #25
        save_outputs(outputs, args.output_path)


def save_outputs(outputs, output_path):
    reconstructions = defaultdict(list)
    for fname, slice, pred in outputs:
        reconstructions[fname].append((slice, pred))
    reconstructions = {
        fname: np.stack([pred for _, pred in sorted(slice_preds)])
        for fname, slice_preds in reconstructions.items()
    }
    utils.save_reconstructions(reconstructions, output_path)


if __name__ == '__main__':
    parser = Args()
    parser.add_argument('--output-path',
                        type=pathlib.Path,
                        default=None,
                        help='Path to save the reconstructions to')
    parser.add_argument(
        '--num-iters',
        type=int,
        default=200,
        help='Number of iterations to run the reconstruction algorithm')
    parser.add_argument('--reg-wt',
                        type=float,
                        default=0.01,
                        help='Regularization weight parameter')
    args = parser.parse_args()
Code Example #26
def create_arg_parser(device):
    parser = Args()
    parser.add_argument('--data-split',
                        choices=['val', 'test'],
                        default='val',
                        help='Which data partition to run on: "val" or "test"')
    parser.add_argument('--checkpoint',
                        type=pathlib.Path,
                        default=f'{device}checkpoint/best_model.pt',
                        help='Path to the U-Net model')
    parser.add_argument('--out-dir',
                        type=pathlib.Path,
                        default=f'{device}rec_without',
                        help='Path to save the reconstructions to')
    parser.add_argument('--batch-size',
                        default=16,
                        type=int,
                        help='Mini-batch size')
    parser.add_argument('--device',
                        type=str,
                        default='cuda',
                        help='Which device to run on')

    return parser
Code Example #27
File: train.py  Project: 3d-flat/3dflat
def create_arg_parser():
    parser = Args()
    parser.add_argument('--test-name', type=str, default='test', help='name for the output dir')
    parser.add_argument('--exp-dir', type=pathlib.Path, default='summary/test',
                        help='Path where model and results should be saved')
    parser.add_argument('--resume', action='store_true',
                        help='If set, resume the training from a previous model checkpoint. '
                             '"--checkpoint" should be set with this')
    parser.add_argument('--checkpoint', type=str, default='summary/test/model.pt',
                        help='Path to an existing checkpoint. Used along with "--resume"')
    parser.add_argument('--report-interval', type=int, default=100, help='Period of loss reporting')


    parser.add_argument('--f_maps', type=int, default=32, help='Number of U-Net feature maps')
    parser.add_argument('--data-parallel', action='store_true', default=False,
                        help='If set, use multiple GPUs using data parallelism')
    parser.add_argument('--device', type=str, default='cuda',
                        help='Which device to train on. Set to "cuda" to use the GPU')
    parser.add_argument('--acceleration-factor', default=20, type=int,
                        help='Number of shots in the multishot trajectory is calculated: res*depth/acceleration_factor')

    # optimization parameters
    parser.add_argument('--batch-size', default=16, type=int, help='Mini batch size')
    parser.add_argument('--num-epochs', type=int, default=40, help='Number of training epochs')
    parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
    parser.add_argument('--lr-step-size', type=int, default=30,
                        help='Period of learning rate decay')
    parser.add_argument('--lr-gamma', type=float, default=0.01,
                        help='Multiplicative factor of learning rate decay')
    parser.add_argument('--weight-decay', type=float, default=0.,
                        help='Strength of weight decay regularization')
    parser.add_argument('--sub-lr', type=float, default=1e-1, help='learning rate of the sub-sampling layer')

    # trajectory learning parameters
    parser.add_argument('--trajectory-learning', default=True, action='store_false',
                        help='Learn the trajectory; if the flag is passed, the trajectory is fixed and only the reconstruction is learned.')

    parser.add_argument('--weight-increase-epoch', default=100, type=int, help='Epoch when the velocity and acceleration weights start increasing.')
    parser.add_argument('--acc-weight', type=float, default=1e-2, help='weight of the acceleration loss')
    parser.add_argument('--vel-weight', type=float, default=1e-1, help='weight of the velocity loss')
    parser.add_argument('--rec-weight', type=float, default=1, help='weight of the reconstruction loss')
    parser.add_argument('--gamma', type=float, default=42576, help='gyro magnetic ratio - kHz/T')
    parser.add_argument('--G-max', type=float, default=40, help='maximum gradient (peak current) - mT/m')
    parser.add_argument('--S-max', type=float, default=200, help='maximum slew-rate - T/m/s')
    parser.add_argument('--FOV', type=float, default=0.2, help='Field Of View - in m')
    parser.add_argument('--dt', type=float, default=1e-5, help='sampling time - sec')
    parser.add_argument('--realworld-points-per-shot', type=int, default=3000, help='Points sampled in every actual shot')
    parser.add_argument('--points-per-shot', type=int, default=500, help='Length of a shot in the learned trajectory - the actual shot '
                                                                         'consists of the missing points on the line')
    parser.add_argument('--a-max', type=float, default=0.17, help='maximum acceleration')
    parser.add_argument('--v-max', type=float, default=3.4, help='maximum velocity')
    parser.add_argument('--initialization', type=str, default='spiral',
                        help='Trajectory initialization ')
    return parser
Code Example #28
def create_arg_parser():
    parser = Args()
    parser.add_argument(
        '--mask-kspace',
        action='store_true',
        help='Whether to apply a mask (set to True for val data and False '
        'for test data)')
    parser.add_argument('--data-split',
                        choices=['val', 'test_v2', 'challenge'],
                        required=True,
                        help='Which data partition to run on: "val", "test_v2", or "challenge"')
    parser.add_argument('--checkpoint',
                        type=pathlib.Path,
                        required=True,
                        help='Path to the U-Net model')
    parser.add_argument('--out-dir',
                        type=pathlib.Path,
                        required=True,
                        help='Path to save the reconstructions to')
    parser.add_argument('--batch-size',
                        default=16,
                        type=int,
                        help='Mini-batch size')
    parser.add_argument('--device',
                        type=str,
                        default='cuda',
                        help='Which device to run on')
    return parser
Code Example #29
File: train.py  Project: idan94/mri_project
def main():
    # Args stuff:
    args = Args().parse_args()
    args.output_dir = 'outputs/' + args.output_dir
    writer = SummaryWriter(log_dir=args.output_dir)
    pathlib.Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    with open(args.output_dir + '/args.txt', "w") as text_file:
        for arg in vars(args):
            print(str(arg) + ': ' + str(getattr(args, arg)), file=text_file)

    # Load data
    train_data_loader, val_data_loader, display_data_loader = load_data(args)

    # Define model:
    model = SubSamplingModel(
        decimation_rate=args.decimation_rate,
        resolution=args.resolution,
        trajectory_learning=True,
        subsampling_trajectory=args.subsampling_init,
        spiral_density=args.spiral_density,
        unet_chans=args.unet_chans,
        unet_num_pool_layers=args.unet_num_pool_layers,
        unet_drop_prob=args.unet_drop_prob
    )
    # Multiple GPUs:
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    model = model.to(device)

    start_epoch = 0
    # Define optimizer:
    if torch.cuda.device_count() > 1:
        sub_parameters = model.module.sub_sampling_layer.parameters()
        recon_parameters = model.module.reconstruction_model.parameters()
    else:
        sub_parameters = model.sub_sampling_layer.parameters()
        recon_parameters = model.reconstruction_model.parameters()

    optimizer = optim.Adam(
        [{'params': sub_parameters, 'lr': args.sub_lr},
         {'params': recon_parameters}]
        , args.lr)
    # Check if to resume or new train
    if args.resume is True:
        checkpoint = torch.load(pathlib.Path('outputs/' + args.checkpoint + '/model.pt'))
        old_args = checkpoint['args']
        # Check if the old and new args are matching
        assert (args.resolution == old_args.resolution)
        assert (args.challenge == old_args.challenge)
        assert (args.unet_chans == old_args.unet_chans)
        assert (args.unet_drop_prob == old_args.unet_drop_prob)
        assert (args.unet_num_pool_layers == old_args.unet_num_pool_layers)
        assert (args.decimation_rate == old_args.decimation_rate)
        # Load model
        model.load_state_dict(checkpoint['model'])
        # Load optimizer
        optimizer.load_state_dict(checkpoint['optimizer'])
        # Set epoch number
        start_epoch = checkpoint['epoch'] + 1
    # Train
    train_model(model, optimizer, train_data_loader, display_data_loader, args, writer, start_epoch)
Code Example #30
def create_arg_parser():
    parser = Args()

    parser.add_argument('--num-pools',
                        type=int,
                        default=4,
                        help='Number of U-Net pooling layers')

    parser.add_argument('--drop-prob',
                        type=float,
                        default=0.0,
                        help='Dropout probability')

    parser.add_argument('--num-chans',
                        type=int,
                        default=32,
                        help='Number of U-Net channels')

    parser.add_argument('--batch-size',
                        default=16,
                        type=int,
                        help='Mini batch size')

    parser.add_argument('--num-epochs',
                        type=int,
                        default=50,
                        help='Number of training epochs')

    parser.add_argument('--lr', type=float, default=0.04, help='Learning rate')

    parser.add_argument('--lr-step-size',
                        type=int,
                        default=40,
                        help='Period of learning rate decay')

    parser.add_argument('--lr-gamma',
                        type=float,
                        default=0.1,
                        help='Multiplicative factor of learning rate decay')

    parser.add_argument('--weight-decay',
                        type=float,
                        default=1e-4,
                        help='Strength of weight decay regularization')

    parser.add_argument('--report-interval',
                        type=int,
                        default=100,
                        help='Period of loss reporting')

    parser.add_argument(
        '--data-parallel',
        action='store_true',
        help='If set, use multiple GPUs using data parallelism')

    parser.add_argument(
        '--device',
        type=str,
        default='cuda',
        help='Which device to train on. Set to "cuda" to use the GPU')

    parser.add_argument('--exp-dir',
                        type=pathlib.Path,
                        default='checkpoints',
                        help='Path where model and results should be saved')

    parser.add_argument(
        '--resume',
        action='store_true',
        help='If set, resume the training from a previous model checkpoint. '
        '"--checkpoint" should be set with this')
    parser.add_argument(
        '--checkpoint',
        type=str,
        help='Path to an existing checkpoint. Used along with "--resume"')

    # from the EfficientNet example
    parser.add_argument('-a',
                        '--arch',
                        metavar='ARCH',
                        default='resnet18',
                        help='model architecture (default: resnet18)')

    parser.add_argument('--momentum',
                        default=0.5,
                        type=float,
                        metavar='M',
                        help='momentum')

    parser.add_argument('--image_size',
                        default=320,
                        type=int,
                        help='image size')

    parser.add_argument('--advprop',
                        default=False,
                        action='store_true',
                        help='use advprop or not')

    return parser