Example #1
0
# Instantiate the pretrained encoder and prepare it for inference:
# move it to the target device and switch to eval mode (disables
# dropout / fixes batch-norm statistics).
encoder = Encoder()
encoder.to(device)
encoder.eval()

# For every slice orientation, run the pretrained encoder over that
# orientation's training slices under torch.no_grad().
# NOTE(review): this excerpt is truncated — the body of the
# `if bool(noise_prob):` branch is not visible here.
for i, orient in enumerate(orients):
    print("Processing {} features".format(orient))
    # Slices for this orientation live under
    # <experiment_dir>/tractseg_data/<subj_id>/training_slices/<orient>.
    data_path = os.path.join(experiment_dir, 'tractseg_data', subj_id,
                             'training_slices', orient)
    # Learned features for all orientations share one output folder.
    features_save_path = os.path.join(experiment_dir, 'tractseg_data', subj_id,
                                      'learned_features')

    dataset = Datasets.OrientationDataset(data_path,
                                          scale=True,
                                          normalize=False,
                                          bg_zero=True,
                                          noise_prob=noise_prob,
                                          alpha=1)
    # One slice per forward pass, in deterministic order (shuffle=False).
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=10)

    # NOTE(review): assumes `experiment_dir` ends with a path separator —
    # built by plain string formatting, unlike the os.path.join calls above.
    encoder_path = "{}saved_models/{}_encoder_epoch_{}".format(
        experiment_dir, model_name, epoch)
    encoder.load_state_dict(torch.load(encoder_path))
    print("Loaded pretrained weights starting from epoch {} for {}".format(
        epoch, model_name))

    with torch.no_grad():
        if bool(noise_prob):
Example #2
0
# Inference settings: 128-dimensional latent code, no input noise.
noise_prob = None
num_latent = 128

# Build the convolutional VAE, move it to the compute device, and
# put it in evaluation mode for feature extraction.
model = ConvVAE(num_latent, device)
model.to(device)
model.eval()

# For every slice orientation, encode that orientation's slices with the
# pretrained ConvVAE under torch.no_grad().
# NOTE(review): truncated excerpt — the loop over `dataloader` that fills
# `orient_features` is not visible here.
for i, orient in enumerate(orients):
    print("Processing {} features".format(orient))
    # Slices for this orientation live under
    # <experiment_dir>/tractseg_data/<subj_id>/training_slices/<orient>.
    data_path = os.path.join(experiment_dir, 'tractseg_data', subj_id,
                             'training_slices', orient)
    features_save_path = os.path.join(experiment_dir, 'tractseg_data', subj_id,
                                      'learned_features')

    dataset = Datasets.OrientationDataset(data_path,
                                          scale=True,
                                          normalize=False)
    # One slice per forward pass, deterministic order (shuffle=False).
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=10)

    # NOTE(review): assumes `experiment_dir` ends with a path separator —
    # built by plain string formatting, unlike the os.path.join calls above.
    model_path = "{}saved_models/{}_epoch_{}".format(experiment_dir,
                                                     model_name, epoch)
    model.load_state_dict(torch.load(model_path))
    print("Loaded pretrained weights starting from epoch {} for {}".format(
        epoch, model_name))

    with torch.no_grad():

        # Pre-allocate the feature tensor for this orientation.
        orient_features = torch.zeros(feature_shapes[i])
Example #3
0
model_name = 'MODEL1_DENOISING_P50'

# Compute device: first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Reproducibility: seed the global RNG and force deterministic cuDNN
# kernels. cuDNN autotuning (benchmark mode) picks the fastest algorithm
# per input size but is nondeterministic, so it is enabled only when we
# do NOT require reproducible runs.
deterministic = True
seed = 0
if deterministic:
    torch.manual_seed(seed)
torch.backends.cudnn.deterministic = deterministic
torch.backends.cudnn.benchmark = not deterministic

# Training-data configuration.
batch_size = 2 ** 15
start_epoch = 10
channels = 10
noise_prob = None

# Voxel-level dataset: intensities are min-max scaled, not mean/std
# normalized. Loading order is kept fixed (shuffle=False).
trainset = Datasets.VoxelDataset(data_path,
                                 file_name='data.nii.gz',
                                 normalize=False,
                                 scale=True)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          num_workers=6)
total_examples = len(trainset)
print("Total training examples: {}, Batch size: {}, Iters per epoch: {}".format(total_examples,
                                                                                batch_size,
                                                                                len(trainloader)))

# ------------------------------------------Model-----------------------------------------------------------------------
# Encoder/decoder pair with hidden width h=10, moved to the compute device.
# (Removed dead commented-out alternative constructor calls.)
encoder = Encoder(h=10)
decoder = Decoder(h=10)
encoder.to(device)
decoder.to(device)
Example #4
0
File: h_train.py — Project: torayeff/DeepMRI
# Training hyper-parameters.
batch_size = 1
start_epoch = 0   # epoch to resume from when loading pretrained weights
num_epochs = 10   # total number of epochs to train
checkpoint = 10   # persist the model every `checkpoint` epochs

# Training-objective switches: plain reconstruction loss, no masking,
# no denoising corruption of the inputs.
masked_loss = False
denoising = False
# ------------------------------------------Data------------------------------------------------------------------------
# Slice-wise training data: orientation slices, min-max scaled (no
# mean/std normalization), background voxels zeroed. Shuffled each epoch.
# (Removed dead commented-out voxel-level dataset alternative.)
trainset = Datasets.OrientationDataset(data_path, scale=True, normalize=False, bg_zero=True)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=10)
total_examples = len(trainset)
print("Total training examples: {}, Batch size: {}, Iters per epoch: {}".format(total_examples,
                                                                                batch_size,
                                                                                len(trainloader)))
# ------------------------------------------Model-----------------------------------------------------------------------
# Sweep the hidden width h over 1..100, building one encoder/decoder pair
# per value (model name records the width).
# NOTE(review): truncated excerpt — the training code after
# `encoder.to(device)` is not visible here.
for h in range(1, 101):
    model_name = "ConvModel1_prelu_h{}".format(h)

    # model settings
    # encoder = Encoder(h=h)
    # decoder = Decoder(h=h)
    encoder = Encoder(input_size=(145, 145), h=h)
    decoder = Decoder(h)
    encoder.to(device)