Example #1
        grid=(tensorGrid + tensorFlow).permute(0, 2, 3, 1),
        mode='bilinear',
        padding_mode='border')
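
# --- Hedged sketch (an assumption, not part of the original example): the
# --- fragment above looks like the tail of a torch.nn.functional.grid_sample
# --- call that warps a frame with a base sampling grid plus an optical-flow
# --- field.  Function and tensor names below are illustrative only.
import torch.nn.functional as F

def warp_with_flow(frame, tensorGrid, tensorFlow):
    # frame:      (B, C, H, W) image to warp
    # tensorGrid: (B, 2, H, W) base sampling grid, normalized to [-1, 1]
    # tensorFlow: (B, 2, H, W) flow offsets in the same normalized coordinates
    return F.grid_sample(
        frame,
        grid=(tensorGrid + tensorFlow).permute(0, 2, 3, 1),  # -> (B, H, W, 2)
        mode='bilinear',
        padding_mode='border')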


## Create the model
# modelOF = OFModel(Drr, Dtheta, T, PRE, lamOF, gpu_id)
# modelOF.cuda(gpu_id)
# optimizerOF = torch.optim.Adam(modelOF.parameters(), lr=LR)
# schedulerOF = lr_scheduler.MultiStepLR(optimizerOF, milestones=[50, 130], gamma=0.5)

ofmodel, ofDrr, ofDtheta = loadModel(ckpt_file_OF)

modelrgb = OFModel(Drr, Dtheta, T + 1, PRE, lam, gpu_id)
modelrgb.cuda(gpu_id)
optimizerrgb = torch.optim.Adam(modelrgb.parameters(), lr=LR)
schedulerrgb = lr_scheduler.MultiStepLR(optimizerrgb,
                                        milestones=[50, 130],
                                        gamma=0.5)
loss_mse = nn.MSELoss()
start_epoch = 1
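
# --- Hedged sketch (an assumption, not shown in this excerpt): the usual
# --- per-batch update these objects drive -- forward pass, MSE loss, Adam
# --- step.  schedulerrgb.step() would then run once per epoch so the learning
# --- rate halves at epochs 50 and 130.  The forward signature and the
# --- batch/target tensors are illustrative only.
def train_step(batch, target):
    optimizerrgb.zero_grad()
    prediction = modelrgb(batch)         # forward signature assumed
    loss = loss_mse(prediction, target)
    loss.backward()
    optimizerrgb.step()
    return loss.item()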

## To continue training from a checkpoint
if load_ckpt:
    # loadedcheckpoint = torch.load(ckpt_file_OF)
    # start_epoch = loadedcheckpoint['epoch']
    # modelOF.load_state_dict(loadedcheckpoint['state_dict'])
    # optimizerOF.load_state_dict(loadedcheckpoint['optimizer'])
    loadedcheckpoint = torch.load(ckpt_file_rgb)
    start_epoch = loadedcheckpoint['epoch']  # careful: epoch numbering resumes from the checkpoint
    modelrgb.load_state_dict(loadedcheckpoint['state_dict'])
Example #2
                        batch_size=BATCH_SIZE,
                        shuffle=True,
                        num_workers=1)
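
# --- Hedged sketch (an assumption, not part of the original example): the
# --- fragment above looks like the tail of a torch.utils.data.DataLoader
# --- construction.  The helper and its dataset argument are illustrative only.
from torch.utils.data import DataLoader

def make_loader(dataset):
    # dataset is whatever torch Dataset the original script builds earlier
    return DataLoader(dataset,
                      batch_size=BATCH_SIZE,
                      shuffle=True,
                      num_workers=1)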

## Initialize r (magnitudes) and theta (phases)
P, Pall = gridRing(N)
Drr = np.abs(P)       # magnitudes of the complex values returned by gridRing
Drr = torch.from_numpy(Drr).float()
Dtheta = np.angle(P)  # phases of the same complex values
Dtheta = torch.from_numpy(Dtheta).float()
# TODO: clarify what gamma is and where it is used
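
# --- Hedged illustration (an assumption, not part of the original example):
# --- gridRing(N) evidently returns complex values P; Drr and Dtheta are their
# --- magnitudes and phases.  The values below are made up just to show the
# --- conversion.
import numpy as np
import torch

P_demo = np.array([0.90 * np.exp(1j * 0.3), 1.05 * np.exp(1j * 1.2)])
rr_demo = torch.from_numpy(np.abs(P_demo)).float()        # magnitudes
theta_demo = torch.from_numpy(np.angle(P_demo)).float()   # phases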

## Create the time model
model = OFModel(Drr, Dtheta, T, PRE, lam, gpu_id)
model.cuda(gpu_id)
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
scheduler = lr_scheduler.MultiStepLR(optimizer,
                                     milestones=[100, 150],
                                     gamma=0.1)

model1 = OFModel1(Drr, Dtheta, T, PRE, lambd1, gpu_id)
model1.cuda(gpu_id)
optimizer1 = torch.optim.Adam(model1.parameters(), lr=LR)
scheduler1 = lr_scheduler.MultiStepLR(optimizer1,
                                      milestones=[100, 150],
                                      gamma=0.1)

model2 = OFModel2(Drr, Dtheta, T, PRE, lambd2, gpu_id)
model2.cuda(gpu_id)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=LR)
scheduler2 = lr_scheduler.MultiStepLR(optimizer2,