Example #1
def loadOpticalFlowModel(ckpt_file):
    loadedcheckpoint = torch.load(ckpt_file)
    stateDict = loadedcheckpoint['state_dict']

    # Load the learned dictionary parameters (pole angles and magnitudes)
    Dtheta = stateDict['l1.theta']
    Drr    = stateDict['l1.rr']
    model = OFModel(Drr, Dtheta, FRA, PRE, gpu_id)
    model.cuda(gpu_id)

    return model
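A minimal usage sketch for the loader above, assuming torch and OFModel are already imported and that FRA, PRE, and gpu_id exist as module-level globals (the function reads them implicitly); the checkpoint path and values below are hypothetical:

FRA, PRE, gpu_id = 3, 1, 0                          # illustrative values only
model = loadOpticalFlowModel('OF_checkpoint.pth')   # hypothetical path
model.eval()                                        # switch to inference mode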
Example #2
def loadModel(ckpt_file):
    loadedcheckpoint = torch.load(ckpt_file)
    #model.load_state_dict(loadedcheckpoint['state_dict'])
    #optimizer.load_state_dict(loadedcheckpoint['optimizer'])
    stateDict = loadedcheckpoint['state_dict']

    # Load the learned dictionary parameters (pole angles and magnitudes)
    Dtheta = stateDict['l1.theta']
    Drr = stateDict['l1.rr']
    model = OFModel(Drr, Dtheta, T, PRE, lam, gpu_id)
    model.cuda(gpu_id)
    # Variable is a no-op wrapper in PyTorch >= 0.4, kept from the original code
    Drr = Variable(Drr.cuda(gpu_id))
    Dtheta = Variable(Dtheta.cuda(gpu_id))
    # dictionary = creatRealDictionary(N_FRAME,Drr,Dtheta, gpu_id)

    return model, Drr, Dtheta
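For reference, a sketch of how a checkpoint compatible with loadModel might be written during training; the 'state_dict', 'optimizer', and 'epoch' keys mirror the commented-out load lines above (model, optimizer, epoch, and ckpt_file are assumed to exist in the training loop):

torch.save({'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict()},
           ckpt_file)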
Example #3
    # Warp `input` with the flow field by bilinear sampling over a normalized grid
    return torch.nn.functional.grid_sample(
        input=input,
        grid=(tensorGrid + tensorFlow).permute(0, 2, 3, 1),
        mode='bilinear',
        padding_mode='border')
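The warp above presupposes an identity sampling grid tensorGrid in the normalized [-1, 1] coordinates that grid_sample expects, with tensorFlow scaled to the same range. A sketch of how such a grid is typically built (the helper name is an assumption, not the project's API):

def make_identity_grid(input):
    # Per-pixel (x, y) coordinates in [-1, 1], shaped (n, 2, h, w) so that
    # (tensorGrid + tensorFlow).permute(0, 2, 3, 1) gives the (n, h, w, 2)
    # layout grid_sample expects
    n, _, h, w = input.size()
    xs = torch.linspace(-1.0, 1.0, w).view(1, 1, 1, w).expand(n, 1, h, w)
    ys = torch.linspace(-1.0, 1.0, h).view(1, 1, h, 1).expand(n, 1, h, w)
    return torch.cat([xs, ys], dim=1).to(input.device)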


## Create the model
'''modelOF = OFModel(Drr, Dtheta, T, PRE, lamOF, gpu_id)
modelOF.cuda(gpu_id)
optimizerOF = torch.optim.Adam(modelOF.parameters(), lr=LR)
schedulerOF = lr_scheduler.MultiStepLR(optimizerOF, milestones=[50, 130], gamma=0.5)'''

ofmodel, ofDrr, ofDtheta = loadModel(ckpt_file_OF)

modelrgb = OFModel(Drr, Dtheta, T + 1, PRE, lam, gpu_id)
modelrgb.cuda(gpu_id)
optimizerrgb = torch.optim.Adam(modelrgb.parameters(), lr=LR)
schedulerrgb = lr_scheduler.MultiStepLR(optimizerrgb,
                                        milestones=[50, 130],
                                        gamma=0.5)
loss_mse = nn.MSELoss()
start_epoch = 1

## To resume training from a checkpoint
if load_ckpt:
    # loadedcheckpoint = torch.load(ckpt_file_OF)
    # start_epoch = loadedcheckpoint['epoch']
    # modelOF.load_state_dict(loadedcheckpoint['state_dict'])
    # optimizerOF.load_state_dict(loadedcheckpoint['optimizer'])
    loadedcheckpoint = torch.load(ckpt_file_rgb)
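    # The example is truncated here; following the commented-out lines just
    # above, the resume branch presumably continues along these lines
    # (a sketch, not the project's verbatim code):
    start_epoch = loadedcheckpoint['epoch']
    modelrgb.load_state_dict(loadedcheckpoint['state_dict'])
    optimizerrgb.load_state_dict(loadedcheckpoint['optimizer'])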
Example #4
                            nfra=N_FRAME)
dataloader = DataLoader(trainingData,
                        batch_size=BATCH_SIZE,
                        shuffle=True,
                        num_workers=1)

## Initializing r, theta
P, Pall = gridRing(N)
Drr = abs(P)
Drr = torch.from_numpy(Drr).float()
Dtheta = np.angle(P)
Dtheta = torch.from_numpy(Dtheta).float()
# What and where is gamma

## Create the time model
model = OFModel(Drr, Dtheta, T, PRE, lam, gpu_id)
model.cuda(gpu_id)
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
scheduler = lr_scheduler.MultiStepLR(optimizer,
                                     milestones=[100, 150],
                                     gamma=0.1)

model1 = OFModel1(Drr, Dtheta, T, PRE, lambd1, gpu_id)
model1.cuda(gpu_id)
optimizer1 = torch.optim.Adam(model1.parameters(), lr=LR)
scheduler1 = lr_scheduler.MultiStepLR(optimizer1,
                                      milestones=[100, 150],
                                      gamma=0.1)

model2 = OFModel2(Drr, Dtheta, T, PRE, lambd2, gpu_id)
model2.cuda(gpu_id)
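In both this example and the next, the dictionary is initialized from gridRing(N), which returns complex poles P; Drr holds their magnitudes and Dtheta their phase angles. A toy illustration of that decomposition (the pole values below are made up, not gridRing's actual output):

import numpy as np

P = 0.95 * np.exp(1j * np.linspace(0.0, np.pi, 8))  # toy poles near the unit circle
Drr = np.abs(P)        # radial part r of each pole
Dtheta = np.angle(P)   # angular part theta of each pole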
Example #5
File: train.py  Project: sadjadasghari/DYAN
                            rootDir=rootDir)

dataloader = DataLoader(trainingData,
                        batch_size=BATCH_SIZE,
                        shuffle=True, num_workers=1)

## Initializing r, theta
P, Pall = gridRing(N)
Drr = abs(P)
Drr = torch.from_numpy(Drr).float()
Dtheta = np.angle(P)
Dtheta = torch.from_numpy(Dtheta).float()
# What and where is gamma

## Create the time model
model_ti = OFModel(Drr, Dtheta, T, PRE, gpu_id)
model_ti.cuda(gpu_id)
optimizer = torch.optim.Adam(model_ti.parameters(), lr=LR)
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[100,150], gamma=0.1) # if Kitti: milestones=[100,150]
loss_mse = nn.MSELoss()
start_epoch = 1

## Create the spatial model
Encoder_sp = Encoder(Drr, Dtheta, W, gpu_id)
Encoder_sp.cuda(gpu_id)  # parallelize it?
optimizer_sp = torch.optim.Adam(Encoder_sp.parameters(), lr=LR)
scheduler_sp = lr_scheduler.MultiStepLR(optimizer_sp, milestones=[50, 100], gamma=0.1)  # Parameters?
loss_mse_sp = nn.MSELoss()

## To resume training from a checkpoint
if load_ckpt: