## Initializing r, theta: pull the learned dictionary parameters from the
## pretrained DYAN encoder checkpoint.
stateDict = torch.load(trained_encoder)['state_dict']
Dtheta = stateDict['l1.theta']  # learned pole angles
Drr = stateDict['l1.rr']        # learned pole magnitudes

## Create the trainable model, seeded with the pretrained dictionary.
model = unfrozen_DyanC(Drr, Dtheta, T, PRE, gpu_id)
model.cuda(gpu_id)
model.train()

## Frozen reference DYAN encoder built from the same pretrained parameters.
## (Previously the same checkpoint was torch.load-ed a second time here with
## identical keys — removed as redundant I/O.)
## NOTE(review): unlike `model`, baseDyan is not moved to the GPU here, while
## the equivalent block elsewhere in this file calls .cuda(gpu_id) — confirm
## the intended device placement.
baseDyan = OFModel(Drr, Dtheta, T, PRE, gpu_id)
baseDyan.eval()

optimizer = torch.optim.Adam(model.parameters(), lr=LR)
# Decay LR by 10x at epochs 50 and 100 (if Kitti: milestones=[100, 150]).
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[50, 100], gamma=0.1)

criterion = nn.CrossEntropyLoss()
start_epoch = 1

## Optionally resume training from a checkpoint.
## NOTE(review): only the epoch counter is restored — the checkpoint's model
## and optimizer state are NOT loaded here; confirm this is intentional.
if load_ckpt:
    loadedcheckpoint = torch.load(ckpt_file)
    start_epoch = loadedcheckpoint['epoch']
# Hyper Parameters
FRA = 4  # number of input frames (if Kitti: FRA = 9)
PRE = 0  # number of predicted frames appended by the model
         # NOTE(review): the original comment said "represents predicting
         # 1 frame" while the value is 0 — confirm which is intended.
N_FRAME = FRA + PRE
T = FRA
numOfPixels = 240 * 320  # UCF frame resolution (if Kitti: 128 * 160)

gpu_id = 3  # NOTE(review): hard-coded GPU index — consider making configurable.
opticalflow_ckpt_file = '../preTrainedModel/UCFModel.pth'  # if Kitti: 'KittiModel.pth'
classifier_ckpt_file = './DYAN-ResNet50-PGOF94.pth'

## Load the learned dictionary parameters (pole magnitudes/angles) from the
## pretrained optical-flow DYAN checkpoint and build the frozen encoder.
stateDict = torch.load(opticalflow_ckpt_file)['state_dict']
Dtheta = stateDict['l1.theta']
Drr = stateDict['l1.rr']
baseDyan = OFModel(Drr, Dtheta, T, PRE, gpu_id)
baseDyan.cuda(gpu_id)
baseDyan.eval()

## Load the classifier network, seeded with the same dictionary parameters.
classifier = unfrozen_DyanC(Drr, Dtheta, T, PRE, gpu_id)
# Example #3
## Initializing r, theta: load the pretrained DYAN dictionary parameters.
stateDict = torch.load(trained_encoder)['state_dict']
Dtheta = stateDict['l1.theta']  # learned pole angles
Drr = stateDict['l1.rr']        # learned pole magnitudes

## Create the trainable model from the pretrained dictionary.
model = unfrozen_DyanC(Drr, Dtheta, T, PRE, gpu_id)
model.cuda(gpu_id)
model.train()

## Frozen reference DYAN encoder sharing the same dictionary. The redundant
## second torch.load of the identical checkpoint has been removed.
## NOTE(review): baseDyan is not sent to the GPU here although `model` is —
## confirm the intended device placement.
baseDyan = OFModel(Drr, Dtheta, T, PRE, gpu_id)
baseDyan.eval()

optimizer = torch.optim.Adam(model.parameters(), lr=LR)
# Step the learning rate down 10x at epochs 50 and 100
# (if Kitti: milestones=[100, 150]).
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[50, 100], gamma=0.1)

criterion = nn.CrossEntropyLoss()
start_epoch = 1

## Resume from a checkpoint if requested.
## NOTE(review): only `epoch` is restored — model/optimizer weights from the
## checkpoint are not reloaded; confirm this is intentional.
if load_ckpt:
    loadedcheckpoint = torch.load(ckpt_file)
    start_epoch = loadedcheckpoint['epoch']