Example #1
def _load_training_state(self):
    """Restore reward-scaling state and training progress from a saved checkpoint."""
    training_state = load_training_state()
    if training_state:
        self.agent.scaler = training_state['scaler']
        self.last_step = training_state['last_step']
        self.ep_reward_queue = training_state['ep_reward_queue']
        print('training state loaded.')
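Examples #1, #2, and #4 all call a module-level load_training_state() helper that returns a state dict, or a falsy value on a cold start. Its definition is not shown in these snippets; a minimal pickle-based sketch, with the checkpoint path as an assumed constant and save_training_state() as a hypothetical counterpart, might look like:

import os
import pickle

TRAINING_STATE_PATH = 'checkpoints/training_state.pkl'  # assumed location


def load_training_state():
    """Return the saved state dict, or None if no checkpoint exists yet."""
    if not os.path.exists(TRAINING_STATE_PATH):
        return None
    with open(TRAINING_STATE_PATH, 'rb') as f:
        return pickle.load(f)


def save_training_state(state):
    """Persist the state dict so training can resume after a restart."""
    with open(TRAINING_STATE_PATH, 'wb') as f:
        pickle.dump(state, f)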
Example #2
def _load_training_state(self):
    """Restore the replay buffer and training progress from a saved checkpoint."""
    training_state = load_training_state()
    if training_state:
        self.agent.replay_memory.load_memory(
            training_state['replay_memory'])
        self.last_step = training_state['last_step']
        self.ep_reward_queue = training_state['ep_reward_queue']
        print('training state loaded.')
Example #3
import os

import torch
import torch.optim as optim


def finetuneparam(pre_imgnet, nllr, use_pre_ours=0, epocs=3):
    # Seed before model construction so weight init is reproducible.
    torch.manual_seed(123)
    param_val = 'p%dlr%d' % (pre_imgnet, nllr)
    if use_pre_ours == 0:
        # Fresh run, optionally starting from ImageNet-pretrained weights.
        model = Frame2dResNet50(use_pretrain=pre_imgnet).to(device)
        optimizer = optim.Adam(model.parameters(), lr=10**(-nllr))
    else:
        # Resume from our own checkpoint saved after epoch `use_pre_ours`.
        model = Frame2dResNet50().to(device)
        optimizer = optim.Adam(model.parameters(), lr=10**(-nllr))
        load_training_state(
            os.path.join(savedPath,
                         '2dResNet-' + param_val + '-%d.pth' % use_pre_ours),
            model, optimizer)
    # Re-seed so the training data order matches across both branches.
    torch.manual_seed(123)
    log_file = open('log-train/log-p%dlr%d.txt' % (pre_imgnet, nllr), 'a')
    train_save(epocs,
               model,
               optimizer,
               param_val,
               print_to=log_file,
               epoc_start=use_pre_ours)
    log_file.close()
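Note that the load_training_state in Example #3 has a different signature than the one in the other examples: it takes a checkpoint path plus the model and optimizer and restores both in place. The helper itself is not shown; a minimal sketch, assuming a standard torch.save checkpoint dict with hypothetical 'model_state_dict' and 'optimizer_state_dict' keys and the module-level device seen in the example:

import torch


def load_training_state(path, model, optimizer):
    """Restore model and optimizer weights from a .pth checkpoint, in place."""
    checkpoint = torch.load(path, map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])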
Example #4
def _load_training_state(self):
    """Restore only the reward scaler from a saved checkpoint."""
    training_state = load_training_state()
    if training_state:
        self.agent.scaler = training_state['scaler']
        print('training state loaded.')