def load(self, ckpt_path):
    """Load detector weights from *ckpt_path*; when resuming, also restore
    the optimizer and scheduler state.

    Args:
        ckpt_path: Path to the checkpoint file.

    Returns:
        The epoch number stored in the checkpoint, or 0 if absent.
    """
    load_dict = {
        'detector': self.detector,
    }
    if opt.resume:
        load_dict.update({
            'optimizer': self.optimizer,
            'scheduler': self.scheduler,
        })
        utils.color_print(
            'Load checkpoint from %s, resume training.' % ckpt_path, 3)
    else:
        utils.color_print('Load checkpoint from %s.' % ckpt_path, 3)

    ckpt_info = load_checkpoint(load_dict, ckpt_path, map_location=opt.device)

    if opt.resume:
        # Only re-read the checkpoint when actually resuming (previously this
        # full disk read happened unconditionally and the result was unused
        # in the non-resume path). map_location='cpu' keeps the load working
        # on machines without the device the checkpoint was saved from.
        # NOTE(review): load_checkpoint above already received the optimizer
        # via load_dict; this explicit restore is kept for safety — confirm
        # whether it is redundant and can be dropped.
        s = torch.load(ckpt_path, map_location='cpu')
        self.optimizer.load_state_dict(s['optimizer'])

    epoch = ckpt_info.get('epoch', 0)
    return epoch
def load(self, ckpt_path):
    """Load detector weights from *ckpt_path*; when resuming, also restore
    the optimizer and scheduler and advance the scheduler one step.

    Returns 0 immediately (loading nothing) when the path does not end in
    'pt', i.e. it does not look like a checkpoint file.

    Args:
        ckpt_path: Path to the checkpoint file.

    Returns:
        The epoch number stored in the checkpoint, or 0 if absent or if the
        path is not a checkpoint.
    """
    # endswith is the clear, non-fragile form of the old ckpt_path[-2:] slice
    # check (identical behavior, including paths shorter than two chars).
    if not ckpt_path.endswith('pt'):
        return 0

    # Compute the resume flag once so the call sites below cannot drift apart
    # (it was previously duplicated in two conditions).
    resume = opt.resume or 'RESUME' in self.config.MISC

    load_dict = {
        'detector': self.detector,
    }
    if resume:
        load_dict.update({
            'optimizer': self.optimizer,
            'scheduler': self.scheduler,
        })
        utils.color_print(
            'Load checkpoint from %s, resume training.' % ckpt_path, 3)
    else:
        utils.color_print('Load checkpoint from %s.' % ckpt_path, 3)

    ckpt_info = load_checkpoint(load_dict, ckpt_path, map_location=opt.device)

    if resume:
        # Only pay for the second full checkpoint read when actually
        # resuming (previously it ran unconditionally and was unused in the
        # non-resume path).
        s = torch.load(ckpt_path, map_location='cpu')
        self.optimizer.load_state_dict(s['optimizer'])
        # Advance the scheduler past the restored boundary, matching the
        # original resume behavior.
        self.scheduler.step()

    epoch = ckpt_info.get('epoch', 0)
    return epoch
def load(self, ckpt_path):
    """Restore the cleaner network (and, when resuming, the generator
    optimizer and scheduler) from *ckpt_path*.

    Args:
        ckpt_path: Path to the checkpoint file.

    Returns:
        The epoch number stored in the checkpoint, or 0 if absent.
    """
    modules = {'cleaner': self.cleaner}
    if opt.resume:
        modules['optimizer'] = self.g_optimizer
        modules['scheduler'] = self.scheduler
        message = 'Load checkpoint from %s, resume training.' % ckpt_path
    else:
        message = 'Load checkpoint from %s.' % ckpt_path
    utils.color_print(message, 3)

    info = load_checkpoint(modules, ckpt_path, map_location=opt.device)
    return info.get('epoch', 0)