print('load from pretrained checkpoint %s ...' % resume, flush=True) elif args.auto_resume: resume = auto_load_resume(Config.save_dir) print('load from %s ...' % resume, flush=True) else: raise Exception("no checkpoints to load") model_dict = model.state_dict() pretrained_dict = torch.load(resume) pretrained_dict = { k[7:]: v for k, v in pretrained_dict.items() if k[7:] in model_dict } model_dict.update(pretrained_dict) model.load_state_dict(model_dict) print('Set cache dir', flush=True) time = datetime.datetime.now() filename = '%s_%d%d%d_%s' % (args.discribe, time.month, time.day, time.hour, Config.dataset) save_dir = os.path.join(Config.save_dir, filename) if not os.path.exists(save_dir): os.makedirs(save_dir) model.cuda() model = nn.DataParallel(model) # optimizer prepare if Config.use_backbone: ignored_params = list(map(id, model.module.classifier.parameters()))
# Export a trained MainModel checkpoint to ONNX and exit.
#
# Side effects: reads the checkpoint file at `resume`, writes ./dcl_yt.onnx,
# prints progress, and terminates the process with exit code 0 on success.
model = MainModel(Config)
model_dict = model.state_dict()

# Checkpoint to export.
# NOTE(review): hard-coded path — consider promoting to a CLI argument.
resume = './net_model/training_descibe_5412_CUB/weights_0_66599_0.5753_0.8661.pth'

# The checkpoint keys match this (non-DataParallel) model's state_dict
# directly, so it is loaded as-is with no key-prefix stripping.
model.load_state_dict(torch.load(resume))
model.cuda()

# Freeze dropout / batch-norm statistics so the exported graph captures
# inference-time behavior rather than training-mode behavior.
model.eval()

# Dummy input that fixes the exported graph's input shape: (1, 3, 448, 448).
example = torch.rand(1, 3, 448, 448).cuda()
print(example.shape)

onnx.export(model, example, "./dcl_yt.onnx", verbose=False,
            operator_export_type=OperatorExportTypes.ONNX)

print('生成onnx文件成功!')
sys.exit(0)