Example 1
    def _load_ckpt(self):
        if self.args.EXPER.resume == "load_voc":
            load_tf_weights(self.model, 'vocweights.pkl')
        else:  # resume tag is an iteration number or 'best'
            ckptfile = torch.load(
                os.path.join(self.save_path,
                             'checkpoint-{}.pth'.format(self.args.EXPER.resume)))
            self.model.load_state_dict(ckptfile['state_dict'])
            # load_checkpoint(self.model, ckptfile)
            # Restore training state only when actually resuming training.
            if not self.args.finetune and not self.args.do_test and not self.args.Prune.do_test:
                self.optimizer.load_state_dict(ckptfile['opti_dict'])
                self.global_epoch = ckptfile['epoch']
                self.global_iter = ckptfile['iter']
                self.best_mAP = ckptfile['metric']
        print("successfully loaded checkpoint {}".format(self.args.EXPER.resume))
Example 2
    def _load_ckpt(self):
        if self.args.EXPER.resume == "load_voc":
            load_tf_weights(self.model, 'vocweights.pkl')
        else:  # iter or best
            ckptfile = torch.load(
                os.path.join(
                    self.save_path,
                    'checkpoint-{}.pth'.format(self.args.EXPER.resume)))
            # If the checkpoint comes from a parallelized model (its keys carry
            # a 'module.' prefix), wrap the model so the key names line up.
            if next(iter(ckptfile['state_dict'])).startswith('module.'):
                self.model = nn.DataParallel(self.model)
            self.model.load_state_dict(ckptfile['state_dict'])

            # load_checkpoint(self.model, ckptfile)
            if not self.args.finetune:
                self.optimizer.load_state_dict(ckptfile['opti_dict'])
                self.global_epoch = ckptfile['epoch']
                self.global_iter = ckptfile['iter']
                self.best_mAP = ckptfile['metric']
        print("successfully load checkpoint {}".format(self.args.EXPER.resume))
Example 3
    def _load_ckpt_quantile_adjusted(self, name):
        cfg2.merge_from_file("configs/strongerv3_asff.yaml")
        # Instantiate the model class named in the config on the GPU.
        net = eval(cfg2.MODEL.modeltype)(cfg=cfg2.MODEL).cuda()
        if self.args.EXPER.resume == "load_voc":
            load_tf_weights(self.model, 'vocweights.pkl')
        else:  # resume tag is an iteration number or 'best'
            ckptfile = torch.load(
                os.path.join('./checkpoints/strongerv3_asff/',
                             'checkpoint-{}.pth'.format(name)))
            state_dict = ckptfile['state_dict']
            # Keys are copied unchanged: despite the original comment, no
            # 'module.' prefix is stripped (or added) in this variant.
            new_state_dict = {}
            for k, v in state_dict.items():
                new_state_dict[k] = v
            net.load_state_dict(new_state_dict)
            (backbone, headslarge, detlarge, mergelarge, headsmid, detmid,
             mergemid, headsmall, detsmall) = net.get_info()
            self.model.load_partial_state(backbone, headslarge, detlarge,
                                          mergelarge, headsmid, detmid,
                                          mergemid, headsmall, detsmall)
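Note that the loop above copies keys verbatim; if stripping the 'module.' prefix is actually wanted, a minimal sketch of a helper (hypothetical, not part of the source) could look like this:

import torch
from typing import Dict

def strip_module_prefix(state_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    # Drop the 'module.' prefix that nn.DataParallel adds; other keys pass through.
    return {k[len('module.'):] if k.startswith('module.') else k: v
            for k, v in state_dict.items()}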
Example 4
    def _load_ckpt(self):
        if self.args.EXPER.resume == "load_voc":
            load_tf_weights(self.model, 'vocweights.pkl')
        else:  # resume tag is an iteration number or 'best'
            ckptfile = torch.load(
                os.path.join(
                    self.save_path,
                    'checkpoint-{}.pth'.format(self.args.EXPER.resume)))
            state_dict = ckptfile['state_dict']
            # Add the 'module.' prefix so the keys match a DataParallel-wrapped model.
            new_state_dict = {}
            for k, v in state_dict.items():
                new_state_dict["module." + k] = v
            self.model.load_state_dict(new_state_dict)
            # load_checkpoint(self.model, ckptfile)
            # Restore training state only when actually resuming training.
            if not self.args.finetune and not self.args.do_test and not self.args.Prune.do_test:
                self.optimizer.load_state_dict(ckptfile['opti_dict'])
                self.global_epoch = ckptfile['epoch']
                self.global_iter = ckptfile['iter']
                self.best_mAP = ckptfile['metric']
        print("successfully loaded checkpoint {}".format(self.args.EXPER.resume))
Example 5
    def _load_ckpt(self):
        if self.args.EXPER.resume == "load_voc":
            load_tf_weights(self.model, 'vocweights.pkl')
        else:  # resume tag is an iteration number or 'best'
            ckptfile = torch.load(
                os.path.join(
                    self.save_path,
                    'checkpoint-{}.pth'.format(self.args.EXPER.resume)))
            # Take care of the distributed model: rewrite the checkpoint keys so
            # they match the model's keys ('module.' prefix added or stripped).
            # Requires: from collections import OrderedDict
            if 'module.' in list(self.model.state_dict().keys())[0]:
                newdict = OrderedDict()
                for k, v in ckptfile['state_dict'].items():
                    if 'module.' not in k:
                        newdict['module.' + k] = v
                    else:
                        newdict[k] = v
                ckptfile['state_dict'] = newdict
            else:
                newdict = OrderedDict()
                for k, v in ckptfile['state_dict'].items():
                    if 'module.' in k:
                        newdict[k[7:]] = v  # drop the leading 'module.'
                    else:
                        newdict[k] = v
                ckptfile['state_dict'] = newdict
            # Check consistency between checkpoint and model. Iterate over a
            # snapshot of the keys: popping from a dict while iterating over
            # .items() raises a RuntimeError in Python 3.
            model_state = self.model.state_dict()
            for k in list(ckptfile['state_dict'].keys()):
                if k not in model_state:
                    ckptfile['state_dict'].pop(k)
            for k1, v1 in model_state.items():
                if k1 not in ckptfile['state_dict'] or ckptfile['state_dict'][k1].shape != v1.shape:
                    print("weight {} will be initialized from scratch".format(k1))
                    ckptfile['state_dict'][k1] = v1
            # The reconciliation above also covers the bn_not_save parameters,
            # so strict loading is safe here.
            self.model.load_state_dict(ckptfile['state_dict'], strict=True)
            # load_checkpoint(self.model, ckptfile)
            # Restore training state only when actually resuming training.
            if not self.args.finetune and not self.args.do_test and not self.args.Prune.do_test:
                self.optimizer.load_state_dict(ckptfile['opti_dict'])
                self.global_epoch = ckptfile['epoch']
                self.global_iter = ckptfile['iter']
                self.best_mAP = ckptfile['metric']
        print("successfully loaded checkpoint {}".format(self.args.EXPER.resume))
Example 6
    def _load_ckpt(self):
        if self.args.EXPER.resume == "load_voc":
            load_tf_weights(self.model, 'vocweights.pkl')
        else:  # resume tag is an iteration number or 'best'
            ckptfile = torch.load(
                os.path.join(
                    self.save_path,
                    'checkpoint-{}.pth'.format(self.args.EXPER.resume)))
            # Take care of the distributed model: add the 'module.' prefix when
            # the model is wrapped but the checkpoint keys are not.
            # Requires: from collections import OrderedDict
            if 'module.' in list(self.model.state_dict().keys())[0]:
                newdict = OrderedDict()
                for k, v in ckptfile['state_dict'].items():
                    if 'module.' not in k:
                        newdict['module.' + k] = v
                    else:
                        newdict[k] = v
                ckptfile['state_dict'] = newdict
            # strict=False just ignores the bn_not_save parameters.
            self.model.load_state_dict(ckptfile['state_dict'], strict=False)
            # load_checkpoint(self.model, ckptfile)
            # Restore training state only when actually resuming training.
            if not self.args.finetune and not self.args.do_test and not self.args.Prune.do_test:
                self.optimizer.load_state_dict(ckptfile['opti_dict'])
                self.global_epoch = ckptfile['epoch']
                self.global_iter = ckptfile['iter']
                self.best_mAP = ckptfile['metric']
        print("successfully loaded checkpoint {}".format(self.args.EXPER.resume))