Example #1
import torch
from collections import OrderedDict

# Note: init_backbone, fuse_module, recursion_change_bn1 and pytorch_version_to_0_3_1
# are helpers from the surrounding project and are not defined in these examples.
def export_model_0_3_1(checkpoint_path, export_model_name, inputsize=[1, 3, 112, 112], combine_conv_bn=False, half=False):
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    pytorch_version_to_0_3_1()
    check_point = torch.load(checkpoint_path, map_location=device)
    model = init_backbone(name="MobileFaceNet", input_size=[112, 112])
    if combine_conv_bn:
        print("combine conv and bn...")
        fuse_module(model)
    state_dict = check_point['backbone']
    model = model.to(device)
    # Patch BatchNorm modules into the layout expected by PyTorch 0.3.1.
    for name, module in model._modules.items():
        recursion_change_bn1(module)
    # Copy the weights, skipping the 'num_batches_tracked' buffers that 0.3.1 does not have.
    mapped_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if 'num_batches_tracked' in key:
            continue
        mapped_state_dict[key] = value
    model.load_state_dict(mapped_state_dict)
    if half:
        print("convert to fp16...")
        model = model.half()
    model.eval()
    # for key, value in model.state_dict().items():
    #     print(key)
    # torch.autograd.Variable is deprecated; a plain tensor is enough for ONNX export.
    dummy_input = torch.randn(inputsize).to(device)
    if half:
        dummy_input = dummy_input.half()  # keep the dummy input dtype in sync with the fp16 model
    torch.onnx.export(model=model, args=dummy_input, f=export_model_name, verbose=True)
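A minimal way to call the exporter above; the checkpoint and output paths are hypothetical placeholders, not paths from the original project.

if __name__ == "__main__":
    # Hypothetical paths, shown only to illustrate the call signature.
    export_model_0_3_1(checkpoint_path="checkpoints/mobilefacenet.pth",
                       export_model_name="mobilefacenet_0_3_1.onnx",
                       inputsize=[1, 3, 112, 112],
                       combine_conv_bn=True,
                       half=False)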
Example #2
def export_jit(checkpoint_path, export_model_name, combine_conv_bn=False, half=False):
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    check_point = torch.load(checkpoint_path, map_location=device)
    state_dict = check_point['backbone']
    model = init_backbone(name="MobileFaceNet", input_size=[112, 112])
    if combine_conv_bn:
        print("combine conv and bn...")
        fuse_module(model)
    model = model.to(device)
    model.load_state_dict(state_dict)
    if half:
        print("convert to fp16...")
        model = model.half()
    pth_to_jit(model, export_model_name, device, half=half)
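pth_to_jit is not defined in these examples; the sketch below is an assumption of what it presumably does, tracing the model with TorchScript on a dummy 112x112 input and saving the result (the signature mirrors how it is called above).

import torch

def pth_to_jit(model, export_model_name, device, half=False):
    # Sketch only: trace with a dummy input and save the TorchScript module.
    model.eval()
    dummy = torch.randn(1, 3, 112, 112, device=device)
    if half:
        dummy = dummy.half()
    with torch.no_grad():
        traced = torch.jit.trace(model, dummy)
    traced.save(export_model_name)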
Example #3
def export_feature(checkpoint_path, export_model_name, combine_conv_bn=False, half=False):
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    check_point = torch.load(checkpoint_path, map_location=device)
    state_dict = check_point['net']
    model = init_backbone(name="MobileFaceNet", input_size=[112, 112])
    if combine_conv_bn:
        print("combine conv and bn...")
        fuse_module(model)
    model = model.to(device)
    model.load_state_dict(state_dict)
    # drop the classification head so only the feature-extraction layers are saved
    del model.classifier
    if half:
        print("convert to fp16...")
        model = model.half()
    print("export feature layers...")
    torch.save(model.state_dict(), export_model_name)
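Because the classifier is deleted before saving, reloading this feature-only state dict into a freshly built backbone (which still has its classifier) needs strict=False; a small sketch, assuming the same init_backbone helper and a hypothetical file name:

import torch

feature_model = init_backbone(name="MobileFaceNet", input_size=[112, 112])
state = torch.load("features_only.pth", map_location="cpu")  # hypothetical file name
missing, unexpected = feature_model.load_state_dict(state, strict=False)
print("missing keys (classifier weights are expected here):", missing)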
Example #4
def export_model(checkpoint_path, export_model_name, inputsize=[1, 3, 112, 112], combine_conv_bn=False, half=False):
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    check_point = torch.load(checkpoint_path, map_location=device)
    state_dict = check_point['backbone']
    model = init_backbone(name="MobileFaceNet", input_size=[112, 112])
    if combine_conv_bn:
        print("combine conv and bn...")
        fuse_module(model)
    model = model.to(device)
    model.load_state_dict(state_dict)
    model.eval()
    # for key, value in state_dict.items():
    #     print(key)
    dummy_input = torch.randn(inputsize).to(device)
    if half:
        print("convert to fp16...")
        model = model.half()
        dummy_input = dummy_input.half()
    # torch.onnx.export(model=model, args=dummy_input, f=export_model_name, verbose=True)
    torch.onnx.export(model=model, args=dummy_input, f=export_model_name, verbose=True, input_names=['image'],
                      output_names=['outTensor'], opset_version=11)  # renaming the input/output nodes requires PyTorch 0.4.0+
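A quick way to sanity-check the exported file, assuming the onnx and onnxruntime packages are installed (they are not used anywhere in the original code); the file name is a hypothetical placeholder, and 'image' matches the input_names argument above.

import numpy as np
import onnx
import onnxruntime as ort

onnx.checker.check_model(onnx.load("mobilefacenet.onnx"))  # hypothetical output file name
sess = ort.InferenceSession("mobilefacenet.onnx")
dummy = np.random.randn(1, 3, 112, 112).astype(np.float32)
outputs = sess.run(None, {"image": dummy})
print(outputs[0].shape)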
Example #5
    def load_checkpoint(self, check_point, finetune=False, pretrained=False):
        check_point = torch.load(check_point)
        if pretrained:
            self.backbone.load_state_dict(check_point)
            return
        if finetune:
            # load the feature-extraction (backbone) network parameters
            mapped_state_dict = self.backbone.state_dict()
            for key, value in check_point['backbone'].items():
                mapped_state_dict[key] = value
            self.backbone.load_state_dict(mapped_state_dict)
            # Load the optimizer state for the feature-extraction part: re-key each saved
            # per-parameter state onto the corresponding parameter id of the new optimizer.
            optimizer_state_dict = self.optimizer.state_dict()
            param_len = len(optimizer_state_dict['param_groups'][0]['params'])
            for index in range(param_len):
                optimizer_state_dict['state'].update({
                    optimizer_state_dict['param_groups'][0]['params'][index]:
                        check_point['optimizer']['state'].get(
                            check_point['optimizer']['param_groups'][0]['params'][index])})
            self.optimizer.load_state_dict(optimizer_state_dict)
        else:
            self.lowest_train_loss = check_point['loss']
            self.epoch = check_point['epoch']
            if self.epoch > 150:
                fuse_module(self.backbone)
                fuse_module(self.head)
            print("lowest_train_loss: ", self.lowest_train_loss)
            mapped_state_dict = self.backbone.state_dict()
            for key, value in check_point['backbone'].items():
                mapped_state_dict[key] = value
            self.backbone.load_state_dict(mapped_state_dict)

            mapped_state_dict = self.head.state_dict()
            for key, value in check_point['head'].items():
                mapped_state_dict[key] = value
            self.head.load_state_dict(mapped_state_dict)
            self.optimizer.load_state_dict(check_point['optimizer'])
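load_checkpoint expects a dict with 'backbone', 'head', 'optimizer', 'loss' and 'epoch' entries; the sketch below shows a matching save side. It is hypothetical: the real project uses save_model(...), whose exact signature is not shown in these examples.

    def save_checkpoint(self, path):
        # Hypothetical counterpart to load_checkpoint, bundling everything it reads back.
        check_point = {
            'backbone': self.backbone.state_dict(),
            'head': self.head.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'loss': self.lowest_train_loss,
            'epoch': self.epoch,
        }
        torch.save(check_point, path)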
Example #6
    def train(self, epoches=10, save_flag=True, finetune=False):
        if len(self.gpu_list) > 1:
            self.backbone = torch.nn.DataParallel(self.backbone, device_ids=self.gpu_list)
            self.head = torch.nn.DataParallel(self.head, device_ids=self.gpu_list)
            cudnn.benchmark = True
        # scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=20, gamma=0.1, last_epoch=self.epoch)
        # scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[30, 60, 100],
        #                                                  gamma=0.1, last_epoch=self.epoch)
        while self.epoch < epoches:
            print("Epoch: ", self.epoch)
            self.adjust_lr_exp(self.optimizer, self.epoch + 1, epoches, int(finetune) * 10 + 10)
            if self.epoch % 10 == 0:
                print(self.optimizer)
            self.reset_meter()
            if finetune and self.epoch < 10:
                self.finetune_model()
            else:
                self.backbone.train()
                self.head.train()
            if self.epoch == self.combine_conv_bn_epoch:
                # freeze BatchNorm parameter updates
                # self.model.apply(self.set_bn_eval)
                # fuse conv + bn
                fuse_module(self.backbone)
                fuse_module(self.head)
            self.train_epoch()
            # scheduler.step()
            if (self.epoch + 1) % 10 == 0:
                self.eval()
            if save_flag:
                self.save_model()
                self.save_model(False, False)
            self.epoch += 1
        torch.cuda.empty_cache()
        self.writer.close()
        print("Finished training.")