Example #1
def main():
    parser = ArgumentParser()

    # add task level args
    parser = add_common_specific_args(parser)
    parser = add_tune_specific_args(parser)
    parser = add_task_specific_args(parser)

    # add model specific args
    parser = Model.add_model_specific_args(parser)
    parser = optimization.add_optimizer_specific_args(parser)
    parser = Trainer.add_argparse_args(parser)

    parser.set_defaults(min_epochs=1, max_epochs=10)
    parser.set_defaults(gradient_clip_val=1.0,
                        lr_layers_getter='get_layer_lrs_with_crf')
    args = parser.parse_args()

    if args.ltp_model is not None and args.resume_from_checkpoint is not None:
        deploy_model(args, args.ltp_version)
    elif args.tune:
        tune_train(args,
                   model_class=Model,
                   task_info=task_info,
                   build_method=build_method)
    else:
        common_train(args,
                     model_class=Model,
                     task_info=task_info,
                     build_method=build_method)
Example #2
def main():
    # To export a model usable by the LTP master branch, pass the ltp_adapter argument as the output directory path, e.g. ltp_model
    parser = ArgumentParser()
    parser = add_task_specific_args(parser)
    parser = Model.add_model_specific_args(parser)
    parser = optimization.add_optimizer_specific_args(parser)
    parser = Trainer.add_argparse_args(parser)
    parser.set_defaults(min_epochs=1, max_epochs=10)
    parser.set_defaults(gradient_clip_val=1.0,
                        lr_layers_getter='get_layer_lrs_with_crf')
    args = parser.parse_args()

    if args.ltp_model is not None and args.resume_from_checkpoint is not None:
        deploy_model(args, args.ltp_version)
    elif args.build_ner_dataset:
        build_ner_distill_dataset(args)
    elif args.tune:
        tune_train(args,
                   model_class=Model,
                   task_info=task_info,
                   build_method=build_method)
    else:
        common_train(args,
                     model_class=Model,
                     task_info=task_info,
                     build_method=build_method)
Example #3
def main():
    # To export a model usable by the LTP master branch, pass the ltp_adapter argument as the output directory path, e.g. ltp_model
    parser = ArgumentParser()

    # add task level args
    parser = add_common_specific_args(parser)
    parser = add_tune_specific_args(parser)
    parser = add_task_specific_args(parser)

    # add model specific args
    parser = Model.add_model_specific_args(parser)
    parser = optimization.add_optimizer_specific_args(parser)
    parser = Trainer.add_argparse_args(parser)

    parser.set_defaults(min_epochs=1, max_epochs=10)
    parser.set_defaults(gradient_clip_val=1.0, lr_layers_getter='get_layer_lrs_with_crf')
    args = parser.parse_args()

    if args.ltp_model is not None and args.resume_from_checkpoint is not None:
        deploy_model(args, args.ltp_version)
    elif args.build_ner_dataset:
        build_ner_distill_dataset(args)
    elif args.tune:
        from ltp.utils.common_train import tune
        tune_config = {
            # 3e-4 for Small, 1e-4 for Base, 5e-5 for Large
            "lr": tune.loguniform(args.tune_min_lr, args.tune_max_lr),

            # dataset split
            "tau": tune.choice([0.8, 0.9, 1.0]),

            # weight decay
            "weight_decay": tune.choice([0.0, 0.01]),

            # gradient clipping
            "gradient_clip_val": tune.choice([1.0, 2.0, 3.0, 4.0, 5.0]),

            # lr scheduler
            "lr_scheduler": tune.choice([
                'linear_schedule_with_warmup',
                'polynomial_decay_schedule_with_warmup',
            ]),
        }
        tune_train(args, model_class=Model, task_info=task_info, build_method=build_method, tune_config=tune_config)
    else:
        common_train(args, model_class=Model, task_info=task_info, build_method=build_method)
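
The tune_config above uses a Ray Tune-style search-space API (tune.loguniform, tune.choice), imported here through ltp.utils.common_train. A minimal sketch of how such a search space is consumed, assuming Ray Tune (1.x API) as the backend and a placeholder trainable rather than LTP's actual tune_train:

from ray import tune

def trainable(config):
    # config is one sampled point from the search space, e.g. {"lr": 2.3e-4, "tau": 0.9}
    tune.report(loss=config["lr"] * config["tau"])  # placeholder metric

analysis = tune.run(
    trainable,
    config={
        "lr": tune.loguniform(1e-5, 1e-3),
        "tau": tune.choice([0.8, 0.9, 1.0]),
    },
    num_samples=4,
)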
Example #4
def main():
    parser = ArgumentParser()
    parser = add_task_specific_args(parser)
    parser = Model.add_model_specific_args(parser)
    parser = optimization.add_optimizer_specific_args(parser)
    parser = Trainer.add_argparse_args(parser)
    parser.set_defaults(gradient_clip_val=1.0)
    args = parser.parse_args()

    if args.ltp_model is not None and args.resume_from_checkpoint is not None:
        deploy_model(args, args.ltp_version)
    else:
        common_train(args,
                     metric=f'val_{task_info.metric_name}',
                     model_class=Model,
                     build_method=build_method,
                     task=task_info.task_name)
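
task_info is referenced throughout these examples but never defined in them. From the attributes the examples access (task_name, metric_name), a plausible minimal shape, hypothetical and for illustration only:

from collections import namedtuple

# hypothetical: the real TaskInfo in the LTP training code may carry more fields
TaskInfo = namedtuple("TaskInfo", ["task_name", "metric_name"])
task_info = TaskInfo(task_name="ner", metric_name="f1")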
Example #5
def deploy_model_4_1(args, version):
    from argparse import Namespace

    fake_parser = ArgumentParser()
    fake_parser = Model.add_model_specific_args(fake_parser)
    model_args, _ = fake_parser.parse_known_args(namespace=args)

    transformer_config = AutoConfig.from_pretrained(model_args.transformer)
    model = Model.load_from_checkpoint(args.resume_from_checkpoint,
                                       strict=False,
                                       hparams=model_args,
                                       config=transformer_config)

    model_config = Namespace(**model.hparams)
    # LOAD VOCAB: read the label set for each task
    pos_labels = load_labels(args.pos_data_dir, 'vocabs', 'xpos.txt')
    ner_labels = load_labels(args.ner_data_dir, 'ner_labels.txt')
    srl_labels = load_labels(args.srl_data_dir, 'srl_labels.txt')
    dep_labels = load_labels(args.dep_data_dir, 'vocabs', 'deprel.txt')
    sdp_labels = load_labels(args.sdp_data_dir, 'vocabs', 'deps.txt')

    # MODEL CLIP: drop the classifier heads of tasks that have no labels
    if not pos_labels:
        del model.pos_classifier
        model_config.pos_num_labels = 0

    if not ner_labels:
        del model.ner_classifier
        model_config.ner_num_labels = 0

    if not srl_labels:
        del model.srl_classifier
        model_config.srl_num_labels = 0

    if not dep_labels:
        del model.dep_classifier
        model_config.dep_num_labels = 0

    if not sdp_labels:
        del model.sdp_classifier
        model_config.sdp_num_labels = 0

    model_state_dict = OrderedDict(model.state_dict().items())

    ltp_model = {
        'version': version,
        'model': model_state_dict,
        'model_config': model_config,
        'transformer_config': model.transformer.config.to_dict(),
        'seg': ['I-W', 'B-W'],
        'pos': pos_labels,
        'ner': ner_labels,
        'srl': srl_labels,
        'dep': dep_labels,
        'sdp': sdp_labels,
    }
    os.makedirs(args.ltp_model, exist_ok=True)
    torch.save(ltp_model, os.path.join(args.ltp_model, 'ltp.model'))

    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.transformer)
    tokenizer.save_pretrained(args.ltp_model)
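
load_labels is called above but not defined in these snippets. From its call sites (variadic path parts, with an empty result tolerated by the clipping checks), a plausible minimal sketch, hypothetical and for illustration only:

import os

def load_labels(*path_parts: str) -> list:
    # join the parts into one path and read one label per line;
    # an absent file yields an empty list, which triggers the model clip
    path = os.path.join(*path_parts)
    if not os.path.isfile(path):
        return []
    with open(path, encoding="utf-8") as f:
        return [line.strip() for line in f if line.strip()]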
Example #6
    def __init__(self, path: str = 'small', device=None, **kwargs):
        if device is not None:
            if isinstance(device, torch.device):
                self.device = device
            elif isinstance(device, str):
                self.device = torch.device(device)
        elif torch.cuda.is_available():
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')

        if path in model_map or is_remote_url(path) or os.path.isfile(path):
            proxies = kwargs.pop("proxies", None)
            cache_dir = kwargs.pop("cache_dir", LTP_CACHE)
            force_download = kwargs.pop("force_download", False)
            resume_download = kwargs.pop("resume_download", False)
            local_files_only = kwargs.pop("local_files_only", False)
            path = cached_path(model_map.get(path, path),
                               cache_dir=cache_dir,
                               force_download=force_download,
                               proxies=proxies,
                               resume_download=resume_download,
                               local_files_only=local_files_only,
                               extract_compressed_file=True)
        elif not os.path.isdir(path):
            raise FileNotFoundError(path)
        try:
            ckpt = torch.load(os.path.join(path, "ltp.model"),
                              map_location=self.device)
        except Exception:
            # the pickle may reference pytorch_lightning classes; register
            # stub modules for them and retry the load
            fake_import_pytorch_lightning()
            ckpt = torch.load(os.path.join(path, "ltp.model"),
                              map_location=self.device)

        model_patch_4_1_3(ckpt)

        self.cache_dir = path
        transformer_config = ckpt['transformer_config']
        transformer_config['torchscript'] = True
        config = AutoConfig.for_model(**transformer_config)

        parser = ArgumentParser()
        parser = Model.add_model_specific_args(parser)
        model_args = parser.parse_args(args=[], namespace=ckpt['model_config'])

        self.model = Model(model_args, config=config).to(self.device)
        self.model.load_state_dict(ckpt['model'], strict=False)
        self.model.eval()

        self.seg_vocab = ckpt.get('seg', [WORD_MIDDLE, WORD_START])
        self.seg_vocab_dict = {
            tag: idx
            for idx, tag in enumerate(self.seg_vocab)
        }
        self.pos_vocab = ckpt.get('pos', [])
        self.ner_vocab = ckpt.get('ner', [])
        self.dep_vocab = ckpt.get('dep', [])
        self.sdp_vocab = ckpt.get('sdp', [])
        self.srl_vocab = [
            # strip the 'ARGM-' prefix explicitly: str.lstrip('ARGM-') removes
            # a character set, so it would also eat the 'A' of 'ARG0';
            # then shorten 'ARGn' to 'An'
            re.sub(r'ARG(\d)', r'A\1',
                   tag[len('ARGM-'):] if tag.startswith('ARGM-') else tag)
            for tag in ckpt.get('srl', [])
        ]
        self.tokenizer = AutoTokenizer.from_pretrained(
            path, config=self.model.transformer.config, use_fast=True)
        self.trie = Trie()
        self._model_version = ckpt.get('version', None)
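
A usage sketch for the constructor above (the __init__ of the LTP frontend class), following LTP 4.x's public interface; exact method names and outputs may differ across versions:

from ltp import LTP

ltp = LTP("small")                          # resolved via model_map, downloaded and cached
seg, hidden = ltp.seg(["他叫汤姆去拿外衣。"])  # word segmentation
pos = ltp.pos(hidden)                       # part-of-speech tags over the segmented words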