def test_bert_get_pretrained(model_name, ctx):
    """Download a pretrained BERT checkpoint and verify it loads cleanly.

    Checks that the returned config's vocab size agrees with the tokenizer,
    that the backbone weights load into ``BertModel``, and that the MLM-head
    weights (when published for this model) load into ``BertForMLM``.
    """
    assert len(list_pretrained_bert()) > 0
    with tempfile.TemporaryDirectory() as root, ctx:
        cfg, tokenizer, backbone_params_path, mlm_params_path = \
            get_pretrained_bert(model_name, load_backbone=True,
                                load_mlm=True, root=root)
        # Config and tokenizer must agree on the vocabulary size.
        assert cfg.MODEL.vocab_size == len(tokenizer.vocab)
        bert_model = BertModel.from_cfg(cfg)
        bert_model.load_parameters(backbone_params_path)
        bert_mlm_model = BertForMLM(cfg)
        if mlm_params_path is not None:
            bert_mlm_model.load_parameters(mlm_params_path)
        # Separately verify that backbone-only weights load into a fresh
        # MLM model (i.e. without the MLM head parameters).
        bert_mlm_model = BertForMLM(cfg)
        bert_mlm_model.backbone_model.load_parameters(backbone_params_path)
def test_list_pretrained_bert():
    """The registry of pretrained BERT models must not be empty."""
    available_models = list_pretrained_bert()
    assert len(available_models) > 0
# NOTE(review): the assertions below continue a test function whose `def`
# lies before this chunk; `ctx`, `cfg`, `inputs`, `token_types`,
# `valid_length` and the `*_tn` (time-major) outputs are bound there —
# confirm against the full file.
assert_allclose(contextual_embedding.asnumpy(),
                mx.np.swapaxes(contextual_embedding_tn, 0, 1).asnumpy(),
                1E-3, 1E-3)
assert_allclose(pooled_out.asnumpy(), pooled_out_tn.asnumpy(), 1E-3, 1E-3)
assert_allclose(nsp_score.asnumpy(), nsp_score_tn.asnumpy(), 1E-3, 1E-3)
assert_allclose(mlm_score.asnumpy(), mlm_score_tn.asnumpy(), 1E-3, 1E-3)

# Test BertModel FP16 — only meaningful on a GPU context.
device_type = ctx.device_type
if device_type == 'gpu':
    verify_backbone_fp16(model_cls=BertModel, cfg=cfg, ctx=ctx,
                         inputs=[inputs, token_types, valid_length])


@pytest.mark.slow
@pytest.mark.remote_required
@pytest.mark.parametrize('model_name', list_pretrained_bert())
def test_bert_get_pretrained(model_name, ctx):
    """Download a pretrained BERT checkpoint and verify it loads cleanly.

    Checks that the returned config's vocab size agrees with the tokenizer,
    that the backbone weights load into ``BertModel``, and that the MLM-head
    weights (when published for this model) load into ``BertForMLM``.
    """
    assert len(list_pretrained_bert()) > 0
    with tempfile.TemporaryDirectory() as root, ctx:
        cfg, tokenizer, backbone_params_path, mlm_params_path = \
            get_pretrained_bert(model_name, load_backbone=True,
                                load_mlm=True, root=root)
        # Config and tokenizer must agree on the vocabulary size.
        assert cfg.MODEL.vocab_size == len(tokenizer.vocab)
        bert_model = BertModel.from_cfg(cfg)
        bert_model.load_parameters(backbone_params_path)
        bert_mlm_model = BertForMLM(cfg)
        if mlm_params_path is not None:
            bert_mlm_model.load_parameters(mlm_params_path)
        # Separately verify that backbone-only weights load into a fresh
        # MLM model (i.e. without the MLM head parameters).
        bert_mlm_model = BertForMLM(cfg)
        bert_mlm_model.backbone_model.load_parameters(backbone_params_path)