Example #1
from farm.conversion.transformers import Converter
from transformers import AutoModelForTokenClassification

def test_conversion_adaptive_model_ner():
    farm_model = Converter.convert_from_transformers("dslim/bert-base-NER", device="cpu")
    transformer_model = farm_model.convert_to_transformers()[0]
    transformer_model2 = AutoModelForTokenClassification.from_pretrained("dslim/bert-base-NER")
    # compare weights
    for p1, p2 in zip(transformer_model.parameters(), transformer_model2.parameters()):
        assert (p1.data.ne(p2.data).sum() == 0)
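The zip-based weight comparison works, but zip() silently stops at the shorter of the two parameter lists, so two models with different parameter counts could still pass. A minimal sketch of a stricter check using torch.equal (the helper name is hypothetical):

import torch

def assert_same_weights(m1, m2):
    # check parameter counts first, since zip() would silently truncate
    params1, params2 = list(m1.parameters()), list(m2.parameters())
    assert len(params1) == len(params2)
    for p1, p2 in zip(params1, params2):
        assert torch.equal(p1.data, p2.data)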
Example #2
from farm.conversion.transformers import Converter
from transformers import AutoModelForSequenceClassification

def test_conversion_adaptive_model_classification():
    farm_model = Converter.convert_from_transformers("deepset/bert-base-german-cased-hatespeech-GermEval18Coarse", device="cpu")
    transformer_model = farm_model.convert_to_transformers()[0]
    transformer_model2 = AutoModelForSequenceClassification.from_pretrained("deepset/bert-base-german-cased-hatespeech-GermEval18Coarse")
    # compare weights
    for p1, p2 in zip(transformer_model.parameters(), transformer_model2.parameters()):
        assert (p1.data.ne(p2.data).sum() == 0)
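Beyond weight equality, a quick functional check is to run the checkpoint through the transformers pipeline API; since the weights match, the converted model would behave identically. A minimal sketch (the input sentence is illustrative):

from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

model_name = "deepset/bert-base-german-cased-hatespeech-GermEval18Coarse"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
print(classifier("Das ist ein ganz normaler Satz."))  # illustrative input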
Example #3
from farm.conversion.transformers import Converter
from transformers import AutoModelWithLMHead

def test_conversion_adaptive_model_lm():
    farm_model = Converter.convert_from_transformers("bert-base-german-cased", device="cpu")
    transformer_model = farm_model.convert_to_transformers()[0]
    transformer_model2 = AutoModelWithLMHead.from_pretrained("bert-base-german-cased")
    # compare weights
    for p1, p2 in zip(transformer_model.parameters(), transformer_model2.parameters()):
        assert (p1.data.ne(p2.data).sum() == 0)
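Note that AutoModelWithLMHead is deprecated in recent transformers releases; for a masked-LM checkpoint like bert-base-german-cased, AutoModelForMaskedLM is the current equivalent. A minimal sketch:

from transformers import AutoModelForMaskedLM

# current replacement for AutoModelWithLMHead on masked-LM checkpoints
transformer_model2 = AutoModelForMaskedLM.from_pretrained("bert-base-german-cased")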
Example #4
from farm.conversion.transformers import Converter
from transformers import AutoModelForQuestionAnswering

def test_conversion_adaptive_model_qa():
    farm_model = Converter.convert_from_transformers("deepset/bert-base-cased-squad2", device="cpu")
    transformer_model = farm_model.convert_to_transformers()[0]
    transformer_model2 = AutoModelForQuestionAnswering.from_pretrained("deepset/bert-base-cased-squad2")
    # compare weights
    for p1, p2 in zip(transformer_model.parameters(), transformer_model2.parameters()):
        assert (p1.data.ne(p2.data).sum() == 0)
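For completeness, the same checkpoint can be queried directly through the transformers pipeline API. A minimal sketch (question and context are illustrative):

from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

model_name = "deepset/bert-base-cased-squad2"
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
qa = pipeline("question-answering", model=model, tokenizer=tokenizer)
print(qa(question="Where does Max live?", context="My name is Max and I live in Munich."))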
Example #5
import pprint
from pathlib import Path

from farm.conversion.transformers import Converter
from farm.infer import Inferencer
# from farm.modeling.adaptive_model import AdaptiveModel  # for the commented alternative below

def convert_from_transformers():
    transformers_input_name = "deepset/bert-base-german-cased-hatespeech-GermEval18Coarse"
    farm_output_dir = Path(
        "../saved_models/farm-bert-base-german-cased-hatespeech-GermEval18Coarse"
    )

    # CASE 1: MODEL
    # Load model from transformers model hub (-> continue training / compare models / ...)
    model = Converter.convert_from_transformers(transformers_input_name,
                                                device="cpu")

    # Alternative way to load from transformers model hub:
    # model = AdaptiveModel.convert_from_transformers(transformers_input_name, device="cpu", task_type="text_classification")
    # ... continue as in the other examples, e.g. to fine-tune this classification model on your own data

    # CASE 2: INFERENCER
    # Load Inferencer from transformers, incl. model & tokenizer (-> just get predictions)
    nlp = Inferencer.load(transformers_input_name,
                          task_type="text_classification")

    # run predictions
    result = nlp.inference_from_dicts(dicts=[{"text": "Was ein scheiß Nazi!"}])
    pprint.pprint(result)
    nlp.close_multiprocessing_pool()

    # save it
    nlp.save(farm_output_dir)
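Once saved, the FARM model directory can be loaded back for inference. A minimal sketch, reusing farm_output_dir from above and assuming FARM reads the task from the saved prediction head (the input sentence is illustrative):

# reload the saved FARM model; the task type is read from the saved prediction head
nlp = Inferencer.load(farm_output_dir)
result = nlp.inference_from_dicts(dicts=[{"text": "Noch ein Beispielsatz."}])
pprint.pprint(result)
nlp.close_multiprocessing_pool()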
Example #6
import pprint
from pathlib import Path

from farm.conversion.transformers import Converter
from farm.infer import Inferencer

def convert_from_transformers():
    # CASE 1: MODEL
    # Load model from transformers model hub (-> continue training / compare models / ...)
    model = Converter.convert_from_transformers("deepset/bert-large-uncased-whole-word-masking-squad2", device="cpu")
    # Alternative way to load from transformers model hub:
    # model = AdaptiveModel.convert_from_transformers("deepset/bert-large-uncased-whole-word-masking-squad2", device="cpu", task_type="question_answering")
    # ... continue as in the other examples, e.g. to fine-tune this QA model on your own data

    # CASE 2: INFERENCER
    # Load Inferencer from transformers, incl. model & tokenizer (-> just get predictions)
    nlp = Inferencer.load("deepset/bert-large-uncased-whole-word-masking-squad2", task_type="question_answering")

    # run predictions
    QA_input = [{"questions": ["Why is model conversion important?"],
                 "text": "The option to convert models between FARM and transformers gives freedom to the user and let people easily switch between frameworks."}]
    result = nlp.inference_from_dicts(dicts=QA_input)
    pprint.pprint(result)
    nlp.close_multiprocessing_pool()

    # save it
    farm_model_dir = Path("../saved_models/bert-english-qa-large")
    nlp.save(farm_model_dir)
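The conversion is symmetric: as the tests above show, convert_to_transformers() returns a list of transformers models (one per prediction head), which can then be saved as a regular transformers checkpoint. A minimal sketch (the output path is illustrative):

from farm.conversion.transformers import Converter

# round trip: transformers -> FARM -> transformers
farm_model = Converter.convert_from_transformers("deepset/bert-large-uncased-whole-word-masking-squad2", device="cpu")
transformers_model = farm_model.convert_to_transformers()[0]  # one model per prediction head
transformers_model.save_pretrained("../saved_models/bert-english-qa-large-transformers")  # illustrative path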