Example #1
def test_dictionary_save_and_load():
    dictionary = Dictionary(add_unk=False)
    dictionary.add_item('class_1')
    dictionary.add_item('class_2')
    dictionary.add_item('class_3')
    file_path = 'dictionary.txt'
    dictionary.save(file_path)
    loaded_dictionary = dictionary.load_from_file(file_path)
    assert (len(dictionary) == len(loaded_dictionary))
    assert (len(dictionary.get_items()) == len(loaded_dictionary.get_items()))
    os.remove(file_path)
Example #2
    @classmethod
    def from_corpus(cls, corpus: Corpus, grammar: SupertagGrammar,
                    parameters: ModelParameters):
        """ Construct an instance of the model using
            * supertags and pos tags from `grammar`, and
            * word embeddings (as specified in `parameters`) from `corpus`.
        """
        supertags = Dictionary(add_unk=False)
        for tag in grammar.tags:
            supertags.add_item(tag.pos())
        postags = Dictionary(add_unk=False)
        for tag in grammar.pos:
            postags.add_item(tag)

        rnn_dropout = parameters.lstm_dropout
        if rnn_dropout < 0:
            rnn_dropout = parameters.dropout

        sequence_tagger = SequenceMultiTagger(
            parameters.lstm_size,
            EmbeddingFactory(parameters, corpus), [supertags, postags],
            ["supertag", "pos"],
            use_rnn=(parameters.lstm_layers > 0),
            rnn_layers=parameters.lstm_layers,
            dropout=parameters.dropout,
            word_dropout=parameters.word_dropout,
            locked_dropout=parameters.locked_dropout,
            lstm_dropout=rnn_dropout,
            reproject_embeddings=False)

        return cls(sequence_tagger, grammar)
Example #3
def load_task(data_folder):
    X = {'train': [], 'test': []}
    y = {'train': [], 'test': []}
    tag_dictionary = Dictionary()
    tag_dictionary.add_item('<START>')
    tag_dictionary.add_item('<STOP>')

    for part in ('train', 'test'):
        dataset = load_file(data_folder, f'{part}.txt')

        for sentence in dataset.split('\n\n'):
            X_sentence = []
            y_sentence = []

            for tagged_token in sentence.split('\n'):
                if not tagged_token:
                    continue
                token, _, _, tag = re.split(' ', tagged_token)
                if not token.startswith("-DOCSTART-"):
                    X_sentence.append(token)
                    y_sentence.append(tag)
                    tag_dictionary.add_item(tag)

            if X_sentence:
                X[part].append(X_sentence)
                y[part].append(y_sentence)

    return X['train'], X['test'], y['train'], y['test'], tag_dictionary
Example #4
    def _determine_if_span_prediction_problem(self,
                                              dictionary: Dictionary) -> bool:
        for item in dictionary.get_items():
            if item.startswith("B-") or item.startswith(
                    "S-") or item.startswith("I-"):
                return True
        return False
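A small standalone sketch of the same check on a hand-built BIOES tag dictionary (flair API as used throughout these examples; the sample tags are illustrative):

from flair.data import Dictionary

tag_dictionary = Dictionary(add_unk=False)
for tag in ("O", "B-PER", "I-PER", "E-PER", "S-LOC"):
    tag_dictionary.add_item(tag)

# same logic as above: any B-/I-/S- prefixed item marks a span prediction problem
is_span_problem = any(
    item.startswith(("B-", "S-", "I-")) for item in tag_dictionary.get_items()
)
print(is_span_problem)  # True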
Example #5
def create_corpus(args, load_dict_from_lm=False, return_back='both'):
    if not load_dict_from_lm:
        dictionary: Dictionary = Dictionary.load(
            os.path.join(args.corpus_path, args.mapfile))

    else:
        print("loading dictionary from finetune model")
        from flair.embeddings import FlairEmbeddings
        dictionary = FlairEmbeddings('he-forward').lm.dictionary

    language_model = LanguageModel(dictionary,
                                   args.is_forward_lm,
                                   hidden_size=args.hidden_size,
                                   nlayers=1)

    corpus = TextCorpus(args.corpus_path,
                        dictionary,
                        args.is_forward_lm,
                        character_level=True)
    if return_back == 'both':
        return language_model, corpus
    elif return_back == 'language_model':
        return language_model
    elif return_back == 'corpus':
        return corpus
    else:
        print("Please specify what to return: 'both', 'language_model' or 'corpus'")
Example #6
def train_LM(file_path, model_path, is_forward_lm=True):
    from flair.data import Dictionary
    from flair.models import LanguageModel
    from flair.trainers.language_model_trainer import LanguageModelTrainer, TextCorpus

    dictionary = Dictionary.load_from_file(file_path + 'mappings')

    # get your corpus, process forward and at the character level
    corpus = TextCorpus(file_path,
                        dictionary,
                        is_forward_lm,
                        character_level=True)

    # instantiate your language model, set hidden size and number of layers
    language_model = LanguageModel(dictionary,
                                   is_forward_lm,
                                   hidden_size=128,
                                   nlayers=1)

    # train your language model
    trainer = LanguageModelTrainer(language_model, corpus)

    trainer.train(model_path,
                  sequence_length=100,
                  mini_batch_size=32,
                  max_epochs=10)
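After training, flair's LanguageModelTrainer writes its best checkpoint as best-lm.pt in the output directory (Example #8 below relies on the same file name). A minimal follow-up sketch, assuming model_path is the directory passed to trainer.train() above:

import os

from flair.data import Sentence
from flair.embeddings import FlairEmbeddings

# load the freshly trained character LM as contextual string embeddings
char_lm_embeddings = FlairEmbeddings(os.path.join(model_path, 'best-lm.pt'))
sentence = Sentence('I love Berlin')
char_lm_embeddings.embed(sentence)
print(sentence[0].embedding.size())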
Example #7
def test_train_resume_language_model_training(resources_path,
                                              results_base_path,
                                              tasks_base_path):
    dictionary = Dictionary.load(u'chars')
    language_model = LanguageModel(dictionary,
                                   is_forward_lm=True,
                                   hidden_size=128,
                                   nlayers=1)
    corpus = TextCorpus((resources_path / u'corpora/lorem_ipsum'),
                        dictionary,
                        language_model.is_forward_lm,
                        character_level=True)
    trainer = LanguageModelTrainer(language_model, corpus, test_mode=True)
    trainer.train(results_base_path,
                  sequence_length=10,
                  mini_batch_size=10,
                  max_epochs=2,
                  checkpoint=True)
    trainer = LanguageModelTrainer.load_from_checkpoint(
        (results_base_path / u'checkpoint.pt'), corpus)
    trainer.train(results_base_path,
                  sequence_length=10,
                  mini_batch_size=10,
                  max_epochs=2)
    shutil.rmtree(results_base_path)
Example #8
def test_training():
    # get default dictionary
    dictionary: Dictionary = Dictionary.load('chars')

    # init forward LM with 128 hidden states and 1 layer
    language_model: LanguageModel = LanguageModel(dictionary,
                                                  is_forward_lm=True,
                                                  hidden_size=128,
                                                  nlayers=1)

    # get the example corpus and process at character level in forward direction
    corpus: TextCorpus = TextCorpus('resources/corpora/lorem_ipsum',
                                    dictionary,
                                    language_model.is_forward_lm,
                                    character_level=True)

    # train the language model
    trainer: LanguageModelTrainer = LanguageModelTrainer(
        language_model, corpus)
    trainer.train('./results',
                  sequence_length=10,
                  mini_batch_size=10,
                  max_epochs=5)

    # use the character LM as embeddings to embed the example sentence 'I love Berlin'
    char_lm_embeddings = CharLMEmbeddings('./results/best-lm.pt')
    sentence = Sentence('I love Berlin')
    char_lm_embeddings.embed(sentence)
    print(sentence[1].embedding.size())

    # clean up results directory
    shutil.rmtree('./results', ignore_errors=True)
Example #9
def test_train_language_model(results_base_path, resources_path):
    # get default dictionary
    dictionary: Dictionary = Dictionary.load('chars')

    # init forward LM with 128 hidden states and 1 layer
    language_model: LanguageModel = LanguageModel(dictionary, is_forward_lm=True, hidden_size=128, nlayers=1)

    # get the example corpus and process at character level in forward direction
    corpus: TextCorpus = TextCorpus(resources_path / 'corpora/lorem_ipsum',
                                    dictionary,
                                    language_model.is_forward_lm,
                                    character_level=True)

    # train the language model
    trainer: LanguageModelTrainer = LanguageModelTrainer(language_model, corpus, test_mode=True)
    trainer.train(results_base_path, sequence_length=10, mini_batch_size=10, max_epochs=2)

    # use the character LM as embeddings to embed the example sentence 'I love Berlin'
    char_lm_embeddings = FlairEmbeddings(str(results_base_path / 'best-lm.pt'))
    sentence = Sentence('I love Berlin')
    char_lm_embeddings.embed(sentence)

    text, likelihood = language_model.generate_text(number_of_characters=100)
    assert (text is not None)
    assert (len(text) >= 100)

    # clean up results directory
    shutil.rmtree(results_base_path, ignore_errors=True)
Example #10
    def __init__(self, params: Dict) -> None:
        """Train a Language Model from scratch. This model can then be used as Flair embeddings.

        Args:
            params (dict): training config.
        """
        self.checkpoint = params.get('checkpoint', True)
        self.sequence_length = params.get('seq_len', 250)
        self.mini_batch_size = params.get('batch_size', 100)
        self.learning_rate = params.get('lr', 20)
        self.patience = params.get('patience', 25)

        # forward LM predicts the next word, backward LM reads the sentence backwards and predicts the previous word.
        self.is_forward_lm = params.get('forward', True)

        self.corpus_dir = params.get('corpus_dir', '../')
        if not os.path.exists(self.corpus_dir):
            raise ValueError('Expected a corpus to train a language model.')

        # define corpus, dictionary and instantiate LM
        self.dictionary = Dictionary.load('chars')
        self.corpus = self._define_corpus()
        self.lm = self._define_model()

        self.save_dir = params.get('save_dir', '../')
Example #11
    def predict_zero_shot(
        self,
        sentences: Union[List[Sentence], Sentence],
        candidate_label_set: Union[List[str], Set[str], str],
        multi_label: bool = True,
    ):
        """
        Method to make zero shot predictions from the TARS model
        :param sentences: input sentence objects to classify
        :param candidate_label_set: set of candidate labels
        :param multi_label: whether to make multi-label or single-class predictions. Defaults to True.
        """

        # check if candidate_label_set is empty
        if candidate_label_set is None or len(candidate_label_set) == 0:
            log.warning("Provided candidate_label_set is empty")
            return

        # make list if only one candidate label is passed
        if isinstance(candidate_label_set, str):
            candidate_label_set = {candidate_label_set}

        # create label dictionary
        label_dictionary = Dictionary(add_unk=False)
        for label in candidate_label_set:
            label_dictionary.add_item(label)

        # note current task
        existing_current_task = self._current_task

        # create a temporary task
        self.add_and_switch_to_new_task(
            task_name="ZeroShot",
            label_dictionary=label_dictionary,
            label_type="-".join(label_dictionary.get_items()),
            multi_label=multi_label,
        )

        try:
            # make zero shot predictions
            self.predict(sentences)
        finally:
            # switch to the pre-existing task
            self.switch_to_task(existing_current_task)
            self._drop_task("ZeroShot")

        return
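A minimal usage sketch of this method with a pretrained TARS classifier; the 'tars-base' checkpoint name and the candidate label set are illustrative assumptions:

from flair.data import Sentence
from flair.models import TARSClassifier

# load a pretrained TARS model and classify against an ad-hoc label set
# without any task-specific training
tars = TARSClassifier.load('tars-base')
sentence = Sentence('The food was cold and the service was slow.')
tars.predict_zero_shot(sentence, {'positive', 'negative'}, multi_label=False)
print(sentence.labels)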
Example #12
def convert_labels_to_one_hot(label_list: List[List[str]], label_dict: Dictionary) -> List[List[int]]:
    """
    Convert list of labels (strings) to a one hot list.
    :param label_list: list of labels
    :param label_dict: label dictionary
    :return: converted label list
    """
    return [[1 if l in labels else 0 for l in label_dict.get_items()] for labels in label_list]
Example #13
def test_dictionary_get_item_for_index():
    dictionary = Dictionary(add_unk=False)
    dictionary.add_item('class_1')
    dictionary.add_item('class_2')
    dictionary.add_item('class_3')
    item = dictionary.get_item_for_index(0)
    assert ('class_1' == item)
Example #14
def test_dictionary_get_idx_for_item():
    dictionary = Dictionary(add_unk=False)
    dictionary.add_item('class_1')
    dictionary.add_item('class_2')
    dictionary.add_item('class_3')
    idx = dictionary.get_idx_for_item('class_2')
    assert (1 == idx)
Example #15
    def __init__(
        self,
        word_embeddings: flair.embeddings.TokenEmbeddings,
        label_dictionary: Dictionary,
        pooling_operation: str = "first&last",
        label_type: str = "nel",
        dropout: float = 0.5,
        skip_unk_probability: Optional[float] = None,
        **classifierargs,
    ):
        """
        Initializes an EntityLinker
        :param word_embeddings: embeddings used to embed the words/sentences
        :param label_dictionary: dictionary that gives ids to all classes. Should contain <unk>
        :param pooling_operation: either 'average', 'first', 'last' or 'first&last'. Specifies how text representations of entity mentions (with more than one word) are handled.
        E.g. 'average' means that as text representation we take the average of the embeddings of the words in the mention. 'first&last' concatenates
        the embedding of the first and the embedding of the last word.
        :param label_type: name of the label you use.
        """

        super(EntityLinker, self).__init__(
            label_dictionary=label_dictionary,
            final_embedding_size=word_embeddings.embedding_length *
            2 if pooling_operation == "first&last" else
            word_embeddings.embedding_length,
            **classifierargs,
        )

        self.word_embeddings = word_embeddings
        self.pooling_operation = pooling_operation
        self._label_type = label_type
        self.skip_unk_probability = skip_unk_probability
        if self.skip_unk_probability:
            self.known_entities = label_dictionary.get_items()

        # ----- Dropout parameters -----
        # dropouts
        self.use_dropout: float = dropout
        if dropout > 0.0:
            self.dropout = torch.nn.Dropout(dropout)

        cases = {
            "average": self.emb_mean,
            "first": self.emb_first,
            "last": self.emb_last,
            "first&last": self.emb_firstAndLast,
        }

        if pooling_operation not in cases:
            raise KeyError(
                'pooling_operation has to be one of "average", "first", "last" or "first&last"'
            )

        self.aggregated_embedding = cases[pooling_operation]

        self.to(flair.device)
Example #16
def test_dictionary_get_item_for_index():
    dictionary: Dictionary = Dictionary(add_unk=False)

    dictionary.add_item("class_1")
    dictionary.add_item("class_2")
    dictionary.add_item("class_3")

    item = dictionary.get_item_for_index(0)

    assert "class_1" == item
Example #17
def test_dictionary_get_idx_for_item():
    dictionary: Dictionary = Dictionary(add_unk=False)

    dictionary.add_item("class_1")
    dictionary.add_item("class_2")
    dictionary.add_item("class_3")

    idx = dictionary.get_idx_for_item("class_2")

    assert 1 == idx
Example #18
def make_relations_tag_dictionary(corpus: Corpus,
                                  tag_type='dependency',
                                  special_tags=[]) -> Dictionary:

    tag_dictionary: Dictionary = Dictionary(add_unk=False)
    # for tag in special_tags:
    #     tag_dictionary.add_item(tag)
    for sentence in corpus.get_all_sentences():
        for token in sentence.tokens:
            tag_dictionary.add_item(token.get_tag(tag_type).value)
    return tag_dictionary
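A usage sketch, assuming a dependency-annotated corpus such as the Universal Dependencies English corpus from flair.datasets (downloaded on first use) and the default 'dependency' tag type:

from flair.datasets import UD_ENGLISH

# collect every dependency relation label observed in the corpus
corpus = UD_ENGLISH()
relation_dictionary = make_relations_tag_dictionary(corpus, tag_type='dependency')
print(len(relation_dictionary), relation_dictionary.get_items()[:5])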
Example #19
def test_dictionary_get_items_without_unk():
    dictionary = Dictionary(add_unk=False)
    dictionary.add_item('class_1')
    dictionary.add_item('class_2')
    dictionary.add_item('class_3')
    items = dictionary.get_items()
    assert (3 == len(items))
    assert ('class_1' == items[0])
    assert ('class_2' == items[1])
    assert ('class_3' == items[2])
Example #20
def make_tag_dic(corpus, tag_type: str, use_w=False) -> Dictionary:
    # Make the tag dictionary
    tag_dictionary: Dictionary = Dictionary()
    if not use_w:
        tag_dictionary.add_item("O")
    for sentence in corpus.get_all_sentences():
        for token in sentence.tokens:
            token: Token = token
            tag_dictionary.add_item(token.get_tag(tag_type).value)
    tag_dictionary.add_item("<START>")
    tag_dictionary.add_item("<STOP>")
    return tag_dictionary
Example #21
def test_train_resume_language_model_training(resources_path,
                                              results_base_path,
                                              tasks_base_path):
    dictionary = Dictionary.load(u'chars')
    corpus = TextCorpus((resources_path / u'corpora/lorem_ipsum'),
                        dictionary,
                        forward=True,
                        character_level=True)
    assert (corpus.test is not None)
    assert (corpus.train is not None)
    assert (corpus.valid is not None)
    assert (len(corpus.train) == 2)
Example #22
def test_dictionary_get_items_without_unk():
    dictionary: Dictionary = Dictionary(add_unk=False)

    dictionary.add_item("class_1")
    dictionary.add_item("class_2")
    dictionary.add_item("class_3")

    items = dictionary.get_items()

    assert 3 == len(items)
    assert "class_1" == items[0]
    assert "class_2" == items[1]
    assert "class_3" == items[2]
Example #23
def test_transformers_keep_tokenizer_when_saving(results_base_path):
    embeddings = TransformerWordEmbeddings(
        "sentence-transformers/paraphrase-albert-small-v2")
    results_base_path.mkdir(exist_ok=True, parents=True)
    initial_tagger_path = results_base_path / "initial_tokenizer.pk"
    reloaded_tagger_path = results_base_path / "reloaded_tokenizer.pk"

    initial_tagger = SequenceTagger(embeddings, Dictionary(), "ner")

    initial_tagger.save(initial_tagger_path)
    reloaded_tagger = SequenceTagger.load(initial_tagger_path)

    reloaded_tagger.save(reloaded_tagger_path)
Example #24
def init(tasks_base_path) -> Tuple[TaggedCorpus, TextRegressor, RegressorTrainer]:
    corpus = NLPTaskDataFetcher.load_corpus(NLPTask.REGRESSION,
                                            tasks_base_path)

    glove_embedding: WordEmbeddings = WordEmbeddings("glove")
    document_embeddings: DocumentRNNEmbeddings = DocumentRNNEmbeddings(
        [glove_embedding], 128, 1, False, 64, False, False)

    model = TextRegressor(document_embeddings, Dictionary(), False)

    trainer = RegressorTrainer(model, corpus)

    return corpus, model, trainer
Example #25
def test_dictionary_get_items_with_unk():
    dictionary = Dictionary()
    dictionary.add_item('class_1')
    dictionary.add_item('class_2')
    dictionary.add_item('class_3')
    items = dictionary.get_items()
    assert (4 == len(items))
    assert ('<unk>' == items[0])
    assert ('class_1' == items[1])
    assert ('class_2' == items[2])
    assert ('class_3' == items[3])
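For contrast with the add_unk=False tests above (e.g. Example #19): when add_unk is left at its default of True, '<unk>' occupies index 0 and unseen items fall back to it. A minimal sketch of that behaviour, assumed from the flair API used in these examples:

from flair.data import Dictionary

dictionary = Dictionary()                         # add_unk defaults to True
dictionary.add_item('class_1')
print(dictionary.get_idx_for_item('<unk>'))       # 0
print(dictionary.get_idx_for_item('class_1'))     # 1
print(dictionary.get_idx_for_item('never_seen'))  # 0, unknown items map to <unk>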
Example #26
    def __init__(
            self,
            task_name: str,
            label_dictionary: Dictionary,
            label_type: str,
            embeddings: str = 'bert-base-uncased',
            num_negative_labels_to_sample: int = 2,
            prefix: bool = True,
            **tagger_args,
    ):
        """
        Initializes a TARSClassifier
        :param task_name: a string depicting the name of the task
        :param label_dictionary: dictionary of labels you want to predict
        :param embeddings: name of the pre-trained transformer model e.g.,
        'bert-base-uncased' etc
        :param num_negative_labels_to_sample: number of negative labels to sample for each
        positive label against a sentence during training. Defaults to 2 negative
        labels for each positive label. The model would sample all the negative labels
        if None is passed. That slows down the training considerably.
        :param multi_label: auto-detected by default, but you can set this to True
        to force multi-label prediction or False to force single-label prediction
        :param multi_label_threshold: If multi-label you can set the threshold to make predictions
        :param beta: Parameter for F-beta score for evaluation and training annealing
        """
        super(TARSClassifier, self).__init__()

        from flair.embeddings import TransformerDocumentEmbeddings

        if not isinstance(embeddings, TransformerDocumentEmbeddings):
            embeddings = TransformerDocumentEmbeddings(model=embeddings,
                                                       fine_tune=True,
                                                       layers='-1',
                                                       layer_mean=False,
                                                       )

        # prepare TARS dictionary
        tars_dictionary = Dictionary(add_unk=False)
        tars_dictionary.add_item('False')
        tars_dictionary.add_item('True')

        # initialize a bare-bones sequence tagger
        self.tars_model = TextClassifier(document_embeddings=embeddings,
                                         label_dictionary=tars_dictionary,
                                         label_type=self.static_label_type,
                                         **tagger_args,
                                         )

        # transformer separator
        self.separator = str(self.tars_embeddings.tokenizer.sep_token)
        if self.tars_embeddings.tokenizer._bos_token:
            self.separator += str(self.tars_embeddings.tokenizer.bos_token)

        self.prefix = prefix
        self.num_negative_labels_to_sample = num_negative_labels_to_sample

        # Store task specific labels since TARS can handle multiple tasks
        self.add_and_switch_to_new_task(task_name, label_dictionary, label_type)
Example #27
def test_dictionary_get_items_with_unk():
    dictionary: Dictionary = Dictionary()

    dictionary.add_item("class_1")
    dictionary.add_item("class_2")
    dictionary.add_item("class_3")

    items = dictionary.get_items()

    assert 4 == len(items)
    assert "<unk>" == items[0]
    assert "class_1" == items[1]
    assert "class_2" == items[2]
    assert "class_3" == items[3]
Example #28
def _get_tag_dictionary_no_prefix(tag_dictionary):
    candidate_tag_list = []
    for tag in tag_dictionary.idx2item:
        tag = tag.decode("utf-8")
        prefix, tag_no_prefix = _split_tag(tag)
        if prefix == "B" or prefix == "I":
            candidate_tag_list.append(tag_no_prefix)
    candidate_tag_list = _remove_not_unique_items_from_list(candidate_tag_list)

    tag_dictionary_no_prefix: Dictionary = Dictionary(add_unk=False)
    for tag in candidate_tag_list:
        tag_dictionary_no_prefix.add_item(tag)

    return tag_dictionary_no_prefix
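As the decode('utf-8') call above suggests, flair's Dictionary stores its items as byte strings in idx2item; a small sketch of that detail (behaviour inferred from this example):

from flair.data import Dictionary

tags = Dictionary(add_unk=False)
for tag in ('B-PER', 'I-PER', 'O'):
    tags.add_item(tag)

# idx2item holds byte strings, so decode before doing string operations
print(tags.idx2item)                                # [b'B-PER', b'I-PER', b'O']
print([t.decode('utf-8') for t in tags.idx2item])   # ['B-PER', 'I-PER', 'O']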
Example #29
def test_convert_labels_to_one_hot():
    label_dict = Dictionary(add_unk=False)
    label_dict.add_item(u'class-1')
    label_dict.add_item(u'class-2')
    label_dict.add_item(u'class-3')
    one_hot = convert_labels_to_one_hot([[u'class-2']], label_dict)
    assert (one_hot[0][0] == 0)
    assert (one_hot[0][1] == 1)
    assert (one_hot[0][2] == 0)
Example #30
def load_task(data_folder, task, tag_column, preprocess):
    X = {'train': [], 'test': []}
    y = {'train': [], 'test': []}
    tag_dictionary = Dictionary()
    tag_dictionary.add_item('<START>')
    tag_dictionary.add_item('<STOP>')

    for part in ('train', 'test'):
        #dataset = load_file(data_folder, task, f'{part}.txt')

        file_path = Path(f'{data_folder}/{task}/{part}.txt')
        print('Loading: ', file_path)

        corpus = ColumnDataset(
            path_to_column_file=file_path,
            column_name_map={
                0: 'text',
                tag_column: 'ner'
            },
            tag_to_bioes=None,
            encoding='utf8',
            comment_symbol=None,
            in_memory=True,
            document_separator_token=None,
        )

        for sent in corpus:
            tokens = [w.text for w in sent]
            if preprocess:
                X[part].append(
                    list(
                        zip(tokens,
                            [nltk.pos_tag([tok])[0][1] for tok in tokens])))
            else:
                X[part].append(tokens)

            labels = [w.get_tag('ner').value for w in sent]
            y[part].append(labels)

            for tag in labels:
                tag_dictionary.add_item(tag)

    print('Train size:', len(X['train']))
    print('Test size:', len(X['test']))

    return X['train'], X['test'], y['train'], y['test'], tag_dictionary