Ejemplo n.º 1
0
    def embed_sentence(sentence: str,
                       layers: str = "1",
                       use_scalar_mix: bool = False) -> Sentence:
        """Embed a raw text string with Transformer-XL and return the Sentence.

        :param sentence: plain text to be embedded
        :param layers: layer specification string passed to the embedder
        :param use_scalar_mix: whether the embedder applies a scalar mix
        :return: the flair ``Sentence`` carrying the computed embeddings
        """
        # NOTE: a fresh embedder is built on every call; `transfo_model`
        # comes from the enclosing scope.
        transfo_embedder = TransformerXLEmbeddings(
            model=transfo_model,
            layers=layers,
            use_scalar_mix=use_scalar_mix,
        )
        embedded = Sentence(sentence)
        transfo_embedder.embed(embedded)
        return embedded
Ejemplo n.º 2
0
 def embed_sentence(sentence: str,
                    layers: str = '1',
                    use_scalar_mix: bool = False) -> Sentence:
     """Return *sentence* wrapped in a flair ``Sentence`` with Transformer-XL
     embeddings attached.

     :param sentence: plain text to embed
     :param layers: layer specification forwarded to the embedder
     :param use_scalar_mix: forwarded to the embedder's scalar-mix option
     :return: the embedded ``Sentence``
     """
     # `transfo_model` is resolved from the enclosing scope; this variant
     # passes it via the `pretrained_model_name_or_path` keyword.
     embedder = TransformerXLEmbeddings(
         pretrained_model_name_or_path=transfo_model,
         layers=layers,
         use_scalar_mix=use_scalar_mix,
     )
     sent = Sentence(sentence)
     embedder.embed(sent)
     return sent
Ejemplo n.º 3
0
# Cosine similarity over the embedding dimension, used to compare documents.
cos = CosineSimilarity(dim=1, eps=1e-6)

# Runtime flags and accumulators for the scoring loop.
# NOTE(review): `dynamic`/`graph` semantics are not visible here — presumably
# they toggle execution modes elsewhere in the script; confirm against callers.
dynamic = True
graph = False
doc_embeddings = []
scores = []

# Document-level embedding built by pooling token embeddings.
# The commented-out entries are alternative embedders the author toggles
# between; only Transformer-XL is active in this configuration.
stacked_embeddings = DocumentPoolEmbeddings([
    #WordEmbeddings('en'),
    #WordEmbeddings('glove'),
    #WordEmbeddings('extvec'),#ELMoEmbeddings('original'),
    #BertEmbeddings('bert-base-cased'),
    #FlairEmbeddings('news-forward-fast'),
    #FlairEmbeddings('news-backward-fast'),
    #OpenAIGPTEmbeddings()
    TransformerXLEmbeddings()
])  #, mode='max')


def set_card():
    """Read a card's text from stdin and a tag from the prompt.

    NOTE(review): this function appears truncated in this excerpt — the
    ``card_tag == "-1"`` branch assigns ``card_tag`` and ``tag_str`` but the
    remainder of the body is not visible; confirm against the full source.
    """
    print("Input the Card Text, press Ctrl-D to end text entry")
    # Multi-line entry: read until EOF (Ctrl-D) rather than a single input().
    card = sys.stdin.read()  #input("Input the Card Text: ")
    card_tag = input(
        "Input the card_tag, or a -1 to summarize in-terms of the card itself: "
    )
    card = str(card)
    if str(
            card_tag
    ) == "-1":  #This will not work with large documents when bert is enabled
        # Sentinel "-1": summarize the card against its own full text.
        card_tag = Sentence(str(card))
        tag_str = ""
Ejemplo n.º 4
0
def get_transformerxl(model_name):
    """Construct and return a ``TransformerXLEmbeddings`` for *model_name*.

    :param model_name: identifier of the pretrained Transformer-XL model
    :return: the constructed embeddings object
    """
    embeddings = TransformerXLEmbeddings(model_name)
    return embeddings