Example #1
0
import re
import sys
import nltk
import time

from SPARQLWrapper import SPARQLWrapper, JSON
from multiprocessing.pool import ThreadPool
#import matplotlib.pyplot as plt
import statistics
from difflib import SequenceMatcher
import spacy
# Shared spaCy English pipeline used by the parsing helpers below.
# NOTE(review): the bare 'en' shortcut was removed in spaCy v3 — the second
# sample in this file loads 'en_core_web_sm'; confirm the spaCy version pinned.
nlp = spacy.load('en')

# SPARQL endpoints: local Fuseki mirror, with the remote UPB endpoint kept as
# a trailing comment for reference. Both names currently point to the same URL.
dbpediaSPARQL="http://localhost:3030/ds/sparql" #"http://sparql.cs.upb.de:8891/sparql"
dbpediaSPARQL2="http://localhost:3030/ds/sparql" #"http://sparql.cs.upb.de:8891/sparql"
# NOTE(review): `stopwords` is never imported in this chunk — presumably the
# project-local stopwords module (cf. `from src import stopwords` in sample #2).
# As written these two lines raise NameError; confirm the missing import.
stopWordsList=stopwords.getStopWords()
comparsion_words=stopwords.getComparisonWords()

def get_verbs(question):
    """Return the surface text of every verb in *question*.

    The question string is parsed with the module-level spaCy pipeline
    (``nlp``); tokens whose coarse part-of-speech tag is ``VERB`` are
    collected in document order.
    """
    parsed = nlp(question)
    return [token.text for token in parsed if token.pos_ == "VERB"]
    

def split_base_on_verb(combinations,question):
    newCombinations=[]
    verbs=get_verbs(question)
    flag=False
Example #2
0
import spacy
import time
import statistics

from src import stopwords as wiki_stopwords
from Elastic import searchIndex as wiki_search_elastic
#from falcon2.evaluation import evaluation as wiki_evaluation
#from evaluateFalcon2 import read_dataset
from SPARQLWrapper import SPARQLWrapper, JSON, POST
from multiprocessing.pool import ThreadPool
from difflib import SequenceMatcher

# Shared spaCy English pipeline used by the parsing helpers below.
nlp = spacy.load('en_core_web_sm')
# Wikidata SPARQL endpoint queried by this module.
wikidataSPARQL = "http://node3.research.tib.eu:4010/sparql"

# Stop-word and comparison-word lists from the project-local stopwords module.
# (Note: "comparsion" is a pre-existing typo kept for compatibility with callers.)
stopWordsList = wiki_stopwords.getStopWords()
comparsion_words = wiki_stopwords.getComparisonWords()
# Module-level flag — presumably toggles evaluation-mode behaviour in code
# outside this chunk; TODO confirm against the rest of the file.
evaluation = False


def get_verbs(question):
    """Return the surface text of every verb in *question*.

    Parses the question with the module-level spaCy pipeline (``nlp``)
    and collects, in document order, each token whose coarse
    part-of-speech tag is ``VERB``.
    """
    doc = nlp(question)
    return [tok.text for tok in doc if tok.pos_ == "VERB"]


def split_base_on_verb(combinations, question):
    newCombinations = []