Example #1
0
def execute_query(query, yes_no_grammar, wh_grammar, database):
    """Translate a natural-language question into SQL and run it on a chat80 DB.

    Picks the yes/no grammar when the question starts with an auxiliary verb,
    otherwise the wh-question grammar.  Prints the generated SQL, then the
    first column of every result row; prints "No" when a yes/no question
    returns no rows.

    Parameters:
        query: the English question as a single string.
        yes_no_grammar, wh_grammar: FCFG grammar paths for nltk.load_parser.
        database: chat80 database path passed to chat80.sql_query.
    """
    # Strip punctuation.  str.maketrans('', '', chars) is the Python 3 form;
    # the old two-argument string.maketrans was Python 2 only.
    query = query.translate(str.maketrans("", "", string.punctuation))
    query_list = query.lower().split()
    yes_no = {
        "was", "did", "is", "does", "were", "could", "do", "are", "have",
        "had", "should",
    }
    if query_list[0] in yes_no:
        cp = load_parser(yes_no_grammar, trace=3)
    else:
        cp = load_parser(wh_grammar)  # , trace=3)
    trees = list(cp.parse(query_list))
    # The SEM feature on the root label is the SQL translation.
    answer = trees[0].label()['SEM']
    answer = [s for s in answer if s]  # drop empty semantic fragments
    q = ' '.join(answer)
    print(q)

    results = []
    rows = chat80.sql_query(database, q)
    for r in rows:
        results.append(r[0])
        print(r[0])

    # An empty result set for a yes/no question means the answer is "No".
    # (print() call replaces the Python 2 `print "No"` statement.)
    if not results and query_list[0] in yes_no:
        print("No")
Example #2
0
def english_to_sql():
    """Demo: translate a fixed English question to SQL and run it.

    Parses "What cities are located in China" with the NLTK book grammar
    sql0.fcfg, reads the SQL string from the parse tree's SEM feature, and
    prints each matching city from the chat80 city database.
    """
    nltk.data.show_cfg("grammars/book_grammars/sql0.fcfg")
    from nltk import load_parser
    cp = load_parser("grammars/book_grammars/sql0.fcfg", trace=3)
    query = "What cities are located in China"
    # parse()/.label() replace the long-removed nbest_parse()/.node API.
    trees = list(cp.parse(query.split()))
    answer = trees[0].label()['SEM']
    q = " ".join(answer)
    print(q)  # print() call replaces the Python 2 print statement
    from nltk.sem import chat80
    rows = chat80.sql_query('corpora/city_database/city.db', q)
    for r in rows:
        print(r[0], end=" ")
Example #3
0
def english_to_sql():
    """Demo: translate a fixed English question to SQL and run it.

    Parses "What cities are located in China" with the NLTK book grammar
    sql0.fcfg, reads the SQL string from the parse tree's SEM feature, and
    prints each matching city from the chat80 city database.
    """
    nltk.data.show_cfg("grammars/book_grammars/sql0.fcfg")
    from nltk import load_parser
    cp = load_parser("grammars/book_grammars/sql0.fcfg", trace=3)
    query = "What cities are located in China"
    # parse()/.label() replace the long-removed nbest_parse()/.node API.
    trees = list(cp.parse(query.split()))
    answer = trees[0].label()['SEM']
    q = " ".join(answer)
    print(q)  # print() call replaces the Python 2 print statement
    from nltk.sem import chat80
    rows = chat80.sql_query('corpora/city_database/city.db', q)
    for r in rows:
        print(r[0], end=" ")
Example #4
0
 def sql_run(self):
     """Prompt for an English query, translate it to SQL, run it, print rows.

     Stores the raw query, parse trees, SQL string, result rows, and row
     count on the instance.  Prints "no data found" for an empty result;
     otherwise prints the first column of each row and calls self.sum_er().
     """
     self.query = input("Enter the Natural Language: ")
     self.trees = list(self.parser.parse(self.query.split()))
     # The SEM feature on the root label holds the SQL translation.
     self.answer = self.trees[0].label()['SEM']
     self.answer = [s for s in self.answer if s]  # drop empty fragments
     self.q = ' '.join(self.answer)
     print("\n the Sql query is as follows \n")
     print(self.q)
     print("\n Querry is running....")
     self.rows = chat80.sql_query('corpora/city_database/city.db', self.q)
     self.my_res = list(self.rows)
     # Keep the count attribute for any external readers; len() replaces the
     # hand-rolled counter loop.
     self.count = len(self.my_res)
     # `== 0` replaces `is 0`: identity comparison with an int literal is
     # implementation-dependent and a SyntaxWarning on modern Python.
     if self.count == 0:
         print("no data found")
     else:
         for row in self.my_res:
             print(row[0])
         self.sum_er()
Example #5
0
# Show the feature grammar (sql0.fcfg) that maps English questions to SQL.
from nltk.data import show_cfg
#show_cfg('grammars/book_grammars/sql0.fcfg')
show_cfg('file:sql0.fcfg')

# The FCFG grammar uses feature structures with rules represented in []; there is a SEM feature which represents the semantics of the string; semantics is an SQL representation; 'which' corresponds to 'SELECT'; 'in' and 'are' don't contribute anything; the terminal expressions are combined with other rules; ? indicates a variable (an addition to Python); + concatenates strings;
#

# ## From strings to semantic representations

# In[2]:

from nltk import load_parser
#cp = load_parser('grammars/book_grammars/sql0.fcfg') # data from (Warren and Prereira, 1982)
cp = load_parser('file:sql0.fcfg')
query = 'What cities are located in China'
# Parse the tokenized question; the SEM feature on the root label of the
# first parse tree is the SQL translation.
trees = list(cp.parse(query.split()))
answer = trees[0].label()['SEM']
answer = [s for s in answer if s]  # makes a tuple a list
q = ' '.join(answer)
print(q)

# ## Evaluating a semantic representation in a model

# In[3]:

# Run the generated SQL against the bundled chat80 city database.
from nltk.sem import chat80
rows = chat80.sql_query('corpora/city_database/city.db', q)
# rows = chat80.sql_query('./city.db', q)
for r in rows:
    print(r[0], end=" ")  # each row is a tuple from which we extract city
# Second demo: sql1.fcfg, same pipeline with a different question.
import nltk
nltk.data.show_cfg('grammars/book_grammars/sql1.fcfg')


from nltk import load_parser
test = load_parser('grammars/book_grammars/sql1.fcfg')
# NOTE(review): the leading space survives split(), so the token list is
# clean, but the string is rebound from the question to its SQL form below.
q=" What cities are in Greece"
t = list(test.parse(q.split()))
ans = t[0].label()['SEM']
ans = [s for s in ans if s]  # drop empty semantic fragments
q = ' '.join(ans)
print(q)
from nltk.sem import chat80
r = chat80.sql_query('corpora/city_database/city.db', q)
for p in r:
    print(p[0], end=" ")
Example #7
0
    button.config(state=ACTIVE)
    submit_button.config(state=ACTIVE)
    prob_button.pack_forget()
    T.pack_forget()
    T2.pack_forget()
    var.set(1)


def prob():
    """Display the T2 text widget, expanding it to fill the available space."""
    # Alternative considered: side=TOP with fill=X only (horizontal fill).
    T2.pack(expand=True, fill='both', pady=5, padx=20)


# Collect every city name, then every country name, from the chat80
# city database into CC (each padded to width 10, then stripped).
q = "SELECT City FROM city_table"
CC = []
for answer in a.sql_query('corpora/city_database/city.db', q):
    CC.append(("%-10s" % answer).strip())
q = "SELECT country FROM city_table"
for answer in a.sql_query('corpora/city_database/city.db', q):
    CC.append(("%-10s" % answer).strip())
# Build Li, a flat list of name tokens from the sorted Indian-names file.
# `with` guarantees the handle is closed even on error (the original left
# it open on an exception), and `fh` avoids rebinding the name `file`.
with open("IndianPeopleSorted.csv", encoding="utf8") as fh:
    corpus = fh.read().splitlines()
Li = [token for name in corpus for token in name.strip().split()]


class news_classifier():

    features = []