def update_on_priv_msg(self, data):
    if data['message'].find('.w ') == -1:
        return
    i18n_server = i18n()
    wikipedia.set_lang(i18n_server.get_text('wiki_lang'))  # set_lang returns None, nothing to keep
    q = data['message'].split(' ')
    query = ''
    for word in q:
        if word.strip() != '.w':
            query += word + ' '
    results = wikipedia.search(query)
    if len(results) == 0:
        Connection.singleton().send_back(data['nick'] + ', ' + i18n_server.get_text('wiki_fail'), data)
        return
    try:
        page = wikipedia.WikipediaPage(results.pop(0))
    except wikipedia.DisambiguationError as error:
        print('disambiguation page')
        page = wikipedia.WikipediaPage(error.options[0])
    Connection.singleton().send_back(data['nick'] + ' ' + page.url, data)
    # Cut the summary at the first sentence end found between characters 50 and 230.
    end = page.summary[50:230].find('. ')
    if end == -1:
        Connection.singleton().send_back(page.summary[0:230], data)
    else:
        Connection.singleton().send_back(page.summary[0:51 + end], data)
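
A minimal standalone sketch of the wikipedia calls the snippet above relies on: set_lang (which returns None), search, and the options attribute of DisambiguationError. The query string is only an illustration, and wikipedia.page is used here instead of constructing WikipediaPage directly.

import wikipedia
from wikipedia.exceptions import DisambiguationError

wikipedia.set_lang('en')
results = wikipedia.search('python programming language')
if results:
    try:
        page = wikipedia.page(results[0], auto_suggest=False)
    except DisambiguationError as error:
        # Fall back to the first suggested option on disambiguation pages.
        page = wikipedia.page(error.options[0], auto_suggest=False)
    print(page.url)
    print(page.summary[:230])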
Example #2
def update_on_priv_msg(self, data):
    if data['message'].find('.w ') == -1:
        return
    i18n_server = i18n()
    wikipedia.set_lang(i18n_server.get_text('wiki_lang'))  # set_lang returns None, nothing to keep
    q = data['message'].split(' ')
    query = ''
    for word in q:
        if word.strip() != '.w':
            query += word + ' '
    results = wikipedia.search(query)
    if len(results) == 0:
        Connection.singleton().send_channel(data['nick'] + ', ' + i18n_server.get_text('wiki_fail'))
        return
    page = wikipedia.WikipediaPage(results.pop(0))
    Connection.singleton().send_channel(data['nick'] + ' ' + page.url)
    Connection.singleton().send_channel(page.summary[0:230])
def wiki(bot, event, *args):
    """
    **Wikipedia:**
    Usage: /wiki <keywords to search for> <optional: sentences to display [defaults to 3]>
    Purpose: Get summary from Wikipedia on keywords.
    """
    from wikipedia import wikipedia, PageError, DisambiguationError

    def summary(self, sentences=3):
        # Cache the extract so repeated calls don't re-query the API.
        if not getattr(self, '_summary', False):
            query_params = {
                'prop': 'extracts',
                'explaintext': '',
                'exintro': '',
                'exsentences': sentences,
            }
            if getattr(self, 'title', None) is not None:
                query_params['titles'] = self.title
            else:
                query_params['pageids'] = self.pageid

            request = wikipedia._wiki_request(query_params)
            self._summary = request['query']['pages'][self.pageid]['extract']

        return self._summary

    wikipedia.WikipediaPage.summary = summary
    try:
        sentences = 3
        try:
            if args and args[-1].isdigit():
                sentences = int(args[-1])
                args = args[:-1]
            page = wikipedia.page(' '.join(args))
        except DisambiguationError as e:
            page = wikipedia.page(wikipedia.search(e.options[0], results=1)[0])
        segments = [
            hangups.ChatMessageSegment(page.title, hangups.SegmentType.LINK, is_bold=True, link_target=page.url),
            hangups.ChatMessageSegment('\n', hangups.SegmentType.LINE_BREAK),
            hangups.ChatMessageSegment(page.summary(sentences=sentences))]

        bot.send_message_segments(event.conv, segments)
    except PageError:
        bot.send_message(event.conv, "Couldn't find \"{}\". Try something else.".format(' '.join(args)))
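
The monkeypatched summary above depends on the library's private _wiki_request helper. A simpler sketch, assuming the public API is sufficient, uses wikipedia.summary, which already accepts a sentences limit:

import wikipedia
from wikipedia.exceptions import DisambiguationError, PageError

def short_summary(query, sentences=3):
    # Return a sentence-limited summary, falling back to the first disambiguation option.
    try:
        return wikipedia.summary(query, sentences=sentences)
    except DisambiguationError as e:
        return wikipedia.summary(e.options[0], sentences=sentences)
    except PageError:
        return None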
def test_offset(self):
    """Test paginating search results."""
    self.assertEqual(wikipedia.search("Porsche", results=1, page=2), mock_data['data']["porsche.search.paginated"])
Example #5
def test_limit(self):
    """Test limiting the number of search results."""
    self.assertEqual(wikipedia.search("Porsche", results=3), mock_data['data']["porsche.search"])
    self.assertEqual(_wiki_request.calls["{'list': 'search', 'srprop': '', 'srlimit': 3, 'limit': 3, 'srsearch': 'Porsche'}"], 1)
Example #6
 def test_search(self):
     """Test parsing a Wikipedia request result."""
     self.assertEqual(wikipedia.search("Barack Obama"), mock_data['data']["barack.search"])
     self.assertEqual(_wiki_request.calls["{'list': 'search', 'srprop': '', 'srlimit': 10, 'limit': 10, 'srsearch': 'Barack Obama'}"], 1)
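
Tests like the two above index _wiki_request.calls by the stringified request parameters, which implies the module-level _wiki_request helper has been swapped for a recording stub. A rough sketch of such a stub follows; the inline fixture is hypothetical, and a real suite would load its canned responses from a file.

from collections import defaultdict
from wikipedia import wikipedia   # the submodule whose _wiki_request the search functions call

class RecordingStub:
    """Replay canned API responses and count how often each request is made."""
    def __init__(self, canned_responses):
        self.canned_responses = canned_responses
        self.calls = defaultdict(int)

    def __call__(self, params):
        key = str(params)              # the tests above key calls by the stringified params
        self.calls[key] += 1
        return self.canned_responses[key]

canned = {
    "{'list': 'search', 'srprop': '', 'srlimit': 10, 'limit': 10, 'srsearch': 'Barack Obama'}":
        {'query': {'search': [{'title': 'Barack Obama'}]}},
}
_wiki_request = RecordingStub(canned)
wikipedia._wiki_request = _wiki_request   # patch the helper that wikipedia.search() calls

print(wikipedia.search("Barack Obama"))   # -> ['Barack Obama'], with no network access
print(dict(_wiki_request.calls))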
Example #7
def test():
    form = LoginForm()
    if form.validate_on_submit():
        flash(form.openid.data , 'Question')
        text = form.openid.data.lower()
        data = form.openid.data.lower()
        data1 = form.openid.data
        text = text.split()
        negator = ['not', 'never', 'not possible', 'does not', 'abort', 'neither', 'nor', 'no', 'negative', 'negate']
        assertor = ['may be', 'can be', 'not sure', 'might', 'may']
        preposition = ['have', 'is', 'are', 'about', 'above', 'across', 'after', 'against', 'along', 'among', 'around', 'at', 'before', 'behind', 'below', 'beneath', 'beside', 'between', 'by', 'down', 'during', 'except', 'for', 'from', 'front', 'inside', 'instead', 'into', 'like', 'near', 'of', 'off', 'on', 'onto', 'top', 'out', 'outside', 'over', 'past', 'since', 'through', 'to', 'toward', 'under', 'underneath', 'until', 'up', 'upon', 'with', 'within', 'without']
        wh = ['why', 'what', 'how', 'who', 'whoever', 'whom', 'whomever', 'whose', 'which']  # lowercase so it matches the lowercased text
        pronoun = ['i', 'me', 'you', 'she', 'her', 'he', 'him', 'it', 'we', 'us', 'you', 'they', 'them', 'my', 'mine', 'your', 'yours', 'hers', 'his', 'its', 'yours', 'ours', 'theirs', 'myself', 'yourself', 'himself', 'herself', 'itself', 'all', 'another', 'any', 'anybody', 'anyone', 'anything', 'both', 'each', 'either', 'everybody', 'everyone', 'everything', 'few', 'many', 'neither', 'nobody', 'none', 'nothing', 'one', 'several', 'some', 'somebody', 'someone', 'something', 'this', 'that', 'these', 'those']
        # Removing Wh Question
        wh_q=''
        for ser in text:
            inflag = 0
            for w in wh:
                if w == ser:
                    inflag = 1
            if inflag == 0:
                wh_q = wh_q + ser + ' '


        # Removing Preposition
        wh_q = wh_q.split()
        prep_q = ''
        for ser in wh_q:
            inflag = 0
            for prep in preposition:
                if ser == prep:
                    inflag = 1
            if inflag == 0:
                prep_q = prep_q + ser + ' '


        # Removing Pronoun
        prep_q = prep_q.split()
        pro_q = ''
        for ser in prep_q:
            inflag = 0
            for pro in pronoun:
                if ser == pro:
                    inflag = 1
            if inflag == 0:
                pro_q = pro_q + ser + ' '

        text = pro_q
        text = text.split()
        data = pro_q.strip()
        flag = 0
        answer = 0
        wikiflag = 0
        ans = 0

        data = ''
        asser = 0
        nege = 0
        posi = 0
        #Assertive Section
        for ser in text:
            inflag = 0
            for ass in assertor:
                if (ser == ass or data.find(ass) != -1) and flag == 0:
                    inflag = 1
                    asser = 1
                    flash('Assertive', 'Answer')
                    flag=1
            if inflag == 0:
                data = data + ser + ' '
        if asser == 1:
            data = data.strip()
            abc = models.Assertive.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash("Answer not in database... Let's search the Wikipedia database", 'Answer')
                wikiflag = 1
                #return redirect ('http://www.lmgtfy.com/?q=' + data)
            else:
                finalans=a.answer
                flash(a.answer, 'Answer')

        #Negative Section
        if asser == 0:
            data = '' 
        for ser in text:
            inflag = 0           
            for neg in negator:
                if (ser == neg or data.find(neg) != -1) and flag == 0:
                    inflag = 1
                    nege = 1
                    flash('Negative', 'Answer')
                    flag = 1
            if inflag == 0:
                data = data + ser + ' '
        if nege == 1:
            data = data.strip()
            abc = models.Negative.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash("Answer not in database... Let's search the Wikipedia database", 'Answer')
                wikiflag = 1
                #return redirect ('http://www.lmgtfy.com/?q=' + data)
            else:
                finalans=a.answer
                flash(a.answer, 'Answer')

        #Positive Section

        if flag == 0:
            data = form.openid.data.lower()
            flash('Positive', 'Answer')
            abc = models.Positive.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash("Answer not in database... Let's search the Wikipedia database", 'Answer')
                wikiflag = 1
                #return redirect ('http://www.lmgtfy.com/?q=' + data)
            else:
                finalans=a.answer
                flash(a.answer, 'Answer')

        #Wiki Section
        ans = 0
        if wikiflag == 1:
            abc = models.Wikipedia.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash("Answer not in the Wikipedia database... Let's search Wikipedia on the Internet", 'Answer')
                ny = wikipedia.search(data)
                if ny == []:
                    return redirect ('http://www.lmgtfy.com/?q=' + data1)
                else:
                    try:
                        ny1 = wikipedia.summary(data1, chars=0, auto_suggest=True, redirect=True, sentences=3)
                        finalans=ny1
                        flash(ny1, 'Answer')
                        ny2 = wikipedia.page(data1)
                        flash('Source: '+ ny2.url, 'Answer')
                        #u = models.Wikipedia(question=data, answer=ny1)
                        #db.session.add(u)
                        #db.session.commit()
                    except Exception as inst:
                        flash('Your question is either out of scope or too trivial for me to answer', 'Answer')
                        finalans = 'Your question is either out of scope or too trivial for me to answer'
            else:
                finalans=a.answer
                flash(a.answer, 'Answer')
        display = '\n'
        s = models.Chats.query.all()
        for chat in reversed(s):
            flash('Question: ' + chat.question, 'Display')
            flash('Answer: ' + chat.answer , 'Display')
            flash('.', 'Display')
        u = models.Chats(question=data1, answer=finalans)
        db.session.add(u)
        db.session.commit() 

        return redirect('/test')
    return render_template("index2.html",
        title = 'ChatterBot',
        form = form)
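
A sketch of a helper (hypothetical, not part of the view above) that isolates the Wikipedia fallback used when none of the local answer tables match: search first, then fetch a three-sentence summary and the article URL.

import wikipedia

def wiki_fallback(question, sentences=3):
    # Return (summary, source_url) from Wikipedia, or None when nothing useful is found.
    if not wikipedia.search(question):
        return None                    # caller can redirect to a web search instead
    try:
        answer = wikipedia.summary(question, sentences=sentences, auto_suggest=True, redirect=True)
        source = wikipedia.page(question).url
        return answer, source
    except Exception:
        return None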
 def test_suggestion(self):
   """Test getting a suggestion as well as search results."""
   search, suggestion = wikipedia.search("hallelulejah", suggestion=True)
   self.assertEqual(search, [])
   self.assertEqual(suggestion, u'hallelujah')
Example #9
def test_limit(self):
    """Test limiting the number of search results."""
    self.assertEqual(wikipedia.search("Porsche", results=3),
                     mock_data["data"]["porsche.search"])
Example #10
 def test_search(self):
     """Test parsing a Wikipedia request result."""
     self.assertEqual(wikipedia.search("Barack Obama"),
                      mock_data["data"]["barack.search"])
def get_ticker(company_name, search_for_parent=True, sources=None):
    if not sources:
        sources = ["bing", "wikipedia"]

    ticker = None

    try:

        if "bing" in sources:
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'
            }
            url = "https://www.bing.com/search?q=" + quote_plus(company_name +
                                                                " stock price")
            req = urllib.request.Request(url, headers=headers)
            page = urllib.request.urlopen(req)
            soup = BeautifulSoup(page, "html.parser")
            markup = str(soup.find_all("div", {"class": "fin_metadata"}))[1:-1]
            text = BeautifulSoup(markup, "html.parser").get_text()
            ticker_start = 0
            ticker_end = 0
            for i in range(len(text)):
                if text[i] == ":":
                    ticker_start = i + 2
                    break

            for i in range(len(text)):
                if text[i] == "·":
                    ticker_end = i - 1
                    break

            if not (ticker_start == 0 and ticker_end == 0):
                ticker = text[ticker_start:ticker_end]
        if not is_valid_ticker(ticker):
            ticker = None
    except Exception:
        pass

    try:
        if "bing" in sources and ticker is None:
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'
            }
            url = "https://www.bing.com/search?q=" + quote_plus(company_name)
            req = urllib.request.Request(url, headers=headers)
            page = urllib.request.urlopen(req)
            soup = BeautifulSoup(page, "html.parser")
            markup = str(soup.find_all("div", {"class": "fin_metadata"}))[1:-1]
            text = BeautifulSoup(markup, "html.parser").get_text()
            ticker_start = 0
            ticker_end = 0
            for i in range(len(text)):
                if text[i] == ":":
                    ticker_start = i + 2
                    break

            for i in range(len(text)):
                if text[i] == "·":
                    ticker_end = i - 1
                    break

            if not (ticker_start == 0 and ticker_end == 0):
                ticker = text[ticker_start:ticker_end]

        if not is_valid_ticker(ticker):
            ticker = None
    except Exception:
        pass

    try:
        if "wikipedia" in sources and ticker is None:
            url = "https://en.wikipedia.org/wiki/" + "_".join(
                company_name.split(" "))
            req = urllib.request.Request(url)
            try:
                page = urllib.request.urlopen(req)
                # Get info box > Get traded as > Get the ticker
                soup = BeautifulSoup(page, "html.parser")
                infobox = BeautifulSoup(
                    str(soup.find_all("table",
                                      {"class": "infobox vcard"}))[1:-1],
                    "html.parser")
                row_re = re.compile(r"Traded")
                row = infobox.find(text=row_re)
                if row is not None:
                    row = str(row.parent.parent.parent)
                    soup = BeautifulSoup(row, "html.parser")
                    markup = soup.find_all("a", {"rel": "nofollow"})
                    for m in markup:
                        if m.get_text().isupper():
                            ticker = m.get_text()
            except Exception:
                company_name = wikipedia.search(company_name)[0]
                url = "https://en.wikipedia.org/wiki/" + "_".join(
                    company_name.split(" "))
                req = urllib.request.Request(url)
                try:
                    page = urllib.request.urlopen(req)
                    # Get info box > Get traded as > Get the ticker
                    soup = BeautifulSoup(page, "html.parser")
                    infobox = BeautifulSoup(
                        str(soup.find_all("table",
                                          {"class": "infobox vcard"}))[1:-1],
                        "html.parser")
                    row_re = re.compile(r"Traded")
                    row = infobox.find(text=row_re)
                    if row is not None:
                        row = str(row.parent.parent.parent)
                        soup = BeautifulSoup(row, "html.parser")
                        markup = soup.find_all("a", {"rel": "nofollow"})
                        for m in markup:
                            if m.get_text().isupper():
                                ticker = m.get_text()
                except Exception:
                    pass
    except Exception:
        pass

    if not is_valid_ticker(ticker):
        ticker = None

    if search_for_parent and ticker is None:
        parent = find_parent(company_name)
        if parent:
            ticker = get_ticker(parent)

    return ticker
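
The two Wikipedia passes above repeat the same infobox parsing verbatim; a sketch of a shared helper (hypothetical, assuming the same urllib/BeautifulSoup setup as the function above) that pulls the first all-caps link out of the "Traded as" row:

import re
import urllib.request
from bs4 import BeautifulSoup

def ticker_from_wikipedia_infobox(company_name):
    # Fetch the company's article and return the ticker from its "Traded as" row, if any.
    url = "https://en.wikipedia.org/wiki/" + "_".join(company_name.split(" "))
    page = urllib.request.urlopen(urllib.request.Request(url))
    infobox = BeautifulSoup(page, "html.parser").find("table", {"class": "infobox vcard"})
    if infobox is None:
        return None
    row = infobox.find(text=re.compile(r"Traded"))
    if row is None:
        return None
    # Walk up to the table row and look for an all-caps exchange/ticker link.
    for link in row.parent.parent.parent.find_all("a", {"rel": "nofollow"}):
        if link.get_text().isupper():
            return link.get_text()
    return None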
Example #12
def test_search(self):
    _search = wikipedia.search("Queen")[1]
    assert _search == u'Queen (band)'
Example #13
def process_request(req):
    action = req.get("result").get("action")

    if action != "yahooWeatherForecast":
        if action == u"weather.search":
            try:
                parameters = req.get("result").get("parameters")
                request_type = parameters.get('request_type')
                current_time = parameters.get('current_time')
                location = parameters.get('location')

                return weather_request_hangouts(None, None, location)
            except Exception as e:
                return {}
                # Do something else
        if u"wisdom" in action:
            parameters = req.get("result").get("parameters")
            query_wisdom = parameters.get('q')
            request_type = parameters.get('request_type')

            try:
                wikipedia_answer_url = None
                wisdom_answer = None
                speech = None
                if query_wisdom:
                    wikipedia_search = wikipedia.search(query_wisdom,
                                                        results=2)[0]
                    wikipedia_answer_url = wikipedia.page(wikipedia_search).url
                    if wikipedia_answer_url:
                        wisdom_answer = wikipedia.summary(query_wisdom,
                                                          sentences=1)
                    else:
                        wikipedia_search = wikipedia.search(query_wisdom,
                                                            results=2)[1]
                        wikipedia_answer_url = wikipedia.page(
                            wikipedia_search).url

                    if wisdom_answer and wikipedia_answer_url:
                        speech = "According to Wikipedia.org (" + wikipedia_answer_url + "): " + wisdom_answer
                    else:
                        speech = ("I am sorry, but I couldn't find a good article or result for your "
                                  "request on Wikipedia.org. "
                                  "Why don't you click on the following link to see similar results: "
                                  "https://en.wikipedia.org/w/index.php?search=" + query_wisdom.replace(" ", "+"))

                    return {
                        "speech": speech,
                        "displayText": speech,
                        # "data": data,
                        # "contextOut": [],
                        "source": "apiai-weather-webhook-sample"
                    }
            except Exception as e:
                logger.error("Error while handling wisdom request: %s", e)

        return {}

    baseurl = "https://query.yahooapis.com/v1/public/yql?"
    yql_query = make_yql_query(req)
    if yql_query is None:
        return {}
    yql_url = baseurl + urlencode({'q': yql_query}) + "&format=json"
    result = urlopen(yql_url).read()
    data = json.loads(result)
    res = make_webhook_result(data)
    return res
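
A compact sketch (hypothetical helper name, not part of the webhook above) of what the wisdom branch does: take a top search hit, resolve its URL, and return a one-sentence summary attributed to Wikipedia.

import wikipedia

def wisdom_lookup(query):
    # Return an attributed one-sentence answer from Wikipedia, or None when nothing is found.
    hits = wikipedia.search(query, results=2)
    if not hits:
        return None
    url = wikipedia.page(hits[0]).url
    answer = wikipedia.summary(query, sentences=1)
    return "According to Wikipedia.org ({}): {}".format(url, answer)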
Example #14
 def test_search(self):
   """Test parsing a Wikipedia request result."""
   self.assertEqual(wikipedia.search("Barack Obama"), mock_data['data']["barack.search"])
Example #15
 def test_suggestion(self):
     """Test getting a suggestion as well as search results."""
     search, suggestion = wikipedia.search("hallelulejah", suggestion=True)
     self.assertEqual(search, [])
     self.assertEqual(suggestion, u"hallelujah")
Example #16
def test_limit(self):
    """Test limiting the number of search results."""
    self.assertEqual(wikipedia.search("Porsche", results=3), mock_data['data']["porsche.search"])
Example #17
 def test_suggestion_none(self):
     """Test getting a suggestion when there is no suggestion."""
     search, suggestion = wikipedia.search("qmxjsudek", suggestion=True)
     self.assertEqual(search, [])
     self.assertEqual(suggestion, None)
Example #18
def test_suggestion_none(self):
    """Test getting a suggestion when there is no suggestion."""
    search, suggestion = wikipedia.search("qmxjsudek", suggestion=True)
    self.assertEqual(search, [])
    self.assertEqual(suggestion, None)
Example #19
def test():
    form = LoginForm()
    if form.validate_on_submit():
        flash(form.openid.data , 'Question')
        text = form.openid.data.lower()
        data = form.openid.data.lower() # for processing of answer(data mining)
        data1 = form.openid.data # for finding verbs nouns adjectives and number
        text = text.split() # for finding positive negative and assertive

        # Finding Nouns
        tokenized = nltk.word_tokenize(data1)
        p = nltk.pos_tag(tokenized)
        flash(p, 'Answer')
        name = nltk.ne_chunk(p, binary=True)
        ent = re.findall(r'NE\s(.*?)/', str(name))
        chunkGram = r"""Noun: {<NN\w?>} """
        chunkParser = nltk.RegexpParser(chunkGram)
        NNnoun = chunkParser.parse(p)
        ip_noun = re.findall(r'Noun\s(.*?)/', str(NNnoun))
        #noun = re.findall(r'<NN\w?>*', str(p))
        #print ent
        #nouns = ''
        #for n in ip_noun:
        #    nouns = nouns + n + ' '
        #flash ('Nouns: ' + str(nouns), 'Answer')
        flash ('Nouns list: ' + str(ip_noun), 'Answer')

        # Finding Verbs
        tokenized = nltk.word_tokenize(data1)
        p = nltk.pos_tag(tokenized)
        name = nltk.ne_chunk(p, binary=True)

        chunkGram = r"""Verb: {<VB\w?>} """
        chunkParser = nltk.RegexpParser(chunkGram)
        VBverb = chunkParser.parse(p)
        ip_verb = re.findall(r'Verb\s(.*?)/', str(VBverb))
        #noun = re.findall(r'<NN\w?>*', str(p))
        #print ent
        #verbs = ''
        #for v in ip_verb:
        #    verbs = verbs + v + ' '
        #flash ('Verbs: ' + str(verbs), 'Answer')
        flash ('Verb List: ' + str(ip_verb), 'Answer')

        # Finding Adjective
        tokenized = nltk.word_tokenize(data1)
        p = nltk.pos_tag(tokenized)
        name = nltk.ne_chunk(p, binary=True)

        chunkGram = r"""Verb: {<JJ\w?>} """
        chunkParser = nltk.RegexpParser(chunkGram)
        JJAdj = chunkParser.parse(p)
        ip_adj = re.findall(r'Verb\s(.*?)/', str(JJAdj))
        #noun = re.findall(r'<NN\w?>*', str(p))
        #print ent
        #adjs = ''
        #for a in ip_adj:
        #    adjs = adjs + a + ' '
        #flash ('Ajectives: ' + str(adjs), 'Answer')
        flash ('Adjective list: ' + str(ip_adj), 'Answer')

        # Finding Numbers
        tokenized = nltk.word_tokenize(data1)
        p = nltk.pos_tag(tokenized)
        name = nltk.ne_chunk(p, binary=True)
        chunkGram = r"""Number: {<CD\w?>} """
        chunkParser = nltk.RegexpParser(chunkGram)
        CDNumber = chunkParser.parse(p)
        ip_number = re.findall(r'Number\s(.*?)/', str(CDNumber))
        flash ('Number list: ' + str(ip_number), 'Answer')

        max_check = len(ip_noun) + len(ip_verb) + len(ip_adj) + len(ip_number) #counting the number of max hits

        # Similar Noun Form
        simi = models.Similar.query.all()
        count_n = len(ip_noun)
        max_n = 0
        for noun_sim in ip_noun:
            for sim in simi:
                if sim.word1 == noun_sim:
                    ip_noun.append(str(sim.word2))
                    ip_noun.append(str(sim.word3))
                if sim.word2 == noun_sim:
                    ip_noun.append(str(sim.word1))
                    ip_noun.append(str(sim.word3))
                if sim.word3 == noun_sim:
                    ip_noun.append(str(sim.word1))
                    ip_noun.append(str(sim.word2))
            max_n = max_n + 1
            if max_n >= count_n:
                break


        # Similar Verb Form
        simi = models.Similar.query.all()
        count_v = len(ip_verb)
        max_v = 0
        for verb_sim in ip_verb:
            for sim in simi:
                if sim.word1 == verb_sim:
                    ip_verb.append(str(sim.word2))
                    ip_verb.append(str(sim.word3))
                if sim.word2 == verb_sim:
                    ip_verb.append(str(sim.word1))
                    ip_verb.append(str(sim.word3))
                if sim.word3 == verb_sim:
                    ip_verb.append(str(sim.word1))
                    ip_verb.append(str(sim.word2))
            max_v = max_v + 1
            if max_v >= count_v:
                break

        # Similar Adjective Form
        simi = models.Similar.query.all()
        count_a = len(ip_adj)
        max_a = 0
        for adj_sim in ip_adj:
            for sim in simi:
                if sim.word1 == adj_sim:
                    ip_adj.append(str(sim.word2))
                    ip_adj.append(str(sim.word3))
                if sim.word2 == adj_sim:
                    ip_adj.append(str(sim.word1))
                    ip_adj.append(str(sim.word3))
                if sim.word3 == adj_sim:
                    ip_adj.append(str(sim.word1))
                    ip_adj.append(str(sim.word2))
            max_a = max_a + 1
            if max_a >= count_a:
                break

        #Printing the new appended list        
        flash ('Nouns list: ' + str(ip_noun), 'Answer')
        flash ('Verb List: ' + str(ip_verb), 'Answer')
        flash ('Adjective list: ' + str(ip_adj), 'Answer')
        flash ('Number list: ' + str(ip_number), 'Answer')

        ip_total = ip_noun + ip_verb + ip_adj + ip_number
        ip_total = list(set(ip_total))

        negator = ['not', 'never', 'not possible', 'does not', 'abort', 'neither', 'nor', 'negative', 'negate', 'can\'t', 'doesn\'t','can not','cant','doesnt','dont','don\'t']
        assertor = ['may be', 'can be', 'not sure', 'might', 'may']
        '''preposition = ['have', 'is', 'are', 'about', 'above', 'across', 'after', 'against', 'along', 'among', 'around', 'at', 'before', 'behind', 'below', 'beneath', 'beside', 'between', 'by', 'down', 'during', 'except', 'for', 'from', 'front', 'inside', 'instead', 'into', 'like', 'near', 'of', 'off', 'on', 'onto', 'top', 'out', 'outside', 'over', 'past', 'since', 'through', 'to', 'toward', 'under', 'underneath', 'until', 'up', 'upon', 'with', 'within', 'without']
        wh = ['why', 'what', 'how', 'Who', 'whoever', 'whom', 'whomever', 'whose', 'which']
        pronoun = ['i', 'me', 'you', 'she', 'her', 'he', 'him', 'it', 'we', 'us', 'you', 'they', 'them', 'my', 'mine', 'your', 'yours', 'hers', 'his', 'its', 'yours', 'ours', 'theirs', 'myself', 'yourself', 'himself', 'herself', 'itself', 'all', 'another', 'any', 'anybody', 'anyone', 'anything', 'both', 'each', 'either', 'everybody', 'everyone', 'everything', 'few', 'many', 'neither', 'nobody', 'none', 'nothing', 'one', 'several', 'some', 'somebody', 'someone', 'something', 'this', 'that', 'these', 'those']
        # Removing Wh Question
        wh_q=''
        for ser in text:
            inflag = 0
            for w in wh:
                if w == ser:
                    inflag = 1
            if inflag == 0:
                wh_q = wh_q + ser + ' '


        # Removing Prepostion
        wh_q = wh_q.split()
        prep_q = ''
        for ser in wh_q:
            inflag = 0
            for prep in preposition:
                if ser == prep:
                    inflag = 1
            if inflag == 0:
                prep_q = prep_q + ser + ' '


        # Removing Pronoun
        prep_q = prep_q.split()
        pro_q = ''
        for ser in prep_q:
            inflag = 0
            for pro in pronoun:
                if ser == pro:
                    inflag = 1
            if inflag == 0:
                pro_q = pro_q + ser + ' '

        text = pro_q
        text = text.split()
        data = pro_q.strip()
        
        '''
        flag = 0
        answer = 0
        wikiflag = 0
        ans = 0
        asser = 0
        nege = 0
        posi = 0
        #Assertive Section
        for ser in text:
            for ass in assertor:
                if (ser == ass or data.find(ass) != -1) and flag == 0:
                    asser = 1
                    flash('Assertive', 'Answer')
                    flag=1
        if asser == 1:
            display_ans = ''
            max_value = int(max_check * 0.8 + 0.5) # counting the no of hits
            abc = models.Positive.query.all()
            for a in abc:
                # Noun
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                ent = re.findall(r'NE\s(.*?)/', str(name))
                chunkGram = r"""Noun: {<NN\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                NNnoun = chunkParser.parse(p)
                db_noun = re.findall(r'Noun\s(.*?)/', str(NNnoun))

                # Verbs
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                chunkGram = r"""Verb: {<VB\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                VBverb = chunkParser.parse(p)
                db_verb = re.findall(r'Verb\s(.*?)/', str(VBverb))

                # Adjective
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                chunkGram = r"""Verb: {<JJ\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                JJAdj = chunkParser.parse(p)
                db_adj = re.findall(r'Verb\s(.*?)/', str(JJAdj))


                # Number
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                chunkGram = r"""Number: {<CD\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                CDNumber = chunkParser.parse(p)
                db_number = re.findall(r'Number\s(.*?)/', str(CDNumber))

                db_total = db_noun + db_adj + db_verb + db_number
                db_total = list(set(db_total))

                count = 0
                for ip in ip_total:
                    for dbs in db_total:
                        db_plural = re.escape(dbs) + 's?'
                        ip_plural = re.escape(ip) + 's?'
                        if re.match(db_plural, ip,flags=0|re.IGNORECASE):
                            count = count + 1
                        if re.match(ip_plural,dbs,flags=0|re.IGNORECASE):
                            count = count + 1
                        if ip == dbs:
                            count = count - 1

                if max_value < count:
                    display_ans = a.answer
                    max_value = count

            if display_ans == '':
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash("Answer not in database... Let's search the Wikipedia database", 'Answer')
                wikiflag = 1
            else:
                extra = 'Please be more specific about the problem you are facing so that we can give a precise answer. The most relevant solution to your problem appears to be: '
                display_ans = extra + '\n' + display_ans
                flash(display_ans, 'Answer')

             
            """for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash('Answer not in database... Lets search Wikipedia Database', 'Answer')
                wikiflag = 1
                #return redirect ('http://www.lmgtfy.com/?q=' + data)
            else:
                finalans=a.answer
                flash(a.answer, 'Answer')"""

        #Negative Section
        if asser != 1:
            for ser in text:          
                for neg in negator:
                    if (ser == neg or data.find(neg) != -1) and flag == 0:
                        nege = 1
                        flash('Negative', 'Answer')
                        flag = 1
            if nege == 1:
                display_ans = ''
                max_value = int(max_check * 0.8 + 0.5) # counting the no of hits
                abc = models.Negative.query.all()
                for a in abc:
                    # Noun
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    ent = re.findall(r'NE\s(.*?)/', str(name))
                    chunkGram = r"""Noun: {<NN\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    NNnoun = chunkParser.parse(p)
                    db_noun = re.findall(r'Noun\s(.*?)/', str(NNnoun))

                    # Verbs
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    chunkGram = r"""Verb: {<VB\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    VBverb = chunkParser.parse(p)
                    db_verb = re.findall(r'Verb\s(.*?)/', str(VBverb))

                    # Adjective
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    chunkGram = r"""Verb: {<JJ\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    JJAdj = chunkParser.parse(p)
                    db_adj = re.findall(r'Verb\s(.*?)/', str(JJAdj))

                   # Number
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    chunkGram = r"""Number: {<CD\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    CDNumber = chunkParser.parse(p)
                    db_number = re.findall(r'Number\s(.*?)/', str(CDNumber))

                    db_total = db_noun + db_adj + db_verb + db_number
                    db_total = list(set(db_total))

                    count = 0
                    for ip in ip_total:
                        for dbs in db_total:
                            db_plural = re.escape(dbs) + 's?'
                            ip_plural = re.escape(ip) + 's?'
                            if re.match(db_plural, ip,flags=0|re.IGNORECASE):
                                count = count + 1
                            if re.match(ip_plural,dbs,flags=0|re.IGNORECASE):
                                count = count + 1
                            if ip == dbs:
                                count = count - 1

                    if max_value < count:
                        display_ans = a.answer
                        max_value = count

                if display_ans == '':
                    answer = 0
                else:
                    answer = 1

                if answer == 0:
                    flash("Answer not in database... Let's search the Wikipedia database", 'Answer')
                    wikiflag = 1
                else:
                    flash(display_ans, 'Answer')


                """for a in abc:
                    if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                        ans = 1
                        break
                if ans == 0:
                    answer = 0
                else:
                    answer = 1

                if answer == 0:
                    flash('Answer not in database... Lets search Wikipedia Database', 'Answer')
                    wikiflag = 1
                    #return redirect ('http://www.lmgtfy.com/?q=' + data)
                else:
                    finalans=a.answer
                    flash(a.answer, 'Answer')"""

        #Positive Section
        if asser != 1 and nege != 1:
            if flag == 0:
                data = form.openid.data.lower()
                flash('Positive', 'Answer')
                flag = 1
                display_ans = ''
                max_value = int(max_check * 0.8 + 0.5) # counting the no of hits
                abc = models.Positive.query.all()
                for a in abc:
                    # Noun
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    ent = re.findall(r'NE\s(.*?)/', str(name))
                    chunkGram = r"""Noun: {<NN\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    NNnoun = chunkParser.parse(p)
                    db_noun = re.findall(r'Noun\s(.*?)/', str(NNnoun))

                    # Verbs
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    chunkGram = r"""Verb: {<VB\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    VBverb = chunkParser.parse(p)
                    db_verb = re.findall(r'Verb\s(.*?)/', str(VBverb))

                    # Adjective
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    chunkGram = r"""Verb: {<JJ\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    JJAdj = chunkParser.parse(p)
                    db_adj = re.findall(r'Verb\s(.*?)/', str(JJAdj))

                    # Number
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    chunkGram = r"""Number: {<CD\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    CDNumber = chunkParser.parse(p)
                    db_number = re.findall(r'Number\s(.*?)/', str(CDNumber))

                    db_total = db_noun + db_adj + db_verb + db_number
                    db_total = list(set(db_total))

                    count = 0
                    for ip in ip_total:
                        for dbs in db_total:
                            db_plural = re.escape(dbs) + 's?'
                            ip_plural = re.escape(ip) + 's?'
                            if re.match(db_plural, ip,flags=0|re.IGNORECASE):
                                count = count + 1
                            if re.match(ip_plural,dbs,flags=0|re.IGNORECASE):
                                count = count + 1
                            if ip == dbs:
                                count = count - 1

                    if max_value < count:
                        display_ans = a.answer
                        max_value = count

                if display_ans == '':
                    answer = 0
                else:
                    answer = 1

                if answer == 0:
                    flash("Answer not in database... Let's search the Wikipedia database", 'Answer')
                    wikiflag = 1
                else:
                    flash(display_ans, 'Answer')

                """abc = models.Positive.query.all()
                for a in abc:
                    if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                        ans = 1
                        break
                if ans == 0:
                    answer = 0
                else:
                    answer = 1

                if answer == 0:
                    flash('Answer not in database... Lets search Wikipedia Database', 'Answer')
                    wikiflag = 1
                    #return redirect ('http://www.lmgtfy.com/?q=' + data)
                else:
                    finalans=a.answer
                    flash(a.answer, 'Answer')"""

        #Wiki Section
        ans = 0
        if wikiflag == 1:

            display_ans = ''
            max_value = int(max_check * 0.8 + 0.5) # counting the no of hits
            abc = models.Wikipedia.query.all()
            for a in abc:
                # Noun
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                ent = re.findall(r'NE\s(.*?)/', str(name))
                chunkGram = r"""Noun: {<NN\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                NNnoun = chunkParser.parse(p)
                db_noun = re.findall(r'Noun\s(.*?)/', str(NNnoun))

                # Verbs
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                chunkGram = r"""Verb: {<VB\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                VBverb = chunkParser.parse(p)
                db_verb = re.findall(r'Verb\s(.*?)/', str(VBverb))

                # Adjective
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                chunkGram = r"""Verb: {<JJ\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                JJAdj = chunkParser.parse(p)
                db_adj = re.findall(r'Verb\s(.*?)/', str(JJAdj))

                # Number
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                chunkGram = r"""Number: {<CD\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                CDNumber = chunkParser.parse(p)
                db_number = re.findall(r'Number\s(.*?)/', str(CDNumber))

                db_total = db_noun + db_adj + db_verb + db_number
                db_total = list(set(db_total))

                count = 0
                for ip in ip_total:
                    for dbs in db_total:
                        db_plural = re.escape(dbs) + 's?'
                        ip_plural = re.escape(ip) + 's?'
                        if re.match(db_plural, ip,flags=0|re.IGNORECASE):
                            count = count + 1
                        if re.match(ip_plural,dbs,flags=0|re.IGNORECASE):
                            count = count + 1
                        if ip == dbs:
                            count = count - 1

                if max_value < count:
                    display_ans = a.answer
                    max_value = count

            if display_ans == '':
                answer = 0
            else:
                answer = 1

            """abc = models.Wikipedia.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1"""

            if answer == 0:
                flash("Answer not in the Wikipedia database... Let's search Wikipedia on the Internet", 'Answer')
                ny = wikipedia.search(data)
                if ny == []:
                    return redirect ('http://www.lmgtfy.com/?q=' + data1)
                else:
                    try:
                        ny1 = wikipedia.summary(data1, chars=0, auto_suggest=True, redirect=True, sentences=3)
                        max_value = int(max_check * 0.8 + 0.5)
                        ip_wiki = ny1.encode('ascii','ignore')
                        # Noun
                        tokenized = nltk.word_tokenize(ip_wiki)
                        p = nltk.pos_tag(tokenized)
                        name = nltk.ne_chunk(p, binary=True)
                        ent = re.findall(r'NE\s(.*?)/', str(name))
                        chunkGram = r"""Noun: {<NN\w?>} """
                        chunkParser = nltk.RegexpParser(chunkGram)
                        NNnoun = chunkParser.parse(p)
                        db_noun = re.findall(r'Noun\s(.*?)/', str(NNnoun))

                        # Verbs
                        tokenized = nltk.word_tokenize(ip_wiki)
                        p = nltk.pos_tag(tokenized)
                        name = nltk.ne_chunk(p, binary=True)
                        chunkGram = r"""Verb: {<VB\w?>} """
                        chunkParser = nltk.RegexpParser(chunkGram)
                        VBverb = chunkParser.parse(p)
                        db_verb = re.findall(r'Verb\s(.*?)/', str(VBverb))

                        # Adjective
                        tokenized = nltk.word_tokenize(ip_wiki)
                        p = nltk.pos_tag(tokenized)
                        name = nltk.ne_chunk(p, binary=True)
                        chunkGram = r"""Verb: {<JJ\w?>} """
                        chunkParser = nltk.RegexpParser(chunkGram)
                        JJAdj = chunkParser.parse(p)
                        db_adj = re.findall(r'Verb\s(.*?)/', str(JJAdj))

                        # Number
                        tokenized = nltk.word_tokenize(ip_wiki)
                        p = nltk.pos_tag(tokenized)
                        name = nltk.ne_chunk(p, binary=True)
                        chunkGram = r"""Number: {<CD\w?>} """
                        chunkParser = nltk.RegexpParser(chunkGram)
                        CDNumber = chunkParser.parse(p)
                        db_number = re.findall(r'Number\s(.*?)/', str(CDNumber))

                        db_total = db_noun + db_adj + db_verb + db_number
                        db_total = list(set(db_total))

                        count = 0
                        for ip in ip_total:
                            for dbs in db_total:
                                db_plural = re.escape(dbs) + 's?'
                                ip_plural = re.escape(ip) + 's?'
                                if re.match(db_plural, ip,flags=0|re.IGNORECASE):
                                    count = count + 1
                                if re.match(ip_plural,dbs,flags=0|re.IGNORECASE):
                                    count = count + 1
                                if ip == dbs:
                                    count = count - 1

                        if max_value <= count:
                            display_ans = ny1

                        if display_ans == '':
                            answer = 0
                        else:
                            answer = 1

                        if answer == 0:
                            flash('Answer not precise in Wikipedia Internet search', 'Answer')
                            flash(ny1, 'Answer')
                            wikiflag = 1
                        else:
                            display_ans=ny1
                            flash(ny1, 'Answer')
                            ny2 = wikipedia.page(data1)
                            flash('Source: '+ ny2.url, 'Answer')
                            #u = models.Wikipedia(question=data1, answer=ny1)
                            #db.session.add(u)
                            #db.session.commit()
                    except Exception as inst:
                        flash('Your question is either out of scope or too trivial for me to answer: ' + str(inst), 'Answer')
                        display_ans = 'Your question is either out of scope or too trivial for me to answer'
            else:
                flash(display_ans, 'Answer')
        #s = models.Chats.query.all()
        #for chat in reversed(s):
            #flash('Question: ' + chat.question, 'Display')
            #flash('Answer: ' + chat.answer , 'Display')
            #flash('.', 'Display')
        #u = models.Chats(question=data1, answer=display_ans)
        #db.session.add(u)
        #db.session.commit() 
        return redirect('/test')
    return render_template("index2.html",
        title = 'ChatterBot',
        form = form)
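
The noun/verb/adjective/number chunking above is repeated verbatim for every database row, and the plural-tolerant match counting appears several times as well. A sketch of two helpers (hypothetical, and slightly simplified relative to the scoring above) that the view could call instead:

import re
import nltk

def extract_terms(sentence):
    # Return the set of noun (NN*), verb (VB*), adjective (JJ*) and number (CD) tokens.
    tagged = nltk.pos_tag(nltk.word_tokenize(sentence))
    return {word for word, tag in tagged if tag.startswith(('NN', 'VB', 'JJ', 'CD'))}

def overlap_score(query_terms, row_terms):
    # Count case-insensitive matches between the two term sets, letting a bare term match its plural.
    count = 0
    for q in query_terms:
        for r in row_terms:
            if re.match(re.escape(r) + r's?$', q, re.IGNORECASE) or \
               re.match(re.escape(q) + r's?$', r, re.IGNORECASE):
                count += 1
    return count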