Beispiel #1
0
 def run(self):
     """Look up this entry on Wikipedia and read the summary aloud.

     Fetches a two-sentence summary for ``self.name``, renders it to
     Spanish speech with gTTS, saves the audio to ``self.filename`` and
     starts ``self.player``.
     """
     print(self.name)
     try:
         summary_text = wikipedia.summary("{}".format(self.name), sentences=2)
     except wikipedia.DisambiguationError as err:
         # Ambiguous title: retry with the chemistry qualifier appended.
         summary_text = wikipedia.summary("{} químico".format(self.name),
                                          sentences=2)
         print(err.options)
     speech = gTTS(text=summary_text, lang='es')
     print(summary_text)
     speech.save(self.filename)
     print("finish")
     self.player.play()
     print("play")
Beispiel #2
0
	def searchIntent(self, session: DialogSession):
		"""Answer the user's query with a three-sentence Wikipedia summary.

		Re-prompts when no search word was captured, the title is
		ambiguous or nothing matches; reports when the device is offline.
		"""
		search = self._extractSearchWord(session)
		if not search:
			self._whatToSearch(session, 'whatToSearch')
			return

		wikipedia.set_lang(self.LanguageManager.activeLanguage)

		try:
			with Online():
				result = wikipedia.summary(search, sentences=3)
		except OfflineError:
			self.endDialog(sessionId=session.sessionId, text=self.randomTalk('offline', skill='system'))
			return
		except wikipedia.DisambiguationError as e:
			self.logWarning(msg=e)
			self._whatToSearch(session, 'ambiguous')
			return
		except wikipedia.WikipediaException as e:
			self.logWarning(msg=e)
			self._whatToSearch(session, 'noMatch')
			return
		except Exception as e:
			self.logWarning(msg=e, printStack=True)
			return

		# Success path (the original's try/else branch).
		if result:
			self.endDialog(sessionId=session.sessionId, text=result)
		else:
			self._whatToSearch(session, 'noMatch')
Beispiel #3
0
def get_item_summary(wd_id, lang='en'):
    """Return the Wikipedia summary for a Wikidata item id.

    Resolves *wd_id* (e.g. ``"Q42"``) through the Wikidata EntityData
    endpoint to its ``{lang}wiki`` sitelink title, then fetches that
    page's summary.

    Returns ``None`` when *wd_id* is ``None`` and ``""`` on any failure:
    network error, non-JSON response, missing sitelink, or a Wikipedia
    page/disambiguation error.
    """
    if wd_id is None:
        return None
    try:
        r = requests.get(
            u'https://www.wikidata.org/wiki/Special:EntityData/{}.json'.format(
                wd_id))
    except requests.RequestException:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed here.
        logger.warning(
            u"couldn't get https://www.wikidata.org/wiki/Special:EntityData/{}.json"
            .format(wd_id))
        return ""
    try:
        title = r.json()['entities'][wd_id]['sitelinks']['{}wiki'.format(
            lang)]['title']
        try:
            return wikipedia.summary(title)
        except (PageError, WikipediaException, DisambiguationError):
            logger.warning(u"couldn't get wikipedia.summary({})".format(title))
            return ''
    except ValueError:
        # not JSON
        return ""
    except KeyError:
        # No {lang}wiki sitelink (or unexpected payload shape).
        logger.warning(u"couldn't get wikidata key {}".format(wd_id))
        return ""
Beispiel #4
0
def get_wikipedia(terms):
    """Return a JSON array of Wikipedia summaries for comma-separated *terms*.

    Each element has the shape ``{"term": <term>, "summary": <summary>}``.

    Bug fix: the original built each item dict but never appended it to
    ``results``, so the function always returned ``"[]"``. The local was
    also renamed from ``sum``, which shadowed the builtin.
    """
    results = []
    for term in terms.split(","):
        summary = wikipedia.summary(term)
        results.append({"term": term, "summary": summary})

    return json.dumps(results)
 def test_zh(self):
     """Smoke-test a Chinese-language summary fetch through a local proxy."""
     proxies = {
         'http': 'http://localhost:1080',
         'https': 'https://localhost:1080'
     }
     wikipedia.set_lang('zh')
     wikipedia.set_proxy(proxies)
     wikipedia.set_request_lang('zh-CN,zh;q=0.9,en;q=0.8,da;q=0.7')
     print(wikipedia.summary('流行性感冒'))
Beispiel #6
0
    def get_wiki_snippet(search_term):
        """Return a Wikipedia summary for *search_term*.

        On a disambiguation page a random candidate article is summarized
        instead; when no page matches, a "no matches" message is returned.
        Warnings emitted by the lookup are suppressed.
        """
        result = ""
        # https://docs.python.org/3/library/warnings.html#temporarily-suppressing-warnings the link to chatch warning
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # The original wrapped this in ``while True`` although every
            # path broke out on the first pass — the loop was dead and has
            # been removed; the unused ``as err`` binding is dropped too.
            try:
                result = wikipedia.summary(search_term)
            except wikipedia.DisambiguationError as e:
                # Ambiguous: pick one of the suggested articles at random.
                # As in the original, a failure of this second lookup
                # propagates to the caller.
                new_article = random.choice(e.options)
                result = wikipedia.summary(new_article)
            except wikipedia.PageError:
                result = "No matches found for " + str(search_term)

        return result
Beispiel #7
0
def get_me():
    """Look up the text typed into *entry* on Wikipedia and display it.

    Clears the *answer* text widget, then inserts either the fetched
    summary or a generic error message when the lookup fails.
    """
    entry_value = entry.get()
    answer.delete(1.0, END)
    try:
        answer_value = wikipedia.summary(entry_value)
        answer.insert(INSERT, answer_value)
    except Exception:
        # Narrowed from a bare ``except:`` (which would also swallow
        # KeyboardInterrupt/SystemExit). Any lookup failure — network,
        # disambiguation, missing page — gets the same friendly message.
        answer.insert(
            INSERT,
            "please check your internet connection or check your spelling")
Beispiel #8
0
def main():
    """Interactively pick a Wikipedia language and print a page summary.

    Python 2 script (uses ``raw_input``). Loops until the user answers
    y/yes/n/no to the language-change prompt; only the "change language,
    abbreviation unknown" path performs a lookup — the other answers fall
    through, exactly as in the original.
    """
    valid_answers = ('y', 'yes', 'n', 'no')
    choice = ''
    known_language = ''
    while choice.lower() not in valid_answers:
        choice = raw_input('Default language - English. You wanna to change this? [y/n]: ')
        if choice.lower() not in ('yes', 'y'):
            continue
        known_language = raw_input('You know your language abbreviation? [y/n]: ')
        if known_language.lower() not in ('no', 'n'):
            continue
        lang_abbreviation = raw_input('Enter a native language naming: ')
        find_lang(lang_abbreviation)
        wikipedia.set_lang(raw_input('Enter your choice: '))
        name = raw_input('Page for search: ')
        MyPrettyPrinter().pprint(wikipedia.summary(name))
Beispiel #9
0
    def searchIntent(self, session: DialogSession, **_kwargs):
        """Read a three-sentence Wikipedia summary for the user's query.

        Re-prompts when no search word was captured or the summary came
        back empty. NOTE(review): exceptions from wikipedia.summary are
        not handled here (unchanged from the original).
        """
        search = self._extractSearchWord(session)
        if not search:
            self._whatToSearch(session, 'whatToSearch')
            return

        wikipedia.set_lang(self.LanguageManager.activeLanguage)
        result = wikipedia.summary(search, sentences=3)

        if result:
            self.endDialog(sessionId=session.sessionId, text=result)
        else:
            self._whatToSearch(session, 'noMatch')
def get_item_summary(wd_id, lang="en"):
    """Return the *lang* Wikipedia summary for a Wikidata item id.

    Returns ``None`` when *wd_id* is ``None`` and ``""`` on any failure:
    network error, non-JSON payload, missing sitelink, or a Wikipedia
    page/disambiguation error.
    """
    if wd_id is None:
        return None
    try:
        r = requests.get(u"https://www.wikidata.org/wiki/Special:EntityData/{}.json".format(wd_id))
    except requests.RequestException:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed here.
        logging.warning(u"couldn't get https://www.wikidata.org/wiki/Special:EntityData/{}.json".format(wd_id))
        return ""
    try:
        title = r.json()["entities"][wd_id]["sitelinks"]["{}wiki".format(lang)]["title"]
        try:
            return wikipedia.summary(title)
        except (PageError, WikipediaException, DisambiguationError):
            logging.warning(u"couldn't get wikipedia.summary({})".format(title))
            return ""
    except ValueError:
        # not JSON
        return ""
    except KeyError:
        # Bug fix: a missing {lang}wiki sitelink previously raised an
        # uncaught KeyError (the sibling version of this function handles
        # it); return "" like every other failure path.
        logging.warning(u"couldn't get wikidata key {}".format(wd_id))
        return ""
Beispiel #11
0
 def wikipedia(self):
     """Voice-assistant flow: ask for a topic, read the first three
     sentences of its Wikipedia summary, then offer to read the rest.
     """
     self.lucy.say('What you want to know')
     self.lucy.runAndWait()
     winsound.PlaySound('Sounds/init.wav', winsound.SND_ASYNC)
     cmd_text = self.command()
     self.lucy.say('Let me see, what i can find')
     self.lucy.runAndWait()
     # Split on '.' so the summary can be read in sentence-sized chunks.
     wiki_data = wikipedia.summary(cmd_text).split('.')
     self.lucy.say('.'.join(wiki_data[:3]))
     self.lucy.runAndWait()
     self.lucy.say('Do you want me to read more')
     self.lucy.runAndWait()
     winsound.PlaySound('Sounds/init.wav', winsound.SND_ASYNC)
     cmd_text = self.command()
     # Bug fix: the None guard must run BEFORE the membership test. The
     # original evaluated `'yes' in cmd_text` first, so a None reply from
     # self.command() raised TypeError instead of reaching the guard.
     if cmd_text is not None and 'yes' in cmd_text:
         self.lucy.say('.'.join(wiki_data[3:]))
         self.lucy.runAndWait()
     else:
         self.lucy.say('Okay')
         self.lucy.runAndWait()
def respond(voice_data):
    """Dispatch a recognized voice command and perform the matching action.

    Matches substrings of *voice_data* against known commands (name, time,
    Google/Wikipedia search, maps, PDF/PPT/file opening). Most branches
    speak a reply and return a short status/result string; the PDF and
    presentation branches only open a file (implicit None return).
    """
    if 'what is your name' in voice_data:
        ray_speak('My name is Ray')
        return "Hello this is ray"
    if 'What time is it' in voice_data:
        ray_speak(ctime())
        return "This is option 2"
    if 'Google search' in voice_data:
        search = record_audio('What do you want to search for')
        # Bug fix: the original URL was 'https://.google.com/search?q=' —
        # the stray leading dot made the hostname invalid.
        url = 'https://www.google.com/search?q=' + search
        webbrowser.get().open(url)
        ray_speak('Here is what I found for ' + search)
        return search
    if 'Wikipedia search' in voice_data:
        search = record_audio("What do you want to search in wikipedia")
        results = wikipedia.summary(search, sentences=3)
        ray_speak('Here is what I found in Wikipedia, ' + results)
        return results
    if 'find location' in voice_data:
        location = record_audio('Which location do you want to search')
        # NOTE(review): the '/&amp' suffix looks like an HTML-escape
        # artifact — confirm against the intended maps URL before changing.
        url = 'https://google.nl/maps/place/' + location + '/&amp'
        webbrowser.get().open(url)
        ray_speak('Here is the location of ' + location)
        return location
    if 'PDF' in voice_data:
        pdf = record_audio("Which pdf do you want to open")
        ray_speak("opening pdf")
        power = settings.MEDIA_ROOT + "\\" + pdf + ".pdf"
        os.startfile(power)
    if 'presentation' in voice_data:
        ray_speak("opening ppt")
        power = settings.MEDIA_ROOT + "/Text Summarization Sprint 2.pptx"
        os.startfile(power)
    if 'file' in voice_data:
        ray_speak("opening file")
        appli = r"text/views.py"
        os.startfile(appli)
        return "File opened"
Beispiel #13
0
    def searchIntent(self, session: DialogSession):
        """Answer the user's query with a three-sentence Wikipedia summary.

        Re-prompts on a missing query, an ambiguous title or no match;
        unexpected errors are logged with a stack trace.
        """
        search = self._extractSearchWord(session)
        if not search:
            self._whatToSearch(session, 'whatToSearch')
            return

        wikipedia.set_lang(self.LanguageManager.activeLanguage)

        try:
            result = wikipedia.summary(search, sentences=3)
        except wikipedia.DisambiguationError:
            self.logWarning(msg='Ambiguous result')
            self._whatToSearch(session, 'ambiguous')
            return
        except wikipedia.WikipediaException:
            self.logWarning(msg='No match')
            self._whatToSearch(session, 'noMatch')
            return
        except Exception as e:
            self.logWarning(msg=str(e), printStack=True)
            return

        # Success path (the original's try/else branch).
        if result:
            self.endDialog(sessionId=session.sessionId, text=result)
        else:
            self._whatToSearch(session, 'noMatch')
Beispiel #14
0
from wikipedia import wikipedia

if __name__ == '__main__':
    # Fetch and print a short demo summary.
    napoleon_summary = wikipedia.summary("Napoleon", sentences=2)
    print(napoleon_summary)
Beispiel #15
0
# Filter the raw search results down to entries whose Wikipedia summary
# mentions a company or corporation, printing progress as we go.
resultlist = searchresult
a = len(resultlist)

narrowlist = []

b = 0

print("Crawling for companies...")

for thing in resultlist:
    b = b + 1
    sys.stdout.write('\r')
    sys.stdout.write('%.0f%% complete' % (b / a * 100, ))
    sys.stdout.flush()
    try:
        summary = wikipedia.summary(thing, 1)
        # Bug fix: str.find() returns -1 when absent (truthy!) and 0 when
        # the word starts the summary (falsy!), so the original kept
        # nearly everything and could drop genuine matches. Use proper
        # substring membership instead.
        if 'company' in summary or 'corporation' in summary:
            narrowlist.append(thing)
    except Exception:
        # Narrowed from a bare except; skip entries that fail to resolve.
        continue

print('\n')
print(narrowlist)
print("Scoring search results...")

a = len(narrowlist)
Beispiel #16
0
def test():
    """Flask view implementing a rule-based chatbot.

    On form submit: strips wh-words, prepositions and pronouns from the
    question, classifies it as assertive / negative / positive, answers
    from the matching database table, falls back to the stored Wikipedia
    table and then to a live Wikipedia lookup, records the chat, and
    redirects back to /test. Without a submit it just renders the form.
    """
    form = LoginForm()
    if form.validate_on_submit():
        flash(form.openid.data, 'Question')
        text = form.openid.data.lower()
        data = form.openid.data.lower()
        data1 = form.openid.data
        text = text.split()
        # Word lists used to classify and clean the question.
        negator = [
            'not', 'never', 'not possible', 'does not', 'abort', 'neither',
            'nor', 'no', 'negative', 'negate'
        ]
        assertor = ['may be', 'can be', 'not sure', 'might', 'may']
        preposition = [
            'have', 'is', 'are', 'about', 'above', 'across', 'after',
            'against', 'along', 'among', 'around', 'at', 'before', 'behind',
            'below', 'beneath', 'beside', 'between', 'by', 'down', 'during',
            'except', 'for', 'from', 'front', 'inside', 'instead', 'into',
            'like', 'near', 'of', 'off', 'on', 'onto', 'top', 'out', 'outside',
            'over', 'past', 'since', 'through', 'to', 'toward', 'under',
            'underneath', 'until', 'up', 'upon', 'with', 'within', 'without'
        ]
        wh = [
            'why', 'what', 'how', 'Who', 'whoever', 'whom', 'whomever',
            'whose', 'which'
        ]
        pronoun = [
            'i', 'me', 'you', 'she', 'her', 'he', 'him', 'it', 'we', 'us',
            'you', 'they', 'them', 'my', 'mine', 'your', 'yours', 'hers',
            'his', 'its', 'yours', 'ours', 'theirs', 'myself', 'yourself',
            'himself', 'herself', 'itself', 'all', 'another', 'any', 'anybody',
            'anyone', 'anything', 'both', 'each', 'either', 'everybody',
            'everyone', 'everything', 'few', 'many', 'neither', 'nobody',
            'none', 'nothing', 'one', 'several', 'some', 'somebody', 'someone',
            'something', 'this', 'that', 'these', 'those'
        ]
        # Removing Wh Question
        wh_q = ''
        for ser in text:
            inflag = 0
            for w in wh:
                if w == ser:
                    inflag = 1
            if inflag == 0:
                wh_q = wh_q + ser + ' '

        # Removing Prepostion
        wh_q = wh_q.split()
        prep_q = ''
        for ser in wh_q:
            inflag = 0
            for prep in preposition:
                if ser == prep:
                    inflag = 1
            if inflag == 0:
                prep_q = prep_q + ser + ' '

        # Removing Pronoun
        prep_q = prep_q.split()
        pro_q = ''
        for ser in prep_q:
            inflag = 0
            for pro in pronoun:
                if ser == pro:
                    inflag = 1
            if inflag == 0:
                pro_q = pro_q + ser + ' '

        text = pro_q
        text = text.split()
        data = pro_q.strip()
        flag = 0
        answer = 0
        wikiflag = 0
        ans = 0

        # NOTE(review): this reset discards the cleaned pro_q assigned to
        # data just above; data is rebuilt token by token in the section
        # loops below. Also: posi is never read after this point.
        data = ''
        asser = 0
        nege = 0
        posi = 0
        #Assertive Section
        for ser in text:
            inflag = 0
            for ass in assertor:
                if ser == ass and flag == 0 or data.find(
                        ass) != -1 and flag == 0:
                    inflag = 1
                    asser = 1
                    flash('Assertive', 'Answer')
                    flag = 1
            if inflag == 0:
                data = data + ser + ' '
        if asser == 1:
            data = data.strip()
            abc = models.Assertive.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1
                        or a.question.lower().find(data) != -1
                    ) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash(
                    'Answer not in database... Lets search Wikipedia Database',
                    'Answer')
                wikiflag = 1
                #return redirect ('http://www.lmgtfy.com/?q=' + data)
            else:
                finalans = a.answer
                flash(a.answer, 'Answer')

        #Negative Section
        if asser == 0:
            data = ''
        for ser in text:
            inflag = 0
            for neg in negator:
                if ser == neg and flag == 0 or data.find(
                        neg) != -1 and flag == 0:
                    inflag = 1
                    nege = 1
                    flash('Negative', 'Answer')
                    flag = 1
            if inflag == 0:
                data = data + ser + ' '
        if nege == 1:
            data = data.strip()
            abc = models.Negative.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1
                        or a.question.lower().find(data) != -1
                    ) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash(
                    'Answer not in database... Lets search Wikipedia Database',
                    'Answer')
                wikiflag = 1
                #return redirect ('http://www.lmgtfy.com/?q=' + data)
            else:
                finalans = a.answer
                flash(a.answer, 'Answer')

        #Postive Section

        if flag == 0:
            data = form.openid.data.lower()
            flash('Positive', 'Answer')
            abc = models.Positive.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1
                        or a.question.lower().find(data) != -1
                    ) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash(
                    'Answer not in database... Lets search Wikipedia Database',
                    'Answer')
                wikiflag = 1
                #return redirect ('http://www.lmgtfy.com/?q=' + data)
            else:
                finalans = a.answer
                flash(a.answer, 'Answer')

        #Wiki Section
        ans = 0
        if wikiflag == 1:
            abc = models.Wikipedia.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1
                        or a.question.lower().find(data) != -1
                    ) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash(
                    'Answer not in Wikipedia database... Lets search Wikipedia Internet',
                    'Answer')
                ny = wikipedia.search(data)
                if ny == []:
                    return redirect('http://www.lmgtfy.com/?q=' + data1)
                else:
                    try:
                        ny1 = wikipedia.summary(data1,
                                                chars=0,
                                                auto_suggest=True,
                                                redirect=True,
                                                sentences=3)
                        finalans = ny1
                        flash(ny1, 'Answer')
                        ny2 = wikipedia.page(data1)
                        flash('Source: ' + ny2.url, 'Answer')
                        #u = models.Wikipedia(question=data, answer=ny1)
                        #db.session.add(u)
                        #db.session.commit()
                    except Exception as inst:
                        flash(
                            'Your question is either out of scope of very trival for me to answer',
                            'Answer')
                        finalans = 'Your question is either out of scope of very trival for me to answer'
            else:
                finalans = a.answer
                flash(a.answer, 'Answer')
        # NOTE(review): display is assigned but never used.
        display = '\n'
        s = models.Chats.query.all()
        for chat in reversed(s):
            flash('Question: ' + chat.question, 'Display')
            flash('Answer: ' + chat.answer, 'Display')
            flash('.', 'Display')
        u = models.Chats(question=data1, answer=finalans)
        db.session.add(u)
        db.session.commit()

        return redirect('/test')
    return render_template("index2.html", title='ChatterBot', form=form)
Beispiel #17
0
 def test_disambiguation_error_summary_function(self):
     """Check that summarizing the ambiguous title "Mercury" raises.

     NOTE(review): the assertFalse compares against a template string
     whose {0}/{1} placeholders are never formatted, so it can never be
     contained in the exception text and the assertion is vacuous — the
     test only verifies that *some* exception is raised.
     """
     with self.assertRaises(Exception) as context:
         wikipedia.summary("Mercury")
     self.assertFalse(
         "wikipedia.exceptions.DisambiguationError: \"{0}\" may refer to: \n{1}"
         in str(context.exception))
Beispiel #18
0
 def test_page_error_query_does_not_match(self):
     """Check that summarizing the nonsense query "~!@" raises.

     NOTE(review): as with the disambiguation test, the assertFalse
     compares against an unformatted template string (literal {0}), so
     it is vacuous — only the assertRaises provides coverage.
     """
     with self.assertRaises(Exception) as context:
         wikipedia.summary("~!@")
     self.assertFalse(
         "wikipedia.exceptions.PageError: \"{0}\" does not match any pages. Try another query!"
         in str(context.exception))
Beispiel #19
0
 def test_summary(self):
     """Compare the live "Hello world" summary against a pinned snapshot.

     NOTE(review): the expected value embeds a Python-2-style ``u'...'``
     wrapper, so it appears to be a captured repr rather than the raw
     summary text, and it will drift whenever the live article changes —
     confirm against the actual return value of wikipedia.summary.
     """
     _summary = wikipedia.summary("Hello world")
     assert _summary == "u\'A \"Hello, World!\" program is a computer program that ouputs or displays the message \"Hello, World!\". Because it is very simple in most programming languages, it is often used to illustrate the basic syntax of a programming language and is often the first program people write.\'"
Beispiel #20
0
def test():
    """Flask view implementing a rule-based chatbot (unformatted variant).

    On form submit: strips wh-words, prepositions and pronouns from the
    question, classifies it as assertive / negative / positive, answers
    from the matching database table, falls back to the stored Wikipedia
    table and then to a live Wikipedia lookup, records the chat, and
    redirects back to /test. Without a submit it just renders the form.
    """
    form = LoginForm()
    if form.validate_on_submit():
        flash(form.openid.data , 'Question')
        text = form.openid.data.lower()
        data = form.openid.data.lower()
        data1 = form.openid.data
        text = text.split()
        # Word lists used to classify and clean the question.
        negator = ['not', 'never', 'not possible', 'does not', 'abort', 'neither', 'nor', 'no', 'negative', 'negate']
        assertor = ['may be', 'can be', 'not sure', 'might', 'may']
        preposition = ['have', 'is', 'are', 'about', 'above', 'across', 'after', 'against', 'along', 'among', 'around', 'at', 'before', 'behind', 'below', 'beneath', 'beside', 'between', 'by', 'down', 'during', 'except', 'for', 'from', 'front', 'inside', 'instead', 'into', 'like', 'near', 'of', 'off', 'on', 'onto', 'top', 'out', 'outside', 'over', 'past', 'since', 'through', 'to', 'toward', 'under', 'underneath', 'until', 'up', 'upon', 'with', 'within', 'without']
        wh = ['why', 'what', 'how', 'Who', 'whoever', 'whom', 'whomever', 'whose', 'which']
        pronoun = ['i', 'me', 'you', 'she', 'her', 'he', 'him', 'it', 'we', 'us', 'you', 'they', 'them', 'my', 'mine', 'your', 'yours', 'hers', 'his', 'its', 'yours', 'ours', 'theirs', 'myself', 'yourself', 'himself', 'herself', 'itself', 'all', 'another', 'any', 'anybody', 'anyone', 'anything', 'both', 'each', 'either', 'everybody', 'everyone', 'everything', 'few', 'many', 'neither', 'nobody', 'none', 'nothing', 'one', 'several', 'some', 'somebody', 'someone', 'something', 'this', 'that', 'these', 'those']
        # Removing Wh Question
        wh_q=''
        for ser in text:
            inflag = 0
            for w in wh:
                if w == ser:
                    inflag = 1
            if inflag == 0:
                wh_q = wh_q + ser + ' '


        # Removing Prepostion
        wh_q = wh_q.split()
        prep_q = ''
        for ser in wh_q:
            inflag = 0
            for prep in preposition:
                if ser == prep:
                    inflag = 1
            if inflag == 0:
                prep_q = prep_q + ser + ' '


        # Removing Pronoun
        prep_q = prep_q.split()
        pro_q = ''
        for ser in prep_q:
            inflag = 0
            for pro in pronoun:
                if ser == pro:
                    inflag = 1
            if inflag == 0:
                pro_q = pro_q + ser + ' '

        text = pro_q
        text = text.split()
        data = pro_q.strip()
        flag = 0
        answer = 0
        wikiflag = 0
        ans = 0

        # NOTE(review): this reset discards the cleaned pro_q assigned to
        # data just above; data is rebuilt token by token in the section
        # loops below. Also: posi is never read after this point.
        data = ''
        asser = 0
        nege = 0
        posi = 0
        #Assertive Section
        for ser in text:
            inflag = 0
            for ass in assertor:
                if ser == ass and flag == 0 or data.find(ass) != -1 and flag == 0:
                    inflag = 1
                    asser = 1
                    flash('Assertive', 'Answer')
                    flag=1
            if inflag == 0:
                data = data + ser + ' '
        if asser == 1:
            data = data.strip()
            abc = models.Assertive.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash('Answer not in database... Lets search Wikipedia Database', 'Answer')
                wikiflag = 1
                #return redirect ('http://www.lmgtfy.com/?q=' + data)
            else:
                finalans=a.answer
                flash(a.answer, 'Answer')

        #Negative Section
        if asser == 0:
            data = '' 
        for ser in text:
            inflag = 0           
            for neg in negator:
                if ser == neg and flag == 0 or data.find(neg) != -1 and flag == 0:
                    inflag = 1
                    nege = 1
                    flash('Negative', 'Answer')
                    flag = 1
            if inflag == 0:
                data = data + ser + ' '
        if nege == 1:
            data = data.strip()
            abc = models.Negative.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash('Answer not in database... Lets search Wikipedia Database', 'Answer')
                wikiflag = 1
                #return redirect ('http://www.lmgtfy.com/?q=' + data)
            else:
                finalans=a.answer
                flash(a.answer, 'Answer')

        #Postive Section

        if flag == 0:
            data = form.openid.data.lower()
            flash('Positive', 'Answer')
            abc = models.Positive.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash('Answer not in database... Lets search Wikipedia Database', 'Answer')
                wikiflag = 1
                #return redirect ('http://www.lmgtfy.com/?q=' + data)
            else:
                finalans=a.answer
                flash(a.answer, 'Answer')

        #Wiki Section
        ans = 0
        if wikiflag == 1:
            abc = models.Wikipedia.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash('Answer not in Wikipedia database... Lets search Wikipedia Internet', 'Answer')
                ny = wikipedia.search(data)
                if ny == []:
                    return redirect ('http://www.lmgtfy.com/?q=' + data1)
                else:
                    try:
                        ny1 = wikipedia.summary(data1, chars=0, auto_suggest=True, redirect=True, sentences=3)
                        finalans=ny1
                        flash(ny1, 'Answer')
                        ny2 = wikipedia.page(data1)
                        flash('Source: '+ ny2.url, 'Answer')
                        #u = models.Wikipedia(question=data, answer=ny1)
                        #db.session.add(u)
                        #db.session.commit()
                    except Exception as inst:
                        flash('Your question is either out of scope of very trival for me to answer', 'Answer')
                        finalans = 'Your question is either out of scope of very trival for me to answer'
            else:
                finalans=a.answer
                flash(a.answer, 'Answer')
        # NOTE(review): display is assigned but never used.
        display = '\n'
        s = models.Chats.query.all()
        for chat in reversed(s):
            flash('Question: ' + chat.question, 'Display')
            flash('Answer: ' + chat.answer , 'Display')
            flash('.', 'Display')
        u = models.Chats(question=data1, answer=finalans)
        db.session.add(u)
        db.session.commit() 

        return redirect('/test')
    return render_template("index2.html",
        title = 'ChatterBot',
        form = form)
Beispiel #21
0
def process_request(req):
    """Route an api.ai / Dialogflow webhook request to the matching handler.

    Supported actions:
      * ``weather.search``        -> delegate to weather_request_hangouts()
      * any action containing
        ``wisdom``                -> answer with a Wikipedia summary + URL
      * ``yahooWeatherForecast``  -> Yahoo YQL weather query

    Returns a webhook response dict, or {} when nothing matched or a
    handler failed.
    """
    action = req.get("result").get("action")

    if action != "yahooWeatherForecast":
        if action == u"weather.search":
            try:
                parameters = req.get("result").get("parameters")
                location = parameters.get('location')
                return weather_request_hangouts(None, None, location)
            except Exception:
                # Best-effort: malformed parameters or a handler error
                # yield an empty webhook response.
                return {}
        if u"wisdom" in action:
            parameters = req.get("result").get("parameters")
            query_wisdom = parameters.get('q')

            try:
                speech = None
                if query_wisdom:
                    wikipedia_answer_url = None
                    wisdom_answer = None
                    search_results = wikipedia.search(query_wisdom, results=2)
                    # BUG FIX: the original indexed [0]/[1] unconditionally,
                    # raising IndexError on short result lists.
                    if search_results:
                        wikipedia_answer_url = wikipedia.page(
                            search_results[0]).url
                    if wikipedia_answer_url:
                        wisdom_answer = wikipedia.summary(query_wisdom,
                                                          sentences=1)
                    elif len(search_results) > 1:
                        wikipedia_answer_url = wikipedia.page(
                            search_results[1]).url

                    if wisdom_answer and wikipedia_answer_url:
                        speech = ("According to Wikipedia.org ("
                                  + wikipedia_answer_url + "): "
                                  + wisdom_answer)
                    else:
                        # BUG FIX: the original left the search URL as a
                        # dangling expression statement (never appended to
                        # `speech`) and called .replace() on wisdom_answer,
                        # which is None exactly in this branch. Build the
                        # fallback link from the user's query instead.
                        speech = ("I am sorry, but I couldn't find a good article or result for your "
                                  "request on Wikipedia.org "
                                  "Why don't you click on the following link to see similar results: "
                                  "https://en.wikipedia.org/w/index.php?search="
                                  + query_wisdom.replace(" ", "+"))

                    return {
                        "speech": speech,
                        "displayText": speech,
                        # "data": data,
                        # "contextOut": [],
                        "source": "apiai-weather-webhook-sample"
                    }
            except Exception:
                logger.error("Error")

        return {}

    # Yahoo YQL weather path.
    baseurl = "https://query.yahooapis.com/v1/public/yql?"
    yql_query = make_yql_query(req)
    if yql_query is None:
        return {}
    yql_url = baseurl + urlencode({'q': yql_query}) + "&format=json"
    result = urlopen(yql_url).read()
    data = json.loads(result)
    res = make_webhook_result(data)
    return res
    def searchIntent(self, intent: str, session: DialogSession) -> bool:
        """Handle a search intent by looking the query up on Wikipedia.

        Ends the dialog with the summary on success; otherwise keeps the
        dialog open and asks the user to rephrase. Always returns True.
        """
        sessionId = session.sessionId
        customData = session.customData
        search = customData.get('userInput', session.slots.get('what'))

        # Nothing to search for yet: ask the user what to look up.
        if not search:
            self.continueDialog(sessionId=sessionId,
                                text=self.randomTalk('whatToSearch'),
                                intentFilter=[self._INTENT_USER_ANSWER],
                                previousIntent=self._INTENT_SEARCH,
                                customData={
                                    'module': self.name,
                                })
            return True

        wikipedia.set_lang(self.LanguageManager.activeLanguage)
        engine = customData.get('engine', 'wikipedia')

        def _ask_again(text: str) -> None:
            # Re-prompt the user, preserving module/engine context for
            # the follow-up answer.
            self.continueDialog(
                sessionId=sessionId,
                text=text,
                intentFilter=[self._INTENT_USER_ANSWER],
                previousIntent=self._INTENT_SEARCH,
                customData={
                    'module': self.name,
                    'engine': engine
                })

        try:
            # Every engine currently resolves to Wikipedia (the original
            # engine dispatch had identical branches).
            result = wikipedia.summary(search, sentences=3)

            if result:
                self.endDialog(sessionId=sessionId, text=result)
            else:
                _ask_again(self.TalkManager.randomTalk('noMatch').format(search))
        except wikipedia.DisambiguationError:
            _ask_again(self.TalkManager.randomTalk('ambiguous').format(search))
        except wikipedia.WikipediaException:
            _ask_again(self.TalkManager.randomTalk('noMatch').format(search))
        except Exception as e:
            self._logger.error(f'Error: {e}')
            self.endDialog(sessionId=sessionId,
                           text=self.TalkManager.randomTalk('error',
                                                            module='system'))
        return True
Beispiel #23
0
    def _respond(self, input, sessionID):
        """Private version of respond(), does the real work.

        Matches *input* against the AIML brain; when no pattern matches
        (and verbose mode is on), falls back to a one-sentence Wikipedia
        summary which is written to the serial port. Returns the response
        string, which may be empty.

        NOTE(review): this method encodes to bytes and then concatenates
        with str literals — it reads as Python 2 era code; confirm the
        target interpreter before running on Python 3.
        """
        if len(input) == 0:
            return ""

        # guard against infinite recursion
        inputStack = self.getPredicate(self._inputStack, sessionID)
        if len(inputStack) > self._maxRecursionDepth:
            if self._verboseMode:
                err = "WARNING: maximum recursion depth exceeded (input='%s')" % input.encode(
                    self._textEncoding, 'replace')
                sys.stderr.write(err)
            return ""

        # push the input onto the input stack
        inputStack = self.getPredicate(self._inputStack, sessionID)
        inputStack.append(input)
        self.setPredicate(self._inputStack, inputStack, sessionID)

        # run the input through the 'normal' subber
        subbedInput = self._subbers['normal'].sub(input)

        # fetch the bot's previous response, to pass to the match()
        # function as 'that'.
        outputHistory = self.getPredicate(self._outputHistory, sessionID)
        try:
            that = outputHistory[-1]
        except IndexError:
            # No previous response yet (start of session).
            that = ""
        subbedThat = self._subbers['normal'].sub(that)

        # fetch the current topic
        topic = self.getPredicate("topic", sessionID)
        subbedTopic = self._subbers['normal'].sub(topic)

        # Determine the final response.
        response = ""
        elem = self._brain.match(subbedInput, subbedThat, subbedTopic)
        if elem is None:
            # No AIML match: Wikipedia fallback (only taken in verbose mode).
            if self._verboseMode:
                err = "Looking up %s on Wikipedia\n" % input.encode(
                    self._textEncoding)
                sys.stderr.write(err)
                try:
                    wikiString = "%s" % input.encode(self._textEncoding)
                    wikisumm = wikipedia.summary(wikiString, sentences=1)
                    # Keep only the text after the "XFIND " marker;
                    # raises IndexError if the marker is absent.
                    wikisumm = wikisumm.split("XFIND ", 1)[1]
                    ser.write(wikisumm + "\n")  # Send Serial data
                # except wikipedia.DisambiguationError as e:
#                 	sys.stderr.write(e.options)
                except:
                    # NOTE(review): bare except — any failure (lookup error,
                    # missing marker, serial error) collapses to "dunno".
                    wikisumm = "dunno"
                time.sleep(.5)
                ser.write(wikisumm + "\n")  # Send Serial data
        else:
            # Process the element into a response string.
            response += self._processElement(elem, sessionID).strip()
            response += " "
        response = response.strip()

        # pop the top entry off the input stack.
        inputStack = self.getPredicate(self._inputStack, sessionID)
        inputStack.pop()
        self.setPredicate(self._inputStack, inputStack, sessionID)

        return response
Beispiel #24
0
 def wiki(self, mess, args):
     """Look *args* up on Wikipedia and return a two-sentence summary."""
     summary = wikipedia.summary(args, sentences=2)
     return summary
Beispiel #25
0
           playlist="L:\songs"
           songs=os.listdir(playlist)
           os.startfile(os.path.join(playlist,songs[joo]))
           speak("playing music")
       elif "open avast" in query:
           py = "C:/Program Files/AVAST Software/Avast/AvastUI"
           speak("opening avast")
           speak("opening avast")
           os.startfile(py)
       elif "open teamviewer" in query:
           tv = "C:/Program Files (x86)/TeamViewer/TeamViewer"
           speak("opening teamviewer")
           os.startfile(tv)
       elif "wikipedia" in query:
           query = query.replace("wikipedia","")
           result = wikipedia.summary(query,sentences = 2)
           print(result)
           speak(result)
       elif "today headlines" in query:
          web.open("https://news.google.com/?hl=en-IN&gl=IN&ceid=IN:en")
       elif "play game" in query:
           speak("can we play guess the number game ")
           speak("type yes or no")
           i = input("type yes or no :")

           if i == "yes":
               print("you have three chance to guess the number")
               speak("you have three chance to guess the number")
               print("if you not guessed correctly it automatically exists from the game")
               speak("if you not guessed correctly it automatically exists from the game")
               secretnumber = random.randint(0, 8)
Beispiel #26
0
    def _respond(self, input, sessionID):
        """Private version of respond(), does the real work.

        Matches *input* against the AIML brain; when no pattern matches
        (and verbose mode is on), falls back to a one-sentence Wikipedia
        summary which is written to the serial port. Returns the response
        string, which may be empty.

        FIX: the original body mixed tab and space indentation inside the
        Wikipedia fallback (a TabError on Python 3); indentation is
        normalized to spaces. Logic is otherwise unchanged.
        """
        if len(input) == 0:
            return ""

        # guard against infinite recursion
        inputStack = self.getPredicate(self._inputStack, sessionID)
        if len(inputStack) > self._maxRecursionDepth:
            if self._verboseMode:
                err = "WARNING: maximum recursion depth exceeded (input='%s')" % input.encode(self._textEncoding, 'replace')
                sys.stderr.write(err)
            return ""

        # push the input onto the input stack
        inputStack = self.getPredicate(self._inputStack, sessionID)
        inputStack.append(input)
        self.setPredicate(self._inputStack, inputStack, sessionID)

        # run the input through the 'normal' subber
        subbedInput = self._subbers['normal'].sub(input)

        # fetch the bot's previous response, to pass to the match()
        # function as 'that'.
        outputHistory = self.getPredicate(self._outputHistory, sessionID)
        try:
            that = outputHistory[-1]
        except IndexError:
            # No previous response yet (start of session).
            that = ""
        subbedThat = self._subbers['normal'].sub(that)

        # fetch the current topic
        topic = self.getPredicate("topic", sessionID)
        subbedTopic = self._subbers['normal'].sub(topic)

        # Determine the final response.
        response = ""
        elem = self._brain.match(subbedInput, subbedThat, subbedTopic)
        if elem is None:
            # No AIML match: Wikipedia fallback (only taken in verbose mode).
            if self._verboseMode:
                err = "Looking up %s on Wikipedia\n" % input.encode(self._textEncoding)
                sys.stderr.write(err)
                try:
                    wikiString = "%s" % input.encode(self._textEncoding)
                    wikisumm = wikipedia.summary(wikiString, sentences=1)
                    # Keep only the text after the "XFIND " marker;
                    # raises IndexError if the marker is absent.
                    wikisumm = wikisumm.split("XFIND ", 1)[1]
                    ser.write(wikisumm + "\n")   # Send Serial data
                # except wikipedia.DisambiguationError as e:
                #     sys.stderr.write(e.options)
                except:
                    # NOTE(review): bare except — any failure (lookup error,
                    # missing marker, serial error) collapses to "dunno".
                    wikisumm = "dunno"
                time.sleep(.5)
                ser.write(wikisumm + "\n")   # Send Serial data
        else:
            # Process the element into a response string.
            response += self._processElement(elem, sessionID).strip()
            response += " "
        response = response.strip()

        # pop the top entry off the input stack.
        inputStack = self.getPredicate(self._inputStack, sessionID)
        inputStack.pop()
        self.setPredicate(self._inputStack, inputStack, sessionID)

        return response
Beispiel #27
0
def run_alexa():
    """Listen for one voice command and dispatch it to the matching action.

    Reads a command via take_cmd(), then plays media, answers questions,
    fetches weather/news, sends WhatsApp messages, opens maps, or shuts
    the machine down depending on keywords in the command.
    """
    command = take_cmd()
    print(command)
    if 'play' in command:
        song = take_cmd()
        print('playing' + song)
        talk('playing' + song)
        kit.playonyt(song)
    elif 'time' in command:
        # FIX: talk() is called with a single string everywhere else;
        # the original passed three positional arguments.
        talk('Now it is {} {}'.format(hour, minute))
    elif 'wiki' in command:
        srch = take_cmd()
        info = wikipedia.summary(srch, 3)
        print(info)
        talk('according to wikipedia' + info)
    elif 'joke' in command:
        talk(pyjokes.get_joke('english', 'funny'))
    elif 'message' in command:
        talk("please tell me the number on which I should send the message")
        n = '+91' + input()
        talk("please tell me the message you wish to convey")
        msg = take_cmd()
        print(n)
        kit.sendwhatmsg(n, msg, hour, minute + 2, 10, True)
    elif 'news' in command:
        news = webbrowser.open_new_tab(
            "https://timesofindia.indiatimes.com/home/headlines")
        talk('here are some headlines from times of india')
    elif 'search' in command:
        talk('what would you like to search on web?')
        res = take_cmd()
        talk('okay,here are your results')
        kit.search(res)
    elif 'ask' in command:
        talk(
            'I will try my best to answer your question, what do you want to know?'
        )
        qstn = take_cmd()
        app_id = "L8HWRQ-UWVQ3XL54K"
        client = wolframalpha.Client(app_id)
        res = client.query(qstn)
        ans = next(res.results).text
        talk(ans)
    elif 'direction' in command or 'map' in command:
        talk('Can you please tell me the starting location?')
        ini = take_cmd()
        talk('thanks, now can you please tell me where are you headed to?')
        des = take_cmd()
        # FIX: the original URL had a comma ("www,google.ca"), which is not
        # a valid host.
        webbrowser.open_new_tab("https://www.google.ca/maps/dir/" + ini + "/" +
                                des)
    elif 'music' in command:
        talk('opening spotify...please wait')
        webbrowser.open_new_tab("https://open.spotify.com/")
    elif 'bye' in command or 'tata' in command or 'stop' in command:
        talk('Okay I am going now, have a nice day ahead')
        # FIX: datetime.time has no sleep(); use the time module.
        import time
        time.sleep(100)
    elif 'yourself' in command:
        talk(
            'I am your personal assistant smarty, I can carry out your small tasks and help in saving your time and effort,I was made by piyush'
        )
    elif 'weather' in command:
        api_key = "b675ff1bd4d0eed3a1392b61e1a0d712"
        base_url = "https://api.openweathermap.org/data/2.5/weather?"
        talk("what is the city name")
        city_name = take_cmd()
        complete_url = base_url + "&q=" + city_name + "&appid=" + api_key
        response = requests.get(complete_url)
        x = response.json()
        if x["cod"] != "404":
            y = x["main"]
            # OpenWeatherMap returns Kelvin; convert to Celsius.
            current_temperature = y["temp"] - 273
            current_temperature = round(current_temperature, 2)
            current_humidiy = y["humidity"]
            z = x["weather"]
            weather_description = z[0]["description"]
            talk(" Temperature is " + str(current_temperature) +
                 " degree celsius" + "\n humidity in percentage is " +
                 str(current_humidiy) + "\n description  " +
                 str(weather_description))
    elif 'log off' in command or 'shut down' in command:
        talk(
            "Okay, your system will shut down in 10 seconds, make sure you closed all applications!"
        )
        # FIX: "/1" is not a valid Windows shutdown flag; "/s /t 10" matches
        # the spoken message ("shut down in 10 seconds").
        subprocess.call(["shutdown", "/s", "/t", "10"])
    else:
        talk('please repeat what you said')
Beispiel #28
0
# CLI entry point: print a Wikipedia summary for a page title, or render
# a slideshow of the page's images when --slideshow is given.
import argparse
from wikipedia import wikipedia

parser = argparse.ArgumentParser()
parser.add_argument('wikipage',
                    nargs='?',
                    default="Iron Man",
                    help='Wiki page title')
parser.add_argument('--slideshow',
                    action='store_true',
                    help='Render a slideshow of the images found on that subject')
args = parser.parse_args()

if not args.slideshow:
    # Default mode: just print the article summary.
    print(wikipedia.summary(args.wikipage))
else:
    # Import lazily so the slideshow dependency is only needed on demand.
    from slideshow import render_slideshow
    render_slideshow(args.wikipage)
Beispiel #29
0
def test():
    form = LoginForm()
    if form.validate_on_submit():
        flash(form.openid.data , 'Question')
        text = form.openid.data.lower()
        data = form.openid.data.lower() # for processing of answer(data mining)
        data1 = form.openid.data # for finding verbs nouns adjectives and number
        text = text.split() # for finding positive negative and assertive

        # Finding Nouns
        tokenized = nltk.word_tokenize(data1)
        p = nltk.pos_tag(tokenized)
        flash(p, 'Answer')
        name = nltk.ne_chunk(p, binary=True)
        ent = re.findall(r'NE\s(.*?)/', str(name))
        chunkGram = r"""Noun: {<NN\w?>} """
        chunkParser = nltk.RegexpParser(chunkGram)
        NNnoun = chunkParser.parse(p)
        ip_noun = re.findall(r'Noun\s(.*?)/', str(NNnoun))
        #noun = re.findall(r'<NN\w?>*', str(p))
        #print ent
        #nouns = ''
        #for n in ip_noun:
        #    nouns = nouns + n + ' '
        #flash ('Nouns: ' + str(nouns), 'Answer')
        flash ('Nouns list: ' + str(ip_noun), 'Answer')

        # Finding Verbs
        tokenized = nltk.word_tokenize(data1)
        p = nltk.pos_tag(tokenized)
        name = nltk.ne_chunk(p, binary=True)

        chunkGram = r"""Verb: {<VB\w?>} """
        chunkParser = nltk.RegexpParser(chunkGram)
        VBverb = chunkParser.parse(p)
        ip_verb = re.findall(r'Verb\s(.*?)/', str(VBverb))
        #noun = re.findall(r'<NN\w?>*', str(p))
        #print ent
        #verbs = ''
        #for v in ip_verb:
        #    verbs = verbs + v + ' '
        #flash ('Verbs: ' + str(verbs), 'Answer')
        flash ('Verb List: ' + str(ip_verb), 'Answer')

        # Finding Adjective
        tokenized = nltk.word_tokenize(data1)
        p = nltk.pos_tag(tokenized)
        name = nltk.ne_chunk(p, binary=True)

        chunkGram = r"""Verb: {<JJ\w?>} """
        chunkParser = nltk.RegexpParser(chunkGram)
        JJAdj = chunkParser.parse(p)
        ip_adj = re.findall(r'Verb\s(.*?)/', str(JJAdj))
        #noun = re.findall(r'<NN\w?>*', str(p))
        #print ent
        #adjs = ''
        #for a in ip_adj:
        #    adjs = adjs + a + ' '
        #flash ('Ajectives: ' + str(adjs), 'Answer')
        flash ('Adjective list: ' + str(ip_adj), 'Answer')

        # Finding Numbers
        tokenized = nltk.word_tokenize(data1)
        p = nltk.pos_tag(tokenized)
        name = nltk.ne_chunk(p, binary=True)
        chunkGram = r"""Number: {<CD\w?>} """
        chunkParser = nltk.RegexpParser(chunkGram)
        CDNumber = chunkParser.parse(p)
        ip_number = re.findall(r'Number\s(.*?)/', str(CDNumber))
        flash ('Number list: ' + str(ip_number), 'Answer')

        max_check = len(ip_noun) + len(ip_verb) + len(ip_adj) + len(ip_number) #counting the number of max hits

        # Similar Noun Form
        simi = models.Similar.query.all()
        count_n = len(ip_noun)
        max_n = 0
        for noun_sim in ip_noun:
            for sim in simi:
                if sim.word1 == noun_sim:
                    ip_noun.append(str(sim.word2))
                    ip_noun.append(str(sim.word3))
                if sim.word2 == noun_sim:
                    ip_noun.append(str(sim.word1))
                    ip_noun.append(str(sim.word3))
                if sim.word3 == noun_sim:
                    ip_noun.append(str(sim.word1))
                    ip_noun.append(str(sim.word2))
            max_n = max_n + 1
            if max_n >= count_n:
                break


        # Similar Verb Form
        simi = models.Similar.query.all()
        count_v = len(ip_verb)
        max_v = 0
        for verb_sim in ip_verb:
            for sim in simi:
                if sim.word1 == verb_sim:
                    ip_verb.append(str(sim.word2))
                    ip_verb.append(str(sim.word3))
                if sim.word2 == verb_sim:
                    ip_verb.append(str(sim.word1))
                    ip_verb.append(str(sim.word3))
                if sim.word3 == verb_sim:
                    ip_verb.append(str(sim.word1))
                    ip_verb.append(str(sim.word2))
            max_v = max_v + 1
            if max_v >= count_v:
                break

        # Similar Adjective Form
        simi = models.Similar.query.all()
        count_a = len(ip_adj)
        max_a = 0
        for adj_sim in ip_adj:
            for sim in simi:
                if sim.word1 == adj_sim:
                    ip_adj.append(str(sim.word2))
                    ip_adj.append(str(sim.word3))
                if sim.word2 == adj_sim:
                    ip_adj.append(str(sim.word1))
                    ip_adj.append(str(sim.word3))
                if sim.word3 == adj_sim:
                    ip_adj.append(str(sim.word1))
                    ip_adj.append(str(sim.word2))
            max_a = max_a + 1
            if max_a >= count_a:
                break

        #Printing the new appended list        
        flash ('Nouns list: ' + str(ip_noun), 'Answer')
        flash ('Verb List: ' + str(ip_verb), 'Answer')
        flash ('Adjective list: ' + str(ip_adj), 'Answer')
        flash ('Number list: ' + str(ip_number), 'Answer')

        ip_total = ip_noun + ip_verb + ip_adj + ip_number
        ip_total = list(set(ip_total))

        negator = ['not', 'never', 'not possible', 'does not', 'abort', 'neither', 'nor', 'negative', 'negate', 'can\'t', 'doesn\'t','can not','cant','doesnt','dont','don\'t']
        assertor = ['may be', 'can be', 'not sure', 'might', 'may']
        '''preposition = ['have', 'is', 'are', 'about', 'above', 'across', 'after', 'against', 'along', 'among', 'around', 'at', 'before', 'behind', 'below', 'beneath', 'beside', 'between', 'by', 'down', 'during', 'except', 'for', 'from', 'front', 'inside', 'instead', 'into', 'like', 'near', 'of', 'off', 'on', 'onto', 'top', 'out', 'outside', 'over', 'past', 'since', 'through', 'to', 'toward', 'under', 'underneath', 'until', 'up', 'upon', 'with', 'within', 'without']
        wh = ['why', 'what', 'how', 'Who', 'whoever', 'whom', 'whomever', 'whose', 'which']
        pronoun = ['i', 'me', 'you', 'she', 'her', 'he', 'him', 'it', 'we', 'us', 'you', 'they', 'them', 'my', 'mine', 'your', 'yours', 'hers', 'his', 'its', 'yours', 'ours', 'theirs', 'myself', 'yourself', 'himself', 'herself', 'itself', 'all', 'another', 'any', 'anybody', 'anyone', 'anything', 'both', 'each', 'either', 'everybody', 'everyone', 'everything', 'few', 'many', 'neither', 'nobody', 'none', 'nothing', 'one', 'several', 'some', 'somebody', 'someone', 'something', 'this', 'that', 'these', 'those']
        # Removing Wh Question
        wh_q=''
        for ser in text:
            inflag = 0
            for w in wh:
                if w == ser:
                    inflag = 1
            if inflag == 0:
                wh_q = wh_q + ser + ' '


        # Removing Prepostion
        wh_q = wh_q.split()
        prep_q = ''
        for ser in wh_q:
            inflag = 0
            for prep in preposition:
                if ser == prep:
                    inflag = 1
            if inflag == 0:
                prep_q = prep_q + ser + ' '


        # Removing Pronoun
        prep_q = prep_q.split()
        pro_q = ''
        for ser in prep_q:
            inflag = 0
            for pro in pronoun:
                if ser == pro:
                    inflag = 1
            if inflag == 0:
                pro_q = pro_q + ser + ' '

        text = pro_q
        text = text.split()
        data = pro_q.strip()
        
        '''
        flag = 0
        answer = 0
        wikiflag = 0
        ans = 0
        asser = 0
        nege = 0
        posi = 0
        #Assertive Section
        for ser in text:
            for ass in assertor:
                if ser == ass and flag == 0 or data.find(ass) != -1 and flag == 0:
                    asser = 1
                    flash('Assertive', 'Answer')
                    flag=1
        if asser == 1:
            display_ans = ''
            max_value = int(max_check * 0.8 + 0.5) # counting the no of hits
            abc = models.Positive.query.all()
            for a in abc:
                # Noun
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                ent = re.findall(r'NE\s(.*?)/', str(name))
                chunkGram = r"""Noun: {<NN\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                NNnoun = chunkParser.parse(p)
                db_noun = re.findall(r'Noun\s(.*?)/', str(NNnoun))

                # Verbs
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                chunkGram = r"""Verb: {<VB\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                VBverb = chunkParser.parse(p)
                db_verb = re.findall(r'Verb\s(.*?)/', str(VBverb))

                # Adjective
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                chunkGram = r"""Verb: {<JJ\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                JJAdj = chunkParser.parse(p)
                db_adj = re.findall(r'Verb\s(.*?)/', str(JJAdj))


                # Number
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                chunkGram = r"""Number: {<CD\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                CDNumber = chunkParser.parse(p)
                db_number = re.findall(r'Number\s(.*?)/', str(CDNumber))

                db_total = db_noun + db_adj + db_verb + db_number
                db_total = list(set(db_total))

                count = 0
                for ip in ip_total:
                    for dbs in db_total:
                        db_plural = re.escape(dbs) + 's?'
                        ip_plural = re.escape(ip) + 's?'
                        if re.match(db_plural, ip,flags=0|re.IGNORECASE):
                            count = count + 1
                        if re.match(ip_plural,dbs,flags=0|re.IGNORECASE):
                            count = count + 1
                        if ip == dbs:
                            count = count - 1

                if max_value < count:
                    display_ans = a.answer
                    max_value = count

            if display_ans == '':
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash('Answer not in database... Lets search Wikipedia Database', 'Answer')
                wikiflag = 1
            else:
                extra = 'Please be more sure about the problem you are facing so that we can provide you with precise answers. According to me the most relevant solution to your problem is: '
                display_ans = extra + '\n' + display_ans
                flash(display_ans, 'Answer')

             
            """for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash('Answer not in database... Lets search Wikipedia Database', 'Answer')
                wikiflag = 1
                #return redirect ('http://www.lmgtfy.com/?q=' + data)
            else:
                finalans=a.answer
                flash(a.answer, 'Answer')"""

        #Negative Section
        if asser != 1:
            for ser in text:          
                for neg in negator:
                    if ser == neg and flag == 0 or data.find(neg) != -1 and flag == 0:
                        nege = 1
                        flash('Negative', 'Answer')
                        flag = 1
            if nege == 1:
                display_ans = ''
                max_value = int(max_check * 0.8 + 0.5) # counting the no of hits
                abc = models.Negative.query.all()
                for a in abc:
                    # Noun
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    ent = re.findall(r'NE\s(.*?)/', str(name))
                    chunkGram = r"""Noun: {<NN\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    NNnoun = chunkParser.parse(p)
                    db_noun = re.findall(r'Noun\s(.*?)/', str(NNnoun))

                    # Verbs
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    chunkGram = r"""Verb: {<VB\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    VBverb = chunkParser.parse(p)
                    db_verb = re.findall(r'Verb\s(.*?)/', str(VBverb))

                    # Adjective
                    # Adjectives: the grammar label says "Verb" but the
                    # <JJ\w?> pattern matches adjective POS tags; the
                    # findall below keys on that same "Verb" label, so the
                    # mislabel is internally consistent.
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    chunkGram = r"""Verb: {<JJ\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    JJAdj = chunkParser.parse(p)
                    db_adj = re.findall(r'Verb\s(.*?)/', str(JJAdj))

                    # Numbers: chunk CD (cardinal number) tags from the
                    # stored question.
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    chunkGram = r"""Number: {<CD\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    CDNumber = chunkParser.parse(p)
                    db_number = re.findall(r'Number\s(.*?)/', str(CDNumber))

                    # Merge all extracted keyword lists and de-duplicate.
                    db_total = db_noun + db_adj + db_verb + db_number
                    db_total = list(set(db_total))

                    # Score this stored question against the user's input
                    # keywords (ip_total is built earlier in this view
                    # function, outside this chunk). Each case-insensitive
                    # singular/plural prefix match in either direction
                    # adds 1; an exact match triggers both prefix tests
                    # and then subtracts 1, netting +1 overall.
                    count = 0
                    for ip in ip_total:
                        for dbs in db_total:
                            db_plural = re.escape(dbs) + 's?'
                            ip_plural = re.escape(ip) + 's?'
                            if re.match(db_plural, ip,flags=0|re.IGNORECASE):
                                count = count + 1
                            if re.match(ip_plural,dbs,flags=0|re.IGNORECASE):
                                count = count + 1
                            if ip == dbs:
                                count = count - 1

                    # Track the best-scoring stored answer so far;
                    # max_value starts at the hit threshold set before
                    # the loop, so only answers above threshold win.
                    if max_value < count:
                        display_ans = a.answer
                        max_value = count

                # answer == 1 iff some stored answer beat the threshold.
                if display_ans == '':
                    answer = 0
                else:
                    answer = 1

                # No database hit: set wikiflag so the Wikipedia section
                # further down runs as a fallback.
                if answer == 0:
                    flash('Answer not in database... Lets search Wikipedia Database', 'Answer')
                    wikiflag = 1
                else:
                    flash(display_ans, 'Answer')

                # NOTE(review): the triple-quoted string below is dead
                # code kept as an inert expression statement (an older
                # substring-matching implementation); left byte-identical.
                """for a in abc:
                    if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                        ans = 1
                        break
                if ans == 0:
                    answer = 0
                else:
                    answer = 1

                if answer == 0:
                    flash('Answer not in database... Lets search Wikipedia Database', 'Answer')
                    wikiflag = 1
                    #return redirect ('http://www.lmgtfy.com/?q=' + data)
                else:
                    finalans=a.answer
                    flash(a.answer, 'Answer')"""

        # Positive Section: runs when the input was classified neither as
        # assertive (asser) nor negative (nege) earlier in this function,
        # and no earlier section already answered (flag == 0). Matches the
        # input keywords against every question stored in the Positive
        # table using the same POS-chunk scoring as the other sections.
        if asser != 1 and nege != 1:
            if flag == 0:
                data = form.openid.data.lower()
                flash('Positive', 'Answer')
                flag = 1
                display_ans = ''
                # Hit threshold: 80% of max_check (rounded); max_check is
                # computed earlier in this view function, outside this chunk.
                max_value = int(max_check * 0.8 + 0.5) # counting the no of hits
                abc = models.Positive.query.all()
                for a in abc:
                    # Nouns: chunk NN* tags from the stored question.
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    # ent (named entities) is extracted but unused below.
                    ent = re.findall(r'NE\s(.*?)/', str(name))
                    chunkGram = r"""Noun: {<NN\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    NNnoun = chunkParser.parse(p)
                    db_noun = re.findall(r'Noun\s(.*?)/', str(NNnoun))

                    # Verbs: chunk VB* tags.
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    chunkGram = r"""Verb: {<VB\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    VBverb = chunkParser.parse(p)
                    db_verb = re.findall(r'Verb\s(.*?)/', str(VBverb))

                    # Adjectives: JJ* tags, chunked under the (mislabeled
                    # but internally consistent) "Verb" grammar label.
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    chunkGram = r"""Verb: {<JJ\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    JJAdj = chunkParser.parse(p)
                    db_adj = re.findall(r'Verb\s(.*?)/', str(JJAdj))

                    # Numbers: CD (cardinal number) tags.
                    tokenized = nltk.word_tokenize(a.question)
                    p = nltk.pos_tag(tokenized)
                    name = nltk.ne_chunk(p, binary=True)
                    chunkGram = r"""Number: {<CD\w?>} """
                    chunkParser = nltk.RegexpParser(chunkGram)
                    CDNumber = chunkParser.parse(p)
                    db_number = re.findall(r'Number\s(.*?)/', str(CDNumber))

                    # Merge keyword lists and de-duplicate.
                    db_total = db_noun + db_adj + db_verb + db_number
                    db_total = list(set(db_total))

                    # Score: +1 per case-insensitive singular/plural prefix
                    # match in either direction; exact match nets +1
                    # (both prefix hits fire, then -1).
                    count = 0
                    for ip in ip_total:
                        for dbs in db_total:
                            db_plural = re.escape(dbs) + 's?'
                            ip_plural = re.escape(ip) + 's?'
                            if re.match(db_plural, ip,flags=0|re.IGNORECASE):
                                count = count + 1
                            if re.match(ip_plural,dbs,flags=0|re.IGNORECASE):
                                count = count + 1
                            if ip == dbs:
                                count = count - 1

                    # Keep the best-scoring answer above threshold.
                    if max_value < count:
                        display_ans = a.answer
                        max_value = count

                if display_ans == '':
                    answer = 0
                else:
                    answer = 1

                # No hit: defer to the Wikipedia fallback section below.
                if answer == 0:
                    flash('Answer not in database... Lets search Wikipedia Database', 'Answer')
                    wikiflag = 1
                else:
                    flash(display_ans, 'Answer')

                # NOTE(review): dead code preserved as an inert string
                # literal (older substring-matching implementation).
                """abc = models.Positive.query.all()
                for a in abc:
                    if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                        ans = 1
                        break
                if ans == 0:
                    answer = 0
                else:
                    answer = 1

                if answer == 0:
                    flash('Answer not in database... Lets search Wikipedia Database', 'Answer')
                    wikiflag = 1
                    #return redirect ('http://www.lmgtfy.com/?q=' + data)
                else:
                    finalans=a.answer
                    flash(a.answer, 'Answer')"""

        # Wikipedia Section: fallback when no prior section answered
        # (wikiflag set above). First tries cached entries in the local
        # Wikipedia table with the same POS-chunk scoring; if that fails,
        # queries the live Wikipedia API via the `wikipedia` package.
        ans = 0
        if wikiflag == 1:

            display_ans = ''
            max_value = int(max_check * 0.8 + 0.5) # counting the no of hits
            abc = models.Wikipedia.query.all()
            for a in abc:
                # Nouns: chunk NN* tags from the cached question.
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                # ent (named entities) is extracted but unused below.
                ent = re.findall(r'NE\s(.*?)/', str(name))
                chunkGram = r"""Noun: {<NN\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                NNnoun = chunkParser.parse(p)
                db_noun = re.findall(r'Noun\s(.*?)/', str(NNnoun))

                # Verbs: chunk VB* tags.
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                chunkGram = r"""Verb: {<VB\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                VBverb = chunkParser.parse(p)
                db_verb = re.findall(r'Verb\s(.*?)/', str(VBverb))

                # Adjectives: JJ* tags under the (mislabeled but
                # internally consistent) "Verb" grammar label.
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                chunkGram = r"""Verb: {<JJ\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                JJAdj = chunkParser.parse(p)
                db_adj = re.findall(r'Verb\s(.*?)/', str(JJAdj))

                # Numbers: CD (cardinal number) tags.
                tokenized = nltk.word_tokenize(a.question)
                p = nltk.pos_tag(tokenized)
                name = nltk.ne_chunk(p, binary=True)
                chunkGram = r"""Number: {<CD\w?>} """
                chunkParser = nltk.RegexpParser(chunkGram)
                CDNumber = chunkParser.parse(p)
                db_number = re.findall(r'Number\s(.*?)/', str(CDNumber))

                # Merge keyword lists and de-duplicate.
                db_total = db_noun + db_adj + db_verb + db_number
                db_total = list(set(db_total))

                # Same scoring scheme as the other sections: +1 per
                # case-insensitive singular/plural prefix match in either
                # direction; exact match nets +1.
                count = 0
                for ip in ip_total:
                    for dbs in db_total:
                        db_plural = re.escape(dbs) + 's?'
                        ip_plural = re.escape(ip) + 's?'
                        if re.match(db_plural, ip,flags=0|re.IGNORECASE):
                            count = count + 1
                        if re.match(ip_plural,dbs,flags=0|re.IGNORECASE):
                            count = count + 1
                        if ip == dbs:
                            count = count - 1

                if max_value < count:
                    display_ans = a.answer
                    max_value = count

            if display_ans == '':
                answer = 0
            else:
                answer = 1

            # NOTE(review): dead code preserved as an inert string
            # literal (older substring-matching implementation).
            """abc = models.Wikipedia.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1"""

            # No cached hit: query live Wikipedia. If search returns
            # nothing, punt to lmgtfy with the original (uncased) input
            # data1, which is set earlier in this view function.
            if answer == 0:
                flash('Answer not in Wikipedia database... Lets search Wikipedia Internet', 'Answer')
                ny = wikipedia.search(data)
                if ny == []:
                    return redirect ('http://www.lmgtfy.com/?q=' + data1)
                else:
                    try:
                        ny1 = wikipedia.summary(data1, chars=0, auto_suggest=True, redirect=True, sentences=3)
                        max_value = int(max_check * 0.8 + 0.5)
                        # NOTE(review): encode() yields bytes on Python 3,
                        # which word_tokenize cannot handle — this path
                        # presumably targets Python 2; confirm runtime.
                        ip_wiki = ny1.encode('ascii','ignore')
                        # Re-run the same POS-chunk extraction over the
                        # fetched summary to score it against the input.
                        # Nouns
                        tokenized = nltk.word_tokenize(ip_wiki)
                        p = nltk.pos_tag(tokenized)
                        name = nltk.ne_chunk(p, binary=True)
                        ent = re.findall(r'NE\s(.*?)/', str(name))
                        chunkGram = r"""Noun: {<NN\w?>} """
                        chunkParser = nltk.RegexpParser(chunkGram)
                        NNnoun = chunkParser.parse(p)
                        db_noun = re.findall(r'Noun\s(.*?)/', str(NNnoun))

                        # Verbs
                        tokenized = nltk.word_tokenize(ip_wiki)
                        p = nltk.pos_tag(tokenized)
                        name = nltk.ne_chunk(p, binary=True)
                        chunkGram = r"""Verb: {<VB\w?>} """
                        chunkParser = nltk.RegexpParser(chunkGram)
                        VBverb = chunkParser.parse(p)
                        db_verb = re.findall(r'Verb\s(.*?)/', str(VBverb))

                        # Adjectives (JJ* under the "Verb" label)
                        tokenized = nltk.word_tokenize(ip_wiki)
                        p = nltk.pos_tag(tokenized)
                        name = nltk.ne_chunk(p, binary=True)
                        chunkGram = r"""Verb: {<JJ\w?>} """
                        chunkParser = nltk.RegexpParser(chunkGram)
                        JJAdj = chunkParser.parse(p)
                        db_adj = re.findall(r'Verb\s(.*?)/', str(JJAdj))

                        # Numbers (CD tags)
                        tokenized = nltk.word_tokenize(ip_wiki)
                        p = nltk.pos_tag(tokenized)
                        name = nltk.ne_chunk(p, binary=True)
                        chunkGram = r"""Number: {<CD\w?>} """
                        chunkParser = nltk.RegexpParser(chunkGram)
                        CDNumber = chunkParser.parse(p)
                        db_number = re.findall(r'Number\s(.*?)/', str(CDNumber))

                        db_total = db_noun + db_adj + db_verb + db_number
                        db_total = list(set(db_total))

                        count = 0
                        for ip in ip_total:
                            for dbs in db_total:
                                db_plural = re.escape(dbs) + 's?'
                                ip_plural = re.escape(ip) + 's?'
                                if re.match(db_plural, ip,flags=0|re.IGNORECASE):
                                    count = count + 1
                                if re.match(ip_plural,dbs,flags=0|re.IGNORECASE):
                                    count = count + 1
                                if ip == dbs:
                                    count = count - 1

                        # Note: threshold test is <= here (the DB sections
                        # use <), so a score equal to the threshold counts.
                        if max_value <= count:
                            display_ans = ny1

                        if display_ans == '':
                            answer = 0
                        else:
                            answer = 1

                        # Either way the summary is flashed; the precise
                        # branch also flashes the source page URL.
                        # (Runtime strings below contain typos — "Interet",
                        # "of very trival" — left untouched: changing them
                        # would change behavior.)
                        if answer == 0:
                            flash('Answer not precise in wikipedia Interet', 'Answer')
                            flash(ny1, 'Answer')
                            wikiflag = 1
                        else:
                            display_ans=ny1
                            flash(ny1, 'Answer')
                            ny2 = wikipedia.page(data1)
                            flash('Source: '+ ny2.url, 'Answer')
                            #u = models.Wikipedia(question=data1, answer=ny1)
                            #db.session.add(u)
                            #db.session.commit()
                    # Broad catch: wikipedia raises DisambiguationError /
                    # PageError / network errors; all are reported to the
                    # user rather than crashing the request.
                    except Exception as inst:
                        flash('Your question is either out of scope of very trival for me to answer  ' + str(inst), 'Answer')
                        display_ans = 'Your question is either out of scope of very trival for me to answer'
            else:
                # Cached Wikipedia answer beat the threshold.
                flash(display_ans, 'Answer')
        # Disabled chat-history display and persistence (kept for reference).
        #s = models.Chats.query.all()
        #for chat in reversed(s):
            #flash('Question: ' + chat.question, 'Display')
            #flash('Answer: ' + chat.answer , 'Display')
            #flash('.', 'Display')
        #u = models.Chats(question=data1, answer=display_ans)
        #db.session.add(u)
        #db.session.commit() 
        # POST handled: redirect (Post/Redirect/Get) so the flashed
        # messages are shown on /test.
        return redirect('/test')
    # GET (or failed form validation): render the question form.
    return render_template("index2.html",
        title = 'ChatterBot',
        form = form)