def test_get_quotes(self):
    """Every quote recorded for the author must appear in the fetched set."""
    language = self.author.language
    fetch_quotes = wikiquotes.get_quotes(self.author.name, language)
    # A plain subset check -- set(self.author.quotes) <= set(fetch_quotes) --
    # would answer yes/no, but the set difference tells us WHICH quotes are
    # missing, which makes the failure message actionable.  (The original
    # evaluated the subset expression and discarded the result: dead code,
    # removed here.)
    not_in_fetch_quotes = set(self.author.quotes) - set(fetch_quotes)
    self.assertSetEqual(
        set([]),
        not_in_fetch_quotes,
        msg="Author: {}. Quotes not fetch: {}".format(
            self.author.name, not_in_fetch_quotes))
def test_quotes_length(self):
    """The fetched quote list must cover all known quotes, without duplicates."""
    language = self.author.language
    fetch_quotes = wikiquotes.get_quotes(self.author.name, language)
    # Quotes may be added upstream over time; we assume none are ever
    # removed, so the fetched count must be at least the recorded count.
    # This keeps the test from breaking whenever a new quote appears.
    self.assertTrue(len(self.author.quotes) <= len(fetch_quotes))
    # Every fetched quote must be unique.
    self.assertEqual(len(fetch_quotes), len(set(fetch_quotes)))
def get_quotes(person, file_name):
    """Fetch English quotes for *person* and write them to *file_name*.

    Some quotes arrive prefixed with a speaker attribution ("Name: quote");
    such prefixes are stripped.  Output is UTF-8, one quote per line.
    """
    # Fetch BEFORE opening the file: the original opened (and truncated)
    # the output file first, so a failed fetch left an empty file behind.
    quotes = wikiquotes.get_quotes(person, 'english')
    names = person.split(' ')
    cleaned = []
    for quote in quotes:
        prefix = quote[0:len(person)]
        # Strip a leading "Speaker Name: " attribution when any part of the
        # person's name appears in the prefix window.  (The original re-split
        # the quote once per matching name; one pass is enough.)
        if ':' in prefix and any(name in prefix for name in names):
            quote = quote.split(': ', 1)[-1].strip()
        cleaned.append(quote)
    with codecs.open(file_name, 'wb', 'utf-8') as f:
        f.writelines([q + '\n' for q in cleaned])
def qfind(query):
    """Search Wikiquote for *query* as an author name, then as quote text.

    Returns one of:
      ["Success", quote, author, remaining_authors]
      ["NoAuthorFound", random_title_suggestions]
      ["NoQuoteFound", author_list]
    """
    Authorlist = findtitles(query=query)
    if Authorlist == []:
        return ["NoAuthorFound", wikiquote.random_titles(max_titles=7)]
    Authorlist.sort()
    # Pass 1: exact (case-insensitive) author-name match.
    # BUG FIX: the original removed items from Authorlist while iterating
    # over it, which silently skips the element after each removal.
    # Iterating over a copy preserves the intended "visit every author"
    # behaviour while still pruning the live list.
    for i in list(Authorlist):
        if query.lower() == i.lower():
            qt = ranlist(i)
            Authorlist.remove(i)
            if qt != []:
                return ["Success", choice(qt), i, Authorlist]
    # Pass 2: partial (substring either way) author-name match.
    for i in list(Authorlist):
        if query.lower() in i.lower() or i.lower() in query.lower():
            qt = ranlist(i)
            Authorlist.remove(i)
            if qt != []:
                return ["Success", choice(qt), i, Authorlist]
    # Pass 3: search the query inside the quotes of each remaining author,
    # merging results from both quote libraries.
    Quotelist = []
    Resultlist = []
    for i in Authorlist:
        try:
            Quotelist = wikiquotes.get_quotes(i, "english")
        except Exception:
            # Best-effort: keep whatever Quotelist already holds.
            pass
        try:
            c = wikiquote.quotes(i, max_quotes=5)
        except Exception:
            c = []
        for k in c:
            if k not in Quotelist:
                Quotelist.append(k)
        for j in Quotelist:
            if query.lower() in j.lower():
                Resultlist.append([j, i])
    if Resultlist != []:
        Resultlist.sort()
        Quote = choice(Resultlist)
        Authorlist.remove(Quote[1])
        return ["Success", Quote[0], Quote[1], Authorlist]
    return ["NoQuoteFound", Authorlist]
def create_new_persona_quotes():
    """Flask view: read a persona name from the form and seed the quote model."""
    # Reject requests that do not carry the expected form field.
    if not request.form or 'input' not in request.form:
        abort(400)
    persona = request.form['input']
    if not persona:
        # BUG FIX: the original built this JSON error message but never
        # returned it, so an empty persona fell straight through to init().
        return json.dumps('Prompt should not be empty!')
    init(wikiquotes.get_quotes(persona, "english"))
    print("New persona: {}".format(persona))
    session['persona'] = persona
    return ''
def test_random_quote(self):
    """random_quote must eventually produce two different quotes when the
    author has more than one quote available."""
    language = self.author.language
    fetch_quotes = wikiquotes.get_quotes(self.author.name, language)
    quote_count = len(fetch_quotes)
    if quote_count == 1:
        # Only one quote exists, so randomness cannot be observed.
        return
    if quote_count > 1:
        # Draw pairs of random quotes; a single differing pair proves the
        # selection is not stuck on one value.
        for _ in range(random_tries):
            first_pick = wikiquotes.random_quote(self.author.name, language)
            second_pick = wikiquotes.random_quote(self.author.name, language)
            if first_pick != second_pick:
                return
        self.fail("Incorrect random quotes for {}".format(self.author.name))
def index(request):
    """ http://i.imgur.com/tbVr3WT.jpg """
    # Band name: the title of a random Wikipedia article.
    title_response = requests.post('https://en.wikipedia.org/w/api.php', {
        'action': 'query',
        'list': 'random',
        'format': 'json'
    })
    random_title = title_response.json()['query']['random'][0]['title']
    # original spec dictates that we use the last quote from a random page,
    # but to minimize api calls we might just use random quotes
    random_quotes = []
    while len(random_quotes) <= 0:
        quote_response = requests.post('https://en.wikiquote.org/w/api.php', {
            'action': 'query',
            'list': 'random',
            'format': 'json'
        })
        random_quotes = wikiquotes.get_quotes(
            quote_response.json()['query']['random'][0]['title'], 'english')
    random_quote = random_quotes[-1].split()[-1]
    """
    the original spec dictates that you take the 5th image from
    flickr.com/explore/interesting/7days/  not sure how to make the api
    return a fresh page each time, so we're going to get creative with it
    """
    photo_response = requests.post('https://api.flickr.com/services/rest/', {
        'method': 'flickr.interestingness.getList',
        'api_key': settings.FLICKR_API_KEY,
        'format': 'json',
        'nojsoncallback': 1
    })
    photos = photo_response.json()['photos']['photo']
    # BUG FIX: the original indexed with random.randint(0, 101), which can
    # exceed the number of photos returned (default page size is 100) and
    # raise IndexError; bound the index by the actual list length.
    random_photo_json = photos[random.randint(0, len(photos) - 1)]
    # use ** to unpack dict
    random_photo_url = ('https://farm{farm}.staticflickr.com/'
                        '{server}/{id}_{secret}.jpg').format(**random_photo_json)
    return render(request, 'album_cover/index.html', {
        'random_title': random_title,
        'random_quote': random_quote,
        'random_photo': random_photo_url
    })
# Fragment (truncated): build captions from image tags, then tone-analyse them.
li = []
for d in data['tags']:
    li.append(d['name'])
print(li)
cap = []
count=0
for i in li:
    count+=1
    # Only the first tag is used (count < 2): one quote becomes the caption.
    if(count<2):
        dd = wikiquotes.get_quotes(i, "english")
        cap.append(dd[0])
print(cap)
tone_analyzer = ToneAnalyzerV3(
    username='******',
    password='******',
    version='2017-09-26')
for c in cap:
    utterances = [{'text': c, 'user': '******'}]
    rtone= tone_analyzer.tone_chat(utterances)
    #print(type(rtone))
    # Print the dominant tone name of the first utterance.
    print(rtone["utterances_tone"][0]["tones"][0]["tone_name"])
    # NOTE(review): the line below is cut off mid-statement in this fragment
    # (no colon/body), and 'srtone' looks like a typo for 'rtone' --
    # confirm against the full file before relying on this code.
    if(srtone["utterances_tone"][0]["tones"])
def test_get_quotes_encoding(self):
    """Quotes fetched for a randomly chosen author must all be unicode."""
    random_author = Author.random_author()
    fetched = wikiquotes.get_quotes(random_author.name, random_author.language)
    for single_quote in fetched:
        self.assertTrue(language_manager.is_unicode(single_quote))
# from textblob import TextBlob

# Configuration -- Twitter API credentials come from the local config module.
consumer_key = config.consumer_key
consumer_secret = config.consumer_secret
access_token = config.access_token
access_token_secret = config.access_token_secret

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

# List of Speakers
leaders = [
    'Eric Thomas', 'Tony Rob', 'Nelson Mandela', 'Zig Ziglar',
    'Jim Rohn', 'Wayne Dyer', 'Robin Sharman', 'Brian Tracy'
]

try:
    for leader in leaders:
        # PERF FIX: fetch the quote list once per leader.  The original
        # called wikiquotes.get_quotes() again for EVERY index, repeating
        # the full network fetch per quote.
        quotes = wikiquotes.get_quotes(leader, 'english')
        for quote in quotes:
            data = quote + ' -~ ' + leader
            # BUG FIX: the original truncated to 270 chars whenever the text
            # exceeded 75, so mid-length tweets got a spurious "..." suffix.
            # Compare against the same 270-char limit used by the slice
            # (280-char tweet cap minus headroom for the ellipsis).
            info = (data[:270] + '...') if len(data) > 270 else data
            api.update_status(info)
            # One tweet per day.
            time.sleep(86400)
except Exception as e:
    print('Error:' + str(e))
# Fragment: AIY Voice Kit assistant -- fetches London weather, then listens
# for speech and answers based on keywords.
internetResult = weatherByCity('London').json()
temp = internetResult['main']['temp']
city = internetResult['name']
country = internetResult['sys']['country']
weather = internetResult['weather'][0]['main']
nice_or_not = False  # flips to True once the user mentions kindness

led = aiy.voicehat.get_led()
led.set_state(aiy.voicehat.LED.ON)
recognizer = aiy.cloudspeech.get_recognizer()
aiy.audio.say("I can hear every word you say.")
aiy.audio.get_recorder().start()

# Pre-fetch English quotes about kindness before entering the listen loop.
QUOTES = wikiquotes.get_quotes('kindness', 'english')

while True:
    text = recognizer.recognize()
    print(text)
    if text is None:
        # Nothing was recognised; keep listening silently.
        #aiy.audio.say("Sorry I didn't hear anything.")
        continue
    if 'lesson' in text:
        # Response depends on whether kindness mode has been triggered yet.
        if nice_or_not == False:
            aiy.audio.say("You can't teach me anything")
        else:
            aiy.audio.say("Maybe another time")
    elif 'kindness' in text:
        aiy.audio.say("Error Error,nice Brian online!")
        nice_or_not = True
    # NOTE(review): the elif chain appears to continue past this fragment.
def get_image(request):
    """Django view: accept an uploaded image, tag it with the MS Vision API,
    fetch a quote for the first tag, tone-analyse it and derive hashtags.

    Returns a JSON list of {"quote", "sent", "hashtag"} dicts on POST;
    implicitly returns None for other methods (unchanged from original).
    """
    if request.method == 'POST':
        img = request.FILES.get("image")
        path = default_storage.save('mrcaption/image', ContentFile(img.read()))
        print(request.FILES)
        headers = {
            'Ocp-Apim-Subscription-Key': '2593b2ee7a9345c7823c2dd3df0f028d',
            "Content-Type": "application/octet-stream",
        }
        # BUG FIX: the original used open(path, "rb").read() and never closed
        # the handle; the context manager releases it deterministically.
        with open(path, "rb") as image_file:
            image_data = image_file.read()
        vision_url = ("https://westcentralus.api.cognitive.microsoft.com/"
                      "vision/v1.0/analyze?visualFeatures=Categories,Tags&language=en")
        response = requests.post(vision_url, headers=headers, data=image_data)
        data = response.json()

        # Tag names reported by the vision API.
        li = [d['name'] for d in data['tags']]
        print(li)

        # One caption: a quote for the first tag only (the original's
        # "count < 2" guard), with newlines flattened to spaces.
        cap = []
        for tag in li[:1]:
            dd = wikiquotes.get_quotes(tag, "english")
            cap.append(dd[0].replace('\n', ' '))
        print(cap)

        tone_analyzer = ToneAnalyzerV3(
            username='******',
            password='******',
            version='2017-09-26')
        client = textapi.Client(" 016eb657", " 590dff367360e75235f3753b78ef1488")
        ret = []
        for c in cap:
            utterances = [{'text': c, 'user': '******'}]
            rtone = tone_analyzer.tone_chat(utterances)
            sent = rtone["utterances_tone"][0]["tones"]
            sentiment = client.Hashtags({'text': c})
            hashtag = sentiment['hashtags']
            ret.append({"quote": c, "sent": sent, "hashtag": hashtag})
        return JsonResponse(ret, safe=False)
# Fragment: interactive selection of a Wikiquote search result by index,
# feeding the chosen author's quotes into NLTK text processing.
else:
    print("No results found in Wikiquote database.")
    print("Please type again an author or a concept")
fileinput.close()
print()
print("Please select one of the results by index starting from 0 to " +
      str(len(search_result) - 1))
for line in fileinput.input():
    try:
        index = int(line)
        print()
        print(str(index) + " ++++++++++++++ " + search_result[index] +
              " +++++++++++++++++++++++")
        print()
        quotes_result = wikiquotes.get_quotes(search_result[index], language)
        if len(quotes_result) > 0:
            # process result and prepare for NLTK examples
            input_sentences = []
            for quote in quotes_result:
                # Drop guillemets and the "[no sources]" marker before analysis.
                input_sentences.append(
                    quote.replace("«", "").replace("»", "").replace("[no sources]", ""))
            process_text(input_sentences, number_of_words)
            break
        else:
            print("No quotes are available for " + line)
            print("Please select again one of the results by index starting from 0 to " +
                  str(len(search_result) - 1))
    # NOTE(review): the except clause of this try is cut off in this
    # fragment -- presumably it handles a non-integer line; confirm
    # against the full file.
# Fragment: tail of one branch plus two later branches of an unseen
# what_type if/elif menu chain (the opening "if" is outside this view).
        else:
            cli.delete_last_lines(1)
            print(success_logo + "User found")
            break
elif (what_type == what_c[2]):
    # Quotes have been chosen
    cli.delete_last_lines(len(what_c) + 1)
    # Q : Which author?
    while True:
        author_a = cli.type_answer_menu('Of which author?', 'None')
        author_name = author_a.get('answer')
        file_name = '../results/quotes_' + author_name + '.txt'  # output file
        print(info_logo + "Searching for the author...")
        # Fetch the author's quotes just to count them; at least 20 are
        # required before the author is accepted.
        n_quotes = len(wikiquotes.get_quotes(author_name, 'english'))
        if (n_quotes < 20):
            cli.delete_last_lines(1)
            print(
                warning_logo +
                "This author doesn't have enough quotes. Please try another one."
            )
            continue
        else:
            cli.delete_last_lines(1)
            print(success_logo + "Author found")
            break
elif (what_type == what_c[3]):
    # Movies titles have been chosen
    cli.delete_last_lines(len(what_c) + 1)
    # NOTE(review): this branch continues past the fragment.
import wikiquotes
from ohbotMac import ohbot
import random

# Put the robot in a known state before issuing commands.
ohbot.reset()
ohbot.wait(1)

print("Sending the request")
theList = wikiquotes.get_quotes("Musk", "English")
if theList:
    ohbot.reset()
    ohbot.wait(1)
    print("Success")
    print(theList)
    print(len(theList))
    # IDIOM: random.choice(seq) replaces the original
    # seq[random.randint(0, len(seq) - 1)] -- identical distribution.
    text = random.choice(theList)
    print(text)
    ohbot.say(text)
    ohbot.wait(1)
else:
    print("Problem connecting to the server")

print("Sending the second request")
# quote_of_the_day returns (quote, author); keep only the quote text.
quoteOfTheDay = wikiquotes.quote_of_the_day("English")
newText = quoteOfTheDay[0]