def main(): try: mc = MarkovChain() text = mc.add_file('ascii.txt') mc.generate_text(text) except ValueError: print "Oops! There's been an issue"
def main(args): mc = MarkovChain() for a in args[1::]: fd = FetchData(a) mc.add_string(fd.fetch_data()) chain = mc.generate_text() out = (" ").join(chain) print out
def main(args): mc = MarkovChain() for a in args[1::]: fd = FetchData(a) mc.add_string(fd.fetch_data()) chain = mc.generate_text() out = (" ").join(chain) print out
def write_song(doc1, doc2):
    """Generate four 4-line verses from the corpus doc1 and write them to doc2."""
    mc = MarkovChain()
    mc.add_file(doc1)
    with open(doc2, "w") as out:
        # Verses are numbered 1..4, each followed by four generated lines.
        for verse in range(1, 5):
            out.write(("Verse %s" + "\n") % (verse))
            for _ in range(4):
                words = mc.generate_text()
                out.write(" ".join(words).capitalize() + "\n")
def write_song(doc1, doc2):
    """Write four generated verses (blank-line separated) from doc1 into doc2."""
    chain = MarkovChain()
    chain.add_file(doc1)
    with open(doc2, 'w') as f:
        for verse in range(1, 5):
            f.write(('Verse %s' + '\n') % (verse))
            # Four generated, capitalized lines per verse.
            for _ in range(4):
                f.write(' '.join(chain.generate_text()).capitalize() + '\n')
            # Blank line between verses.
            f.write('\n')
def get_chain(text, num_words):
    """Return punctuated, formatted Markov output of num_words built from text.

    `text` is expected to be web-sourced plain text (no HTML).
    """
    chain = MarkovChain()
    chain.add_string(text)
    # Generate the requested number of words, then normalize the output.
    raw = chain.generate_text(num_words)
    return add_punct(format_chain(raw))
def generateLyrics(artists): allLyrics = [] if USE_DATABASE == True: # TODO return None else: for artist in artists: allLyrics += [item[1] for item in getAllLyrics(artist)] # end for #print allLyrics mc = MarkovChain(2) for song in allLyrics: mc.add_string(song) # end newLyrics = mc.generate_text(80) print newLyrics return newLyrics
'''
Created on June 2, 2018

@author: vanessa
'''
from markov_python.cc_markov import MarkovChain

# Train on the song-lyrics corpus and build one generated lyric string.
mc = MarkovChain()
mc.add_file('songtexte.txt')
lyric = ' '.join(mc.generate_text())
from markov_python.cc_markov import MarkovChain import fetch_data hounds = MarkovChain() hounds.add_file('D:\Projects\holmes_markov\Hounds.txt') output = hounds.generate_text() output = ' '.join(output) output = output.capitalize() + "." print output
from markov_python.cc_markov import MarkovChain mc = MarkovChain() mc.add_file("story.txt") print mc.generate_text()
from markov_python.cc_markov import MarkovChain mc = MarkovChain() mc.add_file('/Users/mobpro/desktop/winereviews.txt') mc.add_string("red") print str(mc.generate_text(10))
from markov_python.cc_markov import MarkovChain

# Build the chain from the wine-review corpus plus the seed word "red".
mc = MarkovChain()
mc.add_file('C:/Users/dell/PycharmProjects/markov_chain/Wine Reviews.txt')
mc.add_string("red")

word_lst_1 = mc.generate_text(10)
print (word_lst_1)

# Drop purely numeric tokens (idiom fix: was `.isdigit() == False`).
word_lst_2 = [word for word in word_lst_1 if not word.isdigit()]
print(word_lst_2)

not_allowed = ["cases", "made", "has", "while"]
not_all_end = ["with", "and", "or", "on", "direct", "that", "are", "now",
               "through", "the", "supports", "hard", "a"]

# Drop blacklisted words anywhere in the output.
word_lst_3 = [word for word in word_lst_2 if word not in not_allowed]
print(word_lst_3)

# Trim trailing words that should never end a sentence.  The `word_lst_3 and`
# guard fixes an IndexError the original hit if the list was emptied.
while word_lst_3 and word_lst_3[-1] in not_all_end:
    del word_lst_3[-1]
print(word_lst_3)

str1 = ' '.join(word_lst_3)
import fetch_data from markov_python.cc_markov import MarkovChain print "Welcom to MarkovChain" url = raw_input("Provide an URL:") text1 = fetch_data.getURL(url) mc = MarkovChain() mc.add_string(text1) output = mc.generate_text() print output
from markov_python.cc_markov import MarkovChain import fetch_data lyrics_5 =fetch_data.lyrics_4 #blink_data= open('') '''with open('/Users/lennykogosov/Desktop/markovproject/text.txt', 'r+') as f: text=f.read()''' my_file= open('/Users/lennykogosov/Desktop/markovproject/text.txt', 'r+') for x in lyrics_5: my_file.write(x) #my_file.close() mc= MarkovChain() mc.add_file('/Users/lennykogosov/Desktop/markovproject/text.txt') first_sample= mc.generate_text(20) print first_sample my_file.close()
#runs the project and outputs the text
from markov_python.cc_markov import MarkovChain
import fetch_data


def convert_to_file(initial):
    """Write the generated word list to Markov_Sermon.txt, space-separated.

    A trailing space after the last word is kept, matching the original output.
    """
    sermon_string = "".join(word + " " for word in initial)
    with open("Markov_Sermon.txt", "w") as f:
        f.write(sermon_string)


mc = MarkovChain()
mc.add_file("sermon_output.txt")
initial = mc.generate_text()
convert_to_file(initial)
from markov_python.cc_markov import MarkovChain q = MarkovChain() a = MarkovChain() q.add_file('/users/agaro/desktop/Talk_bot/questions.txt') a.add_file('/users/agaro/desktop/Talk_bot/answers.txt') counter = 0 while counter <= 10: question = q.generate_text(13) question = ' '.join(question) print('Bot A: ') print question answer = a.generate_text(14) print('Bot B: ') answer = ' '.join(answer) print answer counter += 1
from markov_python.cc_markov import MarkovChain
import requests
from bs4 import BeautifulSoup, Tag

# Fetch the poem page and strip tags whose text we do not want to train on.
page = requests.get('https://www.familyfriendpoems.com/print/poem/NDA3ODk=')
soup = BeautifulSoup(page.content, 'html.parser')
unwanted = (soup.find_all('p') + soup.find_all('script') + soup.find_all('b')
            + soup.find_all('title') + soup.find_all('a'))
for node in unwanted:
    node.decompose()

markov_input = soup.get_text()
mc = MarkovChain()
mc.add_string(markov_input)

# Four independently generated "stanzas", then the raw training text.
a = mc.generate_text()
b = mc.generate_text()
c = mc.generate_text()
d = mc.generate_text()
for stanza in (a, b, c, d):
    print(' '.join(stanza))
print(markov_input)
import fetch_data link = 'http://www.e-reading.club/bookreader.php/1020088/Fomina_-_Pritchi._Daosskie%2C_kitayskie%2C_dzenskie.html' #link='http://www.krotov.info/acts/01/joseph/filon_02.htm' vocabul = fetch_data.load_web_to_text(link, 'voc.txt') print type(vocabul) fetch_data.show_param(str(vocabul)) out_fileS = 'out_text_string.txt' out_file = 'out_text.txt' text_object = MarkovChain(3) text_object.add_file('voc.txt') out_text = text_object.generate_text(200) for i in out_text: try: print type(i), i.decode('utf8').encode('cp866') except: print 'Error' out_text_j = ' '.join(out_text) #with open(out_fileS,'w+') as ofile: # for i in out_text: ofile.write(i+'\n') with open(out_file, 'w+') as ofile: for i in out_text: ofile.write(i + ' ') #print out_text.decode('utf-8')
# NOTE(review): fragment of a larger script — it begins mid-try/except and
# references names (`mc`, `number`, `length`, `desired_activity`) defined
# outside this chunk, and ends inside an unterminated triple-quoted string.
# Left byte-identical; cannot be safely restructured without the rest of the file.
except ValueError: print print("Sorry, that's not a valid length.") print print("Please, try again!") length = 0 print """ generate text from the Markov Chain (call multiple times to generate additional data) and convert output to the required format """ for current in range(number): while True: try: temporary = mc.generate_text(length) formatted = " ".join(temporary) print("Passage #" + str(current + 1) + ": " + formatted) print break except UnicodeEncodeError: pass # ending message if merely generating quotes if desired_activity == "1": print print("That's all for now! Enjoy the quotes!") """ if playing a game user tries to make an educated guess regarding the period from which the mimic quotes originate
# NOTE(review): fragment of a larger scraping script — it uses `soup` and
# `song1`, which are defined outside this chunk. Left byte-identical;
# restructuring would require the missing context.
rough = soup.find_all(itemprop='description') results2 = list(map(str, rough[0].contents)) stripped_results2 = [] for line in results2[:-5]: if line != "<br/>": if line: stripped_results2.append(line.strip()) song2 = ''.join(stripped_results2) print("*** New song with more Cowbell!", ) #print (stripped_results2) mc = MarkovChain() mc.add_string(song1) mc.add_string(song2) x = mc.generate_text() x = ' '.join(x) print(x) # This section pulls out the breaks # new_rough = rough.replace("<br/>", "") # print(new_rough) # This printed the lyrics but included div class and <br> print(soup.find_all(itemprop='description')) # This found null print(soup.find_all('lyrics')) # from jerry. Said worked on 2.7 -- print ("".join(map(str, div.contents))) #div = soup.find('div', id='lyrics') #soup = BeautifulSoup(r.content, builder=HTMLParserTreeBuilder()) #rough=soup.find_all(itemprop='description') #print(''.join(map(str, rough[0].contents)))
from markov_python.cc_markov import MarkovChain

# Generate 20 words from Romeo.txt and print them with a capitalized first letter.
mc = MarkovChain()
mc.add_file('Romeo.txt')
lines = (mc.generate_text(20))
line = " ".join(lines)
# Uppercase only the first character (str.capitalize would also lowercase the
# rest).  Slicing fixes the IndexError the original `line[0]` hit on empty output.
newLine = line[:1].upper() + line[1:]
print(newLine)
from markov_python.cc_markov import MarkovChain if __name__ == '__main__': mc = MarkovChain() mc.add_file('lyrics.txt') lyric = mc.generate_text() lyric = ' '.join(lyric) lyric = ''.join([i for i in lyric if not i.isdigit()]) print '---------------------------' print '---------------------------' print lyric print '---------------------------' print '---------------------------'
from markov_python.cc_markov import MarkovChain #standard parser mc = MarkovChain() #mc.add_file('C:/Users/Chris/PycharmProjects/CodeCademy/venv/text files/carols.txt') #mc.add_file('C:/Users/Chris/PycharmProjects/CodeCademy/venv/text files/farie tales.txt') #mc.add_file('C:/Users/Chris/PycharmProjects/CodeCademy/venv/text files/50Shades.txt') #mc.add_file('C:/Users/Chris/PycharmProjects/CodeCademy/venv/text files/lovecraft.txt') #mc.add_file('C:/Users/Chris/PycharmProjects/CodeCademy/venv/text files/songdata.txt') mc.add_file( 'C:/Users/Chris/PycharmProjects/CodeCademy/venv/text files/bass_guitar_tabs.txt' ) lyrics = mc.generate_text(20) def printChain(L): s = "\n" while len(L) > 0: s = s + L.pop(0) + " " return s print printChain(lyrics)
from markov_python.cc_markov import MarkovChain import scrapy mc = MarkovChain() #add filepath into mc.add_file(filepath) for files created by scrapy mc.add_file('fetched_text') #.generate_text() should generate a list of words listofwords = [] listofwords = mc.generate_text(5) listofwords = " ".join(listofwords) print listofwords
# NOTE(review): fragment — the opening triple-quote of the module docstring
# lies outside this chunk (the line starts with the docstring's tail and a
# closing ''').  Left byte-identical; it cannot be reformatted without the
# missing opening delimiter.
This file combines the functions of the get_raw_data and cc_markov files to output the final program. ''' # combine the code to collect database information and run the Markov chain program. from markov_python.cc_markov import MarkovChain from get_raw_data import combine_end_users, combine_data, fetch_data from time import sleep # aggregate the raw data into a Markov chain and initialize a MarkovChain class instance raw_data = combine_end_users() emile_mc = MarkovChain() # Run the interactive program. The initial steps are hard coded to reflect the "onboarding" process print "Emile: Hi! I'm Emile, a self improvement AI. Would you like to work on sleep or exercise? \n" user_input = raw_input("User entry (or q to Quit): ") while user_input != 'q': if user_input.lower() == "sleep": print "\nEmile: Do you want to focus on your bed time, your wake time, or simply the number of hours of sleep you get per night? \n" elif user_input.lower() == "exercise": print "\nEmile: Great! As a baseline, how many days did you exercise last week, and what's your ideal number of exercise days?\n" else: emile_ans = emile_mc.generate_text(user_input, raw_data) if emile_ans == "no response": # Emile will only respond to input he has "seen" in the past print "\n","No known response. Try again!","\n" else: print "\n","Emile:",emile_ans[0][0],"\n" if len(emile_ans[0]) > 1: # If Emile's response contains 2+ continuous texts, he will respond again before prompting for user input sleep(1) print "Emile:",emile_ans[0][1],"\n" user_input = raw_input("User entry (or q to Quit): ") # continue until user quits out of the program
from markov_python.cc_markov import MarkovChain
import sqlite3
from datetime import datetime

# Train a Markov chain on every distinct tweet stored in tweets.db, then
# write one generated status per 50 source tweets to a timestamped file.
mk = MarkovChain()
conn = sqlite3.connect('tweets.db')
try:
    c = conn.cursor()
    c.execute("SELECT COUNT(DISTINCT tweet) from home_tweets")
    num_rows = c.fetchone()[0]
    c.execute("SELECT DISTINCT tweet from home_tweets")
    tweets = c.fetchmany(num_rows)
    for tweet in tweets:
        # Each row is a 1-tuple; index it directly (was `list(tweet)[0]`).
        mk.add_string(tweet[0])
finally:
    # FIX: the original never closed the connection.
    conn.close()

all_status = []
for i in range(num_rows // 50):
    all_status.append(" ".join(mk.generate_text(20)))

with open("data/update_status_" + str(datetime.today()) + ".txt", 'wt',
          encoding='utf-8') as f:
    for status in all_status:
        f.write(status)
        f.write("\n")
from markov_python.cc_markov import MarkovChain import fetch_data text = fetch_data mc = MarkovChain() mc.add_file("C:\Users\Wojtek\Desktop\Python Programming\markov_chain\Data_separate.txt") start = True while start == True: user_input = raw_input("You: ") if user_input == "bye": print "Judy: OK, byee!" start = False else: print "Judy: " + " ".join(mc.generate_text())
# NOTE(review): fragment of a larger menu loop — it begins inside an if/elif
# chain (the opening `if` and the enclosing loop with `menu_selection` are
# outside this chunk) and ends with a `break` belonging to that loop.
# Left byte-identical; cannot be restructured without the missing context.
key_word_initializer=int(raw_input("How many keywords would you like"\ + "use when reinitializing the Markov Chain: ")) if(key_word_initializer == 0): my_markov_chain = MarkovChain() else: my_markov_chain = MarkovChain(key_word_initializer) print("The Markov Chain has now been reset.") elif( menu_selection == 'a' ): input_processing_selection=raw_input("Do you want to add a (w)ebpage"\ + " or a (f)ile: ") if(input_processing_selection == 'w'): webpage_upload_processing(my_markov_chain) elif(input_processing_selection == 'f'): file_upload_processing(my_markov_chain) elif( menu_selection == 'g'): markov_length=int(raw_input("How many words to you want the markov"\ + " chain to be. Default is 20: ")) if(markov_length > 0): markov_output=my_markov_chain.generate_text(markov_length) else: markov_output=my_markov_chain.generate_text() print(markov_output) program_exit=raw_input("Do you want to e(x)it or (r)epeat: ") if(program_exit == 'x'): break
from fetch import get_data
from markov_python.cc_markov import MarkovChain
import textwrap

hms = int(input('How many songs do you want to mash up?'))

# BUG FIX: the chain must exist before get_songs() runs (the original
# created it afterwards), and each fetched song must actually be added —
# the original stored it in a throwaway variable (shadowing builtin `str`)
# so the chain was always empty.
mark = MarkovChain()


def get_songs():
    """Fetch `hms` songs and feed each into the shared chain."""
    for _ in range(hms):
        mark.add_string(get_data())


get_songs()
hmw = int(input('How many words do you want in this song?'))
output = ' '.join(mark.generate_text(hmw))
# FIX: `print(output, 20)` printed a stray "20"; the unused textwrap import
# shows wrapping to 20 columns was the intent.
print(textwrap.fill(output, 20))
def generate_data():
    """Return Markov-generated text trained on the contents of data_file."""
    chain = MarkovChain()
    chain.add_string(read_data(data_file))
    return chain.generate_text()
# NOTE(review): fragment — the first statements belong to a function whose
# `def` line (and the `request`/`mc`/`taytay_urls`/`hitler_urls` bindings)
# are outside this chunk.  Left byte-identical; cannot be restructured
# without the missing enclosing definitions.
content = request.text soup = BeautifulSoup(content, "html.parser") comments = soup.find_all(id="mid-song-discussion") for comment in comments: comment.extract() return soup.find_all(id="lyrics-body-text")[0].get_text() def grab_hitler(url): request = requests.get(url) content = request.text soup = BeautifulSoup(content, "html.parser") results = soup.find_all('span','bqQuoteLink') for result in results: yield result.get_text() for url in taytay_urls: mc.add_string(grab_taytay(url)) output = mc.generate_text(max_length=20) print "PRE HITLER LYRICS: " print " ".join(output) hitlers = grab_hitler(hitler_urls[0]) for hitler in hitlers: mc.add_string(hitler) h_output = mc.generate_text(max_length=20) print "POST HITLER LYRICS: " print " ".join(h_output)
import os
from markov_python.cc_markov import MarkovChain

# Resolve the corpus relative to this script so the working directory
# does not matter, then print one generated sentence.
mc = MarkovChain()
folder = os.path.dirname(os.path.abspath(__file__))
book = os.path.join(folder, 'Monologo_do_Vaqueiro_Gil_Vicente.txt')
mc.add_file(book)
sentence = mc.generate_text()
print(sentence)
from markov_python.cc_markov import MarkovChain file = "pg1661.txt" mc = MarkovChain() mc.add_file(file) print mc.generate_text().items
'''
Created on 17/02/2019

@author: dinis
'''
from markov_python.cc_markov import MarkovChain

# Train on texto.txt and print the generated words as one line.
mc = MarkovChain()
mc.add_file("texto.txt")
words_list = " ".join(mc.generate_text())
print (words_list)
from markov_python.cc_markov import MarkovChain from fetch_data import lyric_links lyric_links() markov_lyrics = open("lyrics.txt", "r") mc = MarkovChain() mc.add_string(markov_lyrics.read()) markov_lyrics.close() answer = " ".join(mc.generate_text()) answer = answer.capitalize() print answer
""" This is the launcher to run the New Shakespeare Sonnet Generator """ $ git init $ git status $ git add . $ git status $ git commit -m "Initial commit" from fetchshakespeare import fetch_data from markov_python.cc_markov import MarkovChain import textwrap #creates and adds the randomly generated data to the string def get_words(): mc.add_string(fetch_data()) #generator mc = MarkovChain() get_words() poem_length = 100 output = mc.generate_text(poem_length) output = " ".join(output) print textwrap.fill(output,width=35).capitalize()
from fetch_data import train
from markov_python.cc_markov import MarkovChain
# The above comes from https://github.com/ReagentX/markov_python
import re

# Get the text from a webpage
data = train('http://www.gutenberg.org/files/135/135-h/135-h.htm')

# Clean the page to remove any pesky bits
data = re.sub('<.*?>', '', data)
data = re.sub('&rsquo', '\'', data)
data = re.sub('&rdquo|&ldquo', '\"', data)

# Make a markov chain object of order 4 and train it.
mc = MarkovChain(4)
mc.add_string(data)
text = mc.generate_text(30)

# IDIOM FIX: join is linear; the `result += word + ' '` loop re-copied the
# string on every word.  The original's trailing space is preserved.
result = ''.join(word + ' ' for word in text)
print(result)
""" Script for running the project and outputting data """ from markov_python.cc_markov import MarkovChain import fetch_data """ """ # fetch_data.get_text() open_text = open('concatenated.txt') base_text = open_text.read() # base_text = fetch_data.set_text() # base_text = open('concatenated.txt') # print (base_text) # base_text.close() mc = MarkovChain() mc.add_string(base_text) mctext = mc.generate_text() print(mctext)
from markov_python.cc_markov import MarkovChain

# Smallest possible demo: print 15 generated words from text.txt.
chain = MarkovChain()
chain.add_file("text.txt")
print(chain.generate_text(15))
""" This program prompts you to insert lyric urls from azlyrics. It mixes up the lyrics and gives you a new song in the same words by using a Markov Chain Generator """ from fetch_data import get_data from markov_python.cc_markov import MarkovChain import textwrap num = int(raw_input("How many songs do you want to mash up? ")) def get_songs(): for i in range(num): mc.add_string(get_data()) mc = MarkovChain() get_songs() song_length = int(raw_input('How many words do you want in this song? ')) output = mc.generate_text(song_length) output = " ".join(output) print textwrap.fill(output,20)
""" This module will tie together code from fetch_data.py and cc_markov.py. """ from markov_python.cc_markov import MarkovChain import fetch_data my_chain = MarkovChain() my_chain.add_string(fetch_data.get_content('http://wiersze.juniora.pl/tuwim/tuwim_l01.html')) # 3 years my_text = my_chain.generate_text() for word in my_text: print u"word: {}".format(word)