def pack_it_in(data, counter_max):
    '''Generate new Markov text for every Indeed section and store it in ``data``.

    For each section in the module-level ``indeed_sections``, loads its pickled
    corpus, generates ``counter_max - 1`` lines of Markov text (prefixed with a
    generated role heading for the 'Work Experience' section) and stores the
    joined result under ``data[item]``.

    Args:
        data: dict to populate, keyed by section name.
        counter_max: loop bound; ``counter_max - 1`` lines are generated per
            section (preserves the original ``while counter < counter_max``
            starting at 1).

    Returns:
        The same ``data`` dict, populated.
    '''
    # The roles corpus is identical for every section and every line:
    # load it and build its model ONCE (the original reloaded the pickle
    # per section and rebuilt the Markov model on every loop iteration).
    with open('data/ROLES.pickle', 'rb') as f:
        roles = pickle.load(f)
    mkroles = markovgen.Markov(roles)

    for item in indeed_sections:
        newtext = []
        with open('data/{}.pickle'.format(item), 'rb') as f:
            readytext = pickle.load(f)
        mk = markovgen.Markov(readytext)

        for _ in range(1, counter_max):
            # Always generate a role (keeps RNG consumption identical to the
            # original), but only Work Experience renders it as a heading.
            newrole = mkroles.generate_markov_text()
            if item == 'Work Experience':
                roleline = '<h3 style="margin:5px 0 5px 0;">{}</h3>'.format(newrole)
            else:
                roleline = ''
            line = mk.generate_markov_text() + '<br>'
            newtext.append(roleline + line)

        data[item] = ' '.join(newtext)
        if args.verbose:
            print(data[item])
    return data
def markovbooks(books):
    """Mix Books Together"""
    # NOTE(review): Python 2 code (``iteritems``, tuple-parameter lambda,
    # ``print`` statements); will not run under Python 3.
    # Sort books ascending by their precomputed 'len' so the longest is last.
    books = sorted(books.iteritems(), key=lambda (x, y): y['len'])
    max_len = books[-1][1]['len']
    # NOTE(review): due to operator precedence this is shortest + (max/2),
    # probably meant (shortest + max)/2 -- harmless, it is overwritten before
    # first use below.
    avg = books[0][1]['len'] + max_len / 2
    output = ""
    # One accumulator string per chapter slot; chapter N of every book is
    # concatenated into chapters[N].
    chapters = ["" for i in range(max_len)]
    word_count = 0
    chapters_count = 0
    for book in books:
        chapter_count = 0
        for chapter in book[1]['chapters']:
            f = open(chapter, 'r')
            content = f.read()
            if len(content) > 0:
                # Skip the first 3 characters -- presumably a BOM/marker;
                # TODO confirm against the chapter files.
                words = content[3:].split(' ')
                word_count += len(words)
                chapters[chapter_count] += " ".join(words)
                chapter_count += 1
            f.close()
        chapters_count += chapter_count
    # Target length: average word count per chapter across all books.
    avg = (word_count / len(books)) / chapters_count
    for chapter in chapters:
        print chapter
        print "##########"
        # Strip newlines so the chapter feeds the model as one stream.
        markov = markovgen.Markov(StringIO(re.sub(r'\n', '', chapter)))
        output += "\n\n\n\n"
        output += markov.generate_markov_text(avg)
    output_txt('mark_book.txt', output.strip())
def generateMsg():
    """Generate a random message of 15 words from the jeeves corpus.

    Returns:
        str: 15 Markov-generated words followed by a newline.
    """
    # Context manager closes the corpus file deterministically
    # (the original leaked the handle).
    with open('jeeves.txt') as inputText:
        markov = mg.Markov(inputText)
    text = markov.generate_markov_text(15) + '\n'
    return text
def _get_markov(self, irc, channel):
    """Return the Markov chain for *channel*, creating and seeding it on first use."""
    try:
        return self._markovs[channel]
    except KeyError:
        markov = markovgen.Markov()
        self._markovs[channel] = markov
        self._load_from_channellogger(irc, channel, markov)
        return markov
def setUp(self):
    """Create the MongoDB handle plus Markov-generated fixture texts, names and users."""
    self.db = ScriplerMongoDB()
    self.markov = markovgen.Markov(open("jeeves.txt"))
    generate = self.markov.generate_markov_text
    # Two long bodies of text and three short names for the fixtures.
    self.text1 = generate(10000)
    self.text2 = generate(10000)
    self.name1 = generate(10)
    self.name2 = generate(10)
    self.name3 = generate(10)
    self.createUsers()
def __init__(self):
    """Build a Markov model from the tweets stored in data.json."""
    with open('data.json') as data:
        tweets = json.load(data)
    self.m = markovgen.Markov()
    for tweet in tweets:
        # Some tweets lack a 'full_text' attribute: skip them. The original
        # did `except: pass` and then fed `tweet_text` OUTSIDE the try, so a
        # missing key re-fed the previous tweet's text (or raised NameError
        # on the very first tweet).
        try:
            tweet_text = tweet['full_text']
        except KeyError:
            continue
        self.m.feed(tweet_text)
def generate_role(roles):
    '''Generate role on keypress.'''
    mk = markovgen.Markov(roles)
    # Keep offering roles until the user types "q".
    while True:
        newrole = mk.generate_markov_text()
        mk.feed(newrole)
        print('Mr Markov offers role: ' + OKGREEN + '{}'.format(newrole) + ENDC)
        answer = input('press any key to generate another role or "q" to exit...\n')
        if answer == 'q':
            break
    print(mk.available_seeds())
    return
def markovshorts(shorts):
    """Mix short stories together into one Markov-generated text.

    Args:
        shorts: iterable of file paths, one short story each.

    Side effect: writes the generated text to 'mark_short.txt' via output_txt.
    """
    text = ""
    word_count = 0
    for short in shorts:
        # `with` guarantees the file is closed even if read() raises
        # (the original leaked the handle on error).
        with open(short, 'r') as f:
            content = f.read()
        words = content.split(' ')
        word_count += len(words)
        text += " ".join(words)
    # Target length: average word count per short.
    avg = word_count / len(shorts)
    markov = markovgen.Markov(StringIO(text))
    output = markov.generate_markov_text(avg)
    output_txt('mark_short.txt', output)
def markov(bot, trigger):
    """Returns a markov chain composed of words from current and used triggers."""
    # Defaults when no arguments are given: chain length 3, output size 42.
    chain = 3
    size = 42
    if trigger.group(2):
        # Optional args: "<chain> [<size>]".
        data = trigger.group(2).split()
        try:
            chain = int(data[0])
        except ValueError:
            return say(bot,"Invalid markov chain length .")
        except AttributeError:
            return say(bot,"Invalid input.")
        if (chain==0):
            return say(bot,"...")
        if (chain>10 or chain<2):
            return say(bot,"Please specify a chain length from 2 to 10.")
        if len(data)>1:
            try:
                size = int(data[1])
            except ValueError:
                return say(bot,"Invalid markov size.")
            except AttributeError:
                return say(bot,"Invalid input.")
            if (size<1 or size>200):
                return say(bot,"Please specify a markov size from 1 to 200.")
    # Corpus: the text of every non-blank used and pending trigger.
    words = []
    for input in shared_var['used_list']:
        if not input['blank']:
            words.append(input['text'])
    for input in shared_var['trigger_list']:
        if not input['blank']:
            words.append(input['text'])
    markov = markovgen.Markov(words,chain)
    text = markov.generate_markov_text(size)
    # Capitalise the first letter and make sure the text ends with terminal
    # punctuation (turn a trailing comma/colon/semicolon into a period).
    text = text[:1].upper() + text[1:]
    if text[-1] not in ['.','!','?']:
        if text[-1] in [',',':',';']:
            text = text[:-1]+'.'
        else:
            text = text + '.'
    # Easter egg: size 4 renders the output as a whispered quote.
    if (size == 4):
        text = '"'+text[:-1]+'," Scion whispered.'
    return say(bot,text,3)
def makeFortune():
    """Generate one fortune: Markov text with quotes/parens/semicolons removed,
    lower-cased, each sentence capitalised and terminated with a period."""
    mk = markovgen.Markov(markovorig)
    raw = mk.generate_markov_text()
    # Strip punctuation we never want in a fortune.
    exclude = ['"', '(', ')', ';']
    cleaned = ''.join(ch for ch in raw if ch not in exclude)
    # Lower-case, split into sentences, then rebuild each one.
    sentences = cleaned.lower().split(". ")
    mfortune = ' '.join((s + ".").capitalize() for s in sentences)
    return (mfortune)
def add_entry():
    """Flask view: generate a fake job posting for the submitted job title.

    Loads (scraping if needed) the corpora for the title, builds Markov
    opening/closing paragraphs and bullet points, and flashes each piece for
    the template to render. Always redirects back to 'show_entries'.
    """
    jobTitle = request.form['jobtitle'].title()
    scraping = False
    try:
        newJob, jobtitleFilename, jobbulletFilename = readJobs.existingJob(jobTitle, scraping)
        if newJob:
            readJobs.mainReadJobs(jobTitle, jobtitleFilename, jobbulletFilename, 5)
        # Opening and closing paragraphs from the title corpus; `with`
        # closes the handle (the original leaked it).
        with codecs.open(jobtitleFilename, encoding='utf-8') as jobfile:
            markov = markovgen.Markov(jobfile)
        sentences = random.randint(2, 3)
        theOpening = markov.generate_markov_text(sentences)
        flash(theOpening, 'theOpening')
        sentences = random.randint(2, 3)
        theClosing = markov.generate_markov_text(sentences)
        flash(theClosing, 'theClosing')
        if os.path.isfile(jobbulletFilename):
            bulletHeader = ['Key Responsibilities', 'Duties',
                            'Key Duties and Responsibilities', 'Objectives',
                            'Qualifications', 'Main Responsibilities']
            flash(random.choice(bulletHeader), 'bulletHeader')
            # Bullet points from the bullet corpus.
            with codecs.open(jobbulletFilename, encoding='utf-8') as bulletfile:
                markovBullets = markovgen.MarkovBullets(bulletfile)
            flash(jobTitle, 'jobtitleMsg')
            sentences = random.randint(5, 7)
            theBullets = markovBullets.generate_markov_text(sentences)
            flash(theBullets, 'bullets')
        addInDatabase(jobTitle)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; any other failure surfaces as a flash message.
        flash("There was an error in your request....Try Again",'error')
    return redirect(url_for('show_entries'))
def generateDescription(numWords):
    """Return ``numWords`` words of Markov-generated text from the corpus file.

    Args:
        numWords: number of words to generate.

    Returns:
        str: the generated text.
    """
    # `with` closes the corpus deterministically (the original leaked the
    # handle and shadowed the name `file`).
    with open("../data/textForMarkov.txt") as corpus:
        markov = markovgen.Markov(corpus)
    return markov.generate_markov_text(numWords)
################################################################################################################
# 3RD WAY
# USING MARKOV CHAIN

# IMPORTING LIBRARIES
import markovgen

# Build the model from the corpus, closing the file afterwards
# (the original left the handle open).
with open("bbob.txt") as org:
    mk = markovgen.Markov(org)

# NOTE(review): this handle truncates any previous output file but the
# generated lines below are never written to it -- looks like an
# unfinished step; confirm intent before removing.
outfile = open("bbob_markov.txt", "w")

# REPEATABLE MARKOV TEXT GENERATOR
newtext = []
exclude = ['"', '(', ')', ';']  # punctuation stripped from every line
for _ in range(10):  # CHANGE 10 TO HOWEVER MANY LINES YOU WANT TO GENERATE
    line = '\n' + mk.generate_markov_text()
    # REMOVE PUNCTUATION, MAKE LINE LOWERCASE, ADD PERIOD AT END
    line = ''.join(ch for ch in line if ch not in exclude)
    line = line.lower() + "."
    print(line)
    newtext.append(line)
async def quote(*args):
    """Post a Markov-generated quote sourced from tenth_sublevel.txt."""
    # `with` closes the corpus file (the original leaked the handle).
    with open("tenth_sublevel.txt", 'r') as content:
        markov = markovgen.Markov(content)
    # FIX(review): the original called markov.generate_markov(), a method
    # that no other caller of this Markov class uses -- everywhere else the
    # API is generate_markov_text(). Confirm against the markovgen module.
    quote = markov.generate_markov_text()
    await client.say(quote)
import markovgen, random

# Corpus: Webster's dictionary, www.gutenberg.org/cache/epub/29765/pg29765.txt
# Build a 3-gram model and print 300 generated words, one sentence per line.
# `with` closes the corpus after the model is built (original leaked it).
with open(
        "C:/Users/Owen/Documents/Coding/PythonProjects/MarkovText/webster.txt") as webster:
    markov = markovgen.Markov(webster, tuple_size=3)
text = markov.generate_markov_text(length=300)
print(text.replace(". ", ".\n"))
# from different spots in the text. # For instance, if the text contained two lines, # "she has a dog" and "my dog has a tail," # this might generate "my dog has a dog" and "she has a tail." # Housekeeping import markovgen, re, string # Choose original file, new filename original = open('twain.txt') outfile = open('twain_markov.txt', 'w') # Repeatable Markov'd text generator newtext = [] mk = markovgen.Markov(original) counter = 0 while counter < 10: # Change 10 to however many lines you want to generate line = '\n' + mk.generate_markov_text() #remove punctuation exclude = ['"', '(', ')', ';'] line = ''.join(ch for ch in line if ch not in exclude) #make line lowercase, add period at end line = line.lower() + "." print line newtext.append(line) counter = counter + 1
""" This file is used to create a basic set of sentences from which we will be extracting features later. We will create random sentences and save their corresponding features in a hashmap, so that it can be used to create the training and test model """ import markovgen f = open('small.txt') markov = markovgen.Markov(f) l = [] for i in range(10): try: l.append(markov.generate_markov_text()) except: pass f.close() print(l)
        # NOTE(review): this fragment begins inside an enclosing class whose
        # header (and the `def` of the method this guard belongs to) is not
        # visible here -- presumably a triples()-style generator. TODO confirm.
        if len(self.words) < 3:
            return
        # Yield every consecutive word triple from self.words.
        for i in range(len(self.words) - 2):
            yield (self.words[i], self.words[i + 1], self.words[i + 2])

    def database(self):
        # Build the (w1, w2) -> [possible next words] transition table.
        for w1, w2, w3 in self.triples():
            key = (w1, w2)
            if key in self.cache:
                self.cache[key].append(w3)
            else:
                self.cache[key] = [w3]

    def generate_markov_text(self, size=25):
        # Pick a random starting bigram, then walk the chain `size` steps.
        # NOTE(review): `randomsntence` is presumably an alias of the stdlib
        # `random` module imported elsewhere in the file -- confirm.
        seed = randomsntence.randint(0, self.word_size - 3)
        seed_word, next_word = self.words[seed], self.words[seed + 1]
        w1, w2 = seed_word, next_word
        gen_words = []
        for i in range(size):
            gen_words.append(w1)
            # Advance: shift the window and pick a random successor of (w1, w2).
            w1, w2 = w2, randomsntence.choice(self.cache[(w1, w2)])
        gen_words.append(w2)
        return ' '.join(gen_words)

# Script driver: build a model from sample.txt and print one generated text.
_file = open(".\\sample.txt")
markov = markovgen.Markov(_file)
texts = markov.generate_markov_text()
print(texts)
# Run me!
import markovgen

# `with` closes the corpus handle (the original leaked it and shadowed the
# builtin name `file`).
with open('corpus') as corpus:
    markov = markovgen.Markov(corpus)
# NOTE(review): the generated text is discarded, as in the original;
# print or store the return value if output is wanted.
markov.generate_markov_text()
#!/usr/bin/python3
"""Generate Markov text from a corpus named on the command line, defaulting
to the bundled Shakespeare sonnets."""
import markovgen
import sys

# BUG FIX: the original tested `len(sys.argv) == 1` (i.e. NO argument given)
# and then used sys.argv[0] -- the script's own filename as a plain str, not
# an open file -- as the corpus. Use the first real argument when supplied,
# otherwise fall back to the default corpus.
if len(sys.argv) > 1:
    text = open(sys.argv[1])
else:
    text = open('./shakespeareSonnets.txt')

markov = markovgen.Markov(text)
markov.generate_markov_text()