def generate_text(self, text, n, seed=None, max_iterations=100):
    # Rebuild the model only when the source text has changed (or no
    # model exists yet); otherwise reuse the cached one.
    model = self.current_model
    if self.text_changed or not self.current_model:
        model = markov.build_model(text, n)
        self.current_model = model
        self.text_changed = False
    new_text = markov.generate(model, n, seed, max_iterations)
    return new_text
def run(self):
    while True:
        event, values = self.window.read()
        #print(event, values)
        if event in (None, 'Exit'):
            break
        if event == '-B1-':
            # Generate text from the Markov chain model built by markov.py.
            # The user can choose an order and a maximum number of iterations;
            # each generation is printed on its own line.
            if len(values['-IN-']) > 1:
                order = 2
                max_iterations = 100
                if len(values['-D1-']) > 0 and len(values['-IN2-']) > 0:
                    order = int(values['-D1-'])
                    max_iterations = int(values['-IN2-'])
                text = self.generate_text(values['-IN-'], order, None, max_iterations)
                self.window['-OUTPUT-'].print(text, end='')
        if event == '-FILE-':
            # Open a file and read it into the input box.
            if values['-FILE-']:
                with open(values['-FILE-'], 'r') as f:
                    text = f.read()
                self.window['-IN-'].update(text)
        if event == '-IN-':
            self.text_changed = True
            # Autocomplete-like feature: generates text from the last few
            # characters of the input and updates as the user types.
            if values['-DG-']:
                order = 2
                max_iterations = 100
                if len(values['-D1-']) > 0 and len(values['-IN2-']) > 0:
                    order = int(values['-D1-'])
                    max_iterations = int(values['-IN2-'])
                order = min(order, len(values['-IN-'][:-1]))
                # TODO: Add dynamic model updating when user types in new text
                model = markov.build_model(values['-IN-'][:-1], order, self.current_model)
                new_text = markov.generate(model, order, values['-IN-'][-order - 1:-1], max_iterations)
                self.window['-OUTPUT-'].update('')
                self.window['-OUTPUT-'].print(values['-IN-'][:-1], end='')
                self.window['-OUTPUT-'].print(new_text[order:], text_color='white',
                                              background_color='red', end='')
                self.current_text = values['-IN-'][:-1]
        if event == 'Clear':
            # Clear the output box.
            self.window['-OUTPUT-'].update('')
    self.window.close()
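# The callers above and below use markov.generate(model, n, seed, max_iterations)
# and join its result with ''.join(...) or ' '.join(...), so it evidently returns
# a list of tokens that starts with the seed state. The markov module itself is
# not shown in this section; the following is a minimal sketch of generate(),
# assuming the model maps n-token state tuples to lists of successor tokens,
# with None marking the end of the source text (see the build_model test below).
import random

def generate(model, n, seed=None, max_iterations=100):
    # Start from the given seed state, or from a random state in the model.
    if seed is None:
        seed = random.choice(list(model.keys()))
    output = list(seed)
    for _ in range(max_iterations):
        state = tuple(output[-n:])
        successors = model.get(state)
        if not successors:
            break
        token = random.choice(successors)
        if token is None:  # end-of-text marker
            break
        output.append(token)
    return output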
def test_build_model(self):
    tokens = list("condescendences")
    expected = {
        ('d', 'e'): ['s', 'n'],
        ('n', 'd'): ['e', 'e'],
        ('n', 'c'): ['e'],
        ('s', 'c'): ['e'],
        ('e', 's'): ['c', None],
        ('e', 'n'): ['d', 'c'],
        ('o', 'n'): ['d'],
        ('c', 'o'): ['n'],
        ('c', 'e'): ['n', 's'],
    }
    self.assertEqual(markov.build_model(tokens, 2), expected)
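# The unit test above pins down build_model's contract: slide an n-token window
# over the input, map each window (as a tuple) to the list of tokens that
# follow it, and record None after the final window. A minimal sketch
# consistent with that test; the optional model argument (used by the GUI's
# autocomplete above) extends an existing model instead of starting fresh:
def build_model(tokens, n, model=None):
    if model is None:
        model = {}
    for i in range(len(tokens) - n + 1):
        state = tuple(tokens[i:i + n])
        # The token following this window, or None at the end of the input.
        follower = tokens[i + n] if i + n < len(tokens) else None
        model.setdefault(state, []).append(follower)
    return model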
import sys

import markov

text = sys.stdin.read()
model = markov.build_model(text.split(), 4)
generated = markov.generate(model, 4)
print(' '.join(generated))
app.debug = False

# Available source corpora: metamorphosis, shakespear, gutenberg, darwin
source = 'gutenberg'
model = load_model('models/' + source + '.h5')

# Read the source file and convert it to lowercase.
text = open('source_text/' + source + '.txt').read().lower()
maxlen = 40
chars = sorted(list(set(text)))
# Map each character to its position in the sorted list, and back.
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))

# markov config
words = text.split()
markov_model = markov.build_model(words, 2)

# helper function to sample an index from a probability array
def sample(preds, temperature=0.1):
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)

# Main Route
@app.route("/")
def index():
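# sample() above is the usual temperature-sampling helper from the Keras
# char-RNN examples. It is normally driven by a loop like the following
# sketch, assuming the loaded model takes a one-hot window of maxlen
# characters and outputs a softmax over chars; generate_chars is a
# hypothetical name, not part of this app, and the seed should be at
# least maxlen characters long.
def generate_chars(seed, length=200, temperature=0.5):
    generated = seed
    for _ in range(length):
        # One-hot encode the last maxlen characters of the text so far.
        x = np.zeros((1, maxlen, len(chars)))
        for t, ch in enumerate(generated[-maxlen:]):
            x[0, t, char_indices[ch]] = 1.0
        # Predict the next-character distribution and sample from it.
        preds = model.predict(x, verbose=0)[0]
        generated += indices_char[sample(preds, temperature)]
    return generated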
import markov

ngram = 3
text = open("./prince.txt").read()
model = markov.build_model(text, ngram)
print(''.join(markov.generate(model, ngram, None, 500)))
import json
from random import choice, randint

# flask and markov imports (both are used below)
from flask import Flask, render_template, request
import markov

# spacy
import spacy
nlp = spacy.load('en')

# set up flask app
app = Flask(__name__)
app.debug = True

# Available source corpora: metamorphosis, shakespear, gutenberg, darwin
text = open('source_text/gutenberg.txt').read().lower()
text = text.split()
model = markov.build_model(text, 1)

# Main Route
@app.route("/")
def index():
    return render_template('index.html')

# markov version
@app.route('/_markov')
def markovme():
    instructions = []
    routes = json.loads(request.args['routes'])
    print(routes)
    for route in routes:
from twython import Twython

import markov

# Twitter credentials
APP_KEY = ''
APP_SECRET = ''
ACCESS_TOKEN = ''
ACCESS_SECRET = ''
twitter = Twython(APP_KEY, APP_SECRET, ACCESS_TOKEN, ACCESS_SECRET)

# Read the list of cities and generate new city names (character-level model).
with open("ut-cities.txt") as f:
    text_cities = f.read()
line_cities = text_cities.strip()
model = markov.build_model(line_cities, 2)
markov_ut = markov.generate(model, 2)
full_list_cities = ''.join(markov_ut)
list_ut = full_list_cities.split()

def count_letters(word):
    return len(word) - word.count(' ')

# Keep generated names of 5 to 14 letters that are title-cased.
final_list = []
for line in list_ut:
    if (15 > count_letters(line) > 4) and line.istitle():
        final_list.append(line)
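# The snippet stops before posting anything; final_list is presumably what
# gets tweeted. With Twython the posting call is update_status; a minimal
# sketch (tweet_city is a hypothetical helper, not in the original):
import random

def tweet_city():
    # Tweet one of the generated, filtered city names.
    if final_list:
        twitter.update_status(status=random.choice(final_list))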
    sys.exit()

format = bool(strtobool(args[2])) if args[2:3] else True
line_parse = bool(strtobool(args[3])) if args[3:4] else False
max_chars = int(args[4]) if args[4:5] else 70
min_chars = int(args[5]) if args[5:6] else 25

"""
1. Load text -> Parse text using MeCab
"""
parsed_text = markov.parse_text('data/' + filename + '.txt',
                                is_line_messages=line_parse)
logger.info('Parsed text.')

"""
2. Build model
"""
text_model = markov.build_model(parsed_text, format=format, state_size=2)
logger.info('Built text model.')
json = text_model.to_json()
open('data/' + filename + '.json', 'w').write(json)

# Load from JSON
# json = open('input.json').read()
# text_model = markovify.Text.from_json(json)

"""
3. Make sentences
"""
try:
    for _ in range(10):
        sentence = markov.make_sentences(text_model, start='',
import markov
import sys
import random

time_pool = ["10:24 PM", "11:30 PM", "2:35 AM", "7:01 PM",
             "11:50 PM", "2:03 PM", "2:00 AM"]

# Collect the non-empty lines of the source text.
lines = []
for line in open("drunk_text.txt"):
    line = line.strip()
    if len(line) > 0:
        lines.append(line)

# Build the Markov model and generate ten character-level messages.
model = markov.build_model(lines, 4)
generate_result = markov.char_level_generate(lines, 4, count=10)
# generate_result = markov.word_level_generate(lines, 2, count=10)

print(random.choice(time_pool) + "\n ")
for m in generate_result:
    print("Dude: \n" + " " + m)
    print("\n" + ">>>>>>>>>>>>>>>" + "\n" + "\n" + "\n")

# Saved for later trials (for the verizon look).
# Note: the model above was built with order 4, so generate() must be
# called with the same order.
model_result = markov.generate(model, 4)
new_matrix = dict()
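# char_level_generate (and the commented-out word_level_generate) belong to
# the markov module, which is not shown here. One plausible shape, assuming
# it builds a character-level model over the joined lines and returns `count`
# generated strings, reusing the build_model/generate sketches above; this is
# a guess at the interface, not the module's actual code:
def char_level_generate(lines, n, count=10):
    chars = list("\n".join(lines))
    model = build_model(chars, n)
    return [''.join(generate(model, n)) for _ in range(count)]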
#### import markov.py file ####
import markov

#### read from the life and death datasets ####
life_text = open("./life_dataset.txt").read()
death_text = open("./death_dataset.txt").read()

amount = 5
life_model = markov.build_model(life_text, amount)
death_model = markov.build_model(death_text, amount)

print()
print('LIFE')
print(''.join(markov.generate(life_model, amount)))
print()
print('DEATH')
print(''.join(markov.generate(death_model, amount)))
import sys

import markov

text = sys.stdin.read()
model = markov.build_model(text.split(), 3)
generated = markov.generate(model, 3)
print(' '.join(generated))