Example #1
def render_page():

    my_list = read_file('tintern_abbey.txt')
    chain = MarkovChain(my_list)
    num_words = 10 - 1
    my_sentence = chain.walk(num_words)

    my_list2 = read_file("the_rime.txt")
    chain2 = MarkovChain(my_list2)
    num_words2 = 10 - 1
    my_sentence2 = chain2.walk(num_words2)

    return render_template('index.html',
                           sentence=my_sentence,
                           sentence2=my_sentence2)
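Most examples on this page build a MarkovChain directly from a flat list of words and call walk(n) to get back a sentence of roughly n words. The actual class differs from project to project; the sketch below is only one plausible first-order implementation consistent with that usage, and the follower-frequency table and dead-end restart are assumptions rather than code from any project shown here.

import random
from collections import defaultdict

class MarkovChain:
    '''Illustrative first-order chain over a flat word list (not any specific project's class).'''

    def __init__(self, word_list):
        # Map each word to a frequency table of the words observed after it.
        self.transitions = defaultdict(lambda: defaultdict(int))
        for current, following in zip(word_list, word_list[1:]):
            self.transitions[current][following] += 1
        self.starts = list(self.transitions.keys())

    def walk(self, num_words):
        # Start at a random observed word and take frequency-weighted steps.
        word = random.choice(self.starts)
        words = [word]
        for _ in range(num_words - 1):
            followers = self.transitions.get(word)
            if not followers:
                word = random.choice(self.starts)  # dead end: restart anywhere
            else:
                word = random.choices(list(followers),
                                      weights=list(followers.values()))[0]
            words.append(word)
        return ' '.join(words)

With this sketch, MarkovChain(read_file('tintern_abbey.txt')).walk(9) behaves like the call above, assuming read_file returns a list of words.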
Example #2
def make_the_words():
    # build Histogram
    # my_file = open("./words.txt", "r")
    # # absolute path -> ./file.ext ## more functional for live deploy
    # lines = my_file.readlines()
    filename = "transient.txt"
    lines = open(filename, "r").readlines()
    transient_txt_words = []  # word_list

    for line in lines:
        wordslist = line.split(' ')
        for word in wordslist:
            word = word.strip(' . , ;" \n _ ?')

            transient_txt_words.append(word)

    my_histogram = histogram(transient_txt_words)

    # put together words into a sentence
    sentence = ''
    num_words = 10
    ''' # commented out to implement markov
    for i in range(num_words):
        word = sample_by_frequency(my_histogram)
        sentence = sentence + " " + word '''

    # Markov chain version (replaces the frequency-sampling loop above)
    markovchain = MarkovChain(transient_txt_words)
    sentence = markovchain.walk(num_words)
    return sentence
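Several of these snippets keep an earlier histogram-sampling version around in comments, calling helpers such as histogram() and sample_by_frequency() (named sample() or weighted_sample() in other examples). Those helpers are defined in each project's own modules; the stand-ins below are only a minimal guess at their behavior, shown so the commented-out code reads clearly.

import random

def histogram(words):
    # Count how many times each word appears.
    counts = {}
    for word in words:
        counts[word] = counts.get(word, 0) + 1
    return counts

def sample_by_frequency(hist):
    # Pick a word at random, weighted by its count.
    words = list(hist)
    return random.choices(words, weights=[hist[w] for w in words])[0]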
Example #3
def markov(num=0):

    list_of_words = words_list()

    markovChain = MarkovChain(list_of_words)

    sentence = markovChain.walk(10)

    return sentence
Example #4
def gen_word():
    my_file = open("./words.txt", "r")
    lines = my_file.readlines()
    my_histogram = histogram(lines)

    sentence = ""
    num_words = 10
    # for i in range(num_words):
    #     word = sample(my_histogram)
    #     sentence += " " + word
    markovchain = MarkovChain(lines)
    sentence = markovchain.walk(num_words)
    return sentence
Example #5
def generate_words():
    '''my_histogram = (lines)
    
    sentence = ""
    num_words = 10
    for i in range (num_words):
        word = weighted_sample(my_histogram)
        sentence += " " + word
    return sentence'''

    markovchain = MarkovChain(
        ["one", "fish", "two", "fish", "red", "fish", "blue", "fish"])
    return markovchain.walk(10)
Example #6
def sample(self, outf, nr_frames=1e6, n=3):
    '''Sample using an n-gram into the given file.'''
    nr_frames = int(nr_frames)
    chain = MarkovChain(n)
    chain.add_sequence(self.buf)
    gen = chain.walk()
    out = wave.open(outf, 'wb')
    out.setparams(self.params)
    out.setnframes(nr_frames)
    chunk = nr_frames // 100
    for k in range(nr_frames):
        if k % chunk == 0:
            print(k // chunk, "%")
        out.writeframes(self.repr_to_pcm(next(gen)))
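This example, along with Examples #16, #20, and #21 below, uses a different interface: the constructor takes an n-gram order, the corpus is fed in with add_sequence(), and walk() takes no length argument and is consumed here with next(), so it is presumably a generator. The sketch below is a hypothetical order-n stand-in for that interface, not the actual beng/markov-chain implementation.

import random
from collections import defaultdict

class MarkovChain:
    '''Hypothetical order-n chain with the add_sequence()/walk() interface used above.'''

    def __init__(self, n):
        self.n = n
        self.transitions = defaultdict(list)

    def add_sequence(self, seq):
        # Record which item follows each length-n window of the sequence.
        seq = list(seq)
        for i in range(len(seq) - self.n):
            state = tuple(seq[i:i + self.n])
            self.transitions[state].append(seq[i + self.n])

    def walk(self):
        # Endless generator of items, restarting at states with no recorded follower.
        state = random.choice(list(self.transitions))
        while True:
            followers = self.transitions.get(state)
            if not followers:
                state = random.choice(list(self.transitions))
                followers = self.transitions[state]
            item = random.choice(followers)
            yield item
            state = state[1:] + (item,)

Under this sketch, chain.walk() in Examples #16 and #20 returns a generator rather than a string, which matches the next(gen) usage here.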
Example #7
def render_page():
        
    my_list = read_file('plato.txt')

    chain = MarkovChain(my_list)

    num_words = 10 - 1
    
    my_sentence = chain.walk(num_words)  

    my_sentence_2 = chain.walk(num_words)

    my_sentence_3 = chain.walk(num_words)

    my_sentence_4 = chain.walk(num_words)

    my_sentence_5 = chain.walk(num_words)
    
    return render_template('index.html', sentence=my_sentence,
                                        sentence2=my_sentence_2,
                                        sentence3=my_sentence_3,
                                        sentence4=my_sentence_4,
                                        sentence5=my_sentence_5)
Example #8
def create_sentence(word_num):
    source_text = "nietsche.txt"
    with open(source_text, "r") as file:
        og_text = file.read()

    word_list = og_text.split()

    for index, word in enumerate(word_list):
        word_list[index] = word.rstrip()
    chain = MarkovChain(word_list)
    chain.print_chain()
    sentence_words = []

    sentence = chain.walk(word_num)

    return sentence
Example #9
def generate_words():
    my_file = open("./words.txt", "r")
    lines = my_file.readlines()
    my_histogram = histogram(lines)

    word_list = []
    for line in lines:
        for word in line.split():
            word_list.append(word)
    sentence = ""
    num_words = 10
    # for i in range(num_words):
    #     word = sample_by_frequency(my_histogram)
    #     sentence += " " + word
    markovchain = MarkovChain(word_list)
    sentence = markovchain.walk(num_words)
    return sentence
Example #10
def generate_words():
    words_list = []
    with open('./EAP.text') as f:
        lines = f.readlines()
        for line in lines:
            for word in line.split():
                words_list.append(word)
    #lines = Dictogram(['one', 'fish', 'two', 'fish', 'red', 'fish', 'blue', 'fish'])
    markovchain = MarkovChain(words_list)
    '''sentence = ""
    num_words = 20
    for i in range(num_words):
        word = lines.sample()
        sentence += " " + word
    return sentence'''
    sentence = markovchain.walk(24)

    return render_template('index.html', sentence=sentence)
Example #11
def hello():

    # hs = histogram("words.txt")
    # samp = sample(hs)
    my_file = open("./words.txt", "r")
    lines = my_file.readlines()

    word_list = []

    for line in lines:
        for word in line.split():
            word_list.append(word)

    print(word_list)
    markovchain = MarkovChain(word_list)
    # return samp
    # num_words = 10

    return (markovchain.walk(20))
Example #12
def generate_words():
    # Build a histogram
    my_file = open("./text.txt", "r")
    lines = my_file.readlines()
    my_histogram = histogram(lines)
    
    word_list = []
    for line in lines: 
        for word in line.split():
            word_list.append(word)

    sentence = ""
    num_words = 10
    # for i in range(num_words):
    #     #sample/frequency goes here
    #     word = sample(my_histogram)
    #     sentence += " " + word
    # return sentence
    markovchain = MarkovChain(word_list)
    sentence = markovchain.walk(num_words)
    return sentence
Example #13
def generate_words():
    #build a histogram
    my_file = open("words.txt","r")
    lines = my_file.readlines()
    my_histogram = Histogram(lines)
    word_list = []
    for line in lines:
        for word in line.split():
            word_list.append(word)

    word = weighted_sample(my_histogram)
    #return word

    sentence = ""
    num_words = 10
    # for i in range(num_words):
    #     word = weighted_sample(my_histogram)
    #     sentence += " " + word
    markovChain = MarkovChain(word_list)
    sentence = markovChain.walk(num_words)
    print("sentence", sentence)
    return sentence
Example #14
def generate_words():
    #build a histogram
    # my_file = open("despacito.txt","r")
    lines = ["one fish two fish red fish blue fish"]  # a list of lines, so the loop below splits words rather than characters
    my_histogram = histogram(lines)
    word_list = []
    for line in lines:
        for word in line.split():
            word_list.append(word)

    word = sample(my_histogram)
    #return word

    sentence = ""
    num_words = 10
    # for i in range(num_words):
    #     word = weighted_sample(my_histogram)
    #     sentence += " " + word
    markovChain = MarkovChain(word_list)
    sentence = markovChain.walk(num_words)
    print("sentence", sentence)
    return sentence
Example #15
def markov():
    word_list = words_list()
    markov_chain = MarkovChain(word_list)
    sentence = markov_chain.walk(10)

    return render_template('index.html', tweet=sentence)
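Examples #1, #7, #10, #15, and #18 are Flask view functions that hand the generated sentence to render_template. For context, here is a minimal app wiring such a view appears to assume; the module name, route, template, and corpus path are placeholders rather than anything taken from the projects above, and index.html would need to reference {{ sentence }}.

from flask import Flask, render_template

from markov import MarkovChain  # hypothetical module holding a class like the sketch after Example #1

app = Flask(__name__)

@app.route('/')
def index():
    # Placeholder corpus and length; the real views read their own text files.
    word_list = open('words.txt').read().split()
    sentence = MarkovChain(word_list).walk(10)
    return render_template('index.html', sentence=sentence)

if __name__ == '__main__':
    app.run(debug=True)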
Example #16
File: test.py Project: beng/markov-chain
def walk_corpus(fname):
    with open(fname, 'r') as f:
        words = f.read().split()
    chain = MarkovChain(5)
    chain.add_sequence(words)
    return chain.walk()
Example #17
def hello_world():
    temp = MarkovChain("YOU UNDERSTAND MY NAME AND YOU HEAR IT IN YOUR BONES AND IN YOUR TEETH AND YOU KNOW WHO I AM AND YOU KNOW WHY YOU MUST LISTEN TO ME".split())
    return temp.walk(10)
Example #18
def generate_words():
    textFile = open('./text.txt')
    text = textFile.read().split()
    chain = MarkovChain(text)
    sentence = chain.walk(20).capitalize() + '.'
    return render_template('index.html', sentence=sentence)
Example #19
def hello_world():
    temp = MarkovChain('one fish two fish red fish blue fish'.split())
    return temp.walk(5)
Example #20
def walk_corpus(fname):
    with open(fname, 'r') as f:
        words = f.read().split()
    chain = MarkovChain(5)
    chain.add_sequence(words)
    return chain.walk()
Example #21
def walk_corpus():
    chain = MarkovChain(mc_nodes)
    chain.add_sequence(corpus)
    return chain.walk()