## Code example #1
# ---- Run flags ----
stochastic = False  # deterministic training
verbose = 1


# ---- Tokenize text, change to matrix ----

# Load the raw English corpus; one list element per line.
text = []
with open("data/TED2013.raw.en") as f:
    for line in f:
        text.append(line)
        # text.append(korean_morph(line))  # optional morphological preprocessing

# Encoder-side tokenizer.
# NOTE(review): `input` shadows the built-in input(); name kept because
# later (unseen) code may reference it.
input = Tokenizer(n_words)
input.fit_on_texts(text)
seq = input.texts_to_sequences(text, n_sentence, n_maxlen)

n_words_x = input.nb_words

# Decoder-side tokenizer is fitted on the very same corpus, so reuse the
# already-loaded `text` instead of re-reading the file (the original read
# data/TED2013.raw.en a second time to build an identical list).
output = Tokenizer(n_words)
output.fit_on_texts(text)
targets = output.texts_to_sequences(text, n_sentence, n_maxlen)

n_words_y = output.nb_words

# Shift targets one step left so position i predicts token i+1.
# NOTE(review): the final element keeps its previous value after this
# slice assignment — confirm that is the intended end-of-sequence handling.
targets[:-1] = targets[1:]
## Code example #2
n_d = 1000  # number of hidden nodes in decoder
n_y = dim_word  # decoder output dimensionality (word-embedding size)

# ---- Run flags ----
stochastic = False  # deterministic training
verbose = 1

# ---- Tokenize text, change to matrix ----

# Load the raw English corpus; one list element per line.
text = []
with open("data/TED2013.raw.en") as f:
    for line in f:
        text.append(line)
        # text.append(korean_morph(line))  # optional morphological preprocessing

# NOTE(review): `input` shadows the built-in input(); name kept because
# later (unseen) code may reference it.
input = Tokenizer(n_words_x)
input.fit_on_texts(text)
seq = input.texts_to_sequences(text, n_sentence, n_maxlen)

n_words_x = input.nb_words

# Source and target are the same corpus here, so the decoder side simply
# aliases the encoder-side tokenizer and sequences instead of re-reading
# and re-fitting (the previously commented-out code did exactly that).
output = input
targets = seq
n_words_y = output.nb_words
## Code example #3
# ---- Run flags ----
stochastic = False  # deterministic training
verbose = 1


# ---- Tokenize text, change to matrix ----

# Load the raw English corpus; one list element per line.
text = []
with open("data/TED2013.raw.en") as f:
    for line in f:
        text.append(line)
        # text.append(korean_morph(line))  # optional morphological preprocessing

# NOTE(review): `input` shadows the built-in input(); name kept because
# later (unseen) code may reference it.
input = Tokenizer(n_words_x)
input.fit_on_texts(text)
seq = input.texts_to_sequences(text, n_sentence, n_maxlen)

n_words_x = input.nb_words

# Source and target are the same corpus here, so the decoder side simply
# aliases the encoder-side tokenizer and sequences instead of re-reading
# and re-fitting (the previously commented-out code did exactly that).
output = input
targets = seq
n_words_y = output.nb_words