Example #1
import numpy as np
import torch
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from model.seq2seq import AttnDecoderRNN, DecoderRNN, EncoderRNN

SOS_token = 0
EOS_token = 1
MAX_LENGTH = 10
lang_dataset = TextDataset()  # project-specific dataset of sentence pairs
lang_dataloader = DataLoader(lang_dataset, shuffle=True)  # batch_size defaults to 1

# input-language vocabulary size
input_size = lang_dataset.input_lang_words
hidden_size = 256
# output-language vocabulary size
output_size = lang_dataset.output_lang_words
total_epoch = 20

encoder = EncoderRNN(input_size, hidden_size)
decoder = DecoderRNN(hidden_size, output_size, n_layers=2)
attn_decoder = AttnDecoderRNN(hidden_size, output_size, n_layers=2)
use_attn = True

if torch.cuda.is_available():
    encoder = encoder.cuda()
    decoder = decoder.cuda()
    attn_decoder = attn_decoder.cuda()


def show_plot(points):
    # simple line plot of a sequence of values (e.g. the loss per epoch)
    plt.figure()
    x = np.arange(len(points))
    plt.plot(x, points)
    plt.show()
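
A hypothetical per-pair training step wiring these pieces together. It assumes the tutorial-style interfaces (forward(token, hidden) on both modules and an initHidden() method on the encoder); train_step and the optimizer setup below are an illustrative sketch, not the project's actual trainer:

import torch.nn as nn
import torch.optim as optim

criterion = nn.NLLLoss()
encoder_optimizer = optim.SGD(encoder.parameters(), lr=0.01)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=0.01)


def train_step(in_lang, out_lang):
    # hypothetical single-pair step; assumes tutorial-style module interfaces
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    encoder_hidden = encoder.initHidden()

    # feed the source sentence through the encoder one token at a time
    for token in in_lang:
        encoder_output, encoder_hidden = encoder(token, encoder_hidden)

    # decode with teacher forcing, starting from the SOS token
    decoder_input = torch.tensor([[SOS_token]])
    decoder_hidden = encoder_hidden
    loss = 0
    for target in out_lang:
        decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden)
        loss += criterion(decoder_output, target.view(1))
        decoder_input = target.view(1, 1)  # teacher forcing

    loss.backward()
    encoder_optimizer.step()
    decoder_optimizer.step()
    return loss.item() / len(out_lang)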
Example #2
        self.punchline_text.grid(row=3, column=0, columnspan=2, sticky=W)

        self.quit_button = Button(self, width=10)
        self.quit_button["text"] = "Quit"
        self.quit_button["fg"] = "red"
        self.quit_button["command"] = self.quit
        self.quit_button.grid(row=3, column=1, sticky=W)

    def generation_punchline(self):
        setup = self.setup_entry.get()
        # segment the Chinese setup line into space-separated words
        split_setup = thu1.cut(setup.strip(), text=True)
        punchline = evaluateTestSet(encoder, decoder, split_setup)
        self.punchline_text.delete(0.0, END)
        self.punchline_text.insert(0.0, punchline)


hidden_size = 384

encoder = EncoderRNN(input_lang.n_words, hidden_size)
decoder = DecoderRNN(hidden_size, output_lang.n_words)

encoder.load_state_dict(torch.load('./encoder.pth'))
decoder.load_state_dict(torch.load('./decoder.pth'))

root = Tk()
root.title("Password")
root.geometry('460x170')
app = MachineJokes(root)
app.mainloop()
root.destroy()
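
The tokenizer thu1 used by generation_punchline is not shown above; presumably it is a THULAC word segmenter created at module level. A minimal sketch of that assumption:

import thulac

# seg_only=True disables part-of-speech tagging; cut(..., text=True)
# returns the segmentation as a single space-separated string
thu1 = thulac.thulac(seg_only=True)
print(thu1.cut('我爱北京天安门', text=True))  # -> '我 爱 北京 天安门'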
Example #3
    def __init__(self,
                 vocabulary_size,
                 input_size,
                 hidden_size,
                 encoder_layer_nums,
                 decoder_layer_nums,
                 max_length,
                 max_sample_length,
                 begin_token,
                 end_token,
                 input_dropout_p=0,
                 dropout_p=0,
                 bidirectional=True,
                 rnn_cell='GRU',
                 use_attention=True,
                 graph_embedding=None,
                 graph_parameter=None):
        super(LineRNNModel, self).__init__()
        self.vocabulary_size = vocabulary_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.encoder_layer_nums = encoder_layer_nums
        self.max_length = max_length
        self.input_dropout_p = input_dropout_p
        self.dropout_p = dropout_p
        self.bidirectional = bidirectional
        self.bidirectional_num = 2 if bidirectional else 1
        self.rnn_cell = rnn_cell
        self.graph_embedding = graph_embedding
        # guard against the mutable-default-argument pitfall
        self.graph_parameter = graph_parameter if graph_parameter is not None else {}

        self.embedding = nn.Embedding(vocabulary_size, input_size)
        self.input_dropout = nn.Dropout(input_dropout_p)

        if graph_embedding is not None:
            self.graph_encoder = GraphEncoder(hidden_size,
                                              graph_embedding=graph_embedding,
                                              graph_parameter=self.graph_parameter)

        self.line_encoder = LineRNNEncoderWrapper(
            input_size=input_size,
            hidden_size=hidden_size,
            vocabulary_size=vocabulary_size,
            n_layers=encoder_layer_nums,
            max_length=max_length,
            input_dropout_p=input_dropout_p,
            dropout_p=dropout_p,
            bidirectional=bidirectional,
            rnn_cell=rnn_cell)
        self.code_encoder = EncoderRNN(vocab_size=vocabulary_size,
                                       max_len=max_length,
                                       input_size=input_size,
                                       hidden_size=hidden_size,
                                       input_dropout_p=input_dropout_p,
                                       dropout_p=dropout_p,
                                       n_layers=encoder_layer_nums,
                                       bidirectional=bidirectional,
                                       rnn_cell=rnn_cell,
                                       variable_lengths=False,
                                       embedding=None,
                                       update_embedding=True,
                                       do_embedding=False)
        self.encoder_linear = nn.Linear(hidden_size * self.bidirectional_num,
                                        hidden_size // 2)

        self.position_pointer = PositionPointerNetwork(
            hidden_size=self.bidirectional_num * hidden_size)
        self.decoder = DecoderRNN(vocab_size=vocabulary_size,
                                  max_len=max_sample_length,
                                  hidden_size=hidden_size,
                                  sos_id=begin_token,
                                  eos_id=end_token,
                                  n_layers=decoder_layer_nums,
                                  rnn_cell=rnn_cell,
                                  bidirectional=bidirectional,
                                  input_dropout_p=input_dropout_p,
                                  dropout_p=dropout_p,
                                  use_attention=use_attention)
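
A hypothetical instantiation, just to show how the constructor arguments fit together; all hyperparameter values below are illustrative assumptions, not values from the original project:

# Hypothetical hyperparameters; adjust to the real vocabulary and task.
model = LineRNNModel(vocabulary_size=5000,
                     input_size=256,
                     hidden_size=512,
                     encoder_layer_nums=2,
                     decoder_layer_nums=2,
                     max_length=50,
                     max_sample_length=50,
                     begin_token=0,
                     end_token=1,
                     bidirectional=True,
                     rnn_cell='GRU',
                     use_attention=True)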
Example #4
        output_sentence = ' '.join(output_words)
        print('<', output_sentence)
        print('')


input_size = lang_dataset.input_lang_words
hidden_size = 256
output_size = lang_dataset.output_lang_words

encoder = EncoderRNN(input_size, hidden_size)
encoder.load_state_dict(torch.load('./encoder.pth'))
if use_attn:
    decoder = AttnDecoderRNN(hidden_size, output_size, n_layers=2)
    decoder.load_state_dict(torch.load('./attn_decoder.pth'))
else:
    decoder = DecoderRNN(hidden_size, output_size, n_layers=2)
    decoder.load_state_dict(torch.load('./decoder.pth'))

if use_cuda:
    encoder = encoder.cuda()
    decoder = decoder.cuda()

evaluateRandomly(encoder, decoder)

if use_attn:
    pair_idx = random.choice(list(range(len(lang_dataset))))
    pairs = lang_dataset.pairs[pair_idx]
    print('>', pairs[0])
    in_lang, out_lang = lang_dataset[pair_idx]
    output_words, attentions = evaluate(encoder, decoder, in_lang)
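
The attention weights returned by evaluate can be rendered as a heatmap; a minimal sketch, assuming attentions is a 2-D tensor of shape (output_len, input_len):

import matplotlib.pyplot as plt

# one row per generated word, one column per input word
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(attentions.cpu().numpy(), cmap='bone')
fig.colorbar(cax)
plt.show()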
Example #5
from model.seq2seq import EncoderRNN, DecoderRNN
from preprocess import input_lang, output_lang
from train import trainEpochs
import torch

hidden_size = 384

encoder = EncoderRNN(input_lang.n_words, hidden_size).cuda()
decoder = DecoderRNN(hidden_size, output_lang.n_words).cuda()

trainEpochs(encoder, decoder, 200000, print_every=500, learning_rate=0.001)

print('encoder_decoder training complete!')

torch.save(encoder.state_dict(), './encoder.pth')
torch.save(decoder.state_dict(), './decoder.pth')
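
To reuse the saved weights later (as Example #2 does), rebuild the modules and restore their state dicts; a minimal sketch:

# Restore the trained weights into freshly constructed modules.
encoder = EncoderRNN(input_lang.n_words, hidden_size)
decoder = DecoderRNN(hidden_size, output_lang.n_words)
encoder.load_state_dict(torch.load('./encoder.pth'))
decoder.load_state_dict(torch.load('./decoder.pth'))
encoder.eval()  # switch off dropout for inference
decoder.eval()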