def generate_command(self, min_words=10, max_words=30):
    """Generate text with both trained models and display the results.

    For each model (Trump / Clinton), reads the seed text from its seed
    widget, generates a sequence of between *min_words* and *max_words*
    words, post-processes it, and renders it into the corresponding
    read-only output Text widget.

    Args:
        min_words: Minimum number of words to generate (default 10).
        max_words: Maximum number of words to generate (default 30).
    """
    self._generate_into(self.model_trump, self.tokenizer_trump,
                        self.seq_length_trump, self.trump_seed,
                        self.trump_text, min_words, max_words)
    self._generate_into(self.model_clinton, self.tokenizer_clinton,
                        self.seq_length_clinton, self.clinton_seed,
                        self.clinton_text, min_words, max_words)

def _generate_into(self, model, tokenizer, seq_length, seed_widget,
                   output_widget, min_words, max_words):
    """Generate one sequence and write it into a disabled Text widget."""
    # 'end-1c' excludes the trailing newline tkinter always appends.
    seed_text = seed_widget.get('1.0', 'end-1c')
    generated = Generator.generate_seq(model, tokenizer, seq_length,
                                       seed_text, min_words, max_words)
    # The output widget is kept 'disabled' so the user cannot edit it;
    # it must be temporarily re-enabled to replace its contents.
    output_widget.configure(state='normal')
    output_widget.delete('1.0', 'end')
    output_widget.insert('end', Generator.postprocess(generated))
    output_widget.configure(state='disabled')
# Save trained model and tokenizer for later usage
Trainer.save_model(model, MODEL_FILENAME)
Trainer.save_tokenizer(tokenizer, TOKENIZER_FILENAME)

# Second step: Generate Tweets with the trained model
#
# Open the sequenced data (if not open already)
# doc = Trainer.load_file(SEQUENCE_FILENAME)
# lines = doc.split('\n')
#
# Make all letters lower case
# lines = [word.lower() for word in lines]

# Get sequence length (50 in this case): number of input tokens per line,
# minus the one word the model is trained to predict.
seq_length = len(lines[0].split()) - 1

# Load model and tokenizer
model = Generator.load_trained_model(MODEL_FILENAME)
tokenizer = Generator.load_tokenizer(TOKENIZER_FILENAME)

# Select and print a random seed text (we can make up our own seed text as
# well, better results are expected with this approach tho. The tokenizer
# ignores words that are not known to it.)
# NOTE: randint is inclusive on BOTH ends, so the upper bound must be
# len(lines) - 1 — the previous randint(0, len(lines)) raised an occasional
# IndexError when the top value was drawn.
seed_text = lines[randint(0, len(lines) - 1)]
print(seed_text + '\n')

# Generate and print a sequence with a word length of minimum 10 and maximum 30
generated = Generator.generate_seq(model, tokenizer, seq_length, seed_text,
                                   MIN_OUTPUT_WORD_COUNT, MAX_OUTPUT_WORD_COUNT)

# Post-process and print generated text
print(Generator.postprocess(generated))