def toShakespeare(self):
    """Given a line of text, return that text in the indicated style.

    Uses self.modern_text (string) as the input sentence.

    Returns:
        string: The translated text, if generated.
    """
    args = load_arguments()
    vocab = Vocabulary(self.vocab_path, args.embedding, args.dim_emb)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model = Model(args, vocab)
        model.saver.restore(sess, args.model)
        # Beam search for beam widths > 1, otherwise greedy decoding.
        if args.beam > 1:
            decoder = beam_search.Decoder(sess, args, vocab, model)
        else:
            decoder = greedy_decoding.Decoder(sess, args, vocab, model)
        batch = get_batch([self.modern_text], [1], vocab.word2id)
        ori, tsf = decoder.rewrite(batch)
        out = ' '.join(w for w in tsf[0])
    return out
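# --- Usage sketch (not part of the original code) ---
# toShakespeare() only reads self.vocab_path and self.modern_text, so a thin
# wrapper object is enough to drive it. The class name ShakespeareTranslator,
# its constructor, and the 'save/vocab' path below are illustrative assumptions,
# not names taken from the original project.
if __name__ == '__main__':
    class ShakespeareTranslator(object):
        def __init__(self, vocab_path, modern_text):
            self.vocab_path = vocab_path      # path to the saved Vocabulary file
            self.modern_text = modern_text    # modern-English input sentence

    # Attach the method defined above so the wrapper can call it.
    ShakespeareTranslator.toShakespeare = toShakespeare

    translator = ShakespeareTranslator('save/vocab', 'why do you look so pale ?')
    print(translator.toShakespeare())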
ckpt = tf.train.get_checkpoint_state(args.model)
if ckpt and ckpt.model_checkpoint_path:
    try:
        print("Trying to restore from a checkpoint...")
        model.saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model is restored from checkpoint {}".format(
            ckpt.model_checkpoint_path))
    except Exception as e:
        print("Cannot restore from checkpoint due to {}".format(e))

# set type of decoding (is this after the very last layer?)
if args.beam > 1:
    decoder = beam_search.Decoder(sess, args, vocab, model)
else:
    decoder = greedy_decoding.Decoder(sess, args, vocab, model)

if args.train:
    batches, _, _ = get_batches(train0, train1, vocab.word2id,
                                args.batch_size, noisy=True,
                                unparallel=False,
                                max_seq_len=args.max_seq_length)
    random.shuffle(batches)

    start_time = time.time()
    step = 0
    losses = Accumulator(args.steps_per_checkpoint,