# Train a SeqGAN on a text corpus: build the vocabulary and batch iterator,
# construct the model, then alternate training steps with per-epoch sampling
# and checkpointing.
root.setLevel(logging.DEBUG)

# Vocabulary maps: token -> id (dictionary) and id -> token (rev_dict).
dictionary, rev_dict = utils.get_dictionary(args.text)
num_classes = len(dictionary)

# Iterator yielding (batch_size, seq_len) batches of token ids.
iterator = utils.tokenize(args.text,
                          dictionary,
                          batch_size=args.batch_size,
                          seq_len=args.seq_len)

sess = tf.Session()
model = SeqGAN(sess,
               num_classes,
               logdir=args.logdir,
               learn_phase=args.learn_phase,
               only_cpu=args.only_cpu)
model.build()
# ignore_missing=True: start from scratch when no checkpoint exists yet.
model.load(ignore_missing=True)

# FIX: use range / next(iterator) instead of the Python-2-only
# xrange / iterator.next(), so this script runs under Python 3
# (consistent with the sibling training script in this file).
for epoch in range(1, args.num_epochs + 1):
    for step in range(1, args.num_steps + 1):
        logging.info('epoch %d, step %d', epoch, step)
        model.train_batch(next(iterator))

    # Generates a sample from the model.
    # NOTE(review): collapsed source does not show indentation; the
    # per-epoch placement of sampling/saving follows the comments — confirm.
    g = model.generate(1000)
    print(utils.detokenize(g, rev_dict))

    # Saves the model to the logdir.
    model.save()
# SeqGAN training driver: vocabulary + batch stream + model, then an
# epoch/step training loop that samples and checkpoints once per epoch.
root.setLevel(logging.DEBUG)

# Two-way vocabulary lookup: dictionary (token -> id), rev_dict (id -> token).
dictionary, rev_dict = utils.get_dictionary(args.text, args.dictionary)
num_classes = len(dictionary)

# Stream of token-id batches shaped (batch_size, seq_len).
iterator = utils.tokenize(
    args.text,
    dictionary,
    batch_size=args.batch_size,
    seq_len=args.seq_len,
)

sess = tf.Session()
model = SeqGAN(
    sess,
    num_classes,
    logdir=args.logdir,
    learn_phase=args.learn_phase,
    only_cpu=args.only_cpu,
)
model.build()
# Resume from an existing checkpoint when available; otherwise start fresh.
model.load(ignore_missing=True)

for epoch in range(1, args.num_epochs + 1):
    for step in range(1, args.num_steps + 1):
        logging.info('epoch %d, step %d', epoch, step)
        model.train_batch(next(iterator))

    # End of epoch: print a 1000-token sample and persist the weights.
    # NOTE(review): collapsed source hides indentation; per-epoch placement
    # follows the original comments — confirm against upstream.
    g = model.generate(1000)
    print(utils.detokenize(g, rev_dict))
    model.save()