Code example #1
if __name__ == '__main__':
    # Command-line entry point: sample text from a trained SeqGAN model.
    parser = argparse.ArgumentParser(
        description='Sample from a trained SeqGAN model.')
    parser.add_argument('sample_len', metavar='N', type=int,
                        help='length of sample to generate')
    parser.add_argument('-t', '--dictionary', default='dictionary.pkl',
                        type=str, help='path to dictionary file')
    parser.add_argument('-d', '--logdir', default='model/', type=str,
                        help='directory of the trained model')
    # BUG FIX: with action='store_true' the default must be False;
    # default=True made the flag always on, so passing -c had no effect.
    parser.add_argument('-c', '--only_cpu', default=False, action='store_true',
                        help='if set, only build weights on cpu')
    args = parser.parse_args()

    # Fail fast with an actionable message if the dictionary is missing.
    if not os.path.exists(args.dictionary):
        raise ValueError('No dictionary file found: "%s". To build it, '
                         'run train.py' % args.dictionary)

    # Only the reverse (id -> token) mapping is needed to decode samples.
    _, rev_dict = utils.get_dictionary(None, dfile=args.dictionary)
    num_classes = len(rev_dict)

    sess = tf.Session()
    model = SeqGAN(sess,
                   num_classes,
                   logdir=args.logdir,
                   only_cpu=args.only_cpu)
    model.build()
    # ignore_missing=True: tolerate checkpoints that lack some variables.
    model.load(ignore_missing=True)

    g = model.generate(args.sample_len)
    print('Generated text:', utils.detokenize(g, rev_dict))
Code example #2
    root.setLevel(logging.DEBUG)

    # Build the token dictionary from the training text.
    dictionary, rev_dict = utils.get_dictionary(args.text)
    num_classes = len(dictionary)

    # Batch iterator over the tokenized corpus (presumably endless;
    # it is drawn from num_epochs * num_steps times below — confirm).
    iterator = utils.tokenize(args.text,
                              dictionary,
                              batch_size=args.batch_size,
                              seq_len=args.seq_len)

    sess = tf.Session()
    model = SeqGAN(sess,
                   num_classes,
                   logdir=args.logdir,
                   learn_phase=args.learn_phase,
                   only_cpu=args.only_cpu)
    model.build()
    # Tolerate checkpoints with missing variables (e.g. fresh runs).
    model.load(ignore_missing=True)

    # COMPAT FIX: use range() and next() instead of the Python-2-only
    # xrange and iterator.next(); both forms behave identically on
    # Python 2.6+ and this also runs unchanged on Python 3.
    for epoch in range(1, args.num_epochs + 1):
        for step in range(1, args.num_steps + 1):
            logging.info('epoch %d, step %d', epoch, step)
            model.train_batch(next(iterator))

        # Generates a sample from the model.
        g = model.generate(1000)
        print(utils.detokenize(g, rev_dict))

        # Saves the model to the logdir.
        model.save()
Code example #3
File: generate.py — Project: chimamedia/SeqGAN-chainer
# Chainer SeqGAN generator; .to_gpu() moves its parameters to the GPU.
generator = SeqGAN(vocab_size=3000, emb_dim=128, hidden_dim=128,
                   sequence_length=40, start_token=0, lstm_layer=2
                   ).to_gpu()

# Number of sequences generated per call to generator.generate().
batch_size = 10000

def progress_report(count, start_time, batch_size):
    """Write a one-line training progress summary to stderr.

    Shows the update count, total samples processed, wall-clock time
    elapsed since ``start_time``, and throughput in samples/sec. The
    leading carriage return overwrites the previous report line.
    """
    elapsed = time.time() - start_time
    samples = count * batch_size
    rate = samples / elapsed
    # Drop sub-second precision from the elapsed-time display.
    elapsed_text = str(datetime.timedelta(seconds=elapsed)).split('.')[0]
    message = (
        '\rtrain {} updates ({} samples) time: {} ({:.2f} samples/sec)'
        .format(count, samples, elapsed_text, rate)
    )
    sys.stderr.write(message)

negative = []

# BUG FIX: `pool` was used in the second loop below but never defined
# (only a commented-out `# pool=None` existed), raising NameError at
# runtime. Define it explicitly; None means "no worker pool".
pool = None

# First pass: time sample generation without a pool.
st = time.time()
for x in range(30000 // batch_size):
    negative.append(generator.generate(batch_size))
    progress_report(x, st, batch_size)
t = time.time()
print()
print(t - st)

# Second pass: time generation with the optional pool argument.
for x in range(30000 // batch_size):
    negative.append(generator.generate(batch_size, pool))
    progress_report(x, t, batch_size)
print(time.time() - t)