Example #1
# Method from a GPT-2 wrapper class; `os`, `json`, and the GPT-2 repo's
# `encoder` and `model` modules are assumed to be imported at module level.
def __init__(self, config_path='./models/117M'):
    self.config_path = config_path
    # Byte-pair encoder and default hyperparameters for the checkpoint
    self.text_enc = encoder.get_encoder(self.config_path)
    self.hparams = model.default_hparams()
    # Override the defaults with the values shipped alongside the checkpoint
    with open(os.path.join(self.config_path, 'hparams.json')) as f:
        self.hparams.override_from_dict(json.load(f))
    # '\n' encodes to a single BPE token, used here as the end-of-sequence id
    self.eos_id = self.text_enc.encode('\n')[0]
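A minimal usage sketch. The enclosing class name, here `TextGenerator`, is an assumption, as is a downloaded 117M checkpoint under ./models/117M:

# `TextGenerator` is a hypothetical name for the class defining __init__ above.
gen = TextGenerator(config_path='./models/117M')
print(gen.hparams.n_ctx)  # context window size; 1024 for the 117M model
print(gen.eos_id)         # BPE token id used as the end-of-sequence marker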
Example #2
# Method from a GPT-2 wrapper class; as in Example #1, `os`, `json`,
# `encoder`, and `model` are assumed to be imported at module level.
def __init__(self, input_num, config_path):
    self.hparams = model.default_hparams()
    self.config_path = config_path
    # Override the default hyperparameters with the checkpoint's hparams.json
    with open(os.path.join(self.config_path, 'hparams.json')) as f:
        self.hparams.override_from_dict(json.load(f))
    self.input_num = input_num
    self.text_enc = encoder.get_encoder(self.config_path)
    # '\t' and '\n' each encode to a single BPE token, used here as the
    # start-of-sequence and end-of-sequence ids respectively
    self.sos_id = self.text_enc.encode('\t')[0]
    self.eos_id = self.text_enc.encode('\n')[0]
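The `[0]` indexing above is safe because GPT-2's byte-level BPE assigns every single byte a base token, so a one-character string such as '\t' or '\n' always encodes to exactly one token. A quick check, assuming the GPT-2 repo's `encoder` module and the single-argument `get_encoder` variant used above:

import encoder  # GPT-2 repo module; single-argument get_encoder variant as above

enc = encoder.get_encoder('./models/117M')
assert len(enc.encode('\t')) == 1  # '\t' -> one token: the start-of-sequence id
assert len(enc.encode('\n')) == 1  # '\n' -> one token: the end-of-sequence id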
Example #3
# Standard library plus NumPy, TensorFlow 1.x, and Streamlit; `model`,
# `sample`, and `encoder` are the modules from the GPT-2 repository.
import json
import os

import numpy as np
import streamlit as st
import tensorflow as tf

import encoder
import model
import sample


def interact_model(
    box_selection, input_text,
    model_name='124M',
    seed=None,
    nsamples=1,
    batch_size=1,
    length=40,
    temperature=0.7,
    top_k=0,
    top_p=1,
    models_dir='gpt/models',
):
    if st.button('Generate Your Blog Post'):
        st.markdown("Body Text:")
        models_dir = os.path.expanduser(os.path.expandvars(models_dir))
        if batch_size is None:
            batch_size = 1
        assert nsamples % batch_size == 0

        enc = encoder.get_encoder(model_name, models_dir)
        hparams = model.default_hparams()
        with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
            hparams.override_from_dict(json.load(f))

        if length is None:
            length = hparams.n_ctx // 2
        elif length > hparams.n_ctx:
            raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)

        with tf.Session(graph=tf.Graph()) as sess:
            context = tf.placeholder(tf.int32, [batch_size, None])
            np.random.seed(seed)
            tf.set_random_seed(seed)
            # Graph op that samples `length` tokens conditioned on `context`
            output = sample.sample_sequence(
                hparams=hparams,
                length=length,
                context=context,
                batch_size=batch_size,
                temperature=temperature, top_k=top_k, top_p=top_p
            )

            # Restore weights from the latest checkpoint for the chosen model
            saver = tf.train.Saver()
            ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
            saver.restore(sess, ckpt)

            while True:
                raw_text = box_selection + input_text
                # Fall back to a templated prompt when both inputs are empty
                while not raw_text:
                    st.markdown('Prompt should not be empty!')
                    raw_text = box_selection + "Title: " + input_text + " Body: "
                context_tokens = enc.encode(raw_text)
                generated = 0
                for _ in range(nsamples // batch_size):
                    # Sample a batch and drop the prompt tokens from the front
                    out = sess.run(output, feed_dict={
                        context: [context_tokens for _ in range(batch_size)]
                    })[:, len(context_tokens):]
                    for i in range(batch_size):
                        generated += 1
                        text = enc.decode(out[i])
                        text = text + "."
                        st.markdown(text)
                        st.success("Nice Blog!")
                        # st.stop() halts the Streamlit script, so the
                        # outer `while True` only ever runs one iteration
                        st.stop()
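Example #3 reads its prompt from Streamlit widgets instead of stdin. A sketch of how it might be wired into a page (the widget labels and options are assumptions, not from the source):

# Sketch only: widget labels and the category options below are assumed.
box_selection = st.selectbox('Category', ('Tech. ', 'Food. ', 'Travel. '))
input_text = st.text_input('Blog post title')
interact_model(box_selection, input_text)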
Example #4
# Imports as in Example #3: os, json, NumPy, TensorFlow 1.x, Streamlit, and
# the GPT-2 repo's model/sample/encoder modules.
def interact_model(
    model_name='124M',
    seed=None,
    nsamples=1,
    batch_size=1,
    length=40,
    temperature=0.7,
    top_k=0,
    top_p=1,
    models_dir='gpt/models',
):
    """
    Interactively run the model
    :model_name=124M : String, which model to use
    :seed=None : Integer seed for random number generators, fix seed to reproduce
     results
    :nsamples=1 : Number of samples to return total
    :batch_size=1 : Number of batches (only affects speed/memory).  Must divide nsamples.
    :length=None : Number of tokens in generated text, if None (default), is
     determined by model hyperparameters
    :temperature=1 : Float value controlling randomness in boltzmann
     distribution. Lower temperature results in less random completions. As the
     temperature approaches zero, the model will become deterministic and
     repetitive. Higher temperature results in more random completions.
    :top_k=0 : Integer value controlling diversity. 1 means only 1 word is
     considered for each step (token), resulting in deterministic completions,
     while 40 means 40 words are considered at each step. 0 (default) is a
     special setting meaning no restrictions. 40 generally is a good value.
     :models_dir : path to parent folder containing model subfolders
     (i.e. contains the <model_name> folder)
    """

    st.title("Generate Your Own Text!")
    st.markdown("Click the button down below to generate your own text.")

    if st.button('Run GPT-2'):
        models_dir = os.path.expanduser(os.path.expandvars(models_dir))
        if batch_size is None:
            batch_size = 1
        assert nsamples % batch_size == 0

        enc = encoder.get_encoder(model_name, models_dir)
        hparams = model.default_hparams()
        with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
            hparams.override_from_dict(json.load(f))

        if length is None:
            length = hparams.n_ctx // 2
        elif length > hparams.n_ctx:
            raise ValueError("Can't get samples longer than window size: %s" %
                             hparams.n_ctx)

        with tf.Session(graph=tf.Graph()) as sess:
            context = tf.placeholder(tf.int32, [batch_size, None])
            np.random.seed(seed)
            tf.set_random_seed(seed)
            # Graph op that samples `length` tokens conditioned on `context`
            output = sample.sample_sequence(hparams=hparams,
                                            length=length,
                                            context=context,
                                            batch_size=batch_size,
                                            temperature=temperature,
                                            top_k=top_k,
                                            top_p=top_p)

            # Restore weights from the latest checkpoint for the chosen model
            saver = tf.train.Saver()
            ckpt = tf.train.latest_checkpoint(
                os.path.join(models_dir, model_name))
            saver.restore(sess, ckpt)

            # Interactive console loop: read a prompt, sample, and print
            while True:
                raw_text = input("Model prompt >>> ")
                while not raw_text:
                    print('Prompt should not be empty!')
                    raw_text = input("Model prompt >>> ")
                context_tokens = enc.encode(raw_text)
                generated = 0
                for _ in range(nsamples // batch_size):
                    out = sess.run(output, feed_dict={
                        context: [context_tokens for _ in range(batch_size)]
                    })[:, len(context_tokens):]
                    for i in range(batch_size):
                        generated += 1
                        text = enc.decode(out[i])
                        print("=" * 40 + " SAMPLE " + str(generated) + " " +
                              "=" * 40)
                        print(text)
                print("=" * 80)