Example #1
import tensorflow as tf


def gpt2_pred_input(params, text=None):
    from models.gpt2 import encoder
    enc = encoder.get_encoder(params["encoder_path"])
    tokens = enc.encode(text)
    # GPT-2's context window is 1024 tokens; truncate longer prompts.
    if len(tokens) > 1024:
        tokens = tokens[:1024]
    # Repeat the same prompt for every element of the batch.
    t = tf.broadcast_to(tokens, [params["batch_size"], len(tokens)])
    dataset = tf.data.Dataset.from_tensors(t)
    return dataset
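The later examples bind this input_fn to a prompt with functools.partial and pass it to Estimator.predict, which supplies its own params dict. A minimal sketch of that call, assuming an Estimator named network built as in Example #3:

from functools import partial

# `network` is assumed to be the tf.estimator.Estimator from Example #3; the Estimator
# injects its own `params` into gpt2_pred_input, so only the prompt text is bound here.
predictions = network.predict(input_fn=partial(gpt2_pred_input, text="Hello, my name is"))
first_tokens = next(predictions)["tokens"]  # token ids of the first generated sample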
Example #2
import logging
from functools import partial

import tensorflow as tf
from models.gpt2 import encoder


def gpt2_predict(network, text, params):
    logger = logging.getLogger('tensorflow')

    enc = encoder.get_encoder(params["encoder_path"])
    predictions = network.predict(input_fn=partial(gpt2_pred_input, text=text))

    # Append each decoded sample to the prediction file and mirror it in the TensorFlow log.
    with tf.gfile.Open(params["predict_path"], "a") as f:
        for i, p in enumerate(predictions):
            p = p["tokens"]
            text = enc.decode(p)
            f.write("=" * 40 + " SAMPLE " + str(i) + " " + "=" * 40 + "\n")
            f.write(text)
            f.write("\n" + "=" * 80 + "\n")

            logger.info("=" * 40 + " SAMPLE " + str(i) + " " + "=" * 40 + "\n")
            logger.info(text)
            logger.info("\n" + "=" * 80 + "\n")
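For orientation, a sketch of how this helper would be called; the paths and the network Estimator are placeholders rather than values from the original project:

# gpt2_predict only reads "encoder_path" and "predict_path" from the dict passed to it.
params = {
    "encoder_path": "models/gpt2/encoder",  # directory with the BPE encoder files (hypothetical path)
    "predict_path": "predictions.txt",      # file the decoded samples are appended to (hypothetical path)
}
gpt2_predict(network, "Hello, my name is", params)  # `network`: the Estimator from Example #3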
Example #3
# Non-TPU setup: pick the batch size for the requested mode.
if predict_mode:
    params["batch_size"] = params["predict_batch_size"]
else:
    params["batch_size"] = params["train_batch_size"]
run_config = tf.estimator.RunConfig(
    model_dir=params["model_path"],
    session_config=sess_config,
)

# Plain (non-TPU) Estimator wrapping the GPT-2 model_fn.
network = tf.estimator.Estimator(model_fn=gpt2_model,
                                 config=run_config,
                                 params=params)

enc = encoder.get_encoder(params["encoder_path"])

# Serving loop: block on the next input from ai_integration, predict, and repeat.
while True:
    with ai_integration.get_next_input(
            inputs_schema={"text": {
                "type": "text"
            }}) as inputs_dict:
        # If an exception happens in this 'with' block, it will be sent back to the ai_integration library
        text = inputs_dict['text']
        if isinstance(text, bytes):
            text = text.decode('utf-8')
        predictions = network.predict(
            input_fn=partial(gpt2_pred_input, text=text))

        p = next(predictions)  # return just the first one
        p = p["tokens"]
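The example is cut off at this point. Based on the decoding step in Example #2 and the usual ai_integration result pattern, the loop body would plausibly continue along these lines; the send_result call and its result dict are an assumption, not part of the original snippet:

        text = enc.decode(p)  # decode the sampled tokens back to text, as in Example #2
        # Assumption: hand the text back through ai_integration; check the library's docs.
        ai_integration.send_result({
            "content-type": "text/plain",
            "data": text,
            "success": True,
        })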
Example #4
      back_prop=False,
      )
  return [final]
  #return tf.concat([context, final], axis=-1)
  #token = thunk(context)
  #return token

# Shard the sampling thunk across 8 TPU cores, feeding every core the same context and length.
sample_op = tft.tpu_shard(thunk2, inputs=[[context]*8, [length]*8])
r(sample_op, {context: [enc.encode("Hello, my name is")], length: 1},  options=config_pb2.RunOptions(report_tensor_allocations_upon_oom=True))



from transformers import GPT2TokenizerFast
enc = GPT2TokenizerFast.from_pretrained('gpt2')

# Overrides the Hugging Face tokenizer above with the repo's own BPE encoder.
enc = encoder.get_encoder()

# Forward pass of the reversible GPT-2 model over a variable-length token context.
params = gpt2.default_hparams()
context = tf1.placeholder(tf.int32, shape=[1, None], name="tokens")
fwd = gpt2_rev.model(params=params, X=context, reuse=tf1.AUTO_REUSE)



params = gpt2_rev.default_hparams()
# Fixed-length graph: inputs are the first n_ctx tokens, labels are the same sequence shifted by one.
context_fixed = tf1.placeholder(tf.int32, shape=[1, params['n_ctx']+1], name="tokens")
output_fixed = gpt2_rev.model_grad(params=params, X=context_fixed[:, :-1], labels=context_fixed[:, 1:], reuse=tf1.AUTO_REUSE)
# Variable-length graph used for sampling.
context = tf1.placeholder(tf.int32, shape=[1, None], name="tokens")
fwd = gpt2_rev.model(params=params, X=context, reuse=tf1.AUTO_REUSE)
length = tf1.placeholder(tf.int32, shape=[], name="length")
samp = sample.sample_sequence(params=params, length=length, context=context, batch_size=1, temperature=0.7, top_k=40)
#output = gpt2_rev.model_grad(params=params, X=context[..., :-1], labels=context[..., 1:], reuse=tf1.AUTO_REUSE)
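A rough sketch of driving the sampling op above, following the feed pattern of the earlier r(sample_op, ...) call; the session handling and checkpoint path are assumptions and not part of the original snippet:

saver = tf1.train.Saver()
with tf1.Session() as sess:
    saver.restore(sess, "models/gpt2/model.ckpt")  # hypothetical checkpoint path
    out = sess.run(samp, {context: [enc.encode("Hello, my name is")], length: 20})
    print(enc.decode(out[0]))  # decoded text for the single batch element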