Example 1
def default_hparams(model_name=None):
    hparams = HParams(
        n_vocab=0,
        n_ctx=1024,
        n_embd=768,
        n_head=12,
        n_layer=12,
    )
    if model_name is not None:
        with open(str(models_dir / model_name / 'hparams.json')) as f:
            hparams.override_from_dict(json.load(f))

    return hparams
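
A minimal sketch of how this helper is typically set up and called. The imports and models_dir below are assumptions mirroring the original GPT-2 code (TF 1.x tf.contrib.training.HParams and a pathlib models directory); they are not part of the example itself:

import json
from pathlib import Path
from tensorflow.contrib.training import HParams  # TF 1.x; assumed source of HParams

models_dir = Path('models')  # assumed layout: models/<model_name>/hparams.json

hp = default_hparams()                    # plain defaults (a 124M-sized config)
print(hp.n_embd, hp.n_head, hp.n_layer)   # -> 768 12 12

hp_345m = default_hparams('345M')         # defaults overridden from models/345M/hparams.json
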
Example 2
def sample_model(vocab_file="models/gpt2-vocab.json",
                 bpe_file="models/gpt2-merges.txt",
                 model_name='124M',
                 nsamples=1,
                 batch_size=1,
                 length=12,
                 temperature=1,
                 top_k=4,
                 top_p=0,
                 models_dir='models',
                 data_type='fp32'):
    """Run the sample_model.

    :model_name=124M : String, which model to use
    :nsamples=1 : Number of samples to return; if 0, continues to
     generate samples indefinitely.
    :batch_size=1 : Number of batches (only affects speed/memory).
    :length=12 : Number of tokens in generated text; if None, it is
     determined by the model hyperparameters (n_ctx).
    :temperature=1 : Float value controlling randomness in the Boltzmann
     distribution. Lower temperature results in less random completions. As the
     temperature approaches zero, the model becomes deterministic and
     repetitive. Higher temperature results in more random completions.
    :top_k=4 : Integer value controlling diversity. 1 means only 1 word is
     considered for each step (token), resulting in deterministic completions,
     while 40 means 40 words are considered at each step. 0 is a special
     setting meaning no restrictions. 40 generally is a good value.
    :models_dir : path to parent folder containing model subfolders
     (i.e. contains the <model_name> folder)
    """
    np.random.seed(1)
    tf.set_random_seed(1)

    if data_type == 'fp32':
        tf_data_type = tf.float32
    elif data_type == 'fp16':
        tf_data_type = tf.float16
    else:
        raise ValueError("data_type must be 'fp32' or 'fp16', got '%s'" % data_type)

    models_dir = os.path.expanduser(os.path.expandvars(models_dir))
    # Note: the vocab_file/bpe_file arguments are replaced here by the standard
    # GPT-2 file names found under models_dir/<model_name>.
    vocab_file = os.path.join(models_dir, model_name, 'encoder.json')
    bpe_file = os.path.join(models_dir, model_name, 'vocab.bpe')
    enc = encoder.get_encoder(vocab_file, bpe_file)
    hparams = HParams(n_vocab=0, n_ctx=1024, n_embd=768, n_head=12, n_layer=12)

    with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))

    if length is None:
        length = hparams.n_ctx
    elif length > hparams.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" %
                         hparams.n_ctx)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(graph=tf.Graph(), config=config) as sess:
        saver = tf.train.import_meta_graph("{}/{}/model.ckpt.meta".format(
            models_dir, model_name))

        # Draw a random prompt length (1-7 tokens) for every sequence in the batch.
        lengths = np.random.randint(low=1, high=8, size=batch_size)
        min_start_length = lengths.min()
        max_start_length = lengths.max()
        # Lower-triangular (causal) mask over the shortest prompt, tiled per
        # batch element: shape [batch_size, min_start_length, min_start_length].
        attention_mask = np.tile(np.tri(min_start_length), (batch_size, 1, 1))

        # Pad every prompt to max_start_length with the <|endoftext|> id, then
        # fill the first lengths[i] positions of each row with start ids.
        # Users can put real start ids here; '\n' (token id 198) is used as a placeholder.
        start_ids = np.ones([batch_size, max_start_length]) * enc.encoder['<|endoftext|>']
        for i in range(batch_size):
            start_ids[i][0:lengths[i]] = 198

        sess.run(tf.global_variables_initializer())
        print("[INFO] restore the model {}/{}".format(models_dir, model_name))
        saver.restore(sess,
                      ("{}/{}/model.ckpt".format(models_dir, model_name)))

        decoder_args = TransformerArgument(beam_width=1,
                                           head_num=hparams.n_head,
                                           size_per_head=hparams.n_embd //
                                           hparams.n_head,
                                           num_layer=hparams.n_layer,
                                           dtype=tf_data_type,
                                           kernel_init_range=0.00,
                                           bias_init_range=0.00)

        decoding_args = DecodingGpt2Argument(hparams.n_vocab,
                                             enc.encoder['<|endoftext|>'],
                                             enc.encoder['<|endoftext|>'],
                                             length + 2, decoder_args, top_k,
                                             top_p, temperature)

        ckpt_dict = {}
        for var in tf.trainable_variables():
            ckpt_dict[var.name] = var
        decoding_vars = tf.trainable_variables()

        op_output = ft_gpt_op(decoding_vars, decoding_args, batch_size,
                              start_ids, min_start_length, max_start_length,
                              attention_mask)

        generated = 0

        while nsamples == 0 or generated < nsamples:
            op_out = sess.run(op_output)

            for i in range(batch_size):
                generated += 1

                text = enc.decode(op_out[i])
                print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
                print(text)
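
The temperature, top_k and top_p arguments documented in the docstring above control how the next token is drawn from the logits at every step. Below is a rough, illustrative NumPy sketch of that filtering step, not the actual FasterTransformer sampling kernel; the function name and shapes are invented for illustration:

import numpy as np

def sample_next_token(logits, temperature=1.0, top_k=0, top_p=0.0):
    """Illustrative temperature / top-k / top-p sampling over a 1-D logits vector."""
    logits = np.asarray(logits, dtype=np.float64) / max(temperature, 1e-6)

    if top_k > 0:
        # Keep only the top_k highest-scoring tokens (0 means no restriction).
        kth_value = np.sort(logits)[-top_k]
        logits = np.where(logits < kth_value, -np.inf, logits)

    probs = np.exp(logits - logits.max())
    probs /= probs.sum()

    if top_p > 0.0:
        # Nucleus sampling: keep the smallest set of tokens whose cumulative
        # probability reaches top_p, then renormalize.
        order = np.argsort(-probs)
        cumulative = np.cumsum(probs[order])
        cutoff = np.searchsorted(cumulative, top_p) + 1
        keep = np.zeros_like(probs)
        keep[order[:cutoff]] = 1.0
        probs = probs * keep
        probs /= probs.sum()

    return np.random.choice(len(probs), p=probs)

With top_k=4 and top_p=0 (the defaults in these examples), only the four most likely tokens compete at each step; temperature rescales the logits before that filtering.
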
Example 3
def sample_model(model_name='124M',
                 nsamples=1,
                 batch_size=1,
                 length=12,
                 temperature=1,
                 top_k=4,
                 top_p=0,
                 models_dir='models',
                 data_type='fp32'):
    """Run the sample_model.

    :model_name=124M : String, which model to use
    :nsamples=1 : Number of samples to return; if 0, continues to
     generate samples indefinitely.
    :batch_size=1 : Number of batches (only affects speed/memory).
    :length=12 : Number of tokens in generated text; if None, it is
     determined by the model hyperparameters (n_ctx).
    :temperature=1 : Float value controlling randomness in the Boltzmann
     distribution. Lower temperature results in less random completions. As the
     temperature approaches zero, the model becomes deterministic and
     repetitive. Higher temperature results in more random completions.
    :top_k=4 : Integer value controlling diversity. 1 means only 1 word is
     considered for each step (token), resulting in deterministic completions,
     while 40 means 40 words are considered at each step. 0 is a special
     setting meaning no restrictions. 40 generally is a good value.
    :models_dir : path to parent folder containing model subfolders
     (i.e. contains the <model_name> folder)
    """

    models_dir = os.path.expanduser(os.path.expandvars(models_dir))
    enc = encoder.get_encoder(model_name, models_dir)
    hparams = HParams(n_vocab=0, n_ctx=1024, n_embd=768, n_head=12, n_layer=12)

    with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))

    if length is None:
        length = hparams.n_ctx
    elif length > hparams.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" %
                         hparams.n_ctx)

    # start_ids is a [batch_size, start_len] array flattened into one list, e.g.:
    # start_ids = [15496, 11, 616, 3290, 468,
    #             15496, 11, 616, 3290, 469,
    #             15496, 11, 616, 3290, 470,
    #             15496, 11, 616, 3290, 471]
    start_ids = [enc.encoder['<|endoftext|>'] for i in range(batch_size)]

    with tf.Session(graph=tf.Graph()) as sess:
        saver = tf.train.import_meta_graph("{}/{}/model.ckpt.meta".format(
            models_dir, model_name))
        print("[INFO] restore the model {}/{}".format(models_dir, model_name))
        saver.restore(sess,
                      ("{}/{}/model.ckpt".format(models_dir, model_name)))

        if data_type == 'fp32':
            tf_data_type = tf.float32
        elif data_type == 'fp16':
            tf_data_type = tf.float16
        else:
            raise ValueError("data_type must be 'fp32' or 'fp16', got '%s'" % data_type)

        decoder_args = TransformerArgument(beam_width=1,
                                           head_num=hparams.n_head,
                                           size_per_head=hparams.n_embd //
                                           hparams.n_head,
                                           num_layer=hparams.n_layer,
                                           dtype=tf_data_type,
                                           kernel_init_range=0.00,
                                           bias_init_range=0.00)

        decoding_args = DecodingGpt2Argument(hparams.n_vocab,
                                             enc.encoder['<|endoftext|>'],
                                             enc.encoder['<|endoftext|>'],
                                             length + 2, decoder_args, top_k,
                                             top_p, temperature)

        ckpt_dict = {}
        for var in tf.trainable_variables():
            ckpt_dict[var.name] = var
        decoding_vars = tf.trainable_variables()

        op_output = ft_gpt2_op(decoding_vars, decoding_args, batch_size,
                               start_ids)

        generated = 0

        while nsamples == 0 or generated < nsamples:
            print("[INFO] FT op time: {}".format(
                time_test(sess, op_output, iterations=5, warmup=True)))
            op_out = sess.run(op_output)

            for i in range(batch_size):
                generated += 1

                text = enc.decode(op_out[i][1:])
                print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
                print(text)
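
As a usage sketch, the function above could be driven from a small script; all parameter values below are illustrative, not prescribed by the example:

if __name__ == '__main__':
    # Generate 4 samples of up to 32 tokens from the 124M model in fp16.
    sample_model(model_name='124M',
                 nsamples=4,
                 batch_size=2,
                 length=32,
                 temperature=0.8,
                 top_k=40,
                 top_p=0.0,
                 models_dir='models',
                 data_type='fp16')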