Example #1
def mine_triples(device, input_file, output_file, use_local_model=False):
    if use_local_model:
        print('loading BERT...')
        bert = BertForMaskedLM.from_pretrained("../models/BertForMaskedLM")
        print('loading GPT2...')
        gpt = GPT2LMHeadModel.from_pretrained("../models/GPT2LMHeadModel")
    else:
        print('loading BERT...')
        bert = BertForMaskedLM.from_pretrained(bert_model)
        print('loading GPT2...')
        gpt = GPT2LMHeadModel.from_pretrained(gpt2_model)
    """
        'concat': KnowledgeMiner(
            os.path.join(data_repo, candidate_file),
            device,
            DirectTemplate,
            bert
        ),
        'template': KnowledgeMiner(
            os.path.join(data_repo, candidate_file),
            device,
            PredefinedTemplate,
            bert,
            grammar=False,
            template_loc=os.path.join(template_repo, single_templates)
        ),
        'template_grammar': KnowledgeMiner(
            os.path.join(data_repo, candidate_file),
            device,
            PredefinedTemplate,
            bert,
            grammar=True,
            template_loc=os.path.join(template_repo, single_templates)
        ),
    """

    knowledge_miners = {
        'coherency':
        KnowledgeMiner(input_file,
                       device,
                       EnumeratedTemplate,
                       bert,
                       language_model=gpt,
                       template_loc=os.path.join(template_repo,
                                                 multiple_templates),
                       use_local_model=use_local_model)
    }

    for template_type in knowledge_miners.keys():
        predictions = run_experiment(template_type, knowledge_miners)
        triples = knowledge_miners[template_type].sentences.tuples
        scored_samples = list(zip(triples, predictions))
        scored_samples.sort(key=lambda x: x[1], reverse=True)
        with open(output_file, "w") as f:
            for triple, pred in scored_samples:
                rel, head, tail = triple
                triple = (rel.lower(), head, tail)
                f.write("\t".join(triple) + "\t" + "{:.5f}".format(pred))
                f.write("\n")
Example #2
def get_model(args, device):
    if args.scratch:
        config = GPT2Config(n_ctx=args.context_length,
                            n_positions=args.context_length)
        model = GPT2LMHeadModel(config)
    else:
        model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    #import torchsummary
    #torchsummary.summary(model, (args.context_length, vocab_size), args.train_batch_size)
    return model.to(device)
Example #3
 def __init__(self, type, model_name_or_path="gpt2"):
     super(LM, self).__init__()
     self.enc = GPT2Tokenizer.from_pretrained(model_name_or_path)
     if type == '345M':
         self.model = GPT2LMHeadModel.from_pretrained('output/')
     elif type == '117M':
         self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path)
     self.model.to(self.device)
     self.model.eval()
     self.start_token = '<|endoftext|>'
Example #4
def init_model(seed=0, model_path='gpt2'):
    '''
    Parameters:
    ----------
    seed : int
        seed number for the different randomizers
    model_path : string, optional
        either a model name for an existing model or a path to a trained model
    '''
    np.random.seed(seed)
    torch.random.manual_seed(seed)
    torch.cuda.manual_seed(seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    enc = GPT2Tokenizer.from_pretrained('gpt2')
    model = GPT2LMHeadModel.from_pretrained('gpt2')
    
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(model_path))
    model = model.module
    
    model.to(device)
    model.eval()
    return model, enc, device
Example #5
def run_model():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name_or_path', type=str, default='', help='pretrained model name or path to local checkpoint')
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--load_checkpoint", '-c', type=str, default='')
    parser.add_argument("--fp16", type=boolean_string, default=False)
    parser.add_argument("--max_seq_length", type=int, default=128)
    
    parser.add_argument("--generation_length", type=int, default=20)
    parser.add_argument("--max_history", type=int, default=2)

    parser.add_argument("--temperature", type=float, default=1)
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument("--top_p", type=float, default=0.9)

    parser.add_argument('--use_gpu', action='store_true')
    parser.add_argument("--gpu", type=int, default=0)

    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)


    device = torch.device("cuda" if torch.cuda.is_available() and args.use_gpu else "cpu")
    n_gpu = torch.cuda.device_count()
    args.device, args.n_gpu = device, n_gpu

    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    #### load the GPT-2 model 
    config = GPT2Config.from_json_file(os.path.join(args.model_name_or_path, 'config.json'))
    enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)
    model = load_model(GPT2LMHeadModel(config), args.load_checkpoint, args, verbose=True)
    model.to(device)
    model.eval()

    history = []
    while True:
        raw_text = input("USR >>> ")
        while not raw_text:
            print('Prompt should not be empty!')
            raw_text = input("USR >>> ")
        if raw_text.lower() == 'quit':
          print('SYS >>> Goodbye!')
          break
        history.append(raw_text)
        context_tokens = sum([enc.encode(h) + [EOS_ID] for h in history],[]) #+ [EOS_ID]
        context_tokens = torch.tensor(context_tokens, device=device, dtype=torch.long).unsqueeze(0)
        position_ids = torch.arange(0, context_tokens.size(-1), dtype=torch.long, device=context_tokens.device)

        out = generate_sequence(model, context_tokens, position_ids=position_ids,
                                length=args.generation_length, temperature=args.temperature, 
                                top_k=args.top_k, top_p= args.top_p) 

        out = out.tolist()                        
        text = enc.decode(cut_seq_to_eos(out[0])).encode('ascii','ignore').decode('ascii')
        print("SYS >>> ", text)
        history.append(text)
        history = history[-(2*args.max_history+1):]
Example #6
def get_optimizer(model: GPT2LMHeadModel, data_loader: Any, num_epochs: int,
                  lr: float):
    params = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in params if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params': [p for n, p in params if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]
    num_train_optimization_steps = len(data_loader) * num_epochs

    optimizer = OpenAIAdam(
        optimizer_grouped_parameters,
        lr=lr,
        t_total=num_train_optimization_steps,

        # the following group of parameters is taken from train_gpt2.py
        warmup=0.002,
        max_grad_norm=1.0,
        weight_decay=0.01,
        schedule="warmup_linear",
        b2=.99)
    return optimizer
def fluency_score(rated_a, opt):

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    enc = GPT2Tokenizer.from_pretrained(opt.pretrained_model_path)
    model = GPT2LMHeadModel.from_pretrained(opt.pretrained_model_path)
    model.to(device)

    model.eval()
    nb_steps, eval_loss, exp_average_loss = 0, 0, None
    score_list = []
    # k = "the book is on the desk. These impressions show , when alive , they had smooth skin , robust limbs with webbed feet , and a ridge of skin on their undersides." tensor(169.6684, device='cuda:0')
    with torch.no_grad():
        for step, s in enumerate(
                rated_a):  # each sentence is scored as a batch of size 1
            # the model stays in eval mode; skip empty sentences
            if not s:
                print('space sentence')
                score_list.append(1e6)
                continue
            s = enc.encode(
                s)  # + [50256]  #50256 is the token_id for <|endoftext|>
            batch = torch.tensor([s]).to(device)
            loss = model(batch, lm_labels=batch)  # average -log p per token
            # print(loss*len(s))
            eval_loss += loss.item()
            nb_steps += 1

            score_list.append(loss.item())

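    # Clip the negated losses at their 5th percentile and rescale so the clipped
    # minimum maps to 0; larger values then indicate more fluent sentences.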
    cutoff = np.quantile([-t for t in score_list], 0.05)
    modified_rating = np.array(
        [cutoff if -t < cutoff else -t for t in score_list])
    normed_rating = (modified_rating - cutoff) / np.abs(cutoff)
    return normed_rating
Example #8
def mine_from_wikipedia(hardware):
    print('loading BERT...')
    bert = BertForMaskedLM.from_pretrained(bert_model)
    print('loading GPT2...')
    gpt = GPT2LMHeadModel.from_pretrained(gpt2_model)

    knowledge_miners = {
        'concat':
        KnowledgeMiner(data_repo + wikipedia_candidates, hardware,
                       DirectTemplate, bert),
        'template':
        KnowledgeMiner(data_repo + wikipedia_candidates,
                       hardware,
                       PredefinedTemplate,
                       bert,
                       grammar=False,
                       template_loc=template_repo + single_templates),
        'template_grammar':
        KnowledgeMiner(data_repo + wikipedia_candidates,
                       hardware,
                       PredefinedTemplate,
                       bert,
                       grammar=True,
                       template_loc=template_repo + single_templates),
        'coherency':
        KnowledgeMiner(data_repo + wikipedia_candidates,
                       hardware,
                       EnumeratedTemplate,
                       bert,
                       language_model=gpt,
                       template_loc=template_repo + multiple_templates)
    }

    for template_type in knowledge_miners.keys():
        run_experiment(template_type, knowledge_miners)
def download_model(name):
    if not name in MODELS:
        raise Exception(str(name) + ' not a model in the list')
    if not exists(PATH):
        print("# ", str(PATH), "not found, creating dir.")
        mkdir(PATH)
    print('# Downloading model: ' + str(name))
    name_path = MODEL_PATH_DICT[name]
    if name == 'word2vec':
        if not exists(join(PATH, name_path)):
            wget.download(
                'https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz'
            )
            shutil.move(name_path, join(PATH, name_path))
            print('# Downloaded word2vec')
        else:
            print('# Already downloaded')
    if name == 'glove':
        if not exists(join(PATH, name_path)):
            wget.download(
                'http://nlp.stanford.edu/data/wordvecs/glove.840B.300d.zip')
            zip = zipfile.ZipFile('./glove.840B.300d.zip')
            zip.extractall()
            _ = glove2word2vec('./glove.840B.300d.txt', join(PATH, name_path))
            print('# Downloaded glove')
        else:
            print('# Already downloaded')
    if name == 'dict2vec':
        if not exists(join(PATH, name_path)):
            wget.download(
                'https://dict2vec.s3.amazonaws.com/dict2vec300.tar.bz2')
            tar = tarfile.open("dict2vec300.tar.bz2")
            tar.extractall()
            tar.close()
            shutil.move(name_path, join(PATH, name_path))
            print('# Downloaded dict2vec')
        else:
            print('# Already downloaded')

    if name == 'conceptnet':
        if not exists(join(PATH, name_path)):
            wget.download(
                'https://conceptnet.s3.amazonaws.com/downloads/2019/numberbatch/numberbatch-en-19.08.txt.gz'
            )
            shutil.move(name_path, join(PATH, name_path))
            print('# Downloaded Conceptnet Numberbatch')
        else:
            print('# Already downloaded')
    if name == 'bert' or name == 'bert-context':
        _ = BertTokenizer.from_pretrained('bert-large-uncased')
        _ = BertModel.from_pretrained(
            'bert-large-uncased').embeddings.word_embeddings.weight.data.numpy(
            )
        print('# Downloaded bert')
    if name == 'gpt2' or name == 'gpt2-context':
        _ = GPT2Tokenizer.from_pretrained('gpt2')
        _ = GPT2LMHeadModel.from_pretrained('gpt2')
        _ = GPT2Model.from_pretrained('gpt2')
        print('# Downloaded gpt-2')
Example #10
 def __init__(self, model_name_or_path="gpt2"):
     super(LM, self).__init__()
     self.enc = GPT2Tokenizer.from_pretrained(model_name_or_path)
     self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path)
     self.model.to(self.device)
     self.model.eval()
     self.start_token = '<|endoftext|>'
     print("Loaded GPT-2 model!")
Example #11
 def __init__(self,GPU, model_name_or_path="gpt2"):
     self.device = torch.device(GPU if torch.cuda.is_available() else "cpu")
     self.enc = GPT2Tokenizer.from_pretrained(model_name_or_path)
     self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path)
     self.model.to(self.device)
     self.model.eval()
     self.start_token = '<|endoftext|>'
     print("Loaded GPT-2 model!")
Example #12
def run_model():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name_or_path', type=str, default='gpt2', help='pretrained model name or path to local checkpoint')
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--nsamples", type=int, default=1)
    parser.add_argument("--batch_size", type=int, default=-1)
    parser.add_argument("--length", type=int, default=-1)
    parser.add_argument("--temperature", type=int, default=1)
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument('--unconditional', action='store_true', help='If true, unconditional generation.')
    args = parser.parse_args()
    print(args)

    if args.batch_size == -1:
        args.batch_size = 1
    assert args.nsamples % args.batch_size == 0

    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    model.to(device)
    model.eval()

    if args.length == -1:
        args.length = model.config.n_ctx // 2
    elif args.length > model.config.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" % model.config.n_ctx)

    while True:
        context_tokens = []
        if not args.unconditional:
            raw_text = input("Model prompt >>> ")
            while not raw_text:
                print('Prompt should not be empty!')
                raw_text = input("Model prompt >>> ")
            context_tokens = enc.encode(raw_text)
        generated = 0
        for _ in range(args.nsamples // args.batch_size):
            out = sample_sequence(
                model=model, length=args.length,
                context=context_tokens if not args.unconditional else None,
                start_token=enc.encoder['<|endoftext|>'] if args.unconditional else None,
                batch_size=args.batch_size,
                temperature=args.temperature, top_k=args.top_k, device=device
            )
            out = out[:, len(context_tokens):].tolist()
            for i in range(args.batch_size):
                generated += 1
                text = enc.decode(out[i])
                print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
                print(text)
        print("=" * 80)
        if args.unconditional:
            break
Example #13
 def __init__(
         self,
         model_name_or_path="/data/pradeesh/detecting-fake-text/pytorch/"):
     super(LM, self).__init__()
     self.enc = GPT2Tokenizer.from_pretrained(model_name_or_path)
     self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path)
     self.model.to(self.device)
     self.model.eval()
     self.start_token = '<|endoftext|>'
     print("Loaded GPT-2 model!")
Example #14
    def __init__(self, args):
        super().__init__()

        if args.gpt2_model_dir is not None:
            # load GPT2 model from file
            gpt_model_name = str(args.gpt2_model_dir) + "/"
            dict_file = gpt_model_name
            print("loading GPT2 model from {}".format(gpt_model_name))
        else:
            # load GPT2 model from huggingface cache
            gpt_model_name = args.gpt2_model_name
            dict_file = gpt_model_name

        # Load pre-trained model tokenizer (vocabulary)
        self.tokenizer = GPT2Tokenizer.from_pretrained(dict_file)

        # GPT uses a different way to represent BPE than BERT does. Namely, the
        # final suffixes are indicated with a </w> suffix, while pieces that must
        # be followed are written as is. In BERT the prefixes are written as is,
        # while the parts that must follow (not be followed!) have a '##' prefix.
        # There is no one-to-one conversion, but at least we can make pieces that
        # may form a full word look the same.
        # Note that we have to be very careful now:
        # tokenizer.convert_tokens_to_ids won't work with our vocabulary.

        def convert_word(word):
            if word == GPT2_EOS:
                return word

            if word.startswith('Ġ'):  # the token starts with a whitespace
                return word[1:]

            return f'_{word}_'  # the token does not start with a whitespace;
            # it may not be the head of a word,
            # or it may be the head of a sentence.
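            # e.g. convert_word('Ġhello') -> 'hello', convert_word('ing') -> '_ing_'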

        _, gpt_vocab = zip(*sorted(self.tokenizer.decoder.items()))
        self.vocab = [convert_word(word) for word in gpt_vocab]
        self._init_inverse_vocab()

        # Load pre-trained model (weights)
        self.gpt_model = GPT2LMHeadModel.from_pretrained(gpt_model_name)
        self.gpt_model.eval()
        # print(self.gpt_model.config)

        # Sanity check.
        assert len(self.vocab) == self.gpt_model.config.vocab_size
        #assert 0 == self.gpt_model.config.n_special

        self.eos_id = self.gpt_model.config.eos_token_id
        self.pad_id = self.gpt_model.config.eos_token_id
        self.unk_id = self.gpt_model.config.eos_token_id
        self.bos_id = self.gpt_model.config.bos_token_id
        self.model_vocab = self.vocab
Example #15
def init():
    #seed = 42
    #np.random.seed(seed)
    #torch.random.manual_seed(seed)
    #torch.cuda.manual_seed(seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    enc = GPT2Tokenizer.from_pretrained('gpt2')
    model = GPT2LMHeadModel.from_pretrained('gpt2')
    model.to(device)
    model.eval()
    return enc, model
 def create_gpt2_lm_head(self, config, input_ids, token_type_ids, position_ids,
                                mc_labels, lm_labels, mc_token_ids):
     model = GPT2LMHeadModel(config)
     model.eval()
     loss = model(input_ids, position_ids, token_type_ids, lm_labels)
     lm_logits, presents = model(input_ids, position_ids, token_type_ids)
     outputs = {
         "loss": loss,
         "lm_logits": lm_logits,
         "presents": presents,
     }
     return outputs
Example #17
def mine(hardware):
    print('Loading GPT2...')
    gpt = GPT2LMHeadModel.from_pretrained(gpt2_model)

    knowledge_miners = {
        'coherency': KnowledgeMiner(
            data_repo + test_data,
            hardware,
            EnumeratedTemplate,
            language_model = gpt,
            template_loc = template_repo + multiple_templates)
    }

    return run_experiment('coherency', knowledge_miners)
Example #18
def get_prob(context, topk, genre, title):
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'  
    device = "cuda" if torch.cuda.is_available() else "cpu"
    tokenizer = tokenization_bert.BertTokenizer(vocab_file='cache/vocab_fine_tuning.txt')

    model_config = pytorch_pretrained_bert.GPT2Config.from_json_file('cache/model_config_single.json')
    model_state_dict = torch.load('cache/model_single/model_epoch_1.pt')

    model = GPT2LMHeadModel(config=model_config)
    model.load_state_dict(model_state_dict)
    model.to(device)
    model.eval()

    batch_size = 1
    temperature = 1

    context_tokens = []

    with open('./cache/label_to_id.json','r',encoding='utf-8') as f:
        title_to_ids = json.load(f)
    try:
        ids = title_to_ids[genre]
        context_tokens.append(ids)
    except:
        ids = title_to_ids['七言律诗']
        context_tokens.append(ids)

    context_tokens.append(100)
    context_tokens.extend(tokenizer.convert_tokens_to_ids(tokenizer.tokenize(title)))
    context_tokens.append(4282)  # 4282 is the token id of '#'

    raw_text = context
    if raw_text != "":
        context_tokens.extend(tokenizer.convert_tokens_to_ids(tokenizer.tokenize(raw_text)))


    watcher = WatchProb(model=model, context=context_tokens, tokenizer=tokenizer, temperature=temperature, top_k=topk, device=device)
    prob_dis = watcher.show_prob(topk=topk)

    eight_cumu = watcher.show_cumulative(0.8)
    nine_cumu = watcher.show_cumulative(0.9)
    ninefive_cumu = watcher.show_cumulative(0.95)
    prob_dis.append("")
    prob_dis.append("")
    prob_dis.append("0.8累计覆盖: "+str(eight_cumu))
    prob_dis.append("0.9累计覆盖: "+str(nine_cumu))
    prob_dis.append("0.95累计覆盖: "+str(ninefive_cumu))

    return prob_dis
Example #19
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size',default=1,type=int,help='Batch size for inference')

    parser.add_argument('--model_name',default='gpt2',type=str,
                        help='Pre-trained model name')
    parser.add_argument('--max_seq_length',default=128,type=int,
                        help='Maximum total input sequence length after tokenization')

    args = parser.parse_args()

    input_ids = torch.zeros([args.batch_size,args.max_seq_length],dtype=torch.long)

    model = GPT2LMHeadModel.from_pretrained(args.model_name)
    torch.onnx.export(model,input_ids,'gpt2_'+'batch'+str(args.batch_size)+'.onnx')
Example #20
    def load_model(self,
                   model_path='./cache/model/model_epoch_1.pt',
                   model_config='./cache/model_config.json',
                   device='cpu'):
        # /data/disk1/private/hujinyi/gpt_poem/model_with_title/model_epoch_1.pt
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        model_config = pytorch_pretrained_bert.GPT2Config.from_json_file(
            model_config)
        model_state_dict = torch.load(model_path)
        model = GPT2LMHeadModel(config=model_config)
        model.load_state_dict(model_state_dict)
        model.to(self.device)
        model.eval()
        self.model = model
Example #21
def main():
    LENGTH = -1
    BATCH_SIZE = 1
    NSAMPLES = 18
    TEMPERATURE = 0.5
    TOPK = 5

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    tokenizer = tokenization.BertTokenizer(vocab_file='cache/vocab.txt')
    model_config = pytorch_pretrained_bert.GPT2Config.from_json_file(
        'model_config.json')
    model_state_dict = torch.load('./model.pt')
    model = GPT2LMHeadModel(config=model_config)
    model.load_state_dict(model_state_dict)
    model.to(device)
    model.eval()

    if LENGTH == -1:
        LENGTH = model.config.n_ctx // 2
    elif LENGTH > model.config.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" %
                         model.config.n_ctx)

    while True:
        raw_text = input("Model prompt >>> ")
        while not raw_text:
            print('Prompt should not be empty!')
            raw_text = input("Model prompt >>> ")
        context_tokens = tokenizer.convert_tokens_to_ids(
            tokenizer.tokenize(raw_text))
        generated = 0
        for _ in range(NSAMPLES // BATCH_SIZE):
            out = sample_sequence(model=model,
                                  length=LENGTH,
                                  context=context_tokens,
                                  start_token=None,
                                  batch_size=BATCH_SIZE,
                                  temperature=TEMPERATURE,
                                  top_k=TOPK,
                                  device=device)
            out = out[:, len(context_tokens):].tolist()
            for i in range(BATCH_SIZE):
                generated += 1
                text = tokenizer.convert_ids_to_tokens(out[i])
                print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
                print(text)
        print("=" * 80)
Example #22
 def __init__(self, text_sequence, model_type, temperature = 1.0, top_k = 0, batch_size = 1, length = 1, nsamples =1, debug = True):
     self.text_sequence = text_sequence
     #eventually will differentiate between gpt-2, BERT, etc.
     self.model_type = model_type
     model_name = 'gpt2'
     self.debug = debug
     #detect device
     self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     self.temperature = temperature
     self.top_k = top_k
     self.batch_size = batch_size
     self.length = length
     self.nsamples = nsamples
     #create encoder and model
     self.enc = GPT2Tokenizer.from_pretrained(model_name)
     self.model = GPT2LMHeadModel.from_pretrained(model_name)
     self.model.to(self.device)
     self.model.eval()
Example #23
    def init(self, model_path, model_checkpoint):
        self.config = GPT2Config.from_json_file(os.path.join(model_path, "config.json"))
        self.tokenizer = GPT2Tokenizer.from_pretrained(model_path)
        self.model = GPT2LMHeadModel(self.config)

        model_state_dict = fix_state_dict_namespace(torch.load(model_checkpoint))

        start_model = self.model
        if hasattr(self.model, "transformer") and all(not s.startswith('transformer.') for s in model_state_dict.keys()):
            print('loading transformer only')
            start_model = self.model.transformer
        start_model.load_state_dict(model_state_dict)

        if self.fp16:
            self.model.half()

        self.model.to(self.device)
        self.model.eval()
def context_score(questions, answers, opt):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    enc = GPT2Tokenizer.from_pretrained(opt.pretrained_model_path)
    model = GPT2LMHeadModel.from_pretrained(opt.pretrained_model_path)
    model.to(device)

    model.eval()

    score_list = []
    with torch.no_grad():
        for step, (question, answer) in enumerate(
                zip(questions,
                    answers)):  # each (question, answer) pair is scored as a batch of size 1
            # the model stays in eval mode; skip empty answers
            if not answer:
                print('space sentence')
                score_list.append(-1e6)

                continue
            joint_enc = enc.encode(
                question + ' ' +
                answer)  # + [50256]  #50256 is the token_id for <|endoftext|>
            q = enc.encode(question)
            batch_joint = torch.tensor([joint_enc]).to(device)
            batch_q = torch.tensor([q]).to(device)

            loss_joint = model(batch_joint,
                               lm_labels=batch_joint)  # average -log p per token
            loss_q = model(batch_q, lm_labels=batch_q)

            p_joint = -loss_joint * (len(joint_enc) - 1)
            p_q = -loss_q * (len(q) - 1)

            score = p_joint - (p_q)

            score_list.append(score.item())

    cutoff = np.quantile(score_list, 0.05)
    modified_rating = np.array(
        [cutoff if t < cutoff else t for t in score_list])
    normed_rating = (modified_rating - cutoff) / np.abs(cutoff)
    return normed_rating
def load_model_fromlist(name):
    if not name in MODELS:
        raise Exception(str(name) + ' not a model in the list')
    print('# Loading model: ' + str(name))
    name_path = MODEL_PATH_DICT[name]
    if name == 'word2vec':
        if not exists(join(PATH, name_path)): download_model(name)
        return (gensim.models.KeyedVectors.load_word2vec_format(join(
            PATH, name_path),
                                                                binary=True))
    if name == 'glove':
        if not exists(join(PATH, name_path)): download_model(name)
        return (gensim.models.KeyedVectors.load_word2vec_format(
            join(PATH, name_path)))
    if name == 'dict2vec':
        if not exists(join(PATH, name_path)): download_model(name)
        return (gensim.models.KeyedVectors.load_word2vec_format(
            join(PATH, name_path), binary=False, unicode_errors="ignore"))
    if name == 'conceptnet':
        if not exists(join(PATH, name_path)): download_model(name)
        return (gensim.models.KeyedVectors.load_word2vec_format(
            join(PATH, name_path)))
    if name == 'bert':
        tokenizer = BertTokenizer.from_pretrained('bert-large-uncased')
        model = BertModel.from_pretrained(
            'bert-large-uncased').embeddings.word_embeddings.weight.data.numpy(
            )
        return ([model, tokenizer])
    if name == 'bert-context':
        tokenizer = BertTokenizer.from_pretrained('bert-large-uncased')
        model = BertModel.from_pretrained('bert-large-uncased',
                                          output_hidden_states=True)
        return ([model, tokenizer])
    if name == 'gpt2':
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2LMHeadModel.from_pretrained(
            'gpt2').transformer.wte.weight.data.numpy()
        return ([model, tokenizer])
    if name == 'gpt2-context':
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2Model.from_pretrained('gpt2', output_hidden_states=True)
        return ([model, tokenizer])
Example #26
def run_model():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name_or_path', type=str, default='gpt2', help='pretrained model name or path to local checkpoint')
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--nsamples", type=int, default=1)
    parser.add_argument("--batch_size", type=int, default=-1)
    parser.add_argument("--length", type=int, default=-1)
    parser.add_argument("--temperature", type=float, default=1.0)
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument('--unconditional', action='store_true', help='If true, unconditional generation.')
    parser.add_argument('--inputs_file', type=str, default=None)
    parser.add_argument('--output_file', type=str, default='results.json')
    parser.add_argument('--do_beam_search', type=bool, default=False)
    args = parser.parse_args()
    print(args)

    if args.batch_size == -1:
        args.batch_size = 1
    assert args.nsamples % args.batch_size == 0

    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    model.to(device)
    model.eval()

    if args.length == -1:
        args.length = model.config.n_ctx // 2
    elif args.length > model.config.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" % model.config.n_ctx)

    if args.inputs_file is None:
      decode_interactive(model, enc, device, args)
    else:
      decode_from_file(model, enc, device, args)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='openai-gpt',
                        help='pretrained model name')
    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
    parser.add_argument("--output_dir", default='tuned_gpt2', type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument('--train_dataset', type=str, default='')
    
    parser.add_argument('--source_eval', type=str, default='')
    parser.add_argument('--target_eval', type=str, default='')
    parser.add_argument('--source_train', type=str, default='')
    parser.add_argument('--target_train', type=str, default='')
    
    
    parser.add_argument('--eval_dataset', type=str, default='')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_train_epochs', type=int, default=10)
    parser.add_argument('--train_batch_size', type=int, default=8)
    parser.add_argument('--effective_batch_size',type=int, default=64)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--max_grad_norm', type=int, default=1)
    parser.add_argument('--learning_rate', type=float, default=6.25e-5)
    parser.add_argument('--warmup_proportion', type=float, default=0.002)
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--lm_coef', type=float, default=0.9)
    parser.add_argument('--n_valid', type=int, default=374)
    parser.add_argument('--bsz', type=int, default = 20)
    parser.add_argument('--bptt', type=int, default = 40)

    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()
#    print(args)

    model_type = 'gpt2'


    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

#    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    device = torch.device(type='cuda')
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

#    if not args.do_train and not args.do_eval:
#        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
        

    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = GPT2LMHeadModel.from_pretrained('gpt2').to('cuda')

    model.to(device)


    #file_train = args.train_dataset #'cnn_train.txt'
    #file_eval =  args.eval_dataset #'cnn_valid.txt'
    bptt = args.bptt
    bsz = args.bsz
    

#    X_eval, nbatch_eval = load_dataset(file_eval, tokenizer, bptt, bsz)
#    X_train, nbatch_train =  load_dataset(file_train, tokenizer, bptt, bsz)
    
    batches_eval, labels_eval, nbatch_eval = load_dataset(args.source_eval, args.target_eval, tokenizer, bptt, bsz)
    batches_train, labels_train, nbatch_train =  load_dataset(args.source_train, args.target_train, tokenizer, bptt, bsz)
    
    

    # Prepare optimizer
#    param_optimizer = list(model.parameters())
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    
    print('here 3')
#    num_train_optimization_steps = len(train_data) * args.num_train_epochs // args.train_batch_size
    num_train_optimization_steps = nbatch_train * args.num_train_epochs
    optimizer = OpenAIAdam(optimizer_grouped_parameters,
                           lr=args.learning_rate,
                           warmup=args.warmup_proportion,
                           max_grad_norm=args.max_grad_norm,
                           weight_decay=args.weight_decay,
                           t_total=num_train_optimization_steps)

    eval_loss_min = None
    print('here 4')
    model.to(device)

    nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
    model.train()
    for epoch_i in trange(int(args.num_train_epochs), desc="Epoch"):
        tr_loss = 0
        nb_tr_steps = 0
        
        for i_batch in tqdm(list(range(nbatch_train)), desc='Training epoch {}'.format(epoch_i)):
            batch = batches_train[i_batch]#X_train[:, i_batch*bsz:(1+i_batch)*bsz].permute(1,0)
            
            batch = batch.cuda()
            lm_labels = labels_train[i_batch].cuda()
            if batch.numel() == 0:
                break
            
            #loss = model(batch, lm_labels = labels_train[i_batch].cuda())
                            # TRY DOING IT MANUALLY
            loss_fct = CrossEntropyLoss(reduction = 'none')
            lm_logits,_ = model(batch)
            shift_logits = lm_logits[:, :-1, :].contiguous()
            shift_labels = batch[:,1:].contiguous()
            
            shift_labels_mask = (lm_labels[:,1:].contiguous().view(-1) != -1).float()
            
            loss_mat = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                        shift_labels.view(-1))
            loss = (loss_mat*shift_labels_mask).view(-1).sum()/shift_labels_mask.sum() # avg over non-masked indices
            
            loss.backward()
            
            # only step the model if you've gone through 'effective_batch_size' examples
            if (i_batch*args.train_batch_size) % args.effective_batch_size == 0 and i_batch != 0:
                optimizer.step()
                optimizer.zero_grad()
                
            tr_loss += loss.item()
            

            exp_average_loss = loss.item() if exp_average_loss is None else 0.7*exp_average_loss+0.3*loss.item()
            nb_tr_steps += 1
         
            
            
            ###
            # Evaluations
            ###
            
            
            if i_batch % 1000 == 0: # get eval score
                eval_loss = eval_model(model, nbatch_eval,batches_eval,labels_eval, bsz)
                
                # if eval_loss improves, save model
                if eval_loss_min is None or eval_loss < eval_loss_min:
                    eval_loss_min = eval_loss
                    
                    # save model if eval loss is lower
                    model_to_save = model
                    # If we save using the predefined names, we can load using `from_pretrained`
                    output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
                    output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        
                    torch.save(model_to_save.state_dict(), output_model_file)
                    to_json_file(model_to_save.config,output_config_file)
                
                print('eval_loss {}'.format(eval_loss))
                model.train()
                
            if i_batch % 200 == 0: # try generating from model 
                print("Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, optimizer.get_lr()[0]))

                model.eval()
                if model_type == 'gpt':
                    encode = lambda a: tokenizer.convert_tokens_to_ids(tokenizer.tokenize(a))
                    decode = tokenizer.decode
                elif model_type == 'gpt2':
                    encode = tokenizer.encode
                    decode = tokenizer.decode
                
                generate_from_model(encode, decode, model = model,model_type = model_type)
                model.train()
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader, Dataset
from tqdm import trange

import pytorch_pretrained_bert
from data_loader import get_data_loader
from model_sampler import print_samples
from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer, OpenAIAdam
from torch.utils.data import DataLoader, Dataset, Subset
model_name = 'gpt2'
enc = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)


model_name = 'gpt2'
enc = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
device='cpu'
beam_width = 130
stopwords = []

def to_list(tensor):
    return list(tensor.cpu().numpy())

def predict(line, max_predictions):
    """Give continuation of the line with at most max_predictions BPE tokens. Returns line extended with predictions of
     the model."""
Example #29
def run():
    parser = ArgumentParser()
    parser.add_argument("--model_type", type=str, default="gpt", help="gpt or gpt2")
    parser.add_argument("--model_checkpoint", type=str, default="", help="Path, url or short name of the model")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device (cuda or cpu)")
    parser.add_argument("--filename", type=str, default="data/instances_dev.pkl", help="File to use for decoding")
    parser.add_argument("--no_sample", action='store_true', help="Set to use greedy decoding instead of sampling")
    parser.add_argument("--max_length", type=int, default=50, help="Maximum length of the output utterances")
    parser.add_argument("--min_length", type=int, default=1, help="Minimum length of the output utterances")
    parser.add_argument("--seed", type=int, default=42, help="Seed")
    parser.add_argument("--temperature", type=int, default=0.7, help="Sampling softmax temperature")
    parser.add_argument("--top_k", type=int, default=0, help="Filter top-k tokens before sampling (<=0: no filtering)")
    parser.add_argument("--top_p", type=float, default=0.9, help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__file__)
    logger.info(pformat(args))

    random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    logger.info("Get pretrained model and tokenizer")

    if args.model_type == 'gpt2':
        tokenizer = GPT2Tokenizer.from_pretrained(args.model_checkpoint)
        model = GPT2LMHeadModel.from_pretrained(args.model_checkpoint)
    else:
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_checkpoint)
        model = OpenAIGPTLMHeadModel.from_pretrained(args.model_checkpoint)

    model.to(args.device)
    model.eval()

    data = get_dataset_from_file(tokenizer, args.filename)
    final_output_dict = {
        "version": "squash-2.0",
        "data": [{
            "paragraphs": []
        }]
    }
    question_number = 0
    # For all the instances corresponding to one paragraph, the model input format is: paragraph + answer + question.
    # The paragraph will be common across all of the instances.
    # "past" can be used to reuse the paragraph's precomputed hidden state in subsequent predictions.
    
    import copy
    
    prev_para_index = None
    past = None
    for inst in tqdm.tqdm(data):
        with torch.no_grad():
            current_para_index = inst['para_index']
            if current_para_index != prev_para_index:
                past = None
                current_inst = copy.deepcopy(inst)
                # only keep the paragraph details in the instance to get its hidden states
                current_inst['question'] = []
                current_inst['answer'] = []
                instance, _ = build_input_from_segments(current_inst, tokenizer, with_eos=False)
                input_ids = torch.tensor(instance['input_ids'][:-2], device=args.device).unsqueeze(0)
                token_type_ids = torch.tensor(instance['token_type_ids'][:-2], device=args.device).unsqueeze(0)
                _, past = model(input_ids, token_type_ids=token_type_ids, past=past)  # the output "past" holds the paragraph's hidden states
                prev_para_index = current_para_index
            output = sample_sequence(inst, tokenizer, model, args,past)

        original_paragraph = tokenizer.decode(output['paragraph'])
        generated_question = tokenizer.decode(output['question'], skip_special_tokens=True)
        original_answer = tokenizer.decode(output['answer'], skip_special_tokens=True)
        para_index = inst['para_index']

        # Output in a SQUAD-like format with questions clumped together under their parent paragraph
        if len(final_output_dict["data"][0]["paragraphs"]) > para_index:
            # verify whether the paragraph text is identical
            assert original_paragraph == final_output_dict["data"][0]["paragraphs"][para_index]['context']
            # append the question answer pair
            final_output_dict["data"][0]["paragraphs"][para_index]['qas'].append({
                'id': 'question_%d' % question_number,
                'question': generated_question,
                'answers': [{
                    'text': original_answer,
                    'answer_start': original_paragraph.index(original_answer)
                }],
                'class': output['class'],
                'algorithm': output['algorithm'],
                'is_impossible': False
            })
        else:
            # add a new question to the list of QA pairs
            final_output_dict['data'][0]['paragraphs'].append({
                'context': original_paragraph,
                'qas': [{
                    'id': 'question_%d' % question_number,
                    'question': generated_question,
                    'answers': [{
                        'text': original_answer,
                        'answer_start': original_paragraph.index(original_answer)
                    }],
                    'class': output['class'],
                    'algorithm': output['algorithm'],
                    'is_impossible': False
                }]
            })

        question_number += 1

    with open("squash/temp/generated_questions.json", "w") as f:
        f.write(json.dumps(final_output_dict))
Example #30
def run_model():
    print(socket.gethostname())

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        default='',
        help='pretrained model name or path to local checkpoint')
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--load_checkpoint", '-c', type=str, default='')
    parser.add_argument("--fp16", type=boolean_string, default=False)
    parser.add_argument("--test_file",
                        '-t',
                        type=str,
                        default=None,
                        help='input file for testing')
    parser.add_argument("--output_file",
                        '-o',
                        type=str,
                        default=None,
                        help='output file for testing')
    parser.add_argument("--normalize_data", type=boolean_string, default=True)
    parser.add_argument("--batch_size", '-b', type=int, default=256)
    parser.add_argument("--max_seq_length", type=int, default=512)
    parser.add_argument("--no_token_id", action='store_true')
    parser.add_argument("--no_attn_mask", action='store_true')
    parser.add_argument("--no_eos", action='store_true')

    parser.add_argument("--generation_length", type=int, default=20)
    parser.add_argument("--temperature", type=float, default=1)
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument('--unconditional',
                        action='store_true',
                        help='If true, unconditional generation.')
    parser.add_argument('--is_sampling',
                        action='store_true',
                        help='If true, sampling for generation.')
    parser.add_argument('--output_ref',
                        action='store_true',
                        help='If true, output ref')

    #BEAM
    parser.add_argument("--beam",
                        action='store_true',
                        help='If true, beam search')
    parser.add_argument("--beam_width", type=int, default=1)

    parser.add_argument('--use_gpu', action='store_true')
    parser.add_argument("--gpu", type=int, default=0)
    parser.add_argument('--config', help='JSON config file')
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--cstr_decode', action='store_true')
    parser.add_argument("--bonus", type=float, default=0.0)

    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    if args.config is not None:
        # override argparse defaults by config JSON
        opts = json.load(open(args.config))
        for k, v in opts.items():
            if isinstance(v, str):
                # PHILLY ENV special cases
                if 'PHILLY_JOB_DIRECTORY' in v:
                    v = v.replace('PHILLY_JOB_DIRECTORY',
                                  os.environ['PHILLY_JOB_DIRECTORY'])
                elif 'PHILLY_LOG_DIRECTORY' in v:
                    v = v.replace('PHILLY_LOG_DIRECTORY',
                                  os.environ['PHILLY_LOG_DIRECTORY'])
            setattr(args, k, v)

        # command line should override config JSON
        argv = sys.argv[1:]
        overrides, _ = parser.parse_known_args(argv)
        for k, v in vars(overrides).items():
            if f'--{k}' in argv:
                setattr(args, k, v)
        # setattr(args, 'local_rank', overrides.local_rank)


# do normal parsing

    device = torch.device(
        "cuda" if torch.cuda.is_available() and args.use_gpu else "cpu")
    n_gpu = torch.cuda.device_count()
    args.device, args.n_gpu = device, n_gpu
    print(args)

    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    config = GPT2Config.from_json_file(
        os.path.join(args.model_name_or_path, 'config.json'))
    enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)
    model = load_model(GPT2LMHeadModel(config),
                       args.load_checkpoint,
                       args,
                       verbose=True)
    model.to(device)
    model.eval()

    if args.test_file:
        eval_dataloader = get_eval_list_same_length_with_order(
            args.test_file, enc, args.batch_size, True)

        model.eval()
        outs = []
        targets = []
        loss_all = []
        ppl_all = []
        sources = []
        conv_ids = []
        with torch.no_grad():
            with tqdm.tqdm(total=len(eval_dataloader), desc=f"Test") as pbar:
                for step, batch in enumerate(
                        tqdm.tqdm(eval_dataloader, desc="Iteration")):

                    new_batch = []
                    for t in batch:
                        if isinstance(t, list):
                            new_batch.append(t)
                        else:
                            new_batch.append(t.to(device))

                    input_ids, position_ids, token_ids, attn_masks, label_ids, context_len, conv_id = new_batch

                    if args.no_token_id:
                        token_ids = None
                    if args.no_eos:
                        input_ids = input_ids[:, :-1]
                    if args.no_attn_mask:
                        attn_masks = None
                    if args.beam:
                        out = beam_search_naive(model,
                                                input_ids,
                                                position_ids=position_ids,
                                                token_type_ids=token_ids,
                                                attn_masks=attn_masks,
                                                length=args.generation_length,
                                                beam_width=args.beam_width,
                                                device=args.device,
                                                use_bonus=args.cstr_decode,
                                                bonus=args.bonus,
                                                enc=enc)
                    else:
                        out = generate_sequence(model,
                                                input_ids,
                                                position_ids=position_ids,
                                                token_type_ids=token_ids,
                                                attn_masks=attn_masks,
                                                length=args.generation_length,
                                                start_token=None,
                                                temperature=args.temperature,
                                                top_k=args.top_k,
                                                sample=args.is_sampling,
                                                use_bonus=args.cstr_decode,
                                                bonus=args.bonus,
                                                enc=enc)

                    sources.extend(input_ids.cpu().numpy())
                    out = out.tolist()
                    outs.extend(out)
                    targets.extend(label_ids)
                    conv_ids.extend(conv_id.cpu().numpy())

                conv_id_map = {conv_ids[i]: i for i in range(len(conv_ids))}
                val_src = [
                    enc.decode(
                        cut_seq_to_eos(s)).encode('utf-8').decode('utf-8')
                    for s in sources
                ]
                #print(len(val_src),len(targets))

                val_set = [
                    enc.decode(s).encode('utf-8').decode('utf-8')
                    for s in targets
                ]
                gen = [
                    enc.decode(
                        cut_seq_to_eos(s)).encode('utf-8').decode('utf-8')
                    for s in outs
                ]

                val_src_orders = [
                    val_src[conv_id_map[i]] for i in sorted(conv_id_map)
                ]
                val_set_orders = [
                    val_set[conv_id_map[i]] for i in sorted(conv_id_map)
                ]
                gen_orders = [gen[conv_id_map[i]] for i in sorted(conv_id_map)]

                print("=" * 40 + " SAMPLE " + "=" * 40)
                src = enc.decode([
                    x for x in input_ids[-1].cpu().numpy() if x != 0
                ]).encode('utf-8').decode('utf-8')
                gt = val_set[-1]
                resp = gen[-1]
                print(
                    f"Source: \t {src} \n Oracle: \t {gt} \n Resp: \t {resp}\n"
                )
                if args.output_file:
                    with open(args.output_file + '.resp.txt', "w") as resp_f:
                        for i, r in enumerate(gen_orders):
                            r = re.sub("\n", "", r)
                            if args.output_ref:
                                # import pdb; pdb.set_trace()
                                resp_f.write(val_src_orders[i] + '\t' +
                                             val_set_orders[i] + '\t' + r +
                                             '\n')
                            else:
                                resp_f.write(r + '\n')
                print("=" * 80)

                sys.stdout.flush()

    else:
        generated = 0
        while True:
            raw_text = input("Model prompt >>> ")
            while not raw_text:
                print('Prompt should not be empty!')
                raw_text = input("Model prompt >>> ")
            context_tokens = enc.encode(raw_text) + [EOS_ID]
            context_tokens = torch.tensor(context_tokens,
                                          device=device,
                                          dtype=torch.long).unsqueeze(
                                              0)  #.repeat(batch_size, 1)
            generated += 1
            position_ids = torch.arange(0,
                                        context_tokens.size(-1),
                                        dtype=torch.long,
                                        device=context_tokens.device)
            token_ids = None if args.no_token_id else torch.zeros_like(
                context_tokens, dtype=torch.long, device=context_tokens.device)
            if args.beam:
                out = beam_search_naive(model,
                                        context_tokens,
                                        position_ids=None,
                                        token_type_ids=token_ids,
                                        length=args.generation_length,
                                        beam_width=args.beam_width,
                                        device=args.device)
            else:
                out = generate_sequence(model,
                                        context_tokens,
                                        position_ids=None,
                                        token_type_ids=token_ids,
                                        length=args.generation_length,
                                        start_token=None,
                                        temperature=args.temperature,
                                        top_k=args.top_k,
                                        sample=args.is_sampling)
            out = out.tolist()
            text = enc.decode(cut_seq_to_eos(
                out[0])).encode('utf-8').decode('utf-8')
            print("=" * 40 + " RESPONSE " + str(generated) + " " + "=" * 40)
            print(text)
            print("=" * 80)
Example #31
File: bert.py  Project: distillbert/code
 def __init__(self):
     self.model = GPT2LMHeadModel.from_pretrained('gpt2')
     self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
     self.model.cuda()
     self.model.eval()
#   "vocab_size": 50257
# }

## Predict hidden states features for each layer
with torch.no_grad():
    hidden_states_1, past = model(tokens_tensor_1)
    print(hidden_states_1.shape)  # torch.Size([1, 6, 768])
    print(len(past), past[0].shape)  # 12 torch.Size([2, 1, 12, 6, 64])
    hidden_states_2, past = model(tokens_tensor_2, past=past)
    print(hidden_states_2.shape)  # torch.Size([1, 8, 768])
    print(len(past), past[0].shape)  # 12 torch.Size([2, 1, 12, 14, 64]); 14 = 8 + 6
    ## past can be used to reuse precomputed hidden state in subsequent predictions (see the beam-search examples in the run_gpt2.py example).

##################################################################
## GPT2LMHeadModel
model = GPT2LMHeadModel.from_pretrained('/Users/coder352/datasets/WordVec/pytorch_pretrained_bert/gpt2/')
model.eval()

## Predict all tokens
with torch.no_grad():
    predictions_1, past = model(tokens_tensor_1)
    predictions_2, past = model(tokens_tensor_2, past=past)
    print(predictions_2.shape)  # torch.Size([1, 8, 50257])
    print(len(past), past[0].shape)  # 12 torch.Size([2, 1, 12, 14, 64])

## get the predicted last token
predicted_index = torch.argmax(predictions_2[0, -1, :]).item(); print(predicted_index)  # 508
predicted_token = tokenizer.decode([predicted_index]); print(predicted_token)  #  who

##################################################################
## Transformer-XL