Example #1
def run_model():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name_or_path', type=str, default='', help='pretrained model name or path to local checkpoint')
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--load_checkpoint", '-c', type=str, default='')
    parser.add_argument("--fp16", type=boolean_string, default=False)
    parser.add_argument("--max_seq_length", type=int, default=128)
    
    parser.add_argument("--generation_length", type=int, default=20)
    parser.add_argument("--max_history", type=int, default=2)

    parser.add_argument("--temperature", type=float, default=1)
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument("--top_p", type=float, default=0.9)

    parser.add_argument('--use_gpu', action='store_true')
    parser.add_argument("--gpu", type=int, default=0)

    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)


    device = torch.device("cuda" if torch.cuda.is_available() and args.use_gpu else "cpu")
    n_gpu = torch.cuda.device_count()
    args.device, args.n_gpu = device, n_gpu

    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    #### load the GPT-2 model 
    config = GPT2Config.from_json_file(os.path.join(args.model_name_or_path, 'config.json'))
    enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)
    model = load_model(GPT2LMHeadModel(config), args.load_checkpoint, args, verbose=True)
    model.to(device)
    model.eval()

    history = []
    while True:
        raw_text = input("USR >>> ")
        while not raw_text:
            print('Prompt should not be empty!')
            raw_text = input("USR >>> ")
        if raw_text.lower() == 'quit':
            print('SYS >>> Goodbye!')
            break
        history.append(raw_text)
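        # Flatten the dialogue history into one token sequence, separating turns with EOS.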
        context_tokens = sum([enc.encode(h) + [EOS_ID] for h in history],[]) #+ [EOS_ID]
        context_tokens = torch.tensor(context_tokens, device=device, dtype=torch.long).unsqueeze(0)
        position_ids = torch.arange(0, context_tokens.size(-1), dtype=torch.long, device=context_tokens.device)

        out = generate_sequence(model, context_tokens, position_ids=position_ids,
                                length=args.generation_length, temperature=args.temperature, 
                                top_k=args.top_k, top_p= args.top_p) 

        out = out.tolist()                        
        text = enc.decode(cut_seq_to_eos(out[0])).encode('ascii','ignore').decode('ascii')
        print("SYS >>> ", text)
        history.append(text)
        history = history[-(2*args.max_history+1):]
Example #2
def evaluate_models_from(GPT_saved_models_folder, eval_file, enc, args):
    # Prepare eval data
    eval_dataloader_loss = DynamicBatchingLoader(eval_file, enc,
                                                 args.normalize_data,
                                                 args.eval_batch_size,
                                                 args.max_seq_length)

    eval_dataloader_gen = get_eval_list_same_length(eval_file, enc,
                                                    args.eval_batch_size, True)
    # read eval_loss log file
    eval_loss_log_file = os.path.join(GPT_saved_models_folder, "eval_log.txt")
    min_ckpt_old_perplexity = None
    min_ckpt_new_perplexity = None
    min_old_perplexity = 1000000.0
    min_new_perplexity = 1000000.0

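    # eval_log.txt is comma-separated: epoch, checkpoint number, an unused field, loss, perplexity.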
    with open(eval_loss_log_file, "r") as reader:
        head_row = next(reader)  # skip the header row
        for line in reader:
            line = line.strip()
            epoch, ckpt_no, _, loss, perplexity = line.split(",")
            epoch = int(epoch)
            ckpt_no = int(ckpt_no) - 1
            loss = float(loss)
            perplexity = float(perplexity)
            print(ckpt_no, loss, perplexity, end="")
            if min_old_perplexity > perplexity:
                min_old_perplexity = perplexity
                min_ckpt_old_perplexity = ckpt_no
            # calculate new loss and perplexity
            model_filename = "GP2-pretrain-step-{}.pkl"
            model = load_model(GPT2LMHeadModel(config),
                               os.path.join(GPT_saved_models_folder,
                                            model_filename.format(ckpt_no)),
                               args,
                               verbose=True)
            eval_loss, eval_ppl = eval_model_loss(model, enc,
                                                  eval_dataloader_loss, epoch,
                                                  args)
            if min_new_perplexity > eval_ppl:
                min_new_perplexity = eval_ppl
                min_ckpt_new_perplexity = ckpt_no
    print("Old best ckpt and perplexity:", min_ckpt_old_perplexity,
          min_old_perplexity)
    print("New best ckpt and perplexity:", min_ckpt_new_perplexity,
          min_new_perplexity)
    return min_ckpt_old_perplexity, min_old_perplexity, min_ckpt_new_perplexity, min_new_perplexity
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--telegram_token', type=str, help='Telegram token')
    parser.add_argument('--model_name_or_path', type=str, default='', help='pretrained model name or path to local checkpoint')
    parser.add_argument('--tokenizer-path', type=str, help='Path to vocabulary')
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--load_checkpoint", '-c', type=str, default='')
    parser.add_argument("--fp16", type=boolean_string, default=False)
    parser.add_argument("--max_seq_length", type=int, default=128)

    parser.add_argument("--generation_length", type=int, default=20)
    parser.add_argument("--max_history", type=int, default=3)

    parser.add_argument("--temperature", type=float, default=1)
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument("--top_p", type=float, default=0.9)

    parser.add_argument('--use_gpu', action='store_true')
    parser.add_argument("--gpu", type=int, default=0)

    parser.add_argument('--proxy_url', type=str, help='Proxy URL')
    parser.add_argument('--proxy_user', type=str, help='Proxy user')
    parser.add_argument('--proxy_password', type=str, help='Proxy password')

    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    device = torch.device("cuda" if torch.cuda.is_available() and args.use_gpu else "cpu")
    n_gpu = torch.cuda.device_count()
    args.device, args.n_gpu = device, n_gpu

    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    #### load the GPT-2 model 
    model_config = GPT2Config.from_json_file(os.path.join(args.model_name_or_path, 'config.json'))
    tokenizer = RubertaTokenizer(vocab_file=args.tokenizer_path)
    model = load_model(GPT2LMHeadModel(model_config), args.load_checkpoint, args, verbose=True)
    model.to(device)
    model.eval()

    # Run Telegram bot
    bot = TelegramBot(model, tokenizer, args)
    bot.run_chat()
Example #4
def convert_to_dialogpt(args):
    config = GPT2Config.from_json_file(args.config_path)
    model = load_model(GPT2LMHeadModel(config), None, args, verbose=True)

    model_state_dict = torch.load(args.megatron_checkpoint_path)

    model_state_dict = fix_state_dict_namespace(model_state_dict['model'])
    model_state_dict = fix_model_shapes(model_state_dict)

    start_model = model
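    # Megatron checkpoints may contain only the transformer weights (keys without the
    # 'transformer.' prefix); in that case load them into model.transformer directly.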
    if (hasattr(model, "transformer")
        and all(not s.startswith('transformer.')
                for s in model_state_dict.keys())):
        logger.info('loading transformer only')
        start_model = model.transformer
    start_model.load_state_dict(model_state_dict)

    torch.save(start_model.state_dict(), args.dialogpt_output_path)
Example #5
    #     args.train_input_file, args.train_batch_size,
    #     args.max_seq_length)

eval_dataloader_loss = DynamicBatchingLoader(args.eval_input_file, enc,
                                             args.normalize_data,
                                             args.eval_batch_size,
                                             args.max_seq_length)

eval_dataloader_gen = get_eval_list_same_length(args.eval_input_file, enc,
                                                args.eval_batch_size, True)

#########################################################################
# Prepare Model and Optimizer
##########################################################################
model = load_model(GPT2LMHeadModel(config),
                   args.init_checkpoint,
                   args,
                   verbose=True)
if args.local_rank != -1:
    # when from scratch make sure initial models are the same
    params = [p.data for p in model.parameters()]
    all_reduce_and_rescale_tensors(params,
                                   float(torch.distributed.get_world_size()))

model_parameters = filter(lambda p: p.requires_grad, model.parameters())
total_params = sum([np.prod(p.size()) for p in model_parameters])
logger.info('Number of parameter = {}'.format(total_params))

param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'ln']  # no decay for bias and LayerNorm (ln)
optimizer_grouped_parameters = [{
    'params':
    [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
Example #6
def run_model():
    print(socket.gethostname())

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        default='',
        help='pretrained model name or path to local checkpoint')
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--load_checkpoint", '-c', type=str, default='')
    parser.add_argument("--fp16", type=boolean_string, default=False)
    parser.add_argument("--test_file",
                        '-t',
                        type=str,
                        default=None,
                        help='input file for testing')
    parser.add_argument("--output_file",
                        '-o',
                        type=str,
                        default=None,
                        help='output file for testing')
    parser.add_argument("--normalize_data", type=boolean_string, default=True)
    parser.add_argument("--batch_size", '-b', type=int, default=256)
    parser.add_argument("--max_seq_length", type=int, default=512)
    parser.add_argument("--no_token_id", action='store_true')
    parser.add_argument("--no_attn_mask", action='store_true')
    parser.add_argument("--no_eos", action='store_true')

    parser.add_argument("--generation_length", type=int, default=20)
    parser.add_argument("--temperature", type=float, default=1)
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument('--unconditional',
                        action='store_true',
                        help='If true, unconditional generation.')
    parser.add_argument('--is_sampling',
                        action='store_true',
                        help='If true, sampling for generation.')
    parser.add_argument('--output_ref',
                        action='store_true',
                        help='If true, output ref')

    #BEAM
    parser.add_argument("--beam",
                        action='store_true',
                        help='If true, beam search')
    parser.add_argument("--beam_width", type=int, default=1)

    parser.add_argument('--use_gpu', action='store_true')
    parser.add_argument("--gpu", type=int, default=0)
    parser.add_argument('--config', help='JSON config file')
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--cstr_decode', action='store_true')
    parser.add_argument("--bonus", type=float, default=0.0)

    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    if args.config is not None:
        # override argparse defaults by config JSON
        opts = json.load(open(args.config))
        for k, v in opts.items():
            if isinstance(v, str):
                # PHILLY ENV special cases
                if 'PHILLY_JOB_DIRECTORY' in v:
                    v = v.replace('PHILLY_JOB_DIRECTORY',
                                  os.environ['PHILLY_JOB_DIRECTORY'])
                elif 'PHILLY_LOG_DIRECTORY' in v:
                    v = v.replace('PHILLY_LOG_DIRECTORY',
                                  os.environ['PHILLY_LOG_DIRECTORY'])
            setattr(args, k, v)

        # command line should override config JSON
        argv = sys.argv[1:]
        overrides, _ = parser.parse_known_args(argv)
        for k, v in vars(overrides).items():
            if f'--{k}' in argv:
                setattr(args, k, v)
        # setattr(args, 'local_rank', overrides.local_rank)


    # do normal parsing

    device = torch.device(
        "cuda" if torch.cuda.is_available() and args.use_gpu else "cpu")
    n_gpu = torch.cuda.device_count()
    args.device, args.n_gpu = device, n_gpu
    print(args)

    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    config = GPT2Config.from_json_file(
        os.path.join(args.model_name_or_path, 'config.json'))
    enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)
    model = load_model(GPT2LMHeadModel(config),
                       args.load_checkpoint,
                       args,
                       verbose=True)
    model.to(device)
    model.eval()

    if args.test_file:
        eval_dataloader = get_eval_list_same_length_with_order(
            args.test_file, enc, args.batch_size, True)

        model.eval()
        outs = []
        targets = []
        loss_all = []
        ppl_all = []
        sources = []
        conv_ids = []
        with torch.no_grad():
            with tqdm.tqdm(total=len(eval_dataloader), desc=f"Test") as pbar:
                for step, batch in enumerate(
                        tqdm.tqdm(eval_dataloader, desc="Iteration")):

                    new_batch = []
                    for t in batch:
                        if isinstance(t, list):
                            new_batch.append(t)
                        else:
                            new_batch.append(t.to(device))

                    input_ids, position_ids, token_ids, attn_masks, label_ids, context_len, conv_id = new_batch

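                    # Optionally drop token type ids, the trailing EOS token, or the attention mask.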
                    if args.no_token_id:
                        token_ids = None
                    if args.no_eos:
                        input_ids = input_ids[:, :-1]
                    if args.no_attn_mask:
                        attn_masks = None
                    if args.beam:
                        out = beam_search_naive(model,
                                                input_ids,
                                                position_ids=position_ids,
                                                token_type_ids=token_ids,
                                                attn_masks=attn_masks,
                                                length=args.generation_length,
                                                beam_width=args.beam_width,
                                                device=args.device,
                                                use_bonus=args.cstr_decode,
                                                bonus=args.bonus,
                                                enc=enc)
                    else:
                        out = generate_sequence(model,
                                                input_ids,
                                                position_ids=position_ids,
                                                token_type_ids=token_ids,
                                                attn_masks=attn_masks,
                                                length=args.generation_length,
                                                start_token=None,
                                                temperature=args.temperature,
                                                top_k=args.top_k,
                                                sample=args.is_sampling,
                                                use_bonus=args.cstr_decode,
                                                bonus=args.bonus,
                                                enc=enc)

                    sources.extend(input_ids.cpu().numpy())
                    out = out.tolist()
                    outs.extend(out)
                    targets.extend(label_ids)
                    conv_ids.extend(conv_id.cpu().numpy())

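                # Map each conversation id back to its batch position so outputs can be re-ordered.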
                conv_id_map = {conv_ids[i]: i for i in range(len(conv_ids))}
                val_src = [
                    enc.decode(
                        cut_seq_to_eos(s)).encode('utf-8').decode('utf-8')
                    for s in sources
                ]
                #print(len(val_src),len(targets))

                val_set = [
                    enc.decode(s).encode('utf-8').decode('utf-8')
                    for s in targets
                ]
                gen = [
                    enc.decode(
                        cut_seq_to_eos(s)).encode('utf-8').decode('utf-8')
                    for s in outs
                ]

                val_src_orders = [
                    val_src[conv_id_map[i]] for i in sorted(conv_id_map)
                ]
                val_set_orders = [
                    val_set[conv_id_map[i]] for i in sorted(conv_id_map)
                ]
                gen_orders = [gen[conv_id_map[i]] for i in sorted(conv_id_map)]

                print("=" * 40 + " SAMPLE " + "=" * 40)
                src = enc.decode([
                    x for x in input_ids[-1].cpu().numpy() if x != 0
                ]).encode('utf-8').decode('utf-8')
                gt = val_set[-1]
                resp = gen[-1]
                print(
                    f"Source: \t {src} \n Oracle: \t {gt} \n Resp: \t {resp}\n"
                )
                if args.output_file:
                    with open(args.output_file + '.resp.txt', "w") as resp_f:
                        for i, r in enumerate(gen_orders):
                            r = re.sub("\n", "", r)
                            if args.output_ref:
                                # import pdb; pdb.set_trace()
                                resp_f.write(val_src_orders[i] + '\t' +
                                             val_set_orders[i] + '\t' + r +
                                             '\n')
                            else:
                                resp_f.write(r + '\n')
                print("=" * 80)

                sys.stdout.flush()

    else:
        generated = 0
        while True:
            raw_text = input("Model prompt >>> ")
            while not raw_text:
                print('Prompt should not be empty!')
                raw_text = input("Model prompt >>> ")
            context_tokens = enc.encode(raw_text) + [EOS_ID]
            context_tokens = torch.tensor(context_tokens,
                                          device=device,
                                          dtype=torch.long).unsqueeze(
                                              0)  #.repeat(batch_size, 1)
            generated += 1
            position_ids = torch.arange(0,
                                        context_tokens.size(-1),
                                        dtype=torch.long,
                                        device=context_tokens.device)
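            # Token type ids are all zeros for the single prompt unless --no_token_id is set.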
            token_ids = None if args.no_token_id else torch.zeros_like(
                context_tokens, dtype=torch.long, device=context_tokens.device)
            if args.beam:
                out = beam_search_naive(model,
                                        context_tokens,
                                        position_ids=None,
                                        token_type_ids=token_ids,
                                        length=args.generation_length,
                                        beam_width=args.beam_width,
                                        device=args.device)
            else:
                out = generate_sequence(model,
                                        context_tokens,
                                        position_ids=None,
                                        token_type_ids=token_ids,
                                        length=args.generation_length,
                                        start_token=None,
                                        temperature=args.temperature,
                                        top_k=args.top_k,
                                        sample=args.is_sampling)
            out = out.tolist()
            text = enc.decode(cut_seq_to_eos(
                out[0])).encode('utf-8').decode('utf-8')
            print("=" * 40 + " RESPONSE " + str(generated) + " " + "=" * 40)
            print(text)
            print("=" * 80)
Example #7
    tokenizer = GPT2Tokenizer.from_pretrained(args.model_name_or_path)

    # load
    config = GPT2Config.from_json_file(
        os.path.join(args.model_name_or_path, 'config.json'))
    config.no_token_id = args.no_token_id
    config.persona_emb_type = args.persona_emb_type
    config.PersonaNum = args.PersonaNum

    config.do_persona_linear = args.do_persona_linear
    config.persona_n_embd = args.persona_n_embd
    args.n_gpu = 1
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    args.device = device

    model = load_model(GPT2LMHeadModel(config), model_file, args, verbose=True)

    # fix misused key value
    model.eval()
    model.to('cuda')

    # decode_size = len(open(decode_file,'rU').readlines())
    output_lines = []
    # with open(decode_file,'r') as fin:
    with codecs.open(decode_file, 'r', encoding='utf-8') as fin:
        print(decode_file)
        lines = fin.readlines()
        assert args.decode_num <= len(lines)
        if args.decode_num == -1:
            decode_size = len(lines)
        else:
            decode_size = args.decode_num
Example #8
def run_model():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        default='',
        help='pretrained model name or path to local checkpoint')
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--load_checkpoint", '-c', type=str, default='')
    parser.add_argument("--fp16", type=boolean_string, default=False)
    parser.add_argument("--max_seq_length", type=int, default=128)

    parser.add_argument("--generation_length", type=int, default=20)
    parser.add_argument("--max_history", type=int, default=2)

    parser.add_argument("--temperature", type=float, default=1)
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument("--top_p", type=float, default=0.9)

    parser.add_argument('--use_gpu', action='store_true')
    parser.add_argument("--gpu", type=int, default=0)

    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    device = torch.device(
        "cuda" if torch.cuda.is_available() and args.use_gpu else "cpu")
    n_gpu = torch.cuda.device_count()
    args.device, args.n_gpu = device, n_gpu

    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    #### load the GPT-2 model
    config = GPT2Config.from_json_file(
        os.path.join(args.model_name_or_path, 'config.json'))
    enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)
    model = load_model(GPT2LMHeadModel(config),
                       args.load_checkpoint,
                       args,
                       verbose=True)
    model.to(device)
    model.eval()

    bot = DialogptIrcBot(CHANNEL, NICKNAME, REALNAME, SERVER, PORT)
    thread_dialog = threading.Thread(target=bot.start)
    thread_dialog.daemon = True
    thread_dialog.start()

    history = []
    sleep(1)
    while bot.alive:
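        # Poll the IRC bot: a new, unanswered question appears as a single-element entry at index num.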
        a = 0
        num = bot.num
        if bot.quest_rep:
            if len(bot.quest_rep) == num + 1:
                if len(bot.quest_rep[num]) == 1:
                    a = 1
                    question = bot.quest_rep[num][0]

        if a == 1:
            try:
                history.append(question)
                context_tokens = sum(
                    [enc.encode(h) + [EOS_ID] for h in history], [])
                context_tokens = torch.tensor(context_tokens,
                                              device=device,
                                              dtype=torch.long).unsqueeze(0)
                position_ids = torch.arange(0,
                                            context_tokens.size(-1),
                                            dtype=torch.long,
                                            device=context_tokens.device)

                out = generate_sequence(model,
                                        context_tokens,
                                        position_ids=position_ids,
                                        length=args.generation_length,
                                        temperature=args.temperature,
                                        top_k=args.top_k,
                                        top_p=args.top_p)

                out = out.tolist()
                text = enc.decode(cut_seq_to_eos(out[0])).encode(
                    'ascii', 'ignore').decode('ascii')

                history.append(text)
                history = history[-(2 * args.max_history + 1):]

            except Exception:
                text = "Je ne comprends pas la question!"

            # Send the response back to the bot
            print("\nQuestion n°:", num)
            print("Question:", bot.quest_rep[num])
            print("Response:", text)
            bot.quest_rep[num].append(text)
Example #9
# eval_dataloader_loss = DynamicBatchingLoader(
#     args.eval_input_file, enc, args.normalize_data,
#     args.eval_batch_size, args.max_seq_length)

# eval_dataloader_gen = get_eval_list_same_length(
#     args.eval_input_file, enc, args.eval_batch_size, True)

#########################################################################
# Prepare Model and Optimizer
##########################################################################

gpt2_model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
gpt2_model.resize_token_embeddings(len(enc))

model = load_model(gpt2_model, args.init_checkpoint, args, verbose=True)
if args.local_rank != -1:
    # when from scratch make sure initial models are the same
    params = [p.data for p in model.parameters()]
    all_reduce_and_rescale_tensors(params,
                                   float(torch.distributed.get_world_size()))

model_parameters = filter(lambda p: p.requires_grad, model.parameters())
total_params = sum([np.prod(p.size()) for p in model_parameters])
logger.info('Number of parameter = {}'.format(total_params))

param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'ln']  # no decay for bias and LayerNorm (ln)
optimizer_grouped_parameters = [{
    'params':
    [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
Example #10
device = torch.device(
    "cuda" if torch.cuda.is_available() and use_gpu else "cpu")
n_gpu = torch.cuda.device_count()
fp16 = False

np.random.seed(42)
torch.random.manual_seed(42)
torch.cuda.manual_seed(42)

#### load the GPT-2 model
config = GPT2Config.from_json_file(
    os.path.join(model_name_or_path, 'config.json'))
enc = GPT2Tokenizer.from_pretrained(model_name_or_path)
model = load_model(GPT2LMHeadModel(config),
                   load_checkpoint,
                   n_gpu,
                   device,
                   fp16,
                   verbose=True)
model.to(device)
model.eval()

history = []
while True:
    raw_text = input("USR >>> ")
    while not raw_text:
        print('Prompt should not be empty!')
        raw_text = input("USR >>> ")
    history.append(raw_text)
    context_tokens = sum([enc.encode(h) + [EOS_ID] for h in history],
                         [])  #+ [EOS_ID]
    context_tokens = torch.tensor(context_tokens,
                                  device=device,
                                  dtype=torch.long).unsqueeze(0)