Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='which GPUs to use')
    parser.add_argument('--model_config', default='config/model_config_small.json', type=str, required=False,
                        help='path to the model config file')
    parser.add_argument('--tokenizer_path', default='cache/vocab_small.txt', type=str, required=False, help='path to the vocabulary file')
    parser.add_argument('--raw_data_path', default='data/train.json', type=str, required=False, help='raw training corpus')
    parser.add_argument('--tokenized_data_path', default='data/tokenized/', type=str, required=False,
                        help='where to store the tokenized corpus')
    parser.add_argument('--raw', action='store_true', help='tokenize the raw corpus first')
    parser.add_argument('--epochs', default=5, type=int, required=False, help='number of training epochs')
    parser.add_argument('--batch_size', default=8, type=int, required=False, help='training batch size')
    parser.add_argument('--lr', default=1.5e-4, type=float, required=False, help='learning rate')
    parser.add_argument('--warmup_steps', default=2000, type=int, required=False, help='number of warmup steps')
    parser.add_argument('--log_step', default=1, type=int, required=False, help='how many steps between loss reports; set to a multiple of gradient accumulation')
    parser.add_argument('--stride', default=768, type=int, required=False, help='stride of the sliding window over the training data')
    parser.add_argument('--gradient_accumulation', default=1, type=int, required=False, help='gradient accumulation steps')
    parser.add_argument('--fp16', action='store_true', help='mixed precision')
    parser.add_argument('--fp16_opt_level', default='O1', type=str, required=False)
    parser.add_argument('--max_grad_norm', default=1.0, type=float, required=False)
    parser.add_argument('--num_pieces', default=100, type=int, required=False, help='how many pieces to split the training corpus into')
    parser.add_argument('--min_length', default=20, type=int, required=False, help='minimum article length to include')
    parser.add_argument('--n_ctx', default=50, type=int, required=False, help='training sample length')
    parser.add_argument('--output_dir', default='model/', type=str, required=False, help='model output path')
    parser.add_argument('--pretrained_model', default='', type=str, required=False, help='path of the pretrained model to start training from')
    parser.add_argument('--writer_dir', default='tensorboard_summary/', type=str, required=False, help='TensorBoard path')
    parser.add_argument('--segment', action='store_true', help='segment Chinese text by word')
    parser.add_argument('--bpe_token', action='store_true', help='subword')
    parser.add_argument('--padding', action='store_true', help='padding')
    parser.add_argument('--encoder_json', default="tokenizations/encoder.json", type=str, help="encoder.json")
    parser.add_argument('--vocab_bpe', default="tokenizations/vocab.bpe", type=str, help="vocab.bpe")
    parser.add_argument('--max_steps_perEpoch_perPiece', default=1000000, type=int, required=False)
    parser.add_argument('--steps_savemodel', default=10000, type=int, required=False, help='save the model every this many steps')
    args = parser.parse_args()
    print('args:\n' + args.__repr__())

    if args.segment:
        from tokenizations import tokenization_bert_word_level as tokenization_bert
    else:
        from tokenizations import tokenization_bert
    if args.bpe_token:
        full_tokenizer = get_encoder(args.encoder_json, args.vocab_bpe)
    else:
        full_tokenizer = tokenization_bert.BertTokenizer(vocab_file=args.tokenizer_path)
    full_tokenizer.max_len = 999999
    raw_data_path = args.raw_data_path
    tokenized_data_path = args.tokenized_data_path
    num_pieces = args.num_pieces
    min_length = args.min_length
    n_ctx = args.n_ctx
    padding = args.padding
    print('building files')
    build_files(data_path=raw_data_path, tokenized_data_path=tokenized_data_path, num_pieces=num_pieces,
                full_tokenizer=full_tokenizer, min_length=min_length, n_ctx=n_ctx, padding=padding)
    print('files built')
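Example #1 only prepares the tokenized corpus; the build_files helper it calls is defined elsewhere in the repository and is not shown in the listing. Below is a minimal sketch of what such a helper could look like, assuming the raw corpus is a JSON list of document strings and that pad id 0 is used when --padding is set; these details are assumptions, not the repository's actual implementation.

import json
import os


def build_files(data_path, tokenized_data_path, num_pieces, full_tokenizer,
                min_length, n_ctx=None, padding=False):
    # assumed input format: a JSON list of document strings
    with open(data_path, 'r', encoding='utf8') as f:
        lines = json.load(f)
    lines = [line for line in lines if len(line) > min_length]  # drop short articles
    if not os.path.exists(tokenized_data_path):
        os.makedirs(tokenized_data_path)
    docs_per_piece = max(1, len(lines) // num_pieces)
    for i in range(num_pieces):
        sublines = lines[docs_per_piece * i:docs_per_piece * (i + 1)]
        ids = []
        for line in sublines:
            tokens = full_tokenizer.tokenize(line)
            token_ids = full_tokenizer.convert_tokens_to_ids(tokens)
            if padding and n_ctx is not None:
                # pad or truncate each document to a fixed length (pad id 0 is an assumption)
                token_ids = token_ids[:n_ctx] + [0] * max(0, n_ctx - len(token_ids))
            ids.extend(token_ids)
        # one space-separated file of token ids per piece, which is what the training loops read back
        with open(tokenized_data_path + 'tokenized_train_{}.txt'.format(i), 'w') as f:
            f.write(' '.join(str(idx) for idx in ids))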
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--device',
                        default='0,1,2,3',
                        type=str,
                        required=False,
                        help='which GPUs to use')
    parser.add_argument('--model_config',
                        default='config/config.json',
                        type=str,
                        required=False,
                        help='path to the model config file')
    parser.add_argument('--tokenizer_path',
                        default='vocab/vocab.txt',
                        type=str,
                        required=False,
                        help='path to the vocabulary file')
    parser.add_argument('--raw_data_path',
                        default='data/train.txt',
                        type=str,
                        required=False,
                        help='raw training corpus')
    parser.add_argument('--tokenized_data_path',
                        default='data/tokenized/',
                        type=str,
                        required=False,
                        help='where to store the tokenized corpus')
    parser.add_argument('--epochs',
                        default=1,
                        type=int,
                        required=False,
                        help='number of training epochs')
    parser.add_argument('--batch_size',
                        default=8,
                        type=int,
                        required=False,
                        help='training batch size')
    parser.add_argument('--lr',
                        default=1.5e-4,
                        type=float,
                        required=False,
                        help='learning rate')
    parser.add_argument('--warmup_steps',
                        default=2000,
                        type=int,
                        required=False,
                        help='number of warmup steps')
    parser.add_argument('--log_step',
                        default=1,
                        type=int,
                        required=False,
                        help='how many steps between loss reports; set to a multiple of gradient accumulation')
    parser.add_argument('--stride',
                        default=768,
                        type=int,
                        required=False,
                        help='stride of the sliding window over the training data')
    parser.add_argument('--gradient_accumulation',
                        default=1,
                        type=int,
                        required=False,
                        help='gradient accumulation steps')
    parser.add_argument('--fp16', action='store_true', help='mixed precision')
    parser.add_argument('--fp16_opt_level',
                        default='O1',
                        type=str,
                        required=False)
    parser.add_argument('--max_grad_norm',
                        default=1.0,
                        type=float,
                        required=False)
    parser.add_argument('--num_pieces',
                        default=100,
                        type=int,
                        required=False,
                        help='how many pieces to split the training corpus into')
    parser.add_argument('--min_length',
                        default=128,
                        type=int,
                        required=False,
                        help='minimum article length to include')
    parser.add_argument('--output_dir',
                        default='model/',
                        type=str,
                        required=False,
                        help='model output path')
    parser.add_argument('--pretrained_model',
                        default='model/',
                        type=str,
                        required=False,
                        help='path of the pretrained model to start training from')
    parser.add_argument('--writer_dir',
                        default='tensorboard_summary/',
                        type=str,
                        required=False,
                        help='TensorBoard path')
    parser.add_argument('--segment', action='store_true', help='segment Chinese text by word')
    parser.add_argument('--bpe_token', action='store_true', help='subword')
    parser.add_argument('--encoder_json',
                        default="tokenizations/encoder.json",
                        type=str,
                        help="encoder.json")
    parser.add_argument('--vocab_bpe',
                        default="tokenizations/vocab.bpe",
                        type=str,
                        help="vocab.bpe")

    args = parser.parse_args()
    print('args:\n' + args.__repr__())

    if args.segment:
        from tokenizations import tokenization_bert_word_level as tokenization_bert
    else:
        from tokenizations import tokenization_bert

    os.environ["CUDA_VISIBLE_DEVICES"] = args.device  # set which GPUs the program uses

    model_config = transformers.modeling_gpt2.GPT2Config.from_json_file(
        args.model_config)
    print('config:\n' + model_config.to_json_string())

    n_ctx = model_config.n_ctx
    if args.bpe_token:
        full_tokenizer = get_encoder(args.encoder_json, args.vocab_bpe)
    else:
        full_tokenizer = tokenization_bert.BertTokenizer(
            vocab_file=args.tokenizer_path)
    full_tokenizer.max_len = 999999
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('using device:', device)

    raw_data_path = args.raw_data_path
    tokenized_data_path = args.tokenized_data_path
    epochs = args.epochs
    batch_size = args.batch_size
    lr = args.lr
    warmup_steps = args.warmup_steps
    log_step = args.log_step
    stride = args.stride
    gradient_accumulation = args.gradient_accumulation
    fp16 = args.fp16
    fp16_opt_level = args.fp16_opt_level
    max_grad_norm = args.max_grad_norm
    num_pieces = args.num_pieces
    min_length = args.min_length
    output_dir = args.output_dir
    tb_writer = SummaryWriter(log_dir=args.writer_dir)
    assert log_step % gradient_accumulation == 0

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    if not args.pretrained_model:
        model = transformers.modeling_gpt2.GPT2LMHeadModel(config=model_config)
    else:
        model = transformers.modeling_gpt2.GPT2LMHeadModel.from_pretrained(
            args.pretrained_model)
    model.train()
    model.to(device)

    num_parameters = 0
    parameters = model.parameters()
    for parameter in parameters:
        num_parameters += parameter.numel()
    print('number of parameters: {}'.format(num_parameters))

    multi_gpu = False
    full_len = 0
    print('calculating total steps')
    for i in tqdm(range(num_pieces)):
        with open(tokenized_data_path + 'tokenized_train_{}.txt'.format(i),
                  'r') as f:
            full_len += len([int(item) for item in f.read().strip().split()])
    total_steps = int(full_len / stride * epochs / batch_size /
                      gradient_accumulation)
    print('total steps = {}'.format(total_steps))

    optimizer = transformers.AdamW(model.parameters(),
                                   lr=lr,
                                   correct_bias=True)
    scheduler = transformers.WarmupLinearSchedule(optimizer,
                                                  warmup_steps=warmup_steps,
                                                  t_total=total_steps)

    if fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level=fp16_opt_level)

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = DataParallel(
            model, device_ids=[int(i) for i in args.device.split(',')])
        multi_gpu = True
    print('starting training')
    overall_step = 0
    running_loss = 0
    for epoch in range(epochs):
        print('epoch {}'.format(epoch + 1))
        now = datetime.now()
        print('time: {}'.format(now))
        x = np.linspace(0, num_pieces - 1, num_pieces, dtype=np.int32)
        random.shuffle(x)
        piece_num = 0
        for i in x:
            with open(tokenized_data_path + 'tokenized_train_{}.txt'.format(i),
                      'r') as f:
                line = f.read().strip()
            tokens = line.split()
            tokens = [int(token) for token in tokens]
            start_point = 0
            samples = []
            while start_point < len(tokens) - n_ctx:
                samples.append(tokens[start_point:start_point + n_ctx])
                start_point += stride
            if start_point < len(tokens):
                samples.append(tokens[len(tokens) - n_ctx:])
            random.shuffle(samples)
            for step in range(len(samples) // batch_size):  # drop last

                #  prepare data
                batch = samples[step * batch_size:(step + 1) * batch_size]
                batch_inputs = []
                for ids in batch:
                    int_ids = [int(x) for x in ids]
                    batch_inputs.append(int_ids)
                batch_inputs = torch.tensor(batch_inputs).long().to(device)

                #  forward pass
                # the forward pass must build the autograd graph, so it cannot run under torch.no_grad()
                outputs = model.forward(input_ids=batch_inputs,
                                        labels=batch_inputs)
                loss, logits = outputs[:2]

                #  get loss
                if multi_gpu:
                    loss = loss.mean()
                if gradient_accumulation > 1:
                    loss = loss / gradient_accumulation

                #  loss backward
                if fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(optimizer), max_grad_norm)
                else:
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                   max_grad_norm)

                #  optimizer step
                if (overall_step + 1) % gradient_accumulation == 0:
                    running_loss += loss.item()
                    optimizer.step()
                    optimizer.zero_grad()
                    scheduler.step()
                if (overall_step + 1) % log_step == 0:
                    tb_writer.add_scalar('loss',
                                         loss.item() * gradient_accumulation,
                                         overall_step)
                    print(
                        'now time: {}:{}. Step {} of piece {} of epoch {}, loss {}'
                        .format(
                            datetime.now().hour,
                            datetime.now().minute, step + 1, piece_num,
                            epoch + 1, running_loss * gradient_accumulation /
                            (log_step / gradient_accumulation)))
                    running_loss = 0
                overall_step += 1
            piece_num += 1

        print('saving model for epoch {}'.format(epoch + 1))
        if not os.path.exists(output_dir + 'model_epoch{}'.format(epoch + 1)):
            os.mkdir(output_dir + 'model_epoch{}'.format(epoch + 1))
        model_to_save = model.module if hasattr(model, 'module') else model
        model_to_save.save_pretrained(output_dir +
                                      'model_epoch{}'.format(epoch + 1))
        print('epoch {} finished'.format(epoch + 1))

        then = datetime.now()
        print('time: {}'.format(then))
        print('time for one epoch: {}'.format(then - now))

    print('training finished')
    if not os.path.exists(output_dir + 'final_model'):
        os.mkdir(output_dir + 'final_model')
    model_to_save = model.module if hasattr(model, 'module') else model
    model_to_save.save_pretrained(output_dir + 'final_model')
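The listings show only main(); the module-level imports are omitted by the example page. A plausible header for Example #2 is sketched below; the local import path for get_encoder is an assumption, and on older setups SummaryWriter may come from tensorboardX rather than torch.utils.tensorboard.

import argparse
import os
import random
from datetime import datetime

import numpy as np
import torch
import transformers
from torch.nn import DataParallel
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

# assumed location of the BPE helper used when --bpe_token is set
from tokenizations.bpe_tokenizer import get_encoder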
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--device", default="0,1,2,3", type=str, required=False, help="设置使用哪些显卡"
    )
    parser.add_argument(
        "--model_config",
        default="config/model_config_small.json",
        type=str,
        required=False,
        help="选择模型参数",
    )
    parser.add_argument(
        "--tokenizer_path",
        default="cache/vocab_small.txt",
        type=str,
        required=False,
        help="选择词库",
    )
    parser.add_argument(
        "--raw_data_path",
        default="data/train.json",
        type=str,
        required=False,
        help="原始训练语料",
    )
    parser.add_argument(
        "--tokenized_data_path",
        default="data/tokenized/",
        type=str,
        required=False,
        help="tokenized语料存放位置",
    )
    parser.add_argument("--raw", action="store_true", help="是否先做tokenize")
    parser.add_argument("--epochs", default=5, type=int, required=False, help="训练循环")
    parser.add_argument(
        "--batch_size", default=8, type=int, required=False, help="训练batch size"
    )
    parser.add_argument("--lr", default=1.5e-4, type=float, required=False, help="学习率")
    parser.add_argument(
        "--warmup_steps", default=2000, type=int, required=False, help="warm up步数"
    )
    parser.add_argument(
        "--log_step",
        default=1,
        type=int,
        required=False,
        help="多少步汇报一次loss,设置为gradient accumulation的整数倍",
    )
    parser.add_argument(
        "--stride", default=768, type=int, required=False, help="训练时取训练数据的窗口步长"
    )
    parser.add_argument(
        "--gradient_accumulation", default=1, type=int, required=False, help="梯度积累"
    )
    parser.add_argument("--fp16", action="store_true", help="混合精度")
    parser.add_argument("--fp16_opt_level", default="O1", type=str, required=False)
    parser.add_argument("--max_grad_norm", default=1.0, type=float, required=False)
    parser.add_argument(
        "--num_pieces", default=100, type=int, required=False, help="将训练语料分成多少份"
    )
    parser.add_argument(
        "--min_length", default=128, type=int, required=False, help="最短收录文章长度"
    )
    parser.add_argument(
        "--output_dir", default="model/", type=str, required=False, help="模型输出路径"
    )
    parser.add_argument(
        "--pretrained_model", default="", type=str, required=False, help="模型训练起点路径"
    )
    parser.add_argument(
        "--writer_dir",
        default="tensorboard_summary/",
        type=str,
        required=False,
        help="Tensorboard路径",
    )
    parser.add_argument("--segment", action="store_true", help="中文以词为单位")
    parser.add_argument("--bpe_token", action="store_true", help="subword")
    parser.add_argument(
        "--encoder_json",
        default="tokenizations/encoder.json",
        type=str,
        help="encoder.json",
    )
    parser.add_argument(
        "--vocab_bpe", default="tokenizations/vocab.bpe", type=str, help="vocab.bpe"
    )

    args = parser.parse_args()
    print("args:\n" + args.__repr__())

    if args.segment:
        from tokenizations import tokenization_bert_word_level as tokenization_bert
    else:
        from tokenizations import tokenization_bert

    os.environ["CUDA_VISIBLE_DEVICES"] = args.device  # set which GPUs the program uses

    model_config = transformers.modeling_gpt2.GPT2Config.from_json_file(
        args.model_config
    )
    print("config:\n" + model_config.to_json_string())

    n_ctx = model_config.n_ctx
    if args.bpe_token:
        full_tokenizer = get_encoder(args.encoder_json, args.vocab_bpe)
    else:
        full_tokenizer = tokenization_bert.BertTokenizer(vocab_file=args.tokenizer_path)
    full_tokenizer.max_len = 999999
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("using device:", device)

    raw_data_path = args.raw_data_path
    tokenized_data_path = args.tokenized_data_path
    raw = args.raw  # whether to build the dataset from scratch
    epochs = args.epochs
    batch_size = args.batch_size
    lr = args.lr
    warmup_steps = args.warmup_steps
    log_step = args.log_step
    stride = args.stride
    gradient_accumulation = args.gradient_accumulation
    fp16 = args.fp16  # do not enable on GPUs without half-precision support
    fp16_opt_level = args.fp16_opt_level
    max_grad_norm = args.max_grad_norm
    num_pieces = args.num_pieces
    min_length = args.min_length
    output_dir = args.output_dir
    tb_writer = SummaryWriter(log_dir=args.writer_dir)
    assert log_step % gradient_accumulation == 0

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    if raw:
        print("building files")
        build_files(
            data_path=raw_data_path,
            tokenized_data_path=tokenized_data_path,
            num_pieces=num_pieces,
            full_tokenizer=full_tokenizer,
            min_length=min_length,
        )
        print("files built")

    if not args.pretrained_model:
        model = transformers.modeling_gpt2.GPT2LMHeadModel(config=model_config)
    else:
        model = transformers.modeling_gpt2.GPT2LMHeadModel.from_pretrained(
            args.pretrained_model
        )
    model.train()
    model.to(device)

    num_parameters = 0
    parameters = model.parameters()
    for parameter in parameters:
        num_parameters += parameter.numel()
    print("number of parameters: {}".format(num_parameters))

    multi_gpu = False
    full_len = 0
    print("calculating total steps")
    for i in tqdm(range(num_pieces)):
        with open(tokenized_data_path + "tokenized_train_{}.txt".format(i), "r") as f:
            full_len += len([int(item) for item in f.read().strip().split()])
    total_steps = int(full_len / stride * epochs / batch_size / gradient_accumulation)
    print("total steps = {}".format(total_steps))

    optimizer = transformers.AdamW(model.parameters(), lr=lr, correct_bias=True)
    scheduler = transformers.WarmupLinearSchedule(
        optimizer, warmup_steps=warmup_steps, t_total=total_steps
    )
    if fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )
        model, optimizer = amp.initialize(model, optimizer, opt_level=fp16_opt_level)

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = DataParallel(model, device_ids=[int(i) for i in args.device.split(",")])
        multi_gpu = True
    print("starting training")
    overall_step = 0
    running_loss = 0
    saving_time = datetime.now()
    for epoch in range(epochs):
        print("epoch {}".format(epoch + 1))
        now = datetime.now()
        print("time: {}".format(now))
        x = np.linspace(0, num_pieces - 1, num_pieces, dtype=np.int32)
        random.shuffle(x)
        piece_num = 0
        for i in x:
            with open(
                tokenized_data_path + "tokenized_train_{}.txt".format(i), "r"
            ) as f:
                line = f.read().strip()
            tokens = line.split()
            tokens = [int(token) for token in tokens]
            start_point = 0
            samples = []
            while start_point < len(tokens) - n_ctx:
                samples.append(tokens[start_point : start_point + n_ctx])
                start_point += stride
            if start_point < len(tokens):
                samples.append(tokens[len(tokens) - n_ctx :])
            random.shuffle(samples)
            for step in range(len(samples) // batch_size):  # drop last

                #  prepare data
                batch = samples[step * batch_size : (step + 1) * batch_size]
                batch_inputs = []
                for ids in batch:
                    int_ids = [int(x) for x in ids]
                    batch_inputs.append(int_ids)
                batch_inputs = torch.tensor(batch_inputs).long().to(device)

                #  forward pass
                outputs = model.forward(input_ids=batch_inputs, labels=batch_inputs)
                loss, logits = outputs[:2]

                #  get loss
                if multi_gpu:
                    loss = loss.mean()
                if gradient_accumulation > 1:
                    loss = loss / gradient_accumulation

                #  loss backward
                if fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(optimizer), max_grad_norm
                        )
                else:
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)

                #  optimizer step
                if (overall_step + 1) % gradient_accumulation == 0:
                    running_loss += loss.item()
                    optimizer.step()
                    optimizer.zero_grad()
                    scheduler.step()
                if (overall_step + 1) % log_step == 0:
                    tb_writer.add_scalar(
                        "loss", loss.item() * gradient_accumulation, overall_step
                    )
                    print(
                        "now time: {}:{}. Step {} of piece {} of epoch {}, loss {}".format(
                            datetime.now().hour,
                            datetime.now().minute,
                            step + 1,
                            piece_num,
                            epoch + 1,
                            running_loss
                            * gradient_accumulation
                            / (log_step / gradient_accumulation),
                        )
                    )
                    running_loss = 0
                delta_time = datetime.now() - saving_time
                if delta_time.seconds > 1800:
                    print("saving model for epoch {}".format(epoch + 1))
                    if not os.path.exists(
                        output_dir + "model_epoch{}".format(epoch + 1)
                    ):
                        os.mkdir(output_dir + "model_epoch{}".format(epoch + 1))
                    model_to_save = model.module if hasattr(model, "module") else model
                    model_to_save.save_pretrained(
                        output_dir + "model_epoch{}".format(epoch + 1)
                    )
                    saving_time = datetime.now()
                overall_step += 1
            piece_num += 1

        print("saving model for epoch {}".format(epoch + 1))
        if not os.path.exists(output_dir + "model_epoch{}".format(epoch + 1)):
            os.mkdir(output_dir + "model_epoch{}".format(epoch + 1))
        model_to_save = model.module if hasattr(model, "module") else model
        model_to_save.save_pretrained(output_dir + "model_epoch{}".format(epoch + 1))
        # torch.save(scheduler.state_dict(), output_dir + 'model_epoch{}/scheduler.pt'.format(epoch + 1))
        # torch.save(optimizer.state_dict(), output_dir + 'model_epoch{}/optimizer.pt'.format(epoch + 1))
        print("epoch {} finished".format(epoch + 1))

        then = datetime.now()
        print("time: {}".format(then))
        print("time for one epoch: {}".format(then - now))

    print("training finished")
    if not os.path.exists(output_dir + "final_model"):
        os.mkdir(output_dir + "final_model")
    model_to_save = model.module if hasattr(model, "module") else model
    model_to_save.save_pretrained(output_dir + "final_model")
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--device',
                        default='0,1,2,3',
                        type=str,
                        required=False,
                        help='which GPUs to use, comma-separated')
    parser.add_argument('--model_config',
                        type=str,
                        required=False,
                        help='path to the model config file')
    parser.add_argument('--tokenizer_path',
                        type=str,
                        required=True,
                        help='path to the vocabulary file')
    parser.add_argument('--raw_data_path',
                        type=str,
                        required=True,
                        help='path to the training corpus')
    parser.add_argument('--tokenized_data_path',
                        default='data/tokenized/',
                        type=str,
                        required=False,
                        help='where to store the tokenized corpus')
    parser.add_argument('--raw',
                        action='store_true',
                        help='tokenize the raw corpus first')
    parser.add_argument('--epochs',
                        default=5,
                        type=int,
                        required=False,
                        help='number of epochs')
    parser.add_argument('--batch_size',
                        default=8,
                        type=int,
                        required=False,
                        help='batch size')
    parser.add_argument('--lr',
                        default=3e-5,
                        type=float,
                        required=False,
                        help='learning rate')
    parser.add_argument('--warmup_steps',
                        default=0.1,
                        type=float,
                        required=False,
                        help='warmup steps as a fraction of total steps')
    parser.add_argument('--log_step',
                        default=1,
                        type=int,
                        required=False,
                        help='interval between loss logs; must be a multiple of gradient accumulation')
    parser.add_argument('--stride',
                        default=768,
                        type=int,
                        required=False,
                        help='stride of the sliding window over the training corpus')
    parser.add_argument('--gradient_accumulation',
                        default=1,
                        type=int,
                        required=False,
                        help='gradient accumulation steps')
    parser.add_argument('--fp16', action='store_true', help='use half-precision (mixed-precision) training')
    parser.add_argument('--fp16_opt_level',
                        default='O1',
                        type=str,
                        required=False)
    parser.add_argument('--max_grad_norm',
                        default=1.0,
                        type=float,
                        required=False)
    parser.add_argument('--num_pieces',
                        default=100,
                        type=int,
                        required=False,
                        help='how many pieces to split the training corpus into')
    parser.add_argument('--min_length',
                        default=1,
                        type=int,
                        required=False,
                        help='minimum article length; shorter articles are discarded')
    parser.add_argument('--output_dir', type=str, required=True, help='model output path')
    parser.add_argument('--pretrained_model',
                        default='',
                        type=str,
                        required=False,
                        help='path of the pretrained model to start training from')
    parser.add_argument('--writer_dir',
                        default='tensorboard_summary/',
                        type=str,
                        required=False,
                        help='TensorBoard output path')
    parser.add_argument('--segment', action='store_true', help='segment by word')
    parser.add_argument('--bpe_token',
                        action='store_true',
                        help='use Byte Pair Encoding')
    parser.add_argument('--encoder_json',
                        default='tokenizations/encoder.json',
                        type=str,
                        help='encoder.json')
    parser.add_argument('--vocab_bpe',
                        default='tokenizations/vocab.bpe',
                        type=str,
                        help='vocab.bpe')
    parser.add_argument('--timezone',
                        default=8,
                        type=int,
                        help='time-zone offset in hours, default GMT+8')
    parser.add_argument('--epoch_save',
                        default=1,
                        type=int,
                        help='save weights every this many epochs')

    args = parser.parse_args()
    print(f'Arguments: {args.__repr__()}')

    if args.segment:
        from tokenizations import tokenization_bert_word_level as tokenization_bert
    else:
        from tokenizations import tokenization_bert

    # set which GPUs to use
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device

    model_config = transformers.GPT2Config.from_json_file(args.model_config)
    print(f'Config:\n{model_config.to_json_string()}')

    n_ctx = model_config.n_ctx
    if args.bpe_token:
        full_tokenizer = get_encoder(args.encoder_json, args.vocab_bpe)
    else:
        full_tokenizer = tokenization_bert.BertTokenizer(
            vocab_file=args.tokenizer_path,
            do_lower_case=False,
            do_basic_tokenize=False)
    full_tokenizer.max_len = 999999
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f'Using Device: {device.upper()}')

    raw_data_path = args.raw_data_path
    tokenized_data_path = args.tokenized_data_path
    raw = args.raw
    epochs = args.epochs
    batch_size = args.batch_size
    lr = args.lr
    warmup_steps = args.warmup_steps
    log_step = args.log_step
    stride = args.stride
    gradient_accumulation = args.gradient_accumulation
    # do not use on GPUs without half-precision support
    fp16 = args.fp16
    fp16_opt_level = args.fp16_opt_level
    max_grad_norm = args.max_grad_norm
    num_pieces = args.num_pieces
    min_length = args.min_length
    output_dir = args.output_dir
    tz = args.timezone
    strlen = lambda n: len(str(n))
    get_time = lambda: datetime.utcnow() + timedelta(hours=tz)
    tb_writer = SummaryWriter(log_dir=args.writer_dir)
    assert log_step % gradient_accumulation == 0

    os.makedirs(output_dir, exist_ok=True)

    if raw:
        print('Building from Raw Data')
        build_files(data_path=raw_data_path,
                    tokenized_data_path=tokenized_data_path,
                    num_pieces=num_pieces,
                    tokenizer=full_tokenizer,
                    min_length=min_length)

    if not args.pretrained_model:
        model = transformers.GPT2LMHeadModel(config=model_config)
    else:
        model = transformers.GPT2LMHeadModel.from_pretrained(
            args.pretrained_model)

    if torch.cuda.device_count() == 2:
        device_map = {
            0: [0, 1, 2, 3, 4],
            1: [5, 6, 7, 8, 9, 10, 11],
        }
        model.parallelize(device_map)
        # model.parallelize()
        print('Model Parallelism!')

    model.train()
    if torch.cuda.device_count() < 2:
        model.to(device)

    num_parameters = 0
    parameters = model.parameters()
    for parameter in parameters:
        num_parameters += parameter.numel()
    print(f'Number of Parameters: {num_parameters}')

    multi_gpu = False
    full_len = 0
    print('Calculating Total Steps')
    for i in tqdm(range(num_pieces)):
        _fpath = os.path.join(tokenized_data_path, f'tokenized_train_{i}.txt')
        with open(_fpath, 'r') as f:
            full_len += len([int(item) for item in f.read().strip().split()])
    total_steps = int(full_len / stride * epochs / batch_size /
                      gradient_accumulation)
    warmup_steps = int(total_steps * warmup_steps)
    print(f'Total Steps: {total_steps}')

    optimizer = transformers.AdamW(model.parameters(),
                                   lr=lr,
                                   correct_bias=True)
    scheduler = transformers.get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=warmup_steps,
        num_training_steps=total_steps)

    if fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                'Please install apex from https://www.github.com/nvidia/apex to use fp16 training.'
            )
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level=fp16_opt_level)

    # if torch.cuda.device_count() > 1:
    #     print(f'Using {torch.cuda.device_count()} GPUs')
    #     model = DataParallel(
    #         model, device_ids=[int(i) for i in args.device.split(',')])
    #     model.to(f'cuda:{model.device_ids[0]}')
    #     multi_gpu = True

    with TimeCost('Training'):
        print('Training Begin')
        overall_step = 0
        running_loss = 0

        for epoch in range(epochs):
            now = get_time()
            print(f'Epoch {epoch + 1} - Time: {now}')
            x = np.linspace(0, num_pieces - 1, num_pieces, dtype=np.int32)
            random.shuffle(x)
            piece_num = 0
            for i in x:
                _fpath = os.path.join(tokenized_data_path,
                                      f'tokenized_train_{i}.txt')
                with open(_fpath, 'r') as f:
                    line = f.read().strip()
                tokens = line.split()
                tokens = [int(token) for token in tokens]
                start_point = 0
                samples = []
                while start_point < len(tokens) - n_ctx:
                    samples.append(tokens[start_point:start_point + n_ctx])
                    start_point += stride
                if start_point < len(tokens):
                    idx = len(tokens) - n_ctx
                    samples.append(tokens[idx:])
                print(f'Tokenize {i} Sample Size: {len(samples)}')
                random.shuffle(samples)
                # drop the last step that does not fill a complete batch
                _steps = len(samples) // batch_size
                # if there are fewer samples than the batch size, there would be no steps to train on
                # keeping num_pieces from being set too large also avoids this problem
                _steps = 1 if _steps <= 0 else _steps

                for step in range(_steps):
                    # prepare data
                    batch = samples[step * batch_size:(step + 1) * batch_size]
                    batch_inputs = []
                    for ids in batch:
                        int_ids = [int(x) for x in ids]
                        batch_inputs.append(int_ids)
                    _device = 'cuda:0' if torch.cuda.device_count(
                    ) > 1 else device
                    batch_inputs = torch.tensor(batch_inputs).long().to(
                        _device)

                    # forward pass
                    outputs = model.forward(input_ids=batch_inputs,
                                            labels=batch_inputs)
                    loss, _ = outputs[:2]

                    # get loss
                    if multi_gpu:
                        loss = loss.mean()
                    if gradient_accumulation > 1:
                        loss = loss / gradient_accumulation

                    # loss backward
                    if fp16:
                        with amp.scale_loss(loss, optimizer) as scaled_loss:
                            scaled_loss.backward()
                            torch.nn.utils.clip_grad_norm_(
                                amp.master_params(optimizer), max_grad_norm)
                    else:
                        loss.backward()
                        torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                       max_grad_norm)

                    # optimizer step
                    if (overall_step + 1) % gradient_accumulation == 0:
                        running_loss += loss.item()
                        optimizer.step()
                        optimizer.zero_grad()
                        scheduler.step()
                    if (overall_step + 1) % log_step == 0:
                        tb_writer.add_scalar(
                            'loss',
                            loss.item() * gradient_accumulation, overall_step)
                        ts = get_time().strftime('%H:%M:%S')  # honor the --timezone argument instead of hard-coding GMT+8
                        display_loss = running_loss * gradient_accumulation
                        display_loss /= log_step / gradient_accumulation
                        print(
                            f'Time {ts} - '
                            f'Epoch {epoch + 1:{strlen(epochs)}d}/{epochs} - '
                            f'Step {step + 1:{strlen(_steps)}d}/{_steps} - '
                            f'Piece {piece_num + 1:{strlen(num_pieces)}d}/{num_pieces} - '
                            f'Loss {display_loss:.4f}')
                        running_loss = 0
                    overall_step += 1
                piece_num += 1

            if (epoch + 1) % args.epoch_save == 0:
                print(f'Saving Model of Epoch {epoch + 1}')
                model_output_dir = os.path.join(output_dir,
                                                f'model_epoch{epoch + 1}')
                os.makedirs(model_output_dir, exist_ok=True)
                model_to_save = model.module if hasattr(model,
                                                        'module') else model
                model_to_save.save_pretrained(model_output_dir)

            then = get_time()
            print(f'Epoch {epoch + 1} Finished - Time: {then}')
            delta = (then - now).total_seconds()
            mm, ss = delta // 60, delta % 60
            hh, mm = mm // 60, mm % 60
            print(
                f'Time Cost of the Epoch {epoch + 1} - {hh:.0f}:{mm:.0f}:{ss:.2f}'
            )

        print('Training Done')
    model_output_dir = os.path.join(output_dir, 'final_model')
    os.makedirs(model_output_dir, exist_ok=True)
    model_to_save = model.module if hasattr(model, 'module') else model
    model_to_save.save_pretrained(model_output_dir)
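Example #4 wraps the training loop in a TimeCost('Training') context manager that is not part of the listing. A minimal sketch of such a timer is shown below; the exact behavior and output format of the real helper are assumptions.

from contextlib import contextmanager
from datetime import datetime


@contextmanager
def TimeCost(label):
    # report the wall-clock time of the enclosed block when it exits
    start = datetime.now()
    try:
        yield
    finally:
        print(f'{label} time cost: {datetime.now() - start}')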
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='which GPUs to use')
    parser.add_argument('--model_config', default='config/model_config_small.json', type=str, required=False,
                        help='path to the model config file')
    parser.add_argument('--tokenizer_path', default='cache/vocab_small.txt', type=str, required=False, help='path to the vocabulary file')
    parser.add_argument('--raw_data_path', default='data/train.json', type=str, required=False, help='raw training corpus')
    parser.add_argument('--tokenized_data_path', default='data/tokenized/', type=str, required=False,
                        help='where to store the tokenized corpus')
    parser.add_argument('--raw', action='store_true', help='tokenize the raw corpus first')
    parser.add_argument('--epochs', default=5, type=int, required=False, help='number of training epochs')
    parser.add_argument('--batch_size', default=64, type=int, required=False, help='training batch size')
    parser.add_argument('--lr', default=1.5e-4, type=float, required=False, help='learning rate')
    parser.add_argument('--warmup_steps', default=2000, type=int, required=False, help='number of warmup steps')
    parser.add_argument('--log_step', default=1, type=int, required=False, help='how many steps between loss reports; set to a multiple of gradient accumulation')
    parser.add_argument('--stride', default=768, type=int, required=False, help='stride of the sliding window over the training data')
    parser.add_argument('--gradient_accumulation', default=1, type=int, required=False, help='gradient accumulation steps')
    parser.add_argument('--fp16', action='store_true', help='mixed precision')
    parser.add_argument('--fp16_opt_level', default='O1', type=str, required=False)
    parser.add_argument('--max_grad_norm', default=1.0, type=float, required=False)
    parser.add_argument('--num_pieces', default=100, type=int, required=False, help='how many pieces to split the training corpus into')
    parser.add_argument('--min_length', default=128, type=int, required=False, help='minimum article length to include')
    parser.add_argument('--max_length', default=256, type=int, required=False, help='maximum article length to include')
    parser.add_argument('--output_dir', default='model/', type=str, required=False, help='model output path')
    parser.add_argument('--pretrained_model', default='', type=str, required=False, help='path of the pretrained model to start training from')
    parser.add_argument('--writer_dir', default='tensorboard_summary/', type=str, required=False, help='TensorBoard path')
    parser.add_argument('--segment', action='store_true', help='segment Chinese text by word')
    parser.add_argument('--bpe_token', action='store_true', help='subword')
    parser.add_argument('--encoder_json', default="tokenizations/encoder.json", type=str, help="encoder.json")
    parser.add_argument('--vocab_bpe', default="tokenizations/vocab.bpe", type=str, help="vocab.bpe")
    parser.add_argument('--max_steps_perEpoch_perPiece', default=1000000, type=int, required=False)
    parser.add_argument('--steps_savemodel', default=10000, type=int, required=False, help='save the model every this many steps')
    parser.add_argument('--padding', action='store_true', help='whether inputs are padded to a fixed length')
    args = parser.parse_args()
    print('args:\n' + args.__repr__())

    if args.segment:
        from tokenizations import tokenization_bert_word_level as tokenization_bert
    else:
        from tokenizations import tokenization_bert

    # os.environ["CUDA_VISIBLE_DEVICES"] = args.device  # set which GPUs the program uses

    model_config = transformers.modeling_gpt2.GPT2Config.from_json_file(args.model_config)
    print('config:\n' + model_config.to_json_string())

    n_ctx = model_config.n_ctx
    if args.bpe_token:
        full_tokenizer = get_encoder(args.encoder_json, args.vocab_bpe)
    else:
        full_tokenizer = tokenization_bert.BertTokenizer(vocab_file=args.tokenizer_path)
    full_tokenizer.max_len = 999999
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('using device:', device)

    raw_data_path = args.raw_data_path
    tokenized_data_path = args.tokenized_data_path
    raw = args.raw  # whether to build the dataset from scratch
    epochs = args.epochs
    batch_size = args.batch_size
    lr = args.lr
    warmup_steps = args.warmup_steps
    log_step = args.log_step
    stride = args.stride
    gradient_accumulation = args.gradient_accumulation
    fp16 = args.fp16  # do not enable on GPUs without half-precision support
    fp16_opt_level = args.fp16_opt_level
    max_grad_norm = args.max_grad_norm
    num_pieces = args.num_pieces
    min_length = args.min_length
    output_dir = args.output_dir
    padding = args.padding
    max_length = args.max_length
    #tb_writer = SummaryWriter(log_dir=args.writer_dir)
    assert log_step % gradient_accumulation == 0
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    if not args.pretrained_model:
        model = transformers.modeling_gpt2.GPT2LMHeadModel(config=model_config)
    else:
        model = transformers.modeling_gpt2.GPT2LMHeadModel.from_pretrained(args.pretrained_model)
    model.train()
    model.to(device)

    num_parameters = 0
    parameters = model.parameters()
    for parameter in parameters:
        num_parameters += parameter.numel()
    print('number of parameters: {}'.format(num_parameters))
    multi_gpu = False
    optimizer = transformers.AdamW(model.parameters(), lr=lr, correct_bias=True)
    #scheduler = transformers.WarmupLinearSchedule(optimizer, warmup_steps=warmup_steps,
    #                                                      t_total=total_steps)
    if fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=fp16_opt_level)

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = DataParallel(model, device_ids=[int(i) for i in args.device.split(',')])
        multi_gpu = True
    print('starting training')
    step_loss = 0
    running_loss = 10
    loss_ = 10
    data_iter = iterData(args.tokenized_data_path, rate=1.0, batch_size=batch_size, epochs=epochs)  # renamed to avoid shadowing the built-in iter()
    step = 0
    epoch0 = -1
    while True:
        data = next(data_iter)
        if data=='__STOP__':
            break
        epoch, epochs, idx_file, nb_files, batch_inputs = data
        random.shuffle(batch_inputs)
        batch_inputs = torch.tensor(batch_inputs).long().to(device)
        #  forward pass
        outputs = model.forward(input_ids=batch_inputs, labels=batch_inputs)
        loss, logits = outputs[:2]
        #  get loss
        if multi_gpu:
            loss = loss.mean()
        if gradient_accumulation > 1:
            loss = loss / gradient_accumulation
        #  loss backward
        if fp16:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), max_grad_norm)
        else:
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        #  optimizer step
        if (step + 1) % gradient_accumulation == 0:
            running_loss += loss.item()
            optimizer.step()
            optimizer.zero_grad()
            step_loss += 1
            #scheduler.step()
        if (step + 1) % log_step == 0:
            loss_ = running_loss * gradient_accumulation / (log_step / gradient_accumulation)
            print('now time: {}:{}. step: {}, progress-innerEpoch: {}/{}, progress-outerEpoch: {}/{}, loss {}'.format(
                    datetime.now().hour,
                    datetime.now().minute,
                    step+1,
                    idx_file+1,
                    nb_files,
                    epoch + 1,
                    epochs,
                    loss_))
            running_loss = 0
        if step%args.steps_savemodel==0:
            print('saving model for epoch {}'.format(epoch + 1))
            output_dir_ = output_dir + 'model_epoch{}_step{}_loss-{}'.format(epoch + 1, step,'%0.2f'%loss_)
            if not os.path.exists(output_dir_):
                os.mkdir(output_dir_)
            model_to_save = model.module if hasattr(model, 'module') else model
            model_to_save.save_pretrained(output_dir_)
        step += 1
        if epoch!=epoch0:
            if not os.path.exists(output_dir + 'model_epoch{}'.format(epoch + 1)):
                os.mkdir(output_dir + 'model_epoch{}'.format(epoch + 1))
            model_to_save = model.module if hasattr(model, 'module') else model
            model_to_save.save_pretrained(output_dir + 'model_epoch{}'.format(epoch + 1))
            epoch0 = epoch
            print('epoch {} finished'.format(epoch + 1))
    if not os.path.exists(output_dir + 'final_model'):
        os.mkdir(output_dir + 'final_model')
    model_to_save = model.module if hasattr(model, 'module') else model
    model_to_save.save_pretrained(output_dir + 'final_model')
    print('training finished')
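Example #5 pulls batches from an iterData generator defined elsewhere; the loop expects (epoch, epochs, idx_file, nb_files, batch_inputs) tuples followed by the sentinel '__STOP__'. A rough sketch with that contract is given below; the fixed sample length, file naming, and the meaning of rate are assumptions.

import glob
import random


def iterData(tokenized_data_path, rate=1.0, batch_size=8, epochs=1, n_ctx=256):
    files = sorted(glob.glob(tokenized_data_path + 'tokenized_train_*.txt'))
    nb_files = len(files)
    for epoch in range(epochs):
        for idx_file, path in enumerate(files):
            with open(path, 'r') as f:
                tokens = [int(t) for t in f.read().strip().split()]
            # cut the piece into fixed-length, non-overlapping samples
            samples = [tokens[i:i + n_ctx] for i in range(0, len(tokens) - n_ctx + 1, n_ctx)]
            random.shuffle(samples)
            samples = samples[:int(len(samples) * rate)]  # optional subsampling
            for i in range(0, len(samples) - batch_size + 1, batch_size):
                yield epoch, epochs, idx_file, nb_files, samples[i:i + batch_size]
    yield '__STOP__'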
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--device',
                        default='0',
                        type=str,
                        required=False,
                        help='which GPUs to use')
    parser.add_argument('--model_config',
                        default='gpt2/config.json',
                        type=str,
                        required=False,
                        help='path to the model config file')
    parser.add_argument('--tokenizer_path',
                        default='cache/vocab_small.txt',
                        type=str,
                        required=False,
                        help='path to the vocabulary file')
    parser.add_argument('--raw_data_path',
                        default='data/train.json',
                        type=str,
                        required=False,
                        help='raw training corpus')
    parser.add_argument('--tokenized_data_path',
                        default='data/tokenized/',
                        type=str,
                        required=False,
                        help='where to store the tokenized corpus')
    parser.add_argument('--raw', action='store_true', help='tokenize the raw corpus first')
    parser.add_argument('--epochs',
                        default=150,
                        type=int,
                        required=False,
                        help='number of training epochs')
    parser.add_argument('--batch_size',
                        default=1,
                        type=int,
                        required=False,
                        help='training batch size')
    parser.add_argument('--lr',
                        default=1e-4,
                        type=float,
                        required=False,
                        help='learning rate')
    parser.add_argument('--warmup_steps',
                        default=100,
                        type=int,
                        required=False,
                        help='number of warmup steps')
    # parser.add_argument('--log_step', default=2, type=int, required=False, help='how many steps between loss reports; set to a multiple of gradient accumulation')
    parser.add_argument('--stride',
                        default=384,
                        type=int,
                        required=False,
                        help='stride of the sliding window over the training data')
    parser.add_argument('--gradient_accumulation',
                        default=1,
                        type=int,
                        required=False,
                        help='gradient accumulation steps')
    parser.add_argument('--fp16', action='store_true', help='mixed precision')
    parser.add_argument('--fp16_opt_level',
                        default='O1',
                        type=str,
                        required=False)
    parser.add_argument('--max_grad_norm',
                        default=1.0,
                        type=float,
                        required=False)
    parser.add_argument('--output_dir',
                        default='model_classfier/',
                        type=str,
                        required=False,
                        help='model output path')
    parser.add_argument('--pretrained_model',
                        default='',
                        type=str,
                        required=False,
                        help='path of the pretrained model to start training from')
    # parser.add_argument('--writer_dir', default='tensorboard_summary/', type=str, required=False, help='TensorBoard path')
    parser.add_argument('--segment', action='store_true', help='segment Chinese text by word')
    parser.add_argument('--bpe_token', action='store_true', help='subword')
    parser.add_argument('--encoder_json',
                        default="tokenizations/encoder.json",
                        type=str,
                        help="encoder.json")
    parser.add_argument('--vocab_bpe',
                        default="tokenizations/vocab.bpe",
                        type=str,
                        help="vocab.bpe")

    args = parser.parse_args()
    print('args:\n' + args.__repr__())

    if args.segment:
        from tokenizations import tokenization_bert_word_level as tokenization_bert
    else:
        from tokenizations import tokenization_bert

    os.environ["CUDA_VISIBLE_DEVICES"] = args.device  # set which GPUs the program uses

    model_config = transformers.GPT2Config.from_json_file(args.model_config)
    print('config:\n' + model_config.to_json_string())

    n_ctx = model_config.n_ctx
    if args.bpe_token:
        full_tokenizer = get_encoder(args.encoder_json, args.vocab_bpe)
    else:
        full_tokenizer = tokenization_bert.BertTokenizer(
            vocab_file=args.tokenizer_path)
    full_tokenizer.max_len = 999999
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    print('using device:', device)

    raw_data_path = args.raw_data_path
    # tokenized_data_path = args.tokenized_data_path
    raw = args.raw  # whether to build the dataset from scratch
    epochs = args.epochs
    batch_size = args.batch_size
    lr = args.lr
    warmup_steps = args.warmup_steps
    # log_step = args.log_step
    # stride = args.stride
    gradient_accumulation = args.gradient_accumulation
    # fp16 = args.fp16  # do not enable on GPUs without half-precision support
    # fp16_opt_level = args.fp16_opt_level
    max_grad_norm = args.max_grad_norm
    # num_pieces = args.num_pieces
    # min_length = args.min_length
    output_dir = args.output_dir
    # tb_writer = SummaryWriter(log_dir=args.writer_dir)
    # assert log_step % gradient_accumulation == 0

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    if raw:
        print('building files')
        resources, resources_id, input_question_list, max_aq_len = build_files(
            data_path=raw_data_path, full_tokenizer=full_tokenizer)
        print('files built')
    input_ids = []  # note: '[] * n' is always an empty list; windows are appended per resource below
    # labels = []
    for i in range(len(resources_id)):
        inputsss, _ = sliding_window(max_len=512,
                                     resources=resources_id[i],
                                     stride=512 - 128)
        input_ids.append(inputsss)
        # labels = labels + [choices['label']] * len(inputsss)
    print('sliding built')
    if True:  # shuffle
        index = [i for i in range(len(input_ids))]
        random.shuffle(index)
        new_input_ids = [input_ids[i] for i in index]
        new_input_question_list = [input_question_list[i] for i in index]
        input_ids = new_input_ids
        input_question_list = new_input_question_list  # keep questions aligned with the shuffled inputs

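    # hold out the last 10% of the shuffled articles for validation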
    val_rate = 0.1
    split = int((1 - val_rate) * len(input_ids))
    val_input_ids = input_ids[split:]
    val_input_question_list = input_question_list[split:]

    input_ids = input_ids[:split]
    input_question_list = input_question_list[:split]

    # train_dataset = my_dataset(x=input_ids, y=labels, token_type_ids=token_type_ids)
    # train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=1)

    # if not args.pretrained_model:
    #     model = transformers.models.gpt2.GPT2LMHeadModel(config=model_config)
    # else:
    #     model = transformers.models.gpt2.GPT2LMHeadModel.from_pretrained(args.pretrained_model)

    model = modelMy(args, device)

    model.to(device)
    # num_parameters = 0
    # parameters = model.parameters()
    # for parameter in parameters:
    #     num_parameters += parameter.numel()
    # print('number of parameters: {}'.format(num_parameters))
    # param_optimizer = [p for n, p in model.named_parameters() if p.requires_grad]
    multi_gpu = False
    print('calculating total steps')
    # for i in tqdm(range(num_pieces)):
    #     with open(tokenized_data_path + 'tokenized_train_{}.txt'.format(i), 'r') as f:
    #         full_len += len([int(item) for item in f.read().strip().split()])

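    # AdamW with a linear warmup/decay schedule; each article is one forward/backward pass,
    # so the schedule is sized for epochs * number of training articles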
    optimizer = transformers.optimization.AdamW(model.parameters(),
                                                lr=lr,
                                                weight_decay=0.01,
                                                correct_bias=True)
    scheduler = transformers.optimization.get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=warmup_steps,  # use the parsed --warmup_steps instead of a hard-coded value
        num_training_steps=args.epochs * len(input_ids))
    # scheduler = transformers.optimization.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps = args.)
    # from pytorch_pretrained_bert.optimization import BertAdam
    # optimizer = BertAdam(model.parameters(),
    #                      lr=0.1,
    #                      warmup=0.1,
    #                      t_total=100)
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # CUDA_VISIBLE_DEVICES has already remapped the requested cards to 0..n-1,
        # so DataParallel is given the remapped ids rather than the raw --device values
        model = DataParallel(model,
                             device_ids=list(range(torch.cuda.device_count())))
        multi_gpu = True
    print('starting training')
    overall_step = 0
    running_loss = 0
    best_loss = 9999999
    for epoch in range(epochs):
        print('epoch {}'.format(epoch + 1))
        now = datetime.now()
        print('time: {}'.format(now))
        # x = np.linspace(0, num_pieces - 1, num_pieces, dtype=np.int32)
        acc_s = 0
        piece_num = 0

        model.train()
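        # one "batch" per article: its sliding-window chunks plus all of its questions,
        # choices and gold labels (unsqueeze(0) adds a batch dimension of size 1)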
        for step in range(len(input_ids)):  # paper by paper
            # print ("step:{}".format(step))
            # if (overall_step + 2) % gradient_accumulation == 0:
            #     break
            batch_inputs = input_ids[step]
            batch_inputs = torch.tensor(batch_inputs).long().to(
                device).unsqueeze(0)
            batch_questions = [
                z['Question_token'] for z in input_question_list[step][:]
            ]
            batch_questions = torch.tensor(batch_questions).long().to(
                device).unsqueeze(0)
            batch_choices = [
                z['Choices_token'] for z in input_question_list[step][:]
            ]
            batch_choices = torch.tensor(batch_choices).long().to(
                device).unsqueeze(0)
            batch_labels = [z['Goal'] for z in input_question_list[step][:]]
            batch_labels = torch.tensor(batch_labels).long().to(
                device).unsqueeze(0)
            #  forward pass
            outputs = model.forward(inputs=batch_inputs,
                                    questions=batch_questions,
                                    choices=batch_choices,
                                    labels=batch_labels)
            loss, pred, acc = outputs
            acc_s += (acc.cpu())
            running_loss += loss.item()
            #  get loss
            if multi_gpu:
                loss = loss.mean()
            if gradient_accumulation > 1:
                loss = loss / gradient_accumulation

            #  loss backward
            loss.backward()
            if (overall_step + 1) % gradient_accumulation == 0:
                # clip with the parsed --max_grad_norm (read above but never applied in the original)
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()  # clear accumulated gradients before the next accumulation window
                # print("backwards")

            overall_step += 1
        piece_num += 1
        running_loss = running_loss / len(input_ids)
        print('now time: {}:{}. epoch {}, loss {}, acc {:.6f}'.format(
            datetime.now().hour,
            datetime.now().minute, epoch + 1, running_loss * 1000,
            acc_s / len(input_ids)
            # acc_s / 16
            # acc_s / 800
        ))
        #---------------------------------
        running_loss = running_loss * gradient_accumulation / len(resources)
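        # keep the checkpoint with the lowest training loss seen so far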
        if running_loss < best_loss:
            best_loss = running_loss
            print('saving model for epoch {}'.format(epoch + 1))
            if not os.path.exists(output_dir +
                                  'model_epoch{}'.format(epoch + 1)):
                os.mkdir(output_dir + 'model_epoch{}'.format(epoch + 1))
            model_to_save = model.module if hasattr(model, 'module') else model
            model_to_save.save_pretrained(output_dir + 'loss.best', optimizer,
                                          epoch)
        running_loss = 0

        model.eval()
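        # evaluate on the held-out articles after every epoch; accuracy is averaged per article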
        val_accs = 0
        for stepp in range(len(val_input_ids)):
            batch_inputs = val_input_ids[stepp]
            batch_inputs = torch.tensor(batch_inputs).long().to(
                device).unsqueeze(0)
            batch_questions = [
                z['Question_token'] for z in val_input_question_list[stepp][:]
            ]
            batch_questions = torch.tensor(batch_questions).long().to(
                device).unsqueeze(0)
            batch_choices = [
                z['Choices_token'] for z in val_input_question_list[stepp][:]
            ]
            batch_choices = torch.tensor(batch_choices).long().to(
                device).unsqueeze(0)
            batch_labels = [
                z['Goal'] for z in val_input_question_list[stepp][:]
            ]
            batch_labels = torch.tensor(batch_labels).long().to(
                device).unsqueeze(0)
            #  forward pass
            outputs = model.forward(inputs=batch_inputs,
                                    questions=batch_questions,
                                    choices=batch_choices,
                                    labels=batch_labels,
                                    training=False)
            loss, pred, acc = outputs
            val_accs += (acc)
        print('validation acc {}'.format(val_accs / (len(val_input_ids))))
        # print('validation acc {}'.format(val_accs))
        print('epoch {} finished'.format(epoch + 1))
        #------------------
        then = datetime.now()
        print('time: {}'.format(then))
        print('time for one epoch: {}'.format(then - now))

    print('training finished')
    if not os.path.exists(output_dir + 'final_model'):
        os.mkdir(output_dir + 'final_model')
    model_to_save = model.module if hasattr(model, 'module') else model
    model_to_save.save_pretrained(output_dir + 'final.best', optimizer, epoch)
Ejemplo n.º 7
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0', type=str, required=False, help='which GPUs to use')
    parser.add_argument('--model_config', default='gpt2/config.json', type=str, required=False,
                        help='model config file')
    parser.add_argument('--tokenizer_path', default='cache/vocab_small.txt', type=str, required=False, help='vocabulary file')
    parser.add_argument('--raw_data_path', default='data/train.json', type=str, required=False, help='raw training corpus')
    parser.add_argument('--tokenized_data_path', default='data/tokenized/', type=str, required=False,
                        help='where to store the tokenized corpus')
    parser.add_argument('--raw', action='store_true', help='tokenize the raw data first')
    parser.add_argument('--epochs', default=100, type=int, required=False, help='number of training epochs')
    parser.add_argument('--batch_size', default=1, type=int, required=False, help='training batch size')
    parser.add_argument('--lr', default=1.5e-4, type=float, required=False, help='learning rate')
    parser.add_argument('--warmup_steps', default=10000, type=int, required=False, help='number of warm-up steps')
    # restored: log_step is read further down (log_step = args.log_step), so the argument must stay defined
    parser.add_argument('--log_step', default=2, type=int, required=False,
                        help='how often (in steps) to report the loss; set to a multiple of gradient accumulation')
    parser.add_argument('--stride', default=768, type=int, required=False, help='stride of the training-data window')
    parser.add_argument('--gradient_accumulation', default=1, type=int, required=False, help='gradient accumulation')
    parser.add_argument('--fp16', action='store_true', help='mixed precision')
    parser.add_argument('--fp16_opt_level', default='O1', type=str, required=False)
    parser.add_argument('--max_grad_norm', default=1.0, type=float, required=False)
    parser.add_argument('--num_pieces', default=100, type=int, required=False, help='how many pieces to split the training corpus into')
    parser.add_argument('--min_length', default=1, type=int, required=False, help='minimum article length to include')
    parser.add_argument('--output_dir', default='model_classfier/', type=str, required=False, help='model output path')
    parser.add_argument('--pretrained_model', default='', type=str, required=False, help='path of the pretrained model to start from')
    parser.add_argument('--writer_dir', default='tensorboard_summary/', type=str, required=False, help='TensorBoard log path')
    parser.add_argument('--segment', action='store_true', help='treat Chinese at the word level')
    parser.add_argument('--bpe_token', action='store_true', help='use subword (BPE) tokenization')
    parser.add_argument('--encoder_json', default="tokenizations/encoder.json", type=str, help="encoder.json")
    parser.add_argument('--vocab_bpe', default="tokenizations/vocab.bpe", type=str, help="vocab.bpe")

    args = parser.parse_args()
    print('args:\n' + args.__repr__())

    if args.segment:
        from tokenizations import tokenization_bert_word_level as tokenization_bert
    else:
        from tokenizations import tokenization_bert

    os.environ["CUDA_VISIBLE_DEVICES"] = args.device  # select which GPUs the program may use

    model_config = transformers.GPT2Config.from_json_file(args.model_config)
    print('config:\n' + model_config.to_json_string())

    n_ctx = model_config.n_ctx
    if args.bpe_token:
        full_tokenizer = get_encoder(args.encoder_json, args.vocab_bpe)
    else:
        full_tokenizer = tokenization_bert.BertTokenizer(vocab_file=args.tokenizer_path)
    full_tokenizer.max_len = 999999
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    print('using device:', device)

    raw_data_path = args.raw_data_path
    log_step = args.log_step
    gradient_accumulation = args.gradient_accumulation
    output_dir = args.output_dir
    assert log_step % gradient_accumulation == 0

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    if True:  # always rebuild the dataset from the raw json
        print('building files')
        resources, resources_id, input_question_list, max_aq_len = build_files(
            data_path=raw_data_path, full_tokenizer=full_tokenizer)
        print('files built')
    input_ids = []  # one list of sliding-window chunks per article
    # labels = []
    for i in range(len(resources_id)):
        # 512-token windows with stride 384, i.e. 128 tokens of overlap between chunks
        inputsss, _ = sliding_window(max_len=512, resources=resources_id[i], stride=384)
        input_ids.append(inputsss)
        # labels = labels + [choices['label']] * len(inputsss)
    print('sliding built')

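    # inference script: every article is scored, so the "validation" set is the whole dataset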
    val_input_ids = input_ids
    val_input_question_list = input_question_list

    # if False:  # shuffle
    #     index = [i for i in range(len(token_type_ids))]
    #     random.shuffle(index)
    #     new_input_ids = [input_ids[i] for i in index]
    #     new_labels = [labels[i] for i in index]
    #     new_token_type_ids = [token_type_ids[i] for i in index]
    #     input_ids = new_input_ids
    #     labels = new_labels
    #     token_type_ids = new_token_type_ids
    # train_dataset = my_dataset(x=input_ids, y=labels, token_type_ids=token_type_ids)
    # train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=1)

    # if not args.pretrained_model:
    #     model = transformers.models.gpt2.GPT2LMHeadModel(config=model_config)
    # else:
    #     model = transformers.models.gpt2.GPT2LMHeadModel.from_pretrained(args.pretrained_model)

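    # rebuild the classifier and restore the best checkpoint saved by the training script ('loss.best')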
    model = modelMy(args, device)
    model.load_pretrained(output_dir + 'loss.best')
    model.to(device)

    num_parameters = 0
    parameters = model.parameters()
    for parameter in parameters:
        num_parameters += parameter.numel()
    print('number of parameters: {}'.format(num_parameters))

    print('calculating total steps')
    model.eval()
    pred_list = []
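    # run the model over each article's chunks and questions and collect the predicted choice index per question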
    for stepp in range(len(val_input_ids)):
        batch_inputs = val_input_ids[stepp]
        batch_inputs = torch.tensor(batch_inputs).long().to(device).unsqueeze(0)
        batch_questions = [z['Question_token'] for z in val_input_question_list[stepp][:]]
        batch_questions = torch.tensor(batch_questions).long().to(device).unsqueeze(0)
        batch_choices = [z['Choices_token'] for z in val_input_question_list[stepp][:]]
        batch_choices = torch.tensor(batch_choices).long().to(device).unsqueeze(0)
        #  forward pass
        outputs = model.forward(inputs=batch_inputs, questions=batch_questions, choices=batch_choices,
                                labels=None,training=False)
        pred = outputs
        pred_list = pred_list + pred.squeeze(0).tolist()

    # serialize the predicted choice indices so they can be reloaded later
    with open('data/answer.data', 'wb') as f:
        pickle.dump(pred_list, f)

    import csv
    headers = ['id', 'label']
    rows = []
    n_id = 101  # ids in the submission start at 101
    for choice in pred_list:
        if choice == 0:
            charr = 'A'
        elif choice == 1:
            charr = 'B'
        elif choice == 2:
            charr = 'C'
        elif choice == 3:
            charr = 'D'
        rows.append([n_id, charr])
        n_id += 1  # advance the id; otherwise every row would be written with id 101
    with open('submit.csv', 'w', newline='') as f:
        f_csv = csv.writer(f)
        f_csv.writerow(headers)
        f_csv.writerows(rows)