Example #1
def predict_wrapper(args,
                    exe,
                    ernie_config,
                    test_prog=None,
                    pyreader=None,
                    fetch_list=None):
    # Context to do validation.
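    # The reader makes a single unshuffled pass (epoch=1) over the test or validation filelist.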
    filelist = args.test_filelist if args.do_test else args.valid_filelist
    data_reader = ErnieDataReader(filelist,
                                  vocab_path=args.vocab_path,
                                  batch_size=args.batch_size,
                                  voc_size=ernie_config['vocab_size'],
                                  shuffle_files=False,
                                  epoch=1,
                                  max_seq_len=args.max_seq_len,
                                  is_test=True,
                                  in_tokens=args.in_tokens,
                                  is_bidirection=args.is_bidirection)

    if args.do_test:
        assert args.init_checkpoint is not None, "[FATAL] Please use --init_checkpoint '/path/to/checkpoints' \
                                                  to specify your pretrained model checkpoints"

        init_pretraining_params(exe, args.init_checkpoint, test_prog)

    def predict(exe=exe, pyreader=pyreader):

        pyreader.decorate_tensor_provider(data_reader.data_generator())
        pyreader.start()
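        # Accumulate summed metrics over the whole pass until the reader raises EOFException.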

        cost = 0
        lm_cost = 0
        acc = 0
        steps = 0
        time_begin = time.time()
        while True:
            try:
                each_next_acc, each_mask_lm_cost, each_total_cost = exe.run(
                    fetch_list=fetch_list, program=test_prog)
                acc += each_next_acc
                lm_cost += each_mask_lm_cost
                cost += each_total_cost
                steps += 1
                if args.do_test and steps % args.skip_steps == 0:
                    print("[test_set] steps: %d" % steps)

            except fluid.core.EOFException:
                pyreader.reset()
                break

        used_time = time.time() - time_begin
        return cost, lm_cost, acc, steps, (args.skip_steps / used_time)

    return predict
Example #2
def train(args):
    print("pretraining start")
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()

    train_program = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(train_program, startup_prog):
        with fluid.unique_name.guard():
            train_pyreader, next_sent_acc, mask_lm_loss, total_loss = create_model(
                pyreader_name='train_reader', ernie_config=ernie_config)
            scheduled_lr = optimization(loss=total_loss,
                                        warmup_steps=args.warmup_steps,
                                        num_train_steps=args.num_train_steps,
                                        learning_rate=args.learning_rate,
                                        train_program=train_program,
                                        startup_prog=startup_prog,
                                        weight_decay=args.weight_decay,
                                        scheduler=args.lr_scheduler,
                                        use_fp16=args.use_fp16,
                                        loss_scaling=args.loss_scaling)

            fluid.memory_optimize(input_program=train_program,
                                  skip_opt_set=[
                                      next_sent_acc.name, mask_lm_loss.name,
                                      total_loss.name
                                  ])

    test_prog = fluid.Program()
    with fluid.program_guard(test_prog, startup_prog):
        with fluid.unique_name.guard():
            test_pyreader, next_sent_acc, mask_lm_loss, total_loss = create_model(
                pyreader_name='test_reader', ernie_config=ernie_config)

    test_prog = test_prog.clone(for_test=True)

    if args.use_cuda:
        place = fluid.CUDAPlace(0)
        dev_count = fluid.core.get_cuda_device_count()
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))

    print("Device count %d" % dev_count)
    print("theoretical memory usage: ")
    if args.in_tokens:
        print(
            fluid.contrib.memory_usage(program=train_program,
                                       batch_size=args.batch_size //
                                       args.max_seq_len))
    else:
        print(
            fluid.contrib.memory_usage(program=train_program,
                                       batch_size=args.batch_size))

    nccl2_num_trainers = 1
    nccl2_trainer_id = 0
    print("args.is_distributed:", args.is_distributed)
    if args.is_distributed:
        worker_endpoints_env = os.getenv("worker_endpoints")
        worker_endpoints = worker_endpoints_env.split(",")
        trainers_num = len(worker_endpoints)
        current_endpoint = os.getenv("current_endpoint")
        trainer_id = worker_endpoints.index(current_endpoint)
        if trainer_id == 0:
            print("train_id == 0, sleep 60s")
            time.sleep(60)
        print("worker_endpoints:{} trainers_num:{} current_endpoint:{} \
              trainer_id:{}".format(worker_endpoints, trainers_num,
                                    current_endpoint, trainer_id))

        # prepare nccl2 env.
        config = fluid.DistributeTranspilerConfig()
        config.mode = "nccl2"
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(trainer_id,
                    trainers=worker_endpoints_env,
                    current_endpoint=current_endpoint,
                    program=train_program,
                    startup_program=startup_prog)
        nccl2_num_trainers = trainers_num
        nccl2_trainer_id = trainer_id

    exe = fluid.Executor(place)
    exe.run(startup_prog)

    if args.init_checkpoint and args.init_checkpoint != "":
        init_checkpoint(exe, args.init_checkpoint, train_program,
                        args.use_fp16)

    data_reader = ErnieDataReader(filelist=args.train_filelist,
                                  batch_size=args.batch_size,
                                  vocab_path=args.vocab_path,
                                  voc_size=ernie_config['vocab_size'],
                                  epoch=args.epoch,
                                  max_seq_len=args.max_seq_len,
                                  generate_neg_sample=args.generate_neg_sample,
                                  in_tokens=args.in_tokens,
                                  is_bidirection=args.is_bidirection)

    exec_strategy = fluid.ExecutionStrategy()
    if args.use_fast_executor:
        exec_strategy.use_experimental_executor = True
    exec_strategy.num_threads = dev_count
    exec_strategy.num_iteration_per_drop_scope = min(10, args.skip_steps)

    build_strategy = fluid.BuildStrategy()
    build_strategy.remove_unnecessary_lock = False

    train_exe = fluid.ParallelExecutor(use_cuda=args.use_cuda,
                                       loss_name=total_loss.name,
                                       build_strategy=build_strategy,
                                       exec_strategy=exec_strategy,
                                       main_program=train_program,
                                       num_trainers=nccl2_num_trainers,
                                       trainer_id=nccl2_trainer_id)

    if args.valid_filelist and args.valid_filelist != "":
        predict = predict_wrapper(args,
                                  exe,
                                  ernie_config,
                                  test_prog=test_prog,
                                  pyreader=test_pyreader,
                                  fetch_list=[
                                      next_sent_acc.name, mask_lm_loss.name,
                                      total_loss.name
                                  ])

    train_pyreader.decorate_tensor_provider(data_reader.data_generator())
    train_pyreader.start()
    steps = 0
    cost = []
    lm_cost = []
    acc = []
    time_begin = time.time()
    while steps < args.num_train_steps:
        try:
            steps += nccl2_num_trainers
            skip_steps = args.skip_steps * nccl2_num_trainers
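            # Only trainer 0 fetches and logs metrics; the other trainers run with an empty fetch list.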

            if nccl2_trainer_id != 0:
                train_exe.run(fetch_list=[])
                continue

            if steps % skip_steps != 0:
                train_exe.run(fetch_list=[])
            else:
                each_next_acc, each_mask_lm_cost, each_total_cost, np_lr = train_exe.run(
                    fetch_list=[
                        next_sent_acc.name, mask_lm_loss.name, total_loss.name,
                        scheduled_lr.name
                    ])
                acc.extend(each_next_acc)
                lm_cost.extend(each_mask_lm_cost)
                cost.extend(each_total_cost)

                print("feed_queue size", train_pyreader.queue.size())
                time_end = time.time()
                used_time = time_end - time_begin
                epoch, current_file_index, total_file, current_file, mask_type = data_reader.get_progress(
                )
                print("current learning_rate:%f" % np_lr[0])
                print(
                    "epoch: %d, progress: %d/%d, step: %d, loss: %f, "
                    "ppl: %f, next_sent_acc: %f, speed: %f steps/s, file: %s, mask_type: %s"
                    %
                    (epoch, current_file_index, total_file, steps,
                     np.mean(np.array(cost)), np.mean(np.exp(
                         np.array(lm_cost))), np.mean(np.array(acc)),
                     skip_steps / used_time, current_file, mask_type))
                cost = []
                lm_cost = []
                acc = []
                time_begin = time.time()

            if steps % args.save_steps == 0:
                save_path = os.path.join(args.checkpoints,
                                         "step_" + str(steps))
                fluid.io.save_persistables(exe, save_path, train_program)

            if args.valid_filelist and steps % args.validation_steps == 0:
                vali_cost, vali_lm_cost, vali_acc, vali_steps, vali_speed = predict(
                )
                print("[validation_set] epoch: %d, step: %d, "
                      "loss: %f, global ppl: %f, batch-averged ppl: %f, "
                      "next_sent_acc: %f, speed: %f steps/s" %
                      (epoch, steps, np.mean(np.array(vali_cost) / vali_steps),
                       np.exp(np.mean(np.array(vali_lm_cost) / vali_steps)),
                       np.mean(np.exp(np.array(vali_lm_cost) / vali_steps)),
                       np.mean(np.array(vali_acc) / vali_steps), vali_speed))
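                # Note: "global ppl" exponentiates the mean LM loss, while "batch-averaged ppl" takes the
                # mean of exponentiated losses; by Jensen's inequality the latter is >= the former.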

        except fluid.core.EOFException:
            train_pyreader.reset()
            break
Example #3
def train(args):
    print("pretraining start")
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()

    with open(args.task_group_json) as f:
        task_group = json.load(f)

    exec_strategy = fluid.ExecutionStrategy()
    if args.use_fast_executor:
        exec_strategy.use_experimental_executor = True
    exec_strategy.num_threads = 4 if args.use_amp else 2
    exec_strategy.num_iteration_per_drop_scope = min(1, args.skip_steps)

    node_nums = int(os.getenv("PADDLE_NODES_NUM"))
    print("args.is_distributed:", args.is_distributed)
    num_trainers = 1
    trainer_id = 0
    
    if args.is_distributed:
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        trainer_id = fleet.worker_index()
        current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
        worker_endpoints = fleet.worker_endpoints()
        trainers_num = len(worker_endpoints)
        print("worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}"
              .format(worker_endpoints, trainers_num, current_endpoint, trainer_id))

        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.remove_unnecessary_lock = False # not useful
        dist_strategy.fuse_all_reduce_ops = True if args.use_fuse else False
        dist_strategy.nccl_comm_num = args.nccl_comm_num

        if args.use_hierarchical_allreduce \
            and trainers_num > args.hierarchical_allreduce_inter_nranks:
                dist_strategy.use_hierarchical_allreduce = args.use_hierarchical_allreduce
                dist_strategy.hierarchical_allreduce_inter_nranks = \
                        args.hierarchical_allreduce_inter_nranks
                assert dist_strategy.hierarchical_allreduce_inter_nranks > 1
                assert trainers_num % dist_strategy.hierarchical_allreduce_inter_nranks == 0
                dist_strategy.hierarchical_allreduce_exter_nranks = \
                         trainers_num // dist_strategy.hierarchical_allreduce_inter_nranks

        if args.use_amp:
            dist_strategy.use_amp = True
            dist_strategy.amp_loss_scaling = args.init_loss_scaling
        if args.use_recompute:
            dist_strategy.forward_recompute = True
            dist_strategy.enable_sequential_execution=True

        trainer_id = fleet.worker_index()
        current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
        worker_endpoints = fleet.worker_endpoints()
        trainers_num = len(worker_endpoints)
        print("worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}"
              .format(worker_endpoints,trainers_num, current_endpoint, trainer_id))
    else:
        dist_strategy = None

    gpu_id = 0
    gpus = fluid.core.get_cuda_device_count()
    if args.is_distributed:
        gpus = os.getenv("FLAGS_selected_gpus").split(",")
        gpu_id = int(gpus[0])

    if args.use_cuda:
        place = fluid.CUDAPlace(gpu_id)
        # get_cuda_device_count() returns an int, FLAGS_selected_gpus a list of device ids.
        dev_count = len(gpus) if isinstance(gpus, (list, tuple)) else gpus
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))

    print("Device count %d, gpu_id:%d" % (dev_count, gpu_id))

    train_program = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(train_program, startup_prog):
        with fluid.unique_name.guard():
            train_pyreader, fetch_vars = create_model(
                pyreader_name='train_reader', ernie_config=ernie_config, task_group=task_group)
            graph_vars = fetch_vars["graph_vars"]
            checkpoints = fetch_vars["checkpoints"]
            total_loss = graph_vars[-1]
            if args.use_recompute:
                dist_strategy.recompute_checkpoints = checkpoints
            scheduled_lr, loss_scaling = optimization(
                loss=total_loss,
                warmup_steps=args.warmup_steps,
                num_train_steps=args.num_train_steps,
                learning_rate=args.learning_rate,
                train_program=train_program,
                startup_prog=startup_prog,
                weight_decay=args.weight_decay,
                scheduler=args.lr_scheduler,
                use_fp16=args.use_amp,
                use_dynamic_loss_scaling=args.use_dynamic_loss_scaling,
                init_loss_scaling=args.init_loss_scaling,
                incr_every_n_steps=args.incr_every_n_steps,
                decr_every_n_nan_or_inf=args.decr_every_n_nan_or_inf,
                incr_ratio=args.incr_ratio,
                decr_ratio=args.decr_ratio,
                dist_strategy=dist_strategy)    

    origin_train_program = train_program
    if args.is_distributed:
        # wrapped by fleet; need to assign fleet's modified train_program back
        train_program = fleet.main_program
        origin_train_program = fleet._origin_program

    test_prog = fluid.Program()
    with fluid.program_guard(test_prog, startup_prog):
        with fluid.unique_name.guard():
            test_pyreader, fetch_vars = create_model(
                pyreader_name='test_reader', ernie_config=ernie_config, task_group=task_group)
            graph_vars = fetch_vars["graph_vars"]
            total_loss = graph_vars[-1]

    test_prog = test_prog.clone(for_test=True)
    
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    
    if args.init_checkpoint and args.init_checkpoint != "":
        #init_checkpoint(exe, args.init_checkpoint, origin_train_program, args.use_amp)
        init_pretraining_params(exe, args.init_checkpoint, origin_train_program, args.use_amp)

    data_reader = ErnieDataReader(
        task_group,
        False,
        batch_size=args.batch_size,
        vocab_path=args.vocab_path,
        voc_size=ernie_config['vocab_size'],
        epoch=args.epoch,
        max_seq_len=args.max_seq_len,
        generate_neg_sample=args.generate_neg_sample,
        hack_old_trainset=args.hack_old_data)
    
    # With fleet, the plain Executor is used directly; fleet already parallelized train_program.
    train_exe = exe

    predict = predict_wrapper(
        args,
        exe,
        ernie_config,
        task_group,
        test_prog=test_prog,
        pyreader=test_pyreader,
        fetch_list=[var.name for var in graph_vars])

    train_pyreader.set_batch_generator(data_reader.data_generator())
    train_pyreader.start()
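    # Resume the global step counter from a hard-coded value (presumably a previously saved step).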
    steps = 112000
    time_begin = time.time()
    node_nums = int(os.getenv("PADDLE_NODES_NUM"))
    while True:  # steps < args.num_train_steps:
        try:
            steps += 1  # node_nums
            skip_steps = args.skip_steps  # * node_nums

            fetch_list = []
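            # Fetch metrics (plus the LR and, under AMP, the loss scaling) only on trainer 0 at logging steps.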
            if trainer_id == 0 and steps % skip_steps == 0:
                fetch_list = [var.name for var in graph_vars] + [scheduled_lr.name]
                if args.use_amp:
                    fetch_list.append(loss_scaling.name)

            outputs = train_exe.run(fetch_list=fetch_list, program=train_program)
            time_end = time.time()
            used_time = time_end - time_begin
            
            if outputs:
                each_mask_lm_cost, lm_w = outputs[:2]
                if args.use_amp:
                    each_total_constract_loss, each_total_cost, np_lr, l_scaling = outputs[-4:]
                else:
                    each_total_constract_loss, each_total_cost, np_lr = outputs[-3:]
                acc_list =[]
                index = 2
                for task in task_group:
                    each_task_acc = outputs[index]
                    task_w = outputs[index + 1]
                    acc = np.sum(each_task_acc * task_w) / np.sum(task_w)
                    acc_list.append("%s acc: %f" % (task["task_name"], acc))
                    index += 2

                print("feed_queue size", train_pyreader.queue.size())
                epoch, current_file_index, total_file, current_file, mask_type = data_reader.get_progress()
                if args.use_amp:
                    print("current learning_rate:%f, loss scaling:%f" % (np_lr[0], l_scaling[0]))
                else:
                    print("current learning_rate:%f" % np_lr[0])
                print(
                    "epoch: %d, progress: %d/%d, step: %d, constract_loss: %f, loss: %f, "
                    "ppl: %f, %s, speed: %f steps/s, file: %s, mask_type: %s"
                    % (epoch, current_file_index, total_file, steps,
                       np.mean(each_total_constract_loss), np.mean(each_total_cost),
                       np.exp(np.sum(each_mask_lm_cost * lm_w) / np.sum(lm_w)),
                       ", ".join(acc_list), skip_steps / used_time,
                       current_file, mask_type))
                time_begin = time.time()
            elif steps % skip_steps == 0:
                epoch, current_file_index, total_file, current_file, mask_type = data_reader.get_progress(
                )
                print("feed_queue size", train_pyreader.queue.size())
                print("epoch: %d, progress: %d/%d, step: %d, "
                        "speed: %f steps/s, file: %s, mask_type: %s"
                        % (epoch, current_file_index, total_file, steps,
                            skip_steps / used_time, current_file, mask_type))
                time_begin = time.time()

            if not trainer_id == 0:
                continue

            if steps % args.save_steps == 0:
                save_path = os.path.join(args.checkpoints, "step_" + str(steps))
                fluid.io.save_persistables(exe, save_path, origin_train_program)

            if steps % args.validation_steps == 0:
                valid_list = predict()
                print("[validation_set] epoch: %d, step: %d, %s" % \
                      (epoch, steps, ", ".join(valid_list)))

        except fluid.core.EOFException:
            train_pyreader.reset()
            break
Example #4
def predict_wrapper(args,
                    exe,
                    ernie_config,
                    task_group,
                    test_prog=None,
                    pyreader=None,
                    fetch_list=None):
    # Context to do validation.
    data_reader = ErnieDataReader(
        task_group,
        True,
        vocab_path=args.vocab_path,
        batch_size=2048,  # args.batch_size
        voc_size=ernie_config['vocab_size'],
        shuffle_files=False,
        epoch=1,
        max_seq_len=args.max_seq_len,
        hack_old_trainset=args.hack_old_data,
        is_test=True)

    if args.do_test:
        assert args.init_checkpoint is not None, "[FATAL] Please use --init_checkpoint '/path/to/checkpoints' \
                                                  to specify your pretrained model checkpoints"

        init_pretraining_params(exe, args.init_checkpoint, test_prog)

    def predict(exe=exe, pyreader=pyreader):

        pyreader.set_batch_generator(data_reader.data_generator())
        pyreader.start()
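        # Accumulate weighted LM cost and per-task accuracies over the whole test pass.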

        cost = 0
        constract_loss = 0
        lm_cost = 0
        lm_steps = 0
        task_acc = {}
        task_steps = {}
        steps = 0
        time_begin = time.time()
        while True:
            try:
                outputs = exe.run(fetch_list=fetch_list, program=test_prog)
                each_mask_lm_cost, lm_w = outputs[:2]
                each_total_constract_loss = outputs[-2]
                each_total_cost =  outputs[-1]
                lm_cost += np.sum(each_mask_lm_cost * lm_w)
                lm_steps += np.sum(lm_w)
                cost += np.mean(each_total_cost)
                constract_loss += np.mean(each_total_constract_loss)
                steps += 1

                index = 2
                for task in task_group:
                    each_task_acc = outputs[index]
                    task_w = outputs[index + 1]
                    task_acc[task["task_name"]] = task_acc.get(task["task_name"], 0.0) \
                                                + np.sum(each_task_acc * task_w)
                    task_steps[task["task_name"]] = task_steps.get(task["task_name"], 0.0) \
                                                  + np.sum(task_w)
                    index += 2

            except fluid.core.EOFException:
                pyreader.reset()
                break

        used_time = time.time() - time_begin

        ret = ["loss: %f" % (cost / steps),
               "constract_loss: %f" % (constract_loss / steps),
               "ppl: %f" % (np.exp(lm_cost / lm_steps))]
        for task in task_group:
            acc = task_acc[task["task_name"]] / task_steps[task["task_name"]]
            ret.append("%s acc: %f" % (task["task_name"], acc))

        ret.append("speed: " + str(args.skip_steps / used_time) + " steps/s")
        return ret

    return predict
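
The weighted reductions above (e.g. np.sum(each_task_acc * task_w) / np.sum(task_w)) average each task's accuracy with its sample weights across batches. A minimal self-contained sketch of that accumulation, using toy numbers and a placeholder task name rather than anything from the real task_group config, is:

import numpy as np

task_acc, task_steps = {}, {}
task_name = "toy_task"  # placeholder, not a real task from task_group

# Two pretend batches, each yielding per-element accuracies and matching weights.
batches = [(np.array([1.0, 0.0, 1.0]), np.array([1.0, 1.0, 1.0])),
           (np.array([1.0, 1.0, 0.0]), np.array([1.0, 0.0, 1.0]))]

for each_task_acc, task_w in batches:
    task_acc[task_name] = task_acc.get(task_name, 0.0) + np.sum(each_task_acc * task_w)
    task_steps[task_name] = task_steps.get(task_name, 0.0) + np.sum(task_w)

# Normalize at the end, as predict() above does per task.
print("%s acc: %f" % (task_name, task_acc[task_name] / task_steps[task_name]))
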
Example #5
def train(args):
    print("pretraining start")
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()

    with open(args.task_group_json) as f:
        task_group = json.load(f)

    exec_strategy = fluid.ExecutionStrategy()
    if args.use_fast_executor:
        exec_strategy.use_experimental_executor = True
    exec_strategy.num_threads = 4 if args.use_amp else 2
    exec_strategy.num_iteration_per_drop_scope = min(1, args.skip_steps)

    node_nums = 1  #int(os.getenv("PADDLE_NODES_NUM"))
    print("args.is_distributed:", args.is_distributed)
    num_trainers = 1
    trainer_id = 0

    dist_strategy = None

    gpu_id = 0
    gpus = 1  #fluid.core.get_cuda_device_count()
    print(gpus)
    if args.is_distributed:
        gpus = os.getenv("FLAGS_selected_gpus").split(",")
        gpu_id = int(gpus[0])

    place = fluid.CPUPlace()
    dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))

    print("Device count %d, gpu_id:%d" % (dev_count, gpu_id))

    train_program = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(train_program, startup_prog):
        with fluid.unique_name.guard():
            fetch_vars, train_data_names = create_model(
                pyreader_name='train_reader',
                ernie_config=ernie_config,
                task_group=task_group)
            graph_vars = fetch_vars["graph_vars"]
            checkpoints = fetch_vars["checkpoints"]
            total_loss = graph_vars[-1]
            if args.use_recompute:
                dist_strategy.recompute_checkpoints = checkpoints
            fetch_list_ascend = [var for var in graph_vars]
            scheduled_lr, loss_scaling = optimization(
                loss=total_loss,
                warmup_steps=args.warmup_steps,
                num_train_steps=args.num_train_steps,
                learning_rate=args.learning_rate,
                train_program=train_program,
                startup_prog=startup_prog,
                weight_decay=args.weight_decay,
                scheduler=args.lr_scheduler,
                use_fp16=args.use_amp,
                use_dynamic_loss_scaling=args.use_dynamic_loss_scaling,
                init_loss_scaling=args.init_loss_scaling,
                incr_every_n_steps=args.incr_every_n_steps,
                decr_every_n_nan_or_inf=args.decr_every_n_nan_or_inf,
                incr_ratio=args.incr_ratio,
                decr_ratio=args.decr_ratio,
                fetch_list=fetch_list_ascend,
                dist_strategy=dist_strategy)

    origin_train_program = train_program

    test_prog = fluid.Program()
    with fluid.program_guard(test_prog, startup_prog):
        with fluid.unique_name.guard():
            fetch_vars, test_data_names = create_model(
                pyreader_name='test_reader',
                ernie_config=ernie_config,
                task_group=task_group)
            graph_vars = fetch_vars["graph_vars"]
            total_loss = graph_vars[-1]

    test_prog = test_prog.clone(for_test=True)

    exe = fluid.Executor(place)
    exe.run(startup_prog)

    if args.init_checkpoint and args.init_checkpoint != "":
        #init_checkpoint(exe, args.init_checkpoint, origin_train_program, args.use_amp)
        init_pretraining_params(exe, args.init_checkpoint,
                                origin_train_program, args.use_amp)

    data_reader = ErnieDataReader(task_group,
                                  False,
                                  batch_size=args.batch_size,
                                  vocab_path=args.vocab_path,
                                  voc_size=ernie_config['vocab_size'],
                                  epoch=args.epoch,
                                  max_seq_len=args.max_seq_len,
                                  generate_neg_sample=args.generate_neg_sample,
                                  hack_old_trainset=args.hack_old_data)

    # Run with the plain Executor (no ParallelExecutor).
    train_exe = exe

    predict = predict_wrapper(args,
                              exe,
                              ernie_config,
                              task_group,
                              test_prog=test_prog,
                              data_names=test_data_names,
                              fetch_list=[var for var in graph_vars])

    #train_pyreader.set_batch_generator(data_reader.data_generator())
    #train_pyreader.start()
    train_data_generator = data_reader.data_generator()
    # Create the batch iterator once; calling the wrapper inside the loop would restart it every step.
    train_data_iter = train_data_generator()
    steps = 0
    time_begin = time.time()
    feed_list = {}
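    # Batches are fed through a feed dict keyed by train_data_names instead of a pyreader.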
    while True:  #steps < args.num_train_steps:
        try:
            steps += 1  #node_nums
            skip_steps = args.skip_steps  # * node_nums

            input_list = next(train_data_iter, None)
            if input_list is None:
                break
            for index in range(len(input_list)):
                feed_list[train_data_names[index]] = input_list[index]

            fetch_list = []
            if trainer_id == 0 and steps % skip_steps == 0:
                fetch_list = [var for var in graph_vars] + [scheduled_lr.name]
                if args.use_amp:
                    fetch_list.append(loss_scaling.name)

            outputs = train_exe.run(feed=feed_list,
                                    fetch_list=fetch_list,
                                    program=train_program)
            time_end = time.time()
            used_time = time_end - time_begin

            if outputs:
                each_mask_lm_cost, lm_w = outputs[:2]
                if args.use_amp:
                    each_total_constract_loss, each_total_cost, np_lr, l_scaling = outputs[
                        -4:]
                else:
                    each_total_constract_loss, each_total_cost, np_lr = outputs[
                        -3:]
                acc_list = []
                index = 2
                for task in task_group:
                    each_task_acc = outputs[index]
                    task_w = outputs[index + 1]
                    acc = np.sum(each_task_acc * task_w) / np.sum(task_w)
                    acc_list.append("%s acc: %f" % (task["task_name"], acc))
                    index += 2

                epoch, current_file_index, total_file, current_file, mask_type = data_reader.get_progress(
                )
                if args.use_amp:
                    print("current learning_rate:%f, loss scaling:%f" %
                          (np_lr[0], l_scaling[0]))
                else:
                    print("current learning_rate:%f" % np_lr[0])
                print(
                    "epoch: %d, progress: %d/%d, step: %d, constract_loss: %f, loss: %f, "
                    "ppl: %f, %s, speed: %f steps/s, file: %s, mask_type: %s" %
                    (epoch, current_file_index, total_file, steps,
                     np.mean(each_total_constract_loss),
                     np.mean(each_total_cost),
                     np.exp(np.sum(each_mask_lm_cost * lm_w) / np.sum(lm_w)),
                     ", ".join(acc_list), skip_steps / used_time, current_file,
                     mask_type))
                time_begin = time.time()
            elif steps % skip_steps == 0:
                epoch, current_file_index, total_file, current_file, mask_type = data_reader.get_progress(
                )
                print("epoch: %d, progress: %d/%d, step: %d, "
                      "speed: %f steps/s, file: %s, mask_type: %s" %
                      (epoch, current_file_index, total_file, steps,
                       skip_steps / used_time, current_file, mask_type))
                time_begin = time.time()

            if not trainer_id == 0:
                continue

            if steps % args.save_steps == 0:
                save_path = os.path.join(args.checkpoints,
                                         "step_" + str(steps))
                fluid.io.save_persistables(exe, save_path,
                                           origin_train_program)

            if steps % args.validation_steps == 0:
                valid_list = predict()
                print("[validation_set] epoch: %d, step: %d, %s" % \
                      (epoch, steps, ", ".join(valid_list)))

        except fluid.core.EOFException:
            # No pyreader is used in this variant; just stop when the data source signals end of input.
            break
Example #6
import json

import tokenization
from batching import pad_batch_data
from reader.pretraining import ErnieDataReader
from model.ernie import ErnieModel, ErnieConfig

if __name__ == '__main__':
    with open("./config/task.json") as f:
        task_group = json.load(f)
    ernie_config = ErnieConfig("./config/ernie_config.json")

    data_reader = ErnieDataReader(
        task_group,
        True,
        batch_size=128,
        vocab_path="./config/vocab.txt",
        voc_size=ernie_config['vocab_size'],
        epoch=1,
        max_seq_len=64,
        generate_neg_sample=False,
        hack_old_trainset=False,
        is_test=True)
    train_data_generator = data_reader.data_generator()
    # Create the batch iterator once; calling the wrapper inside the loop would restart it each time.
    train_data_iter = train_data_generator()
    cnt = 0
    while True:
    #for batch_data in train_data_iter:
        batch_data = next(train_data_iter, None)
        if batch_data is None:
            break
        (src_id, pos_id, sent_id, task_id, self_input_mask, mask_label,
         mask_pos, lm_w, batch_mask, loss_mask, gather_idx) = batch_data[:11]
        
        print(mask_pos)
        #for data in batch_data:
        #    print(data.shape)
Example #7
def train(args):
    print("pretraining start")
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()

    with open(args.task_group_json) as f:
        task_group = json.load(f)

    trainer_id = 0
    dist_strategy = None

    place = fluid.CPUPlace()
    train_program = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(train_program, startup_prog):
        with fluid.unique_name.guard():
            fetch_vars, train_data_names = create_model(
                pyreader_name='train_reader',
                ernie_config=ernie_config,
                task_group=task_group)
            graph_vars = fetch_vars["graph_vars"]
            total_loss = graph_vars[-1]
            fetch_list_ascend = [var for var in graph_vars]
            optimizer, scheduled_lr, loss_scaling = optimization(
                loss=total_loss,
                warmup_steps=args.warmup_steps,
                num_train_steps=args.num_train_steps,
                learning_rate=args.learning_rate,
                train_program=train_program,
                startup_prog=startup_prog,
                weight_decay=args.weight_decay,
                scheduler=args.lr_scheduler,
                use_fp16=args.use_amp,
                use_dynamic_loss_scaling=args.use_dynamic_loss_scaling,
                init_loss_scaling=args.init_loss_scaling,
                incr_every_n_steps=args.incr_every_n_steps,
                decr_every_n_nan_or_inf=args.decr_every_n_nan_or_inf,
                incr_ratio=args.incr_ratio,
                decr_ratio=args.decr_ratio,
                fetch_list=fetch_list_ascend,
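                # NOTE: 'ascend' is assumed to be defined elsewhere in the original module; it is not set in this snippet.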
                ascend=ascend,
                dist_strategy=dist_strategy)

    #test_prog = fluid.Program()
    #with fluid.program_guard(test_prog, startup_prog):
    #    with fluid.unique_name.guard():
    #        fetch_vars, test_data_names = create_model(
    #            pyreader_name='test_reader', ernie_config=ernie_config, task_group=task_group)
    #        graph_vars = fetch_vars["graph_vars"]
    #        total_loss = graph_vars[-1]

    #test_prog = test_prog.clone(for_test=True)

    exe = fluid.Executor(place)
    exe.run(startup_prog)

    if args.init_checkpoint and args.init_checkpoint != "":
        #init_checkpoint(exe, args.init_checkpoint, train_program, args.use_amp)
        init_pretraining_params(exe, args.init_checkpoint, train_program,
                                args.use_amp)

    data_reader = ErnieDataReader(task_group,
                                  False,
                                  batch_size=args.batch_size,
                                  vocab_path=args.vocab_path,
                                  voc_size=ernie_config['vocab_size'],
                                  epoch=args.epoch,
                                  max_seq_len=args.max_seq_len,
                                  generate_neg_sample=args.generate_neg_sample,
                                  hack_old_trainset=args.hack_old_data)

    # Run with the plain Executor (no ParallelExecutor).
    train_exe = exe

    #predict = predict_wrapper(
    #    args,
    #    exe,
    #    ernie_config,
    #    task_group,
    #    test_prog=test_prog,
    #    data_names=test_data_names,
    #    fetch_list=[var for var in graph_vars])

    train_data_generator = data_reader.data_generator()
    # Create the batch iterator once; calling the wrapper inside the loop would restart it every step.
    train_data_iter = train_data_generator()
    steps = 0
    time_begin = time.time()
    feed_list = {}
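    # Batches are fed through a feed dict keyed by train_data_names instead of a pyreader.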
    while steps < 1:  #args.num_train_steps:
        try:
            steps += 1  #node_nums
            skip_steps = args.skip_steps  # * node_nums

            input_list = next(train_data_iter, None)
            if input_list is None:
                break
            for index in range(len(input_list)):
                feed_list[train_data_names[index]] = input_list[index]

            fetch_list = []
            if trainer_id == 0 and steps % skip_steps == 0:
                fetch_list = [var for var in graph_vars] + [scheduled_lr.name]
                if args.use_amp:
                    fetch_list.append(loss_scaling.name)

            outputs = train_exe.run(feed=feed_list,
                                    fetch_list=fetch_list,
                                    program=train_program)
            time_end = time.time()
            used_time = time_end - time_begin

            if outputs:
                #each_mask_lm_cost, lm_w = outputs[:2]
                #if args.use_amp:
                #    each_total_constract_loss, each_total_cost, np_lr, l_scaling = outputs[-4:]
                #else:
                #    each_total_constract_loss, each_total_cost, np_lr = outputs[-3:]
                #acc_list =[]
                #index = 2
                #for task in task_group:
                #    each_task_acc = outputs[index]
                #    task_w = outputs[index + 1]
                #    acc = np.sum(each_task_acc * task_w) / np.sum(task_w)
                #    acc_list.append("%s acc: %f" % (task["task_name"], acc))
                #    index += 2

                epoch, current_file_index, total_file, current_file, mask_type = data_reader.get_progress(
                )
                #if args.use_amp:
                #    print("current learning_rate:%f, loss scaling:%f" % (np_lr[0], l_scaling[0]))
                #else:
                #    print("current learning_rate:%f" % np_lr[0])
                print("lm_weight: %f", outputs[0])
                #print(
                #    "epoch: %d, progress: %d/%d, step: %d, constract_loss: %f, loss: %f, "
                #    "ppl: %f, %s, speed: %f steps/s, file: %s, mask_type: %s"
                #    % (epoch, current_file_index, total_file, steps,
                #       np.mean(each_total_constract_loss), np.mean(each_total_cost),
                #       np.exp(np.sum(each_mask_lm_cost * lm_w) / np.sum(lm_w)),
                #       ", ".join(acc_list), skip_steps / used_time,
                #       current_file, mask_type))
                time_begin = time.time()
            elif steps % skip_steps == 0:
                epoch, current_file_index, total_file, current_file, mask_type = data_reader.get_progress(
                )
                print("epoch: %d, progress: %d/%d, step: %d, "
                      "speed: %f steps/s, file: %s, mask_type: %s" %
                      (epoch, current_file_index, total_file, steps,
                       skip_steps / used_time, current_file, mask_type))
                time_begin = time.time()

            if not trainer_id == 0:
                continue

            if steps % args.save_steps == 0:
                save_path = os.path.join(args.checkpoints,
                                         "step_" + str(steps))
                fluid.io.save_persistables(exe, save_path, train_program)

            #if steps % args.validation_steps == 0:
            #    valid_list = predict()
            #    print("[validation_set] epoch: %d, step: %d, %s" % \
            #          (epoch, steps, ", ".join(valid_list)))

        except fluid.core.EOFException:
            # No pyreader is used in this variant; just stop when the data source signals end of input.
            break