Example #1
def distributed_optimize(optimizer):
    '''
    Wrap ``optimizer`` for collective training: build a fleet
    DistributedStrategy with fused all-reduce, two NCCL communicators,
    and elementwise-add/activation and batchnorm/activation op fusion,
    then return the fleet-distributed optimizer.
    '''
    dist_strategy = DistributedStrategy()
    # Enable the three fusion switches in one pass.
    for fusion_flag in ('fuse_all_reduce_ops',
                        'fuse_elewise_add_act_ops',
                        'fuse_bn_act_ops'):
        setattr(dist_strategy, fusion_flag, True)
    dist_strategy.nccl_comm_num = 2
    return fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
Example #2
def train(args):
    """Run (optionally distributed) training with periodic evaluation.

    Builds separate startup/train/test fluid Programs, configures a fleet
    DistributedStrategy, optionally restores a checkpoint or pretrained
    weights, then loops over epochs feeding batches from either DALI or a
    paddle batch reader. Trainer 0 evaluates on the test set (when
    ``args.do_test`` or on the final epoch) and saves persistables plus an
    optional benchmark log at the end.

    Args:
        args: parsed CLI namespace. Fields read here include: model,
            checkpoint, pretrained_model, model_save_dir, use_mixup,
            batch_size, num_threads, num_iteration_per_drop_scope,
            with_inplace, fuse, nccl_comm_num, fuse_elewise_add_act_ops,
            data_format, use_gpu, use_dali, data_dir, fetch_steps,
            profile, do_test, benchmark_test.

    NOTE(review): relies on module-level globals not visible in this
    chunk — ``num_trainers``, ``trainer_id``, ``fleet``, ``fluid``,
    ``reader``, ``models``, ``paddle``, ``compiler``, ``profiler``,
    ``role_maker``, ``build_program``, ``get_device_num`` — confirm they
    are defined/imported at module level.
    """
    # parameters from arguments
    model_name = args.model
    checkpoint = args.checkpoint
    pretrained_model = args.pretrained_model
    model_save_dir = args.model_save_dir
    use_mixup = args.use_mixup
    # Read-only env probe; value is fetched but not used below in this chunk.
    use_ngraph = os.getenv('FLAGS_use_ngraph')

    # Separate programs: parameter init, training graph, evaluation graph.
    startup_prog = fluid.Program()
    train_prog = fluid.Program()
    test_prog = fluid.Program()

    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_threads = args.num_threads
    exec_strategy.num_iteration_per_drop_scope = args.num_iteration_per_drop_scope

    # Distributed strategy: all-reduce fusion is on by default and only
    # switched off when --fuse is false.
    dist_strategy = DistributedStrategy()
    dist_strategy.exec_strategy = exec_strategy
    dist_strategy.enable_inplace = args.with_inplace
    if not args.fuse:
        dist_strategy.fuse_all_reduce_ops = False
    dist_strategy.nccl_comm_num = args.nccl_comm_num
    dist_strategy.fuse_elewise_add_act_ops=args.fuse_elewise_add_act_ops

    # Collective role: every process is a trainer (no parameter servers).
    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)

    b_out = build_program(
                     is_train=True,
                     main_prog=train_prog,
                     startup_prog=startup_prog,
                     args=args,
                     dist_strategy=dist_strategy,
                     data_layout=args.data_format)
    # With mixup the labels are soft, so no accuracy vars are produced.
    if use_mixup:
        train_data_loader, train_cost, global_lr = b_out[0], b_out[1], b_out[2]
        train_fetch_vars = [train_cost, global_lr]
        train_fetch_list = []
        for var in train_fetch_vars:
            # Fetched vars must be persistable to survive scope cleanup.
            var.persistable=True
            train_fetch_list.append(var.name)

    else:
        train_data_loader, train_cost, train_acc1, train_acc5, global_lr = b_out[0],b_out[1],b_out[2],b_out[3],b_out[4]
        train_fetch_vars = [train_cost, train_acc1, train_acc5, global_lr]
        train_fetch_list = []
        for var in train_fetch_vars:
            var.persistable=True
            train_fetch_list.append(var.name)

    # Replace the local program with the fleet-transpiled one.
    train_prog = fleet.main_program

    b_out_test = build_program(
                     is_train=False,
                     main_prog=test_prog,
                     startup_prog=startup_prog,
                     args=args,
                     dist_strategy=dist_strategy,
                     data_layout=args.data_format)
    test_data_loader, test_cost, test_acc1, test_acc5 = b_out_test[0],b_out_test[1],b_out_test[2],b_out_test[3]

    # Freeze the eval graph and compile it for data-parallel execution.
    test_prog = test_prog.clone(for_test=True)
    test_prog = compiler.CompiledProgram(test_prog).with_data_parallel(loss_name=test_cost.name, exec_strategy=exec_strategy)

    # One GPU per process, selected via FLAGS_selected_gpus (defaults to 0).
    gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
    place = fluid.CUDAPlace(gpu_id) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_prog)

    # Resume full training state from a checkpoint, if given.
    if checkpoint is not None:
        fluid.io.load_persistables(exe, checkpoint, main_program=train_prog)

    # Warm-start: load only the variables that exist in the pretrained dir.
    if pretrained_model:
        def if_exist(var):
            return os.path.exists(os.path.join(pretrained_model, var.name))

        fluid.io.load_vars(
            exe, pretrained_model, main_program=train_prog, predicate=if_exist)

    if args.use_gpu:
        device_num = get_device_num()
    else:
        device_num = 1

    train_batch_size = args.batch_size
    print("train_batch_size: %d device_num:%d" % (train_batch_size, device_num))

    test_batch_size = args.batch_size
    # NOTE: the order of batch data generated by batch_reader
    # must be the same in the respective processes.
    shuffle_seed = 1 if num_trainers > 1 else None

    if args.use_dali:
        import dali
        train_iter = dali.train(settings=args, trainer_id=trainer_id, trainers_num=num_trainers,
                                gpu_id=gpu_id, data_layout=args.data_format)
    else:
        train_reader = reader.train(settings=args, data_dir=args.data_dir,
                                    pass_id_as_seed=shuffle_seed, data_layout=args.data_format, threads=10)
        train_batch_reader=paddle.batch(train_reader, batch_size=train_batch_size)

        test_reader = reader.val(settings=args, data_dir=args.data_dir, data_layout=args.data_format, threads=10)
        test_batch_reader=paddle.batch(test_reader, batch_size=test_batch_size)

        # Single-trainer GPU runs feed all visible devices; otherwise one place.
        places = place
        if num_trainers <= 1 and args.use_gpu:
            places = fluid.framework.cuda_places()

        train_data_loader.set_sample_list_generator(train_batch_reader, places)
        # NOTE(review): test loader uses the single `place`, not `places` —
        # looks intentional (eval runs on one device) but confirm.
        test_data_loader.set_sample_list_generator(test_batch_reader, place)

    test_fetch_vars = [test_cost, test_acc1, test_acc5]
    test_fetch_list = []
    for var in test_fetch_vars:
        var.persistable=True
        test_fetch_list.append(var.name)

    train_exe = exe

    # Per-model hyperparameters (e.g. num_epochs) from the model registry.
    params = models.__dict__[args.model]().params

    train_speed_list = []
    acc1_logs = []
    acc5_logs = []
    for pass_id in range(params["num_epochs"]):
        # [loss, acc1, acc5] accumulators for this epoch.
        train_info = [[], [], []]
        test_info = [[], [], []]
        train_begin=time.time()
        batch_id = 0
        time_record=[]

        if not args.use_dali:
            train_iter = train_data_loader()

        for data in train_iter:
            t1 = time.time()

            # Only fetch metrics every fetch_steps batches; other steps run
            # without a fetch list to avoid the sync/copy overhead.
            if batch_id % args.fetch_steps != 0:
                train_exe.run(train_prog, feed=data)
            else:
                if use_mixup:
                    loss, lr = train_exe.run(train_prog, feed=data, fetch_list=train_fetch_list)
                else:
                    loss, acc1, acc5, lr = train_exe.run(train_prog,  feed=data,  fetch_list=train_fetch_list)
                    acc1 = np.mean(np.array(acc1))
                    acc5 = np.mean(np.array(acc5))
                    train_info[1].append(acc1)
                    train_info[2].append(acc5)

            t2 = time.time()
            period = t2 - t1
            time_record.append(period)

            # Optional profiler window: batches 100-105 on trainer 0 only.
            if args.profile and batch_id == 100:
                print("begin profiler")
                if trainer_id == 0:
                    profiler.start_profiler("All")
            elif args.profile and batch_id == 105:
                print("begin to end profiler")
                if trainer_id == 0:
                    profiler.stop_profiler("total", "./profile_pass_%d" % (pass_id))
                print("end profiler break!")
                args.profile=False

            if batch_id % args.fetch_steps == 0:
                loss = np.mean(np.array(loss))
                train_info[0].append(loss)
                lr = np.mean(np.array(lr))
                # Average step time over the window since the last fetch.
                period = np.mean(time_record)
                speed = args.batch_size * 1.0 / period
                time_record=[]
                if use_mixup:
                    print("Pass {0}, trainbatch {1}, loss {2}, lr {3}, time {4}, speed {5}"
                          .format(pass_id, batch_id, "%.5f"%loss, "%.5f" %lr, "%2.4f sec" % period, "%.2f" % speed))
                else:
                    print("Pass {0}, trainbatch {1}, loss {2}, \
                        acc1 {3}, acc5 {4}, lr {5}, time {6}, speed {7}"
                          .format(pass_id, batch_id, "%.5f"%loss, "%.5f"%acc1, "%.5f"%acc5, "%.5f" %
                                  lr, "%2.4f sec" % period, "%.2f" % speed))
                sys.stdout.flush()
            batch_id += 1

        # DALI iterators must be reset between epochs.
        if args.use_dali:
            train_iter.reset()

        train_loss = np.array(train_info[0]).mean()
        if not use_mixup:
            train_acc1 = np.array(train_info[1]).mean()
            train_acc5 = np.array(train_info[2]).mean()
        train_end=time.time()
        train_speed = (batch_id * train_batch_size) / (train_end - train_begin)
        train_speed_list.append(train_speed)

        # Trainer 0 evaluates every epoch if do_test, and always on the last.
        if trainer_id == 0 and (args.do_test or (pass_id + 1) == params["num_epochs"]):
            if args.use_dali:
                test_iter = dali.val(settings=args, trainer_id=trainer_id, trainers_num=num_trainers,
                                 gpu_id=gpu_id, data_layout=args.data_format)
            else:
                test_iter = test_data_loader()

            test_batch_id = 0
            for data in test_iter:
                t1 = time.time()
                loss, acc1, acc5 = exe.run(program=test_prog,
                                           feed=data,
                                           fetch_list=test_fetch_list)
                t2 = time.time()
                period = t2 - t1
                loss = np.mean(loss)
                acc1 = np.mean(acc1)
                acc5 = np.mean(acc5)
                test_info[0].append(loss)
                test_info[1].append(acc1)
                test_info[2].append(acc5)

                # Log every 10th eval batch.
                if test_batch_id % 10 == 0:
                    test_speed = test_batch_size * 1.0 / period
                    print("Pass {0},testbatch {1},loss {2}, \
                        acc1 {3},acc5 {4},time {5},speed {6}"
                        .format(pass_id, test_batch_id, "%.5f"%loss,"%.5f"%acc1, "%.5f"%acc5,
                                "%2.2f sec" % period, "%.2f" % test_speed))
                    sys.stdout.flush()
                test_batch_id += 1

            if args.use_dali:
                test_iter.reset()
                del test_iter

            test_loss = np.array(test_info[0]).mean()
            test_acc1 = np.array(test_info[1]).mean()
            test_acc5 = np.array(test_info[2]).mean()

            acc1_logs.append(test_acc1)
            acc5_logs.append(test_acc5)

            if use_mixup:
                print("End pass {0}, train_loss {1}, test_loss {2}, test_acc1 {3}, test_acc5 {4}, speed {5}".format(
                      pass_id, "%.5f"%train_loss, "%.5f"%test_loss, "%.5f"%test_acc1, "%.5f"%test_acc5,
                      "%.2f" % train_speed))
            else:
                print("End pass {0}, train_loss {1}, train_acc1 {2}, train_acc5 {3}, "
                  "test_loss {4}, test_acc1 {5}, test_acc5 {6}, speed {7}".format(
                      pass_id, "%.5f"%train_loss, "%.5f"%train_acc1, "%.5f"%train_acc5, "%.5f"%test_loss,
                      "%.5f"%test_acc1, "%.5f"%test_acc5, "%.2f" % train_speed))
        else:
            if use_mixup:
                print("End pass {0}, train_loss {1}, speed {2}".format(pass_id, "%.5f"%train_loss, "%.2f" % train_speed))
            else:
                print("End pass {0}, train_loss {1}, train_acc1 {2}, train_acc5 {3}, ""speed {4}".format(
                    pass_id, "%.5f"%train_loss, "%.5f"%train_acc1, "%.5f"%train_acc5, "%.2f" % train_speed))

        sys.stdout.flush()

    # save in last epoch
    if trainer_id == 0:
        model_path = os.path.join(model_save_dir + '/' + model_name, str(pass_id))
        if not os.path.isdir(model_path):
            os.makedirs(model_path)

        # Save from the original (pre-transpile) program captured by fleet.
        fluid.io.save_persistables(exe, model_path, main_program=fleet._origin_program)
        if args.benchmark_test:
            if not os.path.isdir("./benchmark_logs/"):
                os.makedirs("./benchmark_logs/")
            with open("./benchmark_logs/log_%d" % trainer_id, 'w') as f:
                # NOTE(review): keys '0', '1', '14' presumably follow an
                # external benchmark-log schema — confirm against consumer.
                result = dict()
                result['0'] = dict()
                result['0']['acc1'] = test_acc1
                result['0']['acc5'] = test_acc5
                result['0']['result_log'] = dict()
                result['0']['result_log']['acc1'] = acc1_logs
                result['0']['result_log']['acc5'] = acc5_logs
                # maximum speed of all epochs
                result['1'] = max(train_speed_list) * num_trainers
                result['14'] = args.batch_size

                print(str(result))
                f.writelines(str(result))