def _build_strategy(self, context):
    """Build the fleet distributed strategy selected by the runtime env.

    Reads the mode from ``train.trainer.strategy`` (one of "async",
    "geo", "sync", "half_async"), creates the matching strategy via
    ``StrategyFactory``, stores it under ``context["strategy"]`` and
    returns it.
    """
    from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory

    mode = envs.get_runtime_environ("train.trainer.strategy")
    assert mode in ["async", "geo", "sync", "half_async"]

    strategy = None
    if mode == "geo":
        # geo needs an extra knob: how many local steps before pushing.
        push_num = envs.get_global_env("train.strategy.mode.push_num", 100)
        strategy = StrategyFactory.create_geo_strategy(push_num)
    else:
        builders = {
            "async": StrategyFactory.create_async_strategy,
            "sync": StrategyFactory.create_sync_strategy,
            "half_async": StrategyFactory.create_half_async_strategy,
        }
        strategy = builders[mode]()

    assert strategy is not None
    context["strategy"] = strategy
    return strategy
def _get_distributed_strategy(self):
    """Translate the validated fleet strategy into a StrategyFactory one.

    Mapping, based on ``a_sync`` and ``a_sync_configs["k_steps"]`` of
    ``self.context["valid_strategy"]``:

      * a_sync is False and k_steps == 0  ->  sync strategy
      * a_sync is True  and k_steps == 0  ->  async strategy
      * a_sync is True  and k_steps  > 0  ->  geo strategy (push every
        k_steps local steps)

    Returns:
        The created strategy object.

    Raises:
        ValueError: for any other (a_sync, k_steps) combination.
    """
    strategy = None
    from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import \
        StrategyFactory

    dist_strategy = self.context["valid_strategy"]
    k_steps = dist_strategy.a_sync_configs["k_steps"]

    if not dist_strategy.a_sync and k_steps == 0:
        strategy = StrategyFactory.create_sync_strategy()

    if dist_strategy.a_sync and k_steps == 0:
        strategy = StrategyFactory.create_async_strategy()

    if dist_strategy.a_sync and k_steps > 0:
        strategy = StrategyFactory.create_geo_strategy(k_steps)

    if not strategy:
        # Bug fix: the original message read "k_steps must be invalid
        # value, please check", which is garbled — the failure is an
        # unsupported (a_sync, k_steps) combination.
        raise ValueError(
            "invalid distributed strategy: a_sync={}, k_steps={}, "
            "please check".format(dist_strategy.a_sync, k_steps))
    return strategy
def _set_strategy(self, args):
    """Configure the distributed_strategy for the run; build_strategy is
    configured in do_training.

    Two code paths exist:
      * legacy path (PADDLE_COMPATIBILITY_CHECK env var is truthy):
        everything is driven through ``DistributeTranspilerConfig``;
      * new path: a strategy is created via ``StrategyFactory`` and
        tuned through its build/program configs.

    NOTE(review): the source of this file arrived with indentation
    stripped; the nesting reconstructed below (common settings applied
    after the mode branches on the legacy path, program_config tuning
    inside the geo_async branch on the new path) matches the apparent
    intent — confirm against upstream history.
    """
    if int(os.getenv("PADDLE_COMPATIBILITY_CHECK", '0')):
        # Legacy path: one transpiler config object, mutated per mode.
        self.strategy = DistributeTranspilerConfig()
        if args.run_params["sync_mode"] == "sync":
            self.strategy.sync_mode = True
            self.strategy.runtime_split_send_recv = False
            self.async_mode = False
        elif args.run_params["sync_mode"] == "half_async":
            self.strategy.sync_mode = False
            self.async_mode = False
        elif args.run_params["sync_mode"] == "async":
            self.strategy.sync_mode = False
            self.async_mode = True
        elif args.run_params["sync_mode"] == "geo_async":
            self.strategy.sync_mode = False
            self.async_mode = True
            self.strategy.geo_sgd_mode = True
            self.strategy.geo_sgd_need_push_nums = 400
        # Settings common to every sync mode on the legacy path.
        self.strategy.mode = "pserver"
        self.strategy.slice_var_up = args.run_params['slice_var_up']
        self.strategy.enable_dc_asgd = args.run_params['enable_dc_asgd']
        #TODO: split_method=HashName, it will cause a bug, this option can open after repair
        # if args.run_params['split_method']:
        #     self.strategy.split_method = HashName
        # else:
        #     self.strategy.split_method = RoundRobin
        self.strategy.wait_port = args.run_params['wait_port']
        self.strategy.runtime_split_send_recv = args.run_params[
            'runtime_split_send_recv']
        self.strategy.use_hierarchical_allreduce = args.run_params[
            'use_hierarchical_allreduce']
        # Overrides the hard-coded 400 set in the geo_async branch above.
        self.strategy.geo_sgd_need_push_nums = args.run_params['push_nums']
    else:
        # New path: default to sync, then replace per requested mode.
        self.strategy = StrategyFactory.create_sync_strategy()
        # trainer_runtime_config = TrainerRuntimeConfig()
        # trainer_runtime_config.send_queue_size = "16"
        # trainer_runtime_config.thread_pool_size="32"
        # trainer_runtime_config.max_merge_var_num="16"
        # trainer_runtime_config.is_sgd_communicator="0"
        if args.run_params["sync_mode"] == "sync":
            self.strategy = StrategyFactory.create_sync_strategy()
        elif args.run_params["sync_mode"] == "half_async":
            self.strategy = StrategyFactory.create_half_async_strategy()
        elif args.run_params["sync_mode"] == "async":
            self.strategy = StrategyFactory.create_async_strategy()
            # Disable memory optimization for the async build.
            build_strategy = self.strategy.get_build_strategy()
            build_strategy.memory_optimize = False
            self.strategy.set_build_strategy(build_strategy)
        elif args.run_params["sync_mode"] == "geo_async":
            self.strategy = StrategyFactory.create_geo_strategy(400)
            # Tune the geo strategy's program config from run_params.
            program_config = self.strategy.get_program_config()
            program_config.slice_var_up = args.run_params['slice_var_up']
            program_config.enable_dc_asgd = args.run_params['enable_dc_asgd']
            #TODO: split_method=HashName, it will cause a bug, this option can open after repair
            # if args.run_params['split_method']:
            #     program_config.split_method = HashName
            # else:
            #     program_config.split_method = RoundRobin
            program_config.wait_port = args.run_params['wait_port']
            program_config.runtime_split_send_recv = args.run_params[
                'runtime_split_send_recv']
            program_config.use_hierarchical_allreduce = args.run_params[
                'use_hierarchical_allreduce']
            program_config.geo_sgd_need_push_nums = args.run_params[
                'push_nums']
def train(args):
    """Run distributed TDM training.

    Initialises fleet from the cloud environment, builds the TDM network
    and a distributed Adam optimizer, then branches on the node's role:
    parameter servers call ``fleet.run_server()``; workers run the
    ``train_from_dataset`` epoch loop and (worker 0 only) save the model.

    Args:
        args: parsed arguments; fields used here include random_seed,
            sync_mode, learning_rate, init_model_files_path,
            model_files_path and epoch_num.

    Raises:
        ValueError: if ``args.sync_mode`` is not one of
            "sync" / "half_async" / "async".
    """
    # Fix the random seed for reproducibility.
    program = fluid.default_main_program()
    program.random_seed = args.random_seed

    # Determine this machine/process's role in the distributed job from
    # the environment, then initialise fleet with it.
    role = role_maker.PaddleCloudRoleMaker()
    fleet.init(role)

    # Select the distributed run mode via the strategy factory.
    if args.sync_mode == "sync":
        strategy = StrategyFactory.create_sync_strategy()
    elif args.sync_mode == "half_async":
        strategy = StrategyFactory.create_half_async_strategy()
    elif args.sync_mode == "async":
        strategy = StrategyFactory.create_async_strategy()
    else:
        # Bug fix: previously an unrecognised mode fell through and the
        # unbound `strategy` crashed later with UnboundLocalError at
        # fleet.distributed_optimizer; fail fast with a clear message.
        raise ValueError("unsupported sync_mode: {}".format(args.sync_mode))

    # Build the model network.
    logger.info("TDM Begin build network.")
    tdm_model = TdmTrainNet(args)
    inputs = tdm_model.input_data()

    logger.info("TDM Begin load tree travel & layer.")
    avg_cost, acc = tdm_model.tdm(inputs)
    logger.info("TDM End build network.")

    # Wrap the optimizer with the chosen strategy and build the
    # distributed programs.
    optimizer = fluid.optimizer.AdamOptimizer(
        learning_rate=args.learning_rate, lazy_mode=True)
    optimizer = fleet.distributed_optimizer(optimizer, strategy)
    optimizer.minimize(avg_cost)
    logger.info("TDM End append backward.")

    # Run role-specific logic.
    if fleet.is_server():
        logger.info("TDM Run server ...")
        # Initialise and run the parameter-server node.  Every variable
        # except the TDM tree structures should be initialised here.
        logger.info("TDM init model path: {}".format(
            args.init_model_files_path))
        fleet.init_server(args.init_model_files_path)

        lr = fluid.global_scope().find_var("learning_rate_0")
        if lr:
            lr.get_tensor().set(
                np.array(args.learning_rate).astype('float32'),
                fluid.CPUPlace())
            logger.info("TDM Set learning rate {}".format(
                args.learning_rate))
        else:
            logger.info("TDM Didn't find learning_rate_0 param")

        logger.info("TDM load End")
        fleet.run_server()
        logger.info("TDM Run server success!")
    elif fleet.is_worker():
        logger.info("TDM Run worker ...")
        # Initialise the worker node.
        fleet.init_worker()
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        logger.info("TDM Run Startup Begin")
        # Run the fleet-trimmed distributed startup program.
        exe.run(fleet.startup_program)

        # Set the learning rate tensor locally if present.
        lr = fluid.global_scope().find_var("learning_rate_0")
        if lr:
            lr.get_tensor().set(
                np.array(args.learning_rate).astype('float32'), place)
            logger.info("TDM Set learning rate {}".format(
                args.learning_rate))

        # Set the TDM variables.  The tree-structure variables do not
        # take part in network updates and are not stored on the
        # parameter servers, so each worker must set them locally.
        logger.info("TDM Begin load parameter.")
        tdm_param_prepare_dict = tdm_sampler_prepare(args)
        tdm_param_prepare_dict['info_array'] = tdm_child_prepare(args)
        numpy_model = {}
        numpy_model['TDM_Tree_Travel'] = tdm_param_prepare_dict[
            'travel_array']
        numpy_model['TDM_Tree_Layer'] = tdm_param_prepare_dict[
            'layer_array']
        numpy_model['TDM_Tree_Info'] = tdm_param_prepare_dict['info_array']
        # numpy_model['TDM_Tree_Emb'] = tdm_emb_prepare(args)
        # In distributed training the embedding is stored on the
        # parameter servers, so there is no need to set it locally.
        for param_name in numpy_model:
            param_t = fluid.global_scope().find_var(
                param_name).get_tensor()
            param_t.set(numpy_model[str(param_name)].astype('int32'),
                        place)
        logger.info("TDM Run Startup End")

        # Train loop.
        dataset, file_list, example_num = get_dataset(inputs, args)
        logger.info("TDM Distributed training begin ...")
        for epoch in range(args.epoch_num):
            # Local shuffle of the input file list.
            random.shuffle(file_list)
            dataset.set_filelist(file_list)

            # Workers run the fleet-trimmed main program.
            start_time = time.time()
            exe.train_from_dataset(program=fleet.main_program,
                                   dataset=dataset,
                                   fetch_list=[acc, avg_cost],
                                   fetch_info=[
                                       "Epoch {} acc ".format(epoch),
                                       "Epoch {} loss ".format(epoch)
                                   ],
                                   print_period=1,
                                   debug=False)
            end_time = time.time()
            logger.info(
                "Epoch {} finished, use time {} second, speed {} example/s".
                format(epoch, end_time - start_time,
                       example_num * 1.0 / (end_time - start_time)))

            # By convention, worker 0 saves the model.
            if fleet.is_first_worker():
                model_path = os.path.join(args.model_files_path,
                                          "epoch_" + str(epoch))
                fleet.save_persistables(executor=exe, dirname=model_path)
                logger.info("Begin upload files")
                # upload_files(model_path, warm_up=False)
                # In a distributed environment, uploading the model to
                # HDFS is supported here.

        logger.info("TDM Before stop worker")
        fleet.stop_worker()
        logger.info("TDM Distributed training success!")