def build_strategy(self, args):
    self.strategy = None

    if args.mode == "async":
        self.strategy = StrategyFactory.create_async_strategy()
    elif args.mode == "sync":
        self.strategy = StrategyFactory.create_sync_strategy()
    elif args.mode == "half_async":
        self.strategy = StrategyFactory.create_half_async_strategy()
    elif args.mode == "geo":
        self.strategy = StrategyFactory.create_geo_strategy(
            args.geo_sgd_need_push_nums)

    self.dump_param = os.getenv("dump_param", "").split(",")
    self.dump_fields = os.getenv("dump_fields", "").split(",")
    self.dump_fields_path = os.getenv("dump_fields_path", "")
    debug = int(os.getenv("Debug", "0"))

    if debug:
        self.strategy.set_debug_opt({
            "dump_param": self.dump_param,
            "dump_fields": self.dump_fields,
            "dump_fields_path": self.dump_fields_path
        })

    return self.strategy
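# A minimal standalone sketch of the mode -> strategy mapping used by
# build_strategy above, using the same StrategyFactory import that appears in
# the other snippets here. Only "geo" needs the extra push-number threshold
# (geo_sgd_need_push_nums); the default value below is an assumption.
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory


def make_strategy(mode, geo_sgd_need_push_nums=400):
    # mode is one of "async", "sync", "half_async", "geo"
    if mode == "geo":
        return StrategyFactory.create_geo_strategy(geo_sgd_need_push_nums)
    builders = {
        "async": StrategyFactory.create_async_strategy,
        "sync": StrategyFactory.create_sync_strategy,
        "half_async": StrategyFactory.create_half_async_strategy,
    }
    return builders[mode]()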
def test_half_async_strategy(self):
    strategy = StrategyFactory.create_half_async_strategy()
    self.assertEqual(strategy._program_config.sync_mode, False)
    self.assertEqual(strategy._program_config.runtime_split_send_recv, True)
    self.assertEqual(strategy._build_strategy.async_mode, True)

    # test set_server_runtime_config using ServerRuntimeConfig
    server_runtime_config_class = ServerRuntimeConfig()
    server_runtime_config_class._rpc_send_thread_num = 24
    strategy.set_server_runtime_config(server_runtime_config_class)
    server_runtime_config = strategy.get_server_runtime_config()
    self.assertEqual(server_runtime_config._rpc_send_thread_num, 24)

    # test set_server_runtime_config using dict
    server_runtime_config_dict = dict()
    server_runtime_config_dict['_rpc_send_thread_num'] = 20
    strategy.set_server_runtime_config(server_runtime_config_dict)
    server_runtime_config = strategy.get_server_runtime_config()
    self.assertEqual(server_runtime_config._rpc_send_thread_num, 20)

    # test set_server_runtime_config exception
    server_runtime_config_dict['unknown'] = None
    self.assertRaises(Exception, strategy.set_server_runtime_config,
                      server_runtime_config_dict)
    server_runtime_config_illegal = None
    self.assertRaises(Exception, strategy.set_server_runtime_config,
                      server_runtime_config_illegal)
def run_ut(self):
    strategy = StrategyFactory.create_half_async_strategy()
    training_role = os.getenv("TRAINING_ROLE", "TRAINER")
    role = role_maker.UserDefinedRoleMaker(
        current_id=0,
        role=role_maker.Role.WORKER
        if training_role == "TRAINER" else role_maker.Role.SERVER,
        worker_num=2,
        server_endpoints=["127.0.0.1:6002"])

    if training_role == "TRAINER":
        self.run_trainer(role, strategy)
    else:
        self.run_pserver(role, strategy)
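# A sketch of how run_ut above could be exercised: the same test script is
# launched twice, once with TRAINING_ROLE=PSERVER and once with
# TRAINING_ROLE=TRAINER, matching the os.getenv("TRAINING_ROLE") switch in
# run_ut. The script name "test_half_async_ut.py" is a placeholder assumption.
import os
import subprocess


def launch_ut():
    procs = []
    for role in ("PSERVER", "TRAINER"):
        env = dict(os.environ, TRAINING_ROLE=role)
        procs.append(
            subprocess.Popen(["python", "test_half_async_ut.py"], env=env))
    for proc in procs:
        proc.wait()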
def build_strategy(self):
    mode = envs.get_runtime_environ("train.trainer.strategy")
    assert mode in ["async", "geo", "sync", "half_async"]

    strategy = None

    if mode == "async":
        strategy = StrategyFactory.create_async_strategy()
    elif mode == "geo":
        push_num = envs.get_global_env("train.strategy.mode.push_num", 100)
        strategy = StrategyFactory.create_geo_strategy(push_num)
    elif mode == "sync":
        strategy = StrategyFactory.create_sync_strategy()
    elif mode == "half_async":
        strategy = StrategyFactory.create_half_async_strategy()

    assert strategy is not None

    self.strategy = strategy
    return strategy
def _build_strategy(self, context):
    from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory

    mode = envs.get_runtime_environ("train.trainer.strategy")
    assert mode in ["async", "geo", "sync", "half_async"]

    strategy = None

    if mode == "async":
        strategy = StrategyFactory.create_async_strategy()
    elif mode == "geo":
        push_num = envs.get_global_env("train.strategy.mode.push_num", 100)
        strategy = StrategyFactory.create_geo_strategy(push_num)
    elif mode == "sync":
        strategy = StrategyFactory.create_sync_strategy()
    elif mode == "half_async":
        strategy = StrategyFactory.create_half_async_strategy()

    assert strategy is not None

    context["strategy"] = strategy
    return strategy
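# For reference, the configuration keys the two builders above read, shown as
# a plain dict. How these keys are injected into envs.get_runtime_environ /
# envs.get_global_env depends on the surrounding framework, so this is only
# an illustration of the expected values, not an API call.
expected_runtime_environ = {
    # selects the strategy branch; must be one of the asserted modes
    "train.trainer.strategy": "half_async",
    # only consulted for the "geo" branch; defaults to 100 when absent
    "train.strategy.mode.push_num": 100,
}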
def test_half_async_strategy(self):
    strategy = StrategyFactory.create_half_async_strategy()
    self.assertEqual(strategy._program_config.sync_mode, False)
    self.assertEqual(strategy._program_config.runtime_split_send_recv, True)
    self.assertEqual(strategy._build_strategy.async_mode, True)

    # test set_server_runtime_config using ServerRuntimeConfig
    server_runtime_config_class = ServerRuntimeConfig()
    server_runtime_config_class._rpc_send_thread_num = 24
    strategy.set_server_runtime_config(server_runtime_config_class)
    server_runtime_config = strategy.get_server_runtime_config()
    self.assertEqual(server_runtime_config._rpc_send_thread_num, 24)

    # test set_server_runtime_config using dict
    server_runtime_config_dict = dict()
    server_runtime_config_dict['_rpc_send_thread_num'] = 20
    strategy.set_server_runtime_config(server_runtime_config_dict)
    server_runtime_config = strategy.get_server_runtime_config()
    self.assertEqual(server_runtime_config._rpc_send_thread_num, 20)

    # test set_server_runtime_config exception
    server_runtime_config_dict['unknown'] = None
    self.assertRaises(Exception, strategy.set_server_runtime_config,
                      server_runtime_config_dict)
    server_runtime_config_illegal = None
    self.assertRaises(Exception, strategy.set_server_runtime_config,
                      server_runtime_config_illegal)

    os.environ["CPU_NUM"] = '100'
    trainer_runtime_config = strategy.get_trainer_runtime_config()
    trainer_runtime_config.runtime_configs[
        'communicator_send_queue_size'] = '50'
    runtime_configs = trainer_runtime_config.get_communicator_flags()
    self.assertIn('communicator_send_queue_size', runtime_configs)
    self.assertNotIn('communicator_independent_recv_thread', runtime_configs)
    self.assertEqual(runtime_configs['communicator_send_queue_size'], '100')
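# A small sketch of the behaviour the last assertions above rely on: for a
# half-async strategy, get_communicator_flags() reports
# communicator_send_queue_size following CPU_NUM ('100') rather than the
# manually assigned '50', and does not expose
# communicator_independent_recv_thread. Only accessors already shown in the
# test are used here.
import os

from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory


def show_half_async_flags():
    os.environ["CPU_NUM"] = "100"
    strategy = StrategyFactory.create_half_async_strategy()
    trainer_conf = strategy.get_trainer_runtime_config()
    trainer_conf.runtime_configs['communicator_send_queue_size'] = '50'
    # expected to print a send queue size of '100', taken from CPU_NUM
    print(trainer_conf.get_communicator_flags())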
def _set_strategy(self, args):
    """Configure the distributed_strategy for this run; build_strategy is configured in do_training."""
    if int(os.getenv("PADDLE_COMPATIBILITY_CHECK", '0')):
        self.strategy = DistributeTranspilerConfig()
        if args.run_params["sync_mode"] == "sync":
            self.strategy.sync_mode = True
            self.strategy.runtime_split_send_recv = False
            self.async_mode = False
        elif args.run_params["sync_mode"] == "half_async":
            self.strategy.sync_mode = False
            self.async_mode = False
        elif args.run_params["sync_mode"] == "async":
            self.strategy.sync_mode = False
            self.async_mode = True
        elif args.run_params["sync_mode"] == "geo_async":
            self.strategy.sync_mode = False
            self.async_mode = True
            self.strategy.geo_sgd_mode = True
            self.strategy.geo_sgd_need_push_nums = 400

        self.strategy.mode = "pserver"
        self.strategy.slice_var_up = args.run_params['slice_var_up']
        self.strategy.enable_dc_asgd = args.run_params['enable_dc_asgd']
        # TODO: split_method=HashName currently triggers a bug; re-enable this
        # option after it is fixed
        # if args.run_params['split_method']:
        #     self.strategy.split_method = HashName
        # else:
        #     self.strategy.split_method = RoundRobin
        self.strategy.wait_port = args.run_params['wait_port']
        self.strategy.runtime_split_send_recv = args.run_params[
            'runtime_split_send_recv']
        self.strategy.use_hierarchical_allreduce = args.run_params[
            'use_hierarchical_allreduce']
        self.strategy.geo_sgd_need_push_nums = args.run_params['push_nums']
    else:
        self.strategy = StrategyFactory.create_sync_strategy()
        # trainer_runtime_config = TrainerRuntimeConfig()
        # trainer_runtime_config.send_queue_size = "16"
        # trainer_runtime_config.thread_pool_size = "32"
        # trainer_runtime_config.max_merge_var_num = "16"
        # trainer_runtime_config.is_sgd_communicator = "0"
        if args.run_params["sync_mode"] == "sync":
            self.strategy = StrategyFactory.create_sync_strategy()
        elif args.run_params["sync_mode"] == "half_async":
            self.strategy = StrategyFactory.create_half_async_strategy()
        elif args.run_params["sync_mode"] == "async":
            self.strategy = StrategyFactory.create_async_strategy()
            build_strategy = self.strategy.get_build_strategy()
            build_strategy.memory_optimize = False
            self.strategy.set_build_strategy(build_strategy)
        elif args.run_params["sync_mode"] == "geo_async":
            self.strategy = StrategyFactory.create_geo_strategy(400)

        program_config = self.strategy.get_program_config()
        program_config.slice_var_up = args.run_params['slice_var_up']
        program_config.enable_dc_asgd = args.run_params['enable_dc_asgd']
        # TODO: split_method=HashName currently triggers a bug; re-enable this
        # option after it is fixed
        # if args.run_params['split_method']:
        #     program_config.split_method = HashName
        # else:
        #     program_config.split_method = RoundRobin
        program_config.wait_port = args.run_params['wait_port']
        program_config.runtime_split_send_recv = args.run_params[
            'runtime_split_send_recv']
        program_config.use_hierarchical_allreduce = args.run_params[
            'use_hierarchical_allreduce']
        program_config.geo_sgd_need_push_nums = args.run_params[
            'push_nums']
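# For clarity, an illustrative run_params dict covering every key that
# _set_strategy above reads. The concrete values are assumptions; only the key
# names and the accepted sync_mode values come from the function itself.
example_run_params = {
    "sync_mode": "half_async",  # "sync" | "half_async" | "async" | "geo_async"
    "slice_var_up": True,
    "enable_dc_asgd": False,
    "wait_port": True,
    "runtime_split_send_recv": True,
    "use_hierarchical_allreduce": False,
    "push_nums": 400,
}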
def train(args): """run train""" # set random program = fluid.default_main_program() program.random_seed = args.random_seed # 根据环境变量确定当前机器/进程在分布式训练中扮演的角色 # 然后使用 fleet api的 init()方法初始化这个节点 role = role_maker.PaddleCloudRoleMaker() fleet.init(role) # 我们还可以进一步指定分布式的运行模式,通过 DistributeTranspilerConfig进行配置 # 如下,我们设置分布式运行模式为异步(async),同时将参数进行切分,以分配到不同的节点 if args.sync_mode == "sync": strategy = StrategyFactory.create_sync_strategy() elif args.sync_mode == "half_async": strategy = StrategyFactory.create_half_async_strategy() elif args.sync_mode == "async": strategy = StrategyFactory.create_async_strategy() # set model logger.info("TDM Begin build network.") tdm_model = TdmTrainNet(args) inputs = tdm_model.input_data() logger.info("TDM Begin load tree travel & layer.") avg_cost, acc = tdm_model.tdm(inputs) logger.info("TDM End build network.") # 配置分布式的optimizer,传入我们指定的strategy,构建program optimizer = fluid.optimizer.AdamOptimizer(learning_rate=args.learning_rate, lazy_mode=True) optimizer = fleet.distributed_optimizer(optimizer, strategy) optimizer.minimize(avg_cost) logger.info("TDM End append backward.") # 根据节点角色,分别运行不同的逻辑 if fleet.is_server(): logger.info("TDM Run server ...") # 初始化及运行参数服务器节点 logger.info("TDM init model path: {}".format( args.init_model_files_path)) # 模型中除了tdm树结构相关的变量都应该在此处初始化 fleet.init_server(args.init_model_files_path) lr = fluid.global_scope().find_var("learning_rate_0") if lr: lr.get_tensor().set( np.array(args.learning_rate).astype('float32'), fluid.CPUPlace()) logger.info("TDM Set learning rate {}".format(args.learning_rate)) else: logger.info("TDM Didn't find learning_rate_0 param") logger.info("TDM load End") fleet.run_server() logger.info("TDM Run server success!") elif fleet.is_worker(): logger.info("TDM Run worker ...") # 初始化工作节点 fleet.init_worker() place = fluid.CPUPlace() exe = fluid.Executor(place) logger.info("TDM Run Startup Begin") # 初始化含有分布式流程的fleet.startup_program exe.run(fleet.startup_program) # Set Learning Rate lr = fluid.global_scope().find_var("learning_rate_0") if lr: lr.get_tensor().set( np.array(args.learning_rate).astype('float32'), place) logger.info("TDM Set learning rate {}".format(args.learning_rate)) # Set TDM Variable logger.info("TDM Begin load parameter.") # Set TDM_Tree_Info # 树结构相关的变量不参与网络更新,不存储于参数服务器,因此需要在本地手动Set tdm_param_prepare_dict = tdm_sampler_prepare(args) tdm_param_prepare_dict['info_array'] = tdm_child_prepare(args) Numpy_model = {} Numpy_model['TDM_Tree_Travel'] = tdm_param_prepare_dict['travel_array'] Numpy_model['TDM_Tree_Layer'] = tdm_param_prepare_dict['layer_array'] Numpy_model['TDM_Tree_Info'] = tdm_param_prepare_dict['info_array'] # Numpy_model['TDM_Tree_Emb'] = tdm_emb_prepare(args) # 分布式训练中,Emb存储与参数服务器,无需在本地set for param_name in Numpy_model: param_t = fluid.global_scope().find_var(param_name).get_tensor() param_t.set(Numpy_model[str(param_name)].astype('int32'), place) logger.info("TDM Run Startup End") # Train loop dataset, file_list, example_num = get_dataset(inputs, args) logger.info("TDM Distributed training begin ...") for epoch in range(args.epoch_num): # local shuffle random.shuffle(file_list) dataset.set_filelist(file_list) # 训练节点运行的是经过分布式裁剪的fleet.mian_program start_time = time.time() exe.train_from_dataset(program=fleet.main_program, dataset=dataset, fetch_list=[acc, avg_cost], fetch_info=[ "Epoch {} acc ".format(epoch), "Epoch {} loss ".format(epoch) ], print_period=1, debug=False) end_time = time.time() logger.info( "Epoch {} finished, use time {} second, speed {} example/s". 
format(epoch, end_time - start_time, example_num * 1.0 / (end_time - start_time))) # 默认使用0号节点保存模型 if fleet.is_first_worker(): model_path = os.path.join(args.model_files_path, "epoch_" + str(epoch)) fleet.save_persistables(executor=exe, dirname=model_path) logger.info("Begin upload files") # upload_files(model_path, warm_up=False) # 在分布式环境下时,支持上传模型到hdfs logger.info("TDM Before stop worker") fleet.stop_worker() logger.info("TDM Distributed training success!")
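# An illustrative argparse setup listing the fields the TDM train() above
# accesses (random_seed, sync_mode, learning_rate, init_model_files_path,
# model_files_path, epoch_num). Defaults are assumptions for the sketch.
import argparse


def parse_tdm_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--random_seed", type=int, default=0)
    parser.add_argument("--sync_mode", default="async",
                        choices=["sync", "half_async", "async"])
    parser.add_argument("--learning_rate", type=float, default=1e-3)
    parser.add_argument("--init_model_files_path", default="./init_model")
    parser.add_argument("--model_files_path", default="./models")
    parser.add_argument("--epoch_num", type=int, default=1)
    return parser.parse_args()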
def train(args):
    datas, avg_cost, predict, train_file_path = model()

    endpoints = args.endpoints.split(",")
    if args.role.upper() == "PSERVER":
        current_id = endpoints.index(args.current_endpoint)
    else:
        current_id = 0

    role = role_maker.UserDefinedRoleMaker(
        current_id=current_id,
        role=role_maker.Role.WORKER
        if args.role.upper() == "TRAINER" else role_maker.Role.SERVER,
        worker_num=args.trainers,
        server_endpoints=endpoints)

    exe = fluid.Executor(fluid.CPUPlace())
    fleet.init(role)

    strategy = StrategyFactory.create_half_async_strategy()

    optimizer = fluid.optimizer.SGD(learning_rate=0.0001)
    optimizer = fleet.distributed_optimizer(optimizer, strategy)
    optimizer.minimize(avg_cost)

    if fleet.is_server():
        logger.info("run pserver")
        fleet.init_server()
        fleet.run_server()
    elif fleet.is_worker():
        logger.info("run trainer")
        fleet.init_worker()
        exe.run(fleet.startup_program)

        thread_num = 2
        filelist = []
        for _ in range(thread_num):
            filelist.append(train_file_path)

        # config dataset
        dataset = fluid.DatasetFactory().create_dataset()
        dataset.set_batch_size(128)
        dataset.set_use_var(datas)
        pipe_command = 'python ctr_dataset_reader.py'
        dataset.set_pipe_command(pipe_command)
        dataset.set_filelist(filelist)
        dataset.set_thread(thread_num)

        for epoch_id in range(10):
            logger.info("epoch {} start".format(epoch_id))
            pass_start = time.time()
            dataset.set_filelist(filelist)
            exe.train_from_dataset(
                program=fleet.main_program,
                dataset=dataset,
                fetch_list=[avg_cost],
                fetch_info=["cost"],
                print_period=100,
                debug=False)
            pass_time = time.time() - pass_start
            logger.info("epoch {} finished, pass_time {}".format(epoch_id,
                                                                 pass_time))
        fleet.stop_worker()
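# A sketch of driving the half-async CTR train() above for a one-server,
# two-trainer setup. The field names mirror the attributes train() reads
# (endpoints, role, current_endpoint, trainers); the argparse wiring and the
# default values are assumptions for the sketch.
import argparse


def parse_ctr_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--role", default="TRAINER")  # TRAINER or PSERVER
    parser.add_argument("--endpoints", default="127.0.0.1:6002")
    parser.add_argument("--current_endpoint", default="127.0.0.1:6002")
    parser.add_argument("--trainers", type=int, default=2)
    return parser.parse_args()


# if __name__ == "__main__":
#     train(parse_ctr_args())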