Example #1
    def build_strategy(self, args):
        self.strategy = None
        if args.mode == "async":
            self.strategy = StrategyFactory.create_async_strategy()
        elif args.mode == "sync":
            self.strategy = StrategyFactory.create_sync_strategy()
        elif args.mode == "half_async":
            self.strategy = StrategyFactory.create_half_async_strategy()
        elif args.mode == "geo":
            self.strategy = StrategyFactory.create_geo_strategy(
                args.geo_sgd_need_push_nums)
        self.dump_param = os.getenv("dump_param", "").split(",")
        self.dump_fields = os.getenv("dump_fields", "").split(",")
        self.dump_fields_path = os.getenv("dump_fields_path", "")
        debug = int(os.getenv("Debug", "0"))
        if debug:
            self.strategy.set_debug_opt({
                "dump_param": self.dump_param,
                "dump_fields": self.dump_fields,
                "dump_fields_path": self.dump_fields_path
            })

        return self.strategy
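A minimal usage sketch for the builder above, assuming the Paddle 1.x fleet APIs shown throughout this page; the args namespace, tensor names, and environment values are illustrative only. Debug dumping is driven entirely by environment variables:

import os
from argparse import Namespace
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory

# Debug dump options are read from environment variables, as in build_strategy above.
os.environ["dump_param"] = "fc_0.w_0"
os.environ["dump_fields"] = "fc_0.tmp_0,fc_0.tmp_0@GRAD"
os.environ["dump_fields_path"] = "dump_text/"
os.environ["Debug"] = "1"

# Hypothetical args mirroring the mode dispatch in build_strategy above.
args = Namespace(mode="geo", geo_sgd_need_push_nums=400)
if args.mode == "geo":
    strategy = StrategyFactory.create_geo_strategy(args.geo_sgd_need_push_nums)
else:
    strategy = {
        "async": StrategyFactory.create_async_strategy,
        "sync": StrategyFactory.create_sync_strategy,
        "half_async": StrategyFactory.create_half_async_strategy,
    }[args.mode]()

if int(os.environ["Debug"]):
    strategy.set_debug_opt({
        "dump_param": os.environ["dump_param"].split(","),
        "dump_fields": os.environ["dump_fields"].split(","),
        "dump_fields_path": os.environ["dump_fields_path"],
    })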
Example #2
    def distributed_optimizer(self, optimizer, strategy=None):
        """
        Optimizer for distributed training.

        For distributed training, this method rebuilds a new instance of DistributedOptimizer,
        which has the basic Optimizer functions plus special features for distributed training.

        Args:
            optimizer(Optimizer): The optimizer to be wrapped for distributed training.
            strategy(DistributeTranspilerConfig): Extra properties for distributed optimizer.

        Returns:
            ParameterServerOptimizer: subclass of DistributedOptimizer.
        """

        if not isinstance(optimizer, Optimizer):
            raise ValueError("optimizer must be an instance of Optimizer")
        if not self._is_initialized:
            raise ValueError(
                "fleet.init(role) to initialize before optimizer.minimize(loss)"
            )

        if not strategy:
            # Fall back to a default async strategy when none is supplied.
            _strategy = StrategyFactory.create_async_strategy()
        elif isinstance(strategy, DistributedStrategy):
            _strategy = strategy
        elif isinstance(strategy, DistributeTranspilerConfig):
            if strategy.sync_mode:
                _strategy = SyncStrategy()
            else:
                if strategy.runtime_split_send_recv:
                    if strategy.geo_sgd_mode:
                        _strategy = GeoStrategy(
                            strategy.geo_sgd_need_push_nums)
                    elif strategy.half_async:
                        _strategy = HalfAsyncStrategy()
                    else:
                        _strategy = AsyncStrategy()
                else:
                    _strategy = HalfAsyncStrategy()
                    # for half_async compatibility
                    strategy.half_async = True
                    strategy.runtime_split_send_recv = True
            _strategy.set_program_config(strategy)
        elif isinstance(strategy, dict):
            if self._inner_mode != PSMode.PSLIB:
                raise TypeError("Dict strategy can only be used at PSLIB Mode")

            _strategy = StrategyFactory.create_async_strategy()
            _strategy.set_pslib_runtime_config(strategy)
        else:
            raise TypeError(
                "strategy must be an instance of DistributeTranspilerConfig, DistributedStrategy"
            )

        self._strategy = _strategy
        self._optimizer = ParameterServerOptimizer(optimizer, _strategy)
        return self._optimizer
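A hedged sketch of the dispatch above, assuming the Paddle 1.x fleet APIs used in the other examples: a raw DistributeTranspilerConfig is converted into the matching strategy class, while a DistributedStrategy instance is used as-is. fleet.init(role) must have been called beforehand (see the role_maker examples below).

import paddle.fluid as fluid
from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet

config = DistributeTranspilerConfig()
config.sync_mode = False
config.runtime_split_send_recv = True  # mapped to AsyncStrategy by the branch above

optimizer = fluid.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, config)  # returns a ParameterServerOptimizer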
Example #3
    def test_geo_strategy(self):
        strategy = StrategyFactory.create_geo_strategy(5)
        self.assertEqual(strategy._program_config.sync_mode, False)
        self.assertEqual(strategy._program_config.runtime_split_send_recv,
                         True)
        self.assertEqual(strategy._program_config.geo_sgd_mode, True)
        self.assertEqual(strategy._program_config.geo_sgd_need_push_nums, 5)
        self.assertEqual(strategy._build_strategy.async_mode, True)

        # test set_build_strategy using fluid.BuildStrategy
        build_strategy_class = fluid.BuildStrategy()
        build_strategy_class.memory_optimize = False
        strategy.set_build_strategy(build_strategy_class)
        build_strategy = strategy.get_build_strategy()
        self.assertEqual(build_strategy.memory_optimize, False)

        # test set_build_strategy using dict
        build_strategy_dict = dict()
        build_strategy_dict['memory_optimize'] = True
        strategy.set_build_strategy(build_strategy_dict)
        build_strategy = strategy.get_build_strategy()
        self.assertEqual(build_strategy.memory_optimize, True)

        # test set_build_strategy exception
        build_strategy_dict['unknown'] = None
        self.assertRaises(Exception, strategy.set_build_strategy,
                          build_strategy_dict)
        build_strategy_illegal = None
        self.assertRaises(Exception, strategy.set_build_strategy,
                          build_strategy_illegal)
Example #4
    def test_sync_strategy(self):
        os.environ['CPU_NUM'] = "2"
        strategy = StrategyFactory.create_sync_strategy()
        self.assertEqual(strategy._program_config.sync_mode, False)
        self.assertEqual(strategy._program_config.runtime_split_send_recv,
                         True)
        self.assertEqual(strategy._build_strategy.async_mode, True)
        self.assertEqual(strategy._execute_strategy.num_threads, 2)

        # test set_program_config using DistributeTranspilerConfig()
        program_config_class = DistributeTranspilerConfig()
        program_config_class.min_block_size = 81920
        strategy.set_program_config(program_config_class)
        program_config = strategy.get_program_config()
        self.assertEqual(program_config.min_block_size, 81920)

        # test set_program_config using dict
        program_config_dict = dict()
        program_config_dict['min_block_size'] = 8192
        strategy.set_program_config(program_config_dict)
        program_config = strategy.get_program_config()
        self.assertEqual(program_config.min_block_size, 8192)

        # test set_program_config exception
        program_config_dict['unknown'] = None
        self.assertRaises(Exception, strategy.set_program_config,
                          program_config_dict)
        program_config_illegal = None
        self.assertRaises(Exception, strategy.set_program_config,
                          program_config_illegal)
Example #5
    def test_half_async_strategy(self):
        strategy = StrategyFactory.create_half_async_strategy()
        self.assertEqual(strategy._program_config.sync_mode, False)
        self.assertEqual(strategy._program_config.runtime_split_send_recv,
                         True)
        self.assertEqual(strategy._build_strategy.async_mode, True)

        # test set_server_runtime_config using ServerRuntimeConfig
        server_runtime_config_class = ServerRuntimeConfig()
        server_runtime_config_class._rpc_send_thread_num = 24
        strategy.set_server_runtime_config(server_runtime_config_class)
        server_runtime_config = strategy.get_server_runtime_config()
        self.assertEqual(server_runtime_config._rpc_send_thread_num, 24)

        # test set_server_runtime_config using dict
        server_runtime_config_dict = dict()
        server_runtime_config_dict['_rpc_send_thread_num'] = 20
        strategy.set_server_runtime_config(server_runtime_config_dict)
        server_runtime_config = strategy.get_server_runtime_config()
        self.assertEqual(server_runtime_config._rpc_send_thread_num, 20)

        # test set_server_runtime_config exception
        server_runtime_config_dict['unknown'] = None
        self.assertRaises(Exception, strategy.set_server_runtime_config,
                          server_runtime_config_dict)
        server_runtime_config_illegal = None
        self.assertRaises(Exception, strategy.set_server_runtime_config,
                          server_runtime_config_illegal)
Example #6
    def test(self):
        endpoints = [
            "127.0.0.1:36004", "127.0.0.1:36005", "127.0.0.1:36006",
            "127.0.0.1:36007"
        ]

        role = role_maker.UserDefinedRoleMaker(
            current_id=0,
            role=role_maker.Role.SERVER,
            worker_num=2,
            server_endpoints=endpoints)

        fleet.init(role)
        loss, acc, _ = self.net()

        optimizer = fluid.optimizer.Adagrad(
            learning_rate=fluid.layers.exponential_decay(
                learning_rate=base_lr,
                decay_steps=500,
                decay_rate=0.969,
                staircase=True))

        strategy = StrategyFactory.create_async_strategy()
        optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(loss)
Example #7
    def _get_distributed_strategy(self):
        from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory

        k_steps = self.user_defined_strategy.a_sync_configs["k_steps"]
        strategy = None

        if not self.user_defined_strategy.a_sync and k_steps == 0:
            strategy = StrategyFactory.create_sync_strategy()

        if self.user_defined_strategy.a_sync and k_steps == 0:
            strategy = StrategyFactory.create_async_strategy()

        if self.user_defined_strategy.a_sync and k_steps > 0:
            strategy = StrategyFactory.create_geo_strategy(k_steps)

        if not strategy:
            raise ValueError("k_steps must be invalid value, please check")

        return strategy
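For context, a short sketch of how the user_defined_strategy fields consulted above are typically set, assuming Paddle 2.x's paddle.distributed.fleet.DistributedStrategy:

import paddle.distributed.fleet as fleet

user_defined_strategy = fleet.DistributedStrategy()
user_defined_strategy.a_sync = True                      # a_sync=False with k_steps == 0 -> sync strategy
user_defined_strategy.a_sync_configs = {"k_steps": 800}  # a_sync=True with k_steps > 0 -> geo strategy
# a_sync=True with k_steps == 0 selects the plain async strategy.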
Example #8
    def test_async_strategy(self):
        os.environ["CPU_NUM"] = '100'

        strategy = StrategyFactory.create_async_strategy()
        self.assertEqual(strategy._program_config.sync_mode, False)
        self.assertEqual(strategy._program_config.runtime_split_send_recv,
                         True)
        self.assertEqual(strategy._build_strategy.async_mode, True)

        trainer_runtime_config = strategy.get_trainer_runtime_config()
        self.assertEqual(
            trainer_runtime_config.runtime_configs[
                'communicator_send_queue_size'], '100')

        # test set_trainer_runtime_config using dict
        trainer_runtime_config_dict = dict()
        trainer_runtime_config_dict['communicator_send_queue_size'] = '20'
        strategy.set_trainer_runtime_config(trainer_runtime_config_dict)
        trainer_runtime_config = strategy.get_trainer_runtime_config()
        trainer_communicator_flags = trainer_runtime_config.get_communicator_flags()
        self.assertIn('communicator_send_queue_size',
                      trainer_communicator_flags)
        self.assertEqual(
            trainer_communicator_flags['communicator_send_queue_size'], '20')

        # test set_trainer_runtime_config exception
        trainer_runtime_config_dict['unknown'] = None
        self.assertRaises(Exception, strategy.set_trainer_runtime_config,
                          trainer_runtime_config_dict)
        trainer_runtime_config_illegal = None
        self.assertRaises(Exception, strategy.set_trainer_runtime_config,
                          trainer_runtime_config_illegal)

        # test set_execute_strategy using fluid.ExecutionStrategy
        exec_strategy_class = fluid.ExecutionStrategy()
        exec_strategy_class.num_threads = 4
        strategy.set_execute_strategy(exec_strategy_class)
        exec_strategy = strategy.get_execute_strategy()
        self.assertEqual(exec_strategy.num_threads, 4)

        # test set_execute_strategy using dict
        exec_strategy_dict = dict()
        exec_strategy_dict['num_threads'] = 8
        strategy.set_execute_strategy(exec_strategy_dict)
        exec_strategy = strategy.get_execute_strategy()
        self.assertEqual(exec_strategy.num_threads, 8)

        # test set_execute_strategy exception
        exec_strategy_dict['unknown'] = None
        self.assertRaises(Exception, strategy.set_execute_strategy,
                          exec_strategy_dict)
        exec_strategy_illegal = None
        self.assertRaises(Exception, strategy.set_execute_strategy,
                          exec_strategy_illegal)
Example #9
    def build_strategy(self):
        mode = envs.get_runtime_environ("train.trainer.strategy")
        assert mode in ["async", "geo", "sync", "half_async"]

        strategy = None

        if mode == "async":
            strategy = StrategyFactory.create_async_strategy()
        elif mode == "geo":
            push_num = envs.get_global_env("train.strategy.mode.push_num", 100)
            strategy = StrategyFactory.create_geo_strategy(push_num)
        elif mode == "sync":
            strategy = StrategyFactory.create_sync_strategy()
        elif mode == "half_async":
            strategy = StrategyFactory.create_half_async_strategy()

        assert strategy is not None

        self.strategy = strategy
        return strategy
Example #10
    def _build_strategy(self, context):
        from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory
        mode = envs.get_runtime_environ("train.trainer.strategy")
        assert mode in ["async", "geo", "sync", "half_async"]

        strategy = None

        if mode == "async":
            strategy = StrategyFactory.create_async_strategy()
        elif mode == "geo":
            push_num = envs.get_global_env("train.strategy.mode.push_num", 100)
            strategy = StrategyFactory.create_geo_strategy(push_num)
        elif mode == "sync":
            strategy = StrategyFactory.create_sync_strategy()
        elif mode == "half_async":
            strategy = StrategyFactory.create_half_async_strategy()

        assert strategy is not None

        context["strategy"] = strategy
        return strategy
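Both env-driven builders (Examples #9 and #10) read their mode from PaddleRec's runtime environment store. A rough sketch of populating those keys follows; the set_runtime_environs helper and its import path are assumptions about PaddleRec's envs utility, not something shown in the code above:

from paddlerec.core.utils import envs  # assumed import path for the envs helper used above

# Assumption: set_runtime_environs registers keys later readable via get_runtime_environ.
envs.set_runtime_environs({"train.trainer.strategy": "geo"})

# In the geo branch above, "train.strategy.mode.push_num" falls back to 100
# when it is not present in the global env.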
Example #11
    def run_ut(self):
        strategy = StrategyFactory.create_half_async_strategy()

        training_role = os.getenv("TRAINING_ROLE", "TRAINER")

        role = role_maker.UserDefinedRoleMaker(
            current_id=0,
            role=role_maker.Role.WORKER
            if training_role == "TRAINER" else role_maker.Role.SERVER,
            worker_num=2,
            server_endpoints=["127.0.0.1:6002"])

        if training_role == "TRAINER":
            self.run_trainer(role, strategy)
        else:
            self.run_pserver(role, strategy)
Example #12
    def test(self):
        endpoints = [
            "127.0.0.1:36004", "127.0.0.1:36005", "127.0.0.1:36006",
            "127.0.0.1:36007"
        ]

        role = role_maker.UserDefinedRoleMaker(current_id=0,
                                               role=role_maker.Role.SERVER,
                                               worker_num=2,
                                               server_endpoints=endpoints)

        fleet.init(role)
        loss, acc, _ = self.net()
        optimizer = fluid.optimizer.SGD(base_lr)
        strategy = StrategyFactory.create_geo_strategy(20)
        optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(loss)
Example #13
    def test_dist_geo_server_transpiler(self):
        num_voc = 128
        embed_dim = 64
        x_shape, x_lod = [16, 10], [[3, 5, 2, 6]]
        x = fluid.data(name='x', shape=x_shape, dtype='int32', lod_level=1)
        hash_embd = fluid.contrib.layers.search_pyramid_hash(
            input=x,
            num_emb=embed_dim,
            space_len=num_voc * embed_dim,
            pyramid_layer=4,
            rand_len=16,
            drop_out_percent=0.5,
            is_training=True,
            use_filter=False,
            white_list_len=6400,
            black_list_len=2800,
            seed=3,
            lr=0.002,
            param_attr=fluid.ParamAttr(
                name="PyramidHash_emb_0",
                learning_rate=0, ),
            param_attr_wl=fluid.ParamAttr(
                name="Filter",
                learning_rate=0, ),
            param_attr_bl=None,
            distribute_update_vars=["PyramidHash_emb_0"],
            name=None)

        cost = fluid.layers.reduce_sum(hash_embd)

        role = role_maker.UserDefinedRoleMaker(
            current_id=0,
            role=role_maker.Role.SERVER,
            worker_num=2,
            server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"])

        fleet.init(role)

        strategy = StrategyFactory.create_geo_strategy(5)
        optimizer = fluid.optimizer.SGD(0.1)
        optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(cost)

        pserver_startup_program = fleet.startup_program
        pserver_main_program = fleet.main_program
Example #14
    def test_communicator_async(self):
        role = role_maker.UserDefinedRoleMaker(
            current_id=0,
            role=role_maker.Role.WORKER,
            worker_num=2,
            server_endpoints=["127.0.0.1:6001", "127.0.0.1:6002"])

        fleet.init(role)
        avg_cost = self.net()

        optimizer = fluid.optimizer.SGD(0.01)
        strategy = StrategyFactory.create_async_strategy()
        optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(avg_cost)

        fleet.init_worker()
        time.sleep(10)
        fleet.stop_worker()
Example #15
    def test_half_async_strategy(self):
        strategy = StrategyFactory.create_half_async_strategy()
        self.assertEqual(strategy._program_config.sync_mode, False)
        self.assertEqual(strategy._program_config.runtime_split_send_recv,
                         True)
        self.assertEqual(strategy._build_strategy.async_mode, True)

        # test set_server_runtime_config using ServerRuntimeConfig
        server_runtime_config_class = ServerRuntimeConfig()
        server_runtime_config_class._rpc_send_thread_num = 24
        strategy.set_server_runtime_config(server_runtime_config_class)
        server_runtime_config = strategy.get_server_runtime_config()
        self.assertEqual(server_runtime_config._rpc_send_thread_num, 24)

        # test set_server_runtime_config using dict
        server_runtime_config_dict = dict()
        server_runtime_config_dict['_rpc_send_thread_num'] = 20
        strategy.set_server_runtime_config(server_runtime_config_dict)
        server_runtime_config = strategy.get_server_runtime_config()
        self.assertEqual(server_runtime_config._rpc_send_thread_num, 20)

        # test set_server_runtime_config exception
        server_runtime_config_dict['unknown'] = None
        self.assertRaises(Exception, strategy.set_server_runtime_config,
                          server_runtime_config_dict)
        server_runtime_config_illegal = None
        self.assertRaises(Exception, strategy.set_server_runtime_config,
                          server_runtime_config_illegal)

        os.environ["CPU_NUM"] = '100'
        trainer_runtime_config = strategy.get_trainer_runtime_config()
        trainer_runtime_config.runtime_configs[
            'communicator_send_queue_size'] = '50'
        runtime_configs = trainer_runtime_config.get_communicator_flags()
        self.assertIn('communicator_send_queue_size', runtime_configs)
        self.assertNotIn('communicator_independent_recv_thread',
                         runtime_configs)
        self.assertEqual(runtime_configs['communicator_send_queue_size'],
                         '100')
Example #16
    def test_debug_info(self):
        x = fluid.layers.data(name='x', shape=[1], dtype='float32')
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        y_predict = fluid.layers.fc(input=x, size=1, act=None)
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)

        role = role_maker.UserDefinedRoleMaker(
            current_id=0,
            role=role_maker.Role.WORKER,
            worker_num=2,
            server_endpoints=["127.0.0.1:6001", "127.0.0.1:6002"])
        fleet.init(role)

        optimizer = fluid.optimizer.SGD(0.0001)
        strategy = StrategyFactory.create_sync_strategy()
        strategy.set_debug_opt({
            "dump_param": ["fc_0.tmp_0"],
            "dump_fields": ["fc_0.tmp_0", "fc_0.tmp_0@GRAD"],
            "dump_fields_path": "dump_text/"
        })
        optimizer = fleet.distributed_optimizer(optimizer, strategy)
Example #17
    def test_sync_strategy(self):
        os.environ['CPU_NUM'] = "2"
        strategy = StrategyFactory.create_sync_strategy()
        self.assertEqual(strategy._program_config.sync_mode, False)
        self.assertEqual(strategy._program_config.runtime_split_send_recv,
                         True)
        self.assertEqual(strategy._build_strategy.async_mode, True)
        self.assertEqual(strategy._execute_strategy.num_threads, 2)

        # test set_program_config using DistributeTranspilerConfig()
        program_config_class = DistributeTranspilerConfig()
        program_config_class.min_block_size = 81920
        strategy.set_program_config(program_config_class)
        program_config = strategy.get_program_config()
        self.assertEqual(program_config.min_block_size, 81920)

        # test set_program_config using dict
        program_config_dict = dict()
        program_config_dict['min_block_size'] = 8192
        strategy.set_program_config(program_config_dict)
        program_config = strategy.get_program_config()
        self.assertEqual(program_config.min_block_size, 8192)

        # test set_program_config exception
        program_config_dict['unknown'] = None
        self.assertRaises(Exception, strategy.set_program_config,
                          program_config_dict)
        program_config_illegal = None
        self.assertRaises(Exception, strategy.set_program_config,
                          program_config_illegal)

        trainer_runtime_config = strategy.get_trainer_runtime_config()
        trainer_runtime_config.runtime_configs[
            'communicator_send_queue_size'] = '50'
        runtime_configs = trainer_runtime_config.get_communicator_flags()
        self.assertIn('communicator_send_queue_size', runtime_configs)
        self.assertNotIn('communicator_independent_recv_thread',
                         runtime_configs)
        self.assertEqual(runtime_configs['communicator_send_queue_size'], '2')
Example #18
    def test_geo_strategy(self):
        strategy = StrategyFactory.create_geo_strategy(5)
        self.assertEqual(strategy._program_config.sync_mode, False)
        self.assertEqual(strategy._program_config.runtime_split_send_recv,
                         True)
        self.assertEqual(strategy._program_config.geo_sgd_mode, True)
        self.assertEqual(strategy._program_config.geo_sgd_need_push_nums, 5)
        self.assertEqual(strategy._build_strategy.async_mode, True)

        # test set_build_strategy using fluid.BuildStrategy
        build_strategy_class = fluid.BuildStrategy()
        build_strategy_class.memory_optimize = False
        strategy.set_build_strategy(build_strategy_class)
        build_strategy = strategy.get_build_strategy()
        self.assertEqual(build_strategy.memory_optimize, False)

        # test set_build_strategy using dict
        build_strategy_dict = dict()
        build_strategy_dict['memory_optimize'] = True
        strategy.set_build_strategy(build_strategy_dict)
        build_strategy = strategy.get_build_strategy()
        self.assertEqual(build_strategy.memory_optimize, True)

        # test set_build_strategy exception
        build_strategy_dict['unknown'] = None
        self.assertRaises(Exception, strategy.set_build_strategy,
                          build_strategy_dict)
        build_strategy_illegal = None
        self.assertRaises(Exception, strategy.set_build_strategy,
                          build_strategy_illegal)

        os.environ["CPU_NUM"] = '100'
        trainer_runtime_config = strategy.get_trainer_runtime_config()
        runtime_configs = trainer_runtime_config.get_communicator_flags()
        self.assertIn('communicator_thread_pool_size', runtime_configs)
        self.assertIn('communicator_send_wait_times', runtime_configs)
        self.assertNotIn('communicator_independent_recv_thread',
                         runtime_configs)
Example #19
    def __init__(self, optimizer, strategy=None):
        super(TranspilerOptimizer, self).__init__(optimizer, strategy)

        self.opt_info = dict()
        if strategy:
            if isinstance(strategy, DistributeTranspilerConfig):
                self._strategy = strategy
            elif isinstance(strategy, DistributedStrategy):
                self._strategy = strategy
            else:
                raise TypeError(
                    "In {} mode, strategy must be an instance of DistributeTranspilerConfig, SyncStrategy, HalfAsyncStrategy, AsyncStrategy, or GeoStrategy".
                    format(fleet._mode))
        else:
            self._strategy = StrategyFactory.create_sync_strategy()

        if isinstance(self._strategy, DistributedStrategy):
            self.opt_info = self._strategy.get_debug_opt()
            self.opt_info["mpi_rank"] = fleet.worker_index()
            self.opt_info["mpi_size"] = fleet.worker_num()
            self.opt_info["trainer"] = "MultiTrainer"
            self.opt_info["device_worker"] = "Hogwild"
            fleet._set_opt_info(self.opt_info)
Example #20
    def test_pserver(self):
        role = role_maker.UserDefinedRoleMaker(
            current_id=0,
            role=role_maker.Role.SERVER,
            worker_num=2,
            server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"])

        fleet.init(role)

        batch_size = 128
        is_sparse = True
        is_distribute = False

        strategy = StrategyFactory.create_geo_strategy(5)

        avg_cost, _, _, _ = train_network(batch_size, is_distribute, is_sparse)

        optimizer = fluid.optimizer.SGD(0.1)
        optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(avg_cost)

        pserver_startup_program = fleet.startup_program
        pserver_main_program = fleet.main_program
Example #21
def train(args):
    """run train"""
    # set random
    program = fluid.default_main_program()
    program.random_seed = args.random_seed

    # Determine, from environment variables, the role this machine/process plays
    # in distributed training, then initialize this node with the fleet API's init()
    role = role_maker.PaddleCloudRoleMaker()
    fleet.init(role)

    # The distributed run mode can be further specified through configuration
    # (DistributeTranspilerConfig / StrategyFactory). Below, the run mode is chosen
    # from args.sync_mode, and parameters are sliced and assigned to different nodes
    if args.sync_mode == "sync":
        strategy = StrategyFactory.create_sync_strategy()
    elif args.sync_mode == "half_async":
        strategy = StrategyFactory.create_half_async_strategy()
    elif args.sync_mode == "async":
        strategy = StrategyFactory.create_async_strategy()

    # set model
    logger.info("TDM Begin build network.")
    tdm_model = TdmTrainNet(args)
    inputs = tdm_model.input_data()

    logger.info("TDM Begin load tree travel & layer.")
    avg_cost, acc = tdm_model.tdm(inputs)
    logger.info("TDM End build network.")
    # Configure the distributed optimizer with the chosen strategy and build the program
    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=args.learning_rate,
                                              lazy_mode=True)

    optimizer = fleet.distributed_optimizer(optimizer, strategy)
    optimizer.minimize(avg_cost)
    logger.info("TDM End append backward.")

    # Run different logic depending on the node's role
    if fleet.is_server():
        logger.info("TDM Run server ...")
        # Initialize and run the parameter server node
        logger.info("TDM init model path: {}".format(
            args.init_model_files_path))
        # All model variables except the TDM tree-structure variables should be initialized here
        fleet.init_server(args.init_model_files_path)
        lr = fluid.global_scope().find_var("learning_rate_0")
        if lr:
            lr.get_tensor().set(
                np.array(args.learning_rate).astype('float32'),
                fluid.CPUPlace())
            logger.info("TDM Set learning rate {}".format(args.learning_rate))
        else:
            logger.info("TDM Didn't find learning_rate_0 param")
        logger.info("TDM load End")

        fleet.run_server()
        logger.info("TDM Run server success!")
    elif fleet.is_worker():
        logger.info("TDM Run worker ...")
        # Initialize the worker node
        fleet.init_worker()
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        logger.info("TDM Run Startup Begin")
        # Run fleet.startup_program, which contains the distributed startup steps
        exe.run(fleet.startup_program)

        # Set Learning Rate
        lr = fluid.global_scope().find_var("learning_rate_0")
        if lr:
            lr.get_tensor().set(
                np.array(args.learning_rate).astype('float32'), place)
            logger.info("TDM Set learning rate {}".format(args.learning_rate))

        # Set TDM Variable
        logger.info("TDM Begin load parameter.")
        # Set TDM_Tree_Info
        # Tree-structure variables are not updated by training and are not stored on the
        # parameter server, so they must be set manually on the local node
        tdm_param_prepare_dict = tdm_sampler_prepare(args)
        tdm_param_prepare_dict['info_array'] = tdm_child_prepare(args)
        Numpy_model = {}
        Numpy_model['TDM_Tree_Travel'] = tdm_param_prepare_dict['travel_array']
        Numpy_model['TDM_Tree_Layer'] = tdm_param_prepare_dict['layer_array']
        Numpy_model['TDM_Tree_Info'] = tdm_param_prepare_dict['info_array']
        # Numpy_model['TDM_Tree_Emb'] = tdm_emb_prepare(args)
        # In distributed training the embedding is stored on the parameter server, so no local set is needed
        for param_name in Numpy_model:
            param_t = fluid.global_scope().find_var(param_name).get_tensor()
            param_t.set(Numpy_model[str(param_name)].astype('int32'), place)

        logger.info("TDM Run Startup End")

        # Train loop
        dataset, file_list, example_num = get_dataset(inputs, args)
        logger.info("TDM Distributed training begin ...")
        for epoch in range(args.epoch_num):
            # local shuffle
            random.shuffle(file_list)
            dataset.set_filelist(file_list)

            # Worker nodes run fleet.main_program, which has been pruned for distributed training
            start_time = time.time()
            exe.train_from_dataset(program=fleet.main_program,
                                   dataset=dataset,
                                   fetch_list=[acc, avg_cost],
                                   fetch_info=[
                                       "Epoch {} acc ".format(epoch),
                                       "Epoch {} loss ".format(epoch)
                                   ],
                                   print_period=1,
                                   debug=False)
            end_time = time.time()
            logger.info(
                "Epoch {} finished, use time {} second, speed {} example/s".
                format(epoch, end_time - start_time,
                       example_num * 1.0 / (end_time - start_time)))

            # By default, node 0 saves the model
            if fleet.is_first_worker():
                model_path = os.path.join(args.model_files_path,
                                          "epoch_" + str(epoch))
                fleet.save_persistables(executor=exe, dirname=model_path)
                logger.info("Begin upload files")
                # upload_files(model_path, warm_up=False)
                # In a distributed environment, uploading the model to HDFS is supported
        logger.info("TDM Before stop worker")
        fleet.stop_worker()
        logger.info("TDM Distributed training success!")
Example #22
 def _set_strategy(self, args):
     """配置运行的distributed_strategy, 
        build_strategy 配置在do_training中"""
     if int(os.getenv("PADDLE_COMPATIBILITY_CHECK", '0')):
         self.strategy = DistributeTranspilerConfig()
         if args.run_params["sync_mode"] == "sync":
             self.strategy.sync_mode = True
             self.strategy.runtime_split_send_recv = False
             self.async_mode = False
         elif args.run_params["sync_mode"] == "half_async":
             self.strategy.sync_mode = False
             self.async_mode = False
         elif args.run_params["sync_mode"] == "async":
             self.strategy.sync_mode = False
             self.async_mode = True
         elif args.run_params["sync_mode"] == "geo_async":
             self.strategy.sync_mode = False
             self.async_mode = True
             self.strategy.geo_sgd_mode = True
             self.strategy.geo_sgd_need_push_nums = 400
         self.strategy.mode = "pserver"
         self.strategy.slice_var_up = args.run_params['slice_var_up']
         self.strategy.enable_dc_asgd = args.run_params['enable_dc_asgd']
         # TODO: split_method=HashName currently causes a bug; enable this option after it is fixed
         # if args.run_params['split_method']:
         #    self.strategy.split_method = HashName
         # else:
         #    self.strategy.split_method = RoundRobin
         self.strategy.wait_port = args.run_params['wait_port']
         self.strategy.runtime_split_send_recv = args.run_params[
             'runtime_split_send_recv']
         self.strategy.use_hierarchical_allreduce = args.run_params[
             'use_hierarchical_allreduce']
         self.strategy.geo_sgd_need_push_nums = args.run_params['push_nums']
     else:
         self.strategy = StrategyFactory.create_sync_strategy()
         # trainer_runtime_config = TrainerRuntimeConfig()
         # trainer_runtime_config.send_queue_size = "16"
         # trainer_runtime_config.thread_pool_size="32"
         # trainer_runtime_config.max_merge_var_num="16"
         # trainer_runtime_config.is_sgd_communicator="0"
         if args.run_params["sync_mode"] == "sync":
             self.strategy = StrategyFactory.create_sync_strategy()
         elif args.run_params["sync_mode"] == "half_async":
             self.strategy = StrategyFactory.create_half_async_strategy()
         elif args.run_params["sync_mode"] == "async":
             self.strategy = StrategyFactory.create_async_strategy()
             build_strategy = self.strategy.get_build_strategy()
             build_strategy.memory_optimize = False
             self.strategy.set_build_strategy(build_strategy)
         elif args.run_params["sync_mode"] == "geo_async":
             self.strategy = StrategyFactory.create_geo_strategy(400)
         program_config = self.strategy.get_program_config()
         program_config.slice_var_up = args.run_params['slice_var_up']
         program_config.enable_dc_asgd = args.run_params['enable_dc_asgd']
         # TODO: split_method=HashName currently causes a bug; enable this option after it is fixed
         # if args.run_params['split_method']:
         #    program_config.split_method = HashName
         # else:
         #    program_config.split_method = RoundRobin
         program_config.wait_port = args.run_params['wait_port']
         program_config.runtime_split_send_recv = args.run_params[
             'runtime_split_send_recv']
         program_config.use_hierarchical_allreduce = args.run_params[
             'use_hierarchical_allreduce']
         program_config.geo_sgd_need_push_nums = args.run_params[
             'push_nums']
Example #23
def train(args):
    datas, avg_cost, predict, train_file_path = model()

    endpoints = args.endpoints.split(",")
    if args.role.upper() == "PSERVER":
        current_id = endpoints.index(args.current_endpoint)
    else:
        current_id = 0
    role = role_maker.UserDefinedRoleMaker(
        current_id=current_id,
        role=role_maker.Role.WORKER
        if args.role.upper() == "TRAINER" else role_maker.Role.SERVER,
        worker_num=args.trainers,
        server_endpoints=endpoints)

    exe = fluid.Executor(fluid.CPUPlace())
    fleet.init(role)

    strategy = StrategyFactory.create_half_async_strategy()

    optimizer = fluid.optimizer.SGD(learning_rate=0.0001)
    optimizer = fleet.distributed_optimizer(optimizer, strategy)
    optimizer.minimize(avg_cost)

    if fleet.is_server():
        logger.info("run pserver")

        fleet.init_server()
        fleet.run_server()
    elif fleet.is_worker():
        logger.info("run trainer")

        fleet.init_worker()
        exe.run(fleet.startup_program)

        thread_num = 2
        filelist = []
        for _ in range(thread_num):
            filelist.append(train_file_path)

        # config dataset
        dataset = fluid.DatasetFactory().create_dataset()
        dataset.set_batch_size(128)
        dataset.set_use_var(datas)
        pipe_command = 'python ctr_dataset_reader.py'
        dataset.set_pipe_command(pipe_command)

        dataset.set_filelist(filelist)
        dataset.set_thread(thread_num)

        for epoch_id in range(10):
            logger.info("epoch {} start".format(epoch_id))
            pass_start = time.time()
            dataset.set_filelist(filelist)
            exe.train_from_dataset(
                program=fleet.main_program,
                dataset=dataset,
                fetch_list=[avg_cost],
                fetch_info=["cost"],
                print_period=100,
                debug=False)
            pass_time = time.time() - pass_start
            logger.info("epoch {} finished, pass_time {}".format(epoch_id,
                                                                 pass_time))
        fleet.stop_worker()