def test_restart_ps(self):
    model_def = "mnist.mnist_functional_api.custom_model"
    num_data = 8
    training_data = [
        get_random_batch(self._batch_size) for _ in range(num_data)
    ]
    workers = []
    self._create_pserver(model_def, 2)
    for w in range(2):
        self._reset_pserver()
        arguments = [
            "--worker_id",
            0,
            "--job_type",
            elasticdl_pb2.TRAINING,
            "--minibatch_size",
            self._batch_size,
            "--model_zoo",
            self._model_zoo_path,
            "--model_def",
            model_def,
            "--distribution_strategy",
            DistributionStrategy.PARAMETER_SERVER,
        ]
        args = parse_worker_args(arguments)
        tf.keras.backend.clear_session()
        tf.random.set_seed(22)
        worker = Worker(args, ps_client=PSClient(self._channels))
        workers.append(worker)
        worker._trainer._run_model_call_before_training(training_data[0][0])
        for i in range(num_data):
            worker._trainer._get_model()
            w_loss, w_grads = worker._trainer._training_process_eagerly(
                training_data[i][0], training_data[i][1]
            )
            worker._trainer._report_gradient(w_grads)
            if w == 1 and i == 3:
                # Restart the PS for the 2nd worker at i == 3.
                # self._restart_pserver(model_def)
                self._reset_pserver()
                # `push_dense_parameters` will be called in `get_model` to
                # initialize variables on the PS with worker variables.
                worker._trainer._get_model()
                # Send the grads again, because these grads have not been
                # applied to the worker variables.
                worker._trainer._report_gradient(w_grads)

    for var_name in workers[0]._trainer._non_embed_vars:
        np.testing.assert_array_equal(
            workers[0]._trainer._non_embed_vars[var_name].numpy(),
            workers[1]._trainer._non_embed_vars[var_name].numpy(),
        )
    self._close_channels()
def test_worker_pull_embedding(self):
    model_def = "mnist.mnist_functional_api.custom_model"
    self._create_pserver(model_def, 2)
    arguments = [
        "--worker_id",
        0,
        "--job_type",
        elasticdl_pb2.TRAINING,
        "--minibatch_size",
        self._batch_size,
        "--model_zoo",
        self._model_zoo_path,
        "--model_def",
        model_def,
        "--distribution_strategy",
        DistributionStrategy.PARAMETER_SERVER,
    ]
    args = parse_worker_args(arguments)
    worker = Worker(args, ps_client=PSClient(self._channels))

    # Test looking up embedding vectors that do not exist yet.
    layers = ["test-2", "test-2-slot"]
    ids = [3, 5, 1, 6, 10, 2, 1, 2, 4, 7, 9]
    embedding_table_args = [
        (layers[0], 8, "uniform", False),
        (layers[1], 8, 3.3, True),
    ]

    # Initialize the embedding table objects on every pserver.
    for pserver in self._pservers:
        for layer, table_args in zip(layers, embedding_table_args):
            pserver.parameters.embedding_params[layer] = EmbeddingTable(
                *table_args
            )

    result_dict = {}
    for layer in layers:
        embedding = worker._ps_client.pull_embedding_vectors(layer, ids)
        result_dict[layer] = embedding

    for layer in layers:
        expected_result = []
        for embedding_id in ids:
            ps_id = int_to_id(embedding_id, len(self._pservers))
            table = self._pservers[ps_id].parameters.embedding_params[layer]
            expected_result.append(table.get([embedding_id]))
        expected_result = np.concatenate(expected_result)
        self.assertTrue(np.allclose(expected_result, result_dict[layer]))
    self._close_channels()
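# NOTE: The test above relies on `int_to_id` to locate the PS shard that
# owns a given embedding id. A minimal sketch of that routing, assuming
# simple modulo partitioning (the real `int_to_id` implementation may
# differ):
def _int_to_id_sketch(embedding_id, ps_num):
    # Each pserver owns the ids congruent to its index modulo ps_num.
    return embedding_id % ps_num

# With 2 pservers, ids [3, 5, 1, 6, 10, 2] map to shards [1, 1, 1, 0, 0, 0].
assert [_int_to_id_sketch(i, 2) for i in [3, 5, 1, 6, 10, 2]] == [1, 1, 1, 0, 0, 0]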
def main():
    args = parse_worker_args()
    logger = log_utils.get_logger(__name__)
    logger.info("Starting worker %d", args.worker_id)
    if args.master_addr is None:
        raise ValueError("master_addr is missing for worker")

    master_client = MasterClient(
        build_channel(args.master_addr), args.worker_id
    )

    ps_client = None
    if (
        args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER
        and args.ps_addrs
    ):
        ps_channels = []
        ps_addrs = args.ps_addrs.split(",")
        for addr in ps_addrs:
            # addr is in the form "ps-pod-name.namespace.svc:port".
            channel = build_channel(addr)
            succeeded = False
            for i in range(CONNECT_PS_MAX_RETRIES):
                try:
                    grpc.channel_ready_future(channel).result(
                        timeout=CONNECT_PS_TIMEOUT
                    )
                    logger.info(
                        "grpc channel %s to pod %s is ready"
                        % (addr, addr.split(".")[0])
                    )
                    ps_channels.append(channel)
                    succeeded = True
                    break
                except grpc.FutureTimeoutError:
                    logger.warning(
                        "Failed to connect to pod %s on attempt %d"
                        % (addr.split(".")[0], i)
                    )
            if not succeeded:
                raise TimeoutError(
                    "Timed out connecting to pod %s after %d retries"
                    % (addr.split(".")[0], CONNECT_PS_MAX_RETRIES)
                )
        ps_client = PSClient(ps_channels)

    worker = Worker(
        args,
        master_client=master_client,
        ps_client=ps_client,
        set_parallelism=True,
    )
    worker.run()
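# NOTE: The readiness-check-with-retries pattern in `main` can be factored
# into a small helper. A sketch using the public grpc API; the default
# retry count and timeout below are illustrative placeholders, not the
# worker's CONNECT_PS_MAX_RETRIES / CONNECT_PS_TIMEOUT values.
import grpc

def _wait_for_channel_ready(channel, max_retries=3, timeout=10):
    """Return True once `channel` is ready, False if all attempts time out."""
    for _ in range(max_retries):
        try:
            grpc.channel_ready_future(channel).result(timeout=timeout)
            return True
        except grpc.FutureTimeoutError:
            continue
    return False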
def _create_worker(self, worker_num):
    for i in range(worker_num):
        tf.keras.backend.clear_session()
        tf.random.set_seed(22)
        arguments = [
            "--job_type",
            elasticai_api_pb2.TRAINING,
            "--minibatch_size",
            self._batch_size,
            "--model_zoo",
            self._model_zoo_path,
            "--model_def",
            self._model_def,
            "--distribution_strategy",
            DistributionStrategy.PARAMETER_SERVER,
        ]
        args = parse_worker_args(arguments)
        worker = Worker(args, ps_client=PSClient(self._channels))
        self._workers.append(worker)
def distributed_train_and_evaluate(
    feature_shape,
    model_zoo_path,
    model_def,
    model_params="",
    eval_metrics_fn="eval_metrics_fn",
    loss="loss",
    training=True,
    dataset_name=DatasetName.IMAGE_DEFAULT,
    use_async=False,
    get_model_steps=1,
    ps_channels=None,
    pservers=None,
    distribution_strategy=DistributionStrategy.PARAMETER_SERVER,
):
    """Runs distributed training and evaluation with a local master.
    gRPC calls are mocked by local master calls.

    Args:
        feature_shape: The shape of the model input.
        model_zoo_path: The directory that contains user-defined model files
            or a specific model file.
        model_def: The import path to the model definition function/class in
            the model zoo, e.g. "cifar10_subclass.CustomModel".
        model_params: The dictionary of model parameters in a string that
            will be used to instantiate the model, e.g. "param1=1,param2=2".
        eval_metrics_fn: The name of the evaluation metrics function defined
            in the model file.
        loss: The name of the loss function defined in the model file.
        training: True for job type `TRAIN_WITH_EVALUATION`, False for job
            type `EVALUATION`.
        dataset_name: A dataset name from `DatasetName`.
        use_async: A bool. True if using asynchronous updates.
        get_model_steps: The worker performs `get_model` from the parameter
            server every this many steps.
        ps_channels: A list of channels to all parameter server pods.
        pservers: A list of parameter server pods.
        distribution_strategy: The distribution strategy used by workers,
            e.g. DistributionStrategy.PARAMETER_SERVER or
            DistributionStrategy.AllreduceStrategy.

    Returns:
        An integer indicating the model version after the distributed
        training and evaluation.
    """
    job_type = (
        JobType.TRAINING_WITH_EVALUATION
        if training
        else JobType.EVALUATION_ONLY
    )
    evaluation_steps = (
        1 if job_type == JobType.TRAINING_WITH_EVALUATION else 0
    )
    batch_size = 8 if dataset_name == DatasetName.IMAGENET else 16
    pservers = pservers or []
    ps_channels = ps_channels or []

    model_module = load_module(
        get_module_file_path(model_zoo_path, model_def)
    ).__dict__

    for channel in ps_channels:
        grpc.channel_ready_future(channel).result()
    worker_arguments = [
        "--worker_id",
        "1",
        "--job_type",
        job_type,
        "--minibatch_size",
        batch_size,
        "--model_zoo",
        model_zoo_path,
        "--model_def",
        model_def,
        "--model_params",
        model_params,
        "--loss",
        loss,
        "--get_model_steps",
        get_model_steps,
        "--distribution_strategy",
        distribution_strategy,
    ]
    args = parse_worker_args(worker_arguments)

    if dataset_name in [DatasetName.IMAGENET, DatasetName.FRAPPE]:
        record_num = batch_size
    else:
        record_num = 128
    shards = {
        create_recordio_file(record_num, dataset_name, feature_shape): (
            0,
            record_num,
        )
    }
    if training:
        training_shards = shards
        evaluation_shards = shards
    else:
        training_shards = {}
        evaluation_shards = shards
    task_d = _TaskDispatcher(
        training_shards,
        evaluation_shards,
        {},
        records_per_task=64,
        num_epochs=1,
    )

    # The only difference between the training and evaluation-only cases is
    # the boolean flag, which is True only for evaluation-only jobs.
    evaluation_service = EvaluationService(
        None,
        task_d,
        0,
        0,
        evaluation_steps,
        not training,
        model_module[eval_metrics_fn],
    )
    task_d.set_evaluation_service(evaluation_service)

    def master_creator():
        return MasterServicer(
            batch_size,
            task_d,
            evaluation_service=evaluation_service,
            master=None,
        )

    svc, port = _server(master_creator)
    mc = MasterClient(build_channel("localhost:%d" % port), 1)
    worker = Worker(args, master_client=mc, ps_client=PSClient(ps_channels))

    for pservicer in pservers:
        # FIXME(yancey1989): decouple the pserver and the master client
        pservicer._master_stub = mc

    worker.run()

    task = mc.get_task()
    # Stop the master servicer.
    svc.stop(0)
    # There should be no more tasks left.
    if task.shard_name:
        raise RuntimeError(
            "There are some tasks unfinished after the worker exits."
        )
    return task.model_version
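# NOTE: A hypothetical invocation of `distributed_train_and_evaluate` for a
# single-worker smoke test. The model zoo path and feature shape below are
# illustrative placeholders, not values taken from this codebase.
# model_version = distributed_train_and_evaluate(
#     feature_shape=[28, 28],
#     model_zoo_path="/path/to/model_zoo",
#     model_def="mnist.mnist_functional_api.custom_model",
#     training=True,
# )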
def __init__(
    self,
    args,
    channel=None,
    ps_channels=None,
    max_minibatch_retry_num=DEFAULT_MAX_MINIBATCH_RETRY_NUM,
    max_allreduce_retry_num=DEFAULT_MAX_ALLREDUCE_RETRY_NUM,
    set_parallelism=False,
):
    """
    Arguments:
        channel: The channel for the gRPC master service.
        ps_channels: The channels for the PS service.
        max_minibatch_retry_num: The maximum number of retries for a
            minibatch when its results (e.g. gradients) are not accepted
            by the master.
        max_allreduce_retry_num: The maximum number of retries for the
            allreduce operation if an allreduce-based distributed
            training strategy is used.
    """
    self._args = args
    self.logger = get_logger("Worker", level=args.log_level.upper())

    if set_parallelism:
        # Explicitly setting the parallelism avoids multi-process hangs,
        # possibly due to an unknown bug in TensorFlow. This must be
        # called before TensorFlow is initialized. It is off by default
        # to keep the unit tests happy.
        num_threads = os.cpu_count()
        tf.config.threading.set_inter_op_parallelism_threads(num_threads)
        tf.config.threading.set_intra_op_parallelism_threads(num_threads)

    if channel is None:
        self._stub = None
    else:
        self._stub = elasticdl_pb2_grpc.MasterStub(channel)

    self._use_multi_ps = False
    self._ps_vars = {}
    self._ps_num = 0
    if isinstance(ps_channels, list) and len(ps_channels) > 0:
        self._use_multi_ps = True
        self._ps_stubs = [
            elasticdl_pb2_grpc.PserverStub(c) for c in ps_channels
        ]
        self._var_to_ps = {}
        self._ps_num = len(self._ps_stubs)
        self._ps_client = PSClient(self._ps_stubs)
    self._distribution_strategy = args.distribution_strategy
    if (
        self._distribution_strategy
        == DistributionStrategy.PARAMETER_SERVER
        and self._use_multi_ps is False
    ):
        raise ValueError(
            "PS channels are not set up under the parameter server strategy"
        )

    self._max_minibatch_retry_num = max_minibatch_retry_num
    self._max_allreduce_retry_num = max_allreduce_retry_num
    self._init_from_args(args)
    self._timing = Timing(args.log_level.upper() == "DEBUG", self.logger)
    self._log_loss_count = 0
    self._var_created = False
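# NOTE: Wiring up a PS-based worker from the constructor above would look
# roughly like the sketch below. The PS address and model zoo path are
# placeholders; the argument list mirrors the worker tests in this codebase.
# ps_channels = [build_channel("ps-0.default.svc:2222")]
# args = parse_worker_args(
#     [
#         "--worker_id",
#         0,
#         "--job_type",
#         elasticdl_pb2.TRAINING,
#         "--minibatch_size",
#         16,
#         "--model_zoo",
#         "/path/to/model_zoo",
#         "--model_def",
#         "mnist.mnist_functional_api.custom_model",
#         "--distribution_strategy",
#         DistributionStrategy.PARAMETER_SERVER,
#     ]
# )
# worker = Worker(args, ps_channels=ps_channels)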
def test_compare_onebatch_train(self):
    model_def = "mnist.mnist_functional_api.custom_model"
    self._create_pserver(model_def, 2)
    images, labels = get_random_batch(self._batch_size)
    # TODO(yunjian.lmh): test optimizer wrapper
    arguments = [
        "--job_type",
        elasticai_api_pb2.TRAINING,
        "--minibatch_size",
        self._batch_size,
        "--model_zoo",
        self._model_zoo_path,
        "--model_def",
        model_def,
        "--distribution_strategy",
        DistributionStrategy.PARAMETER_SERVER,
    ]
    args = parse_worker_args(arguments)
    tf.keras.backend.clear_session()
    tf.random.set_seed(22)

    worker = Worker(args, ps_client=PSClient(self._channels))
    worker._trainer._run_model_call_before_training(images)
    worker._trainer._get_model()
    w_loss, w_grads = worker._trainer._training_process_eagerly(
        images, labels
    )
    worker._trainer._report_gradient(w_grads)

    tf.keras.backend.clear_session()
    tf.random.set_seed(22)
    (
        model,
        feed,
        loss_fn,
        opt_fn,
        eval_metrics_fn,
        prediction_outputs_processor,
        create_data_reader_fn,
        callback_list,
    ) = get_model_spec(
        model_zoo=self._model_zoo_path,
        model_def=model_def,
        feed="feed",
        loss="loss",
        optimizer="optimizer",
        eval_metrics_fn="eval_metrics_fn",
        prediction_outputs_processor="PredictionOutputsProcessor",
        custom_data_reader="custom_data_reader",
        callbacks="callbacks",
    )

    with tf.GradientTape() as tape:
        output = model.call(images, training=True)
        labels = tf.reshape(labels, [-1])
        loss = loss_fn(labels, output)
    grads = tape.gradient(loss, model.trainable_variables)
    opt_fn().apply_gradients(zip(grads, model.trainable_variables))

    for v in model.trainable_variables:
        ps_id = string_to_id(v.name, len(self._channels))
        ps_v = self._pservers[ps_id].parameters.get_non_embedding_param(
            v.name
        )
        np.testing.assert_array_equal(ps_v.numpy(), v.numpy())
    self._close_channels()
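# NOTE: The assertion loop above routes each variable to a PS shard via
# `string_to_id`, the name-based counterpart of `int_to_id`. A minimal
# sketch, assuming a deterministic hash of the variable name (the real
# implementation may use a different hash function):
import hashlib

def _string_to_id_sketch(name, ps_num):
    # Hash the variable name deterministically (the builtin `hash` is
    # randomized per process for strings), then bucket into ps_num shards.
    digest = hashlib.md5(name.encode("utf-8")).hexdigest()
    return int(digest, 16) % ps_num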