def __init__(self, config, start=True):
  """Creates a new worker server.

  Args:
    config: A `tf.data.experimental.service.WorkerConfig` configuration.
    start: (Optional.) Boolean, indicating whether to start the server after
      creating it.
  """
  if config.dispatcher_address is None:
    raise ValueError("must specify a dispatcher_address")
  config_proto = service_config_pb2.WorkerConfig(
      dispatcher_address=config.dispatcher_address,
      worker_address=config.worker_address,
      port=config.port,
      protocol=config.protocol)
  self._server = _pywrap_server_lib.TF_DATA_NewWorkerServer(
      config_proto.SerializeToString())
  if start:
    self._server.start()
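# A minimal usage sketch for the config-based constructor above. It assumes a
# dispatcher is already running and reachable; the address "localhost:5000" is
# an illustrative placeholder, not a value taken from this file.
#
#   import tensorflow as tf
#
#   config = tf.data.experimental.service.WorkerConfig(
#       dispatcher_address="localhost:5000")
#   worker = tf.data.experimental.service.WorkerServer(config)
#   worker.join()  # Blocks until the worker shuts down.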
def __init__(self,
             port,
             dispatcher_address,
             worker_address=None,
             protocol=None,
             start=True):
  """Creates a new worker server.

  Args:
    port: Specifies the port to bind to. A value of 0 indicates that the
      worker can bind to any available port.
    dispatcher_address: Specifies the address of the dispatcher.
    worker_address: (Optional.) Specifies the address of the worker server.
      This address is passed to the dispatcher so that the dispatcher can
      tell clients how to connect to this worker. Defaults to
      `"localhost:%port%"`, where `%port%` will be replaced with the port used
      by the worker.
    protocol: (Optional.) Specifies the protocol to be used by the server.
      Acceptable values include `"grpc", "grpc+local"`. Defaults to `"grpc"`.
    start: (Optional.) Boolean, indicating whether to start the server after
      creating it. Defaults to `True`.

  Raises:
    tf.errors.OpError: Or one of its subclasses if an error occurs while
      creating the TensorFlow server.
  """
  if worker_address is None:
    worker_address = "localhost:%port%"
  if protocol is None:
    protocol = "grpc"

  self._protocol = protocol
  config = service_config_pb2.WorkerConfig(
      port=port,
      protocol=protocol,
      dispatcher_address=dispatcher_address,
      worker_address=worker_address)
  self._server = _pywrap_server_lib.TF_DATA_NewWorkerServer(
      config.SerializeToString())
  if start:
    self._server.start()
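# A minimal usage sketch for the older, argument-based constructor above,
# assuming the enclosing class is the worker server class and a dispatcher is
# already listening at "localhost:5000" (both addresses and the port 0 choice,
# which binds any free port per the docstring, are illustrative placeholders).
#
#   worker = WorkerServer(port=0, dispatcher_address="localhost:5000")
#   worker.join()  # Blocks until the worker shuts down.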