def __init__(self, config=None, start=True):
  """Creates a new dispatch server.

  Args:
    config: (Optional.) A
      `tf.data.experimental.service.DispatcherConfig` configuration. If
      `None`, the dispatcher will use default configuration values.
    start: (Optional.) Boolean, indicating whether to start the server after
      creating it.

  Raises:
    ValueError: If `fault_tolerant_mode` is enabled in the config without a
      `work_dir` being configured.
  """
  config = config or DispatcherConfig()
  # Fault tolerance requires a journal directory to persist dispatcher state.
  if config.fault_tolerant_mode and not config.work_dir:
    raise ValueError(
        "Cannot enable fault tolerant mode without configuring a work_dir")
  self._config = config
  # Translate the Python-side config into the proto consumed by the C++
  # dispatch server implementation.
  config_proto = service_config_pb2.DispatcherConfig(
      port=config.port,
      protocol=config.protocol,
      work_dir=config.work_dir,
      fault_tolerant_mode=config.fault_tolerant_mode,
      job_gc_check_interval_ms=config.job_gc_check_interval_ms,
      job_gc_timeout_ms=config.job_gc_timeout_ms)
  self._server = _pywrap_server_lib.TF_DATA_NewDispatchServer(
      config_proto.SerializeToString())
  if start:
    self._server.start()
def __init__(self,
             port,
             protocol=None,
             work_dir=None,
             fault_tolerant_mode=None,
             start=True):
  """Creates a new dispatch server.

  Args:
    port: Specifies the port to bind to.
    protocol: (Optional.) Specifies the protocol to be used by the server.
      Acceptable values include `"grpc", "grpc+local"`. Defaults to
      `"grpc"`.
    work_dir: (Optional.) A directory to store dispatcher state in. This
      argument is required for the dispatcher to be able to recover from
      restarts.
    fault_tolerant_mode: (Optional.) Whether the dispatcher should write its
      state to a journal so that it can recover from restarts. Dispatcher
      state, including registered datasets and created jobs, is synchronously
      written to the journal before responding to RPCs. If `True`,
      `work_dir` must also be specified. Defaults to `False`.
    start: (Optional.) Boolean, indicating whether to start the server after
      creating it. Defaults to `True`.

  Raises:
    tf.errors.OpError: Or one of its subclasses if an error occurs while
      creating the TensorFlow server.
  """
  # Resolve optional arguments to their effective defaults.
  if protocol is None:
    protocol = DEFAULT_PROTOCOL
  self._protocol = protocol
  if work_dir is None:
    work_dir = ""
  self._work_dir = work_dir
  # With a work_dir available, datasets can be shared through the filesystem;
  # otherwise they are transferred over RPC.
  if self._work_dir:
    self._dataset_sharing_mode = "shared_filesystem"
  else:
    self._dataset_sharing_mode = "rpc"
  if fault_tolerant_mode is None:
    fault_tolerant_mode = False
  self._fault_tolerant_mode = fault_tolerant_mode
  # Journaling (fault tolerance) needs somewhere to write its state.
  if self._fault_tolerant_mode and not self._work_dir:
    raise ValueError(
        "Cannot enable fault tolerant mode without configuring a work_dir")
  # Build the config proto consumed by the C++ dispatch server.
  config = service_config_pb2.DispatcherConfig(
      port=port,
      protocol=self._protocol,
      work_dir=self._work_dir,
      fault_tolerant_mode=self._fault_tolerant_mode,
      dataset_sharing_mode=service_config_pb2.DatasetSharingMode.Value(
          self._dataset_sharing_mode.upper()))
  self._server = _pywrap_server_lib.TF_DATA_NewDispatchServer(
      config.SerializeToString())
  if start:
    self._server.start()
def __init__(self, port, protocol=None, start=True):
  """Creates a new dispatch server.

  Args:
    port: Specifies the port to bind to.
    protocol: (Optional.) Specifies the protocol to be used by the server.
      Acceptable values include `"grpc", "grpc+local"`. Defaults to
      `"grpc"`.
    start: (Optional.) Boolean, indicating whether to start the server after
      creating it. Defaults to `True`.

  Raises:
    tf.errors.OpError: Or one of its subclasses if an error occurs while
      creating the TensorFlow server.
  """
  # Default to gRPC when no protocol is specified.
  self._protocol = "grpc" if protocol is None else protocol
  # Serialize the config and hand it to the C++ dispatch server.
  config = service_config_pb2.DispatcherConfig(
      port=port, protocol=self._protocol)
  self._server = _pywrap_server_lib.TF_DATA_NewDispatchServer(
      config.SerializeToString())
  if start:
    self._server.start()