Example #1
    def test_factory_returns_none(self):
        xb.register_backend_factory("none", lambda: None, priority=10)
        default_backend = xb.get_backend()
        self.assertEqual(default_backend.platform, "cpu")
        with self.assertRaisesRegex(
                RuntimeError, "Backend 'none' failed to initialize: "
                "Could not initialize backend 'none'"):
            xb.get_backend("none")
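Examples #1, #2, #4, and #7 are methods excerpted from a larger test case and are not runnable on their own. A minimal sketch of the scaffolding they appear to assume follows; the class name and exact import path are assumptions, not part of the excerpt (the alias xb is taken from the snippets, and the xla_bridge module path varies across JAX versions).

# Hedged sketch of the assumed test harness; the real test file may differ.
from absl.testing import absltest
from jax._src import xla_bridge as xb  # import path is an assumption

class XlaBridgeTest(absltest.TestCase):  # hypothetical class name
    ...  # the test methods from Examples #1, #2, #4, and #7 go here

if __name__ == "__main__":
    absltest.main()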
Example #2
    def test_backend_init_error(self):
        def factory():
            raise RuntimeError("I'm not a real backend")

        xb.register_backend_factory("error", factory, priority=10)
        # No error raised if there's a fallback backend.
        default_backend = xb.get_backend()
        self.assertEqual(default_backend.platform, "cpu")

        with self.assertRaisesRegex(RuntimeError, "I'm not a real backend"):
            xb.get_backend("error")
Example #3
def initialize(coordinator_address: str, num_processes: int, process_id: int):
    """Initialize distributed system for topology discovery.

    Currently, calling ``initialize`` sets up the multi-host GPU backend, and
    is not required for CPU or TPU backends.

    Args:
      coordinator_address: IP address and port of the coordinator. The choice of
        port does not matter, so long as the port is available on the coordinator
        and all processes agree on the port.
      num_processes: Number of processes.
      process_id: ID of the current process.

    Example:

    Suppose there are two GPU hosts, and host 0 is the designated coordinator
    with address ``10.0.0.1:1234``. To initialize the GPU cluster, run the
    following commands before anything else.

    On host 0:

    >>> jax.distributed.initialize('10.0.0.1:1234', 2, 0)  # doctest: +SKIP

    On host 1:

    >>> jax.distributed.initialize('10.0.0.1:1234', 2, 1)  # doctest: +SKIP
    """
    if process_id == 0:
        global _service
        assert _service is None, 'initialize should be called once only'
        logging.info('Starting JAX distributed service on %s',
                     coordinator_address)
        _service = xla_extension.get_distributed_runtime_service(
            coordinator_address, num_processes)

    client = xla_extension.get_distributed_runtime_client(
        coordinator_address, process_id)
    logging.info('Connecting to JAX distributed service on %s',
                 coordinator_address)
    client.connect()

    factory = functools.partial(xla_client.make_gpu_client,
                                client,
                                process_id,
                                platform_name='cuda')
    xla_bridge.register_backend_factory('cuda', factory, priority=300)
    factory = functools.partial(xla_client.make_gpu_client,
                                client,
                                process_id,
                                platform_name='rocm')
    xla_bridge.register_backend_factory('rocm', factory, priority=300)
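The docstring above shows one command per host. As a variant, the same call can be driven from a single script when an external launcher exports a per-process rank; the environment variable name RANK below is a hypothetical launcher convention, not part of JAX.

import os
import jax

# Hypothetical: a launcher such as mpirun or SLURM exports a per-process rank.
process_id = int(os.environ.get('RANK', '0'))
jax.distributed.initialize('10.0.0.1:1234', num_processes=2,
                           process_id=process_id)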
Example #4
    def _register_factory(self,
                          platform: str,
                          priority,
                          device_count=1,
                          assert_used_at_most_once=False):
        if assert_used_at_most_once:
            used = []

        def factory():
            if assert_used_at_most_once:
                if used:
                    # We need to fail aggressively here since exceptions are caught by
                    # the caller and suppressed.
                    logging.fatal(
                        "Backend factory for %s was called more than once",
                        platform)
                else:
                    used.append(True)
            return self._DummyBackend(platform, device_count)

        xb.register_backend_factory(platform, factory, priority=priority)
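A sketch of how this helper might be exercised inside the same test case follows; the call is illustrative, not from the original excerpt. Because xla_bridge caches initialized backends, repeated get_backend calls should return the same object without re-running the factory, which is exactly what assert_used_at_most_once polices.

# Hypothetical usage inside the same TestCase as _register_factory above.
self._register_factory("cpu", priority=0, assert_used_at_most_once=True)
backend = xb.get_backend("cpu")
self.assertIs(backend, xb.get_backend("cpu"))  # cached; factory not re-run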
Example #5
def initialize(coordinator_address: Optional[str] = None,
               num_processes: Optional[int] = None,
               process_id: Optional[int] = None):
    """Initialize distributed system for topology discovery.

    Currently, calling ``initialize`` sets up the multi-host GPU backend and
    Cloud TPU backend.

    If you are on the GPU platform, you must provide ``coordinator_address``
    and the other arguments to the ``initialize`` API.

    If you are on the TPU platform, ``coordinator_address`` and the other
    arguments are detected automatically, though you may still provide them
    explicitly.

    Args:
      coordinator_address: IP address and port of the coordinator. The choice of
        port does not matter, so long as the port is available on the coordinator
        and all processes agree on the port. May be None only on the TPU
        platform; if None on TPU, it will be detected automatically.
      num_processes: Number of processes. May be None only on the TPU platform,
        in which case it is determined from the TPU slice metadata.
      process_id: ID of the current process. May be None only on the TPU
        platform, in which case it defaults to the current TPU worker ID
        determined via the TPU slice metadata.

    Raises:
      RuntimeError: If ``distributed.initialize`` is called more than once.

    Example:

    Suppose there are two GPU hosts, and host 0 is the designated coordinator
    with address ``10.0.0.1:1234``. To initialize the GPU cluster, run the
    following commands before anything else.

    On host 0:

    >>> jax.distributed.initialize('10.0.0.1:1234', 2, 0)  # doctest: +SKIP

    On host 1:

    >>> jax.distributed.initialize('10.0.0.1:1234', 2, 1)  # doctest: +SKIP
    """
    global_state.initialize(coordinator_address, num_processes, process_id)
    atexit.register(shutdown)
    if xla_client._version >= 65:
        factory = functools.partial(xla_client.make_gpu_client,
                                    global_state.client,
                                    process_id,
                                    platform_name='cuda')
        xla_bridge.register_backend_factory('cuda', factory, priority=300)
        factory = functools.partial(xla_client.make_gpu_client,
                                    global_state.client,
                                    process_id,
                                    platform_name='rocm')
        xla_bridge.register_backend_factory('rocm', factory, priority=300)
    else:
        factory = functools.partial(xla_client.make_gpu_client,
                                    global_state.client, process_id)
        xla_bridge.register_backend_factory('gpu', factory, priority=300)
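The branch on xla_client._version reflects a jaxlib change: older versions expose a single 'gpu' platform, while version 65 and later register 'cuda' and 'rocm' separately. A caller-side sketch of the consequence, with platform names taken from the registrations above:

# Which platform name resolves depends on the installed jaxlib version.
if xla_client._version >= 65:
    backend = xla_bridge.get_backend('cuda')  # or 'rocm'
else:
    backend = xla_bridge.get_backend('gpu')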
Example #6
def initialize(coordinator_address: Optional[str] = None,
               num_processes: Optional[int] = None,
               process_id: Optional[int] = None):
    """Initialize distributed system for topology discovery.

    Currently, calling ``initialize`` sets up the multi-host GPU backend and
    Cloud TPU backend.

    If you are on the GPU platform, you must provide ``coordinator_address``
    and the other arguments to the ``initialize`` API.

    If you are on the TPU platform, ``coordinator_address`` and the other
    arguments are detected automatically, though you may still provide them
    explicitly.

    Args:
      coordinator_address: IP address and port of the coordinator. The choice of
        port does not matter, so long as the port is available on the coordinator
        and all processes agree on the port. May be None only on the TPU
        platform; if None on TPU, it will be detected automatically.
      num_processes: Number of processes. May be None only on the TPU platform,
        in which case it is determined from the TPU slice metadata.
      process_id: ID of the current process. May be None only on the TPU
        platform, in which case it defaults to the current TPU worker ID
        determined via the TPU slice metadata.

    Raises:
      RuntimeError: If ``distributed.initialize`` is called more than once.

    Example:

    Suppose there are two GPU hosts, and host 0 is the designated coordinator
    with address ``10.0.0.1:1234``. To initialize the GPU cluster, run the
    following commands before anything else.

    On host 0:

    >>> jax.distributed.initialize('10.0.0.1:1234', 2, 0)  # doctest: +SKIP

    On host 1:

    >>> jax.distributed.initialize('10.0.0.1:1234', 2, 1)  # doctest: +SKIP
    """

    coordinator_address = os.environ.get('JAX_COORDINATOR_ADDRESS',
                                         None) or coordinator_address

    if cloud_tpu_init.running_in_cloud_tpu_vm:
        worker_endpoints = cloud_tpu_init.get_metadata(
            'worker-network-endpoints').split(',')
        if coordinator_address is None:
            coordinator_address = worker_endpoints[0].split(':')[2] + ':8476'
        if num_processes is None:
            num_processes = xla_bridge.process_count()
        if process_id is None:
            process_id = int(
                cloud_tpu_init.get_metadata('agent-worker-number'))

        if num_processes != len(worker_endpoints):
            raise RuntimeError(
                'Number of workers does not equal the number of '
                'processes. Auto-detecting process_id is not possible. '
                'Please pass process_id manually.')

    if coordinator_address is None:
        raise ValueError('coordinator_address should be defined.')
    if num_processes is None:
        raise ValueError('Number of processes must be defined.')
    if process_id is None:
        raise ValueError(
            'The process id of the current process must be defined.')

    if process_id == 0:
        global jax_service
        if jax_service is not None:
            raise RuntimeError(
                'distributed.initialize should only be called once.')

        logging.info('Starting JAX distributed service on %s',
                     coordinator_address)
        jax_service = xla_extension.get_distributed_runtime_service(
            coordinator_address, num_processes)

    global distributed_client
    if distributed_client is not None:
        raise RuntimeError(
            'distributed.initialize should only be called once.')

    distributed_client = xla_extension.get_distributed_runtime_client(
        coordinator_address, process_id)
    logging.info('Connecting to JAX distributed service on %s',
                 coordinator_address)
    distributed_client.connect()

    factory = functools.partial(xla_client.make_gpu_client,
                                distributed_client,
                                process_id,
                                platform_name='cuda')
    xla_bridge.register_backend_factory('cuda', factory, priority=300)
    factory = functools.partial(xla_client.make_gpu_client,
                                distributed_client,
                                process_id,
                                platform_name='rocm')
    xla_bridge.register_backend_factory('rocm', factory, priority=300)
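Note that this version consults JAX_COORDINATOR_ADDRESS before the coordinator_address argument. A hypothetical GPU launch leaning on that override; the address and process count are placeholders:

import os
import jax

os.environ['JAX_COORDINATOR_ADDRESS'] = '10.0.0.1:1234'  # wins over the argument
# On GPU (non-TPU) platforms, num_processes and process_id are still required:
jax.distributed.initialize(num_processes=2, process_id=0)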
Example #7
    def _register_factory(self, platform: str, priority, device_count=1):
        xb.register_backend_factory(
            platform, lambda: self._DummyBackend(platform, device_count),
            priority=priority)
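Read together with Examples #1 and #2, the priority argument decides which successfully initialized backend get_backend() returns by default. A hedged sketch using the dummy helper above; the platform name "custom" and the priority values are illustrative:

# Hypothetical test body: the highest-priority registered backend that
# initializes successfully becomes the default.
self._register_factory("cpu", priority=0)
self._register_factory("custom", priority=400)
self.assertEqual(xb.get_backend().platform, "custom")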