Code example #1
File: dispatch.py  Project: jbampton/jax
def lower_xla_callable(fun: lu.WrappedFun, device, backend, name,
                       donated_invars, *arg_specs):
  if device is not None and backend is not None:
    raise ValueError("can't specify both a device and a backend for jit, "
                     "got device={} and backend={}".format(device, backend))

  abstract_args, arg_devices = util.unzip2(arg_specs)
  with log_elapsed_time(f"Finished tracing + transforming {fun.__name__} "
                        "for jit in {elapsed_time} sec"):
    jaxpr, out_avals, consts = pe.trace_to_jaxpr_final(
        fun, abstract_args, pe.debug_info_final(fun, "jit"))
  if any(isinstance(c, core.Tracer) for c in consts):
    raise UnexpectedTracerError("Encountered an unexpected tracer.")
  jaxpr, kept_const_idx, kept_var_idx = _prune_unused_inputs(jaxpr)
  consts = [c for i, c in enumerate(consts) if i in kept_const_idx]
  pruned_arg_specs = (a for i, a in enumerate(arg_specs) if i in kept_var_idx)
  abstract_args, arg_devices = util.unzip2(pruned_arg_specs)
  donated_invars = [
      x for i, x in enumerate(donated_invars) if i in kept_var_idx
  ]
  map(prefetch, itertools.chain(consts, jaxpr_literals(jaxpr)))
  jaxpr = apply_outfeed_rewriter(jaxpr)

  nreps = jaxpr_replicas(jaxpr)
  device = _xla_callable_device(nreps, backend, device, arg_devices)
  backend = xb.get_device_backend(device) if device else xb.get_backend(backend)

  # Computations that only produce constants and/or only rearrange their inputs,
  # which are often produced from partial evaluation, don't need compilation,
  # and don't need to evaluate their arguments.
  if not jaxpr.eqns:
    return XlaComputation(
        name, None, True, None, jaxpr=jaxpr, consts=consts, device=device,
        in_avals=abstract_args, out_avals=out_avals, kept_var_idx=kept_var_idx)

  if not _on_exit:
    log_priority = logging.WARNING if config.jax_log_compiles else logging.DEBUG
    if len(abstract_args) > 10:
      msg = f"Compiling {fun.__name__} ({id(fun)}) for {len(abstract_args)} args."
    else:
      msg = f"Compiling {fun.__name__} ({id(fun)} for args {abstract_args}."
    logging.log(log_priority, msg)

  if nreps > 1:
    warnings.warn(
        f"The jitted function {name} includes a pmap. Using "
         "jit-of-pmap can lead to inefficient data movement, as the outer jit "
         "does not preserve sharded data representations and instead collects "
         "input and output arrays onto a single device. "
         "Consider removing the outer jit unless you know what you're doing. "
         "See https://github.com/google/jax/issues/2926.")

  if nreps > xb.device_count(backend):
    raise ValueError(
        f"compiling computation `{name}` that requires {nreps} replicas, but "
        f"only {xb.device_count(backend)} XLA devices are available.")

  if xb.process_count() > 1 and (nreps > 1 or jaxpr_has_pmap(jaxpr)):
    raise NotImplementedError(
        "jit of multi-host pmap not implemented (and jit-of-pmap can cause "
        "extra data movement anyway, so maybe you don't want it after all).")

  # pass long arg lists as tuple for TPU
  tuple_args = len(abstract_args) > 100
  axis_env = xla.AxisEnv(nreps, (), ())
  name_stack = xla.new_name_stack(xla.wrap_name(name, 'jit'))
  closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)
  module: Union[str, xc.XlaComputation]
  module_name = f"jit_{fun.__name__}"
  if config.jax_enable_mlir:
    module = mlir.lower_jaxpr_to_module(
        module_name, closed_jaxpr, backend.platform,
        mlir.ReplicaAxisContext(axis_env), name_stack, donated_invars)
  else:
    module = xla.lower_jaxpr_to_xla_module(
        module_name, closed_jaxpr, backend.platform, axis_env,
        name_stack, tuple_args, donated_invars, replicated_args=None,
        arg_partitions=None, out_partitions=None)
  return XlaComputation(
      name, module, False, donated_invars, nreps=nreps, device=device,
      backend=backend, tuple_args=tuple_args, in_avals=abstract_args,
      out_avals=out_avals, kept_var_idx=kept_var_idx)
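
For context, this internal lowering routine is normally reached through the public `jit` entry points rather than called directly. Below is a minimal sketch of exercising the same path through the ahead-of-time staging API (`.lower()` / `.compile()` are the public methods; exact behavior depends on the JAX version):

import jax
import jax.numpy as jnp

@jax.jit
def f(x):
  return jnp.sin(x) + 1.0

# Tracing + lowering for concrete argument types; under the hood this goes
# through a routine like lower_xla_callable above.
lowered = f.lower(jnp.ones((8,), jnp.float32))

# Hand the lowered module to the XLA backend and run the compiled executable.
compiled = lowered.compile()
print(compiled(jnp.ones((8,), jnp.float32)))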
Code example #2
File: dispatch.py  Project: cloudhan/jax
def lower_xla_callable(fun: lu.WrappedFun, device, backend, name,
                       donated_invars, always_lower: bool, keep_unused: bool,
                       *arg_specs):
    """Lower into XLA.

  Args:
    always_lower: If `True`, even trivial programs that perform no computation
      (such as `lambda x: x`) will be lowered into an XLA program.
    keep_unused: If `False` (the default), arguments that JAX determines to be
      unused by `fun` *may* be dropped from resulting compiled XLA executables.
      Such arguments will not be transferred to the device nor provided to the
      underlying executable. If `True`, unused arguments will not be pruned.
  """
    if device is not None and backend is not None:
        raise ValueError("can't specify both a device and a backend for jit, "
                         "got device={} and backend={}".format(
                             device, backend))
    abstract_args, arg_devices = util.unzip2(arg_specs)
    if fun.in_type is not None:
        abstract_args, which_explicit = util.unzip2(fun.in_type)
    else:
        which_explicit = None
    with log_elapsed_time(f"Finished tracing + transforming {fun.__name__} "
                          "for jit in {elapsed_time} sec"):
        jaxpr, out_avals, consts = pe.trace_to_jaxpr_final(
            fun, abstract_args, pe.debug_info_final(fun, "jit"),
            which_explicit)
    if any(isinstance(c, core.Tracer) for c in consts):
        raise UnexpectedTracerError("Encountered an unexpected tracer.")
    # TODO(mattjj): handle argument pruning w/ dynamic shapes
    if fun.in_type is None and not keep_unused:
        jaxpr, kept_const_idx, kept_var_idx = _prune_unused_inputs(jaxpr)
        consts = [c for i, c in enumerate(consts) if i in kept_const_idx]
        abstract_args, arg_devices = util.unzip2(
            [a for i, a in enumerate(arg_specs) if i in kept_var_idx])
        donated_invars = [
            x for i, x in enumerate(donated_invars) if i in kept_var_idx
        ]
        del kept_const_idx
    else:
        kept_var_idx = set(range(len(abstract_args)))
    map(prefetch, itertools.chain(consts, jaxpr_literals(jaxpr)))
    jaxpr = apply_outfeed_rewriter(jaxpr)

    nreps = jaxpr_replicas(jaxpr)
    device = _xla_callable_device(nreps, backend, device, arg_devices)
    backend = xb.get_device_backend(device) if device else xb.get_backend(
        backend)

    if (config.jax_dynamic_shapes and jaxpr_has_bints(jaxpr)
            and not _backend_supports_unbounded_dynamic_shapes(backend)):
        jaxpr, consts = pe.pad_jaxpr(jaxpr, consts)

    # Computations that only produce constants and/or only rearrange their inputs,
    # which are often produced from partial evaluation, don't need compilation,
    # and don't need to evaluate their arguments.
    if not jaxpr.eqns and not always_lower:
        return XlaComputation(name,
                              None,
                              True,
                              None,
                              None,
                              jaxpr=jaxpr,
                              consts=consts,
                              device=device,
                              in_avals=abstract_args,
                              out_avals=out_avals,
                              has_unordered_effects=False,
                              ordered_effects=[],
                              kept_var_idx=kept_var_idx,
                              keepalive=None)

    if not _on_exit:
        log_priority = logging.WARNING if config.jax_log_compiles else logging.DEBUG
        if len(abstract_args) > 10:
            msg = f"Compiling {fun.__name__} ({id(fun)}) for {len(abstract_args)} args."
        else:
            msg = f"Compiling {fun.__name__} ({id(fun)} for args {abstract_args}."
        logging.log(log_priority, msg)

    if nreps > 1:
        warnings.warn(
            f"The jitted function {name} includes a pmap. Using "
            "jit-of-pmap can lead to inefficient data movement, as the outer jit "
            "does not preserve sharded data representations and instead collects "
            "input and output arrays onto a single device. "
            "Consider removing the outer jit unless you know what you're doing. "
            "See https://github.com/google/jax/issues/2926.")

    if nreps > xb.device_count(backend):
        raise ValueError(
            f"compiling computation `{name}` that requires {nreps} replicas, but "
            f"only {xb.device_count(backend)} XLA devices are available.")

    if xb.process_count() > 1 and (nreps > 1 or jaxpr_has_pmap(jaxpr)):
        raise NotImplementedError(
            "jit of multi-host pmap not implemented (and jit-of-pmap can cause "
            "extra data movement anyway, so maybe you don't want it after all)."
        )

    # pass long arg lists as tuple for TPU
    tuple_args = len(abstract_args) > 100
    axis_env = xla.AxisEnv(nreps, (), ())
    name_stack = util.new_name_stack(util.wrap_name(name, 'jit'))
    closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)
    module_name = f"jit_{fun.__name__}"
    unordered_effects = [
        eff for eff in closed_jaxpr.effects if eff not in core.ordered_effects
    ]
    ordered_effects = [
        eff for eff in closed_jaxpr.effects if eff in core.ordered_effects
    ]
    module, keepalive = mlir.lower_jaxpr_to_module(
        module_name, closed_jaxpr,
        unordered_effects, ordered_effects, backend.platform,
        mlir.ReplicaAxisContext(axis_env), name_stack, donated_invars)
    return XlaComputation(name,
                          module,
                          False,
                          donated_invars,
                          which_explicit,
                          nreps=nreps,
                          device=device,
                          backend=backend,
                          tuple_args=tuple_args,
                          in_avals=abstract_args,
                          out_avals=out_avals,
                          has_unordered_effects=bool(unordered_effects),
                          ordered_effects=ordered_effects,
                          kept_var_idx=kept_var_idx,
                          keepalive=keepalive)
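
Compared with example #1, this version adds `always_lower` (so `.lower()` can produce an XLA module even for trivial programs), `keep_unused` (to skip input pruning), and bookkeeping for ordered/unordered effects, and it lowers exclusively through MLIR. The pruning behavior that `keep_unused` controls can be illustrated at the public API level; this is a hedged sketch, assuming your JAX version exposes `keep_unused` on `jax.jit`:

import jax
import jax.numpy as jnp

def f(x, unused):
  # `unused` does not affect the output, so by default JAX may prune it from
  # the compiled executable and never transfer it to the device.
  return x * 2.0

pruned = jax.jit(f)                  # default: unused args may be dropped
kept = jax.jit(f, keep_unused=True)  # unused args stay in the executable

x = jnp.arange(4.0)
big = jnp.zeros((1000, 1000))        # transferring this would be wasteful
print(pruned(x, big))
print(kept(x, big))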
Code example #3
def initialize(coordinator_address: Optional[str] = None,
               num_processes: Optional[int] = None,
               process_id: Optional[int] = None):
    """Initialize distributed system for topology discovery.

  Currently, calling ``initialize`` sets up the multi-host GPU backend and Cloud
  TPU backend.

  If you are on a GPU platform, you must provide the coordinator_address and
  the other args to the `initialize` API.

  If you are on a TPU platform, the coordinator_address and other args will be
  auto-detected, but you still have the option to provide them.

  Args:
    coordinator_address: IP address and port of the coordinator. The choice of
      port does not matter, so long as the port is available on the coordinator
      and all processes agree on the port. Can be None only on the TPU
      platform, in which case it will be auto-detected.
    num_processes: Number of processes. Can be None only on the TPU platform,
      in which case it is determined from the TPU slice metadata.
    process_id: Id of the current process. Can be None only on the TPU
      platform, in which case it defaults to the current TPU worker id, as
      determined from the TPU slice metadata.

  Raises:
    RuntimeError: If `distributed.initialize` is called more than once.

  Example:

  Suppose there are two GPU hosts, and host 0 is the designated coordinator
  with address ``10.0.0.1:1234``. To initialize the GPU cluster, run the
  following commands before anything else.

  On host 0:

  >>> jax.distributed.initialize('10.0.0.1:1234', 2, 0)  # doctest: +SKIP

  On host 1:

  >>> jax.distributed.initialize('10.0.0.1:1234', 2, 1)  # doctest: +SKIP
  """

    coordinator_address = os.environ.get('JAX_COORDINATOR_ADDRESS',
                                         None) or coordinator_address

    if cloud_tpu_init.running_in_cloud_tpu_vm:
        worker_endpoints = cloud_tpu_init.get_metadata(
            'worker-network-endpoints').split(',')
        if coordinator_address is None:
            coordinator_address = worker_endpoints[0].split(':')[2] + ':8476'
        if num_processes is None:
            num_processes = xla_bridge.process_count()
        if process_id is None:
            process_id = int(
                cloud_tpu_init.get_metadata('agent-worker-number'))

        if num_processes != len(worker_endpoints):
            raise RuntimeError(
                'Number of workers does not equal the number of '
                'processes. Auto-detecting process_id is not possible. '
                'Please pass process_id manually.')

    if coordinator_address is None:
        raise ValueError('coordinator_address should be defined.')
    if num_processes is None:
        raise ValueError('Number of processes must be defined.')
    if process_id is None:
        raise ValueError(
            'The process id of the current process must be defined.')

    if process_id == 0:
        global jax_service
        if jax_service is not None:
            raise RuntimeError(
                'distributed.initialize should only be called once.')

        logging.info('Starting JAX distributed service on %s',
                     coordinator_address)
        jax_service = xla_extension.get_distributed_runtime_service(
            coordinator_address, num_processes)

    global distributed_client
    if distributed_client is not None:
        raise RuntimeError(
            'distributed.initialize should only be called once.')

    distributed_client = xla_extension.get_distributed_runtime_client(
        coordinator_address, process_id)
    logging.info('Connecting to JAX distributed service on %s',
                 coordinator_address)
    distributed_client.connect()

    factory = functools.partial(xla_client.make_gpu_client,
                                distributed_client,
                                process_id,
                                platform_name='cuda')
    xla_bridge.register_backend_factory('cuda', factory, priority=300)
    factory = functools.partial(xla_client.make_gpu_client,
                                distributed_client,
                                process_id,
                                platform_name='rocm')
    xla_bridge.register_backend_factory('rocm', factory, priority=300)
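
As the first lines of the function show, the coordinator address can also be supplied through the `JAX_COORDINATOR_ADDRESS` environment variable instead of the first argument. A hedged sketch of a two-host GPU launch built on that; `RANK` is a hypothetical variable standing in for whatever your job launcher exports:

import os
import jax

# Exported by the launcher on every host, e.g.:
#   JAX_COORDINATOR_ADDRESS=10.0.0.1:1234
#   RANK=0 (host 0) or RANK=1 (host 1)
process_id = int(os.environ['RANK'])  # hypothetical launcher-provided rank

# coordinator_address is picked up from JAX_COORDINATOR_ADDRESS, so only the
# process count and this host's id need to be passed explicitly.
jax.distributed.initialize(num_processes=2, process_id=process_id)

print(jax.process_index(), jax.device_count())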
Code example #4
    def initialize(self,
                   coordinator_address: Optional[str] = None,
                   num_processes: Optional[int] = None,
                   process_id: Optional[int] = None):
        coordinator_address = os.environ.get('JAX_COORDINATOR_ADDRESS',
                                             None) or coordinator_address

        if cloud_tpu_init.running_in_cloud_tpu_vm:
            worker_endpoints = cloud_tpu_init.get_metadata(
                'worker-network-endpoints').split(',')
            if coordinator_address is None:
                coordinator_address = worker_endpoints[0].split(
                    ':')[2] + ':8476'
            if num_processes is None:
                num_processes = xla_bridge.process_count()
            if process_id is None:
                process_id = int(
                    cloud_tpu_init.get_metadata('agent-worker-number'))

            if num_processes != len(worker_endpoints):
                raise RuntimeError(
                    'Number of workers does not equal the number of '
                    'processes. Auto-detecting process_id is not possible. '
                    'Please pass process_id manually.')

        if coordinator_address is None:
            raise ValueError('coordinator_address should be defined.')
        if num_processes is None:
            raise ValueError('Number of processes must be defined.')
        if process_id is None:
            raise ValueError(
                'The process id of the current process must be defined.')

        if process_id == 0:
            if self.service is not None:
                raise RuntimeError(
                    'distributed.initialize should only be called once.')
            logging.info('Starting JAX distributed service on %s',
                         coordinator_address)
            if xla_client._version >= 72:
                self.service = xla_extension.get_distributed_runtime_service(
                    coordinator_address, num_processes,
                    config.jax_coordination_service)
            else:
                self.service = xla_extension.get_distributed_runtime_service(
                    coordinator_address, num_processes)

        if self.client is not None:
            raise RuntimeError(
                'distributed.initialize should only be called once.')

        if xla_client._version >= 72:
            self.client = xla_extension.get_distributed_runtime_client(
                coordinator_address, process_id,
                config.jax_coordination_service)
        else:
            self.client = xla_extension.get_distributed_runtime_client(
                coordinator_address, process_id)
        logging.info('Connecting to JAX distributed service on %s',
                     coordinator_address)
        self.client.connect()
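
Example #4 wraps the same logic as example #3 in a class that owns the `service` and `client` handles and gates the coordination-service argument on `xla_client._version`. Per the docstring in example #3, on a Cloud TPU VM everything is auto-detected, so the public call reduces to the following (illustrative; behavior follows that docstring):

import jax

# On a Cloud TPU VM the coordinator address, process count, and process id are
# read from the TPU slice metadata, so no arguments are required.
jax.distributed.initialize()

print(jax.process_index(), jax.process_count())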