def _xla_call_translation_rule(ctx, avals_in, avals_out, *in_nodes, name,
                               backend=None, call_jaxpr, donated_invars,
                               inline=None, device=None):
  del device, donated_invars, inline  # Ignored.
  c = ctx.builder
  check_backend_matches(backend, ctx.platform)
  subc = xc.XlaBuilder(f"jit_{name}")
  args = [parameter(subc, i, c.get_shape(n)) for i, n in enumerate(in_nodes)]
  sub_ctx = ctx.replace(
      builder=subc,
      name_stack=extend_name_stack(ctx.name_stack, wrap_name(name, 'jit')))
  out_nodes = jaxpr_subcomp(sub_ctx, call_jaxpr, (), *args)
  if len(out_nodes) == 1:
    subc = subc.Build(out_nodes[0])
    return [xops.Call(c, subc, list(in_nodes))]
  else:
    subc = subc.Build(xops.Tuple(subc, out_nodes))
    return xla_destructure(c, xops.Call(c, subc, list(in_nodes)))
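
# Illustrative example (not part of the original module): this rule fires
# when lowering a jitted call. In contemporary releases, tracing a jitted
# function shows the xla_call primitive that this rule lowers; the exact
# printed form is version-dependent:
#
#   import jax
#   import jax.numpy as jnp
#
#   @jax.jit
#   def f(x):
#     return jnp.sin(x) + 1.0
#
#   print(jax.make_jaxpr(f)(1.0))  # jaxpr containing an xla_call[...] equation
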
def _sharded_jit_translation_rule(ctx, avals_in, avals_out, *in_nodes,
                                  in_parts, out_parts_thunk, nparts, name,
                                  call_jaxpr, local_in_parts,
                                  local_out_parts_thunk, local_nparts):
  subc = xc.XlaBuilder(f"sharded_jit_{name}")

  # We assume any extra leading in_nodes are constants and replicate them.
  num_extra_nodes = len(in_nodes) - len(in_parts)
  assert num_extra_nodes >= 0
  in_parts = (None,) * num_extra_nodes + in_parts

  args = []
  for i, (n, sharding) in enumerate(safe_zip(in_nodes, in_parts)):
    # We use xla.set_sharding instead of xla.with_sharding because inlined
    # calls shouldn't have shardings set directly on the inputs or outputs.
    arg = xla.parameter(subc, i, ctx.builder.GetShape(n))
    args.append(xla.set_sharding(subc, arg, sharding))

  sub_ctx = ctx.replace(
      builder=subc,
      name_stack=extend_name_stack(wrap_name(name, "sharded_jit")))
  out_nodes = xla.jaxpr_subcomp(sub_ctx, call_jaxpr, (), *args)
  out_parts = out_parts_thunk()
  assert len(out_parts) == len(out_nodes)
  out_nodes = [xla.set_sharding(subc, out, sharding)
               for out, sharding in safe_zip(out_nodes, out_parts)]

  subc = subc.build(xops.Tuple(subc, out_nodes))
  return xla.xla_destructure(ctx.builder,
                             xops.Call(ctx.builder, subc, list(in_nodes)))
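
# Usage sketch (hedged): the experimental API that reaches this rule. The
# import path and PartitionSpec location match sharded_jit-era releases but
# may vary by version; running it requires two devices on a supported
# backend (TPU/GPU):
#
#   from jax.interpreters.sharded_jit import sharded_jit, PartitionSpec as P
#   import jax.numpy as jnp
#
#   f = sharded_jit(lambda x: x * 2,
#                   in_parts=P(2, 1),    # shard rows across 2 devices
#                   out_parts=P(2, 1))   # output sharded the same way
#   y = f(jnp.ones((8, 4)))              # leading axis must divide evenly by 2
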
def _remat_using_cond(ctx, in_nodes, name, call_jaxpr):
  """Lower remat to a Conditional whose predicate is always true. This:
    1. Circumvents common subexpression elimination.
    2. In the common case of `jax.grad(jax.remat(f))`, ensures the remat
       blocks occur after the primal blocks, because the cotangent is an
       input to the Conditional."""
  # Fake condition which always selects the True branch.
  c = ctx.builder
  rng = xops.RngUniform(xops.Constant(c, np.array(0, dtype=np.float32)),
                        xops.Constant(c, np.array(1, dtype=np.float32)),
                        xc.Shape.array_shape(xc.PrimitiveType.F32, []))
  pred = xops.Lt(rng, xops.Constant(c, np.array(2, dtype=np.float32)))

  true_op = xops.Tuple(c, in_nodes)
  remat_subc = xc.XlaBuilder("remat_call_subcomputation")
  input_op = parameter(remat_subc, 0, c.get_shape(true_op), replicated=[])
  args = xla_destructure(remat_subc, input_op)
  sub_ctx = ctx.replace(
      builder=remat_subc,
      name_stack=extend_name_stack(ctx.name_stack, wrap_name(name, 'remat')))
  out_nodes = jaxpr_subcomp(sub_ctx, call_jaxpr, (), *args)
  out_node_shapes = [remat_subc.get_shape(o) for o in out_nodes]
  remat_subc = remat_subc.build(xops.Tuple(remat_subc, out_nodes))

  false_op = true_op
  dummy_subc = xc.XlaBuilder("remat_call_dummy_subcomputation")
  parameter(dummy_subc, 0, c.get_shape(false_op), replicated=[])
  out_nodes = [_zeros(dummy_subc, s) for s in out_node_shapes]
  dummy_subc = dummy_subc.build(xops.Tuple(dummy_subc, out_nodes))

  return xla_destructure(
      c, xops.Conditional(pred, true_op, remat_subc, false_op, dummy_subc))
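
# User-level effect (illustrative): the always-true Conditional keeps the
# rematerialized forward pass from being CSE'd away, so primal values are
# recomputed in the backward pass instead of being saved:
#
#   import jax
#   import jax.numpy as jnp
#
#   def f(x):
#     return jnp.sin(jnp.sin(x))
#
#   g = jax.grad(jax.remat(f))
#   g(1.0)  # inner sin values recomputed under the Conditional built above
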
def _remat_using_while(ctx, in_nodes, name, call_jaxpr):
  """Lower remat to a single-iteration while loop."""
  c = ctx.builder
  # Dummy subc for getting subcomp shapes.
  dummy_inputs = xops.Tuple(c, in_nodes)
  dummy_subc = xc.XlaBuilder("remat_dummy_subcomputation")
  dummy_input_op = parameter(dummy_subc, 0, c.get_shape(dummy_inputs),
                             replicated=[])
  dummy_args = xla_destructure(dummy_subc, dummy_input_op)
  dummy_ctx = ctx.replace(
      builder=dummy_subc,
      name_stack=extend_name_stack(ctx.name_stack, wrap_name(name, 'remat')))
  dummy_subcomp_outs = jaxpr_subcomp(dummy_ctx, call_jaxpr, (), *dummy_args)
  out_node_shapes = [dummy_subc.get_shape(o) for o in dummy_subcomp_outs]

  i_init = xops.Constant(c, np.array(0, dtype=np.int32))
  zeros_like_outs = [_zeros(c, s) for s in out_node_shapes]
  inputs = xops.Tuple(c, [i_init] + list(in_nodes) + zeros_like_outs)

  cond_subc = xc.XlaBuilder("remat_cond_subcomputation")
  input_op = parameter(cond_subc, 0, c.get_shape(inputs), replicated=[])
  i = xops.GetTupleElement(input_op, 0)
  rng = xops.RngUniform(
      xops.Constant(cond_subc, np.array(1, dtype=np.int32)),
      xops.Constant(cond_subc, np.array(2, dtype=np.int32)),
      xc.Shape.array_shape(xc.PrimitiveType.S32, []))
  cond_subc = cond_subc.build(xops.Lt(i, rng))

  body_subc = xc.XlaBuilder("remat_body_subcomputation")
  input_op = parameter(body_subc, 0, c.get_shape(inputs), replicated=[])
  i, *args = xla_destructure(body_subc, input_op)[:len(in_nodes) + 1]
  i_next = xops.Add(i, xops.Constant(body_subc, np.array(1, dtype=np.int32)))
  body_ctx = ctx.replace(
      builder=body_subc,
      name_stack=extend_name_stack(ctx.name_stack, wrap_name(name, 'remat')))
  subcomp_outs = jaxpr_subcomp(body_ctx, call_jaxpr, (), *args)
  out_nodes = [i_next] + args + list(subcomp_outs)
  body_subc = body_subc.build(xops.Tuple(body_subc, out_nodes))

  outs = xops.While(cond_subc, body_subc, inputs)
  return xla_destructure(c, outs)[len(in_nodes) + 1:]
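
# Carry-layout sketch (plain Python, hypothetical names): with two inputs and
# three outputs, the loop state built above is (i, in0, in1, out0, out1, out2),
# so the final destructure drops the first 1 + len(in_nodes) elements:
#
#   in_nodes = ["in0", "in1"]
#   outs = ["out0", "out1", "out2"]
#   carry = ["i"] + in_nodes + outs
#   assert carry[len(in_nodes) + 1:] == outs  # mirrors the trailing slice
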
def _named_call_translation_rule(ctx, avals_in, avals_out, *in_nodes,
                                 name="core_call", backend=None, call_jaxpr):
  check_backend_matches(backend, ctx.platform)
  c = ctx.builder
  subc = xc.XlaBuilder(name)
  args = [parameter(subc, i, c.GetShape(n)) for i, n in enumerate(in_nodes)]
  sub_ctx = ctx.replace(builder=subc,
                        name_stack=extend_name_stack(ctx.name_stack, name))
  out_nodes = jaxpr_subcomp(sub_ctx, call_jaxpr, (), *args)
  subc = subc.Build(xops.Tuple(subc, out_nodes))
  return xla_destructure(c, xops.Call(c, subc, list(in_nodes)))
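
# Usage sketch (jax.named_call is the public entry point in contemporary
# releases; exact re-export location may vary by version):
#
#   import jax
#
#   def block(x):
#     return x + 1
#
#   y = jax.named_call(block, name="my_block")(2.0)
#   # The HLO contains a call into a subcomputation named "my_block",
#   # produced by the rule above.
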
def _sharded_jit_lowering(ctx, *in_nodes, in_parts, out_parts_thunk, nparts,
                          name, call_jaxpr, local_in_parts,
                          local_out_parts_thunk, local_nparts):
  # We assume any extra leading in_nodes are constants and replicate them.
  num_extra_nodes = len(in_nodes) - len(in_parts)
  assert num_extra_nodes >= 0
  in_parts = (None,) * num_extra_nodes + in_parts

  args = []
  for ns, sharding in safe_zip(
      safe_map(mlir.wrap_singleton_ir_values, in_nodes), in_parts):
    if sharding is not None:
      args.append([
          mlir.wrap_with_sharding_op(n, xla.sharding_to_proto(sharding))
          for n in ns
      ])
    else:
      args.append(ns)

  sub_ctx = ctx.module_context.replace(
      name_stack=extend_name_stack(wrap_name(name, "sharded_jit")))
  fn = mlir.lower_jaxpr_to_fun(sub_ctx, f"sharded_jit_{name}",
                               core.ClosedJaxpr(call_jaxpr, ()))

  output_types = safe_map(mlir.aval_to_ir_types, ctx.avals_out)
  flat_output_types = util.flatten(output_types)
  call = std.CallOp(flat_output_types,
                    ir.FlatSymbolRefAttr.get(fn.name.value),
                    mlir.flatten_lowering_ir_args(args))
  out_nodes = util.unflatten(call.results, safe_map(len, output_types))

  out_parts = out_parts_thunk()
  outputs = []
  for ns, sharding in safe_zip(out_nodes, out_parts):
    if sharding is not None:
      outputs.append([
          mlir.wrap_with_sharding_op(n, xla.sharding_to_proto(sharding))
          for n in ns
      ])
    else:
      outputs.append(ns)
  return outputs
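
# Note on the leading-constants convention shared with the XLA rule above
# (plain-Python sketch, hypothetical stand-in values): if the lowered call
# receives more IR values than partition specs, the extras are treated as
# closed-over constants and replicated:
#
#   in_parts = ("spec_a", "spec_b")      # stand-ins for PartitionSpecs
#   num_in_nodes = 4                     # two extra leading constants
#   num_extra = num_in_nodes - len(in_parts)
#   padded = (None,) * num_extra + in_parts
#   assert padded == (None, None, "spec_a", "spec_b")
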
def _cond_lowering(ctx, index, *args, branches, linear):
  del linear  # Unused.
  joined_effects = core.join_effects(*(branch.effects for branch in branches))
  ordered_effects = [eff for eff in joined_effects
                     if eff in core.ordered_effects]
  num_tokens = len(ordered_effects)
  tokens_in = ctx.tokens_in.subset(ordered_effects)
  output_token_types = [mlir.token_type() for _ in ordered_effects]
  output_types = [
      *output_token_types, *_map(mlir.aval_to_ir_types, ctx.avals_out)]
  flat_output_types = util.flatten(output_types)

  # mhlo.CaseOp takes a single argument 'index' and the corresponding blocks
  # have no arguments; the computation within the block uses implicit
  # captures.
  case_op = mhlo.CaseOp(flat_output_types, index=index,
                        num_branches=len(branches))
  name_stack = extend_name_stack(ctx.module_context.name_stack, 'cond')
  for i, jaxpr in enumerate(branches):
    branch = case_op.regions[i].blocks.append()
    with ir.InsertionPoint(branch):
      sub_ctx = ctx.module_context.replace(
          name_stack=xla.extend_name_stack(name_stack, f'branch_{i}_fun'))
      out_vals, tokens_out = mlir.jaxpr_subcomp(
          sub_ctx, jaxpr.jaxpr, tokens_in,
          _map(mlir.ir_constants, jaxpr.consts),
          *_map(mlir.wrap_singleton_ir_values, args))
      out_tokens = [tokens_out.get(eff) for eff in ordered_effects]
      out_vals = [*out_tokens, *out_vals]
      mhlo.ReturnOp(util.flatten(out_vals))

  tokens_and_outputs = util.unflatten(case_op.results, _map(len, output_types))
  tokens, outputs = util.split_list(tokens_and_outputs, [num_tokens])
  ctx.set_tokens_out(mlir.TokenSet(zip(ordered_effects, tokens)))
  return outputs
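
# User-level example (illustrative): jax.lax.cond and lax.switch lower
# through this rule; the two-branch form shown here is the standard API:
#
#   from jax import lax
#
#   def true_fn(x):
#     return x + 1.
#
#   def false_fn(x):
#     return x - 1.
#
#   lax.cond(True, true_fn, false_fn, 0.)  # becomes an mhlo.CaseOp with one
#                                          # region per branch, as built above
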
def _sharded_callable(
    fun: lu.WrappedFun, nparts: Optional[int],
    in_parts: Tuple[pxla.PartitionsOrReplicated, ...],
    out_parts_thunk: Callable[[], Tuple[pxla.PartitionsOrReplicated, ...]],
    local_in_parts: Optional[Tuple[pxla.PartitionsOrReplicated, ...]],
    local_out_parts_thunk: Callable[[], Optional[Tuple[
        pxla.PartitionsOrReplicated, ...]]],
    local_nparts: Optional[int], name: str, *abstract_args):
  nrep = 1

  if local_in_parts is None:
    local_in_parts = in_parts

  global_abstract_args = [
      pxla.get_global_aval(arg, parts, lparts)
      for arg, parts, lparts in safe_zip(abstract_args, in_parts,
                                         local_in_parts)]

  if logging.vlog_is_on(2):
    logging.vlog(2, "abstract_args: %s", abstract_args)
    logging.vlog(2, "global_abstract_args: %s", global_abstract_args)
    logging.vlog(2, "in_parts: %s", in_parts)
    logging.vlog(2, "local_in_parts: %s", local_in_parts)

  jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_final(
      fun, global_abstract_args)

  platform = xb.get_backend().platform
  if platform not in ["tpu", "gpu"]:
    # TODO(skye): fall back to regular jit?
    raise ValueError(f"sharded_jit not supported for {platform}")

  nparts = pxla.reconcile_num_partitions(jaxpr, nparts)
  assert nparts is not None
  if nparts > xb.device_count():
    raise ValueError(
        f"sharded_jit computation requires {nparts} devices, "
        f"but only {xb.device_count()} devices are available.")
  if xb.local_device_count() < nparts < xb.device_count():
    raise NotImplementedError(
        f"sharded_jit across multiple hosts must use all available devices. "
        f"Got {nparts} out of {xb.device_count()} requested devices "
        f"(local device count: {xb.local_device_count()})")

  if local_nparts is None:
    if nparts > xb.local_device_count():
      raise ValueError(
          "Specify 'local_nparts' when using cross-process sharded_jit "
          "and all inputs and outputs are replicated.")
    else:
      local_nparts = nparts
  if local_nparts > xb.local_device_count():
    raise ValueError(
        f"sharded_jit computation requires {local_nparts} local devices, "
        f"but only {xb.local_device_count()} local devices are available.")

  if logging.vlog_is_on(2):
    logging.vlog(2, "nparts: %d local_nparts: %d", nparts, local_nparts)

  out_parts = out_parts_thunk()

  local_out_parts = local_out_parts_thunk()
  if local_out_parts is None:
    local_out_parts = out_parts

  if logging.vlog_is_on(2):
    logging.vlog(2, "out_parts: %s", out_parts)
    logging.vlog(2, "local_out_parts: %s", local_out_parts)

  local_out_avals = [
      pxla.get_local_aval(out, parts, lparts)
      for out, parts, lparts in safe_zip(global_out_avals, out_parts,
                                         local_out_parts)]

  log_priority = logging.WARNING if config.jax_log_compiles else logging.DEBUG
  logging.log(log_priority, "Compiling %s for %d devices with args %s.",
              fun.__name__, nparts, global_abstract_args)

  c = xc.XlaBuilder("spjit_{}".format(fun.__name__))
  xla_consts = _map(partial(xla.pyval_to_ir_constant, c), consts)
  xla_args = _xla_sharded_args(c, global_abstract_args, in_parts)
  axis_env = xla.AxisEnv(nrep, (), ())
  ctx = xla.TranslationContext(
      c, platform, axis_env,
      extend_name_stack(wrap_name(name, "sharded_jit")))
  out_nodes = xla.jaxpr_subcomp(ctx, jaxpr, xla_consts, *xla_args)
  out_tuple = xla.with_sharding(c, out_parts, xops.Tuple, c, out_nodes)
  built = c.Build(out_tuple)

  if nparts <= xb.local_device_count():
    devices = xb.local_devices()[:nparts]
  else:
    assert nparts == xb.device_count()
    devices = xb.devices()
  device_assignment = np.array([[d.id for d in devices]])
  device_assignment = np.reshape(device_assignment, (-1, nparts))
  # device_assignment = None  # TODO(skye): replace with default device assignment?

  compiled = dispatch.backend_compile(
      xb.get_backend(), built,
      xb.get_compile_options(nrep, nparts, device_assignment))

  input_specs = [
      pxla.partitioned_sharding_spec(local_nparts, parts, aval)
      for parts, aval in zip(local_in_parts, abstract_args)]
  input_indices = [
      pxla.spec_to_indices(aval.shape, spec) if spec is not None else None
      for aval, spec in zip(abstract_args, input_specs)]

  handle_args = partial(pxla.shard_args, compiled.local_devices(),
                        input_indices)
  handle_outs = _avals_to_results_handler(nrep, local_nparts,  # type: ignore
                                          local_out_parts, local_out_avals)
  return partial(_execute_spatially_partitioned, compiled, handle_args,
                 handle_outs)
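
# Execution sketch (hedged): the callable returned above threads the argument
# and result handlers around the compiled executable. A minimal mirror of what
# _execute_spatially_partitioned (defined elsewhere in this module) does,
# assuming the jaxlib executable API of this era:
#
#   def _execute_spatially_partitioned(compiled, in_handler, out_handler,
#                                      *args):
#     input_bufs = in_handler(args)
#     out_bufs = compiled.execute_sharded_on_local_devices(input_bufs)
#     return out_handler(out_bufs)
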