def _get_builtin_table():
    global _builtin_table
    if _builtin_table is not None:
        return _builtin_table
    _builtin_table = {}

    def register_all(mod):
        for name in dir(mod):
            v = getattr(mod, name)
            # Register every callable attribute as a builtin, skipping the
            # special functional bound ops and the context managers
            # torch.no_grad / torch.autocast, which are not scriptable ops.
            if (
                callable(v)
                and not _is_special_functional_bound_op(v)
                and v is not torch.no_grad
                and v is not torch.autocast
            ):
                _builtin_ops.append((v, "aten::" + name))

    for mod in _modules_containing_builtins:
        register_all(mod)

    _builtin_ops.append((math.gcd, "aten::gcd"))
    _builtin_ops.append((math.isfinite, "aten::isfinite"))
    _builtin_ops.append((math.remainder, "aten::mathremainder"))  # type: ignore[attr-defined]

    import torch.distributed.autograd as dist_autograd

    if dist_autograd.is_available():
        _builtin_ops.append((dist_autograd.get_gradients, "aten::get_gradients"))
        _builtin_ops.append((dist_autograd.backward, "aten::dist_backward"))

    # populate the _builtin_table from _builtin_ops
    for builtin, aten_op in _builtin_ops:
        _builtin_table[id(builtin)] = aten_op

    return _builtin_table
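
# --- Illustrative usage (a sketch, not part of the original module) ---
# A minimal example of how an id()-keyed table like this is typically
# consulted: resolve a Python callable to its "aten::" schema name by
# identity. The helper name `_find_builtin` is an assumption made for
# illustration; only `_get_builtin_table` above comes from the source.
import math

def _find_builtin(fn):
    # Identity-based lookup: keying by id() gives a uniform lookup for any
    # callable without invoking a custom __eq__/__hash__ it might define.
    # Returns None if `fn` was never registered as a builtin.
    return _get_builtin_table().get(id(fn))

# math.gcd was registered above, so it resolves to its aten name, while an
# arbitrary unregistered callable falls through to None.
assert _find_builtin(math.gcd) == "aten::gcd"
assert _find_builtin(lambda x: x) is None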