Example #1
def generate_code(ninja_global=None,
                  declarations_path=None,
                  nn_path=None,
                  install_dir=None):
    # cwrap depends on pyyaml, so we can't import it earlier
    root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, root)
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers

    # Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily
    # used by the legacy NN bindings.
    generate_nn_wrappers(nn_path, install_dir, 'tools/cwrap/plugins/templates')

    # Build ATen based Variable classes
    autograd_gen_dir = install_dir or 'torch/csrc/autograd/generated'
    jit_gen_dir = install_dir or 'torch/csrc/jit/generated'
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            os.makedirs(d)
    gen_autograd(declarations_path or DECLARATIONS_PATH, autograd_gen_dir,
                 'tools/autograd')
    gen_jit_dispatch(declarations_path or DECLARATIONS_PATH, jit_gen_dir,
                     'tools/jit/templates')
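
All of these snippets are excerpts from a larger module: they assume standard-library imports and a few module-level path constants defined elsewhere. A minimal preamble under those assumptions is sketched below; the constant values are illustrative placeholders, not the real paths.

import os
import pathlib
import sys
from typing import Any, Optional

# Hypothetical defaults, for illustration only; in the real module these
# point at generated ATen metadata and vary across versions.
DECLARATIONS_PATH = 'torch/share/ATen/Declarations.yaml'
NATIVE_FUNCTIONS_PATH = 'aten/src/ATen/native/native_functions.yaml'
TAGS_PATH = 'aten/src/ATen/native/tags.yaml'
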
Example #2
def generate_code(ninja_global=None):
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)

    # cwrap depends on pyyaml, so we can't import it earlier
    root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, root)
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers

    # Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily
    # used by the legacy NN bindings.
    generate_nn_wrappers()

    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            os.mkdir(d)
    gen_autograd(
        'torch/lib/tmp_install/share/ATen/Declarations.yaml',
        autograd_gen_dir)
    gen_jit_dispatch(
        'torch/lib/tmp_install/share/ATen/Declarations.yaml',
        jit_gen_dir)
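
Several variants (Examples #2, #3, #4, #9, #11) short-circuit into generate_code_ninja when a ninja writer object is passed in; that helper is defined elsewhere in the module. A purely illustrative stub that documents the contract, so the snippets can be traced without ninja:

def generate_code_ninja(ninja_global):
    # In the real module this registers generate_code as a build step on
    # the passed ninja writer; this stub exists only for illustration.
    raise NotImplementedError('illustrative stub, not the real ninja registration')
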
Example #3
def generate_code(ninja_global=None,
                  declarations_path=None,
                  nn_path=None,
                  install_dir=None):
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)

    # cwrap depends on pyyaml, so we can't import it earlier
    root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, root)
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers

    # Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily
    # used by the legacy NN bindings.
    generate_nn_wrappers(nn_path, install_dir, 'tools/cwrap/plugins/templates')

    # Build ATen based Variable classes
    autograd_gen_dir = install_dir or 'torch/csrc/autograd/generated'
    jit_gen_dir = install_dir or 'torch/csrc/jit/generated'
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            os.makedirs(d)
    gen_autograd(declarations_path or DECLARATIONS_PATH, autograd_gen_dir, 'tools/autograd')
    gen_jit_dispatch(declarations_path or DECLARATIONS_PATH, jit_gen_dir, 'tools/jit/templates')
Example #4
def generate_code(ninja_global=None, declarations_path=None, nn_path=None):
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)

    # cwrap depends on pyyaml, so we can't import it earlier
    root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, root)
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers

    # Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily
    # used by the legacy NN bindings.
    generate_nn_wrappers(nn_path)

    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            os.mkdir(d)
    gen_autograd(declarations_path or DECLARATIONS_PATH, autograd_gen_dir)
    gen_jit_dispatch(declarations_path or DECLARATIONS_PATH, jit_gen_dir)
Example #5
def generate_code(ninja_global=None,
                  declarations_path=None,
                  nn_path=None,
                  install_dir=None,
                  subset=None):
    # cwrap depends on pyyaml, so we can't import it earlier
    root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, root)
    from tools.autograd.gen_autograd import gen_autograd, gen_autograd_python
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch

    # Build ATen based Variable classes
    autograd_gen_dir = install_dir or 'torch/csrc/autograd/generated'
    jit_gen_dir = install_dir or 'torch/csrc/jit/generated'
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            os.makedirs(d)

    if subset == "pybindings" or not subset:
        gen_autograd_python(declarations_path or DECLARATIONS_PATH,
                            autograd_gen_dir, 'tools/autograd')

    if subset == "libtorch" or not subset:
        gen_autograd(declarations_path or DECLARATIONS_PATH, autograd_gen_dir,
                     'tools/autograd')
        gen_jit_dispatch(declarations_path or DECLARATIONS_PATH, jit_gen_dir,
                         'tools/jit/templates')
Example #6
def generate_code(ninja_global=None,
                  declarations_path=None,
                  nn_path=None,
                  install_dir=None,
                  subset=None,
                  disable_autograd=False,
                  selected_op_list_path=None,
                  selected_op_list=None,
                  force_schema_registration=False):
    # cwrap depends on pyyaml, so we can't import it earlier
    root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, root)
    from tools.autograd.gen_autograd import gen_autograd, gen_autograd_python
    from tools.autograd.gen_annotated_fn_args import gen_annotated
    from tools.autograd.utils import load_op_list_and_strip_overload
    from tools.jit.gen_unboxing_wrappers import gen_unboxing_wrappers

    # Build ATen based Variable classes
    if install_dir is None:
        install_dir = 'torch/csrc'
        python_install_dir = 'torch/testing/_internal/generated'
    else:
        python_install_dir = install_dir
    autograd_gen_dir = os.path.join(install_dir, 'autograd', 'generated')
    jit_gen_dir = os.path.join(install_dir, 'jit', 'generated')
    for d in (autograd_gen_dir, jit_gen_dir, python_install_dir):
        if not os.path.exists(d):
            os.makedirs(d)
    runfiles_dir = os.environ.get("RUNFILES_DIR", None)
    data_dir = os.path.join(runfiles_dir, 'pytorch') if runfiles_dir else ''
    autograd_dir = os.path.join(data_dir, 'tools', 'autograd')
    tools_jit_templates = os.path.join(data_dir, 'tools', 'jit', 'templates')

    if subset == "pybindings" or not subset:
        gen_autograd_python(declarations_path or DECLARATIONS_PATH, autograd_gen_dir, autograd_dir)

    if subset == "libtorch" or not subset:
        selected_op_list = load_op_list_and_strip_overload(selected_op_list, selected_op_list_path)

        gen_autograd(
            declarations_path or DECLARATIONS_PATH,
            autograd_gen_dir,
            autograd_dir,
            disable_autograd=disable_autograd,
            selected_op_list=selected_op_list,
        )
        gen_unboxing_wrappers(
            declarations_path or DECLARATIONS_PATH,
            jit_gen_dir,
            tools_jit_templates,
            disable_autograd=disable_autograd,
            selected_op_list=selected_op_list,
            force_schema_registration=force_schema_registration)

    if subset == "python" or not subset:
        gen_annotated(
            declarations_path or DECLARATIONS_PATH,
            python_install_dir,
            autograd_dir)
Example #7
def generate_code(ninja_global=None,
                  declarations_path=None,
                  nn_path=None,
                  native_functions_path=None,
                  install_dir=None,
                  subset=None,
                  disable_autograd=False,
                  force_schema_registration=False,
                  operator_selector=None):
    from tools.autograd.gen_autograd import gen_autograd, gen_autograd_python
    from tools.autograd.gen_annotated_fn_args import gen_annotated
    from tools.jit.gen_unboxing_wrappers import gen_unboxing_wrappers
    from tools.codegen.selective_build.selector import SelectiveBuilder

    # Build ATen based Variable classes
    if install_dir is None:
        install_dir = 'torch/csrc'
        python_install_dir = 'torch/testing/_internal/generated'
    else:
        python_install_dir = install_dir
    autograd_gen_dir = os.path.join(install_dir, 'autograd', 'generated')
    jit_gen_dir = os.path.join(install_dir, 'jit', 'generated')
    for d in (autograd_gen_dir, jit_gen_dir, python_install_dir):
        if not os.path.exists(d):
            os.makedirs(d)
    runfiles_dir = os.environ.get("RUNFILES_DIR", None)
    data_dir = os.path.join(runfiles_dir, 'pytorch') if runfiles_dir else ''
    autograd_dir = os.path.join(data_dir, 'tools', 'autograd')
    tools_jit_templates = os.path.join(data_dir, 'tools', 'jit', 'templates')

    if subset == "pybindings" or not subset:
        gen_autograd_python(declarations_path or DECLARATIONS_PATH,
                            native_functions_path or NATIVE_FUNCTIONS_PATH,
                            autograd_gen_dir, autograd_dir)

    if operator_selector is None:
        operator_selector = SelectiveBuilder.get_nop_selector()

    if subset == "libtorch" or not subset:
        gen_autograd(
            declarations_path or DECLARATIONS_PATH,
            native_functions_path or NATIVE_FUNCTIONS_PATH,
            autograd_gen_dir,
            autograd_dir,
            disable_autograd=disable_autograd,
            operator_selector=operator_selector,
        )
        gen_unboxing_wrappers(
            declarations_path or DECLARATIONS_PATH,
            jit_gen_dir,
            tools_jit_templates,
            disable_autograd=disable_autograd,
            operator_selector=operator_selector,
            force_schema_registration=force_schema_registration)

    if subset == "python" or not subset:
        gen_annotated(declarations_path or DECLARATIONS_PATH,
                      python_install_dir, autograd_dir)
Example #8
def generate_code(
    gen_dir: pathlib.Path,
    native_functions_path: Optional[str] = None,
    tags_path: Optional[str] = None,
    install_dir: Optional[str] = None,
    subset: Optional[str] = None,
    disable_autograd: bool = False,
    force_schema_registration: bool = False,
    operator_selector: Any = None,
) -> None:
    from torchgen.selective_build.selector import SelectiveBuilder

    from tools.autograd.gen_annotated_fn_args import gen_annotated
    from tools.autograd.gen_autograd import gen_autograd, gen_autograd_python

    # Build ATen based Variable classes
    if install_dir is None:
        install_dir = os.fspath(gen_dir / "torch/csrc")
        python_install_dir = os.fspath(gen_dir /
                                       "torch/testing/_internal/generated")
    else:
        python_install_dir = install_dir
    autograd_gen_dir = os.path.join(install_dir, "autograd", "generated")
    for d in (autograd_gen_dir, python_install_dir):
        os.makedirs(d, exist_ok=True)
    autograd_dir = os.fspath(pathlib.Path(__file__).parent.parent / "autograd")

    if subset == "pybindings" or not subset:
        gen_autograd_python(
            native_functions_path or NATIVE_FUNCTIONS_PATH,
            tags_path or TAGS_PATH,
            autograd_gen_dir,
            autograd_dir,
        )

    if operator_selector is None:
        operator_selector = SelectiveBuilder.get_nop_selector()

    if subset == "libtorch" or not subset:
        gen_autograd(
            native_functions_path or NATIVE_FUNCTIONS_PATH,
            tags_path or TAGS_PATH,
            autograd_gen_dir,
            autograd_dir,
            disable_autograd=disable_autograd,
            operator_selector=operator_selector,
        )

    if subset == "python" or not subset:
        gen_annotated(
            native_functions_path or NATIVE_FUNCTIONS_PATH,
            tags_path or TAGS_PATH,
            python_install_dir,
            autograd_dir,
        )
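
A minimal invocation sketch for this typed variant, assuming the preamble above; the gen_dir value and the subset choice are arbitrary examples, not the project's actual entry point.

if __name__ == '__main__':
    # Generate only the Python binding sources under ./build (illustrative path).
    generate_code(gen_dir=pathlib.Path('build'), subset='pybindings')
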
Example #9
def generate_code(ninja_global=None):
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)

    # cwrap depends on pyyaml, so we can't import it earlier
    root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, root)
    from tools.cwrap import cwrap
    from tools.cwrap.plugins.THPPlugin import THPPlugin
    from tools.cwrap.plugins.ArgcountSortPlugin import ArgcountSortPlugin
    from tools.cwrap.plugins.AutoGPU import AutoGPU
    from tools.cwrap.plugins.BoolOption import BoolOption
    from tools.cwrap.plugins.KwargsPlugin import KwargsPlugin
    from tools.cwrap.plugins.NullableArguments import NullableArguments
    from tools.cwrap.plugins.WrapDim import WrapDim
    from tools.cwrap.plugins.AssertNDim import AssertNDim
    from tools.cwrap.plugins.Broadcast import Broadcast
    from tools.cwrap.plugins.ProcessorSpecificPlugin import ProcessorSpecificPlugin
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    thp_plugin = THPPlugin()

    cwrap('torch/csrc/generic/TensorMethods.cwrap',
          plugins=[
              ProcessorSpecificPlugin(),
              BoolOption(), thp_plugin,
              AutoGPU(condition='IS_CUDA'),
              ArgcountSortPlugin(),
              KwargsPlugin(),
              AssertNDim(),
              WrapDim(),
              Broadcast()
          ])
    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            os.mkdir(d)
    gen_autograd('torch/lib/tmp_install/share/ATen/Declarations.yaml',
                 autograd_gen_dir)
    gen_jit_dispatch('torch/lib/tmp_install/share/ATen/Declarations.yaml',
                     jit_gen_dir)
Example #10
def generate_code(ninja_global=None,
                  declarations_path=None,
                  nn_path=None,
                  install_dir=None,
                  subset=None,
                  disable_autograd=False,
                  selected_op_list_path=None,
                  selected_op_list=None,
                  force_schema_registration=False):
    # cwrap depends on pyyaml, so we can't import it earlier
    root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, root)
    from tools.autograd.gen_autograd import gen_autograd, gen_autograd_python
    from tools.jit.gen_unboxing_wrappers import gen_unboxing_wrappers

    # Build ATen based Variable classes
    install_dir = install_dir or 'torch/csrc'
    autograd_gen_dir = os.path.join(install_dir, 'autograd', 'generated')
    jit_gen_dir = os.path.join(install_dir, 'jit', 'generated')
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            os.makedirs(d)
    runfiles_dir = os.environ.get("RUNFILES_DIR", None)
    data_dir = os.path.join(runfiles_dir, 'pytorch') if runfiles_dir else ''
    autograd_dir = os.path.join(data_dir, 'tools', 'autograd')
    tools_jit_templates = os.path.join(data_dir, 'tools', 'jit', 'templates')

    if subset == "pybindings" or not subset:
        gen_autograd_python(declarations_path or DECLARATIONS_PATH,
                            autograd_gen_dir, autograd_dir)

    if subset == "libtorch" or not subset:
        # TODO: add selected op mechanism in autograd to reduce binary size
        gen_autograd(
            declarations_path or DECLARATIONS_PATH,
            autograd_gen_dir,
            autograd_dir,
            disable_autograd=disable_autograd,
        )
        gen_unboxing_wrappers(
            declarations_path or DECLARATIONS_PATH,
            jit_gen_dir,
            tools_jit_templates,
            disable_autograd=disable_autograd,
            selected_op_list_path=selected_op_list_path,
            selected_op_list=selected_op_list,
            force_schema_registration=force_schema_registration)
Example #11
def generate_code(ninja_global=None):
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)

    # cwrap depends on pyyaml, so we can't import it earlier
    root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, root)
    from tools.cwrap import cwrap
    from tools.cwrap.plugins.THPPlugin import THPPlugin
    from tools.cwrap.plugins.ArgcountSortPlugin import ArgcountSortPlugin
    from tools.cwrap.plugins.AutoGPU import AutoGPU
    from tools.cwrap.plugins.BoolOption import BoolOption
    from tools.cwrap.plugins.KwargsPlugin import KwargsPlugin
    from tools.cwrap.plugins.NullableArguments import NullableArguments
    from tools.cwrap.plugins.WrapDim import WrapDim
    from tools.cwrap.plugins.AssertNDim import AssertNDim
    from tools.cwrap.plugins.Broadcast import Broadcast
    from tools.cwrap.plugins.ProcessorSpecificPlugin import ProcessorSpecificPlugin
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    thp_plugin = THPPlugin()

    cwrap('torch/csrc/generic/TensorMethods.cwrap', plugins=[
        ProcessorSpecificPlugin(), BoolOption(), thp_plugin,
        AutoGPU(condition='IS_CUDA'), ArgcountSortPlugin(), KwargsPlugin(),
        AssertNDim(), WrapDim(), Broadcast()
    ])
    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            os.mkdir(d)
    gen_autograd(
        'torch/lib/tmp_install/share/ATen/Declarations.yaml',
        autograd_gen_dir)
    gen_jit_dispatch(
        'torch/lib/tmp_install/share/ATen/Declarations.yaml',
        jit_gen_dir)