Code example #1
File: gen.py Project: gtgalone/pytorch
def generate_outputs():
    cwrap_files = filter_by_extension(options.files, '.cwrap')
    nn_files = filter_by_extension(options.files, 'nn.yaml', '.h')
    native_files = filter_by_extension(options.files, 'native_functions.yaml')

    declarations = [d
                    for file in cwrap_files
                    for d in cwrap_parser.parse(file)]

    declarations += nn_parse.run(nn_files)
    declarations += native_parse.run(native_files)
    declarations = preprocess_declarations.run(declarations)
    for fname, env in generators.items():
        fm = file_manager
        if env['name'] == 'CUDA':
            fm = cuda_file_manager
        fm.write(fname, GENERATOR_DERIVED, env)

    # note: this will fill in top_env['type/tensor_method_declarations/definitions']
    # and modify the declarations to include any information that will later be
    # used by function_wrapper.create_derived for all backends
    output_declarations = function_wrapper.create_generic(top_env, declarations)
    output_declarations = postprocess_output_declarations(output_declarations)
    file_manager.write("Declarations.yaml", format_yaml(output_declarations))

    # populated by generate_storage_type_and_tensor
    all_types = []

    for backend, density, scalar_type in iterate_types():
        all_types.append(generate_storage_type_and_tensor(
            backend, density, scalar_type, declarations))

    file_manager.write('Type.h', TYPE_H, top_env)
    file_manager.write('Type.cpp', TYPE_CPP, top_env)

    cuda_file_manager.write('RegisterCUDA.h', REGISTER_CUDA_H, top_env)
    cuda_file_manager.write('RegisterCUDA.cpp', REGISTER_CUDA_CPP, top_env)

    file_manager.write('Tensor.h', TENSOR_H, top_env)
    file_manager.write('TensorMethods.h', TENSOR_METHODS_H, top_env)
    file_manager.write('Functions.h', FUNCTIONS_H, top_env)

    file_manager.write('CPUCopy.cpp', copy_wrapper.create(all_types, 'CPU'))
    cuda_file_manager.write('CUDACopy.cpp', copy_wrapper.create(all_types, 'CUDA'))
    file_manager.write('NativeFunctions.h', NATIVE_FUNCTIONS_H, top_env)

    file_manager.check_all_files_written()
    cuda_file_manager.check_all_files_written()
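
Examples #1, #4, #5 and #6 call filter_by_extension to split options.files into the .cwrap, nn.yaml/.h and native_functions.yaml inputs, but the helper itself is not shown in any excerpt. A minimal sketch, assuming it simply keeps every path that ends with one of the given suffixes (example #2 inlines the same logic with list comprehensions):

def filter_by_extension(files, *extensions):
    # Keep every path whose name ends with one of the given suffixes.
    filtered_files = []
    for file in files:
        for extension in extensions:
            if file.endswith(extension):
                filtered_files.append(file)
    return filtered_files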
Code example #2
File: gen.py Project: bhuWenDongchao/pytorch
def generate_outputs():
    cwrap_files = [f for f in files if f.endswith('.cwrap')]
    nn_files = [f for f in files if f.endswith('nn.yaml') or f.endswith('.h')]
    native_files = [f for f in files if f.endswith('native_functions.yaml')]

    declarations = [d
                    for file in cwrap_files
                    for d in cwrap_parser.parse(file)]

    declarations += nn_parse.run(nn_files)
    declarations += native_parse.run(native_files)
    declarations = preprocess_declarations.run(declarations)
    for fname, env in generators.items():
        file_manager.write(fname, GENERATOR_DERIVED.substitute(env))

    # note: this will fill in top_env['type/tensor_method_declarations/definitions']
    # and modify the declarations to include any information that will later be
    # used by function_wrapper.create_derived for all backends
    output_declarations = function_wrapper.create_generic(top_env, declarations)
    output_declarations = postprocess_output_declarations(output_declarations)
    file_manager.write("Declarations.yaml", format_yaml(output_declarations))

    # populated by generate_storage_type_and_tensor
    all_types = []

    for backend, density, scalar_type in iterate_types():
        all_types.append(generate_storage_type_and_tensor(
            backend, density, scalar_type, declarations))

    file_manager.write('Type.h', TYPE_H.substitute(top_env))
    file_manager.write('Type.cpp', TYPE_CPP.substitute(top_env))

    file_manager.write('Tensor.h', TENSOR_H.substitute(top_env))
    file_manager.write('TensorMethods.h', TENSOR_METHODS_H.substitute(top_env))
    file_manager.write('Functions.h', FUNCTIONS_H.substitute(top_env))

    file_manager.write('Copy.cpp', copy_wrapper.create(all_types))
    file_manager.write('NativeFunctions.h', NATIVE_FUNCTIONS_H.substitute(top_env))

    file_manager.check_all_files_written()
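
Examples #1 and #2 differ mainly in how CUDA output is routed (a separate cuda_file_manager versus a single file_manager), but both rely on iterate_types() to enumerate (backend, density, scalar_type) combinations. Example #3 below expands the same loop inline, including the skip for sparse Half tensors; a generator consistent with that expansion might look like this, assuming backends, densities and scalar_types are module-level lists:

def iterate_types():
    # Yield every supported (backend, density, scalar_type) combination,
    # skipping sparse Half, which THS does not support (see example #3).
    for backend in backends:
        for density in densities:
            for scalar_type in scalar_types:
                if density == 'Sparse' and scalar_type[0] == 'Half':
                    continue
                yield backend, density, scalar_type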
Code example #3
File: gen.py Project: Northrend/pytorch
declarations = [d
                for file in cwrap_files
                for d in cwrap_parser.parse(file)]
print(nn_files)
declarations += nn_parse.run(nn_files)
declarations += native_parse.run(native_files)
declarations = preprocess_declarations.run(declarations)
for fname, env in generators.items():
    write(fname, GENERATOR_DERIVED.substitute(env))


# note: this will fill in top_env['type/tensor_method_declarations/definitions']
# and modify the declarations to include any information that will later be
# used by function_wrapper.create_derived for all backends
output_declarations = function_wrapper.create_generic(top_env, declarations)
output_declarations = postprocess_output_declarations(output_declarations)
write("Declarations.yaml", format_yaml(output_declarations))

# populated by generate_storage_type_and_tensor
all_types = []

for backend in backends:
    for density in densities:
        for scalar_type in scalar_types:
            if density == 'Sparse' and scalar_type[0] == 'Half':
                # THS does not do half type yet.
                continue
            all_types.append(generate_storage_type_and_tensor(
                backend, density, scalar_type, declarations))
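
Every variant ends the declaration phase by dumping the post-processed declarations into Declarations.yaml via format_yaml. A plausible sketch, assuming it is a thin wrapper around yaml.dump that disables anchors/aliases so repeated structures are written out in full (the no-alias dumper is an assumption, not taken from the excerpts):

import yaml

def format_yaml(data):
    # Serialize the output declarations; disabling aliases keeps the YAML
    # readable when the same dict appears under several declarations.
    noalias_dumper = yaml.dumper.SafeDumper
    noalias_dumper.ignore_aliases = lambda self, data: True
    return yaml.dump(data, default_flow_style=False, Dumper=noalias_dumper)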
Code example #4
File: gen.py Project: SiddharthTiwari/pytorch
def generate_outputs():
    cwrap_files = filter_by_extension(options.files, '.cwrap')
    nn_files = filter_by_extension(options.files, 'nn.yaml', '.h')
    native_files = filter_by_extension(options.files, 'native_functions.yaml')

    declarations = [d
                    for file in cwrap_files
                    for d in cwrap_parser.parse(file)]

    declarations += nn_parse.run(nn_files)
    declarations += native_parse.run(native_files)
    declarations = preprocess_declarations.run(declarations)
    for fname, env in generators.items():
        fm = file_manager
        if env['name'] == 'CUDA':
            fm = cuda_file_manager
        fm.write(fname, GENERATOR_DERIVED, env)

    # note: this will fill in top_env['type/tensor_method_declarations/definitions']
    # and modify the declarations to include any information that will later be
    # used by function_wrapper.create_derived for all backends
    output_declarations = function_wrapper.create_generic(top_env, declarations)
    output_declarations = postprocess_output_declarations(output_declarations)
    file_manager.write("Declarations.yaml", format_yaml(output_declarations))

    for backend, density in iterate_types():
        generate_storage_type_and_tensor(backend, density, declarations)
    for backend in extension_backends:
        generate_type_extension_backend(backend, declarations)

    for backend, density, scalar_type in legacy_iterate_types():
        if density == 'Dense':
            generate_legacy_th_dispatcher(backend, density, scalar_type, [])

    core_files = {
        'Type.h': TYPE_H,
        'Tensor.h': TENSOR_H,
        'TensorMethods.h': TENSOR_METHODS_H
    }

    for core_file, core_template_file in core_files.items():
        core_file_manager.write(core_file, core_template_file, top_env)

    file_manager.write('TypeExtendedInterface.h', TYPE_EXTENDED_INTERFACE_H, top_env)
    file_manager.write('TypeDefault.h', TYPE_DEFAULT_H, top_env)
    file_manager.write('TypeDefault.cpp', TYPE_DEFAULT_CPP, top_env)

    file_manager.write('LegacyTHDispatcher.h', LEGACY_TH_DISPATCHER_H, top_env)
    file_manager.write('LegacyTHDispatcher.cpp', LEGACY_TH_DISPATCHER_CPP, top_env)

    file_manager.write('RegisterCPU.h', REGISTER_CPU_H, top_env)
    file_manager.write('RegisterCPU.cpp', REGISTER_CPU_CPP, top_env)

    cuda_file_manager.write('RegisterCUDA.h', REGISTER_CUDA_H, top_env)
    cuda_file_manager.write('RegisterCUDA.cpp', REGISTER_CUDA_CPP, top_env)

    file_manager.write('Functions.h', FUNCTIONS_H, top_env)
    file_manager.write('LegacyTHFunctions.h', LEGACY_TH_FUNCTIONS_H, top_env)

    file_manager.write('NativeFunctions.h', NATIVE_FUNCTIONS_H, top_env)

    file_manager.write('ExtensionBackendRegistration.h', EXTENSION_BACKEND_REGISTRATION_H, top_env)

    file_manager.check_all_files_written()
    cuda_file_manager.check_all_files_written()

    # check that generated files match source files
    core_source_path = os.path.join(options.source_path, 'core')
    match, mismatch, errors = cmpfiles_with_eol_normalization(core_install_dir, core_source_path, core_files.keys())
    if errors:
        raise RuntimeError("Error while trying to compare source and generated files for {}. "
                           "Source directory: {}.  Generated directory: {}."
                           .format(errors, core_source_path, core_install_dir))
    if mismatch:
        file_component = '{}'.format(','.join(mismatch))
        if len(mismatch) > 1:
            file_component = '{' + file_component + '}'
        update_cmd = "cp {}/{} {}".format(core_install_dir, file_component, core_source_path)
        raise RuntimeError("Source files: {} did not match generated files.  To update the source files, "
                           "set environment variable GEN_TO_SOURCE or run \"{}\"".format(mismatch, update_cmd))
Code example #5
def generate_outputs():
    cwrap_files = filter_by_extension(options.files, '.cwrap')
    nn_files = filter_by_extension(options.files, 'nn.yaml', '.h')
    native_files = filter_by_extension(options.files, 'native_functions.yaml')

    declarations = [
        d for file in cwrap_files for d in cwrap_parser.parse(file)
    ]

    declarations += nn_parse.run(nn_files)
    declarations += native_parse.run(native_files)
    declarations = preprocess_declarations.run(declarations)
    for fname, env in generators.items():
        fm = file_manager
        if env['name'] == 'CUDA':
            fm = cuda_file_manager
        fm.write(fname, GENERATOR_DERIVED, env)

    # note: this will fill in top_env['type/tensor_method_declarations/definitions']
    # and modify the declarations to include any information that will later be
    # used by function_wrapper.create_derived for all backends
    output_declarations = function_wrapper.create_generic(
        top_env, declarations)
    output_declarations = postprocess_output_declarations(output_declarations)
    file_manager.write("Declarations.yaml", format_yaml(output_declarations))

    for backend, density in iterate_types():
        generate_storage_type_and_tensor(backend, density, declarations)
    for backend in extension_backends:
        generate_type_extension_backend(backend, declarations)

    for backend, density, scalar_type in legacy_iterate_types():
        if density == 'Dense':
            generate_legacy_th_dispatcher(backend, density, scalar_type, [])

    core_files = {
        'Type.h': TYPE_H,
        'Tensor.h': TENSOR_H,
        'TensorMethods.h': TENSOR_METHODS_H
    }

    for core_file, core_template_file in core_files.items():
        core_file_manager.write(core_file, core_template_file, top_env)

    file_manager.write('TypeExtendedInterface.h', TYPE_EXTENDED_INTERFACE_H,
                       top_env)
    file_manager.write('TypeDefault.h', TYPE_DEFAULT_H, top_env)
    file_manager.write('TypeDefault.cpp', TYPE_DEFAULT_CPP, top_env)

    file_manager.write('LegacyTHDispatcher.h', LEGACY_TH_DISPATCHER_H, top_env)
    file_manager.write('LegacyTHDispatcher.cpp', LEGACY_TH_DISPATCHER_CPP,
                       top_env)

    file_manager.write('RegisterCPU.h', REGISTER_CPU_H, top_env)
    file_manager.write('RegisterCPU.cpp', REGISTER_CPU_CPP, top_env)

    cuda_file_manager.write('RegisterCUDA.h', REGISTER_CUDA_H, top_env)
    cuda_file_manager.write('RegisterCUDA.cpp', REGISTER_CUDA_CPP, top_env)

    file_manager.write('Functions.h', FUNCTIONS_H, top_env)
    file_manager.write('LegacyTHFunctions.h', LEGACY_TH_FUNCTIONS_H, top_env)

    file_manager.write('NativeFunctions.h', NATIVE_FUNCTIONS_H, top_env)

    file_manager.write('ExtensionBackendRegistration.h',
                       EXTENSION_BACKEND_REGISTRATION_H, top_env)

    file_manager.check_all_files_written()
    cuda_file_manager.check_all_files_written()

    # check that generated files match source files
    core_source_path = os.path.join(options.source_path, 'core')
    match, mismatch, errors = cmpfiles_with_eol_normalization(
        core_install_dir, core_source_path, core_files.keys())
    if errors:
        raise RuntimeError(
            "Error while trying to compare source and generated files for {}. "
            "Source directory: {}.  Generated directory: {}.".format(
                errors, core_source_path, core_install_dir))
    if mismatch:
        file_component = '{}'.format(','.join(mismatch))
        if len(mismatch) > 1:
            file_component = '{' + file_component + '}'
        update_cmd = "cp {}/{} {}".format(core_install_dir, file_component,
                                          core_source_path)
        raise RuntimeError(
            "Source files: {} did not match generated files.  To update the source files, "
            "set environment variable GEN_TO_SOURCE or run \"{}\"".format(
                mismatch, update_cmd))
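
Examples #1, #4 and #5 pass (filename, template, env) to file_manager.write, while example #2 passes an already-rendered string; all of them finish with check_all_files_written. A rough sketch of a file manager compatible with both call patterns, assuming write() substitutes env into the template when an env is given and records each filename so stale outputs can be detected:

import os

class FileManager(object):
    def __init__(self, install_dir):
        self.install_dir = install_dir
        self.written_files = set()

    def write(self, filename, template, env=None):
        # Render the template against env when an env is given; otherwise
        # the second argument is already the finished file contents.
        contents = template.substitute(env) if env is not None else template
        self.written_files.add(filename)
        with open(os.path.join(self.install_dir, filename), 'w') as f:
            f.write(contents)

    def check_all_files_written(self):
        # Flag files left over in install_dir that this run did not generate.
        for name in os.listdir(self.install_dir):
            assert name in self.written_files, \
                '{} exists in {} but was not generated'.format(name, self.install_dir)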
Code example #6
File: gen.py Project: uPorter/PyTorch
def generate_outputs():
    cwrap_files = filter_by_extension(options.files, '.cwrap')
    nn_files = filter_by_extension(options.files, 'nn.yaml', '.h')
    native_files = filter_by_extension(options.files, 'native_functions.yaml')

    declarations = [d
                    for file in cwrap_files
                    for d in cwrap_parser.parse(file)]

    declarations += nn_parse.run(nn_files)
    declarations += native_parse.run(native_files)
    declarations = preprocess_declarations.run(declarations)

    # note: this will fill in top_env['type/tensor_method_declarations/definitions']
    # and modify the declarations to include any information that will later be
    # used by function_wrapper.create_derived for all backends
    output_declarations = function_wrapper.create_generic(top_env, declarations)
    output_declarations = postprocess_output_declarations(output_declarations)
    file_manager.write("Declarations.yaml", format_yaml(output_declarations))

    # Filter out named-tensor only declarations.
    # They are necessary in create_generic because that generates Type.h, TensorBody.h,
    # and TensorMethods.h, all of which are checked in to the codebase and therefore
    # need to be consistent whether or not BUILD_NAMEDTENSOR is on/off.
    if not BUILD_NAMEDTENSOR:
        declarations = [decl for decl in declarations
                        if not is_namedtensor_only_decl(decl)]

    for backend, density in iterate_types():
        generate_storage_type_and_tensor(backend, density, declarations)

    core_files = {
        'TensorBody.h': TENSOR_H,
        'TensorMethods.h': TENSOR_METHODS_H,
        'OpsAlreadyMovedToC10.cpp': OPS_ALREADY_MOVED_TO_C10_CPP,
    }

    for core_file, core_template_file in core_files.items():
        core_file_manager.write(core_file, core_template_file, top_env)

    file_manager.write('TypeDefault.h', TYPE_DEFAULT_H, top_env)
    file_manager.write('TypeDefault.cpp', TYPE_DEFAULT_CPP, top_env)
    file_manager.write('RegistrationDeclarations.h', REGISTRATION_DECLARATIONS_H, top_env)

    file_manager.write('Functions.h', FUNCTIONS_H, top_env)

    file_manager.write('NativeFunctions.h', NATIVE_FUNCTIONS_H, top_env)

    file_manager.check_all_files_written()
    cuda_file_manager.check_all_files_written()

    # check that generated files match source files
    core_source_path = os.path.join(options.source_path, 'core')
    match, mismatch, errors = cmpfiles_with_eol_normalization(core_install_dir, core_source_path, core_files.keys())
    if errors:
        raise RuntimeError("Error while trying to compare source and generated files for {}. "
                           "Source directory: {}.  Generated directory: {}."
                           .format(errors, core_source_path, core_install_dir))
    if mismatch:
        file_component = '{}'.format(','.join(mismatch))
        if len(mismatch) > 1:
            file_component = '{' + file_component + '}'
        update_cmd = "cp {}/{} {}".format(core_install_dir, file_component, core_source_path)
        raise RuntimeError("Source files: {} did not match generated files.  To update the source files, "
                           "set environment variable GEN_TO_SOURCE or run \"{}\"".format(mismatch, update_cmd))
Code example #7
File: gen.py Project: marcc-hpc/pytorch-1
native_files = [
    f for f in files
    if f.endswith('native_functions.yaml') or f.endswith('cuDNN.yaml')
]

declarations = [d for file in cwrap_files for d in cwrap_parser.parse(file)]
print(nn_files)
declarations += nn_parse.run(nn_files)
declarations += native_parse.run(native_files)
declarations = preprocess_declarations.run(declarations)
for fname, env in generators.items():
    write(fname, GENERATOR_DERIVED.substitute(env))

# note: this will fill in top_env['type/tensor_method_declarations/definitions']
# and modify the declarations to include any information that will later be
# used by function_wrapper.create_derived for all backends
output_declarations = function_wrapper.create_generic(top_env, declarations)
output_declarations = postprocess_output_declarations(output_declarations)
write("Declarations.yaml", format_yaml(output_declarations))

# populated by generate_storage_type_and_tensor
all_types = []

for backend in backends:
    for density in densities:
        for scalar_type in scalar_types:
            if density == 'Sparse' and scalar_type[0] == 'Half':
                # THS does not do half type yet.
                continue
            all_types.append(
                generate_storage_type_and_tensor(backend, density, scalar_type,
                                                 declarations))
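
Examples #3 and #7 iterate over module-level backends, densities and scalar_types lists and index scalar_type[0] for the type name, so each scalar type entry is a tuple whose first element is its name. Illustrative definitions consistent with that usage (the exact entries below are assumptions, not taken from the excerpts):

backends = ['CPU', 'CUDA']
densities = ['Dense', 'Sparse']
# (name, C type, accumulation type) -- only the first element is inspected
# by the loops above.
scalar_types = [
    ('Byte', 'uint8_t', 'Long'),
    ('Double', 'double', 'Double'),
    ('Float', 'float', 'Double'),
    ('Half', 'Half', 'Double'),
    ('Int', 'int', 'Long'),
    ('Long', 'int64_t', 'Long'),
]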