Example #1
def get_argument(argument, option):
    if requires_checked_cast(argument):
        return CHECKED_USE.get(argument['type'],
                               '{}_').format(argument['name'])
    elif argument['type'] == 'bool' and 'if_true' in argument:
        if bool_option_is_string(argument):
            tpl = '({}) ? "{}" : "{}"'
        else:
            tpl = '({}) ? {} : {}'
        return tpl.format(argument['name'], argument['if_true'],
                          argument['if_false'])
    elif argument['type'] == 'CONSTANT':
        # this is a bool that is actually a string...
        if bool_option_is_string(argument):
            return '"{}"'.format(argument['name'])
        v = str(argument['name'])
        for pattern, replacement in CONSTANT_REPLACEMENTS:
            v = re.sub(pattern, replacement, v)
        return CodeTemplate(v).substitute(backend_type_env)
    # e.g. argument 0, i.e. repeat the 0th argument in this position...
    elif argument['type'] == 'argument':
        index = int(argument['name'])
        return get_argument(option['arguments'][index], option)
    else:
        return argument['name']
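The dispatch on argument['type'] above is easiest to see with concrete argument dicts. Below is a minimal sketch that exercises three of the branches, assuming the get_argument above is in scope; the stub helpers and the option layout are assumptions for illustration, not the real function_wrapper.py definitions.

CHECKED_USE = {}  # type -> usage template; '{}_' is the fallback used above

def requires_checked_cast(argument):
    # stub: pretend only raw TH tensor pointers need a checked cast
    return argument['type'] == 'THTensor*'

def bool_option_is_string(argument):
    # stub: string-valued if_true/if_false pairs are "string bools"
    return isinstance(argument.get('if_true'), str)

option = {'arguments': [
    {'type': 'THTensor*', 'name': 'self'},
    {'type': 'bool', 'name': 'keepdim', 'if_true': 1, 'if_false': 0},
    {'type': 'argument', 'name': '0'},  # repeats the 0th argument
]}

print(get_argument(option['arguments'][0], option))  # self_
print(get_argument(option['arguments'][1], option))  # (keepdim) ? 1 : 0
print(get_argument(option['arguments'][2], option))  # self_ (via recursion)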
Example #2
def get_argument(argument, option):
    if replace_with_null(argument):
        return 'NULL'
    elif requires_checked_cast(argument):
        checked_use = CHECKED_USE.get(argument['type'],
                                      '{}_').format(argument['name'])
        if nullable_argument(argument):
            checked_use = CHECKED_USE_NULLABLE.substitute(
                env={}, arg_name=argument['name'], usage=checked_use)
        return checked_use
    elif argument['type'] == 'bool' and 'if_true' in argument:
        if bool_option_is_string(argument):
            tpl = '({}) ? "{}" : "{}"'
        else:
            tpl = '({}) ? {} : {}'
        return tpl.format(argument['name'], argument['if_true'],
                          argument['if_false'])
    elif argument['type'] == 'CONSTANT':
        # this is a bool that is actually a string...
        if bool_option_is_string(argument):
            return '"{}"'.format(argument['name'])
        v = str(argument.get('default', argument['name']))
        for pattern, replacement in CONSTANT_REPLACEMENTS:
            v = re.sub(pattern, replacement, v)
        return CodeTemplate(v).substitute(backend_type_env)
    # e.g. argument 0, i.e. repeat the 0th argument in this position...
    elif argument['type'] == 'argument':
        index = int(argument['name'])
        return get_argument(option['arguments'][index], option)
    else:
        return argument['name']
Example #3
def allocate_arg(env, arg, output_count):
    name = arg['name']
    allocation = CodeTemplate(ALLOC_WRAP[arg['type']]).substitute(env)
    if arg.get('mask', False):
        allocation = 'output_mask[{}] ? {} : nullptr'.format(output_count, allocation)
    return [
        'auto {}_ = {};'.format(name, allocation),
        'auto {} = Tensor({}_,false);'.format(name, name),
    ]
Example #4
def allocate_arg(env, arg, output_count):
    name = arg['name']
    allocation = CodeTemplate(ALLOC_WRAP[arg['type']]).substitute(env)
    tensor_arg = '{}_'.format(name)
    if arg.get('mask', False):
        allocation = 'output_mask[{}] ? {} : nullptr'.format(output_count, allocation)
        tensor_arg = ('{}_ == nullptr ? (TensorImpl*)UndefinedTensor::singleton() : (TensorImpl*){}_'
                      .format(name, name))
    return [
        'auto {}_ = {};'.format(name, allocation),
        'auto {} = Tensor({}, false);'.format(name, tensor_arg),
    ]
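A quick driver makes the masked-output path concrete. This is a sketch only: it assumes code_template.CodeTemplate is importable, and the ALLOC_WRAP entry and env values are invented, since the real tables live elsewhere in the generator.

from code_template import CodeTemplate

ALLOC_WRAP = {'THTensor*': 'new ${Tensor}(context)'}  # invented entry
env = {'Tensor': 'CPUFloatTensor'}
arg = {'name': 'grad_input', 'type': 'THTensor*', 'mask': True}

for line in allocate_arg(env, arg, 0):
    print(line)
# auto grad_input_ = output_mask[0] ? new CPUFloatTensor(context) : nullptr;
# auto grad_input = Tensor(grad_input_ == nullptr ? (TensorImpl*)UndefinedTensor::singleton() : (TensorImpl*)grad_input_, false);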
Example #5
def notebook_generator(conn, dataset, comp):
    # initialization
    nb = nbf.v4.new_notebook()
    instance = CodeTemplate(conn, dataset, comp)

    nb['cells'].append(nbf.v4.new_code_cell(instance.dependency()))
    nb['cells'].append(nbf.v4.new_code_cell(instance.modifiers()))
    ctx, conn_docs = instance.connection()
    nb['cells'].append(nbf.v4.new_markdown_cell(conn_docs))
    nb['cells'].append(nbf.v4.new_code_cell(ctx))
    nb['cells'].append(nbf.v4.new_code_cell(instance.dataset()))
    for docs, analysis, plot in instance.analysis():
        nb['cells'].append(nbf.v4.new_markdown_cell(docs))
        nb['cells'].append(nbf.v4.new_code_cell(analysis))
        nb['cells'].append(nbf.v4.new_code_cell(plot))

    fname = 'analysis_notebook.ipynb'
    with open(fname, 'w') as f:
        nbf.write(nb, f)
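Here nbf is the nbformat package, so the generated file can be verified by reading it back; a small check, assuming the function above already ran and produced analysis_notebook.ipynb:

import nbformat

nb = nbformat.read('analysis_notebook.ipynb', as_version=4)
print(len(nb.cells))          # one cell per append above
print(nb.cells[0].cell_type)  # 'code' (the dependency cell)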
Example #6
        else:
            self.filenames.remove(filename)

    def check_all_files_written(self):
        if len(self.undeclared_files) > 0:
            raise Exception(
                "trying to write files {} which are not ".format(self.undeclared_files) +
                "in the list of outputs this script produces. " +
                "use will_write to add them.")
        if len(self.filenames) > 0:
            raise Exception("Outputs declared with 'will_write' were " +
                            "never written: {}".format(self.filenames))


TEMPLATE_PATH = options.source_path + "/templates"
GENERATOR_DERIVED = CodeTemplate.from_file(
    TEMPLATE_PATH + "/GeneratorDerived.h")
STORAGE_DERIVED_CPP = CodeTemplate.from_file(
    TEMPLATE_PATH + "/StorageDerived.cpp")
STORAGE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/StorageDerived.h")

TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.h")
TYPE_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.cpp")

TENSOR_DERIVED_CPP = CodeTemplate.from_file(
    TEMPLATE_PATH + "/TensorDerived.cpp")
TENSOR_SPARSE_CPP = CodeTemplate.from_file(
    TEMPLATE_PATH + "/TensorSparse.cpp")
TENSOR_DENSE_CPP = CodeTemplate.from_file(
    TEMPLATE_PATH + "/TensorDense.cpp")
Example #7
import re
import yaml
from code_template import CodeTemplate

# temporary things we cannot handle
EXCLUDE_PATTERN = "bernoulli.*|normal.*|exponential.*|random.*|arange.*"
# what has to be done to add an Operation ...
# 1. add virtual dispatch declaration to Type.h and default impl to Type.cpp
TYPE_METHOD_DECLARATION = CodeTemplate("""\
virtual ${return_type} ${method_prefix}${api_name}(${formals}) ;
""")
TYPE_METHOD_DEFINITION = CodeTemplate("""\
${return_type} Type::${method_prefix}${api_name}(${formals}) {
    throw std::runtime_error(std::string("${api_name} is not implemented for type ") + toString());
}
""")
# 2. add virtual override to TypeDerived.h
TYPE_DERIVED_DECLARATION = CodeTemplate("""\
virtual ${return_type} ${method_prefix}${api_name}(${formals}) override;
""")
# 3. add override definition to TypeDerived.cpp
TYPE_DERIVED_DEFINITION = CodeTemplate("""\
${return_type} ${Type}::${method_prefix}${api_name}(${formals}) {
    ${type_definition_body}
}
""")
# 4. add non-virtual declaration to Tensor.h
TENSOR_METHOD_DECLARATION = CodeTemplate("""\
${return_type} ${api_name}(${method_formals})${const_mark};
""")
# 5. add non-virtual declaration to Tensor.cpp
Example #8
File: gen.py Project: zyiyy/pytorch
        else:
            self.filenames.remove(filename)

    def check_all_files_written(self):
        if len(self.undeclared_files) > 0:
            raise Exception(
                "trying to write files {} which are not ".format(self.undeclared_files) +
                "in the list of outputs this script produces. " +
                "use will_write to add them.")
        if len(self.filenames) > 0:
            raise Exception("Outputs declared with 'will_write' were " +
                            "never written: {}".format(self.filenames))


TEMPLATE_PATH = options.source_path + "/templates"
GENERATOR_DERIVED = CodeTemplate.from_file(
    TEMPLATE_PATH + "/GeneratorDerived.h")
TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
SPARSE_TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/SparseTypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.h")
TYPE_EXTENDED_INTERFACE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtendedInterface.h")
TYPE_DEFAULT_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.h")
TYPE_DEFAULT_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.cpp")
TYPE_EXTENSION_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtension.h")
TYPE_EXTENSION_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtension.cpp")
TYPE_EXTENSION_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtensionDerived.h")
TYPE_EXTENSION_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtensionDerived.cpp")

LEGACY_TH_DISPATCHER_H = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcher.h")
LEGACY_TH_DISPATCHER_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcher.cpp")
LEGACY_TH_DISPATCHER_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcherDerived.cpp")
Example #9
from code_template import CodeTemplate

FILE = CodeTemplate("""\
#include "TH/TH.h"
#ifdef AT_CUDA_ENABLED
#undef THNN_
#include "THC/THC.h"
#endif
#include "ATen/Utils.h"
${copy_includes}

namespace at {

${copy_functions}

}
""")

CASE = CodeTemplate("""\
case ${src_id}:
    ${THTensor}_copy${cuda}${src_scalar_name}(${state,}dst_->tensor,static_cast<${src_tensor}*>(src.pImpl)->tensor);
    break;
""")

FUNCTION = CodeTemplate("""\
void ${Type}::s_copy(const Tensor & src, Tensor & dst) const {
  // code generated by function_wrapper
  auto dst_ = checked_cast_tensor<${Tensor}>(dst.pImpl,"dst",0,false);
  (void) dst_; //silence unused warning
  switch(src.type().ID()) {
    ${copy_body}
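Note the ${state,} placeholder in the CASE template: it expands to "value," when state is non-empty (the CUDA path, where a THCState* has to be threaded through) and to nothing at all otherwise, so the generated call stays well-formed in both cases. A toy re-implementation of just that rule, sketched from the observed behavior rather than copied from code_template.py, with an abbreviated case string:

import re

def toy_substitute(template, env):
    """Replace ${name} with env[name]; ${name,} additionally appends a
    comma, but only when the substituted value is non-empty."""
    def repl(match):
        key, comma = match.group(1), match.group(2)
        value = str(env.get(key, ''))
        return value + ',' if (value and comma) else value
    return re.sub(r'\$\{(\w+)(,?)\}', repl, template)

case = '${THTensor}_copy${cuda}${src_scalar_name}(${state,}dst_->tensor, src_->tensor);'
print(toy_substitute(case, {'THTensor': 'THFloatTensor', 'cuda': '',
                            'src_scalar_name': 'Double', 'state': ''}))
# THFloatTensor_copyDouble(dst_->tensor, src_->tensor);
print(toy_substitute(case, {'THTensor': 'THCudaTensor', 'cuda': 'Cuda',
                            'src_scalar_name': 'Double', 'state': 'context->thc_state'}))
# THCudaTensor_copyCudaDouble(context->thc_state,dst_->tensor, src_->tensor);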
Example #10
        else:
            self.filenames.remove(filename)

    def check_all_files_written(self):
        if len(self.undeclared_files) > 0:
            raise Exception("trying to write files {} which are not ".format(
                self.undeclared_files) +
                            "in the list of outputs this script produces. " +
                            "use will_write to add them.")
        if len(self.filenames) > 0:
            raise Exception("Outputs declared with 'will_write' were " +
                            "never written: {}".format(self.filenames))


TEMPLATE_PATH = options.source_path + "/templates"
TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
SPARSE_TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH +
                                                 "/SparseTypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_DEFAULT_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.h")
TYPE_DEFAULT_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.cpp")
OPS_ALREADY_MOVED_TO_C10_CPP = CodeTemplate.from_file(
    TEMPLATE_PATH + "/OpsAlreadyMovedToC10.cpp")
BACKEND_SELECT_REGISTER_CPP = CodeTemplate.from_file(
    TEMPLATE_PATH + "/BackendSelectRegister.cpp")
TENSOR_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorBody.h")
TENSOR_METHODS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorMethods.h")

FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Functions.h")

LEGACY_TH_FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH +
Example #11
import re
from code_template import CodeTemplate

import sys
if sys.version_info[0] == 3:
    string_type = str
else:
    string_type = basestring

# temporary things we cannot handle
EXCLUDE_PATTERN = "bernoulli.*|normal.*|exponential.*|random.*|arange.*"
# what has to be done to add an Operation ...
# 1. if broadcasting or without the full list of arguments, add a non-virtual
#    declaration under Type.h
TYPE_METHOD_DECLARATION_NON_VIRTUAL = CodeTemplate("""\
${return_type} ${method_prefix}${api_name}(${formals}) ;
""")
# 2. broadcasting functions are implemented in Type.cpp
TYPE_METHOD_DEFINITION_BROADCAST = CodeTemplate("""\
${return_type} Type::${method_prefix}${api_name}(${formals}) {
    Tensor ${broadcast_returns};
    std::tie(${broadcast_returns}) = ${broadcast_function}(${broadcast_actuals});
    return ${method_prefix_derived}${api_name}(${broadcast_modified_actuals});
}
""")
# 3. functions without the full list of arguments are implemented in TypeMethods.h
TYPE_METHOD_INLINE = CodeTemplate("""\
inline ${return_type} Type::${method_prefix}${api_name}(${formals}) {
    ${return_call}${method_prefix}${api_name}(${actuals_with_constants});
}
""")
Example #12
from code_template import CodeTemplate

FILE = CodeTemplate("""\
#include "TH/TH.h"
#ifdef AT_CUDA_ENABLED
#undef THNN_
#include "THC/THC.h"
#endif
#include "ATen/Utils.h"
${copy_includes}

namespace at {

${copy_functions}

}
""")

CASE = CodeTemplate("""\
case ${src_id}:
    ${THTensor}_copy${cuda}${src_scalar_name}(${state,}dst_->tensor,static_cast<${src_tensor}*>(src.pImpl)->tensor);
    break;
""")

FUNCTION = CodeTemplate("""\
void ${Type}::copy(const Tensor & src, Tensor & dst) {
  // code generated by function_wrapper
  auto dst_ = checked_cast<${Tensor}>(dst.pImpl,"dst",0);
  switch(src.type().ID()) {
    ${copy_body}
    default:
Example #13
        else:
            self.filenames.remove(filename)

    def check_all_files_written(self):
        if len(self.undeclared_files) > 0:
            raise Exception(
                "trying to write files {} which are not ".format(self.undeclared_files) +
                "in the list of outputs this script produces. " +
                "use will_write to add them.")
        if len(self.filenames) > 0:
            raise Exception("Outputs declared with 'will_write' were " +
                            "never written: {}".format(self.filenames))


TEMPLATE_PATH = options.source_path + "/templates"
TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
SPARSE_TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/SparseTypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_DEFAULT_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.h")
TYPE_DEFAULT_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.cpp")
TYPE_EXTENSION_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtension.h")
TYPE_EXTENSION_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtension.cpp")
REGISTRATION_DECLARATIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/RegistrationDeclarations.h")

TENSOR_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Tensor.h")
TENSOR_METHODS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorMethods.h")

FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Functions.h")

LEGACY_TH_FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHFunctions.h")
LEGACY_TH_FUNCTIONS_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHFunctions.cpp")
Example #14
import re
from code_template import CodeTemplate

# temporary things we cannot handle
EXCLUDE_PATTERN = "bernoulli.*|normal.*|exponential.*|random.*|arange.*"
# what has to be done to add an Operation ...
# 1. add virtual dispatch declaration to Type.h and default impl to Type.cpp
TYPE_METHOD_DECLARATION = CodeTemplate("""\
virtual ${return_type} ${method_prefix}${api_name}(${formals}) ;
""")
TYPE_METHOD_DEFINITION = CodeTemplate("""\
${return_type} Type::${method_prefix}${api_name}(${formals}) {
    throw std::runtime_error(std::string("${api_name} is not implemented for type ") + toString());
}
""")
# 2. add virtual override to TypeDerived.h
TYPE_DERIVED_DECLARATION = CodeTemplate("""\
virtual ${return_type} ${method_prefix}${api_name}(${formals}) override;
""")
# 3. add override definition to TypeDerived.cpp
TYPE_DERIVED_DEFINITION = CodeTemplate("""\
${return_type} ${Type}::${method_prefix}${api_name}(${formals}) {
    ${type_definition_body}
}
""")
# 4. add non-virtual declaration to Tensor.h
TENSOR_METHOD_DECLARATION = CodeTemplate("""\
${return_type} ${api_name}(${method_formals})${const_mark};
""")
# 5. add non-virtual declaration to Tensor.cpp
TENSOR_METHOD_DEFINITION = CodeTemplate("""\
Example #15
parser.add_argument("--output_prefix", default="", help="")
parser.add_argument(
    "--install_dir", default=".", help="where to put generated file")
parser.add_argument("--aten_root", default="", help="root directory of aten")
args, _ = parser.parse_known_args()

if args.aten_root:
    if not os.path.exists(args.aten_root):
        raise ValueError('aten_root ({}) does not exist'.format(
            args.aten_root))
    sys.path.append(os.path.join(args.aten_root, 'src', 'ATen'))
    from code_template import CodeTemplate as CT
else:
    from src.ATen.code_template import CodeTemplate as CT

OP_TEMPLATE = CT.from_file(
    os.path.join(args.template_dir, 'aten_op_template.h'))


try:
    # use faster C loader if available
    from yaml import CLoader as Loader
except ImportError:
    from yaml import Loader


def write(filename, s):
    with open(filename, "w") as f:
        f.write(s)


def read(filename):
Example #16
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################

import sys
import yaml
from copy import deepcopy
project_root = sys.argv[1]
sys.path.append(project_root + "/third_party/aten/src/ATen")
from code_template import CodeTemplate as CT
OP_TEMPLATE = CT.from_file(project_root +
                           '/caffe2/contrib/aten/aten_op_template.h')

try:
    # use faster C loader if available
    from yaml import CLoader as Loader
except ImportError:
    from yaml import Loader


def write(filename, s):
    with open(filename, "w") as f:
        f.write(s)


def read(filename):
    with open(filename, "r") as f:
Example #17
        else:
            self.filenames.remove(filename)

    def check_all_files_written(self):
        if len(self.undeclared_files) > 0:
            raise Exception(
                "trying to write files {} which are not ".format(self.undeclared_files) +
                "in the list of outputs this script produces. " +
                "use will_write to add them.")
        if len(self.filenames) > 0:
            raise Exception("Outputs declared with 'will_write' were " +
                            "never written: {}".format(self.filenames))


TEMPLATE_PATH = options.source_path + "/templates"
GENERATOR_DERIVED = CodeTemplate.from_file(
    TEMPLATE_PATH + "/GeneratorDerived.h")
TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
SPARSE_TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/SparseTypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.h")
TYPE_EXTENDED_INTERFACE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtendedInterface.h")
TYPE_DEFAULT_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.h")
TYPE_DEFAULT_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.cpp")
TYPE_EXTENSION_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtension.h")
TYPE_EXTENSION_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtension.cpp")

LEGACY_TH_DISPATCHER_H = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcher.h")
LEGACY_TH_DISPATCHER_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcher.cpp")
LEGACY_TH_DISPATCHER_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcherDerived.cpp")
LEGACY_TH_DISPATCHER_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcherDerived.h")
Example #18
        else:
            self.filenames.remove(filename)

    def check_all_files_written(self):
        if len(self.undeclared_files) > 0:
            raise Exception("trying to write files {} which are not ".format(
                self.undeclared_files) +
                            "in the list of outputs this script produces. " +
                            "use will_write to add them.")
        if len(self.filenames) > 0:
            raise Exception("Outputs declared with 'will_write' were " +
                            "never written: {}".format(self.filenames))


TEMPLATE_PATH = options.source_path + "/templates"
GENERATOR_DERIVED = CodeTemplate.from_file(TEMPLATE_PATH +
                                           "/GeneratorDerived.h")
TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
SPARSE_TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH +
                                                 "/SparseTypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.h")
TYPE_EXTENDED_INTERFACE_H = CodeTemplate.from_file(TEMPLATE_PATH +
                                                   "/TypeExtendedInterface.h")
TYPE_DEFAULT_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.h")
TYPE_DEFAULT_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.cpp")

REGISTER_CPU_H = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCPU.h")
REGISTER_CPU_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCPU.cpp")

REGISTER_CUDA_H = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCUDA.h")
REGISTER_CUDA_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCUDA.cpp")
Example #19
    def emit_body(env, option):
        body = []
        body += handle_sparse(env, option)
        body += handle_zero_dim(env, option)
        # arguments are potentially duplicated because of one argument
        # referencing another
        seen_names = set()
        count = 0
        is_cuda = 'CUDA' in backend_type_env['Backend']

        # scalar_check is the heuristic condition for when a result may be a scalar.
        # if there is a THSize* argument, then its dimensions are used to determine scalar-ness;
        # otherwise, it is true if all the input tensors are scalars.
        scalar_check_is_from_size = False
        scalar_check = None
        for arg in option['arguments']:
            if is_real_argument_to_wrapper(arg):
                count += 1
            if arg['type'] == 'THSize*':
                scalar_check_is_from_size = True
                scalar_check = '{}.size() == 0'.format(arg['name'])

            wrap_dim_arg = arg.get('wrap_dim', None)
            if wrap_dim_arg is not None:
                # wrap_dim specification can have (add) expressions, e.g. self+1
                wrap_dim_params = wrap_dim_arg.split("+")
                wrap_dim_params[0] = wrap_dim_params[0] + "_"
                wrap_dim_target = wrap_dim_params[0]
                wrap_dim_params[0] = "{}->dim()".format(wrap_dim_target)
                wrap_dim_expr = "+".join(wrap_dim_params)
                body.append("{} = maybe_wrap_dim({}, {});".format(
                    arg['name'], arg['name'], wrap_dim_expr))

            # only generate checked casts the first time we see it
            if not arg['name'] in seen_names and requires_checked_cast(arg):
                seen_names.add(arg['name'])

                # make a new allocation of TensorImpl, then wrap a Tensor around it.
                if arg.get('allocate', False):
                    allocation = CodeTemplate(
                        ALLOC_WRAP[arg['type']]).substitute(env)
                    body.append('auto {}_ = {};'.format(
                        arg['name'], allocation))
                    body.append('auto {} = Tensor({}_,false);'.format(
                        arg['name'], arg['name']))
                # extract the TensorImpl from an existing tensor (or Storage, etc.)
                else:
                    # special case where we allow undefined Tensors, and thus
                    # the checked cast succeeds even if the Tensor is not
                    # defined
                    null_okay = 'true' if nullable_argument(arg) else 'false'

                    check_cast = CHECKED_CAST[arg['type']].substitute(
                        env,
                        arg_name=arg['name'],
                        arg_pos=count,
                        null_okay=null_okay)
                    body.append("auto {}_ = {};".format(
                        arg['name'], check_cast))
                if drop_argument(arg, option) or replace_with_null(arg):
                    body.append("(void) {}_; //silence unused warning".format(
                        arg['name']))
                # resize tensors for special ops that require it
                if 'resize' in arg:
                    resize = arg['resize']
                    if isinstance(resize, str):
                        body.append("{}.resize_({}.sizes());".format(
                            arg['name'], resize))
                    else:
                        dims = [
                            '{}.size({})'.format(name, dim)
                            for name, dim in resize
                        ]
                        body.append("{}.resize_({{ {} }});".format(
                            arg['name'], ','.join(dims)))
                # also special handling where we zero some outputs.
                if arg.get('cpu_zero', False) and not is_cuda:
                    body.append("{}.zero_();".format(arg['name']))

                # isScalar() for all input tensors is and'd to form
                # the test for whether the output is also a scalar
                if (not arg.get('output') and 'Tensor' in arg['type']
                        and 'TensorList' not in arg['type']
                        and 'THS' not in arg['type']
                        and not scalar_check_is_from_size):
                    check = '{}->isScalar()'.format(arg['name'] + '_')
                    scalar_check = (check if scalar_check is None else
                                    scalar_check + ' && ' + check)

        option['derived_actuals'] = get_arguments(option)
        is_nn = option['mode'] == 'NN'
        if is_cuda or is_nn:
            option['derived_actuals'] = ['context->thc_state'
                                         ] + option['derived_actuals']

        if is_nn:
            prefix = 'THNN_{}'.format(env['THType'])
        elif option.get('sparse', False):
            if is_cuda:
                prefix = 'THCS' + env['ScalarName'] + "Tensor_"
            else:
                prefix = env['THTensor'].replace('TH', 'THS') + '_'
        else:
            prefix = env['THTensor'] + '_'

        call = prefix + \
            CodeTemplate("${cname}(${derived_actuals})").substitute(env)
        ret = option['return']

        if ret['kind'] == 'arguments':
            if 'aten_custom_call' in option:
                # all aten_custom_call bodies handle settings on their own.
                scalar_check = None
                body.append(
                    CodeTemplate(option['aten_custom_call']).substitute(env))
            else:
                body.append(call + ";")
            arguments_indices = ret['arguments']
            arguments = [
                option['arguments'][argi] for argi in arguments_indices
            ]
            if scalar_check is not None:
                if len(arguments) > 1:
                    body.append("bool maybe_scalar = {};".format(scalar_check))
                    scalar_check = 'maybe_scalar'
                for arg in arguments:
                    body.append("{}_->maybeScalar({});".format(
                        arg['name'], scalar_check))
            if len(arguments_indices) == 1:
                arg = arguments[0]
                body.append("return {};".format(arg['name']))
            else:
                types = [
                    to_return_type(arg, option)['type'] for arg in arguments
                ]
                # TODO: check for move semantics...
                names = [arg['name'] for arg in arguments]
                body.append(
                    CodeTemplate(
                        "return std::tuple<${types}>(${names});").substitute(
                            types=types, names=names))
        elif ret['kind'] == 'type':
            if ret['type'] == 'THTensor*':
                maybe_scalar = "->maybeScalar({})".format(scalar_check) \
                               if scalar_check is not None \
                               else ""
                return_tensor = "return Tensor((new ${Tensor}(context,${arg_name}))${maybe_scalar},false);"
                body.append(
                    CodeTemplate(return_tensor).substitute(
                        env, arg_name=call, maybe_scalar=maybe_scalar))
            else:
                # we use int64_t for long in the API, so correct it here...
                if is_actual_return_long(ret):
                    call = "static_cast<int64_t>({})".format(call)
                body.append("return {};".format(call))
        else:
            raise Exception("NYI - return handling")
        return body
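The wrap_dim handling above is dense; run in isolation, the string surgery turns a spec like 'self+1' into a C++ dimension expression. A sketch of just those lines, with made-up names:

wrap_dim_arg = 'self+1'                        # as it might appear in the spec
wrap_dim_params = wrap_dim_arg.split('+')
wrap_dim_params[0] = wrap_dim_params[0] + '_'  # 'self' -> 'self_' (the TensorImpl)
wrap_dim_params[0] = '{}->dim()'.format(wrap_dim_params[0])
wrap_dim_expr = '+'.join(wrap_dim_params)
print('{} = maybe_wrap_dim({}, {});'.format('dim', 'dim', wrap_dim_expr))
# dim = maybe_wrap_dim(dim, self_->dim()+1);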
Example #20
# limitations under the License.
##############################################################################

import sys
import yaml
project_root = sys.argv[1]
sys.path.append(project_root + "/third_party/aten/src/ATen")
from code_template import CodeTemplate as CT

try:
    # use faster C loader if available
    from yaml import CLoader as Loader
except ImportError:
    from yaml import Loader

OP_TEMPLATE = CT.from_file(project_root+'/caffe2/contrib/aten/aten_op_template.h')


def write(filename, s):
    with open(filename, "w") as f:
        f.write(s)


def read(filename):
    with open(filename, "r") as f:
        return f.read()


decls = yaml.load(read('aten/src/ATen/ATen/Declarations.yaml'), Loader=Loader)

top_env = {
Example #21
from code_template import CodeTemplate
from function_wrapper import nested_dict

FILE = CodeTemplate("""\
#include "ATen/Config.h"

#include "TH/TH.h"
${cuda_includes}
#include "ATen/Utils.h"
${copy_includes}

namespace at {

${copy_functions}

}
""")

CUDA_INCLUDES = """\
#undef THNN_
#include "THC/THC.h"
"""

# NB: The copy templates static_cast both dst and src, even though
# technically we also perform a checked_cast_tensor in the prologue
# of the copy (meaning that, hypothetically, an already casted tensor
# is available).  However, in s_copy, the casted tensor is dst, while
# in _s_copy_from, the casted tensor is src.  So that we can reuse the logic
# in both cases, we unconditionally cast both tensors (and rely
# on the surrounding code to establish the necessary invariants.)
Example #22
# Rather than teaching the dispatcher how to extract dispatch keys from types besides Tensor, we
# register an extra kernel for each factory op, under the `BackendSelect` dispatch key. This key
# has higher precedence than dispatch keys for actual backends, so a BackendSelect kernel will
# front-run other kernels registered for the same op.
#
# It's the responsibility of the BackendSelect factory kernels to extract the "real" dispatch
# key from non-Tensor arguments, and redispatch using this key. Here, we generate implementations
# that obtain the key from the TensorOptions argument that's passed to all Tensor factory ops.
#
# BackendSelectRegister.cpp will contain both the BackendSelect kernels and registrations for
# all factory functions that have the 'backend_select' flag in their native_functions.yaml definition.

from code_template import CodeTemplate
from function_wrapper import gen_dispatch_key_init

GENERATED_COMMENT = CodeTemplate("@" + "generated from ${filename}")

FUNCTION_REGISTRATION = CodeTemplate("""\
  m.impl_UNBOXED("aten::${op_name_with_overload_name}", ${function_name});
""")

FUNCTION_DEFINITION = CodeTemplate("""\
// ${schema_string}
Tensor ${function_name}(${method_formals}) {
  static OperatorHandle OP = c10::Dispatcher::singleton().findSchemaOrThrow("aten::${name}", "${overload_name}");
  ${dispatch_key_init}
  return OP.callWithDispatchKey<${formals_types}>(_dk, ${actuals});
}
""")

Example #23
        else:
            self.filenames.remove(filename)

    def check_all_files_written(self):
        if len(self.undeclared_files) > 0:
            raise Exception("trying to write files {} which are not ".format(
                self.undeclared_files) +
                            "in the list of outputs this script produces. " +
                            "use will_write to add them.")
        if len(self.filenames) > 0:
            raise Exception("Outputs declared with 'will_write' were " +
                            "never written: {}".format(self.filenames))


TEMPLATE_PATH = options.source_path + "/templates"
TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
SPARSE_TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH +
                                                 "/SparseTypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_DEFAULT_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.h")
TYPE_DEFAULT_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.cpp")
REGISTRATION_DECLARATIONS_H = CodeTemplate.from_file(
    TEMPLATE_PATH + "/RegistrationDeclarations.h")

TENSOR_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorBody.h")
TENSOR_METHODS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorMethods.h")

FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Functions.h")

LEGACY_TH_FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH +
                                               "/LegacyTHFunctions.h")
Example #24
        else:
            self.filenames.remove(filename)

    def check_all_files_written(self):
        if len(self.undeclared_files) > 0:
            raise Exception("trying to write files {} which are not ".format(
                self.undeclared_files) +
                            "in the list of outputs this script produces. " +
                            "use will_write to add them.")
        if len(self.filenames) > 0:
            raise Exception("Outputs declared with 'will_write' were " +
                            "never written: {}".format(self.filenames))


TEMPLATE_PATH = options.source_path + "/templates"
TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
SPARSE_TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH +
                                                 "/SparseTypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_DEFAULT_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.h")
TYPE_DEFAULT_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.cpp")
OPS_ALREADY_MOVED_TO_C10_CPP = CodeTemplate.from_file(
    TEMPLATE_PATH + "/OpsAlreadyMovedToC10.cpp")

TENSOR_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorBody.h")
TENSOR_METHODS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorMethods.h")

FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Functions.h")

LEGACY_TH_FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH +
                                               "/LegacyTHFunctions.h")
Example #25
from code_template import CodeTemplate
from function_wrapper import nested_dict

FILE = CodeTemplate("""\
// ${generated_comment}

#include "ATen/Config.h"

#include "TH/TH.h"
${cuda_includes}
#include "ATen/Utils.h"
${copy_includes}

namespace at {

${copy_functions}

}
""")

CUDA_INCLUDES = """\
#undef THNN_
#include "THC/THC.h"
"""

# NB: The copy templates static_cast both dst and src, even though
# technically we also perform a checked_cast_tensor in the prologue
# of the copy (meaning that, hypothetically, an already casted tensor
# is available).  However, in s_copy, the casted tensor is dst, while
# in _s_copy_from, the casted tensor is src.  So that we can reuse the logic
# in both cases, we unconditionally cast both tensors (and rely
Example #26
# This script generates BackendSelectRegister.cpp, which is used for dispatching purposes.
# We process only those factory functions that have the 'backend_select' flag in their
# native_functions.yaml definition.

from code_template import CodeTemplate

GENERATED_COMMENT = CodeTemplate("@" + "generated from ${filename}")

FUNCTION_REGISTRATION = CodeTemplate("""\
.op(torch::RegisterOperators::options()
  .schema("${schema_string}")
  .impl_unboxedOnlyKernel<decltype(${function_name}), &${function_name}>(DispatchKey::BackendSelect)
  .aliasAnalysis(AliasAnalysisKind::FROM_SCHEMA))
""")

FUNCTION_DEFINITION = CodeTemplate("""\
Tensor ${function_name}(${method_formals}) {
  DispatchKey key = options.computeDispatchKey();
  static auto op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::${name}", "${overload_name}");
  return op.callUnboxedWithDispatchKey<${formals_types}>(key, ${type_method_actuals});
}
""")


def needs_backend_select(declaration_option):
    # We register an op under the BackendSelect dispatch key
    # if a TensorOptions argument has been gathered from its declared args.
    # We skip all the 'new_*' and '*_like' ops as they are special-cased and avoid dispatching.
    # See TypeDefault.cpp.
    if declaration_option['name'].endswith(
            '_like') or declaration_option['name'].startswith('new_'):
        return False
Example #27
                  '--source-path',
                  help='path to source directory for tensorlib',
                  action='store',
                  default='.')
parser.add_option('-o',
                  '--output-dependencies',
                  help='only output a list of dependencies',
                  action='store')
parser.add_option('-n', '--no-cuda', action='store_true')

options, files = parser.parse_args()
if options.output_dependencies is not None:
    output_dependencies_file = open(options.output_dependencies, 'w')

TEMPLATE_PATH = options.source_path + "/templates"
GENERATOR_DERIVED = CodeTemplate.from_file(TEMPLATE_PATH +
                                           "/GeneratorDerived.h")
STORAGE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH +
                                             "/StorageDerived.cpp")
STORAGE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/StorageDerived.h")

TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.h")
TYPE_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.cpp")

TENSOR_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH +
                                            "/TensorDerived.cpp")
TENSOR_SPARSE_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorSparse.cpp")
TENSOR_DENSE_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorDense.cpp")

TENSOR_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorDerived.h")
Example #28
parser.add_argument("--install_dir",
                    default=".",
                    help="where to put generated file")
parser.add_argument("--aten_root", default="", help="root directory of aten")
args, _ = parser.parse_known_args()

if args.aten_root:
    if not os.path.exists(args.aten_root):
        raise ValueError('aten_root ({}) does not exist'.format(
            args.aten_root))
    sys.path.append(os.path.join(args.aten_root, 'src', 'ATen'))
    from code_template import CodeTemplate as CT
else:
    from src.ATen.code_template import CodeTemplate as CT

OP_TEMPLATE = CT.from_file(
    os.path.join(args.template_dir, 'aten_op_template.h'))

try:
    # use faster C loader if available
    from yaml import CLoader as Loader
except ImportError:
    from yaml import Loader


def write(filename, s):
    with open(filename, "w") as f:
        f.write(s)


def read(filename):
    with open(filename, "r") as f:
Example #29
    def emit_body(env, option):
        body = []
        # arguments are potentially duplicated because of one argument
        # referencing another
        seen_names = set()
        # only generate checked casts the first time we see it
        count = 0
        for arg in option['arguments']:
            if is_real_argument_to_wrapper(arg):
                count += 1
            if not arg['name'] in seen_names and requires_checked_cast(arg):
                seen_names.add(arg['name'])
                if arg.get('allocate', False):
                    allocation = CodeTemplate(
                        ALLOC_WRAP[arg['type']]).substitute(env)
                    body.append('auto {}_ = {};'.format(
                        arg['name'], allocation))
                    body.append('auto {} = Tensor({}_,false);'.format(
                        arg['name'], arg['name']))
                else:
                    check_cast = CHECKED_CAST[arg['type']].substitute(
                        env, arg_name=arg['name'], arg_pos=count)
                    body.append("auto {}_ = {};".format(
                        arg['name'], check_cast))
                if 'resize' in arg:
                    resize = arg['resize']
                    if type(resize) == str:
                        body.append("{}.resize_as_({});".format(
                            arg['name'], resize))
                    else:
                        dims = ['{}.size({})'.format(name, dim)
                                for name, dim in resize]
                        body.append("{}.resize_({{ {} }});".format(
                            arg['name'], ','.join(dims)))
                if arg.get('cpu_zero', False):
                    body.append("{}.zero_();".format(arg['name']))

        option['actuals'] = get_arguments(option)
        is_cuda = backend_type_env['Backend'] == 'CUDA'
        is_nn = option['mode'] == 'NN'
        if is_cuda or is_nn:
            option['actuals'] = ['context->thc_state'] + option['actuals']

        if is_nn:
            prefix = 'THNN_{}'.format(env['THType'])
        else:
            prefix = env['THTensor'] + '_'

        call = prefix + CodeTemplate("${cname}(${actuals})").substitute(env)
        ret = option['return']
        if ret['kind'] == 'arguments':
            body.append(call + ";")
            arguments_indices = ret['arguments']
            if len(arguments_indices) == 1:
                arg = option['arguments'][arguments_indices[0]]
                body.append("return {};".format(arg['name']))
            else:
                arguments = [option['arguments'][argi]
                             for argi in arguments_indices]
                types = [to_return_type(arg, option) for arg in arguments]
                # TODO: check for move semantics...
                names = [arg['name'] for arg in arguments]
                body.append(CodeTemplate("return std::tuple<${types}>(${names});").substitute(
                    types=types, names=names))
        elif ret['kind'] == 'type':
            if ret['type'] == 'THTensor*':
                body.append(CodeTemplate(
                    "return Tensor(new ${Tensor}(context,${arg_name}),false);").substitute(env, arg_name=call))
            else:
                # we use int64_t for long in the API, so correct it here...
                if is_actual_return_long(ret):
                    call = "static_cast<int64_t>({})".format(call)
                body.append("return {};".format(call))
        else:
            raise Exception("NYI - return handling")
        return body
Example #30
# Rather than teaching the dispatcher how to extract dispatch keys from types besides Tensor, we
# register an extra kernel for each factory op, under the `BackendSelect` dispatch key. This key
# has higher precedence than dispatch keys for actual backends, so a BackendSelect kernel will
# front-run other kernels registered for the same op.
#
# It's the responsibility of the BackendSelect factory kernels to extract the "real" dispatch
# key from non-Tensor arguments, and redispatch using this key. Here, we generate implementations
# that obtain the key from the TensorOptions argument that's passed to all Tensor factory ops.
#
# BackendSelectRegister.cpp will contain both the BackendSelect kernels and registrations for
# all factory functions that have the 'backend_select' flag in their native_functions.yaml definition.

from code_template import CodeTemplate
from function_wrapper import gen_dispatch_key_init

GENERATED_COMMENT = CodeTemplate("@" + "generated from ${filename}")

FUNCTION_REGISTRATION = CodeTemplate("""\
.op(torch::RegisterOperators::options()
  .schema("${schema_string}")
  .impl_unboxedOnlyKernel<decltype(${function_name}), &${function_name}>(DispatchKey::BackendSelect)
  .aliasAnalysis(AliasAnalysisKind::FROM_SCHEMA))
""")

FUNCTION_DEFINITION = CodeTemplate("""\
// ${schema_string}
Tensor ${function_name}(${method_formals}) {
  static OperatorHandle OP = c10::Dispatcher::singleton().findSchemaOrThrow("aten::${name}", "${overload_name}");
  ${dispatch_key_init}
  globalLegacyTypeDispatch().initForDispatchKey(_dk);
  return OP.callUnboxedWithDispatchKey<${formals_types}>(_dk, ${type_method_actuals});
Example #31
from collections import OrderedDict
from code_template import CodeTemplate

import sys
if sys.version_info[0] == 3:
    string_type = str
else:
    string_type = basestring

# temporary things we cannot handle
EXCLUDE_PATTERN = "bernoulli.*"
# what has to be done to add an Operation ...
# 1. if broadcasting or without the full list of arguments, add a non-virtual
#    declaration under Type.h
TYPE_METHOD_DECLARATION_NON_VIRTUAL = CodeTemplate("""\
${return_type} ${api_name}(${formals_with_defaults}) const;
""")
# 2. broadcasting functions are implemented in Type.cpp
TYPE_METHOD_DEFINITION_BROADCAST = CodeTemplate("""\
${return_type} Type::${api_name}(${formals}) const {
    Tensor ${broadcast_returns};
    std::tie(${broadcast_returns}) = ${broadcast_function}(${broadcast_actuals}, "${api_name}");
    return ${method_prefix_derived}${api_name}(${broadcast_modified_actuals});
}
""")
# 3. add virtual dispatch declaration to Type.h and impl to Type.cpp (this is usually
#    a default impl because actual implementations are in the derived Types); method_prefix_derived
#    is present for providing a base-class definition for a derived-type method with a prefix.
TYPE_METHOD_DECLARATION = CodeTemplate("""\
virtual ${return_type} ${method_prefix_derived}${api_name}(${formals_with_defaults}) const;
""")
Example #32
# Rather than teaching the dispatcher how to extract dispatch keys from types besides Tensor, we
# register an extra kernel for each factory op, under the `BackendSelect` dispatch key. This key
# has higher precedence than dispatch keys for actual backends, so a BackendSelect kernel will
# front-run other kernels registered for the same op.
#
# It's the responsibility of the BackendSelect factory kernels to extract the "real" dispatch
# key from non-Tensor arguments, and redispatch using this key. Here, we generate implementations
# that obtain the key from the TensorOptions argument that's passed to all Tensor factory ops.
#
# BackendSelectRegister.cpp will contain both the BackendSelect kernels and registrations for
# all factory functions that have the 'backend_select' flag in their native_functions.yaml definition.

from code_template import CodeTemplate
from function_wrapper import gen_dispatch_key_init

GENERATED_COMMENT = CodeTemplate("@" + "generated from ${filename}")

UNBOXEDONLY_FUNCTION_REGISTRATION = CodeTemplate("""\
  m.impl_UNBOXED("aten::${op_name_with_overload_name}", ${function_name});
""")

FUNCTION_REGISTRATION = CodeTemplate("""\
  m.impl("aten::${op_name_with_overload_name}", c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(${function_name})));
""")

FUNCTION_DEFINITION = CodeTemplate("""\
// ${schema_string}
Tensor ${function_name}(${method_formals}) {
  static auto op = c10::Dispatcher::singleton()
    .findSchemaOrThrow("aten::${name}", "${overload_name}")
    .typed<${function_cpp_signature}>();
Example #33
    def emit_body(env, option):
        body = []
        body += handle_sparse(env, option)
        body += handle_zero_dim(env, option)
        only_zero_dim_check = handle_only_zero_dim(env, option)
        if only_zero_dim_check is not None:
            # code below the only_zero_dim_check is unreachable, so we do not need to generate the rest.
            body += only_zero_dim_check
            return body

        body += handle_buffers(env, option)
        # arguments are potentially duplicated because of one argument
        # referencing another
        seen_names = set()
        seen_tensorlists = set()
        count = 0
        output_count = 0

        # scalar_check is the heuristic condition for when a result may be a scalar.
        # if there is a THSize* argument, then its dimensions are used to determine scalar-ness;
        # otherwise, it is true if all the input tensors are scalars.
        scalar_check_is_from_size = False
        scalar_check_is_from_option = False
        scalar_check = None
        scalar_check_opt = option.get('scalar_check')
        if scalar_check_opt is not None:
            if isinstance(scalar_check_opt, bool):
                scalar_check = str(scalar_check_opt).lower()
            else:
                scalar_check = scalar_check_opt
            scalar_check_is_from_option = True

        for arg in option['arguments']:
            if is_real_argument_to_wrapper(arg):
                count += 1
            if arg['type'] == 'THSize*' and not scalar_check_is_from_option:
                scalar_check_is_from_size = True
                scalar_check = '{}.size() == 0'.format(arg['name'])
            if arg['type'] == 'TensorList':
                seen_tensorlists.add(arg['name'])

            wrap_dim_target = arg.get('wrap_dim', None)
            if wrap_dim_target is not None:
                # for Tensors, "name_" is the TensorImpl, but for TensorLists, it is a
                # std::vector of TH*s.  Since TH*s have different dimension rules, we use
                # "name" instead, but keep "name_" for tensors to avoid an extra function call.
                if wrap_dim_target not in seen_tensorlists:
                    wrap_dim_target = wrap_dim_target + "_"
                body.append("{} = maybe_wrap_dim({}, {});".format(
                    arg['name'], arg['name'], wrap_dim_target))

            # only generate checked casts the first time we see it
            if arg['name'] not in seen_names and requires_checked_cast(arg):
                seen_names.add(arg['name'])

                # make a new allocation of TensorImpl, then wrap a Tensor around it.
                if arg.get('allocate', False):
                    body += allocate_arg(env, arg, output_count)
                    output_count += 1
                # extract the TensorImpl from an existing tensor (or Storage, etc.)
                else:
                    # special case where we allow undefined Tensors, and thus
                    # the checked cast succeeds even if the Tensor is not
                    # defined
                    null_okay = 'true' if nullable_argument(arg) else 'false'
                    default_init = []
                    if 'default_init' in arg:
                        default_init.append(arg['default_init'])

                    noelem_to_empty = 'is_noelem_tensor_size(size)' if 'size' in seen_names else 'false'
                    check_cast = CHECKED_CAST[arg['type']].substitute(
                        env,
                        arg_name=arg['name'],
                        arg_pos=count,
                        null_okay=null_okay,
                        default_init=default_init,
                        size=arg.get('size'),
                        noelem_to_empty=noelem_to_empty)
                    body.append("auto {}_ = {};".format(
                        arg['name'], check_cast))
                if drop_argument(arg, option) or replace_with_null(arg):
                    body.append("(void) {}_; //silence unused warning".format(
                        arg['name']))

                initializers = []

                # resize tensors for special ops that require it
                if 'resize' in arg:
                    initializers.append(resize_arg(arg))

                # also special handling where we zero some outputs.
                if arg.get('zero', False) or (arg.get('cpu_zero', False)
                                              and not is_cuda):
                    initializers.append("{}.zero_();".format(arg['name']))

                # only initialize non-null arguments
                if nullable_argument(arg) and len(initializers) > 0:
                    body.append(
                        CONDITIONAL_INITIALIZER.substitute({
                            'name': arg['name'],
                            'initializer': initializers,
                        }))
                else:
                    body += initializers

                # for out-of-place: isScalar() for all input tensors is and'd to form
                # the test for whether the output is also a scalar
                # for in-place: isScalar() shouldn't change as a result of the operation
                if (not arg.get('output') and 'Tensor' in arg['type']
                        and 'TensorList' not in arg['type']
                        and 'THS' not in arg['type']
                        and not scalar_check_is_from_size
                        and not scalar_check_is_from_option
                        and not option['inplace']):
                    check = '{}->isScalar()'.format(arg['name'] + '_')
                    if nullable_argument(arg):
                        check = '(!{} || {})'.format(arg['name'] + '_', check)
                    scalar_check = (check if scalar_check is None else
                                    scalar_check + ' && ' + check)

        # cimpls, if it exists, contains the underlying C function names and
        # arguments. Otherwise use option
        cimpls = option.get('cimpls', [option])
        calls = [handle_call(env, option, cimpl) for cimpl in cimpls]

        ret = option['return']

        if ret['kind'] == 'arguments':
            if 'aten_custom_call' in option:
                # all aten_custom_call bodies handle settings on their own.
                scalar_check = None
                body.append(
                    CodeTemplate(option['aten_custom_call']).substitute(env))
            else:
                body.extend([call + ';' for call in calls])
            arguments_indices = ret['arguments']
            arguments = [
                option['arguments'][argi] for argi in arguments_indices
            ]
            if scalar_check is not None:
                if len(arguments) > 1:
                    body.append("bool maybe_scalar = {};".format(scalar_check))
                    scalar_check = 'maybe_scalar'
                for arg in arguments:
                    stmt = "{}_->maybeScalar({});".format(
                        arg['name'], scalar_check)
                    if nullable_argument(arg):
                        stmt = "if ({}_) {}".format(arg['name'], stmt)
                    body.append(stmt)
            if len(arguments_indices) == 1:
                arg = arguments[0]
                body.append("return {};".format(arg['name']))
            else:
                types = [
                    to_return_type(arg, option)['type'] for arg in arguments
                ]
                # TODO: check for move semantics...
                names = [arg['name'] for arg in arguments]
                body.append(
                    CodeTemplate(
                        "return std::tuple<${types}>(${names});").substitute(
                            types=types, names=names))
        elif ret['kind'] == 'type':
            assert len(calls) == 1
            call = calls[0]
            if ret['type'] == 'THTensor*':
                maybe_scalar = "->maybeScalar({})".format(scalar_check) \
                               if scalar_check is not None \
                               else ""
                return_tensor = "return Tensor((new ${Tensor}(context,${arg_name}))${maybe_scalar},false);"
                body.append(
                    CodeTemplate(return_tensor).substitute(
                        env, arg_name=call, maybe_scalar=maybe_scalar))
            # return the same underlying Tensor type for both real and accreal; this ensures
            # e.g. x.sum(0) and x.sum() return the same type.
            elif ret['type'] == 'accreal' or ret['type'] == 'real':
                body.append('return scalarTensor({});'.format(call))
            else:
                # we use int64_t for long in the API, so correct it here...
                if is_actual_return_long(ret):
                    call = "static_cast<int64_t>({})".format(call)
                body.append("return {};".format(call))
        else:
            raise Exception("NYI - return handling")
        return body
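Of the initializers above, the list-valued 'resize' spec (handled inline in the earlier emit_body listings and via resize_arg here) is the least obvious: each (name, dim) pair contributes one size expression to the generated resize_ call. In isolation, a sketch with invented names:

resize = [('self', 0), ('mat2', 1)]  # hypothetical spec from the declaration
dims = ['{}.size({})'.format(name, dim) for name, dim in resize]
print('{}.resize_({{ {} }});'.format('result', ','.join(dims)))
# result.resize_({ self.size(0),mat2.size(1) });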
Example #34
        else:
            self.filenames.remove(filename)

    def check_all_files_written(self):
        if len(self.undeclared_files) > 0:
            raise Exception("trying to write files {} which are not ".format(
                self.undeclared_files) +
                            "in the list of outputs this script produces. " +
                            "use will_write to add them.")
        if len(self.filenames) > 0:
            raise Exception("Outputs declared with 'will_write' were " +
                            "never written: {}".format(self.filenames))


TEMPLATE_PATH = options.source_path + "/templates"
GENERATOR_DERIVED = CodeTemplate.from_file(TEMPLATE_PATH +
                                           "/GeneratorDerived.h")
STORAGE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH +
                                             "/StorageDerived.cpp")
STORAGE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/StorageDerived.h")

TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
SPARSE_TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH +
                                                 "/SparseTypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.h")
TYPE_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.cpp")

TENSOR_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH +
                                            "/TensorDerived.cpp")
TENSOR_DENSE_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorDense.cpp")
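Every listing above follows the same two-step pattern: load a template with CodeTemplate.from_file, then fill its ${...} placeholders with substitute. A self-contained round trip closes the loop; the template file and all substituted values are invented here, and list values are assumed to join with ", " in argument position:

import os
import tempfile

from code_template import CodeTemplate

path = os.path.join(tempfile.gettempdir(), 'Example.h')
with open(path, 'w') as f:
    f.write('virtual ${return_type} ${api_name}(${formals});\n')

EXAMPLE_H = CodeTemplate.from_file(path)
print(EXAMPLE_H.substitute(return_type='Tensor', api_name='add',
                           formals=['const Tensor & other', 'Scalar alpha']))
# virtual Tensor add(const Tensor & other, Scalar alpha);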