Example #1
File: ops.py Project: zzm422/DALI
def _docstring_generator(cls):
    schema = b.GetSchema(cls.__name__)
    ret = schema.Dox()
    ret += '\n'
    ret += """
Parameters
----------
"""
    for arg in schema.GetArgumentNames():
        dtype = schema.GetArgumentType(arg)
        arg_name_doc = "`" + arg + "` : "
        ret += (
            arg_name_doc +
            _type_name_convert_to_string(dtype, schema.IsTensorArgument(arg)))
        if schema.IsArgumentOptional(arg):
            default_value_string = schema.GetArgumentDefaultValueString(arg)
            # Evaluating empty string results in an error
            # so we need to prevent that
            if default_value_string:
                default_value = eval(default_value_string)
            else:
                default_value = default_value_string
            if dtype == DALIDataType.STRING:
                default_value = "\'" + str(default_value) + "\'"
            ret += (", optional, default = " +
                    str(_type_convert_value(dtype, default_value)))
        indent = '\n' + " " * len(arg_name_doc)
        ret += indent
        ret += schema.GetArgumentDox(arg).replace("\n", indent)
        ret += '\n'
    return ret
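
The hanging-indent trick at the end of the loop (replacing every newline in the argument description with '\n' plus as many spaces as the "`arg` : " prefix is wide) is what keeps multi-line descriptions aligned under the parameter name. A minimal, self-contained sketch of the same pattern (the helper name is illustrative, not part of DALI):

def format_param(name, type_str, dox):
    # Build the "`name` : type" heading, then indent every continuation
    # line of the description to the column right after the heading prefix.
    arg_name_doc = "`" + name + "` : "
    indent = "\n" + " " * len(arg_name_doc)
    return arg_name_doc + type_str + indent + dox.replace("\n", indent) + "\n"

print(format_param("shape", "int or list of int",
                   "Output shape.\nCan be a scalar or a list."))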
Example #2
    def __init__(self,
                 source=None,
                 num_outputs=None,
                 *,
                 cycle=None,
                 layout=None,
                 name=None,
                 device="cpu",
                 cuda_stream=None,
                 use_copy_kernel=None,
                 **kwargs):
        self._schema = _b.GetSchema("_ExternalSource")
        self._spec = _b.OpSpec("_ExternalSource")
        self._device = device
        self._layout = layout
        self._cuda_stream = cuda_stream
        self._use_copy_kernel = use_copy_kernel

        callback = _get_callback_from_source(source, cycle)

        if name is not None and num_outputs is not None:
            raise ValueError(
                "`num_outputs` is not compatible with named `ExternalSource`")

        self._name = name
        self._num_outputs = num_outputs
        self._callback = callback

        self._spec.AddArg("device", device)
        for key, value in kwargs.items():
            self._spec.AddArg(key, value)
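
Note the bare * in the signature: everything after it (cycle, layout, name, ...) is keyword-only, so callers cannot pass those arguments positionally by accident. A quick illustration of that Python feature (not DALI code):

def f(source=None, num_outputs=None, *, cycle=None):
    return source, num_outputs, cycle

print(f("callback", 2, cycle=True))  # OK: ('callback', 2, True)
# f("callback", 2, True)  # TypeError: takes from 0 to 2 positional arguments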
Example #3
    def __init__(
            self, source=None, num_outputs=None, *, cycle=None, layout=None, name=None,
            device="cpu", cuda_stream=None, use_copy_kernel=None, batch=None, parallel=None,
            no_copy=None, prefetch_queue_depth=None, **kwargs):
        self._schema = _b.GetSchema("_ExternalSource")
        self._spec = _b.OpSpec("_ExternalSource")
        self._device = device
        self._layout = layout
        self._cuda_stream = cuda_stream
        self._use_copy_kernel = use_copy_kernel

        import nvidia.dali.ops
        kwargs, self._call_args = nvidia.dali.ops._separate_kwargs(kwargs)

        callback = _get_callback_from_source(source, cycle)

        if name is not None and num_outputs is not None:
            raise ValueError("`num_outputs` is not compatible with named `ExternalSource`")

        self._name = name
        self._num_outputs = num_outputs
        self._batch = batch
        self._callback = callback
        self._parallel = parallel
        self._no_copy = no_copy
        self._prefetch_queue_depth = prefetch_queue_depth

        self._spec.AddArg("device", device)
        for key, value in kwargs.items():
            self._spec.AddArg(key, value)
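
Example #3 differs from #2 mainly in the _separate_kwargs call, which partitions the keyword arguments into construction-time arguments and per-call arguments. A hedged sketch of that kind of split (the predicate here is made up; it only stands in for whatever criterion the real helper applies):

def separate_kwargs(kwargs, is_call_arg):
    # Route kwargs whose values satisfy the predicate into call_args,
    # everything else into init_args.
    init_args, call_args = {}, {}
    for key, value in kwargs.items():
        (call_args if is_call_arg(value) else init_args)[key] = value
    return init_args, call_args

init_args, call_args = separate_kwargs(
    {"device": "cpu", "angle": [10.0]}, lambda v: isinstance(v, list))
print(init_args, call_args)  # {'device': 'cpu'} {'angle': [10.0]}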
Example #4
def _docstring_prefix_from_inputs(op_name):
    """
        Generate start of the docstring for `__call__` of Operator `op_name`
        assuming the docstrings were provided for all inputs separately

        Returns the signature of `__call__` and the list of `Args` in the appropriate section
    """
    schema = _b.GetSchema(op_name)
    # Signature
    ret = "__call__(" + schema.GetCallSignatureInputs() + ", **kwargs)\n"
    # __call__ docstring
    ret += "\nOperator call to be used in graph definition.\n"
    # Args section
    ret += """
Args
----
"""
    for i in range(schema.MaxNumInput()):
        optional = i >= schema.MinNumInput()
        input_type_str = schema.GetInputType(i) + _supported_layouts_str(
            schema.GetSupportedLayouts(i))
        ret += _numpydoc_formatter(schema.GetInputName(i), input_type_str,
                                   schema.GetInputDox(i), optional)
        ret += "\n"
    ret += "\n"
    return ret
Example #5
def _docstring_generator_call(op_name):
    """
        Generate full docstring for `__call__` of Operator `op_name`.
    """
    schema = _b.GetSchema(op_name)
    if schema.IsDocPartiallyHidden():
        return ""
    if schema.HasCallDox():
        ret = schema.GetCallDox()
    elif schema.HasInputDox():
        ret = _docstring_prefix_from_inputs(op_name)
    elif schema.CanUseAutoInputDox():
        ret = _docstring_prefix_auto(op_name)
    else:
        op_full_name, _, _ = _process_op_name(op_name)
        ret = "See :meth:`nvidia.dali.ops." + op_full_name + "` class for complete information.\n"
    if schema.AppendKwargsSection():
        # Kwargs section
        tensor_kwargs = _get_kwargs(schema)
        if tensor_kwargs:
            ret += """
Keyword Args
------------
"""
            ret += tensor_kwargs
    return ret
Example #6
def _docstring_prefix_auto(op_name):
    """
        Generate start of the docstring for `__call__` of Operator `op_name`
        with default values. Assumes there will be 0 or 1 inputs
    """
    schema = _b.GetSchema(op_name)
    if schema.MaxNumInput() == 0:
        return """__call__(**kwargs)

Operator call to be used in graph definition. This operator doesn't have any inputs.
"""
    elif schema.MaxNumInput() == 1:
        ret = """__call__(data, **kwargs)

Operator call to be used in graph definition.

Args
----
"""
        dox = "Input to the operator.\n"
        fmt = "TensorList" + _supported_layouts_str(
            schema.GetSupportedLayouts(0))
        ret += _numpydoc_formatter("data", fmt, dox, optional=False)
        return ret
    return ""
Example #7
        def __init__(self, **kwargs):
            self._spec = b.OpSpec(type(self).__name__)
            self._schema = b.GetSchema(type(self).__name__)

            # Get the device argument. We will need this to determine
            # the device that our outputs will be stored on
            if "device" in kwargs.keys():
                self._device = kwargs["device"]
                del kwargs["device"]
            else:
                self._device = op_device
            self._spec.AddArg("device", self._device)

            if "preserve" in kwargs.keys():
                self._preserve = kwargs["preserve"]
            else:
                self._preserve = False
            self._spec.AddArg("preserve", self._preserve)
            self._preserve = self._preserve or self._schema.IsNoPrune()

            # Store the specified arguments
            for key, value in kwargs.items():
                if isinstance(value, list):
                    if not value:
                        raise RuntimeError(
                            "List arguments need to have at least 1 element.")
                dtype = self._schema.GetArgumentType(key)
                converted_value = _type_convert_value(dtype, value)
                self._spec.AddArg(key, converted_value)
Example #8
def main(argv):
    cpu_ops = ops.cpu_ops()
    gpu_ops = ops.gpu_ops()
    mix_ops = ops.mixed_ops()
    support_ops = ops.support_ops()
    all_ops = cpu_ops.union(gpu_ops).union(mix_ops).union(support_ops)
    link_string = '_'
    op_name_max_len = len(max(all_ops, key=len)) + len(link_string)
    name_bar = op_name_max_len * '='
    formater = '{:{c}<{op_name_max_len}}  {:{c}^6}  {:{c}^6}  {:{c}^6}  {:{c}^7} {:{c}^9}\n'
    doc_table = ''
    doc_table += 'The table below lists all available operators and the devices they can operate on.\n\n'
    doc_table += '.. |v| image:: images/tick.gif\n'
    doc_table += formater.format('', '', '', '', '', '', op_name_max_len = op_name_max_len, c='=')
    doc_table += formater.format('Operator name', 'CPU', 'GPU', 'Mixed', 'Support', 'Sequences', op_name_max_len = op_name_max_len, c=' ')
    doc_table += formater.format('', '', '', '', '', '', op_name_max_len = op_name_max_len, c='=')
    for op in sorted(all_ops, key=lambda v: str(v).lower()):
        schema = b.GetSchema(op)
        is_cpu = '|v|' if op in cpu_ops else ''
        is_gpu = '|v|' if op in gpu_ops else ''
        is_mixed = '|v|' if op in mix_ops else ''
        is_support = '|v|' if op in support_ops else ''
        supports_seq = '|v|' if schema.AllowsSequences() or schema.IsSequenceOperator() else ''
        op_string = op + link_string
        op_doc = formater.format(op_string, is_cpu, is_gpu, is_mixed, is_support, supports_seq, op_name_max_len = op_name_max_len, c=' ')
        doc_table += op_doc
    doc_table += formater.format('', '', '', '', '', '', op_name_max_len = op_name_max_len, c='=')
    with open(argv[0], 'w') as f:
        f.write(doc_table)
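
The formater template relies on nested format specifications: in '{:{c}<{op_name_max_len}}', both the fill character c and the field width come from keyword arguments, so the same template renders the '====' rule lines (c='=') and the space-padded data rows (c=' '). A small demonstration:

row = '{:{c}<{w}}  {:{c}^6}\n'
print(row.format('', '', w=13, c='='), end='')               # rule line
print(row.format('Operator name', 'CPU', w=13, c=' '), end='')
print(row.format('Crop', '|v|', w=13, c=' '), end='')
print(row.format('', '', w=13, c='='), end='')               # rule line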
Example #9
File: ops.py Project: davidtranno1/DALI
def _docstring_generator(cls):
    op_name = cls.__name__
    op_dev = []
    if op_name in _cpu_ops:
        op_dev.append("'CPU'")
    if op_name in _gpu_ops:
        op_dev.append("'GPU'")
    if op_name in _mixed_ops:
        op_dev.append("'mixed'")
    if op_name in _support_ops:
        op_dev.append("'support'")
    pre_doc = "This is a " + ", ".join(op_dev) + " operator\n\n"

    schema = b.GetSchema(op_name)
    # insert tag to easily link to the operator
    ret = '.. _' + op_name + ':\n\n'
    ret += pre_doc
    ret += schema.Dox()
    ret += '\n'
    if schema.IsSequenceOperator():
        ret += "\nThis operator expects sequence inputs\n"
    elif schema.AllowsSequences():
        ret += "\nThis operator allows sequence inputs\n"

    if schema.IsDeprecated():
        use_instead = schema.DeprecatedInFavorOf()
        ret += "\n.. warning::\n\n   This operator is now deprecated"
        if use_instead:
            ret +=". Use `" + use_instead + "` instead"
        ret += "\n"

    if schema.IsNoPrune():
        ret += "\nThis operator will **not** be optimized out of the graph.\n"

    ret += """
Parameters
----------
"""
    for arg in schema.GetArgumentNames():
        dtype = schema.GetArgumentType(arg)
        arg_name_doc = "`" + arg + "` : "
        ret += (arg_name_doc +
                _type_name_convert_to_string(dtype, schema.IsTensorArgument(arg)))
        if schema.IsArgumentOptional(arg):
            default_value_string = schema.GetArgumentDefaultValueString(arg)
            # Evaluating empty string results in an error
            # so we need to prevent that
            if default_value_string:
                default_value = eval(default_value_string)
            else:
                default_value = default_value_string
            if dtype == DALIDataType.STRING:
                default_value = "\'" + str(default_value) + "\'"
            ret += (", optional, default = " +
                    str(_type_convert_value(dtype, default_value)))
        indent = '\n' + " " * len(arg_name_doc)
        ret += indent
        ret += schema.GetArgumentDox(arg).replace("\n", indent)
        ret += '\n'
    return ret
Example #10
        def __init__(self, **kwargs):
            self._spec = b.OpSpec(type(self).__name__)
            self._schema = b.GetSchema(type(self).__name__)

            # Get the device argument. We will need this to determine
            # the device that our outputs will be stored on
            if "device" in kwargs.keys():
                self._device = kwargs["device"]
                del kwargs["device"]
            else:
                self._device = op_device
            self._spec.AddArg("device", self._device)

            if "preserve" in kwargs.keys():
                self._preserve = kwargs["preserve"]
            else:
                self._preserve = False
            self._spec.AddArg("preserve", self._preserve)
            self._preserve = self._preserve or self._schema.IsNoPrune()

            # Store the specified arguments
            for key, value in kwargs.items():
                if value is None:
                    # None is not a valid value for any argument type, so treat it
                    # as if the argument was not supplied at all
                    continue

                dtype = self._schema.GetArgumentType(key)
                if isinstance(value, (list, tuple)):
                    if len(value) == 0:
                        self._spec.AddArgEmptyList(key,
                                                   _vector_element_type(dtype))
                        continue
                converted_value = _type_convert_value(dtype, value)
                self._spec.AddArg(key, converted_value)
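
Compared with example #7, this version skips None values (treating them as "argument not supplied") and gives empty lists special handling: an empty list carries no element-type information, so the element type is looked up from the schema and passed explicitly via AddArgEmptyList. A standalone sketch of the same flow, with a plain dict standing in for the OpSpec:

def add_args(spec, kwargs, element_type_of):
    # spec: dict standing in for the OpSpec being filled (illustrative)
    for key, value in kwargs.items():
        if value is None:
            continue  # None means "not supplied"
        if isinstance(value, (list, tuple)) and len(value) == 0:
            spec[key] = ("empty list", element_type_of(key))  # typed empty list
            continue
        spec[key] = value
    return spec

print(add_args({}, {"a": None, "b": [], "c": 3}, lambda k: "float"))
# {'b': ('empty list', 'float'), 'c': 3}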
Example #11
def _docstring_generator(cls):
    """
        Generate the docstring for the class, obtaining it from the schema based on cls.__name__

        This lists all the Keyword args that can be used when creating the operator
    """
    op_name = cls.__name__
    schema = _b.GetSchema(op_name)
    ret = '\n'

    if schema.IsDeprecated():
        use_instead = schema.DeprecatedInFavorOf()
        ret += ".. warning::\n\n   This operator is now deprecated"
        if use_instead:
            ret += ". Use `" + use_instead + "` instead."
        ret += "\n\n"

    ret += schema.Dox()
    ret += '\n'

    supported_statements = []
    if schema.IsSequenceOperator():
        supported_statements.append("expects sequence inputs")
    elif schema.AllowsSequences():
        supported_statements.append("allows sequence inputs")

    if schema.SupportsVolumetric():
        supported_statements.append("supports volumetric data")

    if len(supported_statements) > 0:
        ret += "\nThis operator "
        ret += supported_statements[0]
        if len(supported_statements) > 1:
            ret += " and " + supported_statements[1]
        ret += ".\n"

    if schema.IsNoPrune():
        ret += "\nThis operator will **not** be optimized out of the graph.\n"

    op_dev = []
    if op_name in _cpu_ops:
        op_dev.append("'cpu'")
    if op_name in _gpu_ops:
        op_dev.append("'gpu'")
    if op_name in _mixed_ops:
        op_dev.append("'mixed'")
    ret += """
Supported backends
"""
    for dev in op_dev:
        ret += " * " + dev + "\n"
    ret += "\n"

    ret += """
Keyword args
------------
"""
    ret += _get_kwargs(schema)
    return ret
Example #12
        def __init__(self, **kwargs):
            schema_name = _schema_name(type(self))
            self._spec = _b.OpSpec(schema_name)
            self._schema = _b.GetSchema(schema_name)

            # Get the device argument. We will need this to determine
            # the device that our outputs will be stored on
            if "device" in kwargs.keys():
                self._device = kwargs["device"]
                del kwargs["device"]
            else:
                self._device = op_device
            self._spec.AddArg("device", self._device)

            if "preserve" in kwargs.keys():
                self._preserve = kwargs["preserve"]
            else:
                self._preserve = False
            self._spec.AddArg("preserve", self._preserve)
            self._preserve = self._preserve or self._schema.IsNoPrune()

            # Check for any deprecated arguments that should be replaced or removed
            arg_names = list(kwargs.keys())
            for arg_name in arg_names:
                if not self._schema.IsDeprecatedArg(arg_name):
                    continue
                meta = self._schema.DeprecatedArgMeta(arg_name)
                new_name = meta['renamed_to']
                removed = meta['removed']
                msg = meta['msg']
                if new_name:
                    if new_name in kwargs:
                        raise TypeError(
                            "Operator {} got an unexpected '{}' deprecated argument when '{}' was already provided"
                            .format(type(self).__name__, arg_name, new_name))
                    kwargs[new_name] = kwargs[arg_name]
                    del kwargs[arg_name]
                elif removed:
                    del kwargs[arg_name]

                with warnings.catch_warnings():
                    warnings.simplefilter("default")
                    warnings.warn(msg, DeprecationWarning, stacklevel=2)

            # Store the specified arguments
            for key, value in kwargs.items():
                if value is None:
                    # None is not a valid value for any argument type, so treat it
                    # as if the argument was not supplied at all
                    continue

                dtype = self._schema.GetArgumentType(key)
                if isinstance(value, (list, tuple)):
                    if len(value) == 0:
                        self._spec.AddArgEmptyList(key,
                                                   _vector_element_type(dtype))
                        continue
                converted_value = _type_convert_value(dtype, value)
                self._spec.AddArg(key, converted_value)
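
The deprecated-argument loop follows a rename-or-remove pattern: if the schema maps the old name to a new one, the value is moved over (raising if both names were supplied); if it is marked removed, it is dropped; in either case a DeprecationWarning is emitted. A self-contained sketch of the same flow, with a hard-coded metadata table standing in for schema.DeprecatedArgMeta:

import warnings

DEPRECATED = {  # hypothetical metadata, not DALI's actual table
    "old_arg": {"renamed_to": "new_arg", "removed": False,
                "msg": "`old_arg` is deprecated, use `new_arg`"},
}

def resolve_deprecated(kwargs):
    for old in list(kwargs):
        meta = DEPRECATED.get(old)
        if meta is None:
            continue
        new = meta["renamed_to"]
        if new:
            if new in kwargs:
                raise TypeError("got deprecated '%s' when '%s' was already provided" % (old, new))
            kwargs[new] = kwargs.pop(old)
        elif meta["removed"]:
            del kwargs[old]
        warnings.warn(meta["msg"], DeprecationWarning, stacklevel=2)
    return kwargs

print(resolve_deprecated({"old_arg": 42}))  # {'new_arg': 42}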
Example #13
File: ops.py Project: xuezu29/DALI
    def __init__(self, function, **kwargs):
        self._schema = b.GetSchema("PythonFunctionImpl")
        self._spec = b.OpSpec("PythonFunctionImpl")
        self._device = "cpu"

        for key, value in kwargs.items():
            self._spec.AddArg(key, value)

        self.function = function
Example #14
File: ops.py Project: xeransis/DALI
def _docstring_generator(cls):
    __cpu_ops = set(b.RegisteredCPUOps())
    __cpu_ops.add("TFRecordReader")
    __gpu_ops = set(b.RegisteredGPUOps())
    __mix_ops = set(b.RegisteredMixedOps())
    __support_ops = set(b.RegisteredSupportOps())
    op_name = cls.__name__
    op_dev = []
    if op_name in __cpu_ops:
        op_dev.append("'CPU'")
    if op_name in __gpu_ops:
        op_dev.append("'GPU'")
    if op_name in __mix_ops:
        op_dev.append("'mixed'")
    if op_name in __support_ops:
        op_dev.append("'support'")
    pre_doc = "This is a " + ", ".join(op_dev) + " operator\n\n"

    schema = b.GetSchema(op_name)
    # insert tag to easily link to the operator
    ret = '.. _' + op_name + ':\n\n'
    ret += pre_doc
    ret += schema.Dox()
    ret += '\n'
    if schema.IsSequenceOperator():
        ret += "\nThis operator expects sequence inputs\n"
    elif schema.AllowsSequences():
        ret += "\nThis operator allows sequence inputs\n"
    ret += """
Parameters
----------
"""
    for arg in schema.GetArgumentNames():
        dtype = schema.GetArgumentType(arg)
        arg_name_doc = "`" + arg + "` : "
        ret += (
            arg_name_doc +
            _type_name_convert_to_string(dtype, schema.IsTensorArgument(arg)))
        if schema.IsArgumentOptional(arg):
            default_value_string = schema.GetArgumentDefaultValueString(arg)
            # Evaluating empty string results in an error
            # so we need to prevent that
            if default_value_string:
                default_value = eval(default_value_string)
            else:
                default_value = default_value_string
            if dtype == DALIDataType.STRING:
                default_value = "\'" + str(default_value) + "\'"
            ret += (", optional, default = " +
                    str(_type_convert_value(dtype, default_value)))
        indent = '\n' + " " * len(arg_name_doc)
        ret += indent
        ret += schema.GetArgumentDox(arg).replace("\n", indent)
        ret += '\n'
    return ret
Example #15
    def __init__(self, function, num_outputs=1, **kwargs):
        self._schema = b.GetSchema("PythonFunctionImpl")
        self._spec = b.OpSpec("PythonFunctionImpl")
        self._device = "cpu"

        for key, value in kwargs.items():
            self._spec.AddArg(key, value)

        self.function = function
        self.num_outputs = num_outputs
        self._preserve = True
Example #16
File: ops.py Project: zhangqiang880/DALI
        def __init__(self, **kwargs):
            schema_name = _schema_name(type(self))
            self._spec = _b.OpSpec(schema_name)
            self._schema = _b.GetSchema(schema_name)

            # Get the device argument. We will need this to determine
            # the device that our outputs will be stored on
            if "device" in kwargs.keys():
                self._device = kwargs["device"]
                del kwargs["device"]
            else:
                self._device = op_device
            self._spec.AddArg("device", self._device)

            kwargs, self._call_args = _separate_kwargs(kwargs)

            for k in self._call_args.keys():
                _check_arg_input(self._schema, type(self).__name__, k)

            if "preserve" in kwargs.keys():
                self._preserve = kwargs["preserve"]
                # we don't want to set "preserve" arg twice
                del kwargs["preserve"]
            else:
                self._preserve = False
            self._spec.AddArg("preserve", self._preserve)
            self._preserve = self._preserve or self._schema.IsNoPrune()

            # Check for any deprecated arguments that should be replaced or removed
            arg_names = list(kwargs.keys())
            for arg_name in arg_names:
                if not self._schema.IsDeprecatedArg(arg_name):
                    continue
                meta = self._schema.DeprecatedArgMeta(arg_name)
                new_name = meta['renamed_to']
                removed = meta['removed']
                msg = meta['msg']
                if new_name:
                    if new_name in kwargs:
                        raise TypeError(
                            "Operator {} got an unexpected '{}' deprecated argument when '{}' was already provided"
                            .format(type(self).__name__, arg_name, new_name))
                    kwargs[new_name] = kwargs[arg_name]
                    del kwargs[arg_name]
                elif removed:
                    del kwargs[arg_name]

                with warnings.catch_warnings():
                    warnings.simplefilter("default")
                    warnings.warn(msg, DeprecationWarning, stacklevel=2)

            # Store the specified arguments
            _add_spec_args(self._schema, self._spec, kwargs)
Example #17
def _docstring_generator(cls):
    op_name = _schema_name(cls)
    schema = _b.GetSchema(op_name)
    ret = _docstring_generator_main(cls, "ops")
    if schema.IsDocPartiallyHidden():
        return ret
    ret += """
Keyword args
------------
"""
    ret += _get_kwargs(schema)
    return ret
Example #18
File: ops.py Project: wh-forker/DALI
    def __init__(self, impl_name, function, num_outputs=1, device='cpu', **kwargs):
        self._schema = _b.GetSchema(impl_name)
        self._spec = _b.OpSpec(impl_name)
        self._device = device
        self._impl_name = impl_name

        for key, value in kwargs.items():
            self._spec.AddArg(key, value)

        self.function = function
        self.num_outputs = num_outputs
        self._preserve = True
Example #19
def _docstring_prefix_from_inputs(op_name):
    """
        Generate start of the docstring for `__call__` of Operator `op_name`
        assuming the docstrings were provided for all inputs separately

        Returns the signature of `__call__` and the list of `Args` in the appropriate section
    """
    schema = _b.GetSchema(op_name)
    # Signature
    ret = "__call__(" + schema.GetCallSignatureInputs() + ", **kwargs)\n"
    # __call__ docstring
    ret += "\nOperator call to be used in graph definition.\n"
    # Args section
    ret += _get_inputs_doc(schema)
    return ret
Example #20
        def __init__(self, **kwargs):
            self._spec = b.OpSpec(type(self).__name__)
            self._schema = b.GetSchema(type(self).__name__)

            # Get the device argument. We will need this to determine
            # the device that our outputs will be stored on
            if "device" in kwargs.keys():
                self._device = kwargs["device"]
            else:
                self._spec.AddArg("device", op_device)
                self._device = op_device

            # Store the specified arguments
            for key, value in kwargs.items():
                if isinstance(value, list):
                    if not value:
                        raise RuntimeError(
                            "List arguments need to have at least 1 element.")
                self._spec.AddArg(key, value)
Example #21
File: ops.py Project: yueyedeai/DALI
    def __init__(self, path, index_path, features, **kwargs):
        if isinstance(path, list):
            self._path = path
        else:
            self._path = [path]
        if isinstance(index_path, list):
            self._index_path = index_path
        else:
            self._index_path = [index_path]
        self._schema = b.GetSchema("_TFRecordReader")
        self._spec = b.OpSpec("_TFRecordReader")
        self._device = "cpu"

        self._spec.AddArg("path", self._path)
        self._spec.AddArg("index_path", self._index_path)

        for key, value in kwargs.items():
            self._spec.AddArg(key, value)

        self._features = features
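
The scalar-or-list normalization applied to path and index_path at the top is a recurring idiom in these readers; as a one-liner (illustrative helper, not DALI API):

def as_list(value):
    # Wrap a scalar in a list; pass lists through unchanged.
    return value if isinstance(value, list) else [value]

print(as_list("train.tfrecord"))    # ['train.tfrecord']
print(as_list(["a.idx", "b.idx"]))  # ['a.idx', 'b.idx']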
Example #22
    def __init__(self, path, index_path, features, **kwargs):
        if isinstance(path, list):
            self._path = path
        else:
            self._path = [path]
        if isinstance(index_path, list):
            self._index_path = index_path
        else:
            self._index_path = [index_path]
        self._schema = _b.GetSchema(self._internal_schema_name)
        self._spec = _b.OpSpec(self._internal_schema_name)
        self._device = "cpu"

        self._spec.AddArg("path", self._path)
        self._spec.AddArg("index_path", self._index_path)

        kwargs, self._call_args = _separate_kwargs(kwargs)

        for key, value in kwargs.items():
            self._spec.AddArg(key, value)

        self._features = features
Example #23
File: ops.py Project: zivzone/DALI
def _docstring_prefix_auto(op_name):
    """
        Generate start of the docstring for `__call__` of Operator `op_name`
        with default values. Assumes there will be 0 or 1 inputs
    """
    schema = b.GetSchema(op_name)
    if schema.MaxNumInput() == 0:
        return """__call__(**kwargs)

Operator call to be used in `define_graph` step. This operator does not accept any TensorList inputs.
"""
    elif schema.MaxNumInput() == 1:
        return """__call__(data, **kwargs)

Operator call to be used in `define_graph` step.

Args
----
`data`: TensorList
    Input to the operator.
"""
    return ""
Example #24
def main(out_filename):
    cpu_ops = ops.cpu_ops()
    gpu_ops = ops.gpu_ops()
    mix_ops = ops.mixed_ops()
    all_ops = cpu_ops.union(gpu_ops).union(mix_ops)
    longest_module = max(ops_modules.keys(), key = len)
    link_formatter = ':meth:`{op} <{module}.{op}>`'
    op_name_max_len = len(link_formatter.format(op = "", module = longest_module)) + \
                      2 * len(max(all_ops, key=len))
    name_bar = op_name_max_len * '='
    formater = '{:{c}<{op_name_max_len}} {:{c}^6}  {:{c}^6}  {:{c}^7} {:{c}^9} {:{c}^10}\n'
    doc_table = ''
    doc_table += '.. |v| image:: images/tick.gif\n'
    doc_table += '\n'
    doc_table += formater.format('', '', '', '', '', '', op_name_max_len = op_name_max_len, c='=')
    doc_table += formater.format('Operator name', 'CPU', 'GPU', 'Mixed', 'Sequences', 'Volumetric', op_name_max_len = op_name_max_len, c=' ')
    doc_table += formater.format('', '', '', '', '', '', op_name_max_len = op_name_max_len, c='=')
    for op in sorted(all_ops, key=name_sort):
        schema = b.GetSchema(op)
        op_full_name, submodule, op_name = ops._process_op_name(op)
        is_cpu = '|v|' if op in cpu_ops else ''
        is_gpu = '|v|' if op in gpu_ops else ''
        is_mixed = '|v|' if op in mix_ops else ''
        supports_seq = '|v|' if schema.AllowsSequences() or schema.IsSequenceOperator() else ''
        volumetric = '|v|' if schema.SupportsVolumetric() else ''
        for (module_name, module) in ops_modules.items():
            m = module
            for part in submodule:
                m = getattr(m, part, None)
                if m is None:
                    break
            if m is not None and hasattr(m, op_name):
                submodule_str = ".".join([*submodule])
                op_string = link_formatter.format(op = op_full_name, module = module_name)
        op_doc = formater.format(op_string, is_cpu, is_gpu, is_mixed, supports_seq, volumetric, op_name_max_len = op_name_max_len, c=' ')
        doc_table += op_doc
    doc_table += formater.format('', '', '', '', '', '', op_name_max_len = op_name_max_len, c='=')
    with open(out_filename, 'w') as f:
        f.write(doc_table)
Example #25
File: ops.py Project: zivzone/DALI
def _docstring_generator_call(op_name):
    """
        Generate full docstring for `__call__` of Operator `op_name`.
    """
    schema = b.GetSchema(op_name)
    if schema.HasCallDox():
        ret = schema.GetCallDox()
    elif schema.HasInputDox():
        ret = _docstring_prefix_from_inputs(op_name)
    elif schema.CanUseAutoInputDox():
        ret = _docstring_prefix_auto(op_name)
    else:
        ret = "Please refer to class :meth:`nvidia.dali.ops." + op_name + "` for full documentation.\n"
    if schema.AppendKwargsSection():
        # Kwargs section
        tensor_kwargs = _get_kwargs(schema, only_tensor=True)
        if tensor_kwargs:
            ret += """
Keyword Args
------------
"""
            ret += tensor_kwargs
    return ret
Example #26
File: ops.py Project: wuzhi19931128/DALI
def _docstring_prefix_from_inputs(op_name):
    """
        Generate start of the docstring for `__call__` of Operator `op_name`
        assuming the docstrings were provided for all inputs separately

        Returns the signature of `__call__` and the list of `Args` in the appropriate section
    """
    schema = b.GetSchema(op_name)
    # Signature
    ret = "__call__(" + schema.GetCallSignatureInputs() + ", **kwargs)\n"
    # __call__ docstring
    ret += "\nOperator call to be used in `define_graph` step.\n"
    # Args section
    ret += """
Args
----
"""
    for i in range(schema.MaxNumInput()):
        ret += _numpydoc_formatter(schema.GetInputName(i),
                                   schema.GetInputType(i),
                                   schema.GetInputDox(i))
        ret += "\n"
    ret += "\n"
    return ret
Example #27
    def __init__(self,
                 run_fn,
                 out_types,
                 in_types,
                 outs_ndim,
                 ins_ndim,
                 setup_fn=None,
                 device='cpu',
                 batch_processing=False,
                 **kwargs):
        assert len(in_types) == len(
            ins_ndim
        ), "Number of input types and input dimensions should match."
        assert len(out_types) == len(
            outs_ndim
        ), "Number of output types and output dimensions should match."
        if not isinstance(outs_ndim, list):
            outs_ndim = [outs_ndim]
        if not isinstance(ins_ndim, list):
            ins_ndim = [ins_ndim]
        if not isinstance(out_types, list):
            out_types = [out_types]
        if not isinstance(in_types, list):
            in_types = [in_types]

        setup_fn_address = None
        if setup_fn is not None:
            setup_fn = njit(setup_fn)

            @cfunc(self._setup_fn_sig(), nopython=True)
            def setup_cfunc(out_shapes_ptr, out_ndims_ptr, num_outs,
                            in_shapes_ptr, in_ndims_ptr, num_ins, num_samples):
                out_shapes_np = _get_shape_view(out_shapes_ptr, out_ndims_ptr,
                                                num_outs, num_samples)
                in_shapes_np = _get_shape_view(in_shapes_ptr, in_ndims_ptr,
                                               num_ins, num_samples)
                setup_fn(out_shapes_np, in_shapes_np)

            setup_fn_address = setup_cfunc.address

        out0_lambda, out1_lambda, out2_lambda, out3_lambda, out4_lambda, out5_lambda = self._get_carrays_eval_lambda(
            out_types, outs_ndim)
        in0_lambda, in1_lambda, in2_lambda, in3_lambda, in4_lambda, in5_lambda = self._get_carrays_eval_lambda(
            in_types, ins_ndim)
        run_fn = njit(run_fn)
        run_fn_lambda = self._get_run_fn_lambda(len(out_types), len(in_types))
        if batch_processing:

            @cfunc(self._run_fn_sig(batch_processing=True), nopython=True)
            def run_cfunc(out_ptr, out_shapes_ptr, out_ndims_ptr, num_outs,
                          in_ptr, in_shapes_ptr, in_ndims_ptr, num_ins,
                          num_samples):
                out0 = out1 = out2 = out3 = out4 = out5 = None
                out_shapes_np = _get_shape_view(out_shapes_ptr, out_ndims_ptr,
                                                num_outs, num_samples)
                out_arr = carray(address_as_void_pointer(out_ptr),
                                 (num_outs, num_samples),
                                 dtype=np.int64)
                if num_outs >= 1:
                    out0 = [
                        out0_lambda(address_as_void_pointer(ptr), shape)
                        for ptr, shape in zip(out_arr[0], out_shapes_np[0])
                    ]
                if num_outs >= 2:
                    out1 = [
                        out1_lambda(address_as_void_pointer(ptr), shape)
                        for ptr, shape in zip(out_arr[1], out_shapes_np[1])
                    ]
                if num_outs >= 3:
                    out2 = [
                        out2_lambda(address_as_void_pointer(ptr), shape)
                        for ptr, shape in zip(out_arr[2], out_shapes_np[2])
                    ]
                if num_outs >= 4:
                    out3 = [
                        out3_lambda(address_as_void_pointer(ptr), shape)
                        for ptr, shape in zip(out_arr[3], out_shapes_np[3])
                    ]
                if num_outs >= 5:
                    out4 = [
                        out4_lambda(address_as_void_pointer(ptr), shape)
                        for ptr, shape in zip(out_arr[4], out_shapes_np[4])
                    ]
                if num_outs >= 6:
                    out5 = [
                        out5_lambda(address_as_void_pointer(ptr), shape)
                        for ptr, shape in zip(out_arr[5], out_shapes_np[5])
                    ]

                in0 = in1 = in2 = in3 = in4 = in5 = None
                in_shapes_np = _get_shape_view(in_shapes_ptr, in_ndims_ptr,
                                               num_ins, num_samples)
                in_arr = carray(address_as_void_pointer(in_ptr),
                                (num_ins, num_samples),
                                dtype=np.int64)
                if num_ins >= 1:
                    in0 = [
                        in0_lambda(address_as_void_pointer(ptr), shape)
                        for ptr, shape in zip(in_arr[0], in_shapes_np[0])
                    ]
                if num_ins >= 2:
                    in1 = [
                        in1_lambda(address_as_void_pointer(ptr), shape)
                        for ptr, shape in zip(in_arr[1], in_shapes_np[1])
                    ]
                if num_ins >= 3:
                    in2 = [
                        in2_lambda(address_as_void_pointer(ptr), shape)
                        for ptr, shape in zip(in_arr[2], in_shapes_np[2])
                    ]
                if num_ins >= 4:
                    in3 = [
                        in3_lambda(address_as_void_pointer(ptr), shape)
                        for ptr, shape in zip(in_arr[3], in_shapes_np[3])
                    ]
                if num_ins >= 5:
                    in4 = [
                        in4_lambda(address_as_void_pointer(ptr), shape)
                        for ptr, shape in zip(in_arr[4], in_shapes_np[4])
                    ]
                if num_ins >= 6:
                    in5 = [
                        in5_lambda(address_as_void_pointer(ptr), shape)
                        for ptr, shape in zip(in_arr[5], in_shapes_np[5])
                    ]

                run_fn_lambda(run_fn, out0, out1, out2, out3, out4, out5, in0,
                              in1, in2, in3, in4, in5)
        else:

            @cfunc(self._run_fn_sig(batch_processing=False), nopython=True)
            def run_cfunc(out_ptr, out_shapes_ptr, out_ndims_ptr, num_outs,
                          in_ptr, in_shapes_ptr, in_ndims_ptr, num_ins):
                out0 = out1 = out2 = out3 = out4 = out5 = None
                out_shapes_np = _get_shape_view(out_shapes_ptr, out_ndims_ptr,
                                                num_outs, 1)
                out_arr = carray(address_as_void_pointer(out_ptr),
                                 num_outs,
                                 dtype=np.int64)
                if num_outs >= 1:
                    out0 = out0_lambda(address_as_void_pointer(out_arr[0]),
                                       out_shapes_np[0][0])
                if num_outs >= 2:
                    out1 = out1_lambda(address_as_void_pointer(out_arr[1]),
                                       out_shapes_np[1][0])
                if num_outs >= 3:
                    out2 = out2_lambda(address_as_void_pointer(out_arr[2]),
                                       out_shapes_np[2][0])
                if num_outs >= 4:
                    out3 = out3_lambda(address_as_void_pointer(out_arr[3]),
                                       out_shapes_np[3][0])
                if num_outs >= 5:
                    out4 = out4_lambda(address_as_void_pointer(out_arr[4]),
                                       out_shapes_np[4][0])
                if num_outs >= 6:
                    out5 = out5_lambda(address_as_void_pointer(out_arr[5]),
                                       out_shapes_np[5][0])

                in0 = in1 = in2 = in3 = in4 = in5 = None
                in_shapes_np = _get_shape_view(in_shapes_ptr, in_ndims_ptr,
                                               num_ins, 1)
                in_arr = carray(address_as_void_pointer(in_ptr),
                                num_ins,
                                dtype=np.int64)
                if num_ins >= 1:
                    in0 = in0_lambda(address_as_void_pointer(in_arr[0]),
                                     in_shapes_np[0][0])
                if num_ins >= 2:
                    in1 = in1_lambda(address_as_void_pointer(in_arr[1]),
                                     in_shapes_np[1][0])
                if num_ins >= 3:
                    in2 = in2_lambda(address_as_void_pointer(in_arr[2]),
                                     in_shapes_np[2][0])
                if num_ins >= 4:
                    in3 = in3_lambda(address_as_void_pointer(in_arr[3]),
                                     in_shapes_np[3][0])
                if num_ins >= 5:
                    in4 = in4_lambda(address_as_void_pointer(in_arr[4]),
                                     in_shapes_np[4][0])
                if num_ins >= 6:
                    in5 = in5_lambda(address_as_void_pointer(in_arr[5]),
                                     in_shapes_np[5][0])

                run_fn_lambda(run_fn, out0, out1, out2, out3, out4, out5, in0,
                              in1, in2, in3, in4, in5)

        self._impl_name = "NumbaFuncImpl"
        self._schema = _b.GetSchema(self._impl_name)
        self._spec = _b.OpSpec(self._impl_name)
        self._device = device

        kwargs, self._call_args = ops._separate_kwargs(kwargs)

        for key, value in kwargs.items():
            self._spec.AddArg(key, value)

        self.run_fn = run_cfunc.address
        self.setup_fn = setup_fn_address
        self.out_types = out_types
        self.in_types = in_types
        self.outs_ndim = outs_ndim
        self.ins_ndim = ins_ndim
        self.num_outputs = len(out_types)
        self.batch_processing = batch_processing
        self._preserve = True
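
The mechanism underneath examples #27 and #28 is Numba's @cfunc decorator, which compiles a Python function to a C-callable whose .address can be handed to native code, together with carray, which views a raw pointer as a NumPy array without copying. A minimal sketch of just that mechanism, with a made-up signature unrelated to DALI's:

import numpy as np
from numba import cfunc, carray, types

# C signature: void fn(double *data, int n)
sig = types.void(types.CPointer(types.float64), types.intc)

@cfunc(sig, nopython=True)
def scale_inplace(ptr, n):
    arr = carray(ptr, (n,), dtype=np.float64)  # zero-copy view over the pointer
    for i in range(n):
        arr[i] *= 2.0

print(hex(scale_inplace.address))  # address that native code can call back into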
Example #28
    def __init__(self,
                 run_fn,
                 out_types,
                 in_types,
                 outs_ndim,
                 ins_ndim,
                 setup_fn=None,
                 device='cpu',
                 batch_processing=False,
                 blocks=None,
                 threads_per_block=None,
                 **kwargs):
        if device == 'gpu':
            self._check_minimal_numba_version()
            self._check_cuda_compatibility()

        assert len(in_types) == len(ins_ndim), (
            "Number of input types "
            "and input dimensions should match.")
        assert len(out_types) == len(outs_ndim), (
            "Number of output types "
            "and output dimensions should match.")

        if 'float16' in dir(numba_types):
            for t in [*in_types, *out_types]:
                if t == dali_types.FLOAT16:
                    raise RuntimeError("Numba does not support float16 for "
                                       "current Python version. "
                                       "Python 3.7 or newer is required")

        if device == 'gpu':
            assert batch_processing is False, (
                "Currently batch processing for GPU "
                "is not supported.")
            assert len(blocks) == 3, (
                "`blocks` array should contain 3 numbers, "
                f"while received: {len(blocks)}")
            for i, block_dim in enumerate(blocks):
                assert block_dim > 0, ("All dimensions should be positive. "
                                       "Value specified in `blocks` at index "
                                       f"{i} is nonpositive: {block_dim}")

            assert len(threads_per_block) == 3, (
                "`threads_per_block` array "
                "should contain 3 numbers, "
                f"while received: {len(threads_per_block)}")
            for i, threads in enumerate(threads_per_block):
                assert threads > 0, (
                    "All dimensions should be positive. "
                    "Value specified in `threads_per_block` at index "
                    f"{i} is nonpositive: {threads}")

        if not isinstance(outs_ndim, list):
            outs_ndim = [outs_ndim]
        if not isinstance(ins_ndim, list):
            ins_ndim = [ins_ndim]
        if not isinstance(out_types, list):
            out_types = [out_types]
        if not isinstance(in_types, list):
            in_types = [in_types]

        self._impl_name = "NumbaFuncImpl"
        self._schema = _b.GetSchema(self._impl_name)
        self._spec = _b.OpSpec(self._impl_name)
        self._device = device

        kwargs, self._call_args = ops._separate_kwargs(kwargs)

        for key, value in kwargs.items():
            self._spec.AddArg(key, value)

        if device == 'gpu':
            self.run_fn = self._get_run_fn_gpu(run_fn, in_types + out_types,
                                               ins_ndim + outs_ndim)
            self.setup_fn = None
        else:
            self.run_fn = self._get_run_fn_cpu(run_fn, out_types, in_types,
                                               outs_ndim, ins_ndim,
                                               batch_processing)
            self.setup_fn = self._get_setup_fn_cpu(setup_fn)
        self.out_types = out_types
        self.in_types = in_types
        self.outs_ndim = outs_ndim
        self.ins_ndim = ins_ndim
        self.num_outputs = len(out_types)
        self.batch_processing = batch_processing
        self._preserve = True
        self.blocks = blocks
        self.threads_per_block = threads_per_block
Example #29
def _docstring_generator_main(cls, api):
    """
        Generate the docstring for the class, obtaining it from the schema based on cls.__name__
        This lists all the Keyword args that can be used when creating the operator
    """
    op_name = _schema_name(cls)
    schema = _b.GetSchema(op_name)
    ret = '\n'

    if schema.IsDeprecated():
        use_instead = _op_name(schema.DeprecatedInFavorOf(), api)
        ret += ".. warning::\n\n   This operator is now deprecated"
        if use_instead:
            ret +=". Use :meth:`" + use_instead + "` instead."
        explanation = schema.DeprecationMessage()
        if explanation:
            indent = "\n" + " " * 3
            ret += indent
            ret += indent
            explanation = explanation.replace("\n", indent)
            ret += explanation
        ret += "\n\n"

    ret += schema.Dox()
    ret += '\n'

    if schema.IsDocPartiallyHidden():
        return ret

    supported_statements = []
    if schema.IsSequenceOperator():
        supported_statements.append("expects sequence inputs")
    elif schema.AllowsSequences():
        supported_statements.append("allows sequence inputs")

    if schema.SupportsVolumetric():
        supported_statements.append("supports volumetric data")

    if len(supported_statements) > 0:
        ret += "\nThis operator "
        ret += supported_statements[0]
        if len(supported_statements) > 1:
            ret += " and " + supported_statements[1]
        ret += ".\n"

    if schema.IsNoPrune():
        ret += "\nThis operator will **not** be optimized out of the graph.\n"

    op_dev = []
    if op_name in _cpu_ops:
        op_dev.append("'cpu'")
    if op_name in _gpu_ops:
        op_dev.append("'gpu'")
    if op_name in _mixed_ops:
        op_dev.append("'mixed'")
    ret += """
Supported backends
"""
    for dev in op_dev:
        ret += " * " + dev + "\n"
    ret += "\n"
    return ret
Example #30
def python_op_factory(name, schema_name=None, op_device="cpu"):
    class Operator(metaclass=_DaliOperatorMeta):
        def __init__(self, **kwargs):
            schema_name = _schema_name(type(self))
            self._spec = _b.OpSpec(schema_name)
            self._schema = _b.GetSchema(schema_name)

            # Get the device argument. We will need this to determine
            # the device that our outputs will be stored on
            if "device" in kwargs.keys():
                self._device = kwargs["device"]
                del kwargs["device"]
            else:
                self._device = op_device
            self._spec.AddArg("device", self._device)

            if "preserve" in kwargs.keys():
                self._preserve = kwargs["preserve"]
            else:
                self._preserve = False
            self._spec.AddArg("preserve", self._preserve)
            self._preserve = self._preserve or self._schema.IsNoPrune()

            # Check for any deprecated arguments that should be replaced or removed
            arg_names = list(kwargs.keys())
            for arg_name in arg_names:
                if not self._schema.IsDeprecatedArg(arg_name):
                    continue
                meta = self._schema.DeprecatedArgMeta(arg_name)
                new_name = meta['renamed_to']
                removed = meta['removed']
                msg = meta['msg']
                if new_name:
                    if new_name in kwargs:
                        raise TypeError(
                            "Operator {} got an unexpected '{}' deprecated argument when '{}' was already provided"
                            .format(type(self).__name__, arg_name, new_name))
                    kwargs[new_name] = kwargs[arg_name]
                    del kwargs[arg_name]
                elif removed:
                    del kwargs[arg_name]

                with warnings.catch_warnings():
                    warnings.simplefilter("default")
                    warnings.warn(msg, DeprecationWarning, stacklevel=2)

            # Store the specified arguments
            for key, value in kwargs.items():
                if value is None:
                    # None is not a valid value for any argument type, so treat it
                    # as if the argument was not supplied at all
                    continue

                dtype = self._schema.GetArgumentType(key)
                if isinstance(value, (list, tuple)):
                    if len(value) == 0:
                        self._spec.AddArgEmptyList(key,
                                                   _vector_element_type(dtype))
                        continue
                converted_value = _type_convert_value(dtype, value)
                self._spec.AddArg(key, converted_value)

        @property
        def spec(self):
            return self._spec

        @property
        def schema(self):
            return self._schema

        @property
        def device(self):
            return self._device

        @property
        def preserve(self):
            return self._preserve

        def __call__(self, *inputs, **kwargs):
            if (len(inputs) > self._schema.MaxNumInput()
                    or len(inputs) < self._schema.MinNumInput()):
                raise ValueError(("Operator {} expects from {} to " +
                                  "{} inputs, but received {}.").format(
                                      type(self).__name__,
                                      self._schema.MinNumInput(),
                                      self._schema.MaxNumInput(), len(inputs)))

            inputs = _preprocess_inputs(inputs, self.__class__.__name__,
                                        self._device, self._schema)

            # Build input sets, most of the time we only have one
            input_sets = []
            if self._detect_multiple_input_sets(inputs):
                arg_list_len = self._check_common_length(inputs)
                packed_inputs = self._unify_lists(inputs, arg_list_len)
                input_sets = self._repack_input_sets(packed_inputs)
            else:
                input_sets = [inputs]

            # Create OperatorInstance for every input set
            op_instances = []
            for input_set in input_sets:
                op_instances.append(
                    _OperatorInstance(input_set, self, **kwargs))
                op_instances[-1].generate_outputs()

            # Tie the instances together
            relation_id = op_instances[0].id
            for op in op_instances:
                op.relation_id = relation_id

            # If we don't have multiple input sets, flatten the result
            if len(op_instances) == 1:
                return op_instances[0].unwrapped_outputs
            outputs = []
            for op in op_instances:
                outputs.append(op.outputs)
            return self._repack_output_sets(outputs)

        # Check if any of the inputs is a list
        def _detect_multiple_input_sets(self, inputs):
            return any(isinstance(input, list) for input in inputs)

        # Check that all lists representing multiple input sets have the same length and return it
        def _check_common_length(self, inputs):
            arg_list_len = max(self._safe_len(input) for input in inputs)
            for input in inputs:
                if isinstance(input, list):
                    if len(input) != arg_list_len:
                        raise ValueError(
                            ("All argument lists for Multpile Input Sets used "
                             + "with operator {} must have the same length"
                             ).format(type(self).__name__))
            return arg_list_len

        def _safe_len(self, input):
            if isinstance(input, _DataNode):
                return 1
            else:
                return len(input)

        # Pack single _DataNodes into lists, so they are treated as Multiple Input Sets
        # consistently with the ones already present
        def _unify_lists(self, inputs, arg_list_len):
            result = ()
            for input in inputs:
                if isinstance(input, list):
                    result = result + (input, )
                else:
                    result = result + ([input] * arg_list_len, )
            return result

        # Zip the list from [[arg0, arg0', arg0''], [arg1, arg1', arg1''], ...]
        # to [(arg0, arg1, ...), (arg0', arg1', ...), (arg0'', arg1'', ...)]
        def _repack_input_sets(self, inputs):
            return self._repack_list(inputs, tuple)

        # Unzip the list from [[out0, out1, out2], [out0', out1', out2'], ...]
        # to [[out0, out0', ...], [out1, out1', ...], [out2, out2', ...]]
        # Assume that all elements of input have the same length
        # If the inputs were 1-elem lists, return just a list, that is:
        # [[out0], [out0'], [out0''], ...] -> [out0, out0', out0'', ...]
        def _repack_output_sets(self, outputs):
            if len(outputs) > 1 and len(outputs[0]) == 1:
                output = []
                for elem in outputs:
                    output.append(elem[0])
                return output
            return self._repack_list(outputs, list)

        # Repack list from [[a, b, c], [a', b', c'], ....]
        # to [fn(a, a', ...), fn(b, b', ...), fn(c, c', ...)]
        # where fn can be `tuple` or `list`
        # Assume that all elements of input have the same length
        def _repack_list(self, sets, fn):
            output_list = []
            arg_list_len = len(sets[0])
            for i in range(arg_list_len):
                output_list.append(fn(input_set[i] for input_set in sets))
            return output_list

    Operator.__name__ = str(name)
    Operator.schema_name = schema_name or Operator.__name__
    # The autodoc doesn't generate doc for something that doesn't match the module name
    if _b.GetSchema(Operator.schema_name).IsInternal():
        Operator.__module__ = Operator.__module__ + ".internal"

    Operator.__call__.__doc__ = _docstring_generator_call(Operator.schema_name)
    return Operator
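
_repack_list is the transpose at the heart of the multiple-input-set machinery; the same zip-like repacking in isolation (illustrative, outside the class):

def repack_list(sets, fn):
    # [[a, b, c], [a2, b2, c2]] -> [fn(a, a2), fn(b, b2), fn(c, c2)]
    return [fn(s[i] for s in sets) for i in range(len(sets[0]))]

print(repack_list([["a", "b"], ["a2", "b2"]], tuple))  # [('a', 'a2'), ('b', 'b2')]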