def _load_ops():
    """Discover operators registered in the backend and expose them as Python classes.

    Extends the module-level registries (``_cpu_ops``, ``_gpu_ops``, ``_mixed_ops``)
    with the sets reported by the backend, then creates and installs an operator
    class for every registered operator not already present in its target submodule,
    wrapping each one (except ExternalSource) with the fn API as well.
    """
    global _cpu_ops
    global _gpu_ops
    global _mixed_ops
    _cpu_ops = _cpu_ops.union(set(_b.RegisteredCPUOps()))
    _gpu_ops = _gpu_ops.union(set(_b.RegisteredGPUOps()))
    _mixed_ops = _mixed_ops.union(set(_b.RegisteredMixedOps()))
    all_registered = _cpu_ops.union(_gpu_ops).union(_mixed_ops)
    ops_module = sys.modules[__name__]
    for registered_name in all_registered:
        schema = _b.TryGetSchema(registered_name)
        hidden = schema.IsDocHidden() if schema else False
        _full_name, submodule, op_name = _process_op_name(registered_name, hidden)
        target = _internal.get_submodule(ops_module, submodule)
        if hasattr(target, op_name):
            continue
        op_class = python_op_factory(op_name, registered_name, op_device="cpu")
        op_class.__module__ = target.__name__
        setattr(target, op_name, op_class)
        if op_name not in ["ExternalSource"]:
            _wrap_op(op_class, submodule)
        # The operator was inserted into nvidia.dali.ops.hidden module, let's import it
        # here so it would be usable, but not documented as coming from other module
        if hidden:
            visible_parent = _internal.get_submodule(ops_module, submodule[:-1])
            setattr(visible_parent, op_name, op_class)
def _wrap_op(op_class, submodule, parent_module, wrapper_doc):
    """Wrap the DALI Operator with fn API and insert the function into appropriate module.

    Args:
        op_class: Op class to wrap.
        submodule: Additional submodule (scope) the wrapper is placed in.
        parent_module (str): If set to None, the wrapper is placed in the
            nvidia.dali.fn module, otherwise in the specified parent module.
        wrapper_doc (str): Documentation of the wrapper function.
    """
    schema = _b.TryGetSchema(op_class.__name__)
    hidden = schema.IsDocHidden() if schema else False
    fn_name = _to_snake_case(op_class.__name__)
    root = sys.modules[__name__ if parent_module is None else parent_module]
    target = _internal.get_submodule(root, submodule)
    if hasattr(target, fn_name):
        return
    fn_wrapper = _wrap_op_fn(op_class, fn_name, wrapper_doc)
    setattr(target, fn_name, fn_wrapper)
    if submodule:
        fn_wrapper.__module__ = target.__name__
    # Doc-hidden wrappers live in a ``hidden`` submodule; re-export them one
    # level up so they are usable without being documented there.
    if hidden:
        visible_parent = _internal.get_submodule(root, submodule[:-1])
        setattr(visible_parent, fn_name, fn_wrapper)
def _wrap_eager_op(op_class, submodule, wrapper_name, wrapper_doc):
    """Exposes eager operator to the appropriate module
    (similar to :func:`nvidia.dali.fn._wrap_op`).

    Uses ``op_class`` for preprocessing inputs and keyword arguments and filling
    OpSpec for backend eager operators.

    Args:
        op_class: Op class to wrap.
        submodule: Additional submodule (scope).
        wrapper_name: Wrapper name (the same as in fn API).
        wrapper_doc (str): Documentation of the wrapper function.
    """
    op_name = op_class.schema_name
    op_schema = _b.TryGetSchema(op_name)
    # TryGetSchema may return None (other call sites guard with ``if schema else``);
    # skip such operators instead of crashing on attribute access.
    if op_schema is None:
        return
    if (op_schema.IsDeprecated() or op_name in _stateful_operators
            or op_name in _generator_operators):
        # TODO(ksztenderski): For now only exposing stateless operators.
        return
    # If operator is not stateful or a generator expose it as stateless.
    wrapper = _wrap_stateless(op_class, op_name, wrapper_name)
    # Exposing to eager.experimental module.
    eager_module = _internal.get_submodule(sys.modules[__name__], 'experimental')
    op_module = _internal.get_submodule(eager_module, submodule)
    if not hasattr(op_module, wrapper_name):
        wrapper.__name__ = wrapper_name
        wrapper.__qualname__ = wrapper_name
        wrapper.__doc__ = wrapper_doc
        if submodule:
            wrapper.__module__ = op_module.__name__
        setattr(op_module, wrapper_name, wrapper)
def fn_to_op_table(out_filename):
    """Write an RST grid table mapping fn API functions to ops API classes.

    Relies on module-level state: ``all_ops``, ``name_sort``, ``ops_modules``,
    ``link_formatter``, ``op_name_max_len`` and the ``ops``/``b`` modules.

    Args:
        out_filename (str): Path of the file the table is written to.
    """
    formatter = '{:{c}<{op_name_max_len}} {:{c}<{op_name_max_len}}\n'
    doc_table = ''
    doc_table += formatter.format('', '', op_name_max_len=op_name_max_len, c='=')
    doc_table += formatter.format('Function (fn.*)', 'Operator Object (ops.*)',
                                  op_name_max_len=op_name_max_len, c=' ')
    doc_table += formatter.format('', '', op_name_max_len=op_name_max_len, c='=')
    for op in sorted(all_ops, key=name_sort):
        op_full_name, submodule, op_name = ops._process_op_name(op)
        schema = b.TryGetSchema(op)
        # Doc-hidden operators are intentionally left out of the table.
        if schema and schema.IsDocHidden():
            continue
        for module_name, module in ops_modules.items():
            # Walk down the submodule path; give up if any part is missing.
            m = module
            for part in submodule:
                m = getattr(m, part, None)
                if m is None:
                    break
            if m is not None and hasattr(m, op_name):
                op_string = link_formatter.format(op=op_full_name, module=module_name)
                fn_string = link_formatter.format(op=to_fn_name(op_full_name),
                                                 module=to_fn_module(module_name))
                doc_table += formatter.format(fn_string, op_string,
                                              op_name_max_len=op_name_max_len, c=' ')
    doc_table += formatter.format('', '', op_name_max_len=op_name_max_len, c='=')
    # Explicit encoding so the output does not depend on the platform default (PEP 597).
    with open(out_filename, 'w', encoding='utf-8') as f:
        f.write(doc_table)
def _wrap_op(op_class, submodule):
    """Create the fn API wrapper for ``op_class`` and install it under ``submodule``.

    The wrapper gets a snake_case name derived from the operator class name and is
    placed in the given submodule of this (fn) module; nothing happens if a wrapper
    of that name already exists there.
    """
    schema = _b.TryGetSchema(op_class.__name__)
    hidden = schema.IsDocHidden() if schema else False
    snake_name = _to_snake_case(op_class.__name__)
    fn_root = sys.modules[__name__]
    target = _internal.get_submodule(fn_root, submodule)
    if hasattr(target, snake_name):
        return
    func = _wrap_op_fn(op_class, snake_name)
    setattr(target, snake_name, func)
    if submodule:
        func.__module__ = target.__name__
    # Doc-hidden wrappers are additionally re-exported one level up so they stay
    # usable without being documented as coming from the hidden submodule.
    if hidden:
        visible_parent = _internal.get_submodule(fn_root, submodule[:-1])
        setattr(visible_parent, snake_name, func)
def _wrap_eager_op(op_class, submodules, parent_module, wrapper_name, wrapper_doc,
                   make_hidden):
    """Exposes eager operator to the appropriate module
    (similar to :func:`nvidia.dali.fn._wrap_op`).

    Uses ``op_class`` for preprocessing inputs and keyword arguments and filling
    OpSpec for backend eager operators.

    Args:
        op_class: Op class to wrap.
        submodules: Additional submodules (scope).
        parent_module (str): If set to None, the wrapper is placed in
            nvidia.dali.experimental.eager module, otherwise in a specified parent
            module.
        wrapper_name: Wrapper name (the same as in fn API).
        wrapper_doc (str): Documentation of the wrapper function.
        make_hidden (bool): If operator is hidden, we should extract it from hidden
            submodule.
    """
    op_name = op_class.schema_name
    op_schema = _b.TryGetSchema(op_name)
    # TryGetSchema may return None (other call sites guard with ``if schema else``);
    # skip such operators instead of crashing on attribute access.
    if op_schema is None or op_schema.IsDeprecated() or op_name in _excluded_operators:
        return
    if op_name in _stateful_operators:
        wrapper = _wrap_stateful(op_class, op_name, wrapper_name)
        op_module = _get_rng_state_target_module(submodules)
    else:
        if op_name in _iterator_operators:
            wrapper = _wrap_iterator(op_class, op_name, wrapper_name)
        else:
            # If operator is not stateful, generator, deprecated or excluded expose it
            # as stateless.
            wrapper = _wrap_stateless(op_class, op_name, wrapper_name)
        op_module = _get_eager_target_module(parent_module, submodules, make_hidden)
    if not hasattr(op_module, wrapper_name):
        wrapper.__name__ = wrapper_name
        wrapper.__qualname__ = wrapper_name
        wrapper.__doc__ = wrapper_doc
        wrapper._schema_name = op_name
        if submodules:
            wrapper.__module__ = op_module.__name__
        setattr(op_module, wrapper_name, wrapper)
def operations_table(out_filename):
    """Write an RST table of all operators with device support and a short description.

    Relies on module-level state: ``all_ops``, ``cpu_ops``, ``mix_ops``, ``gpu_ops``,
    ``name_sort``, ``ops_modules``, ``link_formatter``, ``op_name_max_len`` and the
    ``ops``/``b`` modules.

    Args:
        out_filename (str): Path of the file the table is written to.
    """
    formatter = '{:{c}<{op_name_max_len}} {:{c}^48} {:{c}<150}\n'
    doc_table = ''
    doc_table += '\n.. currentmodule:: nvidia.dali.fn\n\n'
    doc_table += formatter.format('', '', '', op_name_max_len=op_name_max_len, c='=')
    doc_table += formatter.format('Function', 'Device support', 'Short description',
                                  op_name_max_len=op_name_max_len, c=' ')
    doc_table += formatter.format('', '', '', op_name_max_len=op_name_max_len, c='=')
    for op in sorted(all_ops, key=name_sort):
        op_full_name, submodule, op_name = ops._process_op_name(op)
        schema = b.TryGetSchema(op)
        devices = []
        if op in cpu_ops:
            devices += ['CPU']
        if op in mix_ops:
            devices += ['Mixed']
        if op in gpu_ops:
            devices += ['GPU']
        devices_str = ', '.join(devices)
        if schema:
            # Doc-hidden operators are intentionally left out of the table.
            if schema.IsDocHidden():
                continue
            full_doc = schema.Dox()
        else:
            # No schema registered - fall back to the Python class docstring.
            # getattr replaces the original eval('ops.' + op); registered operator
            # names are plain identifiers, so attribute lookup is equivalent and
            # avoids eval.
            full_doc = getattr(ops, op).__doc__
        short_descr = full_doc.split("\n\n")[0].replace('\n', ' ')
        for module_name, module in ops_modules.items():
            m = module
            for part in submodule:
                m = getattr(m, part, None)
                if m is None:
                    break
            if m is not None and hasattr(m, op_name):
                fn_string = link_formatter.format(op=to_fn_name(op_full_name),
                                                 module=to_fn_module(module_name))
                doc_table += formatter.format(fn_string, devices_str, short_descr,
                                              op_name_max_len=op_name_max_len, c=' ')
    doc_table += formatter.format('', '', '', op_name_max_len=op_name_max_len, c='=')
    # Explicit encoding so the output does not depend on the platform default (PEP 597).
    with open(out_filename, 'w', encoding='utf-8') as f:
        f.write(doc_table)
def main(out_filename):
    """Write the operator support matrix (CPU/GPU/Mixed/Sequences/Volumetric) as RST.

    Relies on the module-level ``ops``/``b`` modules, ``ops_modules`` and
    ``name_sort``.

    Args:
        out_filename (str): Path of the file the table is written to.
    """
    cpu_ops = ops.cpu_ops()
    gpu_ops = ops.gpu_ops()
    mix_ops = ops.mixed_ops()
    all_ops = cpu_ops.union(gpu_ops).union(mix_ops)
    longest_module = max(ops_modules.keys(), key=len)
    link_formatter = ':meth:`{op} <{module}.{op}>`'
    # Column width: room for the longest link target plus the op name used twice.
    op_name_max_len = len(link_formatter.format(op="", module=longest_module)) + \
        2 * len(max(all_ops, key=len))
    formatter = '{:{c}<{op_name_max_len}} {:{c}^6} {:{c}^6} {:{c}^7} {:{c}^9} {:{c}^10}\n'
    doc_table = ''
    doc_table += '.. |v| image:: images/tick.gif\n'
    doc_table += '\n'
    doc_table += formatter.format('', '', '', '', '', '',
                                  op_name_max_len=op_name_max_len, c='=')
    doc_table += formatter.format('Operator name', 'CPU', 'GPU', 'Mixed', 'Sequences',
                                  'Volumetric', op_name_max_len=op_name_max_len, c=' ')
    doc_table += formatter.format('', '', '', '', '', '',
                                  op_name_max_len=op_name_max_len, c='=')
    for op in sorted(all_ops, key=name_sort):
        op_full_name, submodule, op_name = ops._process_op_name(op)
        is_cpu = '|v|' if op in cpu_ops else ''
        is_gpu = '|v|' if op in gpu_ops else ''
        is_mixed = '|v|' if op in mix_ops else ''
        schema = b.TryGetSchema(op)
        if schema:
            supports_seq = '|v|' if schema.AllowsSequences() \
                or schema.IsSequenceOperator() else ''
            volumetric = '|v|' if schema.SupportsVolumetric() else ''
        else:
            supports_seq = ''
            volumetric = ''
        for module_name, module in ops_modules.items():
            m = module
            for part in submodule:
                m = getattr(m, part, None)
                if m is None:
                    break
            if m is not None and hasattr(m, op_name):
                op_string = link_formatter.format(op=op_full_name, module=module_name)
                doc_table += formatter.format(op_string, is_cpu, is_gpu, is_mixed,
                                              supports_seq, volumetric,
                                              op_name_max_len=op_name_max_len, c=' ')
    doc_table += formatter.format('', '', '', '', '', '',
                                  op_name_max_len=op_name_max_len, c='=')
    # Explicit encoding so the output does not depend on the platform default (PEP 597).
    with open(out_filename, 'w', encoding='utf-8') as f:
        f.write(doc_table)