def wrap(obj: 'OperatorMetatype'):
    """Register *obj* in the operator metatype registry.

    The class is first registered under the externally supplied name
    (``name_``) or, when that is ``None``, under the class's own name.
    Every version-agnostic alias of the metatype is then mapped to *obj*;
    an alias already mapped to a different metatype is a registry
    inconsistency and trips the assertion below.
    """
    registered_name = obj.__name__ if name_ is None else name_
    super_register(obj, registered_name)
    for alias in obj.get_all_aliases():
        canonical = get_version_agnostic_name(alias)
        if canonical in self._op_name_to_op_meta_dict:
            # Same patched op name must always resolve to the same metatype.
            assert self._op_name_to_op_meta_dict[canonical] == obj, \
                "Inconsistent operator metatype registry - single patched op name maps to multiple metatypes!"
        else:
            self._op_name_to_op_meta_dict[canonical] = obj
    return obj
def get_caller_context(self, operator_type: str) -> InputAgnosticOperationExecutionContext:
    """Build the input-agnostic execution context for the current operator call.

    For every scope, the context counts how many times an operator with a
    given (version-agnostic) name has been called so far; that running
    count becomes the call order of the returned context. The counters
    persist until the context is explicitly reset (normally once per model
    iteration, handled inside NNCF). Distinguishing calls by order lets
    several invocations of the same function within one module (e.g.
    repeated `relu` calls, possibly in a loop) each get their own
    compression-layer instance, while still allowing checkpoints to load
    after source changes that do not alter the major call order (such as
    added comments in the model's .py file).
    """
    op_name = get_version_agnostic_name(operator_type)
    call_order = self.get_operator_call_count_in_scope(op_name, self.scope)
    return InputAgnosticOperationExecutionContext(op_name, self.scope, call_order)