def __init__(self, op_info, **kwargs):
    """Build snippets for an inline (in-binary) constant tensor.

    Emits a CreateTensorBinarySnippet for the op's single output tensor and
    registers a WeightSnippet (the flattened constant data) with the
    weight container supplied via ``kwargs['weight_container']``.

    Fix: removed leftover "hpplinux" debug ``print`` statements that dumped
    internal state to stdout on every construction.
    """
    out_tensor_info = op_info.output_tensors[0]
    out_tname, out_dtype, tensor_shape = (out_tensor_info.name,
                                          out_tensor_info.dtype,
                                          out_tensor_info.shape)
    parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                    op_info.op_attr)
    ref_count = parser.get('ref_counts', [0])[0]
    pre_tname = self._prepare_tensor_name(out_tname)
    inline_tname = self._prepare_inline_array_name(out_tname)
    # Constant payload, flattened for emission as a C array initializer.
    value = op_info.op_attr['value'].value.np_array.flatten()
    self._snippet = CreateTensorBinarySnippet(out_tname,
                                              tensor_shape=tensor_shape,
                                              tf_dtype=out_dtype,
                                              sptr_name=pre_tname,
                                              inline_name=inline_tname,
                                              ref_count=ref_count)
    weight_snippet = WeightSnippet(inline_tname, out_dtype, tensor_shape, value)
    # NOTE: the weight data lives in a separate container (rendered to the
    # weight header), hence the kwargs-based workaround.
    weight_container = kwargs['weight_container']
    weight_container.add_snippet(weight_snippet)
def __init__(self, op_info, **kwargs):
    """Build the code snippet for a Reshape op.

    Reads optional 'ref_counts' / 'to_eval' hints from the op attributes
    under the ref-count optimizer's namescope.
    """
    _Operator.__init__(self)
    attr_parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                         op_info.op_attr)
    in_names = [t_info.name for t_info in op_info.input_tensors]
    out_name = op_info.output_tensors[0].name
    self._snippet = ReshapeOpSnippet(
        in_names,
        out_name,
        attr_parser.get('ref_counts', [0])[0],
        attr_parser.get('to_eval', False),
    )
def __init__(self, op_info, **kwargs):
    """Build the code snippet for a Requantize op.

    Output tensor 0 carries the quantized values; output tensor 1 carries
    the range tensor, hence the two distinct dtypes.
    """
    _Operator.__init__(self)
    in_names = [t_info.name for t_info in op_info.input_tensors]
    out_names = [t_info.name for t_info in op_info.output_tensors]
    quant_dtype = op_info.output_tensors[0].dtype
    range_dtype = op_info.output_tensors[1].dtype
    attr_parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                         op_info.op_attr)
    self._snippet = RequantizeOpSnippet(in_names,
                                        out_names,
                                        quant_dtype,
                                        range_dtype,
                                        attr_parser.get('ref_counts', []),
                                        attr_parser.get('to_eval', False))
def test_kwarg_parser():
    """NamescopedKWArgsParser lookup rules.

    - ``get`` on a missing key returns None
    - un-prefixed keys ('global') are visible through the parser
    - a namescoped key ('private__var1') shadows the plain key ('var1')
    - item access on a missing key raises KeyError

    Fix: the original ``try/except KeyError: pass`` silently passed even
    when no exception was raised; the ``else`` branch now makes the
    expected-exception check explicit.
    """
    op_attr = {
        'global': 10,
        'var1': 1,
        'private__var1': 2,
    }
    parser = NamescopedKWArgsParser('private', op_attr)
    assert parser.get('no_such_thing') is None
    assert parser.get('global') == 10
    assert parser.get('var1') == 2
    try:
        parser['no_such_thing']
    except KeyError:
        pass
    else:
        raise AssertionError("expected KeyError for missing key 'no_such_thing'")
def __init__(self, op_info, **kwargs):
    """Build the code snippet for a QuantizedMaxPool op.

    Pulls window size, strides and padding mode from the op attributes;
    padding arrives as bytes and is decoded to a utf8 string.
    """
    _Operator.__init__(self)
    in_names = [t_info.name for t_info in op_info.input_tensors]
    out_names = [t_info.name for t_info in op_info.output_tensors]
    out_dtype = op_info.output_tensors[0].dtype
    attrs = op_info.op_attr
    window = attrs['ksize'].value.ints_value
    stride_vals = attrs['strides'].value.ints_value
    pad_mode = attrs['padding'].value.decode('utf8')
    attr_parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, attrs)
    self._snippet = QuantizedMaxPoolSnippet(in_names, out_names, out_dtype,
                                            window, stride_vals, pad_mode,
                                            attr_parser.get('ref_counts', []),
                                            attr_parser.get('to_eval', False))
def __init__(self, op_info, **kwargs):
    """Build the code snippet for a Min op."""
    _Operator.__init__(self)
    in_names = [t_info.name for t_info in op_info.input_tensors]
    out_info = op_info.output_tensors[0]
    out_name = out_info.name
    out_dtype = out_info.dtype
    # FIXME: automatic alloc for uTensor fail
    # Scalar outputs come through with an empty shape; substitute [1].
    out_shape = out_info.shape or [1]
    attr_parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                         op_info.op_attr)
    self._snippet = MinOpSnippet(in_names, out_name, out_dtype, out_shape,
                                 attr_parser.get('ref_counts', [0])[0],
                                 attr_parser.get('to_eval', False))
def __init__(self, op_info, **kwargs):
    """Build the code snippet for a QuantizedRelu op.

    Output tensor 0 carries the quantized activation; the remaining
    outputs' dtypes are passed separately.
    """
    _Operator.__init__(self)
    in_names = [t_info.name for t_info in op_info.input_tensors]
    out_names = [t_info.name for t_info in op_info.output_tensors]
    in_dtype = op_info.input_tensors[0].dtype
    qout_dtype = op_info.output_tensors[0].dtype
    # NT: why separate this out?
    # DB: I don't know, it's in the uTensor C code
    extra_out_dtypes = [t_info.dtype
                        for t_info in op_info.output_tensors[1:]]
    attr_parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                         op_info.op_attr)
    self._snippet = QuantizedReluOpSnippet(in_names, out_names, in_dtype,
                                           extra_out_dtypes, qout_dtype,
                                           attr_parser.get('ref_counts', []),
                                           attr_parser.get('to_eval', False))
def __init__(self, op_info, **kwargs):
    """Build snippets for a constant tensor stored as an on-device idx file.

    Writes the constant value to ``<idx_dir>/<name>.idx`` and emits a
    CreateTensorIdxSnippet that loads it from ``embed_data_dir`` at runtime
    (defaults to ``/fs/<idx_dir>``).
    """
    out_info = op_info.output_tensors[0]
    out_tname = out_info.name
    out_dtype = out_info.dtype
    attr_parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                         op_info.op_attr)
    ref_count = attr_parser.get('ref_counts', [0])[0]
    idx_fname = "{}.idx".format(self._tf_prepare_tensor_name(out_tname))
    idx_dir = kwargs['idx_dir']
    embed_data_dir = kwargs.get('embed_data_dir',
                                os.path.join("/fs", idx_dir))
    self._snippet = CreateTensorIdxSnippet(embed_data_dir, out_tname,
                                           idx_fname=idx_fname,
                                           np_dtype=out_dtype,
                                           ref_count=ref_count)
    # Persist the constant payload next to the generated sources.
    self._tf_save_data(os.path.join(idx_dir, idx_fname),
                       op_info.op_attr['value'].value)
def __init__(self, op_info, **kwargs):
    """Build the code snippet for an Add op.

    Fixes:
    - ``print(sef.__dict__)`` / ``print(sef._snippet.__dict__)`` raised
      NameError (``sef`` is a typo for ``self``), crashing every
      construction of this operator; the leftover "hpplinux" debug print
      block is removed entirely.
    """
    _Operator.__init__(self)
    inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
    output = op_info.output_tensors[0].name
    # dtype of the first input drives the generated template's type.
    tf_dtype = op_info.input_tensors[0].dtype
    parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                    op_info.op_attr)
    ref_count = parser.get('ref_counts', [0])[0]
    to_eval = parser.get('to_eval', False)
    self._snippet = AddOpSnippet(inputs, output, tf_dtype, ref_count, to_eval)
def __init__(self, methods, kwargs):
    """Construct the transformer pipeline.

    ``kwargs`` is a dict of namescoped keyword arguments::

        { "<name_scope>__kwargname": kwarg_value, ... }

    where ``<name_scope>`` is the KWARGS_NAMESCOPE of the target
    transformer, e.g. ``{'refcnt__kwarg': 3}`` routes ``kwarg=3`` to
    RefCntOptimizer.
    """
    pipeline = []
    for name in methods:
        trans_cls = self._TRANSFORMER_MAP[name]
        scoped = NamescopedKWArgsParser(trans_cls.KWARGS_NAMESCOPE, kwargs)
        pipeline.append(trans_cls(**scoped.as_dict()))
    self._pipeline = pipeline
def __init__(self, op_info, **kwargs):
    """Build the code snippet for a quantized Conv2D op.

    Inputs 0 and 1 are the image and filter tensors respectively; strides
    and padding come from the op attributes (padding is decoded from
    bytes to a utf8 string).
    """
    _Operator.__init__(self)
    in_names = [t_info.name for t_info in op_info.input_tensors]
    out_names = [t_info.name for t_info in op_info.output_tensors]
    in_dtype = op_info.input_tensors[0].dtype
    filter_dtype = op_info.input_tensors[1].dtype
    out_dtypes = [t_info.dtype for t_info in op_info.output_tensors]
    attrs = op_info.op_attr
    stride_vals = attrs["strides"].value.ints_value
    pad_mode = attrs["padding"].value.decode('utf8')
    attr_parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, attrs)
    # NOTE: Conv2DOpSnippent is the (misspelled) name of the project class;
    # do not "fix" the spelling here.
    self._snippet = Conv2DOpSnippent(in_names, out_names,
                                     stride_vals, pad_mode,
                                     in_dtype=in_dtype,
                                     filter_dtype=filter_dtype,
                                     out_dtypes=out_dtypes,
                                     ref_counts=attr_parser.get('ref_counts', []),
                                     to_eval=attr_parser.get('to_eval', False))
def apply(self, ugraph):
    """Generate source and header files

    Walks ``ugraph`` in topological order, collecting one snippet per
    operator, then renders three files into ``self.model_dir``:
    the model source (.cpp), the model header (.hpp), and — only when
    inline weights were collected — a weight header (_weight.hpp).
    """
    src_fname = self.src_fname
    # 'None' (the string) means "derive the source filename from the graph name".
    if src_fname == 'None':
        src_fname = '{}.cpp'.format(ugraph.name)
    header_snippet = ContextHeaderSnippet(
        '_MODELS_{}'.format(ugraph.name),  # include-guard prefix
        ugraph.name
    )
    # Collects WeightSnippets produced by inline-tensor operators.
    weight_container = ContextGlobalArrayContainer()
    composer = Composer()
    header_fname = '{}.hpp'.format(ugraph.name)
    weight_header_fname = '{}_weight.hpp'.format(ugraph.name)
    container = ContextSnippetsContainer(ugraph.name, header_fname, weight_header_fname)
    opFactory = OperatorFactory()
    # Directory for per-tensor .idx data files written by const-tensor ops.
    if not os.path.exists(os.path.join(self.params_dir, ugraph.name)):
        os.makedirs(os.path.join(self.params_dir, ugraph.name))
    for op_id, op_name in enumerate(ugraph.topo_order):
        op_info = ugraph.ops_info[op_name]
        op_type = op_info.op_type
        # TODO: better abstraction for snippet
        if op_type == "Placeholder":
            # Placeholders get no snippet; they are surfaced through the
            # context/header template variables instead.
            parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                            op_info.op_attr)
            out_tname = op_info.output_tensors[0].name
            ref_count = parser.get('ref_counts', [0])[0]
            container.template_vars["placeholders"].append(out_tname)
            container.template_vars["ref_counts"].append(ref_count)
            header_snippet.template_vars["placeholders"].append(out_tname)
        else:
            # TODO: the operator may correspond to multiple snippets (such as InlinTensor)
            # weight_container is passed to function for workaround
            snippet = opFactory.createOperatorSnippet(
                op_info,
                idx_dir=os.path.join(self.params_dir, ugraph.name),
                embed_data_dir=self.embed_data_dir,
                weight_container=weight_container,
            )
            container.add_snippet(snippet)
        if self.debug_cmt:
            # Bracket each op's generated code with begin/end markers.
            comments = ["<<< Operation id {}: {}".format(op_id, op_name),
                        ">>> Operation id {}: {}".format(op_id + 1, op_name)]
            cmt_snippet = CommentSnippet(comments)
            container.add_snippet(cmt_snippet)
    composer.add_snippet(container)
    # generate cpp/hpp files
    if not os.path.exists(self.model_dir):
        os.makedirs(self.model_dir)
    if weight_container.snippets:
        _logger.info("Generate weight file: %s", weight_header_fname)
        with open(os.path.join(self.model_dir,
                               weight_header_fname), "w") as wf:
            wf.write('// Auto generated by utensor-cli\n\n')
            wf.write(weight_container.render())
    else:
        # No inline weights collected: drop the dangling #include from the
        # generated source so it doesn't reference a file we never wrote.
        container.remove_header('"{}"'.format(weight_header_fname))
    _logger.info("Generate header file: %s", header_fname)
    with open(os.path.join(self.model_dir, header_fname), "w") as wf:
        wf.write('// Auto generated by utensor-cli\n\n')
        wf.write(header_snippet.render())
    _logger.info("Generate source file: %s", src_fname)
    with open(os.path.join(self.model_dir, src_fname), "w") as wf:
        wf.write('// Auto generated by utensor-cli\n\n')
        wf.write(composer.compose())