def common_quantize(cls, in_qtype, out_qtype, node, **kwargs):
    all_nodes = kwargs['all_nodes']
    opts = kwargs['opts']
    G = kwargs['G']
    inputs = [all_nodes[t] for t in node.input]
    x = inputs[0]
    # the importer works with symmetric signed quantization internally
    in_qtype = in_qtype.make_symmetric_signed()
    out_qtype = out_qtype.make_symmetric_signed()
    if cls.is_constant(x):
        # constant input: fold the (de)quantize into the constant itself
        LOG.info("reducing %s to a constant", node.name)
        if out_qtype:
            val = x[0].value_as(out_qtype)
        else:
            val = cls.get_constant(x)
        params = ConstantInputParameters(node.name, value=val,
                                         dims=Dim.unnamed(val.shape),
                                         qtype=out_qtype,
                                         constant_store=G.constant_store)
        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = QRec.scaled(
                in_qs=[out_qtype], out_qs=[out_qtype])
    else:
        # non-constant input: insert a NoOP or an explicit quantize node
        if in_qtype == out_qtype:
            LOG.info('removing (de)quantize node %s with no effect', node.name)
            params = NoOPParameters(node.name, desc="quantize with no effect")
        elif in_qtype.dtype == out_qtype.dtype:
            LOG.info('removing (de)quantize node %s with scale change', node.name)
            params = NoOPParameters(node.name, desc="quantize with scale change")
            out_qtype = in_qtype
        else:
            params = QuantizeParameters(node.name, from_qtype=in_qtype,
                                        to_qtype=out_qtype)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = QRec.scaled(
                in_qs=[in_qtype], out_qs=[out_qtype])
    all_nodes[node.output[0]] = (params, 0, deepcopy(x[2]))
    return params
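
# Illustrative sketch only (not part of the handler above): what converting a tensor
# between two symmetric signed int8 scales amounts to numerically. The helper name and
# parameters are hypothetical; nntool's own QType/QuantizeParameters implementations
# may differ in rounding and clipping details.
def _example_requantize_int8(values, in_scale, out_scale):
    import numpy as np
    # dequantize with the input scale, then requantize with the output scale
    real = values.astype(np.float32) * in_scale
    return np.clip(np.round(real / out_scale), -128, 127).astype(np.int8)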
def _common(cls, node, mode='constant', pads=None, constant_value=0, **kwargs):
    all_nodes = kwargs['all_nodes']
    G = kwargs['G']
    valid_name = kwargs['valid_name']
    inputs = [all_nodes[inp] for inp in node.input]
    x = inputs[0]
    x_shape = x[2].shape
    # interpret the flat pad list as per-axis (before, after) pairs
    apads = np.array(pads).reshape((-1, 2))
    if cls.is_constant(x):
        logger.info("reducing %s to a constant", valid_name)
        val = cls.get_constant(x)
        if mode == 'constant':
            val = np.pad(val, apads, mode=mode, constant_values=constant_value)
        else:
            val = np.pad(val, apads, mode=mode)
        params = ConstantInputParameters(valid_name, value=val,
                                         constant_store=G.constant_store)
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(x_shape))
        return params
    if mode != 'constant':
        raise ValueError('%s - pad mode %s is not supported' % (valid_name, mode))
    if constant_value != 0:
        raise ValueError('%s - only zero padding is supported' % valid_name)
    # drop pads on axes with unknown (None) dimensions such as the batch axis
    trimmed_pads = tuple(
        [pad for idx, pad in enumerate(apads) if x_shape[idx] is not None])
    if all(sum(trimmed_pad) == 0 for trimmed_pad in trimmed_pads):
        params = NoOPParameters(valid_name, desc="eliminated pad of 0")
        pshape = x_shape
    else:
        pshape = [
            dim + sum(apads[idx]) if dim is not None else None
            for idx, dim in enumerate(x_shape)
        ]
        padvals = [(constant_value, constant_value)] * len(trimmed_pads)
        params = PadParameters(valid_name, padding=trimmed_pads, pad_vals=padvals)
    G.add_edge(
        NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pshape))
    return params
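
# Illustrative sketch only: `np.array(pads).reshape((-1, 2))` above treats the flat pad
# list as per-axis (before, after) pairs, which is exactly the layout np.pad expects.
# The function name and values below are made up for demonstration.
def _example_pad_pairs():
    import numpy as np
    pads = [0, 0, 1, 2, 1, 2, 0, 0]          # (before, after) per axis, NHWC-style
    apads = np.array(pads).reshape((-1, 2))   # [[0, 0], [1, 2], [1, 2], [0, 0]]
    x = np.zeros((1, 4, 4, 3))
    return np.pad(x, apads, mode='constant', constant_values=0).shape  # (1, 7, 7, 3)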
def _common(cls, node: TFLiteNode, **kwargs):
    node_opts = node.get_options(CastOptions)
    G = kwargs['G']
    all_nodes = kwargs['all_nodes']
    inputs = [all_nodes[t] for t in node.input]
    x = inputs[0]
    if node_opts:
        in_qtype = QType(
            dtype=TFLiteTensorWrapper.TF_TO_NUMPY_TYPE[node_opts.InDataType()])
        out_qtype = QType(
            dtype=TFLiteTensorWrapper.TF_TO_NUMPY_TYPE[node_opts.OutDataType()])
        return cls.common_quantize(in_qtype, out_qtype, node, **kwargs)
    params = NoOPParameters(node.name, desc='cast with no type information')
    G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    all_nodes[node.output[0]] = (params, 0, deepcopy(x[2]))
    return params
def _common(cls, node, mode='constant', pads=None, constant_value=0, **kwargs):
    all_nodes = kwargs['all_nodes']
    G = kwargs['G']
    valid_name = kwargs['valid_name']
    inputs = [all_nodes[inp] for inp in node.input]
    x = inputs[0]
    x_shape = x[2].shape
    ndim = len(x_shape)
    npad = len(pads) // 2
    if npad != ndim:
        if all(not pad for pad in pads):
            logger.warning(
                f'Pad {valid_name} has {npad} pad values and {ndim} input rank. '
                'Since the padding is zero it is ignored, but this probably indicates '
                'a bug in the ONNX graph.')
        else:
            raise ValueError(
                f'Error in ONNX graph - pad {valid_name} has {npad} pad values '
                f'and {ndim} input rank.')
    # ONNX lays pads out as all the begins followed by all the ends;
    # regroup them into per-axis (before, after) pairs
    apads = np.array([[pads[idx], pads[idx + ndim]] for idx in range(ndim)])
    if cls.is_constant(x):
        logger.info("reducing %s to a constant", valid_name)
        val = cls.get_constant(x)
        if mode == 'constant':
            val = np.pad(val, apads, mode=mode, constant_values=constant_value)
        else:
            val = np.pad(val, apads, mode=mode)
        params = ConstantInputParameters(valid_name, value=val)
        pshape = [
            dim + sum(apads[idx]) if dim is not None else None
            for idx, dim in enumerate(x_shape)
        ]
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pshape), x[3])
        return params
    if mode != 'constant':
        raise ValueError('%s - pad mode %s is not supported' % (valid_name, mode))
    if any(sum(pad) > 0 and x_shape[idx] is None
           for idx, pad in enumerate(apads)):
        raise ValueError(
            f'unknown/batch axis is being padded in {valid_name}. Manipulation of '
            'unknown/batch axis is not supported')
    # drop pads on axes with unknown (None) dimensions such as the batch axis
    trimmed_pads = tuple(
        [pad for idx, pad in enumerate(apads) if x_shape[idx] is not None])
    if all(sum(trimmed_pad) == 0 for trimmed_pad in trimmed_pads):
        params = NoOPParameters(valid_name, desc="eliminated pad of 0")
        pshape = x_shape
    else:
        pshape = [
            dim + sum(apads[idx]) if dim is not None else None
            for idx, dim in enumerate(x_shape)
        ]
        padvals = [(constant_value, constant_value)] * len(trimmed_pads)
        params = PadParameters(valid_name, padding=trimmed_pads, pad_vals=padvals)
    G.add_edge(
        NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pshape), x[3])
    return params
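
# Illustrative sketch only: ONNX stores pads as [x1_begin, x2_begin, ..., x1_end,
# x2_end, ...], so the comprehension above regroups them into per-axis (before, after)
# pairs before handing them to np.pad. The function name and values below are made up
# for demonstration.
def _example_onnx_pads_to_pairs():
    import numpy as np
    pads = [0, 1, 1, 0, 2, 2]                # begins for 3 axes, then ends for 3 axes
    ndim = len(pads) // 2
    apads = np.array([[pads[idx], pads[idx + ndim]] for idx in range(ndim)])
    # apads == [[0, 0], [1, 2], [1, 2]]
    x = np.zeros((2, 3, 3))
    return np.pad(x, apads, mode='constant').shape   # (2, 6, 6)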