def test_combine1():
    dim1 = Dim.named_ordered(a=1, c=3, b=2)
    dim2 = Dim.named_ordered(a=1, c=3, b=2)
    dim3 = Dim.combine((dim1, dim2), 'c')
    assert dim3.shape == [1, 6, 2]
    dim3.c = 4
    assert dim1.c == 3 and dim2.c == 3

def adjust_in_out_chw(self, G, node, names):
    self.verify_chw(node, names)
    trans = self.get_trans(names, ['c', 'h', 'w'])
    in_dim = node.in_dims[0]
    if in_dim.c != 1:
        self.apply_input_trans(node, trans, index=0)
    else:
        reshape = ReshapeParameters(f'{node.name}_r_chw',
                                    old_shape=in_dim.clone(),
                                    shape=Dim.named_ordered(c=in_dim.c, h=in_dim.h, w=in_dim.w))
        G.insert_node_before(reshape, node, edge_class=NNEdge)
        self.check_quantization(G, node, reshape)
    out_dim = node.out_dims[0]
    if out_dim.c != 1:
        self.apply_output_trans(node, self.invert(trans), index=0)
    else:
        reshape = ReshapeParameters(f'{node.name}_r_{"".join(names)}',
                                    old_shape=Dim.named_ordered(c=out_dim.c, h=out_dim.h, w=out_dim.w),
                                    shape=out_dim.clone())
        G.insert_node_after(node, reshape, edge_class=NNEdge)
        self.check_quantization(G, node, reshape, dir='out')

def test_operation2():
    dim1 = Dim.named_ordered(a=1, c=3, b=2)
    dim2 = Dim.named_ordered(a=1, c=3, b=2)
    dim3 = dim1 - dim2
    assert dim3.is_named
    assert dim3.is_ordered
    assert dim3.size() == 0

def test_operation1():
    dim1 = Dim.named_ordered(a=1, c=3, b=2)
    dim2 = Dim.named_ordered(a=1, c=3, b=2)
    dim3 = dim1 + dim2
    assert dim3.is_named
    assert dim3.is_ordered
    assert dim3.a == 2 and dim3.b == 4 and dim3.c == 6
    assert dim3.shape == [2, 6, 4]
    dim3.a = 2
    assert dim1.a == 1 and dim2.a == 1

def test_paddim():
    dim1 = PadDim(1)
    assert not dim1.is_same
    assert dim1.h == 2 and dim1.w == 2
    assert dim1.l == 1 and dim1.r == 1 and dim1.t == 1 and dim1.b == 1
    assert dim1.numpy_pad_shape(Dim.named_ordered(w=10, h=10)) == [(1, 1), (1, 1)]
    stride_dim = StrideDim(1)
    filt_dim = Conv2DFilterDim(5, 5, 1, 1)
    in_dim = Dim.named_ordered(c=1, h=20, w=20)
    dim1 = PadDim.same()
    dim1.calculate_same(in_dim, filt_dim, stride_dim)
    assert dim1.shape == [2, 2, 2, 2]

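# A minimal sketch of the SAME-padding arithmetic the last assertion above
# relies on (hypothetical standalone helper, not part of nntool): with
# stride 1 the total padding per spatial axis is filt - 1, split as evenly
# as possible between the two sides, so a 5x5 filter on a 20x20 input needs
# 2 on each of top/bottom/left/right, i.e. shape [2, 2, 2, 2].
def _same_pad_1d(in_size, filt, stride):
    out_size = (in_size + stride - 1) // stride  # SAME keeps ceil(in/stride)
    total = max((out_size - 1) * stride + filt - in_size, 0)
    return total // 2, total - total // 2  # (before, after)

assert _same_pad_1d(20, 5, 1) == (2, 2)
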
def test_creation5():
    dim1 = Dim.named_ordered(a=1, c=3, b=2)
    assert not dim1.is_unknown
    assert dim1.is_named
    assert dim1.is_ordered
    assert dim1.a == 1 and dim1.b == 2 and dim1.c == 3
    assert dim1.shape == [1, 3, 2]

def conv(cls, node, **kwargs):
    all_nodes = kwargs['all_nodes']
    G = kwargs['G']
    valid_name = kwargs['valid_name']
    inputs = [all_nodes[inp] for inp in node.input]
    # input N x C x H x W
    x = inputs[0]
    x_rank = len(x[2].shape)
    x_shape = x[2].shape
    spatial_size = x_rank - 2
    assert spatial_size <= 2, "only 1D and 2D convolutions supported"

    # M x C/group x kH x kW
    weights = cls.get_constant(inputs[1])
    out_c = weights.shape[0]
    group = node.attrs.get("group", 1)
    in_c = x_shape[1]
    filt_in_c = in_c // group
    # for a 1D conv the filter has no height dimension
    filt_h = weights.shape[-2] if spatial_size == 2 else 1
    filt_w = weights.shape[-1] if spatial_size >= 1 else 1
    h = 1 if spatial_size <= 1 else x_shape[2]
    w = 1 if spatial_size == 0 else (x_shape[2] if spatial_size == 1 else x_shape[3])

    filt_dim = Conv2DFilterDim(filt_h, filt_w, out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.ONNX_FILTER_ORDER)

    if len(inputs) > 2:
        biases = cls.get_constant(inputs[2])
    else:
        biases = np.zeros([out_c])

    dilations = cls.pad_start_with(node.attrs.get("dilations", [1] * spatial_size), [1], 2)
    strides = cls.pad_start_with(node.attrs.get("strides", [1] * spatial_size), [1], 2)
    pad_dim = cls.calc_pad_dim(node, spatial_size)

    params = Conv2DParameters(valid_name,
                              filt=filt_dim,
                              stride=StrideDim(strides[0], strides[1]),
                              dilation=DilationDim(dilations[0], dilations[1]),
                              groups=group,
                              padding=pad_dim,
                              has_bias=True,
                              in_dims_hint=SparseList([['c', 'h', 'w']]),
                              out_dims_hint=SparseList([['c', 'h', 'w']]),
                              constant_store=G.constant_store)
    params.weights = weights
    params.biases = biases
    in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
    out_dims = params.get_output_size([in_dim])
    pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
    G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params

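# For reference, a hedged sketch (not nntool's implementation) of the output
# size that params.get_output_size([in_dim]) computes above, using the usual
# ONNX convolution formula with explicit (before, after) padding:
def _conv_out_size(in_size, filt, stride, dilation, pad_before, pad_after):
    eff_filt = dilation * (filt - 1) + 1  # dilated (effective) filter size
    return (in_size + pad_before + pad_after - eff_filt) // stride + 1

# e.g. a 3x3 filter with stride 1, dilation 1 and pad 1/1 preserves the size
assert _conv_out_size(20, 3, 1, 1, 1, 1) == 20
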
def pool(cls, node, pool_type=None, **kwargs):
    all_nodes = kwargs['all_nodes']
    G = kwargs['G']
    valid_name = kwargs['valid_name']
    inputs = [all_nodes[inp] for inp in node.input]
    x = inputs[0]
    x_shape = x[2].shape
    x_feature_shape = x_shape[2::]
    in_c = x_shape[1]
    kernel_shape = node.attrs["kernel_shape"]
    spatial_size = len(kernel_shape)
    x_rank = spatial_size + 2
    if spatial_size != 2:
        raise ValueError(valid_name + " with {}D input".format(x_rank))
    h = x_shape[2]
    w = x_shape[3]

    strides = node.attrs.get("strides", [1] * spatial_size)
    stride_is_one = all(stride == 1 for stride in strides)
    dilations = node.attrs.get("dilations", [1] * spatial_size)
    if any(dilation > 1 for dilation in dilations):
        raise ValueError(valid_name + " with dilation not supported")
    # ceil_mode = bool(node.attrs.get("ceil_mode", 0))
    pad_dim = cls.calc_pad_dim(node, spatial_size)

    # Note: This needs to check dilation if it is added
    filter_matches_input = all(
        k_dim >= (x_dim + pad)
        for k_dim, x_dim, pad in zip(kernel_shape, x_feature_shape, [pad_dim.h, pad_dim.w]))

    if filter_matches_input and stride_is_one:
        params = GlobalPoolParameters(valid_name,
                                      pool_type=pool_type,
                                      axis=[1, 2],
                                      keep_dims=True,
                                      in_dims_hint=[['c', 'h', 'w']],
                                      out_dims_hint=[['c', 'h', 'w']])
    else:
        params = PoolingParameters(valid_name,
                                   filt=PoolFilterDim(kernel_shape[0], kernel_shape[1]),
                                   stride=StrideDim(strides[0], strides[1]),
                                   padding=pad_dim,
                                   pool_type=pool_type,
                                   in_dims_hint=[['c', 'h', 'w']],
                                   out_dims_hint=[['c', 'h', 'w']])

    in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
    out_dims = params.get_output_size([in_dim])
    pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
    G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params

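# The GlobalPoolParameters substitution above assumes that a pool whose
# (padded) window covers the whole feature map with stride 1 is the same
# operation as a global reduction over the spatial axes. A quick numpy
# check of that equivalence for average pooling (illustrative only):
import numpy as np

feat = np.random.rand(8, 6, 6)  # c x h x w
window_avg = feat.mean(axis=(1, 2), keepdims=True)  # one 6x6 window, stride 1
assert window_avg.shape == (8, 1, 1)  # matches axis=[1, 2], keep_dims=True
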
def pool2d(cls, node, pool_type=None, **kwargs):
    all_nodes = kwargs['all_nodes']
    G = kwargs['G']
    opts = kwargs['opts']
    node_opts = node.get_options(Pool2DOptions)

    inputs = [all_nodes[inp] for inp in node.input]
    x = inputs[0]
    x = cls.remove_known_batch_dimension(G, x, node)
    x_shape = x[2].shape
    in_b, h, w, in_c = tuple(x_shape)

    filt_h = node_opts.FilterHeight()
    filt_w = node_opts.FilterWidth()
    stride_h = node_opts.StrideH()
    stride_w = node_opts.StrideW()

    pad = cls.get_tf_padding(node_opts.Padding())

    filter_matches_input = h == filt_h and w == filt_w
    stride_is_one = stride_h == 1 and stride_w == 1

    if filter_matches_input and stride_is_one:
        params = GlobalPoolParameters(node.name,
                                      pool_type=pool_type,
                                      axis=[0, 1],
                                      keep_dims=True,
                                      in_dims_hint=[['h', 'w', 'c']],
                                      out_dims_hint=[['h', 'w', 'c']])
    else:
        params = PoolingParameters(node.name,
                                   filt=PoolFilterDim(filt_h, filt_w),
                                   stride=StrideDim(stride_h, stride_w),
                                   padding=pad,
                                   pool_type=pool_type,
                                   in_dims_hint=[['h', 'w', 'c']],
                                   out_dims_hint=[['h', 'w', 'c']])

    if opts.get('load_quantization'):
        G.quantization[NodeId(params)] = cls.load_tf_quantization(node.input, node.output)

    in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
    out_dims = params.get_output_size([in_dim])
    pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
    G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params

def adjust_in_out_order(self, G, node, names, order):
    self.verify_chw(node, names)
    trans = self.get_trans(names, order)
    in_dim = node.in_dims[0]
    if in_dim.c != 1:
        self.apply_input_trans(node, trans, index=0)
    else:
        new_shape = {k: getattr(in_dim, k) for k in order}
        reshape = ReshapeParameters(
            f'{node.name}_r_{"".join(in_dim.order)}_{"".join(order)}',
            old_shape=in_dim.clone(),
            shape=Dim.named_ordered(**new_shape)
        )
        G.insert_node_before(
            reshape, node, edge_class=NNEdge
        )
        node.in_dims_hint[0] = order
        self.check_quantization(G, node, reshape)
    out_dim = node.out_dims[0]
    if out_dim.c != 1:
        self.apply_output_trans(node, self.invert(trans), index=0)
    else:
        old_shape = {k: getattr(out_dim, k) for k in order}
        node.out_dims_hint[0] = order
        reshape = ReshapeParameters(
            f'{node.name}_r_{"".join(names)}',
            old_shape=Dim.named_ordered(**old_shape),
            shape=out_dim.clone()
        )
        G.insert_node_after(
            node, reshape, edge_class=NNEdge
        )
        self.check_quantization(G, node, reshape, direction='out')

def pool(cls, node, pool_type=None, copy_qtype=False, **kwargs):
    all_nodes = kwargs['all_nodes']
    G = kwargs['G']
    valid_name = kwargs['valid_name']
    inputs = [all_nodes[inp] for inp in node.input]
    x = inputs[0]
    x_shape = x[2].shape
    x_feature_shape = x_shape[2::]
    input_rank = len(x_feature_shape)
    in_c = x_shape[1]
    kernel_shape = node.attrs["kernel_shape"]
    kernel_rank = len(kernel_shape)
    if input_rank != kernel_rank:
        raise ValueError(
            f'error in ONNX graph. {pool_type} pool {valid_name} '
            f'has a different input spatial rank {input_rank} to kernel rank {kernel_rank}'
        )
    spatial_size = kernel_rank
    if kernel_rank > 2:
        raise NotImplementedError(
            f'{pool_type} pool {valid_name} is a {kernel_rank}D pool '
            'which is not supported by NNTOOL')

    strides = node.attrs.get("strides", [1] * spatial_size)
    stride_is_one = all(stride == 1 for stride in strides)
    dilations = node.attrs.get("dilations", [1] * spatial_size)
    if any(dilation > 1 for dilation in dilations):
        raise ValueError(valid_name + " with dilation not supported")
    # ceil_mode = bool(node.attrs.get("ceil_mode", 0))
    pad_dim = cls.calc_pad_dim(node, 2)

    if spatial_size == 1:
        strides = [1] + strides
        dilations = [1] + dilations
        kernel_shape = [1] + kernel_shape
        h = 1
        w = x_shape[2]
        x_feature_shape = [1] + x_feature_shape
    else:
        h = x_shape[2]
        w = x_shape[3]

    if pad_dim.is_same:
        pad_dim.calculate_same(
            Dim.named_ordered(h=h, w=w),
            PoolFilterDim(kernel_shape[0], kernel_shape[1]),
            StrideDim(strides[0], strides[1]))

    # Note: This needs to check dilation if it is added
    filter_matches_input = all(
        k_dim >= (x_dim + pad)
        for k_dim, x_dim, pad in zip(kernel_shape, x_feature_shape, [pad_dim.h, pad_dim.w]))

    if filter_matches_input and stride_is_one:
        params = GlobalPoolingParameters(valid_name,
                                         pool_type=pool_type,
                                         axis=[1, 2],
                                         keep_dims=True,
                                         in_dims_hint=[['c', 'h', 'w']],
                                         out_dims_hint=[['c', 'h', 'w']])
    else:
        params = PoolingParameters(valid_name,
                                   filt=PoolFilterDim(kernel_shape[0], kernel_shape[1]),
                                   stride=StrideDim(strides[0], strides[1]),
                                   padding=pad_dim,
                                   pool_type=pool_type,
                                   in_dims_hint=[['c', 'h', 'w']],
                                   out_dims_hint=[['c', 'h', 'w']])

    in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
    out_dims = params.get_output_size([in_dim])
    pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
    G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    all_nodes[node.output[0]] = (params, 0, pout_dims, x[3] if copy_qtype else None)
    return params

def _common(cls, node, **kwargs):
    all_nodes = kwargs['all_nodes']
    G = kwargs['G']
    valid_name = kwargs['valid_name']
    inputs = [all_nodes[inp] for inp in node.input]
    # input N x C x H x W
    x = inputs[0]
    x_rank = len(x[2].shape)
    x_shape = x[2].shape
    real_in_shape = deepcopy(x_shape)
    # conv_shape = [x if idx > 0 and x is not None else 1 for idx, x in enumerate(x_shape)]
    conv_shape = x_shape
    if None in x_shape:
        real_in_shape.remove(None)
    spatial_size = x_rank - 2
    assert spatial_size == 2 or spatial_size == 1, "only 1D and 2D convolutions supported"

    # M x C/group x kH x kW
    weights_node = inputs[1][0]
    weights_node.name = f'{valid_name}_weights'
    weights = cls.get_constant(inputs[1])
    out_c = weights.shape[1]
    group = node.attrs.get("group", 1)
    in_c = conv_shape[-spatial_size-1] if conv_shape[-spatial_size-1] is not None else 1
    filt_out_c = out_c // group
    if in_c != weights.shape[0]:
        raise ValueError(f'node {valid_name} has incorrect input channel '
                         f'dimension {in_c} expecting {weights.shape[0]}')
    if spatial_size == 1:
        filt_w = weights.shape[-1]
        filt_h = 1
        # create a new constant node since we are changing the shape
        weights = np.reshape(weights, (in_c, filt_out_c, filt_h, filt_w))
        weights_node = ConstantInputParameters(f'{valid_name}_weights',
                                               value=weights,
                                               dims=Dim.unnamed(weights.shape))
        cls.record_constant_qrec(inputs[1], weights_node, **kwargs)
    else:
        filt_h = weights.shape[-2]
        filt_w = weights.shape[-1]
    h = 1 if spatial_size == 1 else (conv_shape[-2] if conv_shape[-2] is not None else 1)
    w = conv_shape[-1] if conv_shape[-1] is not None else 1

    filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=in_c)
    filt_dim = filt_dim.impose_order(cls.ONNX_TRANSFILTER_ORDER)

    if len(inputs) > 2:
        biases_node = inputs[2][0]
        biases = cls.get_constant(inputs[2])
    else:
        biases = np.zeros([out_c], dtype=np.float32)
        biases_node = ConstantInputParameters(f'{valid_name}_biases',
                                              value=biases,
                                              dims=Dim.unnamed(biases.shape))

    padding, dilations, strides, output_padding = cls.calc_shapes(
        node, spatial_size, Dim2D((h, w)), Dim2D((filt_h, filt_w)))

    params = TransposeConv2DParameters(valid_name,
                                       filt=filt_dim,
                                       stride=strides,
                                       dilation=dilations,
                                       groups=group,
                                       padding=padding,
                                       has_bias=True,
                                       in_dims_hint=[['c', 'h', 'w'], cls.ONNX_TRANSFILTER_ORDER, ['c']],
                                       out_dims_hint=[['c', 'h', 'w']])

    in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
    w_dim = Dim.named_ordered(out_c=filt_out_c, in_c=in_c, h=filt_h, w=filt_w)
    b_dim = Dim.named_ordered(c=out_c)
    out_dims = params.get_output_size([in_dim, w_dim, b_dim])
    G.add_edge(NNEdge(from_node=weights_node, to_node=params, from_idx=0, to_idx=1))
    G.add_edge(NNEdge(from_node=biases_node, to_node=params, from_idx=0, to_idx=2))

    if conv_shape != real_in_shape:
        # insert reshape from [xx, None, xx, xx] -> [None, xx, xx, xx]
        rbatch_params = ReshapeParameters(G.unique_name(f'{valid_name}_reshape_batchdim'),
                                          old_shape=Dim.unnamed(conv_shape),
                                          shape=Dim.unnamed(real_in_shape))
        G.add_edge(NNEdge(from_node=x[0], to_node=rbatch_params, from_idx=x[1], to_idx=0))
        prev_node = rbatch_params
        prev_idx = 0
    else:
        prev_node = x[0]
        prev_idx = x[1]

    if spatial_size == 1:
        oned_in_shape = [in_c, w]
        twod_in_shape = [in_c, 1, w]
        oned_out_shape = [out_dims[0].c, out_dims[0].w]
        r1_params = ReshapeParameters(f'{valid_name}_reshape2d',
                                      old_shape=Dim.unnamed(oned_in_shape),
                                      shape=Dim.unnamed(twod_in_shape))
        r2_params = ReshapeParameters(f'{valid_name}_reshape1d',
                                      old_shape=out_dims[0],
                                      shape=Dim.unnamed(oned_out_shape))
        G.add_edge(NNEdge(from_node=prev_node, to_node=r1_params, from_idx=prev_idx, to_idx=0))
        G.add_edge(NNEdge(from_node=r1_params, to_node=params, from_idx=0, to_idx=0))
        G.add_edge(NNEdge(from_node=params, to_node=r2_params, from_idx=0, to_idx=0))
        pout_dims = ProvisionalDim([conv_shape[0]] + oned_out_shape)
        all_nodes[node.output[0]] = (r2_params, 0, pout_dims, None)
        return r2_params
    else:
        pout_dims = ProvisionalDim([conv_shape[0]] + out_dims[0].shape)
        G.add_edge(NNEdge(from_node=prev_node, to_node=params, from_idx=prev_idx, to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pout_dims, None)
        return params

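# A hedged sketch of the transpose-convolution output size that
# params.get_output_size() is expected to produce above, following the
# usual ONNX ConvTranspose formula (not nntool's actual implementation;
# pad_total is the sum of the before/after padding on one axis):
def _trans_conv_out_size(in_size, filt, stride, dilation, pad_total, output_padding):
    return (in_size - 1) * stride - pad_total + dilation * (filt - 1) + output_padding + 1

# e.g. stride 2, 4x4 filter, total padding 2 doubles a 10-wide input
assert _trans_conv_out_size(10, 4, 2, 1, 2, 0) == 20
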
def version_1(cls, node: TFLiteNode, **kwargs):
    node_opts = node.get_options(Conv2DOptions)
    G = kwargs['G']
    opts = kwargs['opts']
    all_nodes = kwargs['all_nodes']

    inputs = [all_nodes[t] for t in node.input]
    x = inputs[0]
    x_shape = x[2].shape
    in_b, h, w, in_c = tuple(x_shape)

    filt = inputs[1]
    weights_node = filt[0]
    filt_shape = filt[2].shape
    # ['out_c', 'h', 'w', 'in_c']
    filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

    # get filter dimensions
    if filt_h > h or filt_w > w:
        LOG.warning(
            "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
            node.name, filt_h, filt_w, h, w)

    filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

    # compute padding
    pad = cls.get_tf_padding(node_opts.Padding())

    # does it have biases
    if len(inputs) > 2:
        bias = inputs[2]
        bias_node = bias[0]
    else:
        bias_node = ConstantInputParameters(
            f'{node.name}_bias',
            dims=Dim.unnamed([filt_out_c]),
            value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check

    params = Conv2DParameters(
        node.name,
        filt=filt_dim,
        stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
        dilation=DilationDim(node_opts.DilationHFactor(), node_opts.DilationWFactor()),
        padding=pad,
        has_bias=True,
        in_dims_hint=SparseList([['h', 'w', 'c'], cls.TF_LITE_FILTER_ORDER.copy(), ['out_c']]),
        out_dims_hint=SparseList([['h', 'w', 'c']]),
        constant_store=G.constant_store)
    G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
    G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
    cls.new_load_filter_parameters(G, params, node.input[0], weights_node,
                                   bias_node, node.output[0], opts)
    # if opts.get('load_dequantized'):
    #     weights_node.value, bias_node.value = cls.load_dequantized_filter_parameters(
    #         node.input, bias_node.value)
    # else:
    #     qrec, weights_node.value, bias_node.value = cls.load_filter_parameters(
    #         G, params, node.input, bias_node.value, node.output, opts)
    #     if qrec:
    #         G.quantization[NodeId(weights_node)].out_qs[0] = qrec.in_qs[1]
    #         G.quantization[NodeId(bias_node)].out_qs[0] = qrec.in_qs[2]

    in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
    out_dims = params.get_output_size(
        [in_dim, Dim.unnamed(filt_dim.shape), Dim.unnamed([filt_out_c])])
    pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
    G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params

def version_1(cls, node: TFLiteNode, **kwargs):
    node_opts = node.get_options(Conv2DOptions)
    G = kwargs['G']
    opts = kwargs['opts']
    all_nodes = kwargs['all_nodes']

    inputs = [all_nodes[t] for t in node.input]
    x = inputs[0]
    x_shape = x[2].shape
    in_b, h, w, in_c = tuple(x_shape)

    filt = inputs[1]
    filt_tensor = node.input[1]
    filt_shape = filt[2].shape
    # ['out_c', 'h', 'w', 'in_c']
    filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

    # get filter dimensions
    filt_tensor.used = True
    if filt_h > h or filt_w > w:
        LOG.warning(
            "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
            node.name, filt_h, filt_w, h, w)

    filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

    # compute padding
    pad = cls.get_tf_padding(node_opts.Padding())

    # does it have biases
    has_bias = len(inputs) > 2
    if has_bias:
        node.input[2].used = True

    params = Conv2DParameters(node.name,
                              filt=filt_dim,
                              stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
                              dilation=DilationDim(node_opts.DilationHFactor(),
                                                   node_opts.DilationWFactor()),
                              padding=pad,
                              has_bias=has_bias,
                              in_dims_hint=SparseList([['h', 'w', 'c']]),
                              out_dims_hint=SparseList([['h', 'w', 'c']]),
                              constant_store=G.constant_store)

    if opts.get('load_dequantized'):
        cls.load_dequantized_filter_parameters(params, node.input)
    else:
        cls.load_filter_parameters(G, params, node.input, node.output, opts)

    in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
    out_dims = params.get_output_size([in_dim])
    pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
    G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params

def conv(cls, node, quantized=False, **kwargs):
    all_nodes = kwargs['all_nodes']
    G = kwargs['G']
    valid_name = kwargs['valid_name']
    inputs = [all_nodes[inp] for inp in node.input]
    # input N x C x H x W
    x = inputs[0]
    x_rank = len(x[2].shape)
    x_shape = x[2].shape

    if x_shape[0] is not None:
        real_in_shape = tuple(x_shape.copy())
        if x_shape[0] > 1:
            # support for multi batch is very limited
            batch = x_shape[0]
            logger.warning(
                f"{valid_name} has a non 1 batch dimension of {batch} -"
                " this is not supported by nntool or autotiler kernels")
        else:
            # if the batch is specified but is 1 then the input will be reshaped
            # and the output will have the batch dim set as unknown.
            batch = None
    else:
        real_in_shape = tuple(x_shape[1:])
        batch = None

    spatial_size = x_rank - 2
    assert spatial_size == 2 or spatial_size == 1, "only 1D and 2D convolutions supported"

    # input error checking
    undefined = []
    if x_shape[1] is None:
        # cope with swapped batch and channel due to bad initial reshape
        if x_shape[0] == 1:
            batch = None
            x_shape = [x_shape[1], x_shape[0]] + list(x_shape[2:])
            real_in_shape = x_shape[1:]
        else:
            undefined.append(f"input channel size of filter {valid_name} must be defined.")
    if not all(dim is not None for dim in x_shape[-spatial_size:]):
        undefined.append(f"input spatial size {x_shape} of filter {valid_name} must be defined.")
    if undefined:
        raise ValueError(f"{' '.join(undefined)}. You may need to override input dimensions.")

    # M x C/group x kH x kW
    weights_idx = 3 if quantized else 1
    weights_node = inputs[weights_idx][0]
    weights_node.name = f'{valid_name}_weights'
    weights = cls.get_constant(inputs[weights_idx])
    out_c = weights.shape[0]
    group = node.attrs.get("group", 1)
    in_c = x_shape[1]
    filt_in_c = in_c // group
    if in_c != weights.shape[1] * group:
        raise ValueError(f'node {valid_name} has incorrect input channel '
                         f'dimension {in_c} expecting {weights.shape[1] * group}')
    if spatial_size == 1:
        filt_w = weights.shape[-1]
        filt_h = h = 1
        w = x_shape[-1]
        # create a new constant node since we are changing the shape
        weights = np.reshape(weights, (out_c, filt_in_c, filt_h, filt_w))
        weights_node = ConstantInputParameters(f'{valid_name}_weights',
                                               value=weights,
                                               dims=Dim.unnamed(weights.shape))
        cls.record_constant_qrec(inputs[weights_idx], weights_node, **kwargs)
    else:
        filt_h = weights.shape[-2]
        filt_w = weights.shape[-1]
        h = x_shape[-2]
        w = x_shape[-1]
    conv_in_shape = (in_c, h, w)
    # h = 1 if spatial_size == 1 else (
    #     x_shape[-2] if x_shape[-2] is not None else 1)
    # w = x_shape[-1] if x_shape[-1] is not None else 1

    filt_dim = Conv2DFilterDim(filt_h, filt_w, out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.ONNX_FILTER_ORDER)

    biases_idx = 8 if quantized else 2
    if len(inputs) > biases_idx:
        biases_node = inputs[biases_idx][0]
        biases = cls.get_constant(inputs[biases_idx])
    else:
        biases = np.zeros([out_c], dtype=np.float32)
        biases_node = ConstantInputParameters(f'{valid_name}_biases',
                                              value=biases,
                                              dims=Dim.unnamed(biases.shape))

    dilations = cls.pad_start_with(node.attrs.get("dilations", []), [1], 2)
    strides = cls.pad_start_with(node.attrs.get("strides", []), [1], 2)
    pad_dim = cls.calc_pad_dim(node, 4)

    if batch is not None:
        in_hint = ['n', 'c', 'h', 'w']
        out_hint = ['n', 'c', 'h', 'w']
        in_dim = Dim.named_ordered(n=batch, c=in_c, h=h, w=w)
        ker_in_order = [['n', 'c', 'h', 'w'], ['out_c', 'in_c', 'h', 'w'], ['out_c']]
        ker_out_order = [['n', 'c', 'h', 'w']]
    else:
        in_hint = ['c', 'h', 'w']
        out_hint = ['c', 'h', 'w']
        in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
        ker_in_order = [['c', 'h', 'w'], ['out_c', 'in_c', 'h', 'w'], ['out_c']]
        ker_out_order = [['c', 'h', 'w']]

    params = Conv2DParameters(valid_name,
                              filt=filt_dim,
                              stride=StrideDim(strides[0], strides[1]),
                              dilation=DilationDim(dilations[0], dilations[1]),
                              batch=batch,
                              groups=group,
                              padding=pad_dim,
                              ker_in_order=ker_in_order,
                              ker_out_order=ker_out_order,
                              has_bias=True,
                              in_dims_hint=[in_hint, cls.ONNX_FILTER_ORDER, ['c']],
                              out_dims_hint=[out_hint])

    if quantized:
        qrecs = kwargs['qrecs']
        x_zp = cls.get_constant(inputs[2])
        x_scale = cls.get_constant(inputs[1])
        x_qtype = QType(dtype=x_zp.dtype, scale=x_scale, zero_point=x_zp)
        w_zp = cls.get_constant(inputs[5])
        w_scale = cls.get_constant(inputs[4])
        weights_node.qtype = w_qtype = QType(
            dtype=w_zp.dtype, scale=w_scale, zero_point=w_zp,
            quantized_dimension=0 if len(w_scale) > 1 else None)
        o_zp = cls.get_constant(inputs[7])
        o_scale = cls.get_constant(inputs[6])
        o_qtype = QType(dtype=o_zp.dtype, scale=o_scale, zero_point=o_zp)
        biases_node.qtype = b_qtype = QType(
            dtype=biases.dtype, scale=w_scale*x_scale)
        qrecs[NodeId(params)] = QRec.scaled(
            in_qs=[x_qtype, w_qtype, b_qtype],
            out_qs=[o_qtype],
        )
    else:
        o_qtype = None

    w_dim = Dim.named_ordered(out_c=out_c, in_c=filt_in_c, h=filt_h, w=filt_w)
    b_dim = Dim.named_ordered(c=out_c)
    out_dims = params.get_output_size([in_dim, w_dim, b_dim])
    G.add_edge(NNEdge(from_node=weights_node, to_node=params, from_idx=0, to_idx=1))
    G.add_edge(NNEdge(from_node=biases_node, to_node=params, from_idx=0, to_idx=2))

    # check if input needs a reshape
    if conv_in_shape != real_in_shape:
        r1_params = ReshapeParameters(f'{valid_name}_reshape_in',
                                      old_shape=Dim.unnamed(real_in_shape),
                                      shape=Dim.unnamed(conv_in_shape))
        G.add_edge(NNEdge(from_node=x[0], to_node=r1_params, from_idx=x[1], to_idx=0))
        G.add_edge(NNEdge(from_node=r1_params, to_node=params, from_idx=0, to_idx=0))
    else:
        G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

    # check if output needs a reshape
    if spatial_size == 1:
        if batch is not None:
            oned_out_shape = [batch, out_dims[0].c, out_dims[0].w]
            pout_dims = ProvisionalDim(oned_out_shape)
        else:
            oned_out_shape = [out_dims[0].c, out_dims[0].w]
            pout_dims = ProvisionalDim([None] + oned_out_shape)
        r2_params = ReshapeParameters(f'{valid_name}_reshape_out',
                                      old_shape=out_dims[0],
                                      shape=Dim.unnamed(oned_out_shape))
        G.add_edge(NNEdge(from_node=params, to_node=r2_params, from_idx=0, to_idx=0))
        params = r2_params
    else:
        pout_dims = ProvisionalDim([batch] + out_dims[0].shape)

    all_nodes[node.output[0]] = (params, 0, pout_dims, o_qtype)
    return params

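# The bias QType above is built with scale=w_scale*x_scale. A short hedged
# illustration of why (standard scaled-integer quantization reasoning, not
# nntool internals): the convolution accumulates x_q * w_q products, and
# since real = q * scale, each product carries a scale of x_scale * w_scale;
# quantizing the bias with that same scale lets it be added directly to the
# accumulator without rescaling.
import numpy as np

x_scale, w_scale = 0.02, 0.005  # illustrative per-tensor scales
bias_real = 0.37
bias_q = np.round(bias_real / (x_scale * w_scale)).astype(np.int32)
# dequantizing with the product scale recovers the bias to within one step
assert abs(bias_q * x_scale * w_scale - bias_real) < x_scale * w_scale
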
def version_1(cls, node: TFLiteNode, **kwargs):
    node_opts = node.get_options(Conv2DOptions)
    G = kwargs['G']
    opts = kwargs['opts']
    all_nodes = kwargs['all_nodes']

    inputs = [all_nodes[t] for t in node.input]
    x = inputs[0]
    x = cls.remove_known_batch_dimension(G, x, node)
    x_shape = x[2].shape
    in_b, h, w, in_c = tuple(x_shape)

    filt = inputs[1]
    weights_node = filt[0]
    filt_shape = filt[2].shape
    # ['out_c', 'h', 'w', 'in_c']
    filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

    # get filter dimensions
    if filt_h > h or filt_w > w:
        LOG.warning(
            "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
            node.name, filt_h, filt_w, h, w)

    filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

    # compute padding
    pad = cls.get_tf_padding(node_opts.Padding())

    # does it have biases
    if len(inputs) > 2:
        bias = inputs[2]
        bias_node = bias[0]
    else:
        bias_node = ConstantInputParameters(
            f'{node.name}_bias',
            dims=Dim.unnamed([filt_out_c]),
            value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check

    groups = in_c // filt_in_c
    params = Conv2DParameters(
        node.name,
        filt=filt_dim,
        stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
        dilation=DilationDim(node_opts.DilationHFactor(), node_opts.DilationWFactor()),
        groups=groups,
        padding=pad,
        has_bias=True,
        in_dims_hint=[['h', 'w', 'c'], cls.TF_LITE_FILTER_ORDER.copy(), ['out_c']],
        out_dims_hint=[['h', 'w', 'c']])
    G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
    G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
    cls.new_load_filter_parameters(G, params, params.filter.actual_shape,
                                   params.filter.get_order_idx('out_c'),
                                   node.input[0], weights_node, bias_node,
                                   node.output[0], opts)

    in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
    out_dims = params.get_output_size(
        [in_dim, Dim.unnamed(filt_dim.shape), Dim.unnamed([filt_out_c])])
    pout_dims = ProvisionalDim([None] + out_dims[0].shape)
    G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    oparams = cls.fuse_activation(node_opts, node.name, params, **kwargs)
    all_nodes[node.output[0]] = (oparams, 0, pout_dims)
    return oparams

def _common(cls, node: TFLiteNode, **kwargs):
    node_opts = node.get_options(DepthwiseConv2DOptions)
    G = kwargs['G']
    opts = kwargs['opts']
    all_nodes = kwargs['all_nodes']

    inputs = [all_nodes[t] for t in node.input]
    x = inputs[0]
    x_shape = x[2].shape
    in_b, h, w, in_c = tuple(x_shape)

    filt = inputs[1]
    filt_tensor = node.input[1]
    filt_shape = filt[2].shape
    # ['in_c', 'h', 'w', 'out_c']
    filt_in_c, filt_h, filt_w, filt_out_c = tuple(filt_shape)

    # get filter dimensions
    filt_tensor.used = True
    if filt_h > h or filt_w > w:
        LOG.warning(
            "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
            node.name, filt_h, filt_w, h, w)

    filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)

    # multiplier should match filter
    check(filt_dim.out_c == node_opts.DepthMultiplier() * in_c, "invalid multiplier")
    groups = filt_dim.out_c // node_opts.DepthMultiplier()

    # compute padding
    pad = cls.get_tf_padding(node_opts.Padding())

    # does it have biases
    has_bias = len(inputs) > 2
    if has_bias:
        node.input[2].used = True

    # TFLITE produces single channel input DW convolutions with the
    # multiplier equal to the number of out channels. This is just
    # a normal convolution and since we don't handle the channel
    # multiplier at present (but can) just convert them to normal
    # convolutions
    convert_to_conv = in_c == 1 and groups == 1

    if convert_to_conv:
        filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)
        params = Conv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
            dilation=DilationDim(node_opts.DilationHFactor(), node_opts.DilationWFactor()),
            padding=pad,
            has_bias=has_bias,
            in_dims_hint=SparseList([['h', 'w', 'c']]),
            out_dims_hint=SparseList([['h', 'w', 'c']]),
            constant_store=G.constant_store)
    else:
        filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)
        params = Conv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
            dilation=DilationDim(node_opts.DilationHFactor(), node_opts.DilationWFactor()),
            padding=pad,
            groups=groups,
            multiplier=node_opts.DepthMultiplier(),
            has_bias=has_bias,
            tf_depthwise=True,
            in_dims_hint=SparseList([['h', 'w', 'c']]),
            out_dims_hint=SparseList([['h', 'w', 'c']]),
            constant_store=G.constant_store)

    if opts.get('load_dequantized'):
        cls.load_dequantized_filter_parameters(params, node.input, convert_to_conv, is_dw=True)
    else:
        cls.load_filter_parameters(G, params, node.input, node.output, opts,
                                   converted_to_conv=convert_to_conv)

    in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
    out_dims = params.get_output_size([in_dim])
    pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
    G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params

def _common(cls, node: TFLiteNode, **kwargs):
    node_opts = node.get_options(DepthwiseConv2DOptions)
    G = kwargs['G']
    opts = kwargs['opts']
    all_nodes = kwargs['all_nodes']

    inputs = [all_nodes[t] for t in node.input]
    x = inputs[0]
    x = cls.remove_known_batch_dimension(G, x, node)
    x_shape = x[2].shape
    in_b, h, w, in_c = tuple(x_shape)

    filt = inputs[1]
    weights_node = filt[0]
    filt_shape = filt[2].shape
    # ['in_c', 'h', 'w', 'out_c']
    filt_in_c, filt_h, filt_w, filt_out_c = tuple(filt_shape)

    # get filter dimensions
    if filt_h > h or filt_w > w:
        LOG.warning(
            "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
            node.name, filt_h, filt_w, h, w)

    filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)

    # multiplier should match filter
    check(filt_dim.out_c == node_opts.DepthMultiplier() * in_c, "invalid multiplier")
    groups = filt_dim.out_c // node_opts.DepthMultiplier()

    # compute padding
    pad = cls.get_tf_padding(node_opts.Padding())

    # does it have biases
    if len(inputs) > 2:
        bias = inputs[2]
        bias_node = bias[0]
    else:
        bias_node = ConstantInputParameters(
            f'{node.name}_bias',
            dims=Dim.unnamed([filt_out_c]),
            value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check

    # TFLITE produces single channel input DW convolutions with the
    # multiplier equal to the number of out channels. This is just
    # a normal convolution and since we don't handle the channel
    # multiplier at present (but can) just convert them to normal
    # convolutions
    convert_to_conv = in_c == 1 and groups == 1

    if convert_to_conv:
        filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)
        params = Conv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
            dilation=DilationDim(node_opts.DilationHFactor(), node_opts.DilationWFactor()),
            padding=pad,
            has_bias=True,
            in_dims_hint=[['h', 'w', 'c'], cls.TF_LITE_FILTER_ORDER.copy(), ['out_c']],
            out_dims_hint=[['h', 'w', 'c']])
    else:
        filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)
        params = Conv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
            dilation=DilationDim(node_opts.DilationHFactor(), node_opts.DilationWFactor()),
            padding=pad,
            groups=groups,
            multiplier=node_opts.DepthMultiplier(),
            has_bias=True,
            tf_depthwise=True,
            in_dims_hint=[['h', 'w', 'c'], cls.TF_LITE_DW_FILTER_ORDER.copy(), ['out_c']],
            out_dims_hint=[['h', 'w', 'c']])
    G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
    G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
    cls.new_load_filter_parameters(G, params, params.filter.actual_shape,
                                   params.filter.get_order_idx('out_c'),
                                   node.input[0], weights_node, bias_node,
                                   node.output[0], opts, dw_to_pw=convert_to_conv)

    in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
    out_dims = params.get_output_size(
        [in_dim, Dim.unnamed(filt_dim.shape), Dim.unnamed([filt_out_c])])
    pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
    G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params

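# The convert_to_conv branch above relies on the observation in the comment:
# with in_c == 1 every depthwise "multiplier" output channel is computed from
# the single input channel, which is exactly a normal convolution with
# multiplier-many output channels. A tiny numpy check at one output position
# (illustrative only, not nntool code):
import numpy as np

x = np.random.rand(1, 5, 5)      # one input channel: c x h x w
filts = np.random.rand(4, 3, 3)  # 4 filters == DepthMultiplier output channels
patch = x[0, :3, :3]             # one valid output position
dw = np.array([(patch * f).sum() for f in filts])        # depthwise view
pw = np.tensordot(filts, patch, axes=([1, 2], [0, 1]))   # plain conv view
assert np.allclose(dw, pw)
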
def conv(cls, node, **kwargs):
    all_nodes = kwargs['all_nodes']
    G = kwargs['G']
    valid_name = kwargs['valid_name']
    inputs = [all_nodes[inp] for inp in node.input]
    # input N x C x H x W
    x = inputs[0]
    x_rank = len(x[2].shape)
    x_shape = x[2].shape
    spatial_size = x_rank - 2
    assert spatial_size == 2 or spatial_size == 1, "only 1D and 2D convolutions supported"

    # M x C/group x kH x kW
    weights_node = inputs[1][0]
    weights_node.name = f'{valid_name}_weights'
    weights = cls.get_constant(inputs[1])
    out_c = weights.shape[0]
    group = node.attrs.get("group", 1)
    in_c = x_shape[1]
    filt_in_c = in_c // group
    if in_c != weights.shape[1] * group:
        raise ValueError(
            f'node {valid_name} has incorrect input channel '
            f'dimension {in_c} expecting {weights.shape[1] * group}')
    if spatial_size == 1:
        filt_w = weights.shape[-1]
        filt_h = 1
        # create a new constant node since we are changing the shape
        weights = np.reshape(weights, (out_c, filt_in_c, filt_h, filt_w))
        weights_node = ConstantInputParameters(
            f'{valid_name}_weights', value=weights,
            dims=Dim.unnamed(weights.shape),
            constant_store=G.constant_store)
    else:
        filt_h = weights.shape[-2]
        filt_w = weights.shape[-1]
    h = 1 if spatial_size == 1 else x_shape[-2]
    w = x_shape[-1]

    filt_dim = Conv2DFilterDim(filt_h, filt_w, out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.ONNX_FILTER_ORDER)

    if len(inputs) > 2:
        biases_node = inputs[2][0]
        biases = cls.get_constant(inputs[2])
    else:
        biases = np.zeros([out_c], dtype=np.float32)
        biases_node = ConstantInputParameters(
            f'{valid_name}_biases', value=biases,
            dims=Dim.unnamed(biases.shape),
            constant_store=G.constant_store)

    dilations = cls.pad_start_with(node.attrs.get("dilations", []), [1], 2)
    strides = cls.pad_start_with(node.attrs.get("strides", []), [1], 2)
    pad_dim = cls.calc_pad_dim(node, 4)

    params = Conv2DParameters(valid_name,
                              filt=filt_dim,
                              stride=StrideDim(strides[0], strides[1]),
                              dilation=DilationDim(dilations[0], dilations[1]),
                              groups=group,
                              padding=pad_dim,
                              has_bias=True,
                              in_dims_hint=[['c', 'h', 'w'], cls.ONNX_FILTER_ORDER, ['c']],
                              out_dims_hint=[['c', 'h', 'w']],
                              constant_store=G.constant_store)

    in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
    w_dim = Dim.named_ordered(out_c=out_c, in_c=filt_in_c, h=filt_h, w=filt_w)
    b_dim = Dim.named_ordered(c=out_c)
    out_dims = params.get_output_size([in_dim, w_dim, b_dim])
    G.add_edge(NNEdge(from_node=weights_node, to_node=params, from_idx=0, to_idx=1))
    G.add_edge(NNEdge(from_node=biases_node, to_node=params, from_idx=0, to_idx=2))

    if spatial_size == 1:
        oned_in_shape = [in_c, w]
        twod_in_shape = [in_c, 1, w]
        oned_out_shape = [out_dims[0].c, out_dims[0].w]
        r1_params = ReshapeParameters(f'{valid_name}_reshape2d',
                                      old_shape=Dim.unnamed(oned_in_shape),
                                      shape=Dim.unnamed(twod_in_shape))
        r2_params = ReshapeParameters(f'{valid_name}_reshape1d',
                                      old_shape=out_dims[0],
                                      shape=Dim.unnamed(oned_out_shape))
        G.add_edge(NNEdge(from_node=x[0], to_node=r1_params, from_idx=x[1], to_idx=0))
        G.add_edge(NNEdge(from_node=r1_params, to_node=params, from_idx=0, to_idx=0))
        G.add_edge(NNEdge(from_node=params, to_node=r2_params, from_idx=0, to_idx=0))
        pout_dims = ProvisionalDim([x_shape[0]] + oned_out_shape)
        all_nodes[node.output[0]] = (r2_params, 0, pout_dims)
        return r2_params
    else:
        pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
        G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params