def two_conv_graph():
    G = NNGraph(name='two_conv_graph')
    ti = G.add_input(Dim.unnamed([10, 10, 2]))
    c1filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c1filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n1 = Conv2DParameters("node1",
                          filt=c1filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n1)
    w1 = [[0.25, 0.25], [0.25, 0.25], [0.25, 0.25]]
    w1 = [w1, w1, w1]
    w2 = [[0.75, 0.75], [0.75, 0.75], [0.75, 0.75]]
    w2 = [w2, w2, w2]
    n1.weights = np.array([w1, w2])
    c2filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c2filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n2 = Conv2DParameters("node2",
                          filt=c2filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n2)
    w3 = [[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]
    w3 = [w3, w3, w3]
    n2.weights = np.array([w3, w3])
    to = G.add_output()
    G.add_edge(NNEdge(ti, n1))
    G.add_edge(NNEdge(n1, n2))
    G.add_edge(NNEdge(n2, to))
    G.add_dimensions()
    yield G

def test_conf2d_depth():
    # TF Lite depthwise convolution
    weights = np.arange(9).reshape([3, 3])
    weights = np.repeat(weights, 2).reshape([1, 3, 3, 2])
    filt = Conv2DFilterDim(3, 3, 2, 1).impose_order(["in_c", "h", "w", "out_c"])
    stride = StrideDim(1)
    pad = PadDim(0)
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              groups=1,
                              multiplier=2,
                              tf_depthwise=True,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(16).reshape([1, 4, 4])
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output1 = conv2d(params, in_dims, out_dims[0], input_, weights, None)
    assert np.array_equal(output1, [[[258, 294], [402, 438]],
                                    [[258, 294], [402, 438]]])
    output2 = conv2d(params, in_dims, out_dims[0], input_, weights, None, allow_faster=False)
    assert np.array_equal(output1, output2)

def test_conf2d_normal():
    weights = np.arange(9).reshape([1, 1, 3, 3])
    filt = Conv2DFilterDim(3, 3, 1, 1)
    stride = StrideDim(1)
    pad = PadDim(0)
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(16).reshape([1, 4, 4])
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    details = {}
    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None, details=details)
    # assert details['max_acc'] == 438.0 and details['min_acc'] == 258.0
    assert np.array_equal(output_, [[[258, 294], [402, 438]]])

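# A minimal, dependency-free sketch of where the expected values above come from:
# a plain valid 3x3 cross-correlation over the 4x4 input. This loop is only an
# illustration of the arithmetic, not the nntool conv2d kernel itself.
def _naive_valid_conv2d(inp, kernel):
    # inp: (H, W) array, kernel: (kh, kw) array; returns the valid cross-correlation
    kh, kw = kernel.shape
    out_h = inp.shape[0] - kh + 1
    out_w = inp.shape[1] - kw + 1
    out = np.zeros((out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            out[i, j] = np.sum(inp[i:i + kh, j:j + kw] * kernel)
    return out


# _naive_valid_conv2d(np.arange(16).reshape(4, 4), np.arange(9).reshape(3, 3))
# -> [[258., 294.], [402., 438.]], matching the assertion in test_conf2d_normal
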
def test_conf2d_q2(caplog):
    caplog.set_level(logging.INFO)
    weights_q = QType(16, 1, True)
    weights = weights_q.quantize(np.full([1, 1, 2, 2], 1.0))
    filt = Conv2DFilterDim(2, 2, 1, 1)
    stride = StrideDim(1)
    pad = PadDim.valid()
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    in_q = QType(16, 0, True)
    calc_q = QType(weights_q.bits + in_q.bits, weights_q.q + in_q.q, True)
    qrec = FilterQuantizationRecord(in_qs=[in_q], out_qs=[in_q], weights_q=weights_q,
                                    acc_q=calc_q, calc_q=calc_q)
    input_ = in_q.quantize(np.full([1, 2, 2], 1.0))
    in_dims = Dim.named(c=1, h=2, w=2).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None, qrec=qrec)
    output_ = in_q.dequantize(output_)
    assert np.array_equal(output_, [[[4.]]])

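# Rough sketch of the fixed-point arithmetic exercised above, assuming QType(bits, q)
# is a plain Qx.q format (value * 2**q rounded to an integer):
#   weights_q = Q1 -> quantize(1.0) == 2, in_q = Q0 -> quantize(1.0) == 1
#   calc_q = Q1    -> each product is 2, so the 2x2 valid window accumulates to 8 in Q1
#   rescaling the Q1 accumulator to the Q0 output gives 4, and in_q.dequantize(4) == 4.0,
#   which is the float result of an all-ones 2x2 kernel over an all-ones 2x2 input.
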
def actfusion_graph():
    G = NNGraph(name='actfusion_graph')
    ti1 = G.add_input(Dim.unnamed([10, 10, 2])).name
    ti2 = G.add_input(Dim.unnamed([10, 10, 2])).name
    c1filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c1filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n1 = Conv2DParameters("node1",
                          filt=c1filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n1)
    w1 = [[0.25, 0.25], [0.25, 0.25], [0.25, 0.25]]
    w1 = [w1, w1, w1]
    w2 = [[0.75, 0.75], [0.75, 0.75], [0.75, 0.75]]
    w2 = [w2, w2, w2]
    n1.weights = np.array([w1, w2])
    n1a = ReluActivationParameters("node1a")
    G.add_node(n1a)
    c2filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c2filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n2 = Conv2DParameters("node2",
                          filt=c2filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n2)
    w3 = [[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]
    w3 = [w3, w3, w3]
    n2.weights = np.array([w3, w3])
    n3 = MatrixAddParameters("node3")
    G.add_node(n3)
    n4 = ReluActivationParameters("node4")
    G.add_node(n4)
    to = G.add_output()
    G.add_edge(NNEdge(ti1, n1))
    G.add_edge(NNEdge(n1, n1a))
    G.add_edge(NNEdge(ti2, n2))
    G.add_edge(NNEdge(n1a, n3, to_idx=0))
    G.add_edge(NNEdge(n2, n3, to_idx=1))
    G.add_edge(NNEdge(n3, n4))
    G.add_edge(NNEdge(n4, to))
    G.add_dimensions()
    yield G

def version_1(cls, node: TFLiteNode, **kwargs):
    node_opts = node.get_options(TransposeConvOptions)
    G = kwargs['G']
    opts = kwargs['opts']
    all_nodes = kwargs['all_nodes']

    inputs = [all_nodes[t] for t in node.input]
    x = inputs[2]
    x_shape = x[2].shape
    in_b, in_h, in_w, in_c = tuple(x_shape)
    pout_shape = [dim if x_shape[idx] is not None else None
                  for idx, dim in enumerate(cls.get_constant(inputs[0]))]
    out_b, out_h, out_w, out_c = tuple(pout_shape)

    filt = inputs[1]
    weights_node = filt[0]
    filt_shape = filt[2].shape
    # ['in_c', 'h', 'w', 'out_c']
    filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

    filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

    stride_w = node_opts.StrideW()
    stride_h = node_opts.StrideH()
    # compute padding
    pad = node_opts.Padding()
    if pad == Padding.SAME:
        pad_h = ((in_h - 1) * stride_h + filt_h - out_h)
        pad_w = ((in_w - 1) * stride_w + filt_w - out_w)
        pad_top = pad_h // 2
        pad_left = pad_w // 2
        pad = PadDim(pad_top, pad_h - pad_top, pad_left,
                     pad_w - pad_left, same_type='balanced_right')
    else:
        pad = PadDim(0)

    params = TransposeConv2DParameters(
        node.name,
        filt=filt_dim,
        stride=StrideDim(stride_h, stride_w),
        padding=pad,
        in_dims_hint=[['h', 'w', 'c'], cls.TF_LITE_FILTER_ORDER.copy()],
        out_dims_hint=[['h', 'w', 'c']])
    G.add_edge(
        NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
    pout_dims = ProvisionalDim(pout_shape)

    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params

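# Worked example of the SAME-padding arithmetic above (illustrative values only):
# with in_h = in_w = 4, stride = 2, filt_h = filt_w = 3 and a requested 8x8 output,
#   pad_h = (4 - 1) * 2 + 3 - 8 = 1, pad_top = 0, pad_bottom = 1
#   pad_w = (4 - 1) * 2 + 3 - 8 = 1, pad_left = 0, pad_right = 1
# i.e. the 'balanced_right' convention puts the odd padding pixel on the bottom/right.
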
def _match(self, G: GraphView, set_identity: bool = True, **kwargs):
    something_changed = False
    filt_nodes = [node for node in G.nodes()
                  if isinstance(node, (Conv2DParameters, ConvFusionParameters))]
    for filt_node in filt_nodes:
        pnode = filt_node
        if isinstance(filt_node, ConvFusionParameters):
            cnodes = filt_node.contained_nodes()
            filt_node = cnodes[0]
            if not isinstance(filt_node, Conv2DParameters):
                continue
        in_dim = filt_node.in_dims
        filt_dim = filt_node.filter
        if filt_dim.h <= in_dim[0].h and filt_dim.w <= in_dim[0].w:
            continue
        min_h = min(filt_dim.h, in_dim[0].h)
        min_w = min(filt_dim.w, in_dim[0].w)
        if min_h > 1 and min_w > 1:
            LOG.warning("Filter of %s [%dx%d] bigger than input [%dx%d] not optimal but will work on AT",
                        filt_node.name, filt_dim.h, filt_dim.w, in_dim[0].h, in_dim[0].w)
            continue
        ker_h = 1 if min_h == 1 else filt_dim.h
        ker_w = 1 if min_w == 1 else filt_dim.w
        if ker_h == filt_dim.h and ker_w == filt_dim.w:
            continue
        new_filt_dim = Conv2DFilterDim(
            ker_h, ker_w, filt_dim.out_c, in_c=filt_dim.in_c)
        LOG.warning("Converting filter of %s from [%dx%d] -> [%dx%d]",
                    filt_node.name, filt_dim.h, filt_dim.w, new_filt_dim.h, new_filt_dim.w)
        filt_node.filter = new_filt_dim
        new_w_idxs = []
        for dim in filt_dim.order:
            if dim in ('out_c', 'in_c'):
                new_w_idxs.append(slice(None))
            elif dim == 'h':
                if new_filt_dim.h == 1:
                    new_w_idxs.append(
                        slice(filt_node.padding.t, filt_node.padding.t + 1))
                else:
                    new_w_idxs.append(slice(0, new_filt_dim.h))
            elif dim == 'w':
                if new_filt_dim.w == 1:
                    new_w_idxs.append(
                        slice(filt_node.padding.l, filt_node.padding.l + 1))
                else:
                    new_w_idxs.append(slice(0, new_filt_dim.w))
        weights_node = G.indexed_in_edges(pnode.name)[1].from_node
        weights_node.value = weights_node.value[tuple(new_w_idxs)]
        weights_node.dims = Dim.unnamed(weights_node.value.shape)
        something_changed = True

    if set_identity:
        self.set_identity(G)

    return something_changed

def add_convolution(out_graph, routes, idx, l):
    activation = get_str(l, 'activation', default="logistic")
    node_name = "{}_{}".format(l['type'], idx)
    routes['in'][idx] = node_name
    padding = l.get("padding")
    pad = l.get("pad")
    size = get_int(l, 'size', 1)
    groups = get_int(l, 'groups', 1)
    filters_c = get_int(l, 'filters', 1)
    stride = get_int(l, 'stride', 1)
    batch_normalize = get_int(l, 'batch_normalize', 0)
    flipped = get_int(l, 'flipped', 0)
    custom = {'batch_normalize': batch_normalize == 1, 'flipped': flipped == 1}

    assert 'binary' not in l, "Binary convolutions are not implemented"
    assert 'xnor' not in l, "XNOR convolutions are not implemented"
    assert 'dot' not in l, "dot is not implemented"

    # padding calculation as per Darknet code
    if pad is not None:
        padding = int(size / 2)
    if padding is None:
        padding = 0

    if activation is None:
        routes['in'][idx], routes['out'][idx] =\
            out_graph.add_operator(
                node_name,
                Conv2DParameters(Conv2DFilterDim(size, size, filters_c),
                                 StrideDim(stride), PadDim(padding), groups=groups,
                                 custom=custom, has_bias=True))
    else:
        activation = DARKNET_ACTIVATION_TYPES[activation]
        routes['in'][idx], routes['out'][idx] =\
            out_graph.add_operators(
                node_name,
                [
                    Conv2DParameters(Conv2DFilterDim(size, size, filters_c),
                                     StrideDim(stride), PadDim(padding), groups=groups,
                                     custom=custom, has_bias=True),
                    ActivationParameters(activation)
                ])
    return True

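# The Darknet padding rule above in short (restating what the code does): when the
# layer config sets 'pad', padding is derived from the kernel size as size // 2
# (a 3x3 kernel gets padding 1, a 5x5 kernel gets padding 2); an explicit 'padding=N'
# is only honoured when 'pad' is absent, and everything else defaults to 0.
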
def conv(cls, node, **kwargs):
    all_nodes = kwargs['all_nodes']
    G = kwargs['G']
    valid_name = kwargs['valid_name']
    inputs = [all_nodes[inp] for inp in node.input]
    # input N x C x H x W
    x = inputs[0]
    x_rank = len(x[2].shape)
    x_shape = x[2].shape
    spatial_size = x_rank - 2
    assert spatial_size <= 2, "only 1D and 2D convolutions supported"

    # M x C/group x kH x kW
    weights = cls.get_constant(inputs[1])
    out_c = weights.shape[0]
    group = node.attrs.get("group", 1)
    in_c = x_shape[1]
    filt_in_c = in_c // group
    # kernel spatial dims: (kH, kW) for 2D, (kW,) for 1D
    filt_h = weights.shape[-2] if spatial_size > 1 else 1
    filt_w = weights.shape[-1]
    h = 1 if spatial_size <= 1 else x_shape[2]
    w = 1 if spatial_size == 0 else (x_shape[2] if spatial_size == 1 else x_shape[3])

    filt_dim = Conv2DFilterDim(filt_h, filt_w, out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.ONNX_FILTER_ORDER)

    if len(inputs) > 2:
        biases = cls.get_constant(inputs[2])
    else:
        biases = np.zeros([out_c])

    dilations = cls.pad_start_with(node.attrs.get("dilations", [1] * spatial_size), [1], 2)
    strides = cls.pad_start_with(node.attrs.get("strides", [1] * spatial_size), [1], 2)
    pad_dim = cls.calc_pad_dim(node, spatial_size)

    params = Conv2DParameters(valid_name,
                              filt=filt_dim,
                              stride=StrideDim(strides[0], strides[1]),
                              dilation=DilationDim(dilations[0], dilations[1]),
                              groups=group,
                              padding=pad_dim,
                              has_bias=True,
                              in_dims_hint=SparseList([['c', 'h', 'w']]),
                              out_dims_hint=SparseList([['c', 'h', 'w']]),
                              constant_store=G.constant_store)
    params.weights = weights
    params.biases = biases
    in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
    out_dims = params.get_output_size([in_dim])
    pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
    G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params

def test_paddim():
    dim1 = PadDim(1)
    assert not dim1.is_same
    assert dim1.h == 2 and dim1.w == 2
    assert dim1.l == 1 and dim1.r == 1 and dim1.t == 1 and dim1.b == 1
    assert dim1.numpy_pad_shape(Dim.named_ordered(w=10, h=10)) == [(1, 1), (1, 1)]
    stride_dim = StrideDim(1)
    filt_dim = Conv2DFilterDim(5, 5, 1, 1)
    in_dim = Dim.named_ordered(c=1, h=20, w=20)
    dim1 = PadDim.same()
    dim1.calculate_same(in_dim, filt_dim, stride_dim)
    assert dim1.shape == [2, 2, 2, 2]

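# Sketch of the SAME-padding arithmetic behind the last assertion (standard TF-style
# formula, shown for illustration): with stride 1 the total padding per axis is
# filter - 1, so a 5x5 filter needs 4 pixels in h and in w, split evenly as
# top/bottom/left/right = 2/2/2/2 -> shape [2, 2, 2, 2], and the 20x20 input keeps
# its 20x20 spatial size at the output.
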
def add_convolution(G, tensors, name, subgraph, _, op, load_tensors=False, dequantize=False):
    conv_opts = Conv2DOptions.Conv2DOptions()
    conv_opts.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)

    # get filter dimensions
    filt = get_input_size(tensors, subgraph, op, 1, order=TF_LITE_FILTER_ORDER)
    filt = Conv2DFilterDim(filt['h'], filt['w'],
                           filt['out_c'], in_c=filt['in_c'])
    filt = filt.impose_order(TF_LITE_FILTER_ORDER)

    # compute padding
    pad = get_tf_padding(conv_opts.Padding())

    # does it have biases
    has_bias = op.InputsLength() > 2

    node = Conv2DParameters(name,
                            filt=filt,
                            stride=StrideDim(conv_opts.StrideH(), conv_opts.StrideW()),
                            padding=pad,
                            has_bias=has_bias,
                            in_dims_hint=SparseList([['h', 'w', 'c']]),
                            out_dims_hint=SparseList([['h', 'w', 'c']]),
                            constant_store=G.constant_store)

    if load_tensors:
        node.weights = get_tensor(G.model, tensors, subgraph, op, 1, dequantize=dequantize)
        if has_bias:
            node.biases = get_tensor(G.model, tensors, subgraph, op, 2, dequantize=dequantize)

    return fuse_activation(G, conv_opts, name, node)

def test_conf2d_depth_q():
    calc_q = QType(32, 9, True)
    biases_q = acc_q = out_q = QType(16, 4, True)
    weights_q = QType(16, 4, True)
    in_q = QType(16, 5, True)
    # TF Lite depthwise convolution
    biases = np.full([2], 0.5)
    qbiases = biases_q.quantize(biases)
    weights = np.full([3, 3], 0.5)
    weights = np.repeat(weights, 2).reshape([1, 3, 3, 2])
    qweights = weights_q.quantize(weights)
    filt = Conv2DFilterDim(3, 3, 2, 1).impose_order(["in_c", "h", "w", "out_c"])
    stride = StrideDim(1)
    pad = PadDim(0)
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              groups=1,
                              multiplier=2,
                              tf_depthwise=True,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    qrec = FilterQuantizationRecord(in_qs=[in_q], out_qs=[out_q], weights_q=weights_q,
                                    biases_q=biases_q, acc_q=acc_q, calc_q=calc_q)
    input_ = np.full([1, 4, 4], 2)
    qinput_ = in_q.quantize(input_)
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, biases)
    qoutput_ = conv2d(params, in_dims, out_dims[0], qinput_, qweights, qbiases, qrec=qrec)
    dqoutput_ = out_q.dequantize(qoutput_)
    assert np.array_equal(output_, dqoutput_)

def test_conf2d_pad_dilate():
    weights = np.arange(9).reshape([1, 1, 3, 3])
    filt = Conv2DFilterDim(3, 3, 1, 1)
    stride = StrideDim(1)
    pad = PadDim.same()
    dilation = DilationDim(2)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(16).reshape([1, 4, 4])
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None, None)
    assert np.array_equal(output_, [[[266., 206.], [98., 66.]]])

def test_conf2d_pad():
    weights = np.arange(9).reshape([1, 1, 3, 3])
    filt = Conv2DFilterDim(3, 3, 1, 1)
    stride = StrideDim(1)
    pad = PadDim.same()
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(16).reshape([1, 4, 4])
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None)
    assert np.array_equal(output_, [[[73, 121, 154, 103],
                                     [171, 258, 294, 186],
                                     [279, 402, 438, 270],
                                     [139, 187, 202, 113]]])

def test_conf2d_2_in_2_out_c():
    weights = np.arange(4).reshape([1, 2, 2])
    weights = np.append(weights, weights, axis=0)
    weights = np.append(weights, weights, axis=0)
    weights = weights.reshape([2, 2, 2, 2])
    filt = Conv2DFilterDim(2, 2, 2, 2)
    stride = StrideDim(1)
    pad = PadDim.valid()
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(9).reshape([1, 3, 3])
    input_ = np.append(input_, input_, axis=0)
    in_dims = Dim.named(c=2, h=3, w=3).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None, None)
    assert np.array_equal(output_, [[[38., 50.], [74., 86.]],
                                    [[38., 50.], [74., 86.]]])

def version_1(cls, node: TFLiteNode, **kwargs):
    node_opts = node.get_options(Conv2DOptions)
    G = kwargs['G']
    opts = kwargs['opts']
    all_nodes = kwargs['all_nodes']

    inputs = [all_nodes[t] for t in node.input]

    x = inputs[0]
    x_shape = x[2].shape
    in_b, h, w, in_c = tuple(x_shape)

    filt = inputs[1]
    weights_node = filt[0]
    filt_shape = filt[2].shape
    # ['in_c', 'h', 'w', 'out_c']
    filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

    # get filter dimensions
    if filt_h > h or filt_w > w:
        LOG.warning(
            "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
            node.name, filt_h, filt_w, h, w)

    filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

    # compute padding
    pad = cls.get_tf_padding(node_opts.Padding())

    # does it have biases
    if len(inputs) > 2:
        bias = inputs[2]
        bias_node = bias[0]
    else:
        bias_node = ConstantInputParameters(
            f'{node.name}_bias',
            dims=Dim.unnamed([filt_out_c]),
            value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check

    params = Conv2DParameters(
        node.name,
        filt=filt_dim,
        stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
        dilation=DilationDim(node_opts.DilationHFactor(), node_opts.DilationWFactor()),
        padding=pad,
        has_bias=True,
        in_dims_hint=SparseList([['h', 'w', 'c'],
                                 cls.TF_LITE_FILTER_ORDER.copy(),
                                 ['out_c']]),
        out_dims_hint=SparseList([['h', 'w', 'c']]),
        constant_store=G.constant_store)

    G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
    G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
    cls.new_load_filter_parameters(G, params, node.input[0], weights_node,
                                   bias_node, node.output[0], opts)
    # if opts.get('load_dequantized'):
    #     weights_node.value, bias_node.value = cls.load_dequantized_filter_parameters(
    #         node.input, bias_node.value)
    # else:
    #     qrec, weights_node.value, bias_node.value = cls.load_filter_parameters(
    #         G, params, node.input, bias_node.value, node.output, opts)
    #     if qrec:
    #         G.quantization[NodeId(weights_node)].out_qs[0] = qrec.in_qs[1]
    #         G.quantization[NodeId(bias_node)].out_qs[0] = qrec.in_qs[2]

    in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
    out_dims = params.get_output_size(
        [in_dim, Dim.unnamed(filt_dim.shape), Dim.unnamed([filt_out_c])])
    pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
    G.add_edge(
        NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params

def add_depthwise_convolution(G, tensors, name, subgraph, _, op,
                              load_tensors=False, dequantize=False):
    conv_opts = DepthwiseConv2DOptions.DepthwiseConv2DOptions()
    conv_opts.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)

    # get filter dimensions
    inp = get_input_size(tensors, subgraph, op, 0, order=TF_LITE_IN_OUT_ORDER)
    filt = get_input_size(tensors, subgraph, op, 1, order=TF_LITE_DW_FILTER_ORDER)
    filt = Conv2DFilterDim(filt['h'], filt['w'],
                           filt['out_c'], in_c=1)

    # multiplier should match filter
    check(filt.out_c == conv_opts.DepthMultiplier() * inp['c'], "invalid multiplier")
    groups = filt.out_c // conv_opts.DepthMultiplier()

    # compute padding
    pad = get_tf_padding(conv_opts.Padding())

    # does it have biases
    has_bias = op.InputsLength() > 2

    # TFLITE produces single channel input DW convolutions with the
    # multiplier equal to the number of out channels. This is just
    # a normal convolution and since we don't handle the channel
    # multiplier at present (but can) just convert them to normal
    # convolutions
    convert_to_conv = inp['c'] == 1 and groups == 1

    if convert_to_conv:
        filt.impose_order(TF_LITE_FILTER_ORDER)
        node = Conv2DParameters(name,
                                filt=filt,
                                stride=StrideDim(conv_opts.StrideH(), conv_opts.StrideW()),
                                padding=pad,
                                has_bias=has_bias,
                                in_dims_hint=SparseList([['h', 'w', 'c']]),
                                out_dims_hint=SparseList([['h', 'w', 'c']]),
                                constant_store=G.constant_store)
    else:
        filt.impose_order(TF_LITE_DW_FILTER_ORDER)
        node = Conv2DParameters(name,
                                filt=filt,
                                stride=StrideDim(conv_opts.StrideH(), conv_opts.StrideW()),
                                padding=pad,
                                groups=groups,
                                multiplier=conv_opts.DepthMultiplier(),
                                has_bias=has_bias,
                                tf_depthwise=True,
                                in_dims_hint=SparseList([['h', 'w', 'c']]),
                                out_dims_hint=SparseList([['h', 'w', 'c']]),
                                constant_store=G.constant_store)

    if load_tensors:
        node.weights = get_tensor(G.model, tensors, subgraph, op, 1, dequantize=dequantize)
        # If we've converted to a normal conv then change the weight order
        if convert_to_conv:
            node.weights = node.weights.transpose(TF_LITE_DW_FILTER_TRANSPOSE)
        if has_bias:
            node.biases = get_tensor(G.model, tensors, subgraph, op, 2, dequantize=dequantize)

    return fuse_activation(G, conv_opts, name, node)

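# Illustration of the depthwise-to-normal-conv weight reorder above, assuming (not
# confirmed here) that TF_LITE_DW_FILTER_TRANSPOSE maps the TFLite DW layout
# (in_c=1, h, w, out_c) onto the normal-conv layout (out_c, h, w, in_c). With a
# hypothetical 3x3 kernel and multiplier 4 the same permutation in plain numpy is:
#
#     dw_weights = np.zeros((1, 3, 3, 4))          # (in_c=1, h, w, out_c)
#     conv_weights = dw_weights.transpose((3, 1, 2, 0))
#     conv_weights.shape                           # -> (4, 3, 3, 1)
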
def _common(cls, node: TFLiteNode, **kwargs):
    node_opts = node.get_options(DepthwiseConv2DOptions)
    G = kwargs['G']
    opts = kwargs['opts']
    all_nodes = kwargs['all_nodes']

    inputs = [all_nodes[t] for t in node.input]

    x = inputs[0]
    x_shape = x[2].shape
    in_b, h, w, in_c = tuple(x_shape)

    filt = inputs[1]
    filt_tensor = node.input[1]
    filt_shape = filt[2].shape
    # ['in_c', 'h', 'w', 'out_c']
    filt_in_c, filt_h, filt_w, filt_out_c = tuple(filt_shape)

    # get filter dimensions
    filt_tensor.used = True
    if filt_h > h or filt_w > w:
        LOG.warning(
            "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
            node.name, filt_h, filt_w, h, w)

    filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)

    # multiplier should match filter
    check(filt_dim.out_c == node_opts.DepthMultiplier() * in_c, "invalid multiplier")
    groups = filt_dim.out_c // node_opts.DepthMultiplier()

    # compute padding
    pad = cls.get_tf_padding(node_opts.Padding())

    # does it have biases
    has_bias = len(inputs) > 2
    if has_bias:
        node.input[2].used = True

    # TFLITE produces single channel input DW convolutions with the
    # multiplier equal to the number of out channels. This is just
    # a normal convolution and since we don't handle the channel
    # multiplier at present (but can) just convert them to normal
    # convolutions
    convert_to_conv = in_c == 1 and groups == 1

    if convert_to_conv:
        filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)
        params = Conv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
            dilation=DilationDim(node_opts.DilationHFactor(), node_opts.DilationWFactor()),
            padding=pad,
            has_bias=has_bias,
            in_dims_hint=SparseList([['h', 'w', 'c']]),
            out_dims_hint=SparseList([['h', 'w', 'c']]),
            constant_store=G.constant_store)
    else:
        filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)
        params = Conv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
            dilation=DilationDim(node_opts.DilationHFactor(), node_opts.DilationWFactor()),
            padding=pad,
            groups=groups,
            multiplier=node_opts.DepthMultiplier(),
            has_bias=has_bias,
            tf_depthwise=True,
            in_dims_hint=SparseList([['h', 'w', 'c']]),
            out_dims_hint=SparseList([['h', 'w', 'c']]),
            constant_store=G.constant_store)

    if opts.get('load_dequantized'):
        cls.load_dequantized_filter_parameters(params, node.input, convert_to_conv, is_dw=True)
    else:
        cls.load_filter_parameters(G, params, node.input, node.output, opts,
                                   converted_to_conv=convert_to_conv)

    in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
    out_dims = params.get_output_size([in_dim])
    pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
    G.add_edge(
        NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params

def version_1(cls, node: TFLiteNode, **kwargs):
    node_opts = node.get_options(Conv2DOptions)
    G = kwargs['G']
    opts = kwargs['opts']
    all_nodes = kwargs['all_nodes']

    inputs = [all_nodes[t] for t in node.input]

    x = inputs[0]
    x = cls.remove_known_batch_dimension(G, x, node)
    x_shape = x[2].shape
    in_b, h, w, in_c = tuple(x_shape)

    filt = inputs[1]
    weights_node = filt[0]
    filt_shape = filt[2].shape
    # ['in_c', 'h', 'w', 'out_c']
    filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

    # get filter dimensions
    if filt_h > h or filt_w > w:
        LOG.warning(
            "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
            node.name, filt_h, filt_w, h, w)

    filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

    # compute padding
    pad = cls.get_tf_padding(node_opts.Padding())

    # does it have biases
    if len(inputs) > 2:
        bias = inputs[2]
        bias_node = bias[0]
    else:
        bias_node = ConstantInputParameters(
            f'{node.name}_bias',
            dims=Dim.unnamed([filt_out_c]),
            value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check

    groups = in_c // filt_in_c
    params = Conv2DParameters(
        node.name,
        filt=filt_dim,
        stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
        dilation=DilationDim(node_opts.DilationHFactor(), node_opts.DilationWFactor()),
        groups=groups,
        padding=pad,
        has_bias=True,
        in_dims_hint=[['h', 'w', 'c'],
                      cls.TF_LITE_FILTER_ORDER.copy(),
                      ['out_c']],
        out_dims_hint=[['h', 'w', 'c']])

    G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
    G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
    cls.new_load_filter_parameters(G, params, params.filter.actual_shape,
                                   params.filter.get_order_idx('out_c'),
                                   node.input[0], weights_node, bias_node,
                                   node.output[0], opts)

    in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
    out_dims = params.get_output_size(
        [in_dim, Dim.unnamed(filt_dim.shape), Dim.unnamed([filt_out_c])])
    pout_dims = ProvisionalDim([None] + out_dims[0].shape)
    G.add_edge(
        NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    oparams = cls.fuse_activation(node_opts, node.name, params, **kwargs)
    all_nodes[node.output[0]] = (oparams, 0, pout_dims)
    return oparams

def version_1(cls, node: TFLiteNode, **kwargs):
    node_opts = node.get_options(Conv2DOptions)
    G = kwargs['G']
    opts = kwargs['opts']
    all_nodes = kwargs['all_nodes']

    inputs = [all_nodes[t] for t in node.input]

    x = inputs[0]
    x_shape = x[2].shape
    in_b, h, w, in_c = tuple(x_shape)

    filt = inputs[1]
    filt_tensor = node.input[1]
    filt_shape = filt[2].shape
    # ['in_c', 'h', 'w', 'out_c']
    filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

    # get filter dimensions
    filt_tensor.used = True
    if filt_h > h or filt_w > w:
        LOG.warning(
            "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
            node.name, filt_h, filt_w, h, w)

    filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

    # compute padding
    pad = cls.get_tf_padding(node_opts.Padding())

    # does it have biases
    has_bias = len(inputs) > 2
    if has_bias:
        node.input[2].used = True

    params = Conv2DParameters(node.name,
                              filt=filt_dim,
                              stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
                              dilation=DilationDim(node_opts.DilationHFactor(),
                                                   node_opts.DilationWFactor()),
                              padding=pad,
                              has_bias=has_bias,
                              in_dims_hint=SparseList([['h', 'w', 'c']]),
                              out_dims_hint=SparseList([['h', 'w', 'c']]),
                              constant_store=G.constant_store)

    if opts.get('load_dequantized'):
        cls.load_dequantized_filter_parameters(params, node.input)
    else:
        cls.load_filter_parameters(G, params, node.input, node.output, opts)

    in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
    out_dims = params.get_output_size([in_dim])
    pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
    G.add_edge(
        NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params

def conv(cls, node, quantized=False, **kwargs):
    all_nodes = kwargs['all_nodes']
    G = kwargs['G']
    valid_name = kwargs['valid_name']
    inputs = [all_nodes[inp] for inp in node.input]
    # input N x C x H x W
    x = inputs[0]
    x_rank = len(x[2].shape)
    x_shape = x[2].shape

    if x_shape[0] is not None:
        real_in_shape = tuple(x_shape.copy())
        if x_shape[0] > 1:
            # support for multi batch is very limited
            batch = x_shape[0]
            logger.warning(
                f"{valid_name} has a non 1 batch dimension of {batch} -"
                " this is not supported by nntool or autotiler kernels")
        else:
            # if the batch is specified but is 1 then the input will be reshaped
            # and the output will have the batch dim set as unknown.
            batch = None
    else:
        real_in_shape = tuple(x_shape[1:])
        batch = None

    spatial_size = x_rank - 2
    assert spatial_size == 2 or spatial_size == 1, "only 1D and 2D convolutions supported"

    # Input error checking
    undefined = []
    if x_shape[1] is None:
        # cope with swapped batch and channel due to bad initial reshape
        if x_shape[0] == 1:
            batch = None
            x_shape = [x_shape[1], x_shape[0]] + list(x_shape[2:])
            real_in_shape = x_shape[1:]
        else:
            undefined.append(f"input channel size of filter {valid_name} must be defined.")

    if not all(dim is not None for dim in x_shape[-spatial_size:]):
        undefined.append(f"input spatial size {x_shape} of filter {valid_name} must be defined.")

    if undefined:
        raise ValueError(f"{' '.join(undefined)}. You may need to override input dimensions.")

    # M x C/group x kH x kW
    weights_idx = 3 if quantized else 1
    weights_node = inputs[weights_idx][0]
    weights_node.name = f'{valid_name}_weights'
    weights = cls.get_constant(inputs[weights_idx])
    out_c = weights.shape[0]
    group = node.attrs.get("group", 1)
    in_c = x_shape[1]
    filt_in_c = in_c // group
    if in_c != weights.shape[1] * group:
        raise ValueError(f'node {valid_name} has incorrect input channel '
                         f'dimension {in_c} expecting {weights.shape[1] * group}')
    if spatial_size == 1:
        filt_w = weights.shape[-1]
        filt_h = h = 1
        w = x_shape[-1]
        # create a new constant node since we are changing the shape
        weights = np.reshape(weights, (out_c, filt_in_c, filt_h, filt_w))
        weights_node = ConstantInputParameters(f'{valid_name}_weights', value=weights,
                                               dims=Dim.unnamed(weights.shape))
        cls.record_constant_qrec(inputs[1], weights_node, **kwargs)
    else:
        filt_h = weights.shape[-2]
        filt_w = weights.shape[-1]
        h = x_shape[-2]
        w = x_shape[-1]

    conv_in_shape = (in_c, h, w)
    # h = 1 if spatial_size == 1 else (
    #     x_shape[-2] if x_shape[-2] is not None else 1)
    # w = x_shape[-1] if x_shape[-1] is not None else 1

    filt_dim = Conv2DFilterDim(filt_h, filt_w, out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.ONNX_FILTER_ORDER)

    biases_idx = 8 if quantized else 2
    if len(inputs) > biases_idx:
        biases_node = inputs[biases_idx][0]
        biases = cls.get_constant(inputs[biases_idx])
    else:
        biases = np.zeros([out_c], dtype=np.float32)
        biases_node = ConstantInputParameters(f'{valid_name}_biases', value=biases,
                                              dims=Dim.unnamed(biases.shape))

    dilations = cls.pad_start_with(node.attrs.get("dilations", []), [1], 2)
    strides = cls.pad_start_with(node.attrs.get("strides", []), [1], 2)
    pad_dim = cls.calc_pad_dim(node, 4)

    if batch is not None:
        in_hint = ['n', 'c', 'h', 'w']
        out_hint = ['n', 'c', 'h', 'w']
        in_dim = Dim.named_ordered(n=batch, c=in_c, h=h, w=w)
        ker_in_order = [
            ['n', 'c', 'h', 'w'],
            ['out_c', 'in_c', 'h', 'w'],
            ['out_c']]
        ker_out_order = [['n', 'c', 'h', 'w']]
    else:
        in_hint = ['c', 'h', 'w']
        out_hint = ['c', 'h', 'w']
        in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
        ker_in_order = [
            ['c', 'h', 'w'],
            ['out_c', 'in_c', 'h', 'w'],
            ['out_c']]
        ker_out_order = [['c', 'h', 'w']]

    params = Conv2DParameters(valid_name,
                              filt=filt_dim,
                              stride=StrideDim(strides[0], strides[1]),
                              dilation=DilationDim(dilations[0], dilations[1]),
                              batch=batch,
                              groups=group,
                              padding=pad_dim,
                              ker_in_order=ker_in_order,
                              ker_out_order=ker_out_order,
                              has_bias=True,
                              in_dims_hint=[in_hint, cls.ONNX_FILTER_ORDER, ['c']],
                              out_dims_hint=[out_hint])

    if quantized:
        qrecs = kwargs['qrecs']
        x_zp = cls.get_constant(inputs[2])
        x_scale = cls.get_constant(inputs[1])
        x_qtype = QType(dtype=x_zp.dtype, scale=x_scale, zero_point=x_zp)
        w_zp = cls.get_constant(inputs[5])
        w_scale = cls.get_constant(inputs[4])
        weights_node.qtype = w_qtype = QType(
            dtype=w_zp.dtype, scale=w_scale, zero_point=w_zp,
            quantized_dimension=0 if len(w_scale) > 1 else None)
        o_zp = cls.get_constant(inputs[7])
        o_scale = cls.get_constant(inputs[6])
        o_qtype = QType(dtype=o_zp.dtype, scale=o_scale, zero_point=o_zp)
        biases_node.qtype = b_qtype = QType(
            dtype=biases.dtype, scale=w_scale * x_scale)
        qrecs[NodeId(params)] = QRec.scaled(
            in_qs=[x_qtype, w_qtype, b_qtype],
            out_qs=[o_qtype],
        )
    else:
        o_qtype = None

    w_dim = Dim.named_ordered(
        out_c=out_c, in_c=filt_in_c, h=filt_h, w=filt_w)
    b_dim = Dim.named_ordered(c=out_c)
    out_dims = params.get_output_size([in_dim, w_dim, b_dim])
    G.add_edge(NNEdge(from_node=weights_node,
                      to_node=params, from_idx=0, to_idx=1))
    G.add_edge(NNEdge(from_node=biases_node,
                      to_node=params, from_idx=0, to_idx=2))

    # check if input needs a reshape
    if conv_in_shape != real_in_shape:
        r1_params = ReshapeParameters(f'{valid_name}_reshape_in',
                                      old_shape=Dim.unnamed(real_in_shape),
                                      shape=Dim.unnamed(conv_in_shape))
        G.add_edge(
            NNEdge(from_node=x[0], to_node=r1_params, from_idx=x[1], to_idx=0))
        G.add_edge(NNEdge(from_node=r1_params,
                          to_node=params, from_idx=0, to_idx=0))
    else:
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

    # check if output needs a reshape
    if spatial_size == 1:
        if batch is not None:
            oned_out_shape = [batch, out_dims[0].c, out_dims[0].w]
            pout_dims = ProvisionalDim(oned_out_shape)
        else:
            oned_out_shape = [out_dims[0].c, out_dims[0].w]
            pout_dims = ProvisionalDim([None] + oned_out_shape)
        r2_params = ReshapeParameters(f'{valid_name}_reshape_out',
                                      old_shape=out_dims[0],
                                      shape=Dim.unnamed(oned_out_shape))
        G.add_edge(NNEdge(from_node=params,
                          to_node=r2_params, from_idx=0, to_idx=0))
        params = r2_params
    else:
        pout_dims = ProvisionalDim([batch] + out_dims[0].shape)

    all_nodes[node.output[0]] = (params, 0, pout_dims, o_qtype)
    return params

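# Note on the quantized branch above: the bias scale is derived as w_scale * x_scale
# so that the integer bias lands in the same scale as the weight * input accumulator.
# For example (values assumed purely for illustration), with x_scale = 0.02 and a
# per-channel w_scale = [0.5, 0.25], the bias scales become [0.01, 0.005] and a float
# bias of 0.1 on the first channel would be stored as round(0.1 / 0.01) == 10.
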
def _common(cls, node, **kwargs):
    all_nodes = kwargs['all_nodes']
    G = kwargs['G']
    valid_name = kwargs['valid_name']
    inputs = [all_nodes[inp] for inp in node.input]
    # input N x C x H x W
    x = inputs[0]
    x_rank = len(x[2].shape)
    x_shape = x[2].shape
    real_in_shape = deepcopy(x_shape)
    # conv_shape = [x if idx > 0 and x is not None else 1 for idx, x in enumerate(x_shape)]
    conv_shape = x_shape
    if None in x_shape:
        real_in_shape.remove(None)
    spatial_size = x_rank - 2
    assert spatial_size == 2 or spatial_size == 1, "only 1D and 2D convolutions supported"

    # M x C/group x kH x kW
    weights_node = inputs[1][0]
    weights_node.name = f'{valid_name}_weights'
    weights = cls.get_constant(inputs[1])
    out_c = weights.shape[1]
    group = node.attrs.get("group", 1)
    in_c = conv_shape[-spatial_size-1] if conv_shape[-spatial_size-1] is not None else 1
    filt_out_c = out_c // group
    if in_c != weights.shape[0]:
        raise ValueError(f'node {valid_name} has incorrect input channel '
                         f'dimension {in_c} expecting {weights.shape[0]}')
    if spatial_size == 1:
        filt_w = weights.shape[-1]
        filt_h = 1
        # create a new constant node since we are changing the shape
        weights = np.reshape(weights, (in_c, filt_out_c, filt_h, filt_w))
        weights_node = ConstantInputParameters(f'{valid_name}_weights', value=weights,
                                               dims=Dim.unnamed(weights.shape))
        cls.record_constant_qrec(inputs[1], weights_node, **kwargs)
    else:
        filt_h = weights.shape[-2]
        filt_w = weights.shape[-1]
    h = 1 if spatial_size == 1 else (conv_shape[-2] if conv_shape[-2] is not None else 1)
    w = conv_shape[-1] if conv_shape[-1] is not None else 1

    filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=in_c)
    filt_dim = filt_dim.impose_order(cls.ONNX_TRANSFILTER_ORDER)

    if len(inputs) > 2:
        biases_node = inputs[2][0]
        biases = cls.get_constant(inputs[2])
    else:
        biases = np.zeros([out_c], dtype=np.float32)
        biases_node = ConstantInputParameters(f'{valid_name}_biases', value=biases,
                                              dims=Dim.unnamed(biases.shape))

    padding, dilations, strides, output_padding = cls.calc_shapes(
        node, spatial_size, Dim2D((h, w)), Dim2D((filt_h, filt_w)))

    params = TransposeConv2DParameters(valid_name,
                                       filt=filt_dim,
                                       stride=strides,
                                       dilation=dilations,
                                       groups=group,
                                       padding=padding,
                                       has_bias=True,
                                       in_dims_hint=[['c', 'h', 'w'],
                                                     cls.ONNX_TRANSFILTER_ORDER,
                                                     ['c']],
                                       out_dims_hint=[['c', 'h', 'w']])

    in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
    w_dim = Dim.named_ordered(
        out_c=filt_out_c, in_c=in_c, h=filt_h, w=filt_w)
    b_dim = Dim.named_ordered(c=out_c)
    out_dims = params.get_output_size([in_dim, w_dim, b_dim])
    G.add_edge(NNEdge(from_node=weights_node,
                      to_node=params, from_idx=0, to_idx=1))
    G.add_edge(NNEdge(from_node=biases_node,
                      to_node=params, from_idx=0, to_idx=2))

    if conv_shape != real_in_shape:
        # insert reshape from [xx, None, xx, xx] -> [None, xx, xx, xx]
        rbatch_params = ReshapeParameters(G.unique_name(f'{valid_name}_reshape_batchdim'),
                                          old_shape=Dim.unnamed(conv_shape),
                                          shape=Dim.unnamed(real_in_shape))
        G.add_edge(
            NNEdge(from_node=x[0], to_node=rbatch_params, from_idx=x[1], to_idx=0))
        prev_node = rbatch_params
        prev_idx = 0
    else:
        prev_node = x[0]
        prev_idx = x[1]

    if spatial_size == 1:
        oned_in_shape = [in_c, w]
        twod_in_shape = [in_c, 1, w]
        oned_out_shape = [out_dims[0].c, out_dims[0].w]
        r1_params = ReshapeParameters(f'{valid_name}_reshape2d',
                                      old_shape=Dim.unnamed(oned_in_shape),
                                      shape=Dim.unnamed(twod_in_shape))
        r2_params = ReshapeParameters(f'{valid_name}_reshape1d',
                                      old_shape=out_dims[0],
                                      shape=Dim.unnamed(oned_out_shape))
        G.add_edge(
            NNEdge(from_node=prev_node, to_node=r1_params, from_idx=prev_idx, to_idx=0))
        G.add_edge(NNEdge(from_node=r1_params,
                          to_node=params, from_idx=0, to_idx=0))
        G.add_edge(NNEdge(from_node=params,
                          to_node=r2_params, from_idx=0, to_idx=0))
        pout_dims = ProvisionalDim([conv_shape[0]] + oned_out_shape)
        all_nodes[node.output[0]] = (r2_params, 0, pout_dims, None)
        return r2_params
    else:
        pout_dims = ProvisionalDim([conv_shape[0]] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=prev_node, to_node=params, from_idx=prev_idx, to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pout_dims, None)
        return params

def _common(cls, node: TFLiteNode, **kwargs):
    node_opts = node.get_options(DepthwiseConv2DOptions)
    G = kwargs['G']
    opts = kwargs['opts']
    all_nodes = kwargs['all_nodes']

    inputs = [all_nodes[t] for t in node.input]

    x = inputs[0]
    x = cls.remove_known_batch_dimension(G, x, node)
    x_shape = x[2].shape
    in_b, h, w, in_c = tuple(x_shape)

    filt = inputs[1]
    weights_node = filt[0]
    filt_shape = filt[2].shape
    # ['in_c', 'h', 'w', 'out_c']
    filt_in_c, filt_h, filt_w, filt_out_c = tuple(filt_shape)

    # get filter dimensions
    if filt_h > h or filt_w > w:
        LOG.warning(
            "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
            node.name, filt_h, filt_w, h, w)

    filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)

    # multiplier should match filter
    check(filt_dim.out_c == node_opts.DepthMultiplier() * in_c, "invalid multiplier")
    groups = filt_dim.out_c // node_opts.DepthMultiplier()

    # compute padding
    pad = cls.get_tf_padding(node_opts.Padding())

    # does it have biases
    if len(inputs) > 2:
        bias = inputs[2]
        bias_node = bias[0]
    else:
        bias_node = ConstantInputParameters(
            f'{node.name}_bias',
            dims=Dim.unnamed([filt_out_c]),
            value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check

    # TFLITE produces single channel input DW convolutions with the
    # multiplier equal to the number of out channels. This is just
    # a normal convolution and since we don't handle the channel
    # multiplier at present (but can) just convert them to normal
    # convolutions
    convert_to_conv = in_c == 1 and groups == 1

    if convert_to_conv:
        filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)
        params = Conv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
            dilation=DilationDim(node_opts.DilationHFactor(), node_opts.DilationWFactor()),
            padding=pad,
            has_bias=True,
            in_dims_hint=[['h', 'w', 'c'],
                          cls.TF_LITE_FILTER_ORDER.copy(),
                          ['out_c']],
            out_dims_hint=[['h', 'w', 'c']])
    else:
        filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)
        params = Conv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
            dilation=DilationDim(node_opts.DilationHFactor(), node_opts.DilationWFactor()),
            padding=pad,
            groups=groups,
            multiplier=node_opts.DepthMultiplier(),
            has_bias=True,
            tf_depthwise=True,
            in_dims_hint=[['h', 'w', 'c'],
                          cls.TF_LITE_DW_FILTER_ORDER.copy(),
                          ['out_c']],
            out_dims_hint=[['h', 'w', 'c']])

    G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
    G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
    cls.new_load_filter_parameters(G, params, params.filter.actual_shape,
                                   params.filter.get_order_idx('out_c'),
                                   node.input[0], weights_node, bias_node,
                                   node.output[0], opts, dw_to_pw=convert_to_conv)

    in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
    out_dims = params.get_output_size(
        [in_dim, Dim.unnamed(filt_dim.shape), Dim.unnamed([filt_out_c])])
    pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
    G.add_edge(
        NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params

def conv(cls, node, **kwargs):
    all_nodes = kwargs['all_nodes']
    G = kwargs['G']
    valid_name = kwargs['valid_name']
    inputs = [all_nodes[inp] for inp in node.input]
    # input N x C x H x W
    x = inputs[0]
    x_rank = len(x[2].shape)
    x_shape = x[2].shape
    spatial_size = x_rank - 2
    assert spatial_size == 2 or spatial_size == 1, "only 1D and 2D convolutions supported"

    # M x C/group x kH x kW
    weights_node = inputs[1][0]
    weights_node.name = f'{valid_name}_weights'
    weights = cls.get_constant(inputs[1])
    out_c = weights.shape[0]
    group = node.attrs.get("group", 1)
    in_c = x_shape[1]
    filt_in_c = in_c // group
    if in_c != weights.shape[1] * group:
        raise ValueError(
            f'node {valid_name} has incorrect input channel '
            f'dimension {in_c} expecting {weights.shape[1] * group}')
    if spatial_size == 1:
        filt_w = weights.shape[-1]
        filt_h = 1
        # create a new constant node since we are changing the shape
        weights = np.reshape(weights, (out_c, filt_in_c, filt_h, filt_w))
        weights_node = ConstantInputParameters(
            f'{valid_name}_weights',
            value=weights,
            dims=Dim.unnamed(weights.shape),
            constant_store=G.constant_store)
    else:
        filt_h = weights.shape[-2]
        filt_w = weights.shape[-1]
    h = 1 if spatial_size == 1 else x_shape[-2]
    w = x_shape[-1]

    filt_dim = Conv2DFilterDim(filt_h, filt_w, out_c, in_c=filt_in_c)
    filt_dim = filt_dim.impose_order(cls.ONNX_FILTER_ORDER)

    if len(inputs) > 2:
        biases_node = inputs[2][0]
        biases = cls.get_constant(inputs[2])
    else:
        biases = np.zeros([out_c], dtype=np.float32)
        biases_node = ConstantInputParameters(
            f'{valid_name}_biases',
            value=biases,
            dims=Dim.unnamed(biases.shape),
            constant_store=G.constant_store)

    dilations = cls.pad_start_with(node.attrs.get("dilations", []), [1], 2)
    strides = cls.pad_start_with(node.attrs.get("strides", []), [1], 2)
    pad_dim = cls.calc_pad_dim(node, 4)

    params = Conv2DParameters(valid_name,
                              filt=filt_dim,
                              stride=StrideDim(strides[0], strides[1]),
                              dilation=DilationDim(dilations[0], dilations[1]),
                              groups=group,
                              padding=pad_dim,
                              has_bias=True,
                              in_dims_hint=[['c', 'h', 'w'],
                                            cls.ONNX_FILTER_ORDER,
                                            ['c']],
                              out_dims_hint=[['c', 'h', 'w']],
                              constant_store=G.constant_store)

    in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
    w_dim = Dim.named_ordered(out_c=out_c, in_c=filt_in_c, h=filt_h, w=filt_w)
    b_dim = Dim.named_ordered(c=out_c)
    out_dims = params.get_output_size([in_dim, w_dim, b_dim])
    G.add_edge(
        NNEdge(from_node=weights_node, to_node=params, from_idx=0, to_idx=1))
    G.add_edge(
        NNEdge(from_node=biases_node, to_node=params, from_idx=0, to_idx=2))

    if spatial_size == 1:
        oned_in_shape = [in_c, w]
        twod_in_shape = [in_c, 1, w]
        oned_out_shape = [out_dims[0].c, out_dims[0].w]
        r1_params = ReshapeParameters(f'{valid_name}_reshape2d',
                                      old_shape=Dim.unnamed(oned_in_shape),
                                      shape=Dim.unnamed(twod_in_shape))
        r2_params = ReshapeParameters(f'{valid_name}_reshape1d',
                                      old_shape=out_dims[0],
                                      shape=Dim.unnamed(oned_out_shape))
        G.add_edge(
            NNEdge(from_node=x[0], to_node=r1_params, from_idx=x[1], to_idx=0))
        G.add_edge(
            NNEdge(from_node=r1_params, to_node=params, from_idx=0, to_idx=0))
        G.add_edge(
            NNEdge(from_node=params, to_node=r2_params, from_idx=0, to_idx=0))
        pout_dims = ProvisionalDim([x_shape[0]] + oned_out_shape)
        all_nodes[node.output[0]] = (r2_params, 0, pout_dims)
        return r2_params
    else:
        pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params