def add_mean(G, tensors, name, subgraph, op_name, op, load_tensors=False, dequantize=False):
    """Convert a TFLite MEAN operator into a global average pool when possible.

    A MEAN is only equivalent to a global average pool when it reduces over
    the spatial axes, i.e. the axes tensor is exactly [1, 2] (h and w in the
    NHWC input layout). Any other reduction is added as an unconverted node.

    Returns whatever add_node / add_unconverted returns for the created node.
    """
    check(op.InputsLength() == 2, "Very odd " + str(op.InputsAsNumpy()))
    # Second input is the axes tensor of the reduction.
    axes = get_tensor(G.model, tensors, subgraph, op, 1, dequantize=False)
    if len(axes) == 2 and axes[0] == 1 and axes[1] == 2:
        LOG.info("MEAN operator converted to global average pool")
        inp = get_input_size(None, subgraph, op, 0, order=TF_LITE_IN_OUT_ORDER)
        check(inp['n'] == 1, "Multi batch not supported")
        # Average pool whose filter covers the entire feature map is a
        # mean over h and w.
        pool_params = PoolingParameters(
            name,
            filt=PoolFilterDim(inp['h'], inp['w']),
            stride=StrideDim(1, 1),
            padding=PadDim.valid(),
            pool_type="average",
            in_dims_hint=SparseList([['h', 'w', 'c']]),
            out_dims_hint=SparseList([['h', 'w', 'c']]))
        return add_node(G, pool_params)
    LOG.warning(
        "MEAN operator seen but can't convert to global average pool")
    return add_unconverted(G, name, subgraph, op_name, op, load_tensors, dequantize)
def test_conf2d_q2(caplog):
    """Quantized 2x2 conv over a 2x2 input of ones with weight 1.0.

    A single valid-padding position sums four products of 1.0, so the
    dequantized output must be exactly [[[4.]]].
    """
    caplog.set_level(logging.INFO)

    # Q1.16 weights, all ones.
    weights_q = QType(16, 1, True)
    weights = weights_q.quantize(np.full([1, 1, 2, 2], 1.0))

    params = Conv2DParameters("test",
                              filt=Conv2DFilterDim(2, 2, 1, 1),
                              stride=StrideDim(1),
                              padding=PadDim.valid(),
                              dilation=DilationDim(1),
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])

    # Accumulator precision is the sum of the operand precisions.
    in_q = QType(16, 0, True)
    calc_q = QType(weights_q.bits + in_q.bits, weights_q.q + in_q.q, True)
    qrec = FilterQuantizationRecord(in_qs=[in_q],
                                    out_qs=[in_q],
                                    weights_q=weights_q,
                                    acc_q=calc_q,
                                    calc_q=calc_q)

    input_ = in_q.quantize(np.full([1, 2, 2], 1.0))
    in_dims = Dim.named(c=1, h=2, w=2).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])

    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None, qrec=qrec)
    output_ = in_q.dequantize(output_)
    assert np.array_equal(output_, [[[4.]]])
def calc_pad_dim(cls, node, expected_len):
    """Build a PadDim from an ONNX node's auto_pad / pads attributes.

    An absent auto_pad attribute is treated as "NOTSET", in which case the
    explicit "pads" attribute (normalized through cls.mix_pads) is used.
    Raises ValueError on an unrecognized auto_pad value.
    """
    # Missing attribute defaults to explicit-pads behavior.
    mode = node.attrs.get("auto_pad", "NOTSET")
    if mode == "NOTSET":
        return PadDim(*cls.mix_pads(node.attrs.get("pads", [])))
    if mode == "VALID":
        return PadDim.valid()
    if mode == "SAME_UPPER":
        return PadDim.same(same_type="balanced_left")
    if mode == "SAME_LOWER":
        return PadDim.same(same_type="balanced_right")
    raise ValueError("bad pad type")
def calc_pad_dim(cls, node, spatial_size):
    """Build a PadDim from an ONNX node's auto_pad / pads attributes.

    The explicit pads (defaulting to zeros for spatial_size dimensions) are
    first normalized to length 2x2 via cls.pad_start_with. An absent
    auto_pad attribute is treated as "NOTSET". Raises ValueError on an
    unrecognized auto_pad value.
    """
    raw_pads = node.attrs.get("pads", [0, 0] * spatial_size)
    pads = cls.pad_start_with(raw_pads, [0, 0], 2)
    # Missing attribute defaults to explicit-pads behavior.
    mode = node.attrs.get("auto_pad", "NOTSET")
    if mode == "NOTSET":
        return PadDim(*pads)
    if mode == "VALID":
        return PadDim.valid()
    if mode == "SAME_UPPER":
        return PadDim.same(same_type="balanced_left")
    if mode == "SAME_LOWER":
        return PadDim.same(same_type="balanced_right")
    raise ValueError("bad pad type")
def calc_shapes(node, spatial_size, input_size, kernel_shape):
    """Resolve padding, dilations, strides and output_padding for a (de)conv node.

    All attributes are expanded to 2 spatial dimensions via expand_dim so 1D
    cases are promoted to 2D. Returns (padding, dilations, strides,
    output_padding); padding is a PadDim except on the fall-through path noted
    below.
    """
    # pads has 2 values per spatial dim (starts then ends); pad with zeros up to 4.
    padding = expand_dim(node.attrs.get('pads', None), 4 - spatial_size * 2, 0)
    auto_pad = node.attrs.get('auto_pad', 'NOTSET')
    output_shape = expand_dim(node.attrs.get('output_shape', None), 2 - spatial_size, 1)
    output_padding = Dim2D(*expand_dim(node.attrs.get('output_padding', None), 2 - spatial_size, 0))
    dilations = DilationDim(*expand_dim(node.attrs.get('dilations', None), 2 - spatial_size, 1))
    strides = StrideDim(*expand_dim(node.attrs.get('strides', None), 2 - spatial_size, 1))
    if output_shape:
        # An explicit output_shape overrides any pads attribute: derive the
        # total padding needed to hit that shape, then split it between start
        # and end according to auto_pad (SAME_UPPER puts the smaller half at
        # the start; every other mode puts the smaller half at the end).
        total_padding = strides * (input_size - 1) + output_padding + ((kernel_shape - 1) * dilations + 1) - output_shape
        if auto_pad == 'SAME_UPPER':
            pad_start = total_padding // 2
            pad_end = total_padding - pad_start
        else:
            pad_end = total_padding // 2
            pad_start = total_padding - pad_end
        padding = PadDim(pad_start.h, pad_end.h, pad_start.w, pad_end.w)
    elif auto_pad == 'NOTSET':
        # NOTE(review): assert is stripped under -O; presumably callers always
        # supply pads here — confirm before relying on this check.
        assert padding, 'pads not set and auto_pad is NOTSET'
        padding = PadDim(*padding)
    elif auto_pad == 'VALID':
        padding = PadDim.valid()
    # NOTE(review): with auto_pad == SAME_UPPER/SAME_LOWER and no output_shape,
    # padding falls through as the raw expanded pads list (possibly None), not
    # a PadDim — looks intentional upstream but verify against callers.
    return padding, dilations, strides, output_padding
def test_conf2d_2_in_2_out_c():
    """Float conv2d with 2 input and 2 output channels.

    Both input channels and all filters are identical, so both output
    channels must carry the same 2x2 result.
    """
    # One 2x2 kernel [[0,1],[2,3]] replicated across 2 out and 2 in channels.
    base_kernel = np.arange(4).reshape([1, 2, 2])
    stacked = np.concatenate([base_kernel] * 4, axis=0)
    weights = stacked.reshape([2, 2, 2, 2])

    params = Conv2DParameters("test",
                              filt=Conv2DFilterDim(2, 2, 2, 2),
                              stride=StrideDim(1),
                              padding=PadDim.valid(),
                              dilation=DilationDim(1),
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])

    # Two identical 3x3 input channels containing 0..8.
    channel = np.arange(9).reshape([1, 3, 3])
    input_ = np.concatenate([channel, channel], axis=0)

    in_dims = Dim.named(c=2, h=3, w=3).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])

    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None, None)
    expected = [[[38., 50.], [74., 86.]],
                [[38., 50.], [74., 86.]]]
    assert np.array_equal(output_, expected)
def get_tf_padding(padding):
    """Map a TFLite Padding enum value to the corresponding PadDim.

    Raises ValueError for any value other than SAME or VALID.
    """
    factory = {
        Padding.Padding.SAME: PadDim.same,
        Padding.Padding.VALID: PadDim.valid,
    }.get(padding)
    if factory is None:
        raise ValueError("Strange padding type")
    return factory()