def convert_resizebilinear(self, op):
    resize_size_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_resize_size_str)
    if resize_size_arg is not None:
        newdim = resize_size_arg.ints
    else:
        height_scale_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_height_scale_str)
        width_scale_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_width_scale_str)
        mace_check(
            height_scale_arg is not None and width_scale_arg is not None,
            "Wrong ResizeBilinear arguments.")
        if len(op.input) == 2:
            op.input.pop()  # drop the size tensor; scales determine newdim
        height_scale = height_scale_arg.f
        width_scale = width_scale_arg.f
        producer_op = self._producers[op.input[0]]
        for i in range(len(producer_op.output)):
            if producer_op.output[i] == op.input[0]:
                input_shape = producer_op.output_shape[i]
                break
        newdim = [int(height_scale * input_shape.dims[1]),
                  int(width_scale * input_shape.dims[2])]

    self.add_arg_const_node(op, '/newdim:0', [2], newdim)
    self.add_min_max_const_node(op, op.input[0])
    self.add_resize_args(op)

    op.type = HexagonOp.QuantizedResizeBilinear_8.name
def convert_resizenearestneighbor(self, op):
    height_scale_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_height_scale_str)
    width_scale_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_width_scale_str)
    if height_scale_arg is not None:
        mace_check(width_scale_arg is not None,
                   "height scale and width scale should be "
                   "present at the same time.")
        if len(op.input) == 2:
            op.input.pop()  # newdim is computed from the scales instead
        height_scale = height_scale_arg.f
        width_scale = width_scale_arg.f
        producer_op = self._producers[op.input[0]]
        for i in range(len(producer_op.output)):
            if producer_op.output[i] == op.input[0]:
                input_shape = producer_op.output_shape[i]
                break
        newdim = [int(height_scale * input_shape.dims[1]),
                  int(width_scale * input_shape.dims[2])]
        self.add_arg_const_node(op, '/newdim:0', [2], newdim)

    self.add_min_max_const_node(op, op.input[0])
    self.add_resize_args(op)

    op.type = HexagonOp.ResizeNearestNeighbor_8.name
def infer_shape_argmax(self, op):
    input_shape = self._output_shape_cache[op.input[0]]
    output_dim_num = len(input_shape)
    if output_dim_num < 3:
        output_dim_num = 3

    axis_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
    has_axis = (axis_arg is not None)
    axis_value = 0
    if has_axis:
        axis_value = axis_arg.i
        if axis_value < 0:
            axis_value = len(input_shape) + axis_value

    top_k = ConverterUtil.get_arg(op, MaceKeyword.mace_top_k_str).i
    mace_check(top_k >= 1, "Invalid top_k value")
    out_val = ConverterUtil.get_arg(op, MaceKeyword.mace_out_val_str).i

    if has_axis:  # produces max_ind or max_val along the given axis
        # Copy so the cached input shape is not mutated in place.
        output_shape = list(input_shape)
        output_shape[axis_value] = top_k
    else:
        output_shape = [1] * output_dim_num
        output_shape[0] = input_shape[0]
        output_shape[2] = top_k
        if out_val:  # produces both max_ind and max_val
            output_shape[1] = 2
    self.add_output_shape(op, [output_shape])
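# Illustrative example of the ArgMax shape rule above (not part of the
# converter): input [1, 64, 7] with axis = -1 (normalized to 2) and
# top_k = 1 gives output [1, 64, 1]; with no axis and out_val = 1 the
# result is [batch, 2, top_k], where dim 1 holds the value/index pair.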
def convert_reduce(self, op):
    self.add_min_max_const_node(op, op.input[0])
    reduce_type_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_reduce_type_str)
    mace_check(reduce_type_arg.i == ReduceType.MEAN.value,
               "Hexagon Reduce only supports Mean now.")
    keep_dims_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_keepdims_str)
    mace_check(keep_dims_arg.i == 1,
               "Hexagon Reduce Mean only supports keep dims now.")
    axis_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
    mace_check(1 <= len(axis_arg.ints) <= 2,
               "Hexagon Reduce Mean only supports spatial now.")
    for i in axis_arg.ints:
        mace_check(1 <= i <= 2,
                   "Hexagon Reduce Mean only supports spatial now")

    producer_op_name, _ = get_op_and_port_from_tensor(op.input[0])
    input_dims = None
    for producer_op in self._model.op:
        if producer_op.name == producer_op_name:
            input_dims = producer_op.output_shape[0].dims
            break
    mace_check(input_dims is not None, "Missing input shape.")

    if len(axis_arg.ints) == 1:
        dim1, dim2 = (input_dims[1], 1) \
            if axis_arg.ints[0] == 1 else (1, input_dims[2])
    else:
        dim1, dim2 = input_dims[1], input_dims[2]
    self.add_arg_const_node(op, '/window:0', [1, dim1, dim2, 1])
    self.add_arg_const_node(op, '/strides:0', [1, dim1, dim2, 1])

    op.type = HexagonOp.QuantizedAvgPool_8.name
def convert_conv2d(self, op):
    if len(op.input) < 3:
        bias = self.add_bias(op)
    else:
        bias = op.input.pop()

    self.add_min_max_const_node(op, op.input[0])
    self.add_min_max_const_node(op, op.input[1])

    strides_arg = ConverterUtil.get_arg(op, 'strides')
    mace_check(strides_arg is not None,
               "Missing strides of Conv or Depthwise Conv.")
    self.add_arg_const_node(
        op, '/strides:0', [1, strides_arg.ints[0], strides_arg.ints[1], 1])

    op.input.append(bias)
    self.add_min_max_const_node(op, bias)
    self.add_min_max_const_node(op, op.output[0], True, True, False)
    self.add_padding_type_for_conv_pooling(
        op, self._consts[op.input[1]].dims, strides_arg.ints)

    dilations_arg = ConverterUtil.get_arg(op, 'dilations')
    mace_check(dilations_arg is None or
               (dilations_arg.ints[0] == 1 and dilations_arg.ints[1] == 1),
               "Hexagon only supports dilations [1, 1].")

    if op.type == MaceOp.DepthwiseConv2d.name:
        op.type = HexagonOp.DepthwiseSupernode_8x8p32to8.name
    else:
        op.type = HexagonOp.Supernode_8x8p32to8.name
def ensure_binary_input(self):
    for _op in self._model.op:
        if _op.type != MaceOp.Eltwise.name:
            continue
        if len(_op.input) != 1:
            continue
        eltwise_type = ConverterUtil.get_arg(
            _op, MaceKeyword.mace_element_type_str).i
        if eltwise_type != EltwiseType.SUM.value and \
                eltwise_type != EltwiseType.PROD.value:
            continue
        float_value_arg = ConverterUtil.get_arg(
            _op, MaceKeyword.mace_scalar_input_str)
        mace_check(float_value_arg.f is not None,
                   _op.name + ': ' + MaceKeyword.mace_scalar_input_str +
                   ' value float should not be None')
        scalar = float_value_arg.f

        # Materialize the scalar as a one-element const tensor so the
        # eltwise op gets a real second input.
        const_tensor = self._model.tensors.add()
        const_tensor.name = _op.name + '/' + \
            MaceKeyword.mace_scalar_input_str + ':0'
        const_tensor.dims.extend([1])
        const_tensor.data_type = _op.output_type[0]
        if _op.output_type[0] == mace_pb2.DT_UINT8 or \
                _op.output_type[0] == mace_pb2.DT_INT16:
            # Quantized path: store quantized value 1 with scale equal to
            # the scalar, so it dequantizes back to the scalar.
            const_tensor.scale = scalar
            const_tensor.zero_point = 0
            const_tensor.quantized = True
            const_tensor.int32_data.extend([1])
        elif _op.output_type[0] == mace_pb2.DT_FLOAT:
            const_tensor.float_data.extend([scalar])
        _op.input.extend([const_tensor.name])

        ConverterUtil.del_arg(_op, MaceKeyword.mace_scalar_input_str)
        ConverterUtil.del_arg(
            _op, MaceKeyword.mace_scalar_input_index_str)
def convert_reduce(self, op):
    self.add_min_max_const_node(op, op.input[0])
    reduce_type_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_reduce_type_str)
    mace_check(reduce_type_arg.i == ReduceType.MEAN.value,
               "Hexagon Reduce only supports Mean now.")
    keep_dims_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_keepdims_str)
    mace_check(keep_dims_arg.i == 1,
               "Hexagon Reduce Mean only supports keep dims now.")
    axis_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
    mace_check(1 <= len(axis_arg.ints) <= 2,
               "Hexagon Reduce Mean only supports spatial now.")
    for i in axis_arg.ints:
        mace_check(1 <= i <= 2,
                   "Hexagon Reduce Mean only supports spatial now")

    input_shape = get_input_shape(op.input[0], self._model)
    if len(axis_arg.ints) == 1:
        dim1, dim2 = (input_shape[1], 1) \
            if axis_arg.ints[0] == 1 else (1, input_shape[2])
    else:
        dim1, dim2 = input_shape[1], input_shape[2]
    self.add_arg_const_node(op, '/window:0', [1, dim1, dim2, 1])
    self.add_arg_const_node(op, '/strides:0', [1, dim1, dim2, 1])

    op.type = HexagonOp.QuantizedAvgPool_8.name
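# The Mean -> AvgPool lowering above sets the pooling window and strides
# to cover exactly the reduced spatial extent. For example (illustrative
# numbers): an NHWC input [1, 7, 7, 32] with axis = [1, 2] yields
# window = strides = [1, 7, 7, 1], one average over the whole 7x7 plane
# per channel; axis = [1] alone yields [1, 7, 1, 1], averaging rows only.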
def infer_shape_matmul(self, op):
    lhs_shape = self._output_shape_cache[op.input[0]]
    lhs_rank = len(lhs_shape)
    lhs_rows = lhs_shape[-2]
    lhs_cols = lhs_shape[-1]
    rhs_shape = self._output_shape_cache[op.input[1]]
    rhs_rank = len(rhs_shape)
    rhs_rows = rhs_shape[-2]
    rhs_cols = rhs_shape[-1]

    transpose_a_ = ConverterUtil.get_arg(
        op, MaceKeyword.mace_transpose_a_str).i
    transpose_b_ = ConverterUtil.get_arg(
        op, MaceKeyword.mace_transpose_b_str).i
    rows = lhs_cols if transpose_a_ else lhs_rows
    cols = rhs_rows if transpose_b_ else rhs_cols

    # The higher-rank operand supplies the batch dims; only its last two
    # dims are replaced by the matmul result.
    if lhs_rank >= rhs_rank:
        if lhs_rank > rhs_rank:
            mace_check(rhs_rank == 2,
                       'The rhs rank of non-batched MatMul must be 2')
        output_shape = lhs_shape.copy()
        output_shape[lhs_rank - 2] = rows
        output_shape[lhs_rank - 1] = cols
    else:
        output_shape = rhs_shape.copy()
        output_shape[rhs_rank - 2] = rows
        output_shape[rhs_rank - 1] = cols
    self.add_output_shape(op, [output_shape])
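# A minimal standalone sketch of the MatMul shape rule above, assuming
# NumPy-style semantics where a rank-2 rhs broadcasts over the lhs batch
# dims. `matmul_output_shape` is a hypothetical helper for illustration,
# not part of MACE.
def matmul_output_shape(lhs_shape, rhs_shape,
                        transpose_a=False, transpose_b=False):
    rows = lhs_shape[-1] if transpose_a else lhs_shape[-2]
    cols = rhs_shape[-2] if transpose_b else rhs_shape[-1]
    # The higher-rank operand supplies the batch dims.
    base = lhs_shape if len(lhs_shape) >= len(rhs_shape) else rhs_shape
    out = list(base)
    out[-2], out[-1] = rows, cols
    return out

# matmul_output_shape([2, 3, 4], [4, 5]) -> [2, 3, 5]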
def add_tensorflow_padding_value(self):
    for op in self._model.op:
        padding_type = ConverterUtil.get_arg(
            op, MaceKeyword.mace_padding_str)
        if padding_type is None:
            continue

        padding_arg = op.arg.add()
        padding_arg.name = MaceKeyword.mace_padding_values_str
        if padding_type.i == PaddingMode.VALID.value:
            padding_arg.ints.extend([0, 0, 0, 0])
        elif padding_type.i == PaddingMode.SAME.value:
            stride = ConverterUtil.get_arg(
                op, MaceKeyword.mace_strides_str).ints
            kernel = []
            dilation = [1, 1]
            if op.type == MaceOp.Conv2D.name or \
                    op.type == MaceOp.DepthwiseConv2d.name or \
                    op.type == MaceOp.Deconv2D.name:
                if ConverterUtil.get_arg(
                        op, MaceKeyword.mace_dilations_str) is not None:
                    dilation = ConverterUtil.get_arg(
                        op, MaceKeyword.mace_dilations_str).ints
                # Kernel size comes from the filter tensor's HW dims.
                for tensor in self._model.tensors:
                    if tensor.name == op.input[1]:
                        kernel = tensor.dims[1:3]
                        break
            else:
                kernel = ConverterUtil.get_arg(
                    op, MaceKeyword.mace_kernel_str).ints

            # Find the input's spatial size: either a model input or the
            # output of a producing op.
            in_size = []
            for input_info in self._model.input_info:
                if input_info.name == op.input[0]:
                    in_size = input_info.dims[1:3]
                    break
            for _op in self._model.op:
                for out in _op.output:
                    if out == op.input[0]:
                        in_size = _op.output_shape[0].dims[1:3]
                        break
                if len(in_size) > 0:
                    break

            out_size = op.output_shape[0].dims[1:3]
            if op.type == MaceOp.Deconv2D.name:
                h = (in_size[0] - 1) * stride[0] + kernel[0] - out_size[0]
                w = (in_size[1] - 1) * stride[1] + kernel[1] - out_size[1]
            else:
                h = (out_size[0] - 1) * stride[0] \
                    + ((kernel[0] - 1) * dilation[0] + 1) - in_size[0]
                w = (out_size[1] - 1) * stride[1] \
                    + ((kernel[1] - 1) * dilation[1] + 1) - in_size[1]
            top = int(np.floor(h / 2))
            left = int(np.floor(w / 2))
            bottom = h - top
            right = w - left
            padding_arg.ints.extend([top, right, bottom, left])
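# A standalone sketch of the SAME-padding arithmetic above, handy for
# sanity checks outside the converter. `same_padding_hw` is a
# hypothetical helper, not part of MACE.
def same_padding_hw(out_size, in_size, kernel, stride, dilation=(1, 1)):
    pads = []
    for o, i, k, s, d in zip(out_size, in_size, kernel, stride, dilation):
        # pad_total = (out - 1) * stride + effective_kernel - in
        total = (o - 1) * s + ((k - 1) * d + 1) - i
        pads.append((total // 2, total - total // 2))  # (begin, end)
    return pads

# e.g. same_padding_hw([7, 7], [7, 7], [3, 3], [1, 1], [2, 2])
# -> [(2, 2), (2, 2)], i.e. top = bottom = left = right = 2, which the
# converter stores as [top, right, bottom, left] = [2, 2, 2, 2].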
def convert_elementwise(self, op):
    element_type = ConverterUtil.get_arg(
        op, MaceKeyword.mace_element_type_str).i

    if element_type == EltwiseType.DIV.value and \
            op.input[0] in self._consts:
        tensor = self._consts[op.input[0]]
        if len(tensor.int32_data) == 1:
            f = tensor.scale * (tensor.int32_data[0] - tensor.zero_point)
            if abs(f - 1) < 1e-6:  # 1 / x, lower to reciprocal
                op_input = op.input[1]
                del op.input[:]
                op.input.append(op_input)
                self.add_min_max_const_node(op, op.input[0])
                op.type = HexagonOp.QuantizedRecip_8.name
                return
    if element_type == EltwiseType.POW.value and \
            ConverterUtil.get_arg(
                op, MaceKeyword.mace_scalar_input_str).f == 0.5:
        # x ** 0.5, lower to square root
        self.add_min_max_const_node(op, op.input[0])
        op.type = HexagonOp.QuantizedSqrt_8.name
        return
    if element_type == EltwiseType.CLIP.value:
        self.add_min_max_const_node(op, op.input[0])
        coeff = ConverterUtil.get_arg(
            op, MaceKeyword.mace_coeff_str).floats
        min_value, max_value = coeff[0], coeff[1]
        self.add_arg_const_node(op, "/min:0", [1], [min_value],
                                data_type=mace_pb2.DT_FLOAT)
        self.add_arg_const_node(op, "/max:0", [1], [max_value],
                                data_type=mace_pb2.DT_FLOAT)
        op.type = HexagonOp.QuantizedClamp_8.name
        return

    if len(op.input) == 1:
        scalar_input = ConverterUtil.get_arg(
            op, MaceKeyword.mace_scalar_input_str).f
        self.add_quantized_scalar_const_node("/b:0", scalar_input, op)
    self.add_min_max_const_node(op, op.input[0])
    self.add_min_max_const_node(op, op.input[1])

    if element_type in [EltwiseType.SUM.value,
                        EltwiseType.SUB.value,
                        EltwiseType.MIN.value,
                        EltwiseType.MAX.value,
                        EltwiseType.DIV.value]:
        self.add_min_max_const_node(op, op.output[0], True, True, False)
    try:
        op.type = self.eltwise_type[element_type]
    except KeyError:
        mace_check(False,
                   "Hexagon does not support elementwise %s"
                   % EltwiseType(element_type).name)
def convert_stridedslice(self, op):
    begin_mask = ConverterUtil.get_arg(
        op, MaceKeyword.mace_begin_mask_str).i
    end_mask = ConverterUtil.get_arg(op, MaceKeyword.mace_end_mask_str).i
    shrink_mask = ConverterUtil.get_arg(
        op, MaceKeyword.mace_shrink_axis_mask_str).i
    self.add_arg_const_node(op, "/begin_mask:0", [1], [begin_mask])
    self.add_arg_const_node(op, "/end_mask:0", [1], [end_mask])
    self.add_arg_const_node(op, "/shrink_mask:0", [1], [shrink_mask])

    self.add_min_max_const_node(op, op.input[0])

    op.type = HexagonOp.QuantizedStridedSlice_8.name
def add_deconv_pad_node(self, op):
    padding_type_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_padding_type_str)
    padding_values_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_padding_values_str)
    mace_check(
        padding_type_arg is not None or padding_values_arg is not None,
        "Missing padding of Deconv.")
    if padding_type_arg is not None:
        padding_type = PaddingMode(padding_type_arg.i)
        strides_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_strides_str)
        mace_check(strides_arg is not None, "Missing strides of Deconv.")
        stride_h = strides_arg.ints[0]
        stride_w = strides_arg.ints[1]
        input_shape = self.get_input_shape(op.input[0])
        input_h = input_shape[1]
        input_w = input_shape[2]
        filter_tensor = self._consts[op.input[1]]
        filter_h = filter_tensor.dims[1]
        filter_w = filter_tensor.dims[2]
        output_h = op.output_shape[0].dims[1]
        output_w = op.output_shape[0].dims[2]

        # Derive the input size implied by the output, then the total
        # padding is whatever the transposed convolution overshoots by.
        if padding_type == PaddingMode.VALID:
            expected_input_h = (output_h - filter_h + stride_h) // stride_h
            expected_input_w = (output_w - filter_w + stride_w) // stride_w
        elif padding_type == PaddingMode.SAME:
            expected_input_h = (output_h + stride_h - 1) // stride_h
            expected_input_w = (output_w + stride_w - 1) // stride_w
        else:
            raise Exception(
                'Hexagon deconv does not support padding type: ',
                padding_type)
        mace_check(expected_input_h == input_h,
                   "Wrong input/output height")
        mace_check(expected_input_w == input_w, "Wrong input/output width")

        pad_h = (input_h - 1) * stride_h + filter_h - output_h
        pad_w = (input_w - 1) * stride_w + filter_w - output_w
    else:
        pad_h = padding_values_arg.ints[0]
        pad_w = padding_values_arg.ints[1]

    pad_h, pad_w = max(pad_h, 0), max(pad_w, 0)
    pad_top = pad_h // 2
    pad_bottom = pad_h - pad_top
    pad_left = pad_w // 2
    pad_right = pad_w - pad_left
    paddings = [pad_top, pad_bottom, pad_left, pad_right]
    self.add_arg_const_node(op, "/paddings:0", [1, 1, 2, 2], paddings)
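# Worked example of the deconv padding math above (illustrative numbers):
# VALID, stride 2, filter 3, output_h 9 -> expected_input_h
#   = (9 - 3 + 2) // 2 = 4 and pad_h = (4 - 1) * 2 + 3 - 9 = 0;
# SAME, stride 2, filter 3, output_h 10 -> expected_input_h
#   = (10 + 2 - 1) // 2 = 5 and pad_h = (5 - 1) * 2 + 3 - 10 = 1,
# split as pad_top = 0, pad_bottom = 1.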
def convert_resizebilinear(self, op):
    newdim_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_resize_size_str)
    self.add_arg_const_node(
        op, '/newdim:0', [len(newdim_arg.ints)], newdim_arg.ints)

    self.add_min_max_const_node(op, op.input[0])

    align_corners_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_align_corners_str)
    self.add_arg_const_node(
        op, '/align_corners:0', [1], [align_corners_arg.i])

    op.type = HexagonOp.QuantizedResizeBilinear_8.name
def convert_activation(self, op):
    self.add_min_max_const_node(op, op.input[0])

    act_type = ConverterUtil.get_arg(
        op, MaceKeyword.mace_activation_type_str).s.decode()
    if act_type == ActivationType.RELUX.name:
        x = ConverterUtil.get_arg(
            op, MaceKeyword.mace_activation_max_limit_str).f
        self.add_scalar_const_node("/x:0", x, op)
    try:
        op.type = self.activation_type[act_type]
    except KeyError:
        mace_check(False,
                   "Hexagon does not support activation %s" % act_type)
def add_resize_args(self, op):
    align_corners_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_align_corners_str)
    self.add_arg_const_node(
        op, '/align_corners:0', [1], [align_corners_arg.i])

    coordinate_transformation_mode_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_coordinate_transformation_mode_str)
    if coordinate_transformation_mode_arg is not None:
        name = CoordinateTransformationMode(
            coordinate_transformation_mode_arg.i)
        value = coordinate_transformation_mode_arg.i
        mace_check(
            value == CoordinateTransformationMode.HALF_PIXEL.value,
            "Hexagon does not support resize %s" % name)
        self.add_arg_const_node(op, '/half_pixel_centers:0', [1], [1])
def infer_shape_prior_box(self, op):
    output_shape = [1, 2, 1]
    input_shape = list(self._output_shape_cache[op.input[0]])
    input_w = input_shape[3]
    input_h = input_shape[2]
    min_size = ConverterUtil.get_arg(
        op, MaceKeyword.mace_min_size_str).floats
    max_size = ConverterUtil.get_arg(
        op, MaceKeyword.mace_max_size_str).floats
    aspect_ratio = ConverterUtil.get_arg(
        op, MaceKeyword.mace_aspect_ratio_str).floats
    num_prior = len(aspect_ratio) * len(min_size) + len(max_size)

    # Four box coordinates per prior, for every feature-map position.
    output_shape[2] = num_prior * input_h * input_w * 4
    self.add_output_shape(op, [output_shape])
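# Illustrative example of the prior-box count above: 2 min sizes, 1 max
# size and 3 aspect ratios on a 10x10 feature map give
# num_prior = 3 * 2 + 1 = 7 and dim 2 = 7 * 10 * 10 * 4 = 2800
# (four box coordinates per prior), i.e. output [1, 2, 2800].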
def init_multi_net_def_info(self, multi_net_def):
    netdefs = multi_net_def.net_def
    self.net_num = len(netdefs)
    self.net_defs = [None] * self.net_num
    self.net_op_nums = [0] * self.net_num
    self.quantizes = [False] * self.net_num
    self.hexagons = [False] * self.net_num
    for net_def in netdefs:
        # Index the nets by inference order, not file order.
        order = net_def.infer_order
        self.net_defs[order] = net_def
        self.net_op_nums[order] = len(net_def.op)
        is_quantize = ConverterUtil.get_arg(
            net_def, MaceKeyword.mace_quantize_flag_arg_str)
        self.quantizes[order] = \
            False if is_quantize is None else is_quantize.i == 1
        # A quantized net ending in a Hexagon output op runs on the DSP.
        self.hexagons[order] = \
            self.quantizes[order] and \
            (net_def.op[-1].type == HexagonOp.DequantizeOUTPUT_8tof.name
             or net_def.op[-1].type == HexagonOp.OUTPUT.name)

    self.end_index = self.start_index = 0
    for op_num in self.net_op_nums:
        self.end_index = self.end_index + op_num
    self.start_net_idx = 0
    self.start_op_idx = 0
    self.end_net_idx = self.net_num
    self.end_op_idx = self.net_op_nums[self.end_net_idx - 1]
def run(self):
    if self._option.quantize:
        self.use_quant_in_out()
    self.add_op_output_type()
    self.ensure_bias_vector()
    self.ensure_binary_input()
    self.common_check()
    if ConverterUtil.get_arg(
            self._model, MaceKeyword.mace_framework_type_str).i == \
            FrameworkType.TENSORFLOW.value:
        self.add_tensorflow_padding_value()
    # Calculate the number of apu constant tensors.
    # Any tensors which will be apu constant tensors should be added
    # above this line.
    const_data_num_arg = self._model.arg.add()
    const_data_num_arg.name = MaceKeyword.mace_const_data_num_arg_str
    const_data_num_arg.i = len(self._model.tensors)
    apu_data_type_arg = self._model.arg.add()
    apu_data_type_arg.name = MaceKeyword.mace_apu_data_type_arg_str
    if self._option.quantize_schema == 'mace_apu_16bit_per_tensor':
        apu_data_type_arg.i = mace_pb2.DT_INT16
    elif self._option.quantize:
        apu_data_type_arg.i = mace_pb2.DT_UINT8
    else:
        apu_data_type_arg.i = mace_pb2.DT_FLOAT
    self.convert_ops()
    self.add_node_id()
    return self._model
def __init__(self, option, model, quantize_activation_info):
    self._option = option
    self._model = model
    self._new_ops = []
    self._consts = {}
    self._producers = {}
    self._quantize_activation_info = quantize_activation_info
    self._op_converters = {
        MaceOp.Activation.name: self.convert_activation,
        MaceOp.BatchNorm.name: self.convert_batchnorm,
        MaceOp.BatchToSpaceND.name: self.convert_batchspace,
        MaceOp.Concat.name: self.convert_concat,
        MaceOp.Conv2D.name: self.convert_conv2d,
        MaceOp.Deconv2D.name: self.convert_deconv2d,
        MaceOp.DepthToSpace.name: self.convert_depthspace,
        MaceOp.DepthwiseConv2d.name: self.convert_conv2d,
        MaceOp.Dequantize.name: self.convert_dequantize,
        MaceOp.Eltwise.name: self.convert_elementwise,
        MaceOp.ExpandDims.name: self.convert_expanddims,
        MaceOp.FullyConnected.name: self.convert_fullyconnected,
        MaceOp.Pad.name: self.convert_pad,
        MaceOp.Pooling.name: self.convert_pooling,
        MaceOp.Quantize.name: self.convert_quantize,
        MaceOp.Reduce.name: self.convert_reduce,
        MaceOp.ResizeBilinear.name: self.convert_resizebilinear,
        MaceOp.ResizeNearestNeighbor.name:
            self.convert_resizenearestneighbor,
        MaceOp.Softmax.name: self.convert_softmax,
        MaceOp.Split.name: self.convert_split,
        MaceOp.StridedSlice.name: self.convert_stridedslice,
        MaceOp.SpaceToBatchND.name: self.convert_batchspace,
        MaceOp.SpaceToDepth.name: self.convert_depthspace,
    }
    self._framework_type = ConverterUtil.get_arg(
        self._model, MaceKeyword.mace_framework_type_str).i
def add_deconv_pad_node(self, op):
    padding_type_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_padding_type_str)
    mace_check(padding_type_arg is not None, "Missing padding of Deconv.")
    padding_type = PaddingMode(padding_type_arg.i)

    filter_tensor = self._consts[op.input[1]]
    filter_height = filter_tensor.dims[1]
    filter_width = filter_tensor.dims[2]
    if padding_type == PaddingMode.VALID:
        paddings = [0, 0, 0, 0]
    elif padding_type == PaddingMode.SAME:
        pad_height, pad_width = filter_height // 2, filter_width // 2
        paddings = [pad_height, pad_height, pad_width, pad_width]
    else:
        raise Exception('Hexagon deconv does not support padding type: ',
                        padding_type)

    padding_tensor = self._model.tensors.add()
    padding_tensor.name = op.name + "/paddings:0"
    padding_tensor.data_type = mace_pb2.DT_INT32
    padding_tensor.dims.extend([1, 1, 2, 2])
    padding_tensor.int32_data.extend(paddings)
    self._consts[padding_tensor.name] = padding_tensor
    op.input.append(padding_tensor.name)
def convert_deconv2d(self, op):
    if self._framework_type == FrameworkType.TENSORFLOW.value:
        if len(op.input) < 4:
            bias = self.add_bias(op)
        else:
            bias = op.input.pop()
        op.input.pop()  # output shape
    else:
        if len(op.input) < 3:
            bias = self.add_bias(op)
        else:
            bias = op.input.pop()

    self.add_min_max_const_node(op, op.input[0])
    self.add_min_max_const_node(op, op.input[1])

    self.add_deconv_pad_node(op)

    strides_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_strides_str)
    mace_check(strides_arg is not None, "Missing strides of Deconv.")
    self.add_arg_const_node(
        op, '/strides:0', [1, strides_arg.ints[0], strides_arg.ints[1], 1])

    op.input.append(bias)
    self.add_min_max_const_node(op, bias)
    self.add_min_max_const_node(op, op.output[0], True, True, False)

    op.type = HexagonOp.QuantizedTransposeConv2d_8x8p32to8.name
def convert_pad(self, op):
    self.add_min_max_const_node(op, op.input[0])

    paddings = ConverterUtil.get_arg(
        op, MaceKeyword.mace_paddings_str).ints
    self.add_arg_const_node(
        op, '/paddings:0', [1, 1, len(paddings) // 2, 2], paddings)

    pad_type = ConverterUtil.get_arg(op, MaceKeyword.mace_pad_type_str).i
    mace_check(pad_type == PadType.CONSTANT.value,
               "Hexagon only supports constant pad")
    constant_value = ConverterUtil.get_arg(
        op, MaceKeyword.mace_constant_value_str).f
    self.add_scalar_const_node('/constant_value:0', constant_value, op)

    op.type = HexagonOp.QuantizedPad_8.name
def convert_deconv2d(self, op):
    channels = op.output_shape[0].dims[3]
    if len(op.input) < 4:
        print('Hexagon deconv requires a bias add, adding a zero bias.')
        bias_data = np.zeros(channels, dtype=int)
        bias_tensor = self._model.tensors.add()
        bias_tensor.data_type = mace_pb2.DT_INT32
        bias_tensor.dims.extend([channels])
        bias_tensor.int32_data.extend(bias_data)
        bias_tensor.minval = 0
        bias_tensor.maxval = 0
        bias_tensor.name = op.name + "/bias:0"
        bias = bias_tensor.name
        self._consts[bias] = bias_tensor
    else:
        bias = op.input.pop()
    op.input.pop()  # output shape

    self.add_min_max_const_node(op, op.input[0])
    self.add_min_max_const_node(op, op.input[1])

    self.add_deconv_pad_node(op)

    strides_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_strides_str)
    mace_check(strides_arg is not None, "Missing strides of Deconv.")
    self.add_arg_const_node(
        op, '/strides:0', [1, strides_arg.ints[0], strides_arg.ints[1], 1])

    op.input.append(bias)
    self.add_min_max_const_node(op, bias)
    self.add_min_max_const_node(op, op.output[0], True, True, False)

    op.type = HexagonOp.QuantizedTransposeConv2d_8x8p32to8.name
def convert_pooling(self, op):
    self.add_min_max_const_node(op, op.input[0])

    window_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_kernel_str)
    self.add_arg_const_node(
        op, '/window:0', [1, window_arg.ints[0], window_arg.ints[1], 1])
    strides_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_strides_str)
    self.add_arg_const_node(
        op, '/strides:0', [1, strides_arg.ints[0], strides_arg.ints[1], 1])

    pooling_type_arg = ConverterUtil.get_arg(
        op, MaceKeyword.mace_pooling_type_str)
    if PoolingType(pooling_type_arg.i) == PoolingType.AVG:
        op.type = HexagonOp.QuantizedAvgPool_8.name
    else:
        op.type = HexagonOp.QuantizedMaxPool_8.name
def convert_conv2d(self, op):
    channels = op.output_shape[0].dims[3]
    if len(op.input) < 3:
        print('Supernode requires a bias add, adding a zero bias.')
        bias_data = np.zeros(channels, dtype=int)
        bias_tensor = self._model.tensors.add()
        bias_tensor.data_type = mace_pb2.DT_INT32
        bias_tensor.dims.extend([channels])
        bias_tensor.int32_data.extend(bias_data)
        bias_tensor.minval = 0
        bias_tensor.maxval = 0
        bias_tensor.name = op.name + "/bias:0"
        bias = bias_tensor.name
        self._consts[bias] = bias_tensor
    else:
        bias = op.input.pop()

    self.add_min_max_const_node(op, op.input[0])
    self.add_min_max_const_node(op, op.input[1])

    strides_arg = ConverterUtil.get_arg(op, 'strides')
    mace_check(strides_arg is not None,
               "Missing strides of Conv or Depthwise Conv.")
    self.add_arg_const_node(
        op, '/strides:0', [1, strides_arg.ints[0], strides_arg.ints[1], 1])

    op.input.append(bias)
    self.add_min_max_const_node(op, bias)
    self.add_min_max_const_node(op, op.output[0], True, True, False)

    if op.type == MaceOp.DepthwiseConv2d.name:
        op.type = HexagonOp.DepthwiseSupernode_8x8p32to8.name
    else:
        op.type = HexagonOp.Supernode_8x8p32to8.name
def convert_filters_format(self):
    arg_format = ConverterUtil.get_arg(
        self.net_def, MaceKeyword.mace_filter_format_str)
    if arg_format.i == DataFormat.OHWI.value:
        return
    mace_check(arg_format.i == DataFormat.OIHW.value, "Invalid model")
    arg_format.i = DataFormat.OHWI.value

    transposed_filter = set()
    for op in self.net_def.op:
        # OIHW => OHWI
        if (op.type == MaceOp.Conv2D.name
                or op.type == MaceOp.DepthwiseConv2d.name
                or op.type == MaceOp.FullyConnected.name) and \
                op.input[1] not in transposed_filter:
            print("transform filter: %s" % op.type)
            filter = self._consts[op.input[1]]
            tensor_data = np.frombuffer(self.weight_bytes, self.data_type,
                                        filter.data_size, filter.offset)
            filter_data = np.array(tensor_data).reshape(filter.dims) \
                .transpose(0, 2, 3, 1)
            filter_bytes = np.array(filter_data).tobytes()
            slice_end = filter.offset + len(filter_bytes)
            self.model_weights[filter.offset:slice_end] = filter_bytes
            filter.dims[:] = filter_data.shape
            transposed_filter.add(op.input[1])
def infer_shape_slice(self, op):
    # Copy so the cached input shape is not mutated, and use integer
    # division so the dims stay ints.
    output_shape = list(self._output_shape_cache[op.input[0]])
    axis = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str).i
    output_shape[axis] //= len(op.output)
    output_shapes = []
    for _ in op.output:
        output_shapes.append(output_shape)
    self.add_output_shape(op, output_shapes)
def infer_shape_transpose(self, op):
    input_shape = self._output_shape_cache[op.input[0]]
    output_shape = np.zeros(len(input_shape), dtype=np.int32)
    dims_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_dims_str)
    dims_ints = dims_arg.ints
    for idx in range(len(dims_ints)):
        output_shape[idx] = input_shape[dims_ints[idx]]
    self.add_output_shape(op, [output_shape])
def infer_shape_crop(self, op):
    mace_check(len(op.input) == 2, "crop layer needs two inputs")
    # Copy so the cached shape of input[0] is not mutated in place.
    output_shape = list(self._output_shape_cache[op.input[0]])
    input1_shape = self._output_shape_cache[op.input[1]]
    offsets = ConverterUtil.get_arg(op, MaceKeyword.mace_offset_str).ints
    for i in range(len(offsets)):
        if offsets[i] >= 0:
            output_shape[i] = input1_shape[i]
    self.add_output_shape(op, [output_shape])
def convert_instancenorm(self, op):
    affine = ConverterUtil.get_arg(op, MaceKeyword.mace_affine_str).i
    if not affine:
        del op.input[1:]
        self.add_min_max_const_node(op, op.input[0])
        op.type = HexagonOp.QuantizedInstanceNorm_8.name
    else:
        mace_check(False,
                   "Hexagon does not support instancenorm with affine")