def is_commutative(eltwise_type):
    return EltwiseType(eltwise_type) in [
        EltwiseType.SUM, EltwiseType.PROD,
        EltwiseType.MAX, EltwiseType.MIN,
    ]
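
# convert_ops rewrites each MACE op in place into the form Hexagon NN
# expects: input names gain explicit ":0" ports, quantization min/max
# const nodes are added, op-specific const tensors (bias, strides, pads,
# windows, ...) are appended as extra inputs, and op.type is finally
# mapped onto the corresponding HexagonOp.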

def convert_ops(self):
    print("Convert mace graph to hexagon.")
    for op in self._model.op:
        if not self._hexagon_ops.has_op(op.type):
            raise Exception('Unsupported op: %s' % op)
        # Normalize every input name to an explicit ":0" port and carry
        # its quantize activation info over to the new name.
        for i in range(len(op.input)):
            if ':' not in op.input[i]:
                node_name = op.input[i]
                op.input[i] += ':0'
                if node_name in self._quantize_activation_info:
                    self._quantize_activation_info[op.input[i]] = \
                        self._quantize_activation_info[node_name]

        if op.type == MaceOp.Conv2D.name \
                or op.type == MaceOp.DepthwiseConv2d.name:
            channels = op.output_shape[0].dims[3]

            if len(op.input) < 3:
                # The Hexagon supernode requires a bias input; add an
                # all-zero int32 bias when the op has none.
                print('Supernode requires biasadd, we add it.')
                bias_data = np.zeros(channels, dtype=int)
                bias_tensor = self._model.tensors.add()
                bias_tensor.data_type = mace_pb2.DT_INT32
                bias_tensor.dims.extend([channels])
                bias_tensor.int32_data.extend(bias_data)
                bias_tensor.minval = 0
                bias_tensor.maxval = 0
                bias_tensor.name = op.name + "/bias:0"
                bias = bias_tensor.name
                self._consts[bias] = bias_tensor
            else:
                bias = op.input.pop()

            self.add_min_max_const_node(op, op.input[0])
            self.add_min_max_const_node(op, op.input[1])

            strides_arg = ConverterUtil.get_arg(op, 'strides')
            mace_check(strides_arg is not None,
                       "Missing strides of Conv or Depthwise Conv.")
            strides = self.add_shape_const_node(
                op, [1, strides_arg.ints[0], strides_arg.ints[1], 1],
                MaceKeyword.mace_strides_str)

            op.input.extend([strides, bias])
            self.add_min_max_const_node(op, bias)
            self.add_min_max_const_node(
                op, op.output[0], True, True, False)
        elif op.type == MaceOp.Eltwise.name:
            self.add_min_max_const_node(op, op.input[0])
            self.add_min_max_const_node(op, op.input[1])
            self.add_min_max_const_node(
                op, op.output[0], True, True, False)
        elif op.type == MaceOp.BatchToSpaceND.name \
                or op.type == MaceOp.SpaceToBatchND.name:
            strides_arg = ConverterUtil.get_arg(
                op, MaceKeyword.mace_space_batch_block_shape_str)
            strides_tensor = self._model.tensors.add()
            strides_tensor.name = op.name + '/strides:0'
            strides_tensor.data_type = mace_pb2.DT_INT32
            strides_tensor.dims.extend([1, 1, 1, len(strides_arg.ints)])
            strides_tensor.int32_data.extend(strides_arg.ints)
            if op.type == MaceOp.BatchToSpaceND.name:
                pad_arg = ConverterUtil.get_arg(
                    op, MaceKeyword.mace_batch_to_space_crops_str)
            else:
                pad_arg = ConverterUtil.get_arg(
                    op, MaceKeyword.mace_paddings_str)
            pad_tensor = self._model.tensors.add()
            pad_tensor.name = op.name + '/pad:0'
            pad_tensor.data_type = mace_pb2.DT_INT32
            # Integer division: dims must be ints, and each pad entry is
            # a (before, after) pair.
            pad_tensor.dims.extend([1, 1, len(pad_arg.ints) // 2, 2])
            pad_tensor.int32_data.extend(pad_arg.ints)
            op.input.extend([strides_tensor.name, pad_tensor.name])
            self.add_min_max_const_node(op, op.input[0])
        elif op.type == MaceOp.DepthToSpace.name \
                or op.type == MaceOp.SpaceToDepth.name:
            size_arg = ConverterUtil.get_arg(
                op, MaceKeyword.mace_space_depth_block_size_str)
            size_tensor = self._model.tensors.add()
            size_tensor.name = op.name + '/block_size:0'
            size_tensor.data_type = mace_pb2.DT_INT32
            size_tensor.dims.extend([1])
            size_tensor.int32_data.extend([size_arg.i])
            op.input.extend([size_tensor.name])
            self.add_min_max_const_node(op, op.input[0])
        elif op.type == MaceOp.Pooling.name:
            self.add_min_max_const_node(op, op.input[0])
            window_arg = ConverterUtil.get_arg(
                op, MaceKeyword.mace_kernel_str)
            window_tensor = self._model.tensors.add()
            window_tensor.name = op.name + '/window:0'
            window_tensor.data_type = mace_pb2.DT_INT32
            window_tensor.dims.extend(
                [1, window_arg.ints[0], window_arg.ints[1], 1])
            strides_arg = ConverterUtil.get_arg(
                op, MaceKeyword.mace_strides_str)
            strides_tensor = self._model.tensors.add()
            strides_tensor.name = op.name + '/strides:0'
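            # Hexagon pooling takes its window and strides as extra
            # const tensor inputs, shaped like NHWC: [1, h, w, 1].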
            strides_tensor.data_type = mace_pb2.DT_INT32
            strides_tensor.dims.extend(
                [1, strides_arg.ints[0], strides_arg.ints[1], 1])
            op.input.extend([window_tensor.name, strides_tensor.name])
        elif op.type == MaceOp.Reduce.name:
            self.add_min_max_const_node(op, op.input[0])
            reduce_type_arg = ConverterUtil.get_arg(
                op, MaceKeyword.mace_reduce_type_str)
            mace_check(reduce_type_arg.i == ReduceType.MEAN.value,
                       "Hexagon Reduce only supports Mean now.")
            keep_dims_arg = ConverterUtil.get_arg(
                op, MaceKeyword.mace_keepdims_str)
            mace_check(keep_dims_arg.i == 1,
                       "Hexagon Reduce Mean only supports keep dims now.")
            axis_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
            mace_check(1 <= len(axis_arg.ints) <= 2,
                       "Hexagon Reduce Mean only supports spatial now.")
            for i in axis_arg.ints:
                mace_check(1 <= i <= 2,
                           "Hexagon Reduce Mean only supports spatial now.")
            # Reduce Mean is handled like a pooling op: build window and
            # strides tensors covering the reduced spatial dims of the
            # producer's output shape.
            producer_op_name, _ = get_op_and_port_from_tensor(op.input[0])
            input_dims = None
            for producer_op in self._model.op:
                if producer_op.name == producer_op_name:
                    input_dims = producer_op.output_shape[0].dims
                    break
            mace_check(input_dims is not None, "Missing input shape.")
            window_tensor = self._model.tensors.add()
            window_tensor.name = op.name + '/window:0'
            window_tensor.data_type = mace_pb2.DT_INT32
            if len(axis_arg.ints) == 1:
                dim1, dim2 = (input_dims[1], 1) \
                    if axis_arg.ints[0] == 1 else (1, input_dims[2])
            else:
                dim1, dim2 = input_dims[1], input_dims[2]
            window_tensor.dims.extend([1, dim1, dim2, 1])
            strides_tensor = self._model.tensors.add()
            strides_tensor.name = op.name + '/strides:0'
            strides_tensor.data_type = mace_pb2.DT_INT32
            strides_tensor.dims.extend([1, dim1, dim2, 1])
            op.input.extend([window_tensor.name, strides_tensor.name])
        elif op.type == MaceOp.ResizeBilinear.name:
            newdim_arg = ConverterUtil.get_arg(
                op, MaceKeyword.mace_resize_size_str)
            newdim_tensor = self._model.tensors.add()
            newdim_tensor.name = op.name + '/newdim:0'
            newdim_tensor.data_type = mace_pb2.DT_INT32
            newdim_tensor.dims.extend([len(newdim_arg.ints)])
            newdim_tensor.int32_data.extend(newdim_arg.ints)
            op.input.extend([newdim_tensor.name])
            self.add_min_max_const_node(op, op.input[0])
            align_corners_arg = ConverterUtil.get_arg(
                op, MaceKeyword.mace_align_corners_str)
            align_corners_tensor = self._model.tensors.add()
            align_corners_tensor.name = op.name + '/align_corners:0'
            align_corners_tensor.data_type = mace_pb2.DT_INT32
            align_corners_tensor.dims.extend([1])
            align_corners_tensor.int32_data.extend([align_corners_arg.i])
            op.input.extend([align_corners_tensor.name])
        elif op.type == MaceOp.Concat.name:
            # Concat expects all input mins first, then all maxes, and
            # the concat axis as the first input.
            inputs = copy.deepcopy(op.input)
            for ipt in inputs:
                self.add_min_max_const_node(op, ipt, True, False)
            for ipt in inputs:
                self.add_min_max_const_node(op, ipt, False, True)
            dim_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
            dim_tensor = self._model.tensors.add()
            dim_tensor.name = op.name + '/dim:0'
            dim_tensor.data_type = mace_pb2.DT_INT32
            dim_tensor.dims.extend([1])
            dim_tensor.int32_data.extend([dim_arg.i])
            op.input.insert(0, dim_tensor.name)
        elif op.type in [MaceOp.Softmax.name, MaceOp.Dequantize.name]:
            self.add_min_max_const_node(op, op.input[0])

        # Quantized ops additionally output min/max scalars; Dequantize
        # outputs plain float and does not.
        if op.type != MaceOp.Dequantize.name:
            min_output_shape = op.output_shape.add()
            min_output_shape.dims.extend([1])
            max_output_shape = op.output_shape.add()
            max_output_shape.dims.extend([1])
            op.output_type.extend(
                [mace_pb2.DT_UINT8, mace_pb2.DT_FLOAT, mace_pb2.DT_FLOAT])
        for i in range(len(op.output_shape)):
            out_max_byte_size = reduce(mul, op.output_shape[i].dims)
            if op.output_type[i] == mace_pb2.DT_FLOAT:
                out_max_byte_size *= 4
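            # E.g. a DT_FLOAT output of shape [1, 7, 7, 64] (an assumed
            # shape) reserves 1 * 7 * 7 * 64 * 4 = 12544 bytes; the same
            # shape in DT_UINT8 reserves 3136 bytes.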
            op.out_max_byte_size.extend([out_max_byte_size])

        # Default to no padding; pick up the MACE padding arg if present.
        op.padding = padding_mode[PaddingMode.NA]
        arg = ConverterUtil.get_arg(op, MaceKeyword.mace_padding_str)
        if arg is not None:
            op.padding = padding_mode[PaddingMode(arg.i)]

        # Finally map the MACE op type onto the Hexagon NN op type.
        if op.type == MaceOp.Eltwise.name:
            element_type = ConverterUtil.get_arg(
                op, MaceKeyword.mace_element_type_str).i
            if element_type == EltwiseType.SUM.value:
                op.type = HexagonOp.QuantizedAdd_8p8to8.name
            elif element_type == EltwiseType.SUB.value:
                op.type = HexagonOp.QuantizedSub_8p8to8.name
            else:
                mace_check(False,
                           "Hexagon does not support elementwise %s"
                           % EltwiseType(element_type).name)
        elif op.type == MaceOp.Pooling.name:
            pooling_type_arg = ConverterUtil.get_arg(
                op, MaceKeyword.mace_pooling_type_str)
            if PoolingType(pooling_type_arg.i) == PoolingType.AVG:
                op.type = HexagonOp.QuantizedAvgPool_8.name
            else:
                op.type = HexagonOp.QuantizedMaxPool_8.name
        else:
            op.type = self._hexagon_ops.map_nn_op(op.type)