def convert_general_op(self, tf_op):
    """Add a MACE op mirroring *tf_op* and fill in the common arguments.

    Copies name/type/inputs/outputs from the TF node, infers each output
    shape, resolves the element data type, and tags the op with the
    TensorFlow framework type and an NHWC data-format argument.

    Args:
        tf_op: the TensorFlow graph operation being converted.

    Returns:
        The newly added OperatorDef in self._mace_net_def.
    """
    op = self._mace_net_def.op.add()
    op.name = tf_op.name
    op.type = tf_op.type
    op.input.extend([tf_input.name for tf_input in tf_op.inputs])
    op.output.extend([tf_output.name for tf_output in tf_op.outputs])
    for tf_output in tf_op.outputs:
        output_shape = op.output_shape.add()
        self.infer_tensor_shape(tf_output, output_shape)

    data_type_arg = op.arg.add()
    data_type_arg.name = 'T'
    # Default: used when neither attribute below yields a data type.
    data_type_arg.i = self._option.data_type
    # The original code duplicated the whole dtype mapping once for 'T'
    # and once for 'SrcT' in nested try/excepts; resolve both attributes
    # with a single priority loop instead.  The try wraps the mapping as
    # well as get_attr() so any ValueError still falls through to the
    # next attribute, exactly as before.
    for attr_name in ('T', 'SrcT'):
        try:
            dtype = tf_op.get_attr(attr_name)
            if dtype == tf.int32 or dtype == tf.bool:
                # Bool tensors are carried as int32 in MACE.
                data_type_arg.i = mace_pb2.DT_INT32
            elif dtype == tf.float32:
                # Float maps to the converter's configured precision.
                data_type_arg.i = self._option.data_type
            else:
                mace_check(False, "data type %s not supported" % dtype)
            break
        except ValueError:
            # Attribute absent on this node; keep the default and try
            # the next candidate.
            data_type_arg.i = self._option.data_type

    framework_type_arg = op.arg.add()
    framework_type_arg.name = MaceKeyword.mace_framework_type_str
    framework_type_arg.i = FrameworkType.TENSORFLOW.value
    ConverterUtil.add_data_format_arg(op, DataFormat.NHWC)
    return op
def __init__(self, option, src_model_file):
    """Load a MegEngine computing graph and prepare converter state."""
    self._option = option
    self._converter_info = dict()

    # Dispatch table: MegEngine operator name -> conversion routine.
    self._op_converters = {
        MGEOpType.AxisAddRemove.name: self.convert_axisaddrm,
        MGEOpType.BatchNormForward.name: self.convert_batchnorm,
        MGEOpType.Concat.name: self.convert_concat,
        MGEOpType.ConvolutionForward.name: self.convert_conv2d,
        MGEOpType.ConvolutionBackwardData.name: self.convert_deconv2d,
        MGEOpType.Dimshuffle.name: self.convert_dimshuffle,
        MGEOpType.Elemwise.name: self.convert_elemwise,
        MGEOpType.GetVarShape.name: self.convert_shape,
        MGEOpType.Host2DeviceCopy.name: self.convert_nop,
        MGEOpType.Identity.name: self.convert_identity,
        MGEOpType.MarkNoBroadcastElemwise.name: self.convert_identity,
        MGEOpType.MatrixMul.name: self.convert_matmul,
        MGEOpType.PoolingForward.name: self.convert_pooling,
        MGEOpType.Reduce.name: self.convert_reduce,
        MGEOpType.Reshape.name: self.convert_reshape,
        MGEOpType.SharedDeviceTensor.name: self.convert_nop,
        MGEOpType.Subtensor.name: self.convert_subtensor,
    }

    # MegEngine filters are OIHW and activations NCHW.
    self._mace_net_def = mace_pb2.NetDef()
    ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.OIHW)
    ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NCHW)

    comp_graph, _, graph_outputs = mgb.load_comp_graph_from_file(
        src_model_file)
    opr_map, _, var_to_oprs, *_ = mgb.cgtools.graph_traversal(graph_outputs)
    # prune second input of reshape
    # because it introduces several ops, may increase the overhead
    opr_seq = mgb.cgtools.get_oprs_seq(graph_outputs, prune_reshape=True)

    self._mge_cg = comp_graph
    self._mge_operators = opr_seq
    self._mge_map_oprs = opr_map
    self._mge_var2oprs = var_to_oprs

    self._skip_tensors = set()
    self._bn_statistis_tensors = {}
def __init__(self, option, src_model_file):
    """Load a Keras model (with quantization support) for conversion."""
    self._option = option
    self._converter_info = dict()

    # Dispatch table: Keras layer class -> conversion routine.
    self._op_converters = {
        keras.layers.InputLayer: self.convert_input_layer,
        keras.layers.Flatten: self.convert_flatten,
        keras.layers.Dense: self.convert_dense,
        keras.layers.Conv2D: self.convert_conv2d,
        keras.layers.MaxPooling2D: self.convert_max_pooling2d,
        keras.layers.AveragePooling2D: self.convert_average_pooling2d,
        keras.layers.Dropout: self.convert_dropout,
        keras.layers.DepthwiseConv2D: self.convert_depthwise_conv2d,
        keras.layers.Softmax: self.convert_softmax,
        keras.layers.BatchNormalization: self.convert_batch_normalization,
        keras.layers.SeparableConv2D: self.convert_separable_conv2d,
        keras.layers.UpSampling2D: self.convert_upsampling2d,
        keras.layers.Activation: self.convert_activation,
        keras.layers.ReLU: self.convert_relu,
        keras.layers.Reshape: self.convert_reshape,
        keras.layers.Concatenate: self.convert_concatenate,
        keras.layers.GlobalAveragePooling2D:
            self.convert_global_average_pooling2d,
        keras.layers.Add: self.convert_add,
        QuantizeLayer: self.convert_quantize_layer,
        QuantizeWrapper: self.convert_quantize_wrapper,
        # keras.Sequential: self.convert_sequential,
    }

    # Keras filters are HWIO and activations NHWC.
    self._mace_net_def = mace_pb2.NetDef()
    ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.HWIO)
    ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NHWC)

    # quantize_scope makes tfmot's custom layer classes resolvable
    # while deserializing a quantization-aware model.
    with tfmot.quantization.keras.quantize_scope():
        self._keras_model = keras.models.load_model(
            src_model_file, compile=False)

    self._keras_model.summary()
def __init__(self, option, src_model_file):
    """Load a frozen TensorFlow GraphDef, run graph-transform cleanup,
    infer output shapes, and prepare the converter state.

    Args:
        option: converter options (input/output node specs, data type).
        src_model_file: path to the frozen .pb model file.
    """
    # Keep in lexicographical order
    # Dispatch table: TF op type name -> conversion routine.
    self._op_converters = {
        TFOpType.Abs.name: self.convert_elementwise,
        TFOpType.Add.name: self.convert_add,
        TFOpType.AddV2.name: self.convert_add,
        TFOpType.ArgMax.name: self.convert_argmax,
        TFOpType.AvgPool.name: self.convert_pooling,
        TFOpType.BatchMatMul.name: self.convert_matmul,
        TFOpType.BatchMatMulV2.name: self.convert_matmul,
        TFOpType.BatchToSpaceND.name: self.convert_space_batch,
        TFOpType.BiasAdd.name: self.convert_biasadd,
        TFOpType.Cast.name: self.convert_cast,
        TFOpType.ConcatV2.name: self.convert_concat,
        TFOpType.Const.name: self.convert_nop,
        TFOpType.Conv2D.name: self.convert_conv2d,
        TFOpType.Conv2DBackpropInput.name: self.convert_conv2d,
        TFOpType.Cumsum.name: self.convert_cumsum,
        TFOpType.DepthwiseConv2dNative.name: self.convert_conv2d,
        TFOpType.DepthToSpace.name: self.convert_space_depth,
        TFOpType.Div.name: self.convert_elementwise,
        TFOpType.Elu.name: self.convert_activation,
        TFOpType.Equal.name: self.convert_elementwise,
        TFOpType.ExpandDims.name: self.convert_expand_dims,
        TFOpType.ExtractImagePatches.name:
            self.convert_extract_image_patches,
        TFOpType.FakeQuantWithMinMaxVars.name: self.convert_fake_quantize,
        TFOpType.FakeQuantWithMinMaxArgs.name: self.convert_fake_quantize,
        TFOpType.Fill.name: self.convert_fill,
        TFOpType.FloorDiv.name: self.convert_elementwise,
        TFOpType.FusedBatchNorm.name: self.convert_fused_batchnorm,
        TFOpType.FusedBatchNormV2.name: self.convert_fused_batchnorm,
        TFOpType.FusedBatchNormV3.name: self.convert_fused_batchnorm,
        TFOpType.Gather.name: self.convert_gather,
        TFOpType.GatherV2.name: self.convert_gather,
        TFOpType.Identity.name: self.convert_identity,
        TFOpType.LeakyRelu.name: self.convert_activation,
        TFOpType.MatMul.name: self.convert_matmul,
        TFOpType.Max.name: self.convert_reduce,
        TFOpType.Maximum.name: self.convert_elementwise,
        TFOpType.MaxPool.name: self.convert_pooling,
        TFOpType.Mean.name: self.convert_reduce,
        TFOpType.Min.name: self.convert_reduce,
        TFOpType.Minimum.name: self.convert_elementwise,
        TFOpType.MirrorPad.name: self.convert_pad,
        TFOpType.Mul.name: self.convert_elementwise,
        TFOpType.Neg.name: self.convert_elementwise,
        TFOpType.NotEqual.name: self.convert_elementwise,
        TFOpType.OneHot.name: self.convert_one_hot,
        TFOpType.Pack.name: self.convert_stack,
        TFOpType.Pad.name: self.convert_pad,
        TFOpType.PadV2.name: self.convert_pad,
        TFOpType.Placeholder.name: self.convert_nop,
        TFOpType.Pow.name: self.convert_elementwise,
        TFOpType.Prod.name: self.convert_reduce,
        TFOpType.Sub.name: self.convert_elementwise,
        TFOpType.RealDiv.name: self.convert_elementwise,
        TFOpType.SquaredDifference.name: self.convert_elementwise,
        TFOpType.Square.name: self.convert_elementwise,
        TFOpType.Rsqrt.name: self.convert_elementwise,
        TFOpType.Relu.name: self.convert_activation,
        TFOpType.Relu6.name: self.convert_activation,
        TFOpType.Tanh.name: self.convert_activation,
        TFOpType.Reshape.name: self.convert_reshape,
        TFOpType.ResizeBicubic.name: self.convert_resize_bicubic,
        TFOpType.ResizeBilinear.name: self.convert_resize_bilinear,
        TFOpType.ResizeNearestNeighbor.name:
            self.convert_resize_nearest_neighbor,
        TFOpType.ReverseV2.name: self.convert_reverse,
        TFOpType.Select.name: self.convert_select,
        TFOpType.Shape.name: self.convert_shape,
        TFOpType.Sigmoid.name: self.convert_activation,
        TFOpType.Sign.name: self.convert_elementwise,
        TFOpType.Slice.name: self.convert_slice,
        TFOpType.Softmax.name: self.convert_softmax,
        TFOpType.SpaceToBatchND.name: self.convert_space_batch,
        TFOpType.SpaceToDepth.name: self.convert_space_depth,
        TFOpType.Split.name: self.convert_split,
        TFOpType.SplitV.name: self.convert_splitv,
        TFOpType.Sqrt.name: self.convert_elementwise,
        TFOpType.Squeeze.name: self.convert_squeeze,
        TFOpType.Stack.name: self.convert_stack,
        TFOpType.StridedSlice.name: self.convert_stridedslice,
        TFOpType.Sum.name: self.convert_reduce,
        TFOpType.Tile.name: self.convert_tile,
        TFOpType.Transpose.name: self.convert_transpose,
        TFOpType.Unpack.name: self.convert_unstack,
        TFOpType.Unstack.name: self.convert_unstack,
    }
    self._option = option
    # TF filters are HWIO and activations NHWC.
    self._mace_net_def = mace_pb2.NetDef()
    ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.HWIO)
    ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NHWC)
    ConverterUtil.set_framework_type(
        self._mace_net_def, FrameworkType.TENSORFLOW.value)

    # import tensorflow graph
    tf_graph_def = tf.GraphDef()
    with tf.gfile.Open(src_model_file, 'rb') as f:
        tf_graph_def.ParseFromString(f.read())

    self._placeholders = {}
    self._skip_tensor = set()
    self._output_shape = {}

    # Best-effort cleanup with the TF graph-transform tool; on any
    # failure fall back to the untransformed graph rather than abort.
    print("Run transform_graph: %s" % TFTransformGraphOptions)
    try:
        print("output keys: ", option.output_nodes.keys())
        transformed_graph_def = TransformGraph(tf_graph_def,
                                               option.input_nodes.keys(),
                                               option.output_nodes.keys(),
                                               TFTransformGraphOptions)
    except Exception as ex:
        print("Failed to transform graph using tf tool: %s" % ex)
        transformed_graph_def = tf_graph_def

    # To check optimized model, uncomment following code.
    # tf.io.write_graph(
    #     transformed_graph_def,
    #     ".",
    #     os.path.basename(src_model_file)[:-3] + "_opt.pb",
    #     as_text=False
    # )

    self.add_shape_info(transformed_graph_def)

    # reset default graph to clear earlier import
    tf.reset_default_graph()
    with tf.Session() as session:
        with session.graph.as_default() as graph:
            tf.import_graph_def(transformed_graph_def, name='')
            self._tf_graph = graph
            # Runs the injected 'shape' ops to record concrete output
            # shapes before the graph is discarded below.
            self.update_output_shapes(session)

    # we have polluted graph with 'shape' ops, so reset it and reload it
    # again
    tf.reset_default_graph()
    with tf.Session() as session:
        with session.graph.as_default() as graph:
            tf.import_graph_def(transformed_graph_def, name='')
            self._tf_graph = graph
def __init__(self, option, src_model_file, src_weight_file):
    """Parse a Caffe prototxt/caffemodel pair and set up the converter."""
    # Dispatch table: Caffe layer type -> conversion routine.
    self._op_converters = {
        'Input': self.convert_nop,
        'Convolution': self.convert_conv2d,
        'Deconvolution': self.convert_deconv2d,
        'Eltwise': self.convert_elementwise,
        'Add': self.convert_add,
        'ReLU': self.convert_activation,
        'ReLU6': self.convert_activation,
        'TanH': self.convert_activation,
        'Sigmoid': self.convert_activation,
        'PReLU': self.convert_activation,
        'Clip': self.convert_activation,
        'ELU': self.convert_activation,
        'Pooling': self.convert_pooling,
        'Concat': self.convert_concat,
        'Slice': self.convert_slice,
        'Softmax': self.convert_softmax,
        'InnerProduct': self.convert_fully_connected,
        'Interp': self.convert_interp,
        'BatchNorm': self.convert_folded_batchnorm,
        'GroupNorm': self.convert_group_norm,
        'Crop': self.convert_crop,
        'Scale': self.convert_scale,
        'ShuffleChannel': self.convert_channel_shuffle,
        'Permute': self.convert_permute,
        'Flatten': self.convert_flatten,
        'PriorBox': self.convert_prior_box,
        'Reshape': self.convert_reshape,
        'L2Normalization': self.convert_lpnorm,
        'L1Normalization': self.convert_lpnorm,
        'MVN': self.convert_MVN,
        'Bias': self.convert_bias,
        'ArgMax': self.convert_argmax,
        'ResizeNearest': self.convert_resize_nearest,
        'NonlocalReshape': self.convert_nonlocal_reshape,
        'MatMul': self.convert_matmul,
    }
    self._option = option

    # Caffe filters are OIHW and activations NCHW.
    self._mace_net_def = mace_pb2.NetDef()
    ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.OIHW)
    ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NCHW)
    ConverterUtil.set_framework_type(
        self._mace_net_def, FrameworkType.CAFFE.value)

    self._caffe_net = CaffeNet()
    self._caffe_layers = caffe_pb2.NetParameter()
    weight_params = caffe_pb2.NetParameter()

    # Topology: the prototxt is a text-format protobuf.
    with open(src_model_file, 'r') as proto_file:
        google.protobuf.text_format.Merge(
            str(proto_file.read()), self._caffe_layers)
    self.filter_test_layers(self._caffe_layers)
    for layer in self._caffe_layers.layer:
        self._caffe_net.add_layer(layer)

    # Weights: the caffemodel is a binary protobuf.
    with open(src_weight_file, 'rb') as weight_file:
        weight_params.ParseFromString(weight_file.read())
    self.filter_test_layers(weight_params)
    for blob_layer in weight_params.layer:
        self._caffe_net.add_blob(blob_layer)

    self._skip_ops = []
def convert_separable_conv2d(self, keras_op):
    """Split a Keras SeparableConv2D into two MACE ops.

    Emits a DepthwiseConv2d op (with the layer's padding, strides and
    dilations) followed by a 1x1 Conv2D op consuming its output; the
    bias and any fused activation are attached to the pointwise op.
    """
    # --- depthwise stage ---
    dw_conv2d_op = self.convert_general_op(keras_op)
    dw_conv2d_op.type = MaceOp.DepthwiseConv2d.name
    dw_conv2d_op.input.append(get_input(keras_op).name)
    # Adds kernel tensor
    dw_conv2d_op.input.append(keras_op.depthwise_kernel.name)
    dw_kernel = self.add_keras_tensor(keras_op.depthwise_kernel)
    padding_arg = dw_conv2d_op.arg.add()
    padding_arg.name = MaceKeyword.mace_padding_str
    padding_arg.i = padding_mode[keras_op.padding].value
    strides_arg = dw_conv2d_op.arg.add()
    strides_arg.name = MaceKeyword.mace_strides_str
    strides_arg.ints.extend(keras_op.strides)
    dilation_arg = dw_conv2d_op.arg.add()
    dilation_arg.name = MaceKeyword.mace_dilations_str
    dilations = keras_op.dilation_rate
    dilation_arg.ints.extend(keras_op.dilation_rate)
    # Intermediate tensor linking the depthwise output to the
    # pointwise input.
    dw_conv2d_output_name = keras_op.name + "_dw"
    dw_conv2d_op.output.append(dw_conv2d_output_name)
    # Compute the depthwise output shape in NHWC.
    # NOTE(review): assumes dw_kernel.dims is (kh, kw, in_ch, mult)
    # so out channels = in_ch * mult — confirm against add_keras_tensor.
    input_shape = keras_shape2list(get_input(keras_op).shape)
    height = conv_output_length(input_shape[1], dw_kernel.dims[0],
                                keras_op.padding, keras_op.strides[0],
                                dilations[0])
    width = conv_output_length(input_shape[2], dw_kernel.dims[1],
                               keras_op.padding, keras_op.strides[1],
                               dilations[1])
    output_shape = dw_conv2d_op.output_shape.add()
    output_shape.dims.extend([
        input_shape[0], height, width,
        dw_kernel.dims[2] * dw_kernel.dims[3]
    ])

    # --- pointwise (1x1) stage ---
    pw_conv2d_name = keras_op.name + "_pw"
    pw_conv2d_op = self._mace_net_def.op.add()
    pw_conv2d_op.name = pw_conv2d_name
    pw_conv2d_op.type = MaceOp.Conv2D.name
    pw_conv2d_op.input.append(dw_conv2d_output_name)
    # Adds kernel tensor
    pw_conv2d_op.input.append(keras_op.pointwise_kernel.name)
    self.add_keras_tensor(keras_op.pointwise_kernel)
    # 1x1 convolution always uses unit strides here.
    strides_arg = pw_conv2d_op.arg.add()
    strides_arg.name = MaceKeyword.mace_strides_str
    strides_arg.ints.extend([1, 1])
    data_type_arg = pw_conv2d_op.arg.add()
    data_type_arg.name = "T"
    data_type_arg.i = dtype2mtype(keras_op.dtype)
    framework_type_arg = pw_conv2d_op.arg.add()
    framework_type_arg.name = MaceKeyword.mace_framework_type_str
    framework_type_arg.i = FrameworkType.KERAS.value
    ConverterUtil.add_data_format_arg(pw_conv2d_op, DataFormat.NHWC)
    # Adds bias tensor
    if keras_op.use_bias:
        pw_conv2d_op.input.append(keras_op.bias.name)
        self.add_keras_tensor(keras_op.bias)
    # NOTE(review): the pointwise op's output and output_shape are not
    # set here — presumably split_activation_op fills them in while
    # handling the layer's fused activation; confirm in that helper.
    self.split_activation_op(keras_op, pw_conv2d_op)
def convert_subtensor(self, mge_op):
    """Convert a MegEngine Subtensor op into MACE StridedSlice.

    Builds constant begin/end/stride tensors from the op's constant
    index inputs.  When the Subtensor uses single-index access (which
    drops a dimension), a trailing Squeeze op is appended to remove the
    resulting size-1 axes.
    """
    op1 = self.convert_general_op(mge_op)
    op1.type = MaceOp.StridedSlice.name
    # NOTE(review): `axis` is computed but never used below.
    axis = mge_op.inputs[1].inferred_value
    t_shape = list(mge_op.inputs[0].imm_shape)

    # Names/shapes for the three constant tensors StridedSlice consumes.
    begin_tensor_name = mge_op.name + "_begin"
    end_tensor_name = mge_op.name + "_end"
    stride_tensor_name = mge_op.name + "_stride"
    begin_tensor_shape = (len(t_shape), )
    end_tensor_shape = (len(t_shape), )
    stride_tensor_shape = (len(t_shape), )
    # Defaults: slice the full extent of every axis with stride 1.
    begin_vals = [0] * len(t_shape)
    end_vals = [shapei for shapei in t_shape]
    stride_vals = [1] * len(t_shape)

    def check_val(sym_var):
        # Subtensor indices must be compile-time constants; a symbolic
        # index has inferred_value None, which raises TypeError here.
        try:
            val = sym_var.inferred_value[0]
        except TypeError:
            mace_check(False,
                       "you should feed const values for subtensor axis")
        return val

    squeeze_dims = []
    # Walk the index inputs backwards (inputs[0] is the data tensor),
    # matching each constant against the per-axis descriptors in
    # mge_op.params, also traversed in reverse.  Each descriptor flag
    # (step/end/begin/idx) that is set consumes one input value.
    idx = len(mge_op.inputs) - 1
    while idx:
        val = check_val(mge_op.inputs[idx])
        for ai in mge_op.params[::-1]:
            ai_idx = ai["axis"]
            if ai["step"] > 0:
                stride_vals[ai_idx] = val
                idx -= 1
                if idx == 0:
                    break
                val = check_val(mge_op.inputs[idx])
            if ai["end"] > 0:
                # Negative bounds count from the end of the axis.
                if val < 0:
                    val = t_shape[ai_idx] + val
                end_vals[ai_idx] = val
                idx -= 1
                if idx == 0:
                    break
                val = check_val(mge_op.inputs[idx])
            if ai["begin"] > 0:
                if val < 0:
                    val = t_shape[ai_idx] + val
                begin_vals[ai_idx] = val
                idx -= 1
                if idx == 0:
                    break
                val = check_val(mge_op.inputs[idx])
            if ai["idx"] > 0:
                if val < 0:
                    val = t_shape[ai_idx] + val
                # Single-index access: slice [val, val + 1) on this axis
                # and squeeze it away afterwards.
                squeeze_dims.append(ai_idx)
                begin_vals[ai_idx] = val
                end_vals[ai_idx] = val + 1
                idx -= 1
                if idx == 0:
                    break
                val = check_val(mge_op.inputs[idx])

    # Resulting shape of the strided slice on each axis.
    for ai_idx in range(len(t_shape)):
        t_shape[ai_idx] = math.ceil(
            (end_vals[ai_idx] - begin_vals[ai_idx]) / stride_vals[ai_idx])

    self.add_tensor(
        begin_tensor_name, begin_tensor_shape,
        mace_pb2.DT_INT32, begin_vals,
    )
    self.add_tensor(end_tensor_name, end_tensor_shape,
                    mace_pb2.DT_INT32, end_vals)
    self.add_tensor(
        stride_tensor_name, stride_tensor_shape,
        mace_pb2.DT_INT32, stride_vals,
    )

    # Replace the original constant index inputs with the three
    # freshly created tensors (input[0], the data tensor, is kept).
    del op1.input[1:]
    op1.input.extend(
        [begin_tensor_name, end_tensor_name, stride_tensor_name])

    if len(squeeze_dims) > 0:
        # create squeeze op to remove shape=1 dims
        # Redirect the slice to an intermediate tensor and let the
        # Squeeze op produce the original output name.
        mid_output_name = mge_op.name + "_mid_reshape"
        del op1.output[0]
        op1.output.extend([mid_output_name])
        output_shape = op1.output_shape[0]
        del output_shape.dims[:]
        output_shape.dims.extend(t_shape)

        op2 = self._mace_net_def.op.add()
        op2.type = MaceOp.Squeeze.name
        op2.name = mge_op.name + "_squeeze"
        data_type_arg = op2.arg.add()
        data_type_arg.name = "T"
        data_type_arg.i = self._option.data_type
        framework_type_arg = op2.arg.add()
        framework_type_arg.name = MaceKeyword.mace_framework_type_str
        framework_type_arg.i = FrameworkType.MEGENGINE.value
        ConverterUtil.add_data_format_arg(op2, DataFormat.NCHW)
        op2.input.extend([mid_output_name])
        op2.output.extend([mge_op.outputs[0].name])
        output_shape = op2.output_shape.add()
        output_shape.dims.extend(mge_op.outputs[0].imm_shape)
        axis_arg = op2.arg.add()
        axis_arg.name = MaceKeyword.mace_axis_str
        axis_arg.ints.extend(squeeze_dims)