Code example #1
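A helper that fills in the output metadata of a MACE OpDef from the corresponding TensorFlow op: the maximum byte size, the MACE dtype, and the shape of each output. Both snippets are excerpts from a larger converter module and assume its imports and helpers (tf, mace_pb2, max_elem_size, tf_dtype_2_mace_dtype, find_dtype, padding_mode, dsp_ops, and so on) are in scope.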
def convert_op_outputs(mace_op_def, tf_op):
    mace_op_def.out_max_byte_size.extend(
        [max_elem_size(output) for output in tf_op.outputs])
    mace_op_def.output_type.extend(
        [tf_dtype_2_mace_dtype(output.dtype) for output in tf_op.outputs])
    output_shapes = []
    for output in tf_op.outputs:
        output_shape = mace_pb2.OutputShape()
        shape_list = output.shape.as_list()
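        # Normalize shapes: scalars become [1] and 2-D shapes are padded
        # out to 4-D.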
        if not shape_list:
            shape_list = [1]
        elif len(shape_list) == 2:
            shape_list = [1, 1, shape_list[0], shape_list[1]]
        output_shape.dims.extend(shape_list)
        output_shapes.append(output_shape)
    mace_op_def.output_shape.extend(output_shapes)
Code example #2
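The per-op conversion step. It examines the head of unresolved_ops and either skips it (already fused into an earlier op), registers a Const node as a tensor in net_def, or emits an OpDef, fusing several TensorFlow quantize/dequantize subgraphs into single quantized DSP ops along the way.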
def convert_ops(unresolved_ops, resolved_ops, net_def, output_node, dsp_ops):
    first_op = unresolved_ops[0]
    print('Op: ', first_op.name, first_op.type, first_op.outputs[0].shape)

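    # Already folded into a previously emitted op: nothing to do.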
    if first_op.name in resolved_ops:
        pass

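    # Constants become weight tensors in the NetDef rather than ops.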
    elif first_op.type == 'Const':
        print('Add const node: ', first_op.name)
        tf_tensor = first_op.outputs[0].eval()
        tensor = net_def.tensors.add()
        tensor.name = first_op.outputs[0].name
        tensor.data_type = find_dtype(first_op.outputs[0].dtype)
        shape = list(tf_tensor.shape)
        if shape:
            tensor.dims.extend(shape)
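        # Copy the raw values into the matching typed field of the proto.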
        if first_op.outputs[0].dtype == tf.float32:
            tensor.float_data.extend(tf_tensor.astype(float).flat)
        elif first_op.outputs[0].dtype == tf.int32 or \
                first_op.outputs[0].dtype == tf.int8 or \
                first_op.outputs[0].dtype == tf.int16 or \
                first_op.outputs[0].dtype == tf.quint8 or \
                first_op.outputs[0].dtype == tf.quint16:
            tensor.int32_data.extend(tf_tensor.astype(int).flat)

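    # Everything else becomes an OpDef; several quantize/dequantize
    # subgraphs are pattern-matched and fused below.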
    else:
        op_def = net_def.op.add()
        op_def.name = first_op.name
        op_def.type = dsp_ops.map_nn_op(first_op.type)
        op_def.padding = padding_mode['NA']

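        # Fuse Dequantize -> SpaceToBatchND/BatchToSpaceND -> Reshape ->
        # Min/Max -> Quantize into a single quantized op.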
        if len(first_op.outputs) > 0 and first_op.type == 'Dequantize' \
            and len(first_op.outputs[0].consumers()) > 0 \
            and (first_op.outputs[0].consumers()[0].type == 'SpaceToBatchND' or
                 first_op.outputs[0].consumers()[0].type == 'BatchToSpaceND'):
            input_tensor = first_op.inputs[0]
            min_tensor = first_op.inputs[1]
            max_tensor = first_op.inputs[2]
            s2b_op = first_op.outputs[0].consumers()[0]
            reshape_op = s2b_op.outputs[0].consumers()[0]
            min_op = reshape_op.outputs[0].consumers()[0]
            max_op = reshape_op.outputs[0].consumers()[1]
            quantize_op = min_op.outputs[0].consumers()[0]
            resolved_ops.add(s2b_op.name)
            resolved_ops.add(reshape_op.name)
            resolved_ops.add(min_op.name)
            resolved_ops.add(max_op.name)
            resolved_ops.add(quantize_op.name)

            op_def.name = quantize_op.name
            op_def.type = dsp_ops.map_nn_op('Quantized' + s2b_op.type)
            op_def.input.append(input_tensor.name)
            op_def.input.extend([t.name for t in s2b_op.inputs[1:]])
            op_def.input.extend([min_tensor.name, max_tensor.name])
            convert_op_outputs(op_def, quantize_op)
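        # Fuse QuantizedReshape -> Dequantize -> Softmax -> Reshape ->
        # Min/Max -> Quantize -> QuantizedReshape into a QuantizedSoftmax.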
        elif len(first_op.outputs) > 0 and \
            first_op.type == 'QuantizedReshape' and \
            len(first_op.outputs[0].consumers()) > 0 and \
            first_op.outputs[0].consumers()[0].type == 'Dequantize' and \
            len(first_op.outputs[0].consumers()[0].outputs[0].consumers()) \
            > 0 and \
            first_op.outputs[0].consumers()[0].outputs[0].consumers()[0].type \
                == 'Softmax':
            input_tensor = first_op.inputs[0]
            min_tensor = first_op.inputs[2]
            max_tensor = first_op.inputs[3]
            dequantize_op = first_op.outputs[0].consumers()[0]
            softmax_op = dequantize_op.outputs[0].consumers()[0]
            reshape_op = softmax_op.outputs[0].consumers()[0]
            min_op = reshape_op.outputs[0].consumers()[0]
            max_op = reshape_op.outputs[0].consumers()[1]
            quantize_op = min_op.outputs[0].consumers()[0]
            quantize_reshape_op = quantize_op.outputs[0].consumers()[0]

            resolved_ops.add(dequantize_op.name)
            resolved_ops.add(softmax_op.name)
            resolved_ops.add(reshape_op.name)
            resolved_ops.add(min_op.name)
            resolved_ops.add(max_op.name)
            resolved_ops.add(quantize_op.name)
            resolved_ops.add(quantize_reshape_op.name)

            op_def.name = quantize_reshape_op.name
            op_def.type = dsp_ops.map_nn_op('QuantizedSoftmax')
            op_def.input.extend(
                [input_tensor.name, min_tensor.name, max_tensor.name])
            convert_op_outputs(op_def, quantize_reshape_op)
        # Fuse Requantize -> Dequantize -> Squeeze -> Reshape -> Min/Max ->
        # Quantize into one op, dropping the Squeeze.
        elif len(first_op.outputs) > 0 and \
            first_op.type == 'Requantize' and \
            len(first_op.outputs[0].consumers()) > 0 and \
            first_op.outputs[0].consumers()[0].type == 'Dequantize' and \
            len(first_op.outputs[0].consumers()[0].outputs[0].consumers()) \
            > 0 and \
            first_op.outputs[0].consumers()[0].outputs[0].consumers()[0].type \
                == 'Squeeze':
            dequantize_op = first_op.outputs[0].consumers()[0]
            squeeze_op = dequantize_op.outputs[0].consumers()[0]
            reshape_op = squeeze_op.outputs[0].consumers()[0]
            min_op = reshape_op.outputs[0].consumers()[0]
            max_op = reshape_op.outputs[0].consumers()[1]
            quantize_op = min_op.outputs[0].consumers()[0]

            resolved_ops.add(dequantize_op.name)
            resolved_ops.add(squeeze_op.name)
            resolved_ops.add(reshape_op.name)
            resolved_ops.add(min_op.name)
            resolved_ops.add(max_op.name)
            resolved_ops.add(quantize_op.name)

            op_def.name = quantize_op.name
            op_def.input.extend([t.name for t in first_op.inputs])
            convert_op_outputs(op_def, quantize_op)

            # If the squeezed result feeds a Softmax subgraph, fold it into
            # a QuantizedSoftmax as well.
            next_op = quantize_op.outputs[0].consumers()[0] \
                if len(quantize_op.outputs) > 0 else None
            dequantize_op = next_op.outputs[0].consumers()[0] \
                if next_op and len(next_op.outputs) > 0 and \
                next_op.type == 'QuantizedReshape' and \
                len(next_op.outputs[0].consumers()) > 0 else None
            softmax_op = dequantize_op.outputs[0].consumers()[0]\
                if dequantize_op and len(dequantize_op.outputs) > 0 and \
                dequantize_op.type == 'Dequantize' and \
                len(dequantize_op.outputs[0].consumers()) > 0 else None
            if softmax_op and softmax_op.type == 'Softmax':
                reshape_op = softmax_op.outputs[0].consumers()[0]
                min_op = reshape_op.outputs[0].consumers()[0]
                max_op = reshape_op.outputs[0].consumers()[1]
                quantize_op = min_op.outputs[0].consumers()[0]
                quantize_reshape_op = quantize_op.outputs[0].consumers()[0]

                resolved_ops.add(next_op.name)
                resolved_ops.add(dequantize_op.name)
                resolved_ops.add(softmax_op.name)
                resolved_ops.add(reshape_op.name)
                resolved_ops.add(min_op.name)
                resolved_ops.add(max_op.name)
                resolved_ops.add(quantize_op.name)
                resolved_ops.add(quantize_reshape_op.name)

                softmax_op_def = net_def.op.add()
                softmax_op_def.padding = padding_mode['NA']
                softmax_op_def.name = quantize_reshape_op.name
                softmax_op_def.type = dsp_ops.map_nn_op('QuantizedSoftmax')
                softmax_op_def.input.extend([
                    get_tensor_name_from_op(op_def.name, 0),
                    get_tensor_name_from_op(op_def.name, 1),
                    get_tensor_name_from_op(op_def.name, 2)
                ])
                convert_op_outputs(softmax_op_def, quantize_reshape_op)

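        # Fuse Dequantize -> Tanh (and any trailing requantization subgraph)
        # into a QuantizedTanh.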
        elif len(first_op.outputs) > 0 and first_op.type == 'Dequantize' and \
            len(first_op.outputs[0].consumers()) > 0 and \
                first_op.outputs[0].consumers()[0].type == 'Tanh':
            input_tensor = first_op.inputs[0]
            min_tensor = first_op.inputs[1]
            max_tensor = first_op.inputs[2]
            tanh_op = first_op.outputs[0].consumers()[0]

            # If Tanh is not the last op, fold the trailing
            # Reshape/Min/Max/Quantize subgraph into the quantized op.
            resolved_ops.add(tanh_op.name)
            if tanh_op.outputs[0].consumers():
                reshape_op = tanh_op.outputs[0].consumers()[0]
                min_op = reshape_op.outputs[0].consumers()[0]
                max_op = reshape_op.outputs[0].consumers()[1]
                quantize_op = min_op.outputs[0].consumers()[0]
                resolved_ops.add(reshape_op.name)
                resolved_ops.add(min_op.name)
                resolved_ops.add(max_op.name)
                resolved_ops.add(quantize_op.name)

                op_def.name = quantize_op.name
                op_def.type = dsp_ops.map_nn_op('Quantized' + tanh_op.type)
                op_def.input.extend(
                    [input_tensor.name, min_tensor.name, max_tensor.name])
                convert_op_outputs(op_def, quantize_op)
            # Tanh is the last op: emit a QuantizedTanh followed by a
            # Dequantize so the final output is float.
            else:
                op_def.name = tanh_op.name + '/QuantizedTanh'
                op_def.type = dsp_ops.map_nn_op('Quantized' + tanh_op.type)
                op_def.input.extend(
                    [input_tensor.name, min_tensor.name, max_tensor.name])
                op_def.out_max_byte_size.extend([
                    max_elem_size(input_tensor),
                    max_elem_size(min_tensor),
                    max_elem_size(max_tensor)
                ])
                op_def.output_type.extend(
                    [mace_pb2.DT_UINT8, mace_pb2.DT_FLOAT, mace_pb2.DT_FLOAT])
                output_shapes = []
                for output in first_op.inputs:
                    output_shape = mace_pb2.OutputShape()
                    output_shape.dims.extend(output.shape.as_list())
                    output_shapes.append(output_shape)
                op_def.output_shape.extend(output_shapes)

                new_tanh_op_def = net_def.op.add()
                new_tanh_op_def.name = tanh_op.name
                new_tanh_op_def.type = dsp_ops.map_nn_op('Dequantize')
                new_tanh_op_def.input.extend([
                    get_tensor_name_from_op(op_def.name, 0),
                    get_tensor_name_from_op(op_def.name, 1),
                    get_tensor_name_from_op(op_def.name, 2)
                ])
                convert_op_outputs(new_tanh_op_def, tanh_op)
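        # Ops carrying padding/strides attributes (e.g. convolution and
        # pooling): pass ksize and strides to the DSP as extra const inputs.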
        elif has_padding_and_strides(first_op):
            op_def.padding = padding_mode[first_op.get_attr('padding')]
            op_def.input.extend([t.name for t in first_op.inputs])
            if 'ksize' in first_op.node_def.attr:
                ksize = first_op.get_attr('ksize')
                ksize_tensor = add_shape_const_node(net_def, first_op, ksize,
                                                    'ksize')
                op_def.input.extend([ksize_tensor])
            strides = first_op.get_attr('strides')
            strides_tensor = add_shape_const_node(net_def, first_op, strides,
                                                  'strides')
            op_def.input.extend([strides_tensor])
            convert_op_outputs(op_def, first_op)
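        # A Reshape that flattens its input maps to the DSP Flatten op.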
        elif is_node_flatten_reshape(first_op):
            op_def.type = 'Flatten'
            op_def.input.extend([first_op.inputs[0].name])
            convert_op_outputs(op_def, first_op)
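        # Any remaining op with a direct DSP mapping converts one-to-one.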
        elif dsp_ops.has_op(first_op.type):
            op_def.input.extend([t.name for t in first_op.inputs])
            convert_op_outputs(op_def, first_op)
        else:
            raise Exception('Unsupported op: %s' % first_op)

        resolved_ops.add(first_op.name)

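    # Pop the op we just handled from the work list.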
    del unresolved_ops[0]
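
For context, a minimal sketch of how convert_ops might be driven. This driver is not part of the snippets above; graph, net_def, output_node, and dsp_ops are assumed to come from the surrounding module, and since the Const branch calls eval(), it must run under a TF session.

with tf.Session(graph=graph):
    # Process ops head-first; convert_ops pops one op per call and may
    # mark several downstream ops as resolved when it fuses a subgraph.
    unresolved_ops = list(graph.get_operations())
    resolved_ops = set()
    while unresolved_ops:
        convert_ops(unresolved_ops, resolved_ops, net_def, output_node,
                    dsp_ops)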