Example #1
0
def handle_conv2d_parsing(op: tflite.Operator, builtin_code: tflite.BuiltinOperator, graph: tflite.SubGraph, model: tflite.Model) -> Tuple[str, Layer, str]:
    """Parse a CONV_2D or DEPTHWISE_CONV_2D operator from a TFLite flatbuffer.

    Args:
        op: the TFLite operator to parse.
        builtin_code: the operator's builtin code; selects the filter tensor
            layout and whether a Conv2d or Conv2d_DW layer is built.
        graph: the subgraph that owns the operator's tensors.
        model: the model, used to resolve tensor data buffers.

    Returns:
        A 3-tuple ``(output_tensor_name, layer, input_tensor_name)`` where
        ``layer`` is a Conv2d for CONV_2D and a Conv2d_DW for
        DEPTHWISE_CONV_2D. (The annotation previously claimed a 2-tuple,
        which did not match the actual return value.)

    Raises:
        Exception: if the padding mode is anything other than SAME; only
            SAME padding is supported by this parser.
    """
    op_opt = op.BuiltinOptions()

    opt = tflite.Conv2DOptions()
    opt.Init(op_opt.Bytes, op_opt.Pos)

    # Only StrideW is read; this assumes square strides
    # (StrideH == StrideW) -- TODO confirm against the target models.
    stride = opt.StrideW()

    input_ids = op.InputsAsNumpy()
    output_ids = op.OutputsAsNumpy()
    # Expect exactly (input, filter, bias).
    assert len(input_ids) == 3
    input_tensor_id, filter_tensor_id, bias_tensor_id = list(input_ids)

    input_tensor = graph.Tensors(input_tensor_id)
    input_shape = input_tensor.ShapeAsNumpy()
    assert len(input_shape) == 4
    # NOTE(review): TFLite activations are NHWC, i.e. (batch, height, width,
    # channels); the names below read dim 1 as width and dim 2 as height,
    # which is only harmless for square feature maps -- verify.
    _, input_width, input_height, input_depth = list(input_shape)

    filter_tensor = graph.Tensors(filter_tensor_id)
    filter_shape = filter_tensor.ShapeAsNumpy()
    assert len(filter_shape) == 4
    # CONV_2D filters are (out_channels, h, w, in_channels); depthwise
    # filters put the channel multiplier first and out_channels last.
    if builtin_code == tflite.BuiltinOperator.CONV_2D:
        filter_count, filter_width, filter_height, filter_in_channels = list(filter_shape)
    else:
        filter_in_channels, filter_width, filter_height, filter_count = list(filter_shape)
    # Per-channel quantization: one scale per output channel.
    filter_quantization = filter_tensor.Quantization()
    filter_scales = filter_quantization.ScaleAsNumpy()

    # Filter weights are stored as int8 in the buffer (is_32=False).
    filter_data = clean_data_int_op(model.Buffers(filter_tensor.Buffer()).DataAsNumpy(), is_32=False).reshape(filter_shape)

    padding = opt.Padding()
    padding_size = 0
    if padding == tflite.Padding.SAME:
        # SAME padding with an odd kernel: pad by half the kernel width.
        padding_size = int(filter_width) // 2
    else:
        # The only other TFLite mode is VALID, which this parser does not
        # support; fail loudly rather than silently mis-parse.
        raise Exception(f"Unexpected padding type: {padding}")

    # Bias values are stored as int32 in the buffer (is_32=True).
    bias_tensor = graph.Tensors(bias_tensor_id)
    bias_data = clean_data_int_op(model.Buffers(bias_tensor.Buffer()).DataAsNumpy(), is_32=True)

    assert len(output_ids) == 1
    output_id = output_ids[0]
    output_tensor = graph.Tensors(output_id)
    output_shape = output_tensor.ShapeAsNumpy()
    output_batch_size, output_width, output_height, output_channels = list(output_shape)
    # The convolution must produce one output channel per filter.
    assert output_channels == filter_count

    # Output uses per-tensor quantization: exactly one scale.
    output_quantization = output_tensor.Quantization().ScaleAsNumpy()
    assert len(output_quantization) == 1
    output_scale = output_quantization[0]

    # Only a fused RELU is recognized; other activation types are treated
    # as "no activation" here.
    activation_type = opt.FusedActivationFunction()
    followed_by_relu = activation_type == tflite.ActivationFunctionType.RELU

    output_name = output_tensor.Name()
    input_name = input_tensor.Name()

    if builtin_code == tflite.BuiltinOperator.CONV_2D:
        return output_name, Conv2d(
            input=None,
            filter_width=filter_width, filter_height=filter_height, filter_in_channels=filter_in_channels, filter_count=filter_count, filter_data=filter_data, bias_data=bias_data,
            input_width=input_width, input_height=input_height, stride=stride, padding=padding_size,
            followed_by_relu=followed_by_relu, output_scale=output_scale, filter_scales=filter_scales,
            output_width=output_width, output_height=output_height
        ), input_name
    else:
        return output_name, Conv2d_DW(
            input=None,
            filter_width=filter_width, filter_height=filter_height, filter_in_channels=filter_in_channels, filter_count=filter_count, filter_data=filter_data, bias_data=bias_data,
            input_width=input_width, input_height=input_height, stride=stride, padding=padding_size,
            followed_by_relu=followed_by_relu, output_scale=output_scale, filter_scales=filter_scales,
            output_width=output_width, output_height=output_height
        ), input_name
Example #2
0
def handle_fc_parsing(op: tflite.Operator, builtin_code: tflite.BuiltinOperator, graph: tflite.SubGraph, model: tflite.Model) -> Tuple[str, Layer, str]:
    """Parse a FULLY_CONNECTED operator from a TFLite flatbuffer.

    Args:
        op: the TFLite operator to parse.
        builtin_code: the operator's builtin code (not consulted here; the
            parameter keeps the signature consistent with the other
            handle_*_parsing functions).
        graph: the subgraph that owns the operator's tensors.
        model: the model, used to resolve tensor data buffers.

    Returns:
        A 3-tuple ``(output_tensor_name, layer, input_tensor_name)`` where
        ``layer`` is an FC layer built from the parsed weights, bias and
        quantization parameters. (The annotation previously claimed a
        2-tuple, which did not match the actual return value.)
    """
    op_opt = op.BuiltinOptions()
    opt = tflite.FullyConnectedOptions()
    opt.Init(op_opt.Bytes, op_opt.Pos)

    input_ids = op.InputsAsNumpy()
    output_ids = op.OutputsAsNumpy()
    # Expect exactly (input, weights, bias).
    assert len(input_ids) == 3
    input_tensor_id, weight_tensor_id, bias_tensor_id = list(input_ids)
    input_tensor = graph.Tensors(input_tensor_id)
    input_shape = input_tensor.ShapeAsNumpy()
    # Input must already be flattened to (batch, features).
    assert len(input_shape) == 2
    input_batch_size, input_size = list(input_shape)

    weight_tensor = graph.Tensors(weight_tensor_id)
    weight_shape = weight_tensor.ShapeAsNumpy()
    assert len(weight_shape) == 2
    weight_width, weight_height = list(weight_shape)
    # Per-tensor quantization: exactly one scale and one zero point.
    weight_quantization = weight_tensor.Quantization()
    weight_quantization_scales = weight_quantization.ScaleAsNumpy()
    assert len(weight_quantization_scales) == 1
    weight_scale = weight_quantization_scales[0]
    weight_zero_points = weight_quantization.ZeroPointAsNumpy()
    assert len(weight_zero_points) == 1
    weight_zero_point = weight_zero_points[0]

    # Weights are stored as int8 in the buffer (is_32=False).
    weight_data = clean_data_int_op(model.Buffers(weight_tensor.Buffer()).DataAsNumpy(), is_32=False).reshape(weight_shape)

    # Bias values are stored as int32 in the buffer (is_32=True).
    bias_tensor = graph.Tensors(bias_tensor_id)
    bias_data = clean_data_int_op(model.Buffers(bias_tensor.Buffer()).DataAsNumpy(), is_32=True)

    assert len(output_ids) == 1
    output_id = output_ids[0]
    output_tensor = graph.Tensors(output_id)
    output_shape = output_tensor.ShapeAsNumpy()
    assert len(output_shape) == 2
    output_batch_size, output_size = list(output_shape)
    output_quantization = output_tensor.Quantization()

    # Output also uses per-tensor quantization.
    output_quantization_scales = output_quantization.ScaleAsNumpy()
    assert len(output_quantization_scales) == 1
    output_scale = output_quantization_scales[0]

    output_zero_points = output_quantization.ZeroPointAsNumpy()
    assert len(output_zero_points) == 1
    output_zero_point = output_zero_points[0]

    # Only a fused RELU is recognized; other activation types are treated
    # as "no activation" here.
    activation_type = opt.FusedActivationFunction()
    followed_by_relu = activation_type == tflite.ActivationFunctionType.RELU

    input_name = input_tensor.Name()
    output_name = output_tensor.Name()
    layer = FC(
        input=None, output_size=output_size, input_size=input_size,
        weight_data=weight_data, weight_scale=weight_scale, bias_data=bias_data,
        output_scale=output_scale, followed_by_relu=followed_by_relu,
        weight_zero_point=weight_zero_point,
        output_zero_point=output_zero_point
    )
    return output_name, layer, input_name