Example 1
from keras2onnx.common.onnx_ops import OnnxOperatorBuilder


def convert_tf_crop_and_resize(scope, operator, container):
    if operator.target_opset < 11:
        raise ValueError("CropAndResize op is not supported for opset < 11")
    oopb = OnnxOperatorBuilder(container, scope)
    node = operator.raw_operator
    mode_value = node.get_attr('method')
    # TF CropAndResize consumes NHWC images, while the com.microsoft contrib op
    # expects NCHW, so transpose the image input before the op and the result after it.
    transpose_node = oopb.apply_transpose(operator.inputs[0].full_name,
                                          name=operator.full_name + '_transpose_1',
                                          perm=[0, 3, 1, 2])
    cropandresize = oopb.add_node('CropAndResize',
                                  transpose_node + operator.input_full_names[1:],
                                  operator.full_name + '_crop_and_resize',
                                  op_domain='com.microsoft',
                                  op_version=1,
                                  mode=mode_value)
    oopb.apply_op_with_output("apply_transpose",
                              cropandresize,
                              operator.output_full_names,
                              name=operator.full_name + '_transpose_final',
                              perm=[0, 2, 3, 1])
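
The exported model uses the com.microsoft CropAndResize contrib op, so it needs a runtime that registers that domain (onnxruntime does). The two transposes only shuttle the image tensor between TF's NHWC layout and the contrib op's NCHW layout; a small numpy illustration of that round trip:

import numpy as np

nhwc = np.random.rand(1, 4, 5, 3).astype(np.float32)  # TF layout: N, H, W, C
nchw = nhwc.transpose(0, 3, 1, 2)                      # effect of perm=[0, 3, 1, 2]
back = nchw.transpose(0, 2, 3, 1)                      # effect of perm=[0, 2, 3, 1]
assert np.array_equal(nhwc, back)                      # the two transposes cancel out
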
Example 2
def convert_keras_pooling_core(scope, operator, container, n_dims, op_type,
                               input_perm_axes, output_perm_axes):
    op = operator.raw_operator
    # Keras pooling layers default to channels_last; only a channels_first layer
    # already matches ONNX's NCHW convention and needs no transposes.
    no_permutation_required = op.data_format == 'channels_first' if hasattr(
        op, 'data_format') else False

    if no_permutation_required:
        adjusted_pooling_input = operator.inputs[0].full_name
    else:
        adjusted_pooling_input = scope.get_unique_variable_name(
            'input_transposed')
        preprocessor_type = 'Transpose'
        preprocessor_attrs = {
            'name': scope.get_unique_operator_name(preprocessor_type),
            'perm': input_perm_axes
        }
        container.add_node(preprocessor_type, operator.inputs[0].full_name,
                           adjusted_pooling_input, **preprocessor_attrs)

    is_global = type(op).__name__.startswith('Global')
    op_type_prefix = 'Global' if is_global else ''
    onnx_op_type = "AveragePool" if op_type == 'Avg' else 'MaxPool'
    attrs = {}
    if container.target_opset < 10:
        op_version = 7
    elif container.target_opset < 11:
        op_version = 10
    else:
        op_version = 11
    if not is_global:
        attrs['strides'] = list(op.strides)
        attrs['kernel_shape'] = tuple(
            [int(pool_size) for pool_size in op.pool_size])
        attrs['op_version'] = op_version
        # In ONNX opset 10, the ceil_mode attribute was added to local MaxPool and AveragePool
        if container.target_opset >= 10:
            attrs['ceil_mode'] = 0
        if op.padding == 'valid':
            attrs['auto_pad'] = 'VALID'
        elif op.padding == 'same':
            attrs['auto_pad'] = 'SAME_UPPER'
        else:
            raise RuntimeError("Unsupported padding type '{0}'".format(
                op.padding))

    from keras2onnx.common.onnx_ops import OnnxOperatorBuilder
    oopb = OnnxOperatorBuilder(container, scope)
    if no_permutation_required:
        # In this case, the output of our Pool operator directly matches what Keras produces.
        pool_result = oopb.add_node(op_type_prefix + onnx_op_type,
                                    adjusted_pooling_input,
                                    operator.inputs[0].full_name + '_pooling',
                                    **attrs)
    else:
        # Put the output of the Pool operator into an intermediate tensor. Later we will apply a
        # Transpose to match the original Keras output format.
        pool_result_1 = oopb.add_node(
            op_type_prefix + onnx_op_type, adjusted_pooling_input,
            operator.inputs[0].full_name + '_pooling', **attrs)

        # Generate a final Transpose
        pool_result = oopb.add_node('Transpose',
                                    pool_result_1,
                                    operator.inputs[0].full_name +
                                    '_transpose',
                                    perm=output_perm_axes)

    if is_global:
        import numpy as np
        # Global pooling yields [N, C, 1, ...]; reshape with [0, -1] (keep the batch
        # dimension, flatten the rest) to [N, C], matching Keras' 2-D output.
        squeeze_result = oopb.add_node(
            'Reshape', [
                pool_result,
                ('_start', oopb.int64, np.array([0, -1], dtype='int64'))
            ], operator.inputs[0].full_name + '_reshape')
    else:
        squeeze_result = pool_result

    oopb.apply_op_with_output("apply_identity",
                              squeeze_result,
                              operator.output_full_names,
                              name=operator.full_name + '_identity')
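
The core routine above is layout-agnostic: callers supply the permutation axes that translate Keras' channels_last layout to ONNX's channels_first convention and back. Below is a minimal sketch of such a wrapper for 2D max pooling, with the perm axes computed inline for clarity; the wrapper's name is illustrative, not necessarily the library's.

def convert_keras_max_pooling_2d(scope, operator, container):
    n_dims = 2
    # NHWC -> NCHW for the input, NCHW -> NHWC for the output.
    input_perm_axes = [0, n_dims + 1] + list(range(1, n_dims + 1))   # [0, 3, 1, 2]
    output_perm_axes = [0] + list(range(2, n_dims + 2)) + [1]        # [0, 2, 3, 1]
    convert_keras_pooling_core(scope, operator, container,
                               n_dims=n_dims, op_type='Max',
                               input_perm_axes=input_perm_axes,
                               output_perm_axes=output_perm_axes)
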
Example 3
import numpy as np

from keras2onnx.common.onnx_ops import OnnxOperatorBuilder, apply_identity, apply_transpose
from keras2onnx.proto import onnx_proto


def convert_DetectionLayer(scope, operator, container):
    # type: (keras2onnx.common.InterimContext, keras2onnx.common.Operator, keras2onnx.common.OnnxObjectContainer) -> None
    # Note: norm_boxes_graph and convert_apply_box_deltas_graph are helper graph
    # builders defined alongside this converter in the same module.
    DETECTION_MAX_INSTANCES = 100
    DETECTION_NMS_THRESHOLD = 0.3
    DETECTION_MIN_CONFIDENCE = 0.7

    oopb = OnnxOperatorBuilder(container, scope)
    box_transpose = scope.get_unique_variable_name(
        operator.inputs[0].full_name + '_tx')
    score_transpose = scope.get_unique_variable_name(
        operator.inputs[1].full_name + '_tx')

    # apply_transpose(scope, operator.inputs[0].full_name, box_transpose, container, perm=[2, 0, 1])
    apply_identity(scope, operator.inputs[0].full_name, box_transpose,
                   container)
    # output shape: [num_batches, spatial_dimension, 4]
    score_identity = scope.get_unique_variable_name(
        operator.inputs[1].full_name + '_id')
    apply_identity(scope, operator.inputs[1].full_name, score_identity,
                   container)
    # output shape: [num_batches, spatial_dimension, num_classes]

    deltas_transpose = scope.get_unique_variable_name(
        operator.inputs[2].full_name + '_tx')
    apply_identity(scope, operator.inputs[2].full_name, deltas_transpose,
                   container)
    image_meta = scope.get_unique_variable_name(operator.inputs[3].full_name +
                                                '_tx')
    apply_identity(scope, operator.inputs[3].full_name, image_meta, container)
    windows_transpose = norm_boxes_graph(scope, operator, container, oopb,
                                         image_meta)
    delta_mul_output = convert_apply_box_deltas_graph(
        scope, operator, container, oopb, box_transpose, score_identity,
        deltas_transpose, windows_transpose)

    # Drop the background class (index 0) and keep the 80 foreground class scores.
    sliced_score = oopb.add_node('Slice', [
        score_identity, ('_start', oopb.int64, np.array([1], dtype='int64')),
        ('_end', oopb.int64, np.array([81], dtype='int64')),
        ('_axes', oopb.int64, np.array([2], dtype='int64'))
    ], operator.inputs[1].full_name + '_sliced')
    apply_transpose(scope,
                    sliced_score,
                    score_transpose,
                    container,
                    perm=[0, 2, 1])
    # output shape: [num_batches, num_classes, spatial_dimension]

    max_output_size = scope.get_unique_variable_name('max_output_size')
    iou_threshold = scope.get_unique_variable_name('iou_threshold')
    score_threshold = scope.get_unique_variable_name('layer.score_threshold')

    container.add_initializer(max_output_size, onnx_proto.TensorProto.INT64,
                              [], [DETECTION_MAX_INSTANCES])
    container.add_initializer(iou_threshold, onnx_proto.TensorProto.FLOAT, [],
                              [DETECTION_NMS_THRESHOLD])
    container.add_initializer(score_threshold, onnx_proto.TensorProto.FLOAT,
                              [], [DETECTION_MIN_CONFIDENCE])

    nms_node = next(
        (nd_
         for nd_ in operator.nodelist if nd_.type == 'NonMaxSuppressionV3'),
        operator.nodelist[0])
    nms_output = scope.get_unique_variable_name(operator.output_full_names[0] +
                                                '_nms')
    container.add_node("NonMaxSuppression", [
        delta_mul_output, score_transpose, max_output_size, iou_threshold,
        score_threshold
    ],
                       nms_output,
                       op_version=operator.target_opset,
                       name=nms_node.name)

    # NMS emits [batch_index, class_index, box_index] rows; add 1 to the class column
    # to undo the offset introduced by slicing off the background class above.
    add_init = scope.get_unique_variable_name('add')
    container.add_initializer(add_init, onnx_proto.TensorProto.INT64, [1, 3],
                              [0, 1, 0])
    nms_output_add = scope.get_unique_variable_name(
        operator.output_full_names[0] + '_class_add')
    container.add_node("Add", [nms_output, add_init],
                       nms_output_add,
                       op_version=operator.target_opset,
                       name=nms_node.name + '_class_idx_add')

    starts_init = scope.get_unique_variable_name('starts')
    ends_init = scope.get_unique_variable_name('ends')
    axes_init = scope.get_unique_variable_name('axes')

    container.add_initializer(starts_init, onnx_proto.TensorProto.INT32, [1],
                              [1])
    container.add_initializer(ends_init, onnx_proto.TensorProto.INT32, [1],
                              [2])
    container.add_initializer(axes_init, onnx_proto.TensorProto.INT32, [1],
                              [1])

    class_idx_output = scope.get_unique_variable_name(
        operator.output_full_names[0] + '_class_idx')
    container.add_node("Slice",
                       [nms_output_add, starts_init, ends_init, axes_init],
                       class_idx_output,
                       op_version=operator.target_opset,
                       name=nms_node.name + '_class_idx')
    # output shape: [num_selected_indices, 1]

    starts_init_2 = scope.get_unique_variable_name('starts')
    ends_init_2 = scope.get_unique_variable_name('ends')
    axes_init_2 = scope.get_unique_variable_name('axes')

    container.add_initializer(starts_init_2, onnx_proto.TensorProto.INT32, [1],
                              [2])
    container.add_initializer(ends_init_2, onnx_proto.TensorProto.INT32, [1],
                              [3])
    container.add_initializer(axes_init_2, onnx_proto.TensorProto.INT32, [1],
                              [1])

    box_idx_output = scope.get_unique_variable_name(
        operator.output_full_names[0] + '_box_idx')
    container.add_node(
        "Slice", [nms_output_add, starts_init_2, ends_init_2, axes_init_2],
        box_idx_output,
        op_version=operator.target_opset,
        name=nms_node.name + '_box_idx')
    # output shape: [num_selected_indices, 1]

    box_idx_squeeze = oopb.apply_squeeze(box_idx_output,
                                         name=nms_node.name +
                                         '_box_idx_squeeze',
                                         axes=[1])[0]
    # output shape: [num_selected_indices]

    # Reverse-slice columns [2, 1] of the NMS output to build [box_index, class_index]
    # pairs for the GatherND over the per-box class scores below.
    starts_init_3 = scope.get_unique_variable_name('starts')
    ends_init_3 = scope.get_unique_variable_name('ends')
    axes_init_3 = scope.get_unique_variable_name('axes')
    step_init_3 = scope.get_unique_variable_name('steps')

    container.add_initializer(starts_init_3, onnx_proto.TensorProto.INT32, [1],
                              [2])
    container.add_initializer(ends_init_3, onnx_proto.TensorProto.INT32, [1],
                              [0])
    container.add_initializer(axes_init_3, onnx_proto.TensorProto.INT32, [1],
                              [1])
    container.add_initializer(step_init_3, onnx_proto.TensorProto.INT32, [1],
                              [-1])
    from keras2onnx.common.data_types import Int32TensorType
    class_box_idx_output = scope.get_local_variable_or_declare_one(
        operator.output_full_names[0] + '_class_box_idx',
        type=Int32TensorType(shape=[None, 2]))
    container.add_node(
        "Slice",
        [nms_output_add, starts_init_3, ends_init_3, axes_init_3, step_init_3],
        class_box_idx_output.full_name,
        op_version=operator.target_opset,
        name=nms_node.name + '_class_box_idx')
    # output shape: [num_selected_indices, 2]

    box_squeeze = oopb.apply_squeeze(delta_mul_output,
                                     name=nms_node.name + '_box_squeeze',
                                     axes=[0])[0]
    # output shape: [spatial_dimension, 4]

    score_squeeze = oopb.apply_squeeze(score_identity,
                                       name=nms_node.name + '_score_squeeze',
                                       axes=[0])[0]
    # output shape: [spatial_dimension, num_classes]

    box_gather = scope.get_unique_variable_name(operator.output_full_names[0] +
                                                '_box_gather')
    attrs = {'axis': 0}
    container.add_node("Gather", [box_squeeze, box_idx_squeeze],
                       box_gather,
                       op_version=operator.target_opset,
                       name=nms_node.name + '_box_gather',
                       **attrs)
    # output shape: [num_selected_indices, 4]

    score_gather = scope.get_unique_variable_name(
        operator.output_full_names[0] + '_score_gather')
    container.add_node("GatherND",
                       [score_squeeze, class_box_idx_output.full_name],
                       score_gather,
                       op_version=operator.target_opset,
                       name=nms_node.name + '_score_gather')
    # output shape: [num_selected_indices]

    score_gather_unsqueeze = oopb.apply_unsqueeze(score_gather,
                                                  name=nms_node.name +
                                                  '_score_gather_unsqueeze',
                                                  axes=[1])[0]
    # output shape: [num_selected_indices, 1]

    # Keep at most DETECTION_MAX_INSTANCES detections: top_k = min(100, num selected).
    top_k_var = scope.get_unique_variable_name('topK')
    container.add_initializer(top_k_var, onnx_proto.TensorProto.FLOAT, [1],
                              [100.0])

    score_gather_shape = oopb.add_node(
        'Shape', [score_gather],
        operator.inputs[1].full_name + '_score_gather_shape')
    attrs = {'to': 1}  # cast to float (TensorProto.FLOAT)
    scope_gather_float = oopb.add_node(
        'Cast', [score_gather_shape],
        operator.inputs[1].full_name + '_scope_gather_float', **attrs)
    top_k_min = oopb.add_node('Min', [scope_gather_float, top_k_var],
                              operator.inputs[1].full_name + '_top_k_min')
    attrs = {'to': 7}  # cast back to int64 (TensorProto.INT64)
    top_k_min_int = oopb.add_node(
        'Cast', [top_k_min], operator.inputs[1].full_name + '_top_k_min_int',
        **attrs)

    score_top_k_output_val = scope.get_unique_variable_name(
        operator.output_full_names[0] + '_score_top_k_output_val')
    # output shape: [num_top_K]
    score_top_k_output_idx = scope.get_unique_variable_name(
        operator.output_full_names[0] + '_score_top_k_output_idx')
    # output shape: [num_top_K]
    attrs = {'axis': 0}
    container.add_node('TopK', [score_gather, top_k_min_int],
                       [score_top_k_output_val, score_top_k_output_idx],
                       op_version=operator.target_opset,
                       name=nms_node.name + '_topK',
                       **attrs)

    class_idx_cast = scope.get_unique_variable_name(
        operator.output_full_names[0] + '_class_idx_cast')
    attrs = {'to': 1}  # cast class indices to float so they can be concatenated with boxes and scores
    container.add_node('Cast',
                       class_idx_output,
                       class_idx_cast,
                       op_version=operator.target_opset,
                       name=nms_node.name + '_class_idx_cast',
                       **attrs)
    # output shape: [num_selected_indices, 1]

    concat_var = scope.get_unique_variable_name(operator.output_full_names[0] +
                                                '_concat_var')
    concat_node = next(
        (nd_ for nd_ in operator.nodelist if nd_.type == 'Concat'),
        operator.nodelist[0])
    attrs = {'axis': 1}
    container.add_node("Concat",
                       [box_gather, class_idx_cast, score_gather_unsqueeze],
                       concat_var,
                       op_version=operator.target_opset,
                       name=concat_node.name,
                       **attrs)
    # output shape: [num_selected_indices, 6]

    all_gather = scope.get_unique_variable_name(operator.output_full_names[0] +
                                                '_all_gather')
    attrs = {'axis': 0}
    container.add_node("Gather", [concat_var, score_top_k_output_idx],
                       all_gather,
                       op_version=operator.target_opset,
                       name=nms_node.name + '_all_gather',
                       **attrs)
    # output shape: [num_top_K, 6]
    # Pad with zero rows and then slice so the output always has exactly
    # DETECTION_MAX_INSTANCES rows, mirroring the Keras DetectionLayer output.
    padded_result = oopb.add_node('Pad', [
        all_gather,
        np.array([0, 0, DETECTION_MAX_INSTANCES, 0], dtype=np.int64)
    ], nms_node.name + '_padded_result')
    detection_final = oopb.add_node('Slice', [
        padded_result, ('_start', oopb.int64, np.array([0], dtype='int64')),
        ('_end', oopb.int64, np.array([DETECTION_MAX_INSTANCES],
                                      dtype='int64')),
        ('_axes', oopb.int64, np.array([0], dtype='int64'))
    ], nms_node.name + '_detection_final')

    oopb.apply_op_with_output('apply_unsqueeze',
                              detection_final,
                              operator.output_full_names[0],
                              name=nms_node.name + '_concat_unsqueeze',
                              axes=[0])
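
In the keras2onnx Mask R-CNN example this converter is registered for the model's custom DetectionLayer, so that convert_keras dispatches to it instead of trying to translate the layer's internal TF graph. A minimal usage sketch, assuming the matterport Mask R-CNN package layout for the DetectionLayer import and a Keras model built beforehand:

import keras2onnx
from mrcnn.model import DetectionLayer  # assumed matterport Mask R-CNN layout

# keras_model: the Mask R-CNN Keras model, built elsewhere.
keras2onnx.set_converter(DetectionLayer, convert_DetectionLayer)
onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
keras2onnx.save_model(onnx_model, 'mask_rcnn.onnx')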