Example #1
    def wrapper(ops):
        input_signature = []
        for input_type in input_types:
            if input_type is not None and len(input_type) > 0 and isinstance(
                    input_type[-1], dtypes.DType):
                shape, dtype = input_type[:-1], input_type[-1]
            else:
                shape, dtype = input_type, tf.float32
            input_signature.append(tf.TensorSpec(shape=shape, dtype=dtype))

        @tf.function(input_signature=input_signature)
        def tf2_model(*args):
            return ops(*args)

        concrete_func = tf2_model.get_concrete_function()
        inputs = get_tf_node_names(
            [
                t.name
                for t in concrete_func.inputs if t.dtype != dtypes.resource
            ],
            mode="input",
        )
        outputs = get_tf_node_names([t.name for t in concrete_func.outputs],
                                    mode="output")
        return [concrete_func], inputs, outputs
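
For context, the snippet above is the inner function of a decorator factory that closes over `input_types`. Below is a hedged, self-contained sketch of that pattern and of how the returned tuple is consumed; the factory name `make_tf2_wrapper`, the raw tensor-name extraction (without `get_tf_node_names`), and the example model are illustrative assumptions, not part of the excerpt.

import tensorflow as tf
from tensorflow.python.framework import dtypes

def make_tf2_wrapper(input_types):  # assumed factory name, for illustration
    def wrapper(ops):
        # Build a tf.TensorSpec signature from (shape..., dtype) tuples,
        # defaulting to float32 when no dtype is given.
        input_signature = []
        for input_type in input_types:
            if input_type and isinstance(input_type[-1], dtypes.DType):
                shape, dtype = input_type[:-1], input_type[-1]
            else:
                shape, dtype = input_type, tf.float32
            input_signature.append(tf.TensorSpec(shape=shape, dtype=dtype))

        # Trace the callable and read tensor names straight off the concrete
        # function (the excerpt additionally cleans them via get_tf_node_names).
        concrete_func = tf.function(
            ops, input_signature=input_signature).get_concrete_function()
        inputs = [t.name for t in concrete_func.inputs
                  if t.dtype != dtypes.resource]
        outputs = [t.name for t in concrete_func.outputs]
        return [concrete_func], inputs, outputs
    return wrapper

# Usage: the decorator replaces the function with wrapper's return value.
@make_tf2_wrapper([(2, 3, tf.float32)])   # one input of shape (2, 3)
def build_model(x):
    return tf.nn.relu(x)

model, input_names, output_names = build_model
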
Example #2
    def wrapper(ops):
        class TensorFlowModule(tf.Module):
            input_signature = []
            for input_type in input_types:
                if len(input_type) > 0 and isinstance(input_type[-1],
                                                      dtypes.DType):
                    shape, dtype = input_type[:-1], input_type[-1]
                else:
                    shape, dtype = input_type, tf.float32
                input_signature.append(tf.TensorSpec(shape=shape, dtype=dtype))

            @tf.function(input_signature=input_signature)
            def __call__(self, *args):
                return ops(*args)

        module = TensorFlowModule()
        concrete_func = module.__call__.get_concrete_function()
        inputs = get_tf_node_names(
            [
                t.name
                for t in concrete_func.inputs if t.dtype != dtypes.resource
            ],
            mode="input",
        )
        outputs = get_tf_node_names([t.name for t in concrete_func.outputs],
                                    mode="output")
        return [concrete_func], inputs, outputs
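
The only substantive difference from Example #1 is that the traced callable is attached to a tf.Module subclass, so the traced graph and any variables it captures live on one trackable object. Below is a minimal, self-contained sketch of that pattern (names and shapes are made up); it also illustrates why both excerpts filter out resource-dtype inputs: captured variables surface as resource tensors in concrete_func.inputs.

import tensorflow as tf

class Scale(tf.Module):
    def __init__(self):
        super().__init__()
        self.w = tf.Variable(2.0)  # captured as a resource input when traced

    @tf.function(input_signature=[tf.TensorSpec(shape=(3,), dtype=tf.float32)])
    def __call__(self, x):
        return self.w * x

module = Scale()
concrete_func = module.__call__.get_concrete_function()

# concrete_func.inputs holds the placeholder for `x` plus a resource tensor
# for the captured variable, which is why the wrapper drops dtypes.resource.
feed_inputs = [t.name for t in concrete_func.inputs
               if t.dtype != tf.dtypes.resource]
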
Example #3
def run_compare_tf2(
        model,
        input_dict,
        output_names,
        use_cpu_only=False,
        use_cpu_for_conversion=False,
        frontend_only=False,
        frontend="tensorflow",
        backend=("neuralnetwork", "fp32"),
        debug=False,
        atol=1e-04,
        rtol=1e-05,
):
    """
    Parameters
    ----------
    model: list of tf.ConcreteFunction
        List of TensorFlow 2.x concrete functions.
    input_dict: dict of (str, np.array)
        Dict of name and value pairs representing inputs.
    output_names: list of str
        List of output node names.
    use_cpu_only: bool
        If true, use only the CPU for prediction; otherwise, the GPU may also be used.
    use_cpu_for_conversion: bool
        If true, the converter is invoked with "ct.convert(..., useCPUOnly=True)",
        which forces the model to be loaded with the CPU context when the converter
        builds the ML model object from the proto spec using
        "ct.models.MLModel(proto_spec, useCPUOnly=True)".
        The other argument, "use_cpu_only", refers only to the compute engine used
        for prediction. A model loaded on a non-CPU context can still be forced to
        execute on the CPU at prediction time. Hence
        "use_cpu_for_conversion = False && use_cpu_only = True" is valid: the model
        is loaded for the GPU but executed on the CPU.
        The reverse, "use_cpu_for_conversion = True && use_cpu_only = False", is
        invalid, since once a model is loaded with the CPU context it cannot be
        moved to a non-CPU device at prediction time.
    frontend_only: bool
        If true, skip the prediction call and only validate the conversion.
    frontend: str
        Frontend to convert from.
    backend: tuple of (str, str)
        Backend and precision to convert to, e.g. ("neuralnetwork", "fp32").
    debug: bool
        If true, print verbose information and plot intermediate graphs.
    atol: float
        The absolute tolerance parameter.
    rtol: float
        The relative tolerance parameter.
    """
    if use_cpu_for_conversion and not use_cpu_only:
        # use_cpu_for_conversion = True && use_cpu_only = False
        raise ValueError(
            "use_cpu_for_conversion = True && use_cpu_only = False is an invalid test case"
        )

    inputs = []
    cf_inputs = [t for t in model[0].inputs if t.dtype != dtypes.resource]
    for t in cf_inputs:
        name = get_tf_node_names(t.name)[0]
        shape = [RangeDim() if s is None or s == -1 else s \
                for s in list(t.get_shape())]
        inputs.append(
            TensorType(name=name, shape=shape, dtype=t.dtype.as_numpy_dtype))
    outputs = []
    for t in output_names:
        name = get_tf_node_names(t)[0]
        outputs.append(name)

    # get TensorFlow 2.x output as reference and run comparison
    tf_input_values = [tf.constant(t) for t in input_dict.values()]
    tf_outputs = model[0](*tf_input_values)
    if isinstance(tf_outputs, (tuple, list)):
        ref = [t.numpy() for t in tf_outputs]
    else:
        ref = [tf_outputs.numpy()]
    expected_outputs = {n: v for n, v in zip(outputs, ref)}

    mlmodel = ct_convert(
        model,
        source=frontend,
        inputs=inputs,
        outputs=outputs,
        convert_to=backend,
        debug=debug,
        useCPUOnly=use_cpu_for_conversion,
    )

    for k, v in input_dict.items():
        if isinstance(v, np.ndarray) and issubclass(v.dtype.type, np.integer):
            input_dict[k] = v.astype(float)  # Core ML only accepts floats (np.float was removed from NumPy)

    if frontend_only or _macos_version() < (10, 13) \
       or (mlmodel.is_package and _macos_version() < (12, 0)):
        return mlmodel._spec, mlmodel, input_dict, None

    compare_backend(
        mlmodel,
        input_dict,
        expected_outputs,
        use_cpu_only,
        atol=atol,
        rtol=rtol,
        also_compare_shapes=True,
        dtype=backend[1],
    )

    pred = None
    if not coremltoolsutils._has_custom_layer(mlmodel.get_spec()):
        pred = run_core_ml_predict(mlmodel, input_dict, use_cpu_only)
    else:
        print('Skipping model prediction as it has a custom nn layer!')
    return mlmodel._spec, mlmodel, input_dict, pred
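
A hedged sketch of how a test might call this helper, using a concrete function built as in Example #1; the model, shapes, input name, and backend tuple are illustrative assumptions, and the input_dict keys are expected to line up with the names that get_tf_node_names produces.

import numpy as np
import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec(shape=(1, 4), dtype=tf.float32)])
def small_model(x):
    return tf.nn.softmax(x)

cf = small_model.get_concrete_function()

spec, mlmodel, fed_inputs, pred = run_compare_tf2(
    [cf],
    {"x": np.random.rand(1, 4).astype(np.float32)},  # key matches the TF input node name
    [t.name for t in cf.outputs],
    use_cpu_only=True,              # predict on the CPU ...
    use_cpu_for_conversion=False,   # ... even though the model is loaded for GPU
    backend=("neuralnetwork", "fp32"),
)
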
Example #4
def run_compare_tf2(
    model,
    input_dict,
    output_names,
    use_cpu_only=False,
    frontend_only=False,
    frontend="tensorflow",
    backend="nn_proto",
    debug=False,
    atol=1e-04,
    rtol=1e-05,
):
    """
    Parameters
    ----------
    model: list of tf.ConcreteFunction
        List of TensorFlow 2.x concrete functions.
    input_dict: dict of (str, np.array)
        Dict of name and value pairs representing inputs.
    output_names: list of str
        List of output node names.
    use_cpu_only: bool
        If true, use only the CPU for prediction; otherwise, the GPU may also be used.
    frontend_only: bool
        If true, skip the prediction call and only validate the conversion.
    frontend: str
        Frontend to convert from.
    backend: str
        Backend to convert to.
    debug: bool
        If true, print verbose information and plot intermediate graphs.
    atol: float
        The absolute tolerance parameter.
    rtol: float
        The relative tolerance parameter.
    """
    inputs = []
    cf_inputs = [t for t in model[0].inputs if t.dtype != dtypes.resource]
    for t in cf_inputs:
        name = get_tf_node_names(t.name)[0]
        shape = [RangeDim() if s is None or s == -1 else s \
                for s in list(t.get_shape())]
        inputs.append(
            TensorType(name=name, shape=shape, dtype=t.dtype.as_numpy_dtype))
    outputs = []
    for t in output_names:
        name = get_tf_node_names(t)[0]
        outputs.append(name)

    # get TensorFlow 2.x output as reference and run comparison
    tf_input_values = [tf.constant(t) for t in input_dict.values()]
    tf_outputs = model[0](*tf_input_values)
    if isinstance(tf_outputs, (tuple, list)):
        ref = [t.numpy() for t in tf_outputs]
    else:
        ref = [tf_outputs.numpy()]
    expected_outputs = {n: v for n, v in zip(outputs, ref)}

    mlmodel = ct.convert(
        model,
        source=frontend,
        inputs=inputs,
        outputs=outputs,
        convert_to=backend,
        debug=debug,
    )

    if frontend_only:
        return

    for k, v in input_dict.items():
        if isinstance(v, np.ndarray) and issubclass(v.dtype.type, np.integer):
            input_dict[k] = v.astype(float)  # Core ML only accepts floats (np.float was removed from NumPy)

    compare_backend(
        mlmodel,
        input_dict,
        expected_outputs,
        use_cpu_only,
        atol=atol,
        rtol=rtol,
        also_compare_shapes=True,
    )

    return mlmodel.get_spec()
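
Compared with Example #3, this is an older variant of the same helper: the backend is the legacy "nn_proto" string rather than a (backend, precision) tuple, there is no macOS-version or mlpackage early return, ct.convert is called without any CPU-context or compute-unit knob, and the function returns only mlmodel.get_spec() (or None when frontend_only is set).
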
Example #5
def run_compare_tf2(
    model,
    input_dict,
    output_names,
    inputs_for_conversion=None,
    use_cpu_for_conversion=False,
    frontend_only=False,
    frontend="tensorflow",
    backend=("neuralnetwork", "fp32"),
    debug=False,
    atol=1e-04,
    rtol=1e-05,
    minimum_deployment_target=None,
):
    """
    Parameters
    ----------
    model: list of tf.ConcreteFunction
        List of TensorFlow 2.x concrete functions.
    input_dict: dict of (str, np.array)
        Dict of name and value pairs representing inputs.
    output_names: list of str
        List of output node names.
    inputs_for_conversion: list of coremltools.TensorType() or coremltools.ImageType() objects
        Defaults to None. Passed as-is to the "inputs" argument of the converter.
    use_cpu_for_conversion: bool
        If True, forces the model to be loaded with the CPU context.
    frontend_only: bool
        If True, skip the prediction call and only validate the conversion.
    frontend: str
        Frontend to convert from.
    backend: tuple of (str, str)
        Backend and precision to convert to, e.g. ("neuralnetwork", "fp32").
    debug: bool
        If True, print verbose information and plot intermediate graphs.
    atol: float
        The absolute tolerance parameter.
    rtol: float
        The relative tolerance parameter.
    minimum_deployment_target: coremltools.target enumeration
        The spec version for the mlmodel.
    """
    inputs = []
    if inputs_for_conversion is None:
        cf_inputs = [t for t in model[0].inputs if t.dtype != dtypes.resource]
        for t in cf_inputs:
            name = get_tf_node_names(t.name)[0]
            shape = [RangeDim() if s is None or s == -1 else s \
                    for s in list(t.get_shape())]
            inputs.append(
                TensorType(name=name,
                           shape=shape,
                           dtype=t.dtype.as_numpy_dtype))
    else:
        inputs = inputs_for_conversion

    outputs = []
    for t in output_names:
        name = get_tf_node_names(t)[0]
        outputs.append(name)

    # get TensorFlow 2.x output as reference and run comparison
    tf_input_values = [tf.constant(t) for t in input_dict.values()]
    tf_outputs = model[0](*tf_input_values)
    if isinstance(tf_outputs, (tuple, list)):
        ref = [t.numpy() for t in tf_outputs]
    else:
        ref = [tf_outputs.numpy()]
    expected_outputs = {n: v for n, v in zip(outputs, ref)}

    if use_cpu_for_conversion:
        compute_unit = ct.ComputeUnit.CPU_ONLY
    else:
        compute_unit = ct.ComputeUnit.ALL

    mlmodel = ct_convert(
        model,
        source=frontend,
        inputs=inputs,
        outputs=outputs,
        convert_to=backend,
        debug=debug,
        compute_units=compute_unit,
        minimum_deployment_target=minimum_deployment_target,
    )

    for k, v in input_dict.items():
        if isinstance(v, np.ndarray) and issubclass(v.dtype.type, np.integer):
            input_dict[k] = v.astype(float)  # Core ML only accepts floats (np.float was removed from NumPy)

    if frontend_only or _macos_version() < (10, 13) \
       or (mlmodel.is_package and _macos_version() < (12, 0)):
        return mlmodel._spec, mlmodel, input_dict, None

    pred = None
    if not coremltoolsutils._has_custom_layer(mlmodel._spec):
        pred = compare_backend(
            mlmodel,
            input_dict,
            expected_outputs,
            atol=atol,
            rtol=rtol,
            also_compare_shapes=True,
            dtype=backend[1],
        )
    else:
        print('Skipping model prediction as it has a custom nn layer!')
    return mlmodel._spec, mlmodel, input_dict, pred
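
A hedged usage sketch for this newest variant, exercising the knobs the earlier versions lack; coremltools is assumed to be imported as ct, and the model, names, backend tuple, and deployment target are illustrative assumptions only.

import coremltools as ct
import numpy as np
import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec(shape=(1, 8), dtype=tf.float32)])
def small_model(x):
    return tf.math.tanh(x)

cf = small_model.get_concrete_function()

spec, mlmodel, fed_inputs, pred = run_compare_tf2(
    [cf],
    {"x": np.random.rand(1, 8).astype(np.float32)},
    [t.name for t in cf.outputs],
    inputs_for_conversion=[ct.TensorType(name="x", shape=(1, 8), dtype=np.float32)],
    use_cpu_for_conversion=True,               # mapped to ct.ComputeUnit.CPU_ONLY above
    backend=("mlprogram", "fp16"),
    minimum_deployment_target=ct.target.iOS15,
)
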