Example #1
import numpy as np

def assert_model_is_valid(
    program, inputs, backend="nn_proto", verbose=True, expected_output_shapes=None
):
    """
    Assert Core ML model is valid.

    Inputs:

    - input: str -> shape tuple. All program input names need to appear in str.
      shape tuple can only contain positive integers.
    """
    input_dict = dict()
    for name, shape in inputs.items():
        input_dict[name] = np.random.rand(*shape)

    # Avoid circular import
    from coremltools.converters._converters_entry import convert
    mlmodel = convert(program, source="mil", convert_to=backend)
    assert mlmodel is not None

    if verbose:
        from coremltools.models.neural_network.printer import print_network_spec
        print_network_spec(mlmodel.get_spec(), style="coding")

    # _IS_MACOS is assumed to come from the surrounding test module;
    # predict() is only available on macOS.
    if _IS_MACOS:
        prediction = mlmodel.predict(input_dict, useCPUOnly=True)
        assert prediction is not None
        if expected_output_shapes is not None:
            for out_name, out_shape in expected_output_shapes.items():
                assert out_name in prediction
                assert out_shape == prediction[out_name].shape
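A minimal usage sketch for the helper above (hedged: the builder import path, the one-op program, and the shape are illustrative assumptions; the input name "x" comes from the parameter name):

from coremltools.converters.mil import Builder as mb

# Hypothetical single-op MIL program; its only input is named "x".
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))])
def prog(x):
    return mb.relu(x=x)

# The dict keys must cover every program input name.
assert_model_is_valid(prog, {"x": (1, 2, 3)}, verbose=False)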
Example #2
    def test_fusion_with_image_full(self):
        from coremltools.converters._converters_entry import convert

        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20, 30, 3))])
        def prog(x):
            x1 = mb.transpose(x=x, perm=[0, 3, 1, 2])
            x2 = mb.relu(x=x)
            x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2])
            x4 = mb.add(x=x1, y=x3)
            return mb.relu(x=x4)

        mlmodel = convert(prog,
                          inputs=[
                              ImageType(name="x",
                                        shape=(10, 20, 30, 3),
                                        channel_first=False)
                          ],
                          source="mil",
                          convert_to="nn_proto")
        assert mlmodel is not None
        assert len(mlmodel.get_spec().neuralNetwork.layers) == 3
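Why three layers remain: converting with a channel-last ImageType presumably makes the converter insert its own layout transpose at the image input, and the transpose-fusion pass then cancels it against the explicit transpose ops in the program, leaving just relu, add, relu.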
Example #3
# Program and Function are assumed importable from the MIL package.
from coremltools.converters.mil.mil import Function, Program

def get_core_ml_prediction(
    build, input_placeholders, input_values, use_cpu_only=False, backend="nn_proto"
):
    """
    Return predictions of the given model.
    """
    program = Program()
    with Function(input_placeholders) as ssa_func:
        output_vars = build(**ssa_func.inputs)
        if isinstance(output_vars, tuple):
            output_vars = list(output_vars)
        elif not isinstance(output_vars, list):
            output_vars = [output_vars]
        ssa_func.set_outputs(output_vars)
        program.add_function("main", ssa_func)

    # Avoid circular import
    from coremltools.converters._converters_entry import convert
    mlmodel = convert(program, source="mil",
        convert_to=backend, useCPUOnly=use_cpu_only)
    return mlmodel.predict(input_values, useCPUOnly=use_cpu_only)
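A hedged usage sketch; the builder import, the build function, and the shapes are assumptions for illustration:

import numpy as np
from coremltools.converters.mil import Builder as mb

def build(x):
    # Parameter names must match the placeholder keys below.
    return mb.relu(x=x)

input_placeholders = {"x": mb.placeholder(shape=(1, 2, 3))}
input_values = {"x": np.random.rand(1, 2, 3).astype(np.float32)}
prediction = get_core_ml_prediction(build, input_placeholders, input_values)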
Example #4
def convert_to_mlmodel(model_spec, tensor_inputs, backend="nn_proto"):
    # Avoid circular dependency
    from coremltools.converters._converters_entry import convert

    def _convert_to_inputtype(inputs):
        if isinstance(inputs, list):
            return [_convert_to_inputtype(x) for x in inputs]
        elif isinstance(inputs, tuple):
            return tuple([_convert_to_inputtype(x) for x in inputs])
        elif isinstance(inputs, TensorType):
            return inputs
        elif isinstance(inputs, torch.Tensor):
            return TensorType(shape=inputs.shape,
                              dtype=torch_to_mil_types[inputs.dtype])
        else:
            raise ValueError("Unable to parse type {} into InputType.".format(
                type(inputs)))

    inputs = list(_convert_to_inputtype(tensor_inputs))
    return convert(model_spec,
                   inputs=inputs,
                   convert_to=backend,
                   source="pytorch")
Example #5
def tf_graph_to_mlmodel(
    graph, feed_dict, output_nodes, frontend="tensorflow", backend="nn_proto"
):
    """
    Parameters
    ----------
    graph: tf.Graph
        TensorFlow 1.x model in tf.Graph format.
    feed_dict: dict of {tf.placeholder -> np.array or python primitive)
        Dict of placeholder and value pairs representing inputs.
    output_nodes: tf.node or list[tf.node]
        List of names representing outputs.
    frontend: str
        Frontend to convert from.
    backend: str
        Backend to convert to.
    -----------
    Returns MLModel, Input Values, Output Names
    """
    # Avoid circular dependency
    from coremltools.converters._converters_entry import convert

    if isinstance(output_nodes, tuple):
        output_nodes = list(output_nodes)
    if not isinstance(output_nodes, list):
        output_nodes = [output_nodes]

    # Convert TF graph.
    input_names = get_tf_node_names(list(feed_dict.keys()), mode="inputs")
    output_names = get_tf_node_names(output_nodes, mode="outputs")
    input_values = {name: val for name, val in zip(input_names, feed_dict.values())}

    mlmodel = convert(
        graph, inputs=None, outputs=output_names, source=frontend, convert_to=backend
    )

    return mlmodel, input_values, output_names, output_nodes
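A TF1-style usage sketch; the graph and shapes are illustrative assumptions:

import numpy as np
import tensorflow as tf

with tf.Graph().as_default() as graph:
    x = tf.compat.v1.placeholder(tf.float32, shape=(1, 4), name="x")
    out = tf.nn.relu(x, name="out")

feed_dict = {x: np.random.rand(1, 4).astype(np.float32)}
mlmodel, input_values, output_names, output_nodes = tf_graph_to_mlmodel(
    graph, feed_dict, out
)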
Example #6
def run_compare_builder(
    build,
    input_placeholders,
    input_values,
    expected_output_types=None,
    expected_outputs=None,
    use_cpu_only=False,
    frontend_only=False,
    backend="nn_proto",
    atol=1e-04,
    rtol=1e-05,
    inputs=None,
    also_compare_shapes=False,
):
    """
    Inputs:
        - build: python function taking input of Vars and returning Var or
          list[Var]. Each input argument in build must match a key in
          input_values / input_placeholders.

        - input_placeholders: str -> placeholder. It must not be an empty
          dict, as MLModel does not support functions with no inputs.

        - input_values: str -> np.array or PIL.Image. Keys must match those in
          input_placeholders.

        - expected_output_types: list[(shape, builtin_type)] or (shape,
          builtin_type).  None skips type inference validation.

        - expected_outputs: list[np.array] or np.array. Required iff
          frontend_only == False

        - frontend_only: True to test up to proto generation.

        - inputs: type of inputs (either None (defaults to tensor) or [ct.ImageType])
    """
    from coremltools.converters._converters_entry import convert

    if not isinstance(expected_output_types, list):
        expected_output_types = [expected_output_types]

    if expected_outputs is not None and not isinstance(expected_outputs, list):
        expected_outputs = [expected_outputs]

    prog = Program()
    with Function(input_placeholders) as ssa_func:
        output_vars = build(**ssa_func.inputs)
        if isinstance(output_vars, tuple):
            output_vars = list(output_vars)
        elif not isinstance(output_vars, list):
            output_vars = [output_vars]
        ssa_func.set_outputs(output_vars)
        prog.add_function("main", ssa_func)

    # get output names for output_vars
    output_names = [x.name for x in output_vars]

    # Validate type inference
    msg = (
        "Number of expected output types {} should match number of output"
        + " variables {}"
    )
    assert_msg = msg.format(len(expected_output_types), len(output_vars))
    assert len(output_vars) == len(expected_output_types), assert_msg

    for out_var, s in zip(output_vars, expected_output_types):
        if out_var.dtype != s[-1]:
            raise ValueError(
                "Output {} type: expect {}, got {}. Program:\n{}".format(
                    out_var.name, s[-1].__type_info__(),
                    out_var.dtype.__type_info__(), prog
                )
            )
        if UNK_VARIADIC in s[:-1]:
            msg = "Skip type checking for UNK_VARIADIC. Output shape: {} vs expected shape: {}"
            logging.debug(msg.format(out_var.shape, s[:-1]))
            continue
        expected_shape = s[:-1]
        msg = "Output {} shape: expect {}, got {}. Program:\n{}".format(
            out_var.name, expected_shape, out_var.shape, prog
        )
        # No more variadic here.
        if len(out_var.shape) != len(expected_shape):
            raise ValueError(msg)
        # Zero out dims expected to be UNK_SYM in both shapes so that
        # symbolic dims compare equal.
        output_shape = [
            0 if es == UNK_SYM else os for os, es in zip(out_var.shape, expected_shape)
        ]
        expected_shape = [0 if es == UNK_SYM else es for es in expected_shape]
        # convert float etc to int.
        output_shape = [i if is_symbolic(i) else int(i) for i in output_shape]
        expected_shape = [i if is_symbolic(i) else int(i) for i in expected_shape]
        if output_shape != expected_shape:
            raise ValueError(msg)

    mlmodel = convert(prog, source="mil", convert_to=backend, inputs=inputs)

    if frontend_only:
        return

    if expected_outputs:
        assert len(output_vars) == len(expected_outputs), (
            "Provided expected_outputs {}"
            " should match number of output"
            " variables {}".format(len(expected_outputs), len(output_vars))
        )

        expected_outputs = {
            name: val for name, val in zip(output_names, expected_outputs)
        }

    compare_backend(
        mlmodel=mlmodel,
        input_key_values=input_values,
        expected_outputs=expected_outputs,
        use_cpu_only=use_cpu_only,
        atol=atol,
        rtol=rtol,
        also_compare_shapes=also_compare_shapes,
    )
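A sketch of a typical call, assuming the builder and the MIL types module are importable; the build function, shapes, and values are illustrative:

import numpy as np
from coremltools.converters.mil import Builder as mb
from coremltools.converters.mil.mil import types

def build(x):
    return mb.relu(x=x)

input_placeholders = {"x": mb.placeholder(shape=(2, 3))}
input_values = {"x": np.random.rand(2, 3).astype(np.float32)}
run_compare_builder(
    build,
    input_placeholders,
    input_values,
    expected_output_types=(2, 3, types.fp32),
    expected_outputs=np.maximum(input_values["x"], 0.0),
)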
Example #7
def run_compare_tf_keras(
    model,
    input_values,
    use_cpu_only=False,
    frontend_only=False,
    frontend="tensorflow",
    backend="nn_proto",
    atol=1e-04,
    rtol=1e-05,
):
    """
    Parameters
    ----------
    model: TensorFlow 2.x model
        TensorFlow 2.x model annotated with @tf.function.
    input_values: list of np.array
        List of input values in the same order as the input signature.
    use_cpu_only: bool
        If true, use CPU only for prediction, otherwise, use GPU also.
    frontend_only: bool
        If true, skip the prediction call, only validate conversion.
    frontend: str
        Frontend to convert from.
    backend: str
        Backend to convert to.
    atol: float
        The absolute tolerance parameter.
    rtol: float
        The relative tolerance parameter.
    """
    # Avoid circular dependency
    from coremltools.converters._converters_entry import convert

    mlmodel = convert(model, source=frontend, convert_to=backend)

    # Assumes conversion preserves the I/O names.
    proto = mlmodel.get_spec()
    inputs = sorted([str(i.name) for i in proto.description.input])
    outputs = [str(o.name) for o in proto.description.output]

    if frontend_only:
        return

    # get tf.keras model output as reference and run comparison
    keras_outputs = model(input_values)
    if not isinstance(keras_outputs, list):
        keras_outputs = [keras_outputs]
    ref = [output.numpy() for output in keras_outputs]
    expected_outputs = {n: v for n, v in zip(outputs, ref)}
    input_key_values = {n: v for n, v in zip(inputs, input_values)}
    compare_backend(
        mlmodel,
        input_key_values,
        expected_outputs,
        use_cpu_only,
        atol=atol,
        rtol=rtol,
        also_compare_shapes=True,
    )

    pred = None
    if not coremltoolsutils._has_custom_layer(proto):
        pred = run_core_ml_predict(mlmodel, input_key_values, use_cpu_only)
    else:
        print('Skipping model prediction as it has a custom nn layer!')
    return proto, mlmodel, input_key_values, pred
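Finally, a usage sketch with a tiny tf.keras model; the layer choice and shapes are assumptions:

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential(
    [tf.keras.layers.Dense(4, activation="relu", input_shape=(8,))]
)
# One np.array per model input, ordered to match the input signature.
input_values = [np.random.rand(1, 8).astype(np.float32)]
proto, mlmodel, input_key_values, pred = run_compare_tf_keras(model, input_values)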