Example #1
def get_core_ml_prediction(build,
                           input_placeholders,
                           input_values,
                           use_cpu_only=False,
                           backend="nn_proto"):
    """
    Return predictions of the given model.
    """
    program = Program()
    with Function(input_placeholders) as ssa_func:
        output_vars = build(**ssa_func.inputs)
        if isinstance(output_vars, tuple):
            output_vars = list(output_vars)
        elif not isinstance(output_vars, list):
            output_vars = [output_vars]
        ssa_func.set_outputs(output_vars)
        program.add_function("main", ssa_func)

    # Avoid circular import
    from coremltools.converters.mil.testing_reqs import ct
    mlmodel = ct.convert(program,
                         source="mil",
                         convert_to=backend,
                         useCPUOnly=use_cpu_only)
    return mlmodel.predict(input_values, useCPUOnly=use_cpu_only)
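
A minimal usage sketch, assuming `mb` is the MIL Builder used throughout these examples; the single relu op and shapes are illustrative:

import numpy as np
from coremltools.converters.mil import Builder as mb

input_placeholders = {"x": mb.placeholder(shape=(1, 3))}
input_values = {"x": np.random.rand(1, 3)}

def build(x):
    # One Var is passed per placeholder key.
    return mb.relu(x=x)

prediction = get_core_ml_prediction(build, input_placeholders, input_values,
                                    use_cpu_only=True)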
Example #2
def assert_model_is_valid(
    program, inputs, backend="nn_proto", verbose=True, expected_output_shapes=None
):
    """
    Assert Core ML model is valid.

    Inputs:

    - inputs: dict of str -> shape tuple. Every program input name must appear
      as a key; each shape tuple may contain only positive integers.
    """
    input_dict = dict()
    for name, shape in inputs.items():
        input_dict[name] = np.random.rand(*shape)

    # Avoid circular import
    from coremltools.converters.mil.testing_reqs import ct
    mlmodel = ct.convert(program, source="mil", convert_to=backend)
    assert mlmodel is not None

    if verbose:
        from coremltools.models.neural_network.printer import print_network_spec
        print_network_spec(mlmodel.get_spec(), style="coding")

    if _IS_MACOS:
        prediction = mlmodel.predict(input_dict, useCPUOnly=True)
        assert prediction is not None
        if expected_output_shapes is not None:
            for out_name, out_shape in expected_output_shapes.items():
                assert out_name in prediction
                assert out_shape == prediction[out_name].shape
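
A short usage sketch with a toy program; the transpose op and shapes are illustrative:

from coremltools.converters.mil import Builder as mb

@mb.program(input_specs=[mb.TensorSpec(shape=(2, 3))])
def prog(x):
    # Swap the two axes so the output shape differs from the input.
    return mb.transpose(x=x, perm=[1, 0])

assert_model_is_valid(prog, {"x": (2, 3)}, verbose=False)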
Example #3
def run_compare_tf_keras(
    model,
    input_values,
    use_cpu_only=False,
    frontend_only=False,
    frontend="tensorflow",
    backend="nn_proto",
    atol=1e-04,
    rtol=1e-05,
):
    """
    Parameters
    ----------
    model: TensorFlow 2.x model
        TensorFlow 2.x model annotated with @tf.function.
    input_values: list of np.array
        List of input values in the same order as the input signature.
    use_cpu_only: bool
        If true, use CPU only for prediction, otherwise, use GPU also.
    frontend_only: bool
        If true, skip the prediction call, only validate conversion.
    frontend: str
        Frontend to convert from.
    backend: str
        Backend to convert to.
    atol: float
        The absolute tolerance parameter.
    rtol: float
        The relative tolerance parameter.
    """

    mlmodel = ct.convert(model, source=frontend, convert_to=backend)

    # assumes conversion preserves the I/O names
    proto = mlmodel.get_spec()
    inputs = sorted([str(i.name) for i in proto.description.input])
    outputs = [str(o.name) for o in proto.description.output]

    if frontend_only:
        return

    # get tf.keras model output as reference and run comparison
    keras_outputs = model(input_values)
    if not isinstance(keras_outputs, list):
        keras_outputs = [keras_outputs]
    ref = [output.numpy() for output in keras_outputs]
    expected_outputs = {n: v for n, v in zip(outputs, ref)}
    input_key_values = {n: v for n, v in zip(inputs, input_values)}
    compare_backend(
        mlmodel,
        input_key_values,
        expected_outputs,
        use_cpu_only,
        atol=atol,
        rtol=rtol,
        also_compare_shapes=True,
    )

    return proto
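
A hedged usage sketch (assumes TensorFlow 2.x importable as tf; the one-layer model is illustrative):

import numpy as np
import tensorflow as tf

inputs = tf.keras.layers.Input(shape=(4,))
outputs = tf.keras.layers.Dense(2)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)

# Compare the tf.keras output against the converted Core ML model.
run_compare_tf_keras(model, [np.random.rand(1, 4).astype(np.float32)],
                     use_cpu_only=True)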
Example #4

    def test_with_interleave(self):
        """
        input1(1, 5, 3, 4) -----> stack(axis=2) -----> reshape(shape=(1, 10, 3, 4)) ---> out(1, 10, 3, 4)
                                    ^
                                    |
        input2(1, 5, 3, 4) ----------

        Output graph:
        input -----> concat ----> out

        """
        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))])
        def prog(x1, x2):
            x = mb.stack(values=[x1, x2], axis=2)
            x = mb.reshape(x=x, shape=[1, 10, 3, 4])
            return x

        prev_prog, prev_block, block = apply_pass_and_basic_check(
            prog, "common::replace_stack_reshape"
        )
        self.assertEqual(
            get_op_types_in_program(prev_prog), ["stack", "reshape"]
        )
        self.assertEqual(get_op_types_in_program(prog), ["concat"])

        inputs = {"x1": (1, 5, 3, 4), "x2": (1, 5, 3, 4)}
        assert_model_is_valid(
            prog,
            inputs,
            expected_output_shapes={block.outputs[0].name: (1, 10, 3, 4)},
        )

        concat_ops = [op for op in block.operations if op.op_type == 'concat']
        concat_op = concat_ops[0]
        assert concat_op.interleave.val

        output_name = block.outputs[0].name

        mlmodel = ct.convert(prog, source="milinternal", convert_to="neuralnetwork")

        if not _IS_MACOS:
            # Cannot get predictions unless on macOS.
            return

        input_dict = dict()
        for name, shape in inputs.items():
            input_dict[name] = np.random.rand(*shape)

        old_prediction = np.reshape(np.stack([input_dict["x1"], input_dict["x2"]], axis=2), newshape=[1, 10, 3, 4])

        prediction = mlmodel.predict(input_dict, useCPUOnly=True)

        np.testing.assert_allclose(old_prediction, prediction[output_name], atol=1e-04, rtol=1e-05)
Example #5

    def test_multiple(self):
        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4)), mb.TensorSpec(shape=(1, 2, 3, 4)), 
                                 mb.TensorSpec(shape=(1, 2, 3, 4)), mb.TensorSpec(shape=(1, 2, 3, 4))])
        def prog(x1, x2, x3, x4):
            a = mb.stack(values=[x1, x2], axis=1)
            a = mb.reshape(x=a, shape=[1, 4, 3, 4])

            b = mb.stack(values=[x3, x4], axis=1)
            b = mb.reshape(x=b, shape=[1, 4, 3, 4])

            c = mb.stack(values=[a, b], axis=2) 
            c = mb.reshape(x=c, shape=[1, 4, 6, 4])

            return c

        prev_prog, prev_block, block = apply_pass_and_basic_check(
            prog, "common::replace_stack_reshape"
        )
        self.assertEqual(
            get_op_types_in_program(prev_prog), ["stack", "reshape", "stack", "reshape", "stack", "reshape"]
        )
        self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"])

        inputs = {"x1": (1, 2, 3, 4), "x2": (1, 2, 3, 4), "x3": (1, 2, 3, 4), "x4": (1, 2, 3, 4)}
        assert_model_is_valid(
            prog,
            inputs,
            expected_output_shapes={block.outputs[0].name: (1, 4, 6, 4)},
        )

        output_name = block.outputs[0].name

        mlmodel = ct.convert(prog, source="milinternal", convert_to="neuralnetwork")

        if not _IS_MACOS:
            # Cannot get predictions unless on macOS.
            return

        input_dict = dict()
        for name, shape in inputs.items():
            input_dict[name] = np.random.rand(*shape)

        branch_1 = np.reshape(np.stack([input_dict['x1'], input_dict['x2']], axis=1), newshape=[1, 4, 3, 4])
        branch_2 = np.reshape(np.stack([input_dict['x3'], input_dict['x4']], axis=1), newshape=[1, 4, 3, 4])
        old_prediction = np.reshape(np.stack([branch_1, branch_2], axis=2), newshape=[1, 4, 6, 4])

        prediction = mlmodel.predict(input_dict, useCPUOnly=True)

        np.testing.assert_allclose(old_prediction, prediction[output_name], atol=1e-04, rtol=1e-05)
Example #6
def convert_to_mlmodel(model_spec, tensor_inputs, backend="nn_proto"):
    def _convert_to_inputtype(inputs):
        if isinstance(inputs, list):
            return [_convert_to_inputtype(x) for x in inputs]
        elif isinstance(inputs, tuple):
            return tuple([_convert_to_inputtype(x) for x in inputs])
        elif isinstance(inputs, TensorType):
            return inputs
        elif isinstance(inputs, torch.Tensor):
            return TensorType(shape=inputs.shape, dtype=torch_to_mil_types[inputs.dtype])
        else:
            raise ValueError(
                "Unable to parse type {} into InputType.".format(type(inputs))
            )

    inputs = list(_convert_to_inputtype(tensor_inputs))
    return ct.convert(model_spec, inputs=inputs, convert_to=backend,
        source="pytorch")
Example #7

    def test_fusion_with_image_full(self):
        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20, 30, 3))])
        def prog(x):
            x1 = mb.transpose(x=x, perm=[0, 3, 1, 2])
            x2 = mb.relu(x=x)
            x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2])
            x4 = mb.add(x=x1, y=x3)
            return mb.relu(x=x4)

        mlmodel = ct.convert(prog,
                             inputs=[
                                 ImageType(name="x",
                                           shape=(10, 20, 30, 3),
                                           channel_first=False)
                             ],
                             source="mil",
                             convert_to="nn_proto")
        assert mlmodel is not None
        assert len(mlmodel.get_spec().neuralNetwork.layers) == 3
Example #8

def tf_graph_to_mlmodel(graph,
                        feed_dict,
                        output_nodes,
                        frontend="tensorflow",
                        backend="nn_proto"):
    """
    Parameters
    ----------
    graph: tf.Graph
        TensorFlow 1.x model in tf.Graph format.
    feed_dict: dict of {tf.placeholder -> np.array or Python primitive}
        Dict of placeholder and value pairs representing inputs.
    output_nodes: tf.node or list[tf.node]
        Node or list of nodes representing outputs.
    frontend: str
        Frontend to convert from.
    backend: str
        Backend to convert to.
    Returns
    -------
    MLModel, input values, output names, output nodes.
    """
    if isinstance(output_nodes, tuple):
        output_nodes = list(output_nodes)
    if not isinstance(output_nodes, list):
        output_nodes = [output_nodes]

    # Convert TF graph.
    input_names = get_tf_node_names(list(feed_dict.keys()), mode="inputs")
    output_names = get_tf_node_names(output_nodes, mode="outputs")
    input_values = {
        name: val
        for name, val in zip(input_names, feed_dict.values())
    }

    mlmodel = ct.convert(graph,
                         inputs=None,
                         outputs=output_names,
                         source=frontend,
                         convert_to=backend)

    return mlmodel, input_values, output_names, output_nodes
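
A hedged usage sketch (uses TF 1.x-style graph building, here via tf.compat.v1):

import numpy as np
import tensorflow.compat.v1 as tf

with tf.Graph().as_default() as graph:
    x = tf.placeholder(tf.float32, shape=(1, 4), name="x")
    out = tf.nn.relu(x, name="out")

feed_dict = {x: np.random.rand(1, 4).astype(np.float32)}
mlmodel, input_values, output_names, output_nodes = tf_graph_to_mlmodel(
    graph, feed_dict, out)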
Example #9
def run_compare_tf2(
    model,
    input_dict,
    output_names,
    use_cpu_only=False,
    frontend_only=False,
    frontend="tensorflow",
    backend="nn_proto",
    debug=False,
    atol=1e-04,
    rtol=1e-05,
):
    """
    Parameters
    ----------
    model: list of tf.ConcreteFunction
        List of TensorFlow 2.x concrete functions.
    input_dict: dict of (str, np.array)
        Dict of name and value pairs representing inputs.
    output_names: list of str
        List of output node names.
    use_cpu_only: bool
        If true, use CPU only for prediction, otherwise, use GPU also.
    frontend_only: bool
        If true, skip the prediction call, only validate conversion.
    frontend: str
        Frontend to convert from.
    backend: str
        Backend to convert to.
    debug: bool
        If true, print verbose information and plot intermediate graphs.
    atol: float
        The absolute tolerance parameter.
    rtol: float
        The relative tolerance parameter.
    """
    inputs = []
    cf_inputs = [t for t in model[0].inputs if t.dtype != dtypes.resource]
    for t in cf_inputs:
        name = get_tf_node_names(t.name)[0]
        shape = [RangeDim() if s is None or s == -1 else s \
                for s in list(t.get_shape())]
        inputs.append(
            TensorType(name=name, shape=shape, dtype=t.dtype.as_numpy_dtype))
    outputs = []
    for t in output_names:
        name = get_tf_node_names(t)[0]
        outputs.append(name)

    # get TensorFlow 2.x output as reference and run comparison
    tf_input_values = [tf.constant(t) for t in input_dict.values()]
    tf_outputs = model[0](*tf_input_values)
    if isinstance(tf_outputs, (tuple, list)):
        ref = [t.numpy() for t in tf_outputs]
    else:
        ref = [tf_outputs.numpy()]
    expected_outputs = {n: v for n, v in zip(outputs, ref)}

    mlmodel = ct.convert(
        model,
        source=frontend,
        inputs=inputs,
        outputs=outputs,
        convert_to=backend,
        debug=debug,
    )

    if frontend_only:
        return

    for k, v in input_dict.items():
        if isinstance(v, np.ndarray) and issubclass(v.dtype.type, np.integer):
            input_dict[k] = v.astype(np.float64)  # Core ML only accepts floats

    compare_backend(
        mlmodel,
        input_dict,
        expected_outputs,
        use_cpu_only,
        atol=atol,
        rtol=rtol,
        also_compare_shapes=True,
    )

    return mlmodel.get_spec()
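
A hedged usage sketch; the output node name "Identity" is an assumption about how TF2 names a concrete function's wrapped output:

import numpy as np
import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec(shape=(1, 4), dtype=tf.float32)])
def model_fn(x):
    return tf.nn.relu(x)

cf = model_fn.get_concrete_function()
input_dict = {"x": np.random.rand(1, 4).astype(np.float32)}
run_compare_tf2([cf], input_dict, ["Identity"], use_cpu_only=True)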
Example #10
def run_compare_builder(
    build,
    input_placeholders,
    input_values,
    expected_output_types=None,
    expected_outputs=None,
    use_cpu_only=False,
    frontend_only=False,
    backend="nn_proto",
    atol=1e-04,
    rtol=1e-05,
    inputs=None,
    also_compare_shapes=False,
):
    """
    Inputs:
        - build: python function taking input of Vars and returning Var or
          list[Var]. Each input argument in build must match a key in
          input_values / input_placeholders.

        - input_placeholders: str -> placeholder. It must not be an empty
          dict, as MLModel doesn't support functions with no inputs.

        - input_values: str -> np.array or PIL.Image. Keys must match those in
          input_placeholders.

        - expected_output_types: list[(shape, builtin_type)] or (shape,
          builtin_type).  None skips type inference validation.

        - expected_outputs: list[np.array] or np.array. Required iff
          frontend_only == False

        - frontend_only: True to test up to proto generation.

        - inputs: type of inputs (either None (defaults to tensor) or [ct.ImageType])
    """
    if not isinstance(expected_output_types, list):
        expected_output_types = [expected_output_types]

    if expected_outputs is not None and not isinstance(expected_outputs, list):
        expected_outputs = [expected_outputs]

    prog = Program()
    with Function(input_placeholders) as ssa_func:
        output_vars = build(**ssa_func.inputs)
        if isinstance(output_vars, tuple):
            output_vars = list(output_vars)
        elif not isinstance(output_vars, list):
            output_vars = [output_vars]
        ssa_func.set_outputs(output_vars)
        prog.add_function("main", ssa_func)

    # get output names for output_vars
    output_names = [x.name for x in output_vars]

    # Validate type inference
    msg = (
        "Provided expected output types {} should match number of output"
        + " variables {}"
    )
    assert_msg = msg.format(len(expected_output_types), len(output_vars))
    assert len(output_vars) == len(expected_output_types), assert_msg

    for out_var, s in zip(output_vars, expected_output_types):
        if out_var.dtype != s[-1]:
            raise ValueError(
                "Output {} type: expect {}, got {}. Program:\n{}".format(
                    out_var.name, s[-1].__type_info__(),
                    out_var.dtype.__type_info__(), prog
                )
            )
        if UNK_VARIADIC in s[:-1]:
            msg = "Skip type checking for UNK_VARIADIC. Output shape: {} vs expected shape: {}"
            logging.debug(msg.format(out_var.shape, s[:-1]))
            continue
        expected_shape = s[:-1]
        msg = "Output {} shape: expect {}, got {}. Program:\n{}".format(
            out_var.name, expected_shape, out_var.shape, prog
        )
        # No more variadic here.
        if len(out_var.shape) != len(expected_shape):
            raise ValueError(msg)
        # replace UNK_SYM in out_var.shape.
        output_shape = [
            0 if es == UNK_SYM else os for os, es in zip(out_var.shape, expected_shape)
        ]
        expected_shape = [0 if es == UNK_SYM else es for es in expected_shape]
        # convert float etc to int.
        output_shape = [i if is_symbolic(i) else int(i) for i in output_shape]
        expected_shape = [i if is_symbolic(i) else int(i) for i in expected_shape]
        if output_shape != expected_shape:
            raise ValueError(msg)

    mlmodel = ct.convert(prog, source="mil", convert_to=backend, inputs=inputs)

    if frontend_only:
        return

    if expected_outputs:
        assert len(output_vars) == len(expected_outputs), (
            "Provided expected_outputs {}"
            " should match number of output"
            " variables {}".format(len(expected_outputs), len(output_vars))
        )

        expected_outputs = {
            name: val for name, val in zip(output_names, expected_outputs)
        }

    compare_backend(
        mlmodel=mlmodel,
        input_key_values=input_values,
        expected_outputs=expected_outputs,
        use_cpu_only=use_cpu_only,
        atol=atol,
        rtol=rtol,
        also_compare_shapes=also_compare_shapes,
    )
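
A minimal usage sketch in the style of the MIL op tests (assumes `types` is coremltools' MIL types module):

import numpy as np
from coremltools.converters.mil import Builder as mb
from coremltools.converters.mil.mil import types

x_val = np.array([[-1.0, 2.0]], dtype=np.float32)
input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
input_values = {"x": x_val}

def build(x):
    return mb.relu(x=x)

# Validate both the inferred output type and the runtime prediction.
run_compare_builder(build, input_placeholders, input_values,
                    expected_output_types=(1, 2, types.fp32),
                    expected_outputs=np.maximum(x_val, 0.0),
                    use_cpu_only=True)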
Example #11
    def test_success(self):
        """
        Input graph:
        input1(1, 2, 3, 4) -----> concat(axis=2, interleave=True) -----> concat(axis=3, interleave=True) ---> out(1, 2, 6, 8)
                                             ^                                           ^
                                             |                                           |
        input2(1, 2, 3, 4) -------------------                                           |
                                                                                         |
        input3(1, 2, 3, 4) -----> concat(axis=2, interleave=True) -----------------------|
                                             ^
                                             |
        input4(1, 2, 3, 4) ------------------|

        Output graph:
        input1(1, 2, 3, 4) -----> concat(axis=1) ---> pixel_shuffle(upsample_factor=2) ----> out(1, 2, 6, 8)
                                     ^
        input2(1, 2, 3, 4) ----------|
                                     | 
        input3(1, 2, 3, 4) ----------|
                                     | 
        input4(1, 2, 3, 4) ----------|
        """
        @mb.program(input_specs=[
            mb.TensorSpec(shape=(1, 2, 3, 4)),
            mb.TensorSpec(shape=(1, 2, 3, 4)),
            mb.TensorSpec(shape=(1, 2, 3, 4)),
            mb.TensorSpec(shape=(1, 2, 3, 4))
        ])
        def prog(x1, x2, x3, x4):
            ab = mb.concat(values=[x1, x2], axis=2, interleave=True)
            cd = mb.concat(values=[x3, x4], axis=2, interleave=True)
            x = mb.concat(values=[ab, cd], axis=3, interleave=True)

            return x

        prev_prog, prev_block, block = apply_pass_and_basic_check(
            prog, "common::concat_to_pixel_shuffle")
        self.assertEqual(get_op_types_in_program(prev_prog),
                         ["concat", "concat", "concat"])
        self.assertEqual(get_op_types_in_program(prog),
                         ["concat", "pixel_shuffle"])

        inputs = {
            "x1": (1, 2, 3, 4),
            "x2": (1, 2, 3, 4),
            "x3": (1, 2, 3, 4),
            "x4": (1, 2, 3, 4)
        }
        assert_model_is_valid(
            prog,
            inputs,
            expected_output_shapes={block.outputs[0].name: (1, 2, 6, 8)},
        )

        mlmodel = ct.convert(prog,
                             source="milinternal",
                             convert_to="neuralnetwork")

        if not _IS_MACOS:
            # Cannot get predictions unless on macOS.
            return

        input_dict = dict()
        input_dict["x1"] = np.ones(inputs["x1"])
        input_dict["x2"] = np.ones(inputs["x2"]) * 2
        input_dict["x3"] = np.ones(inputs["x3"]) * 3
        input_dict["x4"] = np.ones(inputs["x4"]) * 4

        output_name = block.outputs[0].name

        ab = np.reshape(np.stack((input_dict["x1"], input_dict["x2"]), axis=3),
                        newshape=[1, 2, 6, 4])
        cd = np.reshape(np.stack((input_dict["x3"], input_dict["x4"]), axis=3),
                        newshape=[1, 2, 6, 4])
        old_prediction = np.reshape(np.stack((ab, cd), axis=4),
                                    newshape=[1, 2, 6, 8])

        prediction = mlmodel.predict(input_dict, useCPUOnly=True)
        np.testing.assert_allclose(old_prediction,
                                   prediction[output_name],
                                   atol=1e-04,
                                   rtol=1e-05)
Example #12
    def test_nested(self):
        """
        Two nested blocks that will each be transformed.
        """
        @mb.program(input_specs=[
            mb.TensorSpec(shape=(1, 2, 3, 4)),
            mb.TensorSpec(shape=(1, 2, 3, 4)),
            mb.TensorSpec(shape=(1, 2, 3, 4)),
            mb.TensorSpec(shape=(1, 2, 3, 4)),
            mb.TensorSpec(shape=(1, 2, 3, 4)),
            mb.TensorSpec(shape=(1, 2, 3, 4)),
            mb.TensorSpec(shape=(1, 2, 3, 4)),
            mb.TensorSpec(shape=(1, 2, 3, 4))
        ])
        def prog(x1, x2, x3, x4, x5, x6, x7, x8):
            ab = mb.concat(values=[x1, x2], axis=2, interleave=True)
            cd = mb.concat(values=[x3, x4], axis=2, interleave=True)
            x = mb.concat(values=[ab, cd], axis=3, interleave=True)

            ef = mb.concat(values=[x5, x6], axis=2, interleave=True)
            gh = mb.concat(values=[x7, x8], axis=2, interleave=True)
            y = mb.concat(values=[ef, gh], axis=3, interleave=True)

            z = mb.concat(values=[x, y], axis=1)

            return z

        prev_prog, prev_block, block = apply_pass_and_basic_check(
            prog, "common::concat_to_pixel_shuffle")
        self.assertEqual(get_op_types_in_program(prev_prog), [
            "concat", "concat", "concat", "concat", "concat", "concat",
            "concat"
        ])
        self.assertEqual(
            get_op_types_in_program(prog),
            ["concat", "pixel_shuffle", "concat", "pixel_shuffle", "concat"])

        inputs = {
            "x1": (1, 2, 3, 4),
            "x2": (1, 2, 3, 4),
            "x3": (1, 2, 3, 4),
            "x4": (1, 2, 3, 4),
            "x5": (1, 2, 3, 4),
            "x6": (1, 2, 3, 4),
            "x7": (1, 2, 3, 4),
            "x8": (1, 2, 3, 4)
        }
        assert_model_is_valid(
            prog,
            inputs,
            expected_output_shapes={block.outputs[0].name: (1, 4, 6, 8)},
        )

        input_dict = dict()
        for name, shape in inputs.items():
            input_dict[name] = np.random.rand(*shape)

        output_name = block.outputs[0].name

        ab = np.reshape(np.stack((input_dict["x1"], input_dict["x2"]), axis=3),
                        newshape=[1, 2, 6, 4])
        cd = np.reshape(np.stack((input_dict["x3"], input_dict["x4"]), axis=3),
                        newshape=[1, 2, 6, 4])
        x = np.reshape(np.stack((ab, cd), axis=4), newshape=[1, 2, 6, 8])

        ef = np.reshape(np.stack((input_dict["x5"], input_dict["x6"]), axis=3),
                        newshape=[1, 2, 6, 4])
        gh = np.reshape(np.stack((input_dict["x7"], input_dict["x8"]), axis=3),
                        newshape=[1, 2, 6, 4])
        y = np.reshape(np.stack((ef, gh), axis=4), newshape=[1, 2, 6, 8])

        old_prediction = np.concatenate((x, y), axis=1)

        mlmodel = ct.convert(prog,
                             source="milinternal",
                             convert_to="neuralnetwork")

        if _IS_MACOS:
            prediction = mlmodel.predict(input_dict, useCPUOnly=True)
            np.testing.assert_allclose(old_prediction,
                                       prediction[output_name],
                                       atol=1e-04,
                                       rtol=1e-05)