Example #1
def get_core_ml_prediction(build,
                           input_placeholders,
                           input_values,
                           use_cpu_only=True,
                           backend=("neuralnetwork", "fp32")):
    """
    Return predictions of the given model.
    """
    program = Program()
    with Function(input_placeholders) as ssa_func:
        output_vars = build(**ssa_func.inputs)
        if isinstance(output_vars, tuple):
            output_vars = list(output_vars)
        elif not isinstance(output_vars, list):
            output_vars = [output_vars]
        ssa_func.set_outputs(output_vars)
        program.add_function("main", ssa_func)

    if use_cpu_only:
        compute_unit = ct.ComputeUnit.CPU_ONLY
    else:
        compute_unit = ct.ComputeUnit.ALL

    mlmodel = ct_convert(program,
                         source="milinternal",
                         convert_to=backend,
                         compute_units=compute_unit)
    return mlmodel.predict(input_values)
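
A minimal usage sketch for this helper (the toy build function, the mb builder import, and the numpy input are illustrative assumptions; Program, Function, and ct_convert are assumed in scope as above):

import numpy as np
from coremltools.converters.mil import Builder as mb

# Hypothetical one-op model: a relu over a 2x3 input.
input_placeholders = {"x": mb.placeholder(shape=(2, 3))}
input_values = {"x": np.random.rand(2, 3).astype(np.float32)}

def build(x):
    return mb.relu(x=x)

# Returns a dict mapping output names to numpy arrays.
predictions = get_core_ml_prediction(build, input_placeholders, input_values)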
Example #2
def load(model_spec, specification_version, file_weights_dir="", **kwargs):
    if not isinstance(model_spec, ml.Model):
        raise TypeError("Invalid Model sepc object")
    
    if specification_version < model_spec.specificationVersion:
        raise ValueError("specification_version must be greater or equal to the input model spec version")
        
    if model_spec.WhichOneof("Type") != "mlProgram":
        raise ValueError("Only MIL proto based mlmodels can be loaded")

    program_spec = model_spec.mlProgram
    if not isinstance(program_spec, pm.Program):
        raise TypeError("Invalid Program spec object")

    if program_spec.docString:
        raise NotImplementedError("Docstring would be lost in the process")

    if program_spec.version != 1:
        raise ValueError("Invalid program version")

    context = TranscriptionContext(file_weights_dir)
    pymil_program = Program()
    for func_name, func_spec in program_spec.functions.items():
        pymil_program.add_function(
            func_name, _load_function(context, func_spec, specification_version)
        )

    for attr_name, attr_spec in program_spec.attributes.items():
        if attr_name not in ("buildInfo",):
            raise ValueError("Invalid attribute for program")

    return pymil_program
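
A sketch of driving this loader from a saved mlpackage; the paths are illustrative assumptions about the mlpackage layout, and load_spec is coremltools' public spec loader:

from coremltools.models.utils import load_spec

# Illustrative paths; an .mlpackage keeps its spec and blob weights
# under Data/com.apple.CoreML/.
spec = load_spec("model.mlpackage/Data/com.apple.CoreML/model.mlmodel")
pymil_program = load(
    spec,
    specification_version=spec.specificationVersion,
    file_weights_dir="model.mlpackage/Data/com.apple.CoreML/weights",
)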
Example #3
def get_core_ml_prediction(build,
                           input_placeholders,
                           input_values,
                           use_cpu_only=False,
                           backend="nn_proto"):
    """
    Return predictions of the given model.
    """
    program = Program()
    with Function(input_placeholders) as ssa_func:
        output_vars = build(**ssa_func.inputs)
        if isinstance(output_vars, tuple):
            output_vars = list(output_vars)
        elif not isinstance(output_vars, list):
            output_vars = [output_vars]
        ssa_func.set_outputs(output_vars)
        program.add_function("main", ssa_func)

    # Avoid circular import
    from coremltools.converters.mil.testing_reqs import ct
    mlmodel = ct.convert(program,
                         source="mil",
                         convert_to=backend,
                         useCPUOnly=use_cpu_only)
    return mlmodel.predict(input_values, useCPUOnly=use_cpu_only)
Example #4
    def test_nn_backend_style_sanitization(self):
        '''
        Test that intermediate var names are unchanged, and
        only model input and output names are modified, i.e.
        sanitized (adhering to the format [a-zA-Z_][a-zA-Z0-9_]*)
        for the NN backend.
        '''

        prog = Program()
        func_inputs = {"x/0": mb.placeholder(shape=[2, 3]),
                       "y": mb.placeholder(shape=[2, 3])}
        with Function(func_inputs) as ssa_fun:
            x, y = ssa_fun.inputs["x/0"], ssa_fun.inputs["y"]
            x = mb.relu(x=x, name="relu/1")
            z = mb.add(x=x, y=y, name="out/1")
            ssa_fun.set_outputs([z])
        prog.add_function("main", ssa_fun)

        prev_prog, prev_block, block = apply_pass_and_basic_check(
            prog, "common::sanitize_input_output_names",
            skip_output_name_check=True
        )

        relu_op = prog.find_ops(op_type="relu", exactly_one=True)[0]
        assert relu_op.inputs["x"].name == "x_0" # input name: sanitized
        assert relu_op.outputs[0].name == "relu/1" # intermediate name: unchanged
        assert block.outputs[0].name == "out_1" # output name: sanitized

        # convert prev_prog to NN backend
        mlmodel = ct.convert(prev_prog)
        spec = mlmodel._spec
        assert spec.description.input[0].name == "x_0"
        assert spec.description.output[0].name == "out_1"
        relu_layer = spec.neuralNetwork.layers[0]
        assert relu_layer.output[0] == "relu/1"
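
For reference, the renaming rule this test exercises can be sketched as a standalone helper (a hypothetical re-implementation for illustration, not the actual pass):

import re

def sanitize_name(name):
    # Map characters outside [a-zA-Z0-9_] to "_", then prefix "_" if the
    # result starts with a digit, so names match [a-zA-Z_][a-zA-Z0-9_]*.
    sanitized = re.sub(r"[^a-zA-Z0-9_]", "_", name)
    if sanitized and sanitized[0].isdigit():
        sanitized = "_" + sanitized
    return sanitized

assert sanitize_name("x/0") == "x_0"
assert sanitize_name("out/1") == "out_1"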
Example #5
    def convert(self):

        _logging.info("Converting graph.")

        # This will hold the converted model.
        prog = Program()

        # Construct placeholder for input to ssa function
        # This is where input renaming occurs
        ssa_func_inputs = OrderedDict()
        for index, (name, spec) in enumerate(self.graph.inputs.items()):
            placeholder = self._create_placeholder(spec)
            # Set ssa function input name to user defined name if provided.
            if spec.name is not None:
                name = spec.name
            self.inputs[index].name = name
            ssa_func_inputs[name] = placeholder
        prog.set_main_input_types(tuple(self.inputs))

        # Initialize the SSA for conversion
        with Function(ssa_func_inputs) as ssa_func:

            # Map internal @self.graph.inputs to user specified @ssa_func_inputs
            # If @self.graph.inputs == @ssa_func_inputs this just adds the inputs
            # to the context.
            for internal_name, users_name in zip(self.graph.inputs.keys(),
                                                 ssa_func_inputs.keys()):
                self.context.add(ssa_func.inputs[users_name],
                                 torch_name=internal_name)
            for name, val in self.graph.params.items():
                mode = decide_immediate_or_file(val)
                const = mb.const(val=val, mode=mode, name=name)
                self.context.add(const)

            # Add the rest of the operations
            convert_nodes(self.context, self.graph)

            graph_outputs = [self.context[name] for name in self.graph.outputs]

            # An output can be None when it's a None constant, which happens
            # in Fairseq MT.
            for name, g in zip(self.graph.outputs, graph_outputs):
                if g is None:
                    msg = "Dropping output {} which is None"
                    _logging.warning(msg.format(name))
            graph_outputs = [g for g in graph_outputs if g is not None]

            # Output renaming occurs
            if self.output_names:
                for index, var in enumerate(graph_outputs):
                    output_rename = self.output_names[index]
                    var.name = output_rename

            ssa_func.set_outputs(graph_outputs)
            prog.add_function("main", ssa_func)

        # TODO (sberardi): graph cleanup passes
        # rdar://60177439
        return prog
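
A sketch of how this convert() method might be driven, assuming the TorchConverter constructor from Example #9 below; the toy module and input spec are illustrative:

import torch
import coremltools as ct

# Hypothetical toy model, traced to a torch.jit.ScriptModule.
traced = torch.jit.trace(torch.nn.ReLU().eval(), torch.rand(1, 3))

converter = TorchConverter(traced, inputs=[ct.TensorType(shape=(1, 3))])
prog = converter.convert()  # the PyMIL Program assembled above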
Example #6
def get_core_ml_prediction(build,
                           input_placeholders,
                           input_values,
                           use_cpu_only=False,
                           backend="nn_proto"):
    """
    Return predictions of the given model.
    """
    program = Program()
    with Function(input_placeholders) as ssa_func:
        output_vars = build(**ssa_func.inputs)
        if isinstance(output_vars, tuple):
            output_vars = list(output_vars)
        elif not isinstance(output_vars, list):
            output_vars = [output_vars]
        ssa_func.set_outputs(output_vars)
        program.add_function("main", ssa_func)

    proto = _converter._convert(program,
                                convert_from="mil",
                                convert_to=backend)
    model = coremltools.models.MLModel(proto, use_cpu_only)
    return model.predict(input_values, useCPUOnly=use_cpu_only)
Example #7
    def test_cast_with_symbolic_value(self):
        input_shape = [get_new_symbol(), 1]
        input_placeholders = {
            "x": mb.placeholder(shape=input_shape),
        }

        def build(x):
            shape = mb.shape(x=x)
            return mb.cast(x=shape, dtype="int32")

        prog = Program()
        with Function(input_placeholders) as ssa_func:
            output_vars = build(**ssa_func.inputs)
            assert is_compatible_symbolic_vector(output_vars.sym_val,
                                                 [get_new_symbol(), 1])
Example #8
    def convert(self):
        prog = Program()
        if len(self.graph_stack) == 0:
            raise ValueError("At least one TF function must be present")
        if self.graph_stack[0] != "main":
            msg = "TF root graph must be named 'main'. Got {}"
            raise ValueError(msg.format(self.graph_stack[0]))
        graph = self.tfssa.functions["main"].graph
        for g_name in self.graph_stack[1:]:
            self.context.add_graph(g_name, self.tfssa.functions[g_name].graph)
        self.convert_main_graph(prog, graph)

        # Apply TF frontend passes on Program. These passes are different
        # from passes applied to tfssa.
        self.tensorflow_passes(prog)

        return prog
Example #9
    def __init__(self,
                 torchscript,
                 inputs,
                 outputs=None,
                 cut_at_symbols=None,
                 opset_version=None):
        """
        Arguments:
            torchscript: torch.jit.ScriptModule object representing the model to convert.
            inputs: Input values and optional names. See kwarg in load.py for full description.
            outputs: List of outputs as ct.InputType. See kwarg in load.py for full description.
            cut_at_symbols: A list of internal symbol name strings. Graph conversion will
                terminate once these symbols have been generated. For debugging use
                only. See kwarg in load.py.
            opset_version: An int representing the Core ML opset version.
        """

        assert isinstance(torchscript, _torch.jit.ScriptModule)
        self.inputs = inputs
        for idx, inp in enumerate(self.inputs):
            if isinstance(
                    inp, ImageType) and self.inputs[idx].channel_first is None:
                self.inputs[idx].channel_first = True
        self.torchscript = torchscript
        self.outputs = outputs
        self.output_names = get_output_names(self.outputs)
        self.opset_version = _target(
            opset_version) if opset_version is not None else None
        self.context = TranscriptionContext()
        raw_graph, params_dict = self._expand_and_optimize_ir(self.torchscript)
        self.params_dict = params_dict
        self.graph = InternalTorchIRGraph(raw_graph, params_dict, self.inputs,
                                          cut_at_symbols)
        passes = [
            transform_inplace_ops,
            flatten_graph_input_values,
            flatten_graph_output_values,
            remove_getattr_nodes,
            generate_tensor_assignment_ops,
        ]
        for p in passes:
            p(self.graph)
        self.inputs = [v for v in self.graph.inputs.values()]
        self.torch_passes = torch_passes
        self._prog = Program()
Example #10
def wrapper(*args, **kwargs):
    prog = Program()
    with Function({}) as ssa_func:
        func(*args, **kwargs)
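
This fragment looks like the inner closure of a decorator; a plausible completion, assuming it mirrors coremltools' ssa_fn test helper, where the decorated body builds ops inside a scratch Function purely for their side effects:

from functools import wraps

def ssa_fn(func):
    # Hypothetical reconstruction of the enclosing decorator.
    @wraps(func)
    def wrapper(*args, **kwargs):
        prog = Program()  # scratch program, as in the fragment above
        with Function({}) as ssa_func:
            func(*args, **kwargs)
    return wrapper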
Example #11
def run_compare_builder(
    build,
    input_placeholders,
    input_values,
    expected_output_types=None,
    expected_outputs=None,
    use_cpu_only=False,
    frontend_only=False,
    backend="nn_proto",
    atol=1e-04,
    rtol=1e-05,
    inputs=None,
):
    """
    Inputs:
        - build: python function taking input of Vars and returning Var or
          list[Var]. Each input argument in build must match a key in
          input_values / input_placeholders.

        - input_placeholders: str -> placeholder. It may not be an empty
                              dict, as MLModel doesn't support a function
                              with no inputs.

        - input_values: str -> np.array or PIL.Image. Keys must match those in
          input_placeholders.

        - expected_output_types: list[(shape, builtin_type)] or (shape,
          builtin_type).  None skips type inference validation.

        - expected_outputs: list[np.array] or np.array. Required iff
          frontend_only == False

        - frontend_only: True to test up to proto generation.

        - inputs: type of inputs (either None (defaults to tensor) or [ct.ImageType])
    """
    if not isinstance(expected_output_types, list):
        expected_output_types = [expected_output_types]

    if expected_outputs is not None and not isinstance(expected_outputs, list):
        expected_outputs = [expected_outputs]

    prog = Program()
    with Function(input_placeholders) as ssa_func:
        output_vars = build(**ssa_func.inputs)
        if isinstance(output_vars, tuple):
            output_vars = list(output_vars)
        elif not isinstance(output_vars, list):
            output_vars = [output_vars]
        ssa_func.set_outputs(output_vars)
        prog.add_function("main", ssa_func)

    # get output names for output_vars
    output_names = [x.name for x in output_vars]

    # Validate type inference
    msg = ("Provided expected outputs types {} should match number of output" +
           " variables {}")
    assert_msg = msg.format(len(expected_output_types), len(output_vars))
    assert len(output_vars) == len(expected_output_types), assert_msg

    for out_var, s in zip(output_vars, expected_output_types):
        if out_var.dtype != s[-1]:
            raise ValueError(
                "Output {} type: expect {}, got {}. Program:\n{}".format(
                    out_var.name, s[-1], out_var.dtype, prog))
        if UNK_VARIADIC in s[:-1]:
            msg = "Skip type checking for UNK_VARIADIC. Output shape: {} vs expected shape: {}"
            logging.debug(msg.format(out_var.shape, s[:-1]))
            continue
        expected_shape = s[:-1]
        msg = "Output {} shape: expect {}, got {}. Program:\n{}".format(
            out_var.name, expected_shape, out_var.shape, prog)
        # No more variadic here.
        if len(out_var.shape) != len(expected_shape):
            raise ValueError(msg)
        # replace UNK_SYM in out_var.shape.
        output_shape = [
            0 if es == UNK_SYM else os
            for os, es in zip(out_var.shape, expected_shape)
        ]
        expected_shape = [0 if es == UNK_SYM else es for es in expected_shape]
        # convert float etc to int.
        output_shape = [i if is_symbolic(i) else int(i) for i in output_shape]
        expected_shape = [
            i if is_symbolic(i) else int(i) for i in expected_shape
        ]
        if output_shape != expected_shape:
            raise ValueError(msg)

    proto = converter._convert(prog,
                               convert_from="mil",
                               convert_to=backend,
                               inputs=inputs)

    if frontend_only:
        return

    if expected_outputs:
        assert len(output_vars) == len(expected_outputs), (
            "Provided expected_outputs {}"
            " should match number of output"
            " variables {}".format(len(expected_outputs), len(output_vars)))

        expected_outputs = {
            name: val
            for name, val in zip(output_names, expected_outputs)
        }

    compare_backend(
        proto=proto,
        input_key_values=input_values,
        expected_outputs=expected_outputs,
        use_cpu_only=use_cpu_only,
        atol=atol,
        rtol=rtol,
        also_compare_shapes=False,
    )
Example #12
def run_compare_builder(
    build,
    input_placeholders,
    input_values,
    expected_output_types=None,
    expected_outputs=None,
    use_cpu_only=False,
    frontend_only=False,
    backend=("neuralnetwork", "fp32"),
    atol=1e-04,
    rtol=1e-05,
    inputs=None,
    also_compare_shapes=False,
    converter=ct.convert,
    minimum_deployment_target=None,
):
    """
    Inputs:
        - build: python function taking input of Vars and returning Var or
          list[Var]. Each input argument in build must match a key in
          input_values / input_placeholders.

        - input_placeholders: str -> placeholder. It may not be an empty
                              dict, as MLModel doesn't support a function
                              with no inputs.

        - input_values: str -> np.array or PIL.Image. Keys must match those in
          input_placeholders.

        - expected_output_types: list[(shape, builtin_type)] or (shape,
          builtin_type).  None skips type inference validation.

        - expected_outputs: list[np.array] or np.array. Required iff
          frontend_only == False

        - frontend_only: True to test up to proto generation.

        - inputs: type of inputs (either None (defaults to tensor) or [ct.ImageType])

        - converter: function
            Reference to convert function to be used.
            Default: ct.convert

        - minimum_deployment_target : coremltools.target enumeration (optional)
            A member of the ``coremltools.target`` enum.

    Returns:
        The converted mlmodel
    """
    if not isinstance(expected_output_types, list):
        expected_output_types = [expected_output_types]

    if expected_outputs is not None and not isinstance(expected_outputs, list):
        expected_outputs = [expected_outputs]

    prog = Program()
    with Function(input_placeholders,
                  opset_version=minimum_deployment_target) as ssa_func:
        output_vars = build(**ssa_func.inputs)
        if isinstance(output_vars, tuple):
            output_vars = list(output_vars)
        elif not isinstance(output_vars, list):
            output_vars = [output_vars]
        ssa_func.set_outputs(output_vars)
        prog.add_function("main", ssa_func)

    # get output names for output_vars
    output_names = [x.name for x in output_vars]

    # Validate type inference
    msg = ("Provided expected outputs types {} should match number of output" +
           " variables {}")
    assert_msg = msg.format(len(expected_output_types), len(output_vars))
    assert len(output_vars) == len(expected_output_types), assert_msg

    for out_var, s in zip(output_vars, expected_output_types):
        if out_var.dtype != s[-1]:
            raise ValueError(
                "Output {} type: expect {}, got {}. Program:\n{}".format(
                    out_var.name, s[-1].__type_info__(),
                    out_var.dtype.__type_info__(), prog))
        if UNK_VARIADIC in s[:-1]:
            msg = "Skip type checking for UNK_VARIADIC. Output shape: {} vs expected shape: {}"
            logging.debug(msg.format(out_var.shape, s[:-1]))
            continue
        expected_shape = s[:-1]
        msg = "Output {} shape: expect {}, got {}. Program:\n{}".format(
            out_var.name, expected_shape, out_var.shape, prog)
        # No more variadic here.
        if len(out_var.shape) != len(expected_shape):
            raise ValueError(msg)
        # replace UNK_SYM in out_var.shape.
        output_shape = [
            0 if es == UNK_SYM else os
            for os, es in zip(out_var.shape, expected_shape)
        ]
        expected_shape = [0 if es == UNK_SYM else es for es in expected_shape]
        # convert float etc to int.
        output_shape = [i if is_symbolic(i) else int(i) for i in output_shape]
        expected_shape = [
            i if is_symbolic(i) else int(i) for i in expected_shape
        ]
        if output_shape != expected_shape:
            raise ValueError(msg)

    if use_cpu_only:
        compute_unit = ct.ComputeUnit.CPU_ONLY
    else:
        compute_unit = ct.ComputeUnit.ALL

    mlmodel = ct_convert(prog,
                         converter=converter,
                         source="milinternal",
                         convert_to=backend,
                         inputs=inputs,
                         compute_units=compute_unit,
                         minimum_deployment_target=minimum_deployment_target)

    if frontend_only:
        return mlmodel

    if expected_outputs:
        assert len(output_vars) == len(expected_outputs), (
            "Provided expected_outputs {}"
            " should match number of output"
            " variables {}".format(len(expected_outputs), len(output_vars)))

        expected_outputs = {
            name: val
            for name, val in zip(output_names, expected_outputs)
        }

    compare_backend(mlmodel=mlmodel,
                    input_key_values=input_values,
                    expected_outputs=expected_outputs,
                    atol=atol,
                    rtol=rtol,
                    also_compare_shapes=also_compare_shapes,
                    dtype=backend[1])

    return mlmodel
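
A minimal smoke-test sketch using this helper; the relu model, numpy reference, and the types import are illustrative assumptions:

import numpy as np
from coremltools.converters.mil import Builder as mb
from coremltools.converters.mil.mil import types

x_val = np.array([[-1.0, 2.0, -3.0]], dtype=np.float32)
input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}

def build(x):
    return mb.relu(x=x)

# Compares Core ML predictions against the numpy reference.
run_compare_builder(
    build,
    input_placeholders,
    input_values={"x": x_val},
    expected_output_types=(1, 3, types.fp32),
    expected_outputs=np.maximum(x_val, 0.0),
)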