def apply(self, prog):
        user_provided_output_types = prog.main_output_types
        main_func = prog.functions["main"]
        output_vars = main_func.outputs
        if user_provided_output_types is None or len(
                user_provided_output_types) == 0:
            return
        if len(output_vars) != len(user_provided_output_types):
            msg = "Number of outputs provided by the user, which is {}, " \
                  "does not match the number of outputs generated by the model, which is {}"
            raise ValueError(
                msg.format(len(user_provided_output_types), len(output_vars)))

        new_outputs = []
        for i, output_type in enumerate(user_provided_output_types):
            required_output_dtype = output_type.dtype
            output_var = output_vars[i]
            if required_output_dtype is None or \
                not (types.is_tensor(output_var.sym_type) or types.is_scalar(output_var.sym_type)) or \
                required_output_dtype == output_var.dtype:
                # no need to update the output var's dtype in this case
                new_outputs.append(output_var)
            else:
                output_var_name = output_var.name
                output_var.set_name(output_var_name + "_type_" +
                                    types.builtin_to_string(output_var.dtype))
                with main_func:
                    output_var = mb.cast(
                        x=output_var,
                        dtype=types.builtin_to_string(required_output_dtype))
                    output_var.set_name(output_var_name)
                new_outputs.append(output_var)

        main_func.set_outputs(new_outputs)
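
For context, a minimal sketch of the idiom the pass above relies on (assumed usage of the coremltools MIL builder seen throughout these examples, not part of the pass itself): mb.cast takes its target dtype as a string, so a builtin dtype object is first converted with types.builtin_to_string.

from coremltools.converters.mil import Builder as mb
from coremltools.converters.mil.mil import types

@mb.program(input_specs=[mb.TensorSpec(shape=(2, 3), dtype=types.fp32)])
def toy_prog(x):
    # Cast fp32 -> fp16 by passing the string form of the builtin dtype.
    return mb.cast(x=x, dtype=types.builtin_to_string(types.fp16))
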
Example #2
def _promoted_var(op, var, promoted_dtype):
    if var.val is None:
        x = mb.cast(
            x=var, dtype=builtin_to_string(promoted_dtype), name=var.name + "_promoted", before_op=op
        )
    else:
        const_value_after_cast = cast_op_class.get_cast_value(var, builtin_to_string(promoted_dtype))
        x = mb.const(val=const_value_after_cast, name=var.name + "_promoted", before_op=op)
    return x
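
As a side note, the promoted_dtype argument above is typically computed with promote_types, the helper also used in the test example further down this page; a hedged illustration, assuming it is importable from the types package:

from coremltools.converters.mil.mil.types import promote_types
from coremltools.converters.mil.mil import types

# Promoting an integer dtype with a float dtype yields the float dtype.
assert promote_types(types.int32, types.fp32) == types.fp32
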
Example #3
def shape_str(self):
    annotation = ""
    if self.val is not None:
        annotation = "*"
    elif self.sym_val is not None:
        annotation = "^"
    shape_str = str(self.shape)[:-1]  # trim the ")"
    if self.rank > 1:
        shape_str += ", "
    if types.builtin_to_string(self.dtype) is None:
        shape_str += ")" + annotation
    else:
        shape_str += types.builtin_to_string(self.dtype) + ")" + annotation
    return shape_str
Example #4
    def test_mixed_input_dtypes(self, op, x_dtype, y_dtype):
        @mb.program(input_specs=[
            mb.TensorSpec(shape=(10, 10), dtype=string_to_builtin(x_dtype)),
            mb.TensorSpec(shape=(10, 10), dtype=string_to_builtin(y_dtype))
        ])
        def prog(x, y):
            x = getattr(mb, op)(x=x, y=y)
            return x

        assert get_op_types_in_program(prog) == [op]

        _, _, block = apply_pass_and_basic_check(
            prog, "mil_backend::homogenize_input_dtypes")

        assert get_op_types_in_program(prog) == ["cast", op]

        promoted_dtype = promote_types(string_to_builtin(x_dtype),
                                       string_to_builtin(y_dtype))

        # Asserting cast configuration
        cast = block.find_ops(op_type="cast")[0]
        assert cast.dtype.val == builtin_to_string(promoted_dtype)
        assert len(cast.outputs) == 1
        assert len(cast.outputs[0].child_ops) == 1
        assert cast.outputs[0].child_ops[0].op_type == op
Example #5
def _adjust_main_outputs(func):
    new_outputs = []
    for output_var in func.outputs:
        output_type = output_var.sym_type
        if (types.is_tensor(output_type) or types.is_scalar(output_type)) \
            and output_var.dtype != types.fp32 \
            and output_var.dtype != types.int32 \
            and (func.opset_version < target.iOS16 or output_var.dtype != types.fp16):
            # since fp16 is a valid output type for coreml from ios16 spec onwards, no need to cast
            output_dtype_str = types.builtin_to_string(output_var.dtype)
            supported_dtypes = "{int32, fp32, fp64}" if func.opset_version < target.iOS16 else \
                                "{int32, fp16, fp32, fp64}"
            msg = "\nOutput '{}' is of dtype {}. The " +\
                           "CoreML runtime does not support outputs with this dtype " +\
                           "(supported dtypes are: {}). This output will be assigned a dtype " +\
                           "of fp32. A cast will be inserted at the end of the program to convert" +\
                           "the original output dtype to the dtype supported by the CoreML runtime.\n"
            if output_var.dtype == types.fp16:
                msg += "fp16 dtype output is supported if function.opset_version is chosen to be at least " \
                       "iOS16/macOS13.\n"
            logging.warning(
                msg.format(
                    output_var.name,
                    output_dtype_str,
                    supported_dtypes,
                ))

            output_var_name = output_var.name
            output_var.set_name(output_var_name + "__pre__output__fp32__cast")
            # Convert the output to fp32, and add a cast.
            output_var = mb.cast(x=output_var, dtype="fp32")
            output_var.set_name(output_var_name)
        new_outputs.append(output_var)
    func.set_outputs(new_outputs)
def alert_return_type_cast(prog):
    """
    prog: Program

    # The NN backend always implicitly casts return types to fp32. Detect any
    # return types that are not builtin.fp32 and alert the user of the
    # implicit casting. This pass must run at the end. Example:
    #
    # Given:
    #
    #    main(%x: (2, 3, fp32)) {
    #      block0() {
    #        %shape_0: (2,i32)* = const(val=[4, 7])
    #      } -> (%shape_0)
    #    }
    #
    # (Notice that %shape_0 is i32, not fp32)
    #
    # Result:
    #
    # The same program.
    #
    # Alert messages about %shape_0 being implicitly cast from i32 to fp32.
    #
    # Comment: This pass should do more proper casting as the backend supports more types.
    """
    for f_name, f in prog.functions.items():
        for v in f.outputs:
            if isinstance(v, Var) and v.dtype != types.fp32:
                msg = (
                    "Output var {} of type {} in function {} is " + "cast to type fp32"
                )
                logging.warning(
                    msg.format(v.name, types.builtin_to_string(v.dtype), f_name)
                )
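
To make the docstring's scenario concrete, a minimal sketch (assumed builder usage; the const value mirrors the docstring) of a program that would trigger the warning:

import numpy as np
from coremltools.converters.mil import Builder as mb

@mb.program(input_specs=[mb.TensorSpec(shape=(2, 3))])
def prog(x):
    # The only output is an i32 const, so alert_return_type_cast logs a
    # warning that it will be implicitly cast to fp32 by the NN backend.
    return mb.const(val=np.array([4, 7], dtype=np.int32))

alert_return_type_cast(prog)
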
Example #7
def _tensor_field_by_type(tensor_val, builtin_type):
    if builtin_type == types.bool:
        return tensor_val.bools.values
    elif types.is_int(builtin_type):
        if (builtin_type == types.int64 or builtin_type == types.uint64):
            return tensor_val.longInts.values
        if builtin_type in (types.int8, types.uint8, types.uint32):
            return tensor_val.bytes.values
        return tensor_val.ints.values
    elif types.is_float(builtin_type):
        if (builtin_type == types.fp64):
            return tensor_val.doubles.values
        elif (builtin_type == types.fp32):
            return tensor_val.floats.values
        elif (builtin_type == types.fp16):
            return tensor_val.bytes.values
        else:
            raise TypeError(
                "Unsupported float dtype for MIL proto serialization: {}".
                format(builtin_to_string(builtin_type)))
    elif builtin_type == types.str:
        return tensor_val.strings.values
    else:
        raise NotImplementedError("Unimplemented tensor type for: " +
                                  str(builtin_type))
def apply(self, prog):
    for f_name, f in prog.functions.items():
        for v in f.outputs:
            if isinstance(v, Var) and v.dtype != types.fp32:
                msg = ("Output var {} of type {} in function {} is " +
                       "cast to type fp32")
                logging.warning(
                    msg.format(v.name, types.builtin_to_string(v.dtype),
                               f_name))
def _adjust_ops(block):
    len_block = len(block.operations)
    i = 0
    while i < len_block:
        op = block.operations[i]

        # Classifier is a special exception to this rule. It can output 64 bit integer labels.
        # Classifier should be inserted after running this pass.
        if op.op_type == "classify":
            raise ValueError("ML Program backend pass adjust_to_supported_types does not support programs" +\
                             " that have already added a classify op.")

        for subblock in op.blocks:
            _adjust_block_inputs(subblock)
            _adjust_ops(subblock)

        for var in op.outputs:
            _adjust_var(var)

        # Cast ops have a param (dtype) that should match the output dtype.
        # If the output dtype or input dtype was previously adjusted,
        # the cast op must change or be removed in kind.
        if op.op_type == "cast":
            output_type_str = _types.builtin_to_string(op.outputs[0].dtype)
            if op.outputs[0].dtype == op.x.dtype:
                # The type of the input or output of this cast op was changed per the rules
                # defined in the top level comment for adjust_io_to_supported_types.
                #
                # That changed output type is the same type as the input to the cast
                # op. Therefore, regardless of whether the user created this cast or
                # not, it is now redundant (noop), and should be removed.
                #
                # The removal isn't covered by the main cast
                # optimization pass since that pass runs before this pass.
                block.replace_uses_of_var_after_op(anchor_op=op,
                                                   old_var=op.outputs[0],
                                                   new_var=op.x)
                block.remove_ops([op])
                len_block = len(block.operations)
                i -= 1
            elif output_type_str != op.dtype.val:
                # The type of the output of this cast op was changed per the rules
                # defined in the top level comment for adjust_io_to_supported_types.
                #
                # This cast is meaningful, and the "dtype" param now differs from the output
                # type. Replace the dtype cast with a new cast op with a matching dtype param.
                with block:
                    new_cast_out = _mb.cast(x=op.x,
                                            dtype=output_type_str,
                                            before_op=op)
                    block.replace_uses_of_var_after_op(anchor_op=op,
                                                       old_var=op.outputs[0],
                                                       new_var=new_cast_out)
                block.remove_ops([op])
                len_block = len(block.operations)
        i = i + 1
    return block
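
A hedged illustration of the no-op branch above (assumed builder usage, not taken from the pass): once an input or output dtype has been rewritten to fp32, a cast like the one below ends up with identical input and output dtypes, so _adjust_ops reroutes its consumers to op.x and removes it.

from coremltools.converters.mil import Builder as mb

@mb.program(input_specs=[mb.TensorSpec(shape=(4,))])
def prog(x):
    # x is already fp32, so this cast is redundant; the pass would replace
    # uses of its output with x itself and delete the op.
    return mb.cast(x=x, dtype="fp32")
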
Example #10
def cast_to_framework_io_dtype(var, is_output):
    if var.dtype == types.fp32:
        return ft.ArrayFeatureType.ArrayDataType.FLOAT32
    elif var.dtype == types.int32:
        return ft.ArrayFeatureType.ArrayDataType.INT32
    else:
        ioname = "Output " if is_output else "Input "
        ioname2 = "outputs" if is_output else "inputs"
        raise NotImplementedError(ioname + var.name + " has data type " + builtin_to_string(var.dtype) + \
                                  ". ML Program models only support fp32 and int32 " + ioname2 + ".")
Example #11
def shape_str(self):
    length = "?"
    if not self.dynamic_length:
        length = str(self.init_length)
    if self._elem_type == types.unknown:
        return "List[{}, unknown]".format(length)
    elem_shape = self._elem_type.get_shape()
    elem_dtype = self._elem_type.get_primitive()
    shape_str = str(elem_shape)[:-1]  # trim the ")"
    if len(elem_shape) > 1:
        shape_str += ", "
    shape_str += types.builtin_to_string(elem_dtype) + ")"
    return "List[{}, {}]".format(length, shape_str)
Example #12
def _adjust_main_inputs(func):
    first_op = func.operations[0] if len(func.operations) > 0 else None
    for input_name, input_var in func.inputs.items():
        if (types.is_tensor(input_var.sym_type) or types.is_scalar(input_var.sym_type)) \
             and input_var.dtype != types.fp32 \
             and input_var.dtype != types.int32:
            input_dtype_str = types.builtin_to_string(input_var.dtype)
            if types.is_int(input_var.dtype):
                # Replace non-int32 input type with int32.
                logging.warning("Input" + input_var.name + " is of dtype " + input_dtype_str +\
                               ". Only integer variables of bit width 32 are supported by the CoreML runtime. " +\
                               "This input will be assigned a dtype of int32. " +\
                               "No cast will be inserted; the previous dtype will be replaced.")
                _adjust_var_dtype_helper(input_var, types.int32)
            elif input_var.dtype == types.fp64:
                # Replace float64 input type with fp32.
                logging.warning("Input '" + input_var.name + "' is of dtype fp64. 64 bit float inputs are " +\
                               "not supported by ML program models. This input will be assigned a dtype " +\
                               "of fp32. No cast will be inserted; the previous dtype will be replaced.")
                _adjust_var_dtype_helper(input_var, types.fp32)
            elif input_var.dtype == types.fp16 \
                 and func.opset_version >= target.iOS16:
                pass  # do nothing, since fp16 is a valid input type for CoreML
            else:
                # This is some other dtype. Change the type to fp32 and add a cast.
                # This is only a limitation of main--other functions do not represent CoreML model inputs
                # and do not have the same limitation on input types.
                supported_dtypes = "{int32, fp32, fp64}" if func.opset_version < target.iOS16 else \
                                    "{int32, fp16, fp32, fp64}"
                msg = "\nInput '{}' is of dtype {}. The " +\
                               "CoreML runtime does not support inputs with this dtype " +\
                               "(supported dtypes are: {}). This input will be assigned a dtype of " +\
                               "fp32. A cast will be inserted at the beginning of the program to " +\
                               "convert the input to the originally defined dtype.\n"
                if input_var.dtype == types.fp16:
                    msg += "fp16 dtype input is supported if the function.opset_version is chosen to be at least " \
                           "iOS16/macOS13.\n"
                logging.warning(
                    msg.format(input_var.name, input_dtype_str,
                               supported_dtypes))

                casted_input_var = mb.cast(x=input_var,
                                           dtype=input_dtype_str,
                                           before_op=first_op)
                func.replace_uses_of_var_after_op(
                    anchor_op=casted_input_var.op,
                    old_var=input_var,
                    new_var=casted_input_var)
                _adjust_var_dtype_helper(input_var, types.fp32)
Example #13
def TensorListReserve(context, node):
    element_shape = context[node.inputs[0]]
    num_elements = context[node.inputs[1]]
    element_dtype = node.attr.get("element_dtype")
    dtype = builtin_to_string(element_dtype)

    if element_shape is not None and all(
            _np.atleast_1d(element_shape.val) != -1):
        ls = mb.make_list(
            init_length=num_elements,
            elem_shape=tuple(element_shape.val.tolist()),
            dynamic_length=num_elements.val is None,
            dtype=dtype,
            name=node.name,
        )
    else:
        ls = mb.tf_make_list(init_length=num_elements,
                             dtype=dtype,
                             dynamic_length=num_elements.val is None,
                             name=node.name)
    context.add(node.name, ls)
def _adjust_main_outputs(func):
    new_outputs = []
    for output_var in func.outputs:
        output_type = output_var.sym_type
        if (_types.is_tensor(output_type) or _types.is_scalar(output_type)) \
            and output_var.dtype != _types.fp32 \
            and output_var.dtype != _types.int32:
            output_dtype_str = _types.builtin_to_string(output_var.dtype)
            _warnings.warn("Output" + output_var.name + " is of dType " + output_dtype_str + ". The " +\
                           "CoreML runtime does not support outputs with this dType (only int32 and " +\
                           "fp32 are supported for outputs). This output will be assigned a dType " +\
                           "of fp32. A cast will be inserted at the end of the program to convert" +\
                           "the original output dType to the dType supported by the CoreML runtime.")

            output_var_name = output_var.name
            output_var.set_name(output_var_name + "__pre__output__fp32__cast")
            # Convert the output to fp32, and add a cast.
            with func:
                output_var = _mb.cast(x=output_var, dtype="fp32")
                output_var.set_name(output_var_name)
        new_outputs.append(output_var)
    func.set_outputs(new_outputs)
def _adjust_main_inputs(func):
    first_op = func.operations[0] if len(func.operations) > 0 else None
    for input_name, input_var in func.inputs.items():
        if (_types.is_tensor(input_var.sym_type) or _types.is_scalar(input_var.sym_type)) \
             and input_var.dtype != _types.fp32 \
             and input_var.dtype != _types.int32:
            input_dtype_str = _types.builtin_to_string(input_var.dtype)
            if _types.is_int(input_var.dtype):
                # Replace non-int32 input type with int32.
                _warnings.warn("Input" + input_var.name + " is of dType " + input_dtype_str +\
                               ". Only integer variables of bit width 32 are supported by the CoreML runtime. " +\
                               "This input will be assigned a dType of int32. " +\
                               "No cast will be inserted; the previous dtype will be replaced.")
                _adjust_var_dtype_helper(input_var, _types.int32)
            elif input_var.dtype == _types.fp64:
                # Replace float64 input type with fp32.
                _warnings.warn("Input" + input_var.name + " is of dtype fp64. 64 bit float inputs are " +\
                               "not supported by ML program models. This input will be assigned a dType " +\
                               "of fp32. No cast will be inserted; the previous dtype will be replaced.")
                _adjust_var_dtype_helper(input_var, _types.fp32)
            else:
                # This is some other dType. Change the type to fp32 and add a cast.
                # This is only a limitation of main--other functions do not represent CoreML model inputs
                # and do not have the same limitation on input types.
                _warnings.warn("Input" + input_var.name + " is of dType " + input_dtype_str + ". The " +\
                               "CoreML runtime does not support inputs with this dType (only fp32 and " +\
                               "int32 inputs are supported). This input will be assigned a dType of " +\
                               "fp32. A cast will be inserted at the beginning of the program to " +\
                               "convert the input to the originally defined dType.")
                with func:
                    casted_input_var = _mb.cast(x=input_var,
                                                dtype=input_dtype_str,
                                                before_op=first_op)
                    func.replace_uses_of_var_after_op(
                        anchor_op=casted_input_var.op,
                        old_var=input_var,
                        new_var=casted_input_var)
                    _adjust_var_dtype_helper(input_var, _types.fp32)
Example #16
def _adjust_var(var):
    """
    Changes the dtype of the provided variable according
    to the rules outlined in the top level pass comment
    (see adjust_io_to_supported_types).
    """
    if (types.is_tensor(var.sym_type) or types.is_scalar(var.sym_type)) \
        and var.dtype not in __RUNTIME_SUPPORTED_TYPES:
        dtype_str = types.builtin_to_string(var.dtype)
        if types.is_int(var.dtype):
            # Replace non-int32 input type with int32.
            logging.warning("Input '" + var.name + "' is of dtype " + dtype_str +\
                           ". Only integer variables of bit width 32 are supported by the CoreML runtime. " +\
                           "This input will be assigned a dtype of int32. " +\
                           "No cast will be inserted; the previous dtype will be replaced.")
            _adjust_var_dtype_helper(var, types.int32)
        else:
            # This is some other unsupported dtype. Change the input type to fp32.
            logging.warning("Var " + var.name + " is of dtype " + dtype_str + ". The CoreML runtime " +\
                           "does not support this dtype (only fp16, fp32, bool, and int32 are supported). " +\
                           "This input will be assigned a dtype of fp32. No cast will be inserted; " +\
                           "the previous dtype will be replaced.")
            _adjust_var_dtype_helper(var, types.fp32)
Example #17
def TensorListFromTensor(context, node):
    value = context[node.inputs[0]]
    element_shape = context[node.inputs[1]]
    element_dtype = node.attr.get("element_dtype")
    dtype_str = builtin_to_string(element_dtype)

    length = mb.shape(x=value)
    length = mb.slice_by_index(x=length,
                               begin=[0],
                               end=[1],
                               squeeze_mask=[True])

    if element_shape is not None and all(
            _np.atleast_1d(element_shape.val) != -1):
        ls = mb.make_list(init_length=length,
                          elem_shape=tuple(element_shape.val.tolist()),
                          dtype=dtype_str)
    else:
        ls = mb.tf_make_list(init_length=length, dtype=dtype_str)

    indices = mb.range_1d(end=length, start=0, step=1)
    ls = mb.list_scatter(ls=ls, indices=indices, value=value, name=node.name)
    context.add(node.name, ls)
Example #18
def type_str(self):
    return 'tensor or scalar of dtype from type domain ' + str([types.builtin_to_string(v) for v in self.type_domain])
Example #19
def _is_same_dtype(dtype1, dtype2):
    return (dtype1 is dtype2) or (builtin_to_string(dtype1) == builtin_to_string(dtype2))
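
A quick hedged check (assumed behaviour of the helpers used throughout these examples) of the round trip that makes the string comparison in _is_same_dtype a valid equality test:

from coremltools.converters.mil.mil.types import builtin_to_string, string_to_builtin

for name in ("bool", "int32", "fp16", "fp32"):
    # string_to_builtin and builtin_to_string are inverses for supported dtypes.
    assert builtin_to_string(string_to_builtin(name)) == name
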
Example #20
def is_tensor_or_scalar_of(self, dtype: str):
    return (types.is_tensor(self.sym_type) or types.is_scalar(
        self.sym_type)) and builtin_to_string(self.dtype) == dtype
def _promoted_var(op, var, promoted_dtype):
    x = mb.cast(x=var,
                dtype=builtin_to_string(promoted_dtype),
                name=var.name + "_promoted",
                before_op=op)
    return x