Example #1
def check_output_shapes(x, node):
    """
    x: list[Var] or tuple[Var]
    node: ParsedTFNode
    """
    if isinstance(x, ListVar):
        # No check on list.
        return
    if not isinstance(x, (list, tuple)):
        x = [x]
    tf_shapes = node.attr.get("_output_shapes", None)
    if tf_shapes is None:
        return
    inf_shapes = []
    for y in x:
        if y is None:
            msg = "TF convert returns None type in TF node {}"
            raise TypeError(msg.format(node.name))
        if types.is_tensor(y.sym_type):
            inf_shapes.append(list(y.shape))
        elif types.is_scalar(y.sym_type):
            inf_shapes.append([])
        else:
            msg = "Output type {} not understood"
            raise ValueError(msg.format(y))

    for t, s in zip(tf_shapes, inf_shapes):
        if not compatible_shapes(t, s):
            msg = ("Op {} ({}) type inference ({}) and TF output shape " +
                   "({}) mismatch")
            raise ValueError(msg.format(node.name, node.op, s, t))
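The helper compatible_shapes is not shown in this example. A minimal sketch of the kind of check it appears to perform, assuming unknown or symbolic dimensions act as wildcards (the converter's real helper may differ):

def _is_unknown_dim(d):
    # Assumption: unknown/symbolic dims show up as None, -1, or non-int objects.
    return d is None or d == -1 or not isinstance(d, int)

def compatible_shapes(tf_shape, inf_shape):
    # Shapes are compatible when ranks agree and every pair of known dims agrees;
    # an unknown dim on either side matches anything.
    if len(tf_shape) != len(inf_shape):
        return False
    return all(_is_unknown_dim(t) or _is_unknown_dim(s) or t == s
               for t, s in zip(tf_shape, inf_shape))

compatible_shapes([1, None, 224], [1, 3, 224])  # -> True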
Example #2
def _adjust_main_outputs(func):
    new_outputs = []
    for output_var in func.outputs:
        output_type = output_var.sym_type
        if (types.is_tensor(output_type) or types.is_scalar(output_type)) \
            and output_var.dtype != types.fp32 \
            and output_var.dtype != types.int32 \
            and (func.opset_version < target.iOS16 or output_var.dtype != types.fp16):
            # since fp16 is a valid output type for coreml from ios16 spec onwards, no need to cast
            output_dtype_str = types.builtin_to_string(output_var.dtype)
            supported_dtypes = "{int32, fp32, fp64}" if func.opset_version < target.iOS16 else \
                                "{int32, fp16, fp32, fp64}"
            msg = "\nOutput '{}' is of dtype {}. The " +\
                           "CoreML runtime does not support outputs with this dtype " +\
                           "(supported dtypes are: {}). This output will be assigned a dtype " +\
                           "of fp32. A cast will be inserted at the end of the program to convert" +\
                           "the original output dtype to the dtype supported by the CoreML runtime.\n"
            if output_var.dtype == types.fp16:
                msg += "fp16 dtype output is supported if function.opset_version is chosen to be at least " \
                       "iOS16/macOS13.\n"
            logging.warning(
                msg.format(
                    output_var.name,
                    output_dtype_str,
                    supported_dtypes,
                ))

            output_var_name = output_var.name
            output_var.set_name(output_var_name + "__pre__output__fp32__cast")
            # Convert the output to fp32, and add a cast.
            output_var = mb.cast(x=output_var, dtype="fp32")
            output_var.set_name(output_var_name)
        new_outputs.append(output_var)
    func.set_outputs(new_outputs)
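The fp16 branch in the warning above has a user-facing counterpart: from iOS16/macOS13 onward, fp16 outputs can be requested at conversion time so no cast is inserted. A sketch using the public coremltools API, assuming coremltools 6+ and torch are available (the tiny model is only a placeholder):

import numpy as np
import torch
import coremltools as ct

class TinyNet(torch.nn.Module):
    def forward(self, x):
        return x * 2.0

traced = torch.jit.trace(TinyNet().eval(), torch.rand(1, 3, 8, 8))

# Requesting fp16 inputs/outputs is only valid when the deployment target is iOS16+.
mlmodel = ct.convert(
    traced,
    inputs=[ct.TensorType(name="x", shape=(1, 3, 8, 8), dtype=np.float16)],
    outputs=[ct.TensorType(dtype=np.float16)],
    minimum_deployment_target=ct.target.iOS16,
)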
Example #3
def _does_block_contain_symbolic_shape(block):
    for op in block.operations:
        for b in op.blocks:
            if _does_block_contain_symbolic_shape(b):
                return True
        for out in op.outputs:
            if types.is_tensor(out.sym_type):
                shape = out.sym_type.get_shape()
                if any_symbolic(shape):
                    return True
            elif types.is_scalar(out.sym_type) or types.is_str(
                    out.sym_type):
                if is_symbolic(out.val):
                    return True
            elif types.is_list(out.sym_type):
                if types.is_tensor(out.elem_type):
                    if any_symbolic(out.elem_type.get_shape()):
                        return True
                else:
                    raise NotImplementedError(
                        "'{}' type in a list not handled".format(
                            out.elem_type))
            else:
                raise NotImplementedError(
                    "'{}' type is not handled".format(out.sym_type))
    return False
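any_symbolic and is_symbolic come from the MIL type utilities. A minimal self-contained sketch of what they check, assuming (as in MIL) that symbolic dimensions are represented by sympy expressions:

import sympy as sm

def is_symbolic(val):
    # A value is symbolic if it is a sympy expression rather than a concrete number.
    return isinstance(val, sm.Basic)

def any_symbolic(x):
    # True if any element of an iterable (e.g., a shape tuple) is symbolic.
    return any(is_symbolic(d) for d in x)

any_symbolic((1, sm.Symbol("is0"), 224))  # -> True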
Example #4
def types_to_proto(valuetype):
    if types.is_tensor(valuetype):
        primitive = types_to_proto_primitive(valuetype.get_primitive())
        return create_valuetype_tensor(valuetype.get_shape(), primitive)
    elif types.is_tuple(valuetype):
        v_type = pm.ValueType()
        t_type = v_type.tupleType
        for t in valuetype.T:
            new_v_type = t_type.types.add()
            new_v_type.CopyFrom(types_to_proto(t))
        return v_type
    elif types.is_list(valuetype):
        elem = valuetype.T[0]
        length = valuetype.T[1]
        if types.is_tensor(elem):
            dtype = types_to_proto_primitive(elem.get_primitive())
            elem_shape = elem.get_shape()
        elif types.is_scalar(elem):
            dtype = types_to_proto_primitive(elem)
            elem_shape = ()
        elif types.is_str(elem):
            dtype = types_to_proto_primitive(elem)
            elem_shape = ()
        else:
            raise NotImplementedError(
                "Only list of either tensors or scalars supported. "
                "Got element of type {}".format(elem.__type_info__()))
        return create_valuetype_list(length=length,
                                     elem_shape=elem_shape,
                                     dtype=dtype)
    elif types.is_dict(valuetype):
        return create_valuetype_dict(valuetype.T[0], valuetype.T[1])
    else:
        return create_valuetype_scalar(types_to_proto_primitive(valuetype))
Example #5
    def apply(self, prog):
        user_provided_output_types = prog.main_output_types
        main_func = prog.functions["main"]
        output_vars = main_func.outputs
        if user_provided_output_types is None or len(
                user_provided_output_types) == 0:
            return
        if len(output_vars) != len(user_provided_output_types):
            msg = "Number of outputs provided by the user, which is {}, " \
                  "does not match the number of outputs generated by the model, which is {}"
            raise ValueError(
                msg.format(len(user_provided_output_types), len(output_vars)))

        new_outputs = []
        for i, output_type in enumerate(user_provided_output_types):
            required_output_dtype = output_type.dtype
            output_var = output_vars[i]
            if required_output_dtype is None or \
                not (types.is_tensor(output_var.sym_type) or types.is_scalar(output_var.sym_type)) or \
                required_output_dtype == output_var.dtype:
                # no need to update the output var's dtype in this case
                new_outputs.append(output_var)
            else:
                output_var_name = output_var.name
                output_var.set_name(output_var_name + "_type_" +
                                    types.builtin_to_string(output_var.dtype))
                with main_func:
                    output_var = mb.cast(
                        x=output_var,
                        dtype=types.builtin_to_string(required_output_dtype))
                    output_var.set_name(output_var_name)
                new_outputs.append(output_var)

        main_func.set_outputs(new_outputs)
Example #6
def _adjust_main_inputs(func):
    first_op = func.operations[0] if len(func.operations) > 0 else None
    for input_name, input_var in func.inputs.items():
        if (types.is_tensor(input_var.sym_type) or types.is_scalar(input_var.sym_type)) \
             and input_var.dtype != types.fp32 \
             and input_var.dtype != types.int32:
            input_dtype_str = types.builtin_to_string(input_var.dtype)
            if types.is_int(input_var.dtype):
                # Replace non-int32 input type with int32.
                logging.warning("Input" + input_var.name + " is of dtype " + input_dtype_str +\
                               ". Only integer variables of bit width 32 are supported by the CoreML runtime. " +\
                               "This input will be assigned a dtype of int32. " +\
                               "No cast will be inserted; the previous dtype will be replaced.")
                _adjust_var_dtype_helper(input_var, types.int32)
            elif input_var.dtype == types.fp64:
                # Replace float64 input type with fp32.
                logging.warning("Input '" + input_var.name + "' is of dtype fp64. 64 bit float inputs are " +\
                               "not supported by ML program models. This input will be assigned a dtype " +\
                               "of fp32. No cast will be inserted; the previous dtype will be replaced.")
                _adjust_var_dtype_helper(input_var, types.fp32)
            elif input_var.dtype == types.fp16 \
                 and func.opset_version >= target.iOS16:
                pass  # do nothing, since fp16 is a valid input type for CoreML
            else:
                # This is some other dtype. Change the type to fp32 and add a cast.
                # This is only a limitation of main--other functions do not represent CoreML model inputs
                # and do not have the same limitation on input types.
                supported_dtypes = "{int32, fp32, fp64}" if func.opset_version < target.iOS16 else \
                                    "{int32, fp16, fp32, fp64}"
                msg = "\nInput '{}' is of dtype {}. The " +\
                               "CoreML runtime does not support inputs with this dtype " +\
                               "(supported dtypes are: {}). This input will be assigned a dtype of " +\
                               "fp32. A cast will be inserted at the beginning of the program to " +\
                               "convert the input to the originally defined dtype.\n"
                if input_var.dtype == types.fp16:
                    msg += "fp16 dtype input is supported if the function.opset_version is chosen to be at least " \
                           "iOS16/macOS13.\n"
                logging.warning(
                    msg.format(input_var.name, input_dtype_str,
                               supported_dtypes))

                casted_input_var = mb.cast(x=input_var,
                                           dtype=input_dtype_str,
                                           before_op=first_op)
                func.replace_uses_of_var_after_op(
                    anchor_op=casted_input_var.op,
                    old_var=input_var,
                    new_var=casted_input_var)
                _adjust_var_dtype_helper(input_var, types.fp32)
Example #7
def _adjust_main_outputs(func):
    new_outputs = []
    for output_var in func.outputs:
        output_type = output_var.sym_type
        if (_types.is_tensor(output_type) or _types.is_scalar(output_type)) \
            and output_var.dtype != _types.fp32 \
            and output_var.dtype != _types.int32:
            output_dtype_str = _types.builtin_to_string(output_var.dtype)
            _warnings.warn("Output" + output_var.name + " is of dType " + output_dtype_str + ". The " +\
                           "CoreML runtime does not support outputs with this dType (only int32 and " +\
                           "fp32 are supported for outputs). This output will be assigned a dType " +\
                           "of fp32. A cast will be inserted at the end of the program to convert" +\
                           "the original output dType to the dType supported by the CoreML runtime.")

            output_var_name = output_var.name
            output_var.set_name(output_var_name + "__pre__output__fp32__cast")
            # Convert the output to fp32, and add a cast.
            with func:
                output_var = _mb.cast(x=output_var, dtype="fp32")
                output_var.set_name(output_var_name)
        new_outputs.append(output_var)
    func.set_outputs(new_outputs)
Example #8
def _adjust_main_inputs(func):
    first_op = func.operations[0] if len(func.operations) > 0 else None
    for input_name, input_var in func.inputs.items():
        if (_types.is_tensor(input_var.sym_type) or _types.is_scalar(input_var.sym_type)) \
             and input_var.dtype != _types.fp32 \
             and input_var.dtype != _types.int32:
            input_dtype_str = _types.builtin_to_string(input_var.dtype)
            if _types.is_int(input_var.dtype):
                # Replace non-int32 input type with int32.
                _warnings.warn("Input" + input_var.name + " is of dType " + input_dtype_str +\
                               ". Only integer variables of bit width 32 are supported by the CoreML runtime. " +\
                               "This input will be assigned a dType of int32. " +\
                               "No cast will be inserted; the previous dtype will be replaced.")
                _adjust_var_dtype_helper(input_var, _types.int32)
            elif input_var.dtype == _types.fp64:
                # Replace float64 input type with fp32.
                _warnings.warn("Input" + input_var.name + " is of dtype fp64. 64 bit float inputs are " +\
                               "not supported by ML program models. This input will be assigned a dType " +\
                               "of fp32. No cast will be inserted; the previous dtype will be replaced.")
                _adjust_var_dtype_helper(input_var, _types.fp32)
            else:
                # This is some other dtype. Change the type to fp32 and add a cast.
                # This is only a limitation of main--other functions do not represent CoreML model inputs
                # and do not have the same limitation on input types.
                _warnings.warn("Input '" + input_var.name + "' is of dtype " + input_dtype_str + ". The " +\
                               "CoreML runtime does not support inputs with this dtype (only fp32 and " +\
                               "int32 inputs are supported). This input will be assigned a dtype of " +\
                               "fp32. A cast will be inserted at the beginning of the program to " +\
                               "convert the input to the originally defined dtype.")
                with func:
                    casted_input_var = _mb.cast(x=input_var,
                                                dtype=input_dtype_str,
                                                before_op=first_op)
                    func.replace_uses_of_var_after_op(
                        anchor_op=casted_input_var.op,
                        old_var=input_var,
                        new_var=casted_input_var)
                    _adjust_var_dtype_helper(input_var, _types.fp32)
Example #9
def _adjust_var(var):
    """
    Changes the dtype of the provided variable according
    to the rules outlined in the top level pass comment
    (see adjust_io_to_supported_types).
    """
    if (types.is_tensor(var.sym_type) or types.is_scalar(var.sym_type)) \
        and var.dtype not in __RUNTIME_SUPPORTED_TYPES:
        dtype_str = types.builtin_to_string(var.dtype)
        if types.is_int(var.dtype):
            # Replace non-int32 input type with int32.
            logging.warning("Input '" + var.name + "' is of dtype " + dtype_str +\
                           ". Only integer variables of bit width 32 are supported by the CoreML runtime. " +\
                           "This input will be assigned a dtype of int32. " +\
                           "No cast will be inserted; the previous dtype will be replaced.")
            _adjust_var_dtype_helper(var, types.int32)
        else:
            # This is some other unsupported dtype. Change the input type to fp32.
            logging.warning("Var " + var.name + " is of dtype " + dtype_str + ". The CoreML runtime " +\
                           "does not support this dtype (only fp16, fp32, bool, and int32 are supported). " +\
                           "This input will be assigned a dtype of fp32. No cast will be inserted; " +\
                           "the previous dtype will be replaced.")
            _adjust_var_dtype_helper(var, types.fp32)
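__RUNTIME_SUPPORTED_TYPES is defined elsewhere in this pass. A plausible definition, inferred only from the warning text above ("only fp16, fp32, bool, and int32 are supported") rather than from the actual source:

# Inferred from the warning message; the real constant may list additional types.
__RUNTIME_SUPPORTED_TYPES = (types.fp16, types.fp32, types.int32, types.bool)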
Example #10
def _load_operation(context, op_spec):
    if not isinstance(op_spec, pm.Operation):
        raise TypeError("Invalid Operation spec object")

    op_type = op_spec.type
    if op_type == "const" or op_type.startswith("constexpr_"):
        if op_spec.blocks:
            raise ValueError("const / constexpr operation can't have any block")
        if op_spec.inputs:
            raise ValueError("const / constexpr operation can't have any input")

        inputs = {k: _load_value(context, v) for k, v in op_spec.attributes.items()}
        pymil_var = getattr(mb, op_type)(**inputs)
        context.register_var_with_name(op_spec.outputs[0].name, pymil_var)

    else:
        if op_type == "custom_layer":
            raise NotImplementedError(
                "Loading Custom Layer operation not yet implemented"
            )

        if op_spec.attributes:
            raise ValueError("Attributes on operation not supported")

        # The conversion steps from an operation proto to a PyMIL operation are as follows:

        # (i)   Convert the input arguments:
        #       In most cases, the input variables have already been created beforehand, so we can
        #       directly access them through the TranscriptionContext.
        #       There are cases, though, where the inputs are literal values. This can happen in the
        #       classify op spec. For that case, we directly create a constant variable.

        # (ii)  Create nested blocks for control flow operations:
        #       The Python functional input arguments for control flow ops cannot be recovered from the
        #       milproto -> pymil conversion; for instance, _body and _cond for mb.while_loop and
        #       _true_fn and _false_fn for mb.cond are not invertible.
        #       Hence, here we directly create the nested blocks from the proto, and set them on
        #       mb.while_loop.blocks / mb.cond.blocks.
        #       Note that, when creating a block, PyMIL requires an outer_op, which should be the control
        #       flow operation itself. However, in this approach the outer_op has not been created yet at
        #       the time the blocks are produced. Here, we make a "dummy outer_op", which passes the
        #       checks in PyMIL and provides enough information (such as the variables visible in the
        #       blocks, etc.) for the creation of the block.

        # (iii) Create the PyMIL operation using the inputs / blocks:
        #       Note that for the control flow cases, we create dummy functional inputs, and use the
        #       existing blocks to create the op.

        # (iv)  Set the outer_op for control flow:
        #       Once the operation is created, we replace the dummy outer_op with the legitimate one,
        #       making it a valid PyMIL program.

        inputs = {}
        for param_name, argument in op_spec.inputs.items():
            vars = []
            for binding in argument.arguments:
                binding_type = binding.WhichOneof("binding")
                if binding_type == "name":
                    vars.append(context.get_var_from_name(binding.name))
                elif binding_type == "value":
                    # We only support the list value for now (for the classifier use case)
                    value_spec = binding.value
                    assert value_spec.WhichOneof("value") == "immediateValue"
                    assert value_spec.immediateValue.WhichOneof("value") == "list"
                    list_value = _load_immediate_value(value_spec.immediateValue)
                    values = []
                    for value_spec in list_value:
                        values.append(_load_value(context, value_spec))
                    var = mb.const(val=mil_list(values))
                    vars.append(var)
                else:
                    raise NotImplementedError("Binding {} not yet implemented".format(binding_type))
            op_cls = _SSAOpRegistry._get_core_op_cls(op_type)
            if len(vars) == 1 and not isinstance(
                op_cls.input_spec.input_types[param_name], TupleInputType
            ):
                inputs[param_name] = vars[0]
            else:
                inputs[param_name] = vars

        blocks = _create_nested_blocks(context, op_spec)
        _set_inputs_for_control_flow_op(inputs, blocks, op_type)

        output_var = getattr(mb, op_type)(**inputs)
        if not isinstance(output_var, (tuple, list)):
            output_var = [output_var]

        if len(output_var) != len(op_spec.outputs):
            raise AssertionError(
                "Mismatch between number of outputs in operation specification vs PyMIL outputs"
            )

        for spec, var in zip(op_spec.outputs, output_var):
            context.register_var_with_name(spec.name, var)

            pymil_type = var.sym_type
            proto_type = proto_to_types(spec.type)
            if not types.is_compatible_type(pymil_type, proto_type):
                # We allow a corner case where the pymil has an 0 rank tensor and the spec produces a scalar
                if types.is_tensor(pymil_type) and types.is_scalar(proto_type):
                    if pymil_type.get_primitive() == proto_type:
                        continue
                raise AssertionError(
                    "Mismatch between var types in specification vs PyMIL"
                )

        _set_outer_op_for_nested_blocks(blocks, output_var[0].op)
Example #11
    def convert(self):
        _logging.info("Converting graph.")

        # This will hold the converted model.
        prog = self._prog

        # Construct placeholder for input to ssa function
        # This is where input renaming occurs
        ssa_func_inputs = OrderedDict()
        for index, (name, spec) in enumerate(self.graph.inputs.items()):
            placeholder = self._create_placeholder(spec)
            # Set ssa function input name to user defined name if provided.
            if spec.name is not None:
                name = spec.name
            self.inputs[index].name = name
            ssa_func_inputs[name] = placeholder
        prog.set_main_input_types(tuple(self.inputs))

        # Initialize the SSA for conversion
        with Function(ssa_func_inputs,
                      opset_version=self.opset_version) as ssa_func:

            # Map internal @self.graph.inputs to user specified @ssa_func_inputs
            # If @self.graph.inputs == @ssa_func_inputs this just adds the inputs
            # to the context.
            for internal_name, users_name in zip(self.graph.inputs.keys(),
                                                 ssa_func_inputs.keys()):
                input_var = ssa_func.inputs[users_name]
                if (types.is_tensor(input_var.sym_type) or types.is_scalar(input_var.sym_type)) \
                    and (input_var.dtype == types.fp16 or input_var.dtype == types.fp64):
                    # cast the input var to float32
                    # We need to do this because the type inference is very buggy when started from
                    # float16/float64 typed inputs. Until that is fixed in the following radar
                    # we cast all inputs of type float16/float64 to float32 as the first step.
                    # These casts will later get removed, if compute_precision=Float16 is
                    # provided, which will cause the FP16ComputePrecision pass to run.
                    # TODO: remove this when this radar is fixed: rdar://93731970
                    input_var = mb.cast(x=input_var, dtype="fp32")
                self.context.add(input_var, torch_name=internal_name)

            self.convert_const()

            # Add the rest of the operations
            convert_nodes(self.context, self.graph)

            graph_outputs = [self.context[name] for name in self.graph.outputs]

            # An output can be None when it's a None constant, which happens
            # in Fairseq MT.
            for g in graph_outputs:
                if g is None:
                    msg = "Droping output {} which is None"
                    _logging.warning(msg.format(g))
            graph_outputs = [g for g in graph_outputs if g is not None]

            # Output renaming occurs
            if self.outputs is not None:
                if len(self.outputs) != len(graph_outputs):
                    msg = "Number of outputs provided, {}, do not match the number of outputs detected in the model, {}."
                    raise ValueError(
                        msg.format(
                            len(self.outputs),
                            len(graph_outputs),
                        ))
            if self.output_names:
                for index, var in enumerate(graph_outputs):
                    if self.output_names[index] is not None:
                        output_rename = self.output_names[index]
                        var.name = output_rename

            ssa_func.set_outputs(graph_outputs)
            prog.add_function("main", ssa_func)
            if self.outputs is not None:
                prog.set_main_output_types(self.outputs)
        self.torch_passes(prog)
        return prog
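As the in-code comment notes, the fp32 casts inserted for fp16/fp64 inputs are removed again when the caller requests fp16 compute precision. A sketch of that call path with the public API, assuming coremltools 6+ and torch (model and shapes are illustrative):

import torch
import coremltools as ct

class TinyNet(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x)

traced = torch.jit.trace(TinyNet().eval(), torch.rand(1, 4))

# Converting to an ML program with fp16 compute precision triggers the
# FP16ComputePrecision pass mentioned in the comment above.
mlmodel = ct.convert(
    traced,
    inputs=[ct.TensorType(name="x", shape=(1, 4))],
    convert_to="mlprogram",
    compute_precision=ct.precision.FLOAT16,
)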
Example #12
def _adjust_var_dtype_helper(var, dtype):
    if (types.is_scalar(var.sym_type)):
        var._sym_type = dtype
    else:
        var._sym_type = types.tensor(dtype, var.sym_type.get_shape())
Example #13
 def _is_compatible(self, v):
     result = types.is_scalar(v.dtype) or types.is_tensor(v.dtype)
     if self.type_domain:
         result = result and (v.dtype in self.type_domain)
     return result
Example #14
 def _is_compatible(self, v):
     return (types.is_list(v.sym_type) or types.is_scalar(v.dtype)
             or types.is_tensor(v.dtype))
Example #15
 def _is_compatible(self, v):
     return types.is_scalar(v.dtype) or types.is_tensor(v.dtype)
Example #16
def load(prog, **kwargs):
    if "main" not in prog.functions:
        msg = "main function not found in program {}"
        raise ValueError(msg.format(prog))
    if len(prog.functions) != 1:
        msg = ("Program must have exactly one `main` function to "
               "convert to NN. Program: {}")
        raise ValueError(msg.format(prog))

    nn_backend_passes(prog)
    input_types = prog.main_input_types
    output_types = prog.main_output_types

    v1_inputs = []
    symbolic_inputs = {}
    for name, var in prog.functions["main"].inputs.items():
        if types.is_tensor(var.sym_type):
            sym_shape = var.sym_type.get_shape()
            if any_variadic(sym_shape):
                raise NotImplementedError("Variadic rank is not supported")
            if any_symbolic(sym_shape):
                user_specified = False
                for input_type in input_types:
                    if name == input_type.name:
                        sym_shape = input_type.shape.default
                        user_specified = True
                        break
                # Use dummy static shape, and will set it later.
                shape = [1 if is_symbolic(d) else d for d in sym_shape]
                if not user_specified:
                    symbolic_inputs[name] = sym_shape
            else:
                shape = sym_shape
            v1_inputs.append((name, Array(*shape)))
        elif types.is_scalar(var.sym_type):
            v1_inputs.append((name, Array(1)))
        else:
            raise NotImplementedError()

    v1_outputs = []
    for var in prog.functions["main"].outputs:
        if types.is_tensor(var.sym_type) or types.is_primitive(var.sym_type):
            # Disregard the output types
            v1_outputs.append((var.name, None))
        else:
            raise NotImplementedError()

    # create neural network builder
    builder = neural_network.NeuralNetworkBuilder(
        v1_inputs,
        v1_outputs,
        disable_rank5_shape_mapping=True,
        use_float_arraytype=True,
    )

    # Consts in V2 are added lazily to V1 by each op whenever needed.
    # `const_context` stores the const names we've added so far and avoids
    # adding a const more than once.
    # const_context: list[set of str] (const names for v1 & v2
    # (the same)). Note that in NN the outer layer is visible from the inner
    # layer, so const_context is simply a stack of sets.
    const_context = []
    # Iterate through ops and add to builder
    convert_ops(
        const_context,
        builder,
        prog.functions["main"].operations,
        prog.functions["main"].outputs,
    )

    proto = builder.spec
    # image input
    has_image_input = any([isinstance(s, ImageType) for s in input_types])
    if has_image_input:
        proto = _convert_to_image_input(proto,
                                        input_types,
                                        skip_model_load=kwargs.get(
                                            "skip_model_load", False))

    # image output
    if output_types is not None:
        assert len(output_types) == len(prog.functions["main"].outputs), \
                "number of MIL program outputs does not match the number of outputs provided by the user"
        for i, output_proto_desc in enumerate(proto.description.output):
            output_var = prog.functions["main"].outputs[i]
            if isinstance(output_types[i], ImageType):
                if not types.is_tensor(output_var.sym_type):
                    raise ValueError(
                        "Image output, '{}', is a scalar, but it should be a tensor of rank 4"
                        .format(output_var.name))
                shape = output_var.sym_type.get_shape()
                if any_variadic(shape):
                    raise ValueError(
                        "Variable rank model outputs, that are ImageTypes, are not supported"
                    )
                if any([is_symbolic(d) for d in shape]):
                    raise NotImplementedError(
                        "Image output '{}' has symbolic dimensions in its shape"
                        .format(output_var.name))
                _validate_image_input_output_shapes(
                    output_types[i].color_layout,
                    shape,
                    output_var.name,
                    is_input=False)
                clr_space = _get_colorspace_enum(output_types[i].color_layout)
                output_proto_desc.type.imageType.colorSpace = clr_space
                output_proto_desc.type.imageType.width = shape[-1]
                output_proto_desc.type.imageType.height = shape[-2]

    # classifier flag
    classifier_config = kwargs.get("classifier_config", None)
    if classifier_config is not None:
        # Verify classifier_config.predicted_probabilities_output, if it exists.
        # If it is empty/None, fill it with the last non-const op's output;
        # this is done in "_get_probability_var_for_classifier()".
        probability_var = _get_probability_var_for_classifier(
            prog, classifier_config)
        if classifier_config.predicted_probabilities_output != probability_var.name:
            classifier_config.predicted_probabilities_output = probability_var.name
        # add classifier related fields to the proto spec
        proto = _convert_to_classifier(proto,
                                       classifier_config,
                                       skip_model_load=kwargs.get(
                                           "skip_model_load", False))

    _set_user_inputs(proto, input_types)
    _set_symbolic_inputs(proto, symbolic_inputs)
    _set_optional_inputs(proto, input_types)

    return proto
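_get_colorspace_enum is not shown here, but Example #18 below inlines the same mapping from the user-provided color_layout string to the proto enum. A sketch consistent with that mapping (the real helper may also handle additional layouts):

from coremltools.proto import FeatureTypes_pb2 as ft

def _get_colorspace_enum(color_layout):
    # Mirrors the inline G / BGR / RGB mapping used in Example #18.
    if color_layout == "G":
        return ft.ImageFeatureType.ColorSpace.GRAYSCALE
    if color_layout == "BGR":
        return ft.ImageFeatureType.ColorSpace.BGR
    return ft.ImageFeatureType.ColorSpace.RGB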
Example #17
    def convert_main_graph(self, prog, graph):
        func_inputs = {}
        for input_type in self.inputs:
            func_inputs[input_type.name] = mb.placeholder(
                input_type.shape.symbolic_shape, dtype=input_type.dtype)
        prog.set_main_input_types(self.inputs)

        with Function(func_inputs,
                      opset_version=self.opset_version) as ssa_func:
            # Get the input Var
            for name in func_inputs.keys():
                input_var = ssa_func.inputs[name]
                if (types.is_tensor(input_var.sym_type) or types.is_scalar(input_var.sym_type)) \
                        and (input_var.dtype == types.fp16 or input_var.dtype == types.fp64):
                    # cast the input var to float32
                    # We need to do this because the type inference is very buggy when started from
                    # float16/float64 typed inputs. Until that is fixed in the following radar
                    # we cast all inputs of type float16/float64 to float32 as the first step.
                    # These casts will later get removed, if compute_precision=Float16 is
                    # provided, which will cause the FP16ComputePrecision pass to run.
                    # TODO: remove this when this radar is fixed: rdar://93731970
                    input_var = mb.cast(x=input_var, dtype="fp32", name=name)
                self.context.add(name, input_var)
            outputs = convert_graph(self.context, graph, self.output_names)
            ssa_func.set_outputs(outputs)
            prog.add_function("main", ssa_func)
        # Check for duplicate outputs.
        # Note: sometimes two outputs point to the same Var; we should
        # create mb.identity for those cases.
        block = prog["main"]
        with block:
            name_counts = {}
            new_outputs = [output for output in block.outputs]
            for i, v_o in enumerate(block.outputs):
                if v_o.name not in name_counts:
                    name_counts[v_o.name] = 1
                else:
                    name_counts[v_o.name] += 1
                    new_name = v_o.name + "_duplicate_" + str(
                        name_counts[v_o.name])
                    x = mb.identity(x=v_o, name=new_name)
                    new_outputs[i] = x
            block.set_outputs(new_outputs)

        # Rename outputs to TF's name. This is needed when the last op doesn't
        # generate a new Var (e.g., get_tuple, Identity etc.), and thus the
        # last Var would have a different name than the last TF op's name.
        #
        # Example:
        #
        # TF code:
        #    x = tf.placeholder(tf.float32, shape=(1,))
        #    y = tf.placeholder(tf.float32, shape=(1,))
        #    c = lambda i, j: \
        #            tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j))
        #    b = lambda i, j: (tf.add(i, 1), j)
        #    res = tf.while_loop(c, b, [x, y])
        #
        # Resulting nodes (excluding the nodes in while loop cond & body):
        #
        # node name: Placeholder op type: Placeholder inputs: []
        # node name: Placeholder_1 op type: Placeholder inputs: []
        # node name: make_input_0 op type: make_tuple inputs: ['Placeholder',
        #         'Placeholder_1']
        # node name: while_0 op type: while inputs: ['make_input_0']
        # node name: while/Exit op type: get_tuple inputs: ['while_0']
        # node name: while/Exit_1 op type: get_tuple inputs: ['while_0']
        #
        # Observe that the return node `while/Exit` is an output from get_tuple,
        # which in our translation simply unpacks a Python tuple of Vars
        # ('while_0:0', 'while_0:1') returned from the while_0 SSA op. We need to
        # rename `while_0:0` to `while/Exit` so that users can find the
        # output.
        # Note: only rename the output if the output is not a Placeholder.

        input_names = [x.name for x in self.inputs]
        for v_o, out_name in zip(prog["main"].outputs, self.output_names):
            if v_o.name != out_name and v_o.name not in input_names:
                logging.info("Renaming output var: '{}' -> '{}'".format(
                    v_o.name, out_name))
                v_o.name = out_name
        self.check_placeholder_output(prog, self.output_names)

        # verify that if model output dtypes / names are provided by the user, they are valid
        if self.main_output_types is not None:
            self._validate_and_update_main_output_types(prog)
            prog.set_main_output_types(self.main_output_types)
Example #18
def load(prog, weights_dir, resume_on_errors=False, **kwargs):
    if "main" not in prog.functions:
        raise ValueError("main function not found in program")

    mil_passes.mil_backend_passes(prog)

    # if user has specified "ClassifierConfig", then add the "classify" op to the prog
    classifier_config = kwargs.get("classifier_config", None)
    predicted_feature_name = None
    predicted_probabilities_name = None
    if classifier_config is not None:
        predicted_feature_name, predicted_probabilities_name = _add_classify_op(
            prog, classifier_config)

    input_types = prog.main_input_types
    weight_path = os.path.join(weights_dir, _WEIGHTS_FILE_NAME)
    blob_writer = BlobWriter(weight_path)

    function_protos = {}
    for func_name, func in prog.functions.items():
        function_protos[func_name] = convert_function(func, prog.parameters,
                                                      blob_writer)

    proto = pm.Program(
        version=1,
        functions=function_protos,
    )

    input_features = []
    output_features = []
    symbolic_inputs = []
    image_input_names = {}  # these are the model inputs marked as image by the user
    input_shape_map = {}

    for input_type in input_types:
        if isinstance(input_type, ImageType):
            image_input_names[input_type.name] = input_type
            # error checking for input(s) marked as images
            if input_type.name not in list(
                    prog.functions["main"].inputs.keys()):
                msg = "Provided image input '{}' is not one of the inputs of the MIL program"
                raise ValueError(msg.format(input_type.name))
        input_shape_map[input_type.name] = input_type

    for name, var in prog.functions["main"].inputs.items():
        input_feature_type = ft.FeatureType()

        # error checking for input(s) marked as images
        # an image input must be of type tensor in program proto
        # (since an image type does not exist in MIL program)
        if name in image_input_names and \
                not types.is_tensor(var.sym_type):
            raise ValueError(
                "For the image input, '{}', its type in the MIL program must be tensor. "
                "Instead it is {}.".format(name, var.sym_type.__type_info__()))

        if types.is_tensor(var.sym_type):
            shape = var.sym_type.get_shape()
            if any_variadic(shape):
                raise ValueError(
                    "Variable rank model inputs are not supported!")
            if any_symbolic(shape):
                symbolic_inputs.append(name)
                # We extract the default input shape given by user first
                if name in input_shape_map:
                    shape = input_shape_map[name].shape.default
                else:
                    logging.warning(
                        "Input shape not fully specified by enumerated shapes or range dims; 1 will be used for any unspecified dimension."
                    )
                # If no input shape is provided (e.g., auto conversion of -1 in TensorFlow)
                shape = [1 if is_symbolic(d) else d for d in shape]

            if name not in image_input_names:
                # make a feature type of Type "multiArrayType"
                array_type = ft.ArrayFeatureType(
                    shape=shape,
                    dataType=cast_to_framework_io_dtype(var, False))
                input_feature_type.multiArrayType.CopyFrom(array_type)
            else:
                if len(shape) < 3:
                    raise ValueError(
                        "Image input, '{}', must have rank at least 3. Instead it has rank {}"
                        .format(name, len(shape)))
                # make a feature type of Type "imageType"
                input_type = image_input_names[name]
                if not input_type.channel_first:
                    raise ValueError(
                        "Image input, '{}', must be in the channel_first format"
                        .format(name))

                if input_type.color_layout == "G":
                    clr_space = ft.ImageFeatureType.ColorSpace.GRAYSCALE
                elif input_type.color_layout == "BGR":
                    clr_space = ft.ImageFeatureType.ColorSpace.BGR
                else:
                    clr_space = ft.ImageFeatureType.ColorSpace.RGB

                image_type = ft.ImageFeatureType(width=shape[-1],
                                                 height=shape[-2],
                                                 colorSpace=clr_space)
                input_feature_type.imageType.CopyFrom(image_type)

            input_features.append(
                ml.FeatureDescription(name=name, type=input_feature_type))
        elif types.is_scalar(var.sym_type):
            array_type = ft.ArrayFeatureType(
                shape=[1], dataType=cast_to_framework_io_dtype(var, False))
            input_feature_type.multiArrayType.CopyFrom(array_type)
            input_features.append(
                ml.FeatureDescription(name=var.name, type=input_feature_type))
        else:
            raise NotImplementedError()

    for var in prog.functions["main"].outputs:
        output_feature_type = ft.FeatureType()
        if types.is_tensor(var.sym_type) or types.is_primitive(var.sym_type):
            dataType = None
            if classifier_config is None or var.name != predicted_feature_name:
                # Not a classifier output, make sure model output type matches with ML Program type.
                dataType = cast_to_framework_io_dtype(var, True)
            else:
                # Classifier outputs are set up separately, so default to fp32 for now.
                dataType = ft.ArrayFeatureType.ArrayDataType.FLOAT32

            array_type = ft.ArrayFeatureType(shape=None, dataType=dataType)
            output_feature_type.multiArrayType.CopyFrom(array_type)
            output_features.append(
                ml.FeatureDescription(name=var.name, type=output_feature_type))
        elif (types.is_dict(var.sym_type)):
            output_feature_type.dictionaryType.MergeFromString(b"")
            keytype, valtype = var.sym_type.T
            if types.is_str(keytype):
                output_feature_type.dictionaryType.stringKeyType.MergeFromString(
                    b"")
            elif (keytype == types_int64):
                output_feature_type.dictionaryType.int64KeyType.MergeFromString(
                    b"")
            else:
                raise ValueError("Dictionary key type not supported.")
            output_features.append(
                ml.FeatureDescription(name=var.name, type=output_feature_type))
        else:
            raise NotImplementedError()

    # Model description
    desc = ml.ModelDescription(input=input_features, output=output_features)
    if classifier_config is not None:
        desc.predictedFeatureName = predicted_feature_name
        desc.predictedProbabilitiesName = predicted_probabilities_name

        # Manually edit output type of predictedFeatureName.
        # It doesn't use MLMultiArray and really uses a "primitive" type.
        for output in desc.output:
            if output.name == predicted_feature_name:
                if type(classifier_config.class_labels[0]) == int:
                    output.type.int64Type.MergeFromString(b"")
                else:
                    output.type.stringType.MergeFromString(b"")
                break

    # Create ML Model
    model = ml.Model(description=desc,
                     specificationVersion=_SPECIFICATION_VERSION_IOS_15)
    model.mlProgram.CopyFrom(proto)

    # Set symbolic shapes
    for input_name in symbolic_inputs:
        input_type = input_shape_map.get(input_name, None)

        if isinstance(input_type, ImageType):
            if isinstance(input_type.shape, EnumeratedShapes):
                enumerated_shapes = []
                for s in input_type.shape.shapes:
                    enumerated_shapes.append(
                        NeuralNetworkImageSize(height=s.shape[-2],
                                               width=s.shape[-1]))
                add_enumerated_image_sizes(model,
                                           input_name,
                                           sizes=enumerated_shapes)
            else:
                img_range = NeuralNetworkImageSizeRange()
                H = input_type.shape.shape[-2]
                W = input_type.shape.shape[-1]

                if isinstance(H, RangeDim):
                    img_range.add_height_range((H.lower_bound, H.upper_bound))
                elif is_symbolic(H):
                    img_range.add_height_range((1, -1))
                else:
                    img_range.add_height_range((H, H))
                if isinstance(W, RangeDim):
                    img_range.add_width_range((W.lower_bound, W.upper_bound))
                elif is_symbolic(W):
                    img_range.add_width_range((1, -1))
                else:
                    img_range.add_width_range((W, W))

                update_image_size_range(model, input_name, img_range)
        elif isinstance(input_type, TensorType):
            if isinstance(input_type.shape, EnumeratedShapes):
                add_multiarray_ndshape_enumeration(
                    model, input_name,
                    [tuple(s.shape) for s in input_type.shape.shapes])
            else:
                lb = []
                ub = []
                for s in input_type.shape.shape:
                    if isinstance(s, RangeDim):
                        lb.append(s.lower_bound)
                        ub.append(s.upper_bound)
                    elif is_symbolic(s):
                        lb.append(1)
                        ub.append(-1)
                    else:
                        lb.append(s)
                        ub.append(s)
                set_multiarray_ndshape_range(model,
                                             input_name,
                                             lower_bounds=lb,
                                             upper_bounds=ub)
        elif input_type is None:
            sym_type = prog.functions["main"].inputs[input_name].sym_type
            lb = []
            ub = []
            for s in sym_type.get_shape():
                if is_symbolic(s):
                    lb.append(1)
                    ub.append(-1)
                else:
                    lb.append(s)
                    ub.append(s)
            set_multiarray_ndshape_range(model,
                                         input_name,
                                         lower_bounds=lb,
                                         upper_bounds=ub)

    # Set optional inputs
    _set_optional_inputs(model, input_types)

    return model
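The symbolic-input handling above consumes shapes that the user declares with RangeDim or EnumeratedShapes. A sketch of how such flexible shapes are declared on the public API side (names and bounds are illustrative):

import coremltools as ct

# A tensor input whose second dimension may vary between 1 and 512.
flexible_input = ct.TensorType(
    name="tokens",
    shape=ct.Shape(shape=(1, ct.RangeDim(lower_bound=1, upper_bound=512))),
)

# An image input restricted to a fixed set of sizes.
enumerated_input = ct.ImageType(
    name="image",
    shape=ct.EnumeratedShapes(shapes=[(1, 3, 256, 256), (1, 3, 512, 512)]),
)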
Example #19
 def is_tensor_or_scalar_of(self, dtype: str):
     return (types.is_tensor(self.sym_type) or types.is_scalar(
         self.sym_type)) and builtin_to_string(self.dtype) == dtype
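This helper condenses the dtype checks repeated in Examples #2 and #6. A hypothetical usage, where output_var stands for any MIL Var:

# Hypothetical: a tensor/scalar var whose dtype is neither fp32 nor int32 needs a cast
# (list and other types still need the explicit is_tensor/is_scalar guard shown above).
needs_cast = not (output_var.is_tensor_or_scalar_of(dtype="fp32")
                  or output_var.is_tensor_or_scalar_of(dtype="int32"))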
Example #20
def load(prog, **kwargs):
    if "main" not in prog.functions:
        msg = "main function not found in program {}"
        raise ValueError(msg.format(prog))
    if len(prog.functions) != 1:
        msg = ("Program must have exactly one `main` function to "
               "convert to NN. Program: {}")
        raise ValueError(msg.format(prog))

    nn_backend_passes(prog)
    input_types = prog.main_input_types

    v1_inputs = []
    symbolic_inputs = {}
    for name, var in prog.functions["main"].inputs.items():
        if types.is_tensor(var.sym_type):
            sym_shape = var.sym_type.get_shape()
            if any_variadic(sym_shape):
                # TODO: rdar://59559656
                raise NotImplementedError("Variadic rank is not supported")
            if any_symbolic(sym_shape):
                user_specified = False
                for input_type in input_types:
                    if name == input_type.name:
                        sym_shape = input_type.shape.default
                        user_specified = True
                        break
                # Use dummy static shape, and will set it later.
                shape = [1 if is_symbolic(d) else d for d in sym_shape]
                if not user_specified:
                    symbolic_inputs[name] = sym_shape
            else:
                shape = sym_shape
            v1_inputs.append((name, datatypes.Array(*shape)))
        elif types.is_scalar(var.sym_type):
            v1_inputs.append((name, datatypes.Array(1)))
        else:
            raise NotImplementedError()

    v1_outputs = []
    for var in prog.functions["main"].outputs:
        if types.is_tensor(var.sym_type) or types.is_primitive(var.sym_type):
            # Disregard the output types
            v1_outputs.append((var.name, None))
        else:
            raise NotImplementedError()

    # create neural network builder
    builder = neural_network.NeuralNetworkBuilder(
        v1_inputs,
        v1_outputs,
        disable_rank5_shape_mapping=True,
        use_float_arraytype=True,
    )

    # Consts in V2 are added lazily to V1 by each op whenever needed.
    # `const_context` stores the const names we've added so far and avoids
    # adding a const more than once.
    # const_context: list[set of str] (const names for v1 & v2
    # (the same)). Note that in NN the outer layer is visible from the inner
    # layer, so const_context is simply a stack of sets.
    const_context = []
    # Iterate through ops and add to builder
    convert_ops(
        const_context,
        builder,
        prog.functions["main"].operations,
        prog.functions["main"].outputs,
    )

    # Replace model outputs' names with v1_outputs
    output_names = [x[0] for x in v1_outputs]
    for i, spec_layer in enumerate(builder.nn_spec.layers):
        for j, name in enumerate(spec_layer.output):
            for output_name in output_names:
                if output_name.split(":")[0] == name:
                    spec_layer.output[j] = output_name

    proto = builder.spec
    # image input
    has_image_input = any([isinstance(s, ImageType) for s in input_types])
    if has_image_input:
        proto = _convert_to_image_input(proto, input_types)

    # classifier flag
    classifier_config = kwargs.get("classifier_config", None)
    if classifier_config is not None:
        proto = _convert_to_classifier(proto, classifier_config)

    _set_user_inputs(proto, input_types)
    _set_symbolic_inputs(proto, symbolic_inputs)

    return proto