Esempio n. 1
0
    def get_compat_shape(type1, type2):
        """
        Build a compatible shape (python list) for two tensor types of equal
        rank: entry i is the concrete integer dimension when both types agree
        on dim i, and a fresh symbol otherwise.

        Returns None when either argument is not a tensor type or when the
        ranks differ.
        """
        if not (types.is_tensor(type1) and types.is_tensor(type2)):
            return None

        shape1 = type1.get_shape()
        shape2 = type2.get_shape()
        if len(shape1) != len(shape2):
            return None

        # Dim-wise merge: keep matching dims, symbolize mismatches.
        return [
            d1 if d1 == d2 else get_new_symbol()
            for d1, d2 in zip(shape1, shape2)
        ]
Esempio n. 2
0
def types_to_proto(valuetype):
    """
    Convert a MIL builtin type into its proto ``ValueType`` representation.

    Tensors, tuples, lists and dicts are handled explicitly (tuples and
    nested element types recursively); any other type is emitted as a
    scalar value type.

    Raises NotImplementedError for list element types other than tensor,
    scalar or string.
    """
    if types.is_tensor(valuetype):
        primitive = types_to_proto_primitive(valuetype.get_primitive())
        return create_valuetype_tensor(valuetype.get_shape(), primitive)
    elif types.is_tuple(valuetype):
        v_type = pm.ValueType()
        t_type = v_type.tupleType
        # Convert each tuple element type recursively into the proto tuple.
        for t in valuetype.T:
            new_v_type = t_type.types.add()
            new_v_type.CopyFrom(types_to_proto(t))
        return v_type
    elif types.is_list(valuetype):
        # List types carry (element type, length) in T.
        elem = valuetype.T[0]
        length = valuetype.T[1]
        if types.is_tensor(elem):
            dtype = types_to_proto_primitive(elem.get_primitive())
            elem_shape = elem.get_shape()
        elif types.is_scalar(elem):
            # NOTE(review): this passes the outer list type, not `elem`, to
            # types_to_proto_primitive — confirm this is intentional (the
            # string branch below passes `elem`).
            dtype = types_to_proto_primitive(valuetype)
            elem_shape = ()
        elif types.is_str(elem):
            dtype = types_to_proto_primitive(elem)
            elem_shape = ()
        else:
            raise NotImplementedError(
                "Only list of either tensors or scalars supported. "
                "Got element of type {}".format(elem.__type_info__()))
        return create_valuetype_list(length=length,
                                     elem_shape=elem_shape,
                                     dtype=dtype)
    elif types.is_dict(valuetype):
        return create_valuetype_dict(valuetype.T[0], valuetype.T[1])
    else:
        return create_valuetype_scalar(types_to_proto_primitive(valuetype))
Esempio n. 3
0
 def _does_block_contain_symbolic_shape(block):
     """
     Return True if any op output anywhere in `block` (searching nested
     blocks recursively) is symbolic: a tensor with a symbolic shape, a
     scalar/string with a symbolic value, or a list of tensors whose
     element shape is symbolic.

     Raises NotImplementedError for list element types or output types
     that are not handled.
     """
     for op in block.operations:
         # Recurse into nested blocks (e.g. cond/while bodies) first.
         for b in op.blocks:
             if _does_block_contain_symbolic_shape(b):
                 return True
         for out in op.outputs:
             if types.is_tensor(out.sym_type):
                 shape = out.sym_type.get_shape()
                 if any_symbolic(shape):
                     return True
             elif types.is_scalar(out.sym_type) or types.is_str(
                     out.sym_type):
                 # Scalars/strings: symbolic-ness lives in the value itself.
                 if is_symbolic(out.val):
                     return True
             elif types.is_list(out.sym_type):
                 if types.is_tensor(out.elem_type):
                     if any_symbolic(out.elem_type.get_shape()):
                         return True
                 else:
                     raise NotImplementedError(
                         "\'{}\' type in a list not handled".format(
                             out.elem_type))
             else:
                 raise NotImplementedError(
                     "\'{}\' type is not handled".format(out.sym_type))
     return False
Esempio n. 4
0
def promoted_primitive_type(type1, type2):
    """
    Return the smallest type that can store instances of the primitive
    types underlying `type1` and `type2` (each of which may be a tensor
    type or a primitive type).
    """
    if types.is_tensor(type1):
        type1 = type1.get_primitive()
    if types.is_tensor(type2):
        type2 = type2.get_primitive()
    return types.promote_types(type1, type2)
Esempio n. 5
0
def check_output_shapes(x, node):
    """
    Validate that type-inferred output shapes agree with the shapes TF
    recorded in the node's "_output_shapes" attribute.

    x: list[Var] or tuple[Var] (a single Var is also accepted)
    node: ParsedTFNode

    Raises TypeError when an output Var is None and ValueError on a type
    or shape mismatch. List outputs are not checked.
    """
    if isinstance(x, ListVar):
        # No check on list.
        return
    if not isinstance(x, (list, tuple)):
        x = [x]
    tf_shapes = node.attr.get("_output_shapes", None)
    if tf_shapes is None:
        return

    inf_shapes = []
    for var in x:
        if var is None:
            raise TypeError(
                "TF convert returns None type in TF node {}".format(node.name))
        if types.is_tensor(var.sym_type):
            inf_shapes.append(list(var.shape))
        elif types.is_scalar(var.sym_type):
            # Scalars are recorded as rank-0 (empty) shapes.
            inf_shapes.append([])
        else:
            raise ValueError("Output type {} not understood".format(var))

    for tf_shape, inf_shape in zip(tf_shapes, inf_shapes):
        if compatible_shapes(tf_shape, inf_shape):
            continue
        msg = ("Op {} ({}) type inference ({}) and TF output shape " +
               "({}) mismatch")
        raise ValueError(msg.format(node.name, node.op, inf_shape, tf_shape))
Esempio n. 6
0
    def get_cast_value(input_var, dtype_val):
        """
        Materialize the value of `input_var` cast to the dtype named by
        `dtype_val` ("int32", "int64", "fp16", "fp32", "fp64" or "bool").

        Returns a numpy value, or None when `input_var` has no concrete
        value (symbolic elements inside a rank-1 sym_val are preserved
        unchanged). Raises NotImplementedError for unsupported dtype names.
        """
        type_map = {
            "int32": np.int32,
            "int64": np.int64,
            "fp16": np.float16,
            "fp32": np.float32,
            "fp64": np.float64,
            # np.bool was an alias of builtin bool and was removed in
            # NumPy 1.24; np.bool_ is the supported spelling.
            "bool": np.bool_,
        }

        if dtype_val not in type_map.keys():
            raise NotImplementedError(
                "Parameter dtype of the cast operation can be one of the {}. "
                "Provided {}".format(type_map.keys(), dtype_val))

        if input_var.val is None:
            # No concrete value; try the symbolic value for rank-1 cases,
            # casting only the non-symbolic entries.
            if input_var.sym_val is not None and not is_symbolic(
                    input_var.sym_val) and len(input_var.sym_val.shape) == 1:
                result = [
                    np.array(val).astype(dtype=type_map[dtype_val]).item()
                    if not is_symbolic(val) else val
                    for val in input_var.sym_val
                ]
                return np.array(result)
            return None

        if not types.is_tensor(input_var.sym_type):
            return input_var.val.astype(dtype=type_map[dtype_val])
        else:
            return np.array(input_var.val).astype(dtype=type_map[dtype_val])
Esempio n. 7
0
    def apply(self, prog):
        """
        Enforce user-provided output dtypes (prog.main_output_types) on the
        main function's outputs.

        For each output whose requested dtype differs from the var's current
        dtype (and whose type is tensor or scalar), the original var is
        renamed and a cast op is inserted that takes over the original
        output name. No-op when no output types were provided.

        Raises ValueError when the number of user-provided output types does
        not match the number of model outputs.
        """
        user_provided_output_types = prog.main_output_types
        main_func = prog.functions["main"]
        output_vars = main_func.outputs
        if user_provided_output_types is None or len(
                user_provided_output_types) == 0:
            return
        if len(output_vars) != len(user_provided_output_types):
            msg = "Number of outputs provided by the user, which is {}, " \
                  "does not match the number of outputs generated by the model, which is {}"
            raise ValueError(
                msg.format(len(user_provided_output_types), len(output_vars)))

        new_outputs = []
        for i, output_type in enumerate(user_provided_output_types):
            required_output_dtype = output_type.dtype
            output_var = output_vars[i]
            if required_output_dtype is None or \
                not (types.is_tensor(output_var.sym_type) or types.is_scalar(output_var.sym_type)) or \
                required_output_dtype == output_var.dtype:
                # no need to update the output var's dtype in this case
                new_outputs.append(output_var)
            else:
                # Rename the original var so the cast can adopt its name.
                output_var_name = output_var.name
                output_var.set_name(output_var_name + "_type_" +
                                    types.builtin_to_string(output_var.dtype))
                with main_func:
                    output_var = mb.cast(
                        x=output_var,
                        dtype=types.builtin_to_string(required_output_dtype))
                    output_var.set_name(output_var_name)
                new_outputs.append(output_var)

        main_func.set_outputs(new_outputs)
Esempio n. 8
0
def _adjust_main_outputs(func):
    """
    Cast main-function outputs to dtypes supported by the CoreML runtime.

    Outputs that are tensors/scalars with a dtype other than fp32 or int32
    (and other than fp16 from iOS16 onwards) are renamed, cast to fp32, and
    the cast result takes over the original output name.
    """
    new_outputs = []
    for output_var in func.outputs:
        output_type = output_var.sym_type
        if (types.is_tensor(output_type) or types.is_scalar(output_type)) \
            and output_var.dtype != types.fp32 \
            and output_var.dtype != types.int32 \
            and (func.opset_version < target.iOS16 or output_var.dtype != types.fp16):
            # since fp16 is a valid output type for coreml from ios16 spec onwards, no need to cast
            output_dtype_str = types.builtin_to_string(output_var.dtype)
            supported_dtypes = "{int32, fp32, fp64}" if func.opset_version < target.iOS16 else \
                                "{int32, fp16, fp32, fp64}"
            # Fixed: a space was missing between "convert" and "the", which
            # produced "...program to convertthe original..." in the warning.
            msg = "\nOutput '{}' is of dtype {}. The " +\
                           "CoreML runtime does not support outputs with this dtype " +\
                           "(supported dtypes are: {}). This output will be assigned a dtype " +\
                           "of fp32. A cast will be inserted at the end of the program to convert " +\
                           "the original output dtype to the dtype supported by the CoreML runtime.\n"
            if output_var.dtype == types.fp16:
                msg += "fp16 dtype output is supported if function.opset_version is chosen to be at least " \
                       "iOS16/macOS13.\n"
            logging.warning(
                msg.format(
                    output_var.name,
                    output_dtype_str,
                    supported_dtypes,
                ))

            # Rename the original var so the cast can adopt its name.
            output_var_name = output_var.name
            output_var.set_name(output_var_name + "__pre__output__fp32__cast")
            # Convert the output to fp32, and add a cast.
            output_var = mb.cast(x=output_var, dtype="fp32")
            output_var.set_name(output_var_name)
        new_outputs.append(output_var)
    func.set_outputs(new_outputs)
Esempio n. 9
0
def _load_function(context, func_spec, spec_version):
    """
    Deserialize a proto Function spec into a PyMIL Function.

    Creates a Placeholder for every function input (tensor inputs only),
    registers each input var in `context`, then loads the block
    specialization matching the function's opset inside a new Function
    scoped to `spec_version`.

    Raises TypeError for a non-Function spec, and ValueError for function
    attributes, non-tensor inputs, or a missing block specialization.
    """
    if not isinstance(func_spec, pm.Function):
        raise TypeError("Invalid Function spec object")

    if func_spec.attributes:
        raise ValueError("Attributes on functions not supported")

    func_inputs = {}
    for named_value_type in func_spec.inputs:
        name = named_value_type.name
        valuetype = proto_to_types(named_value_type.type)

        if not types.is_tensor(valuetype):
            raise ValueError("Functions inputs can only be tensors")
        func_inputs[name] = Placeholder(
            sym_shape=valuetype.get_shape(), dtype=valuetype.get_primitive(), name=name
        )
        # Make the placeholder's output var resolvable by name during block loading.
        context.register_var_with_name(name, func_inputs[name].outputs[0])

    opset = func_spec.opset
    if opset not in func_spec.block_specializations:
        raise ValueError("Missing block specialization for opset {}".format(opset))

    with Function(func_inputs, opset_version=_target(spec_version)) as pymil_func:
        _load_block(context, func_spec.block_specializations[opset])

    return pymil_func
Esempio n. 10
0
 def _create_placeholder(node):
     """
     Build an mb.placeholder for a parsed TF node, replacing unknown or
     negative dimensions with fresh symbols. Non-tensor nodes get an empty
     shape.
     """
     node.parse_from_attr()
     dtype = node.attr["dtype"]
     shape = []
     if types.is_tensor(node.datatype):
         raw_shape = node.datatype.get_shape()
         # Unknown (None) or negative dims become symbolic.
         shape = tuple(
             dim if dim is not None and dim >= 0 else get_new_symbol()
             for dim in raw_shape
         )
     return mb.placeholder(shape, dtype=dtype)
Esempio n. 11
0
 def type_str(self):
     """Return a short human-readable category for this var's sym_type."""
     if types.is_tensor(self.sym_type):
         return "(Tensor)"
     if types.is_list(self.sym_type):
         return "(List)"
     # Anything that is neither tensor nor list is reported as scalar.
     return "(Scalar)"
Esempio n. 12
0
    def type_inference(self):
        """
        Infer the broadcast result type of x and y: promote the primitive
        types, then broadcast shapes when either operand is a tensor.

        Raises ValueError when the primitive types cannot be promoted.
        """
        type_x = self.x.sym_type
        type_y = self.y.sym_type
        primitive_type = promoted_primitive_type(type_x, type_y)
        if primitive_type is None:
            raise ValueError(
                "Incompatible primitive types in broadcast operation")
        primitive_type = self.get_dtype(primitive_type)

        x_is_tensor = types.is_tensor(type_x)
        y_is_tensor = types.is_tensor(type_y)

        if x_is_tensor and y_is_tensor:
            # Both operands are tensors: broadcast their shapes.
            ret_shape = broadcast_shapes(list(type_x.get_shape()),
                                         list(type_y.get_shape()))
            return types.tensor(primitive_type, ret_shape)
        if x_is_tensor:
            # Only x is a tensor; result takes x's shape.
            return types.tensor(primitive_type, type_x.get_shape())
        if y_is_tensor:
            # Only y is a tensor; result takes y's shape.
            return types.tensor(primitive_type, type_y.get_shape())

        # Neither operand is a tensor: the result is a scalar.
        return primitive_type
Esempio n. 13
0
def create_immediate_value(var):
    """
    Convert a Var's concrete value into a proto immediate value.

    Tensors become tensor values, lists of str/int64 become scalar-list
    values, and everything else is emitted as a scalar value.

    Raises NotImplementedError for unsupported list element types.
    """
    if types.is_tensor(var.sym_type):
        return create_tensor_value(var.val)
    elif types.is_list(var.sym_type):
        if var.elem_type == types.str:
            # np.str was an alias of builtin str and was removed in
            # NumPy 1.24; use the builtin directly.
            return create_list_scalarvalue(var.val, str)
        elif var.elem_type == types.int64:
            return create_list_scalarvalue(var.val, np.int64)
        else:
            raise NotImplementedError(
                "List element type, {}, not supported yet.".format(
                    var.sym_type.__type_info__()))
    else:
        return create_scalar_value(var.val)
Esempio n. 14
0
def _adjust_main_inputs(func):
    """
    Adjust main-function input dtypes to ones the CoreML runtime supports.

    - non-int32 integer inputs are replaced in place with int32 (no cast);
    - fp64 inputs are replaced in place with fp32 (no cast);
    - fp16 inputs are left untouched from iOS16 onwards;
    - any other unsupported dtype is replaced with fp32, with a cast back
      to the originally defined dtype inserted at the start of the program.
    """
    first_op = func.operations[0] if len(func.operations) > 0 else None
    for input_name, input_var in func.inputs.items():
        if (types.is_tensor(input_var.sym_type) or types.is_scalar(input_var.sym_type)) \
             and input_var.dtype != types.fp32 \
             and input_var.dtype != types.int32:
            input_dtype_str = types.builtin_to_string(input_var.dtype)
            if types.is_int(input_var.dtype):
                # Replace non-int32 input type with int32.
                # Fixed: the name was previously concatenated without a
                # separator ("Inputfoo ..."); now quoted like the fp64 branch.
                logging.warning("Input '" + input_var.name + "' is of dtype " + input_dtype_str +\
                               ". Only integer variables of bit width 32 are supported by the CoreML runtime. " +\
                               "This input will be assigned a dtype of int32. " +\
                               "No cast will be inserted; the previous dtype will be replaced.")
                _adjust_var_dtype_helper(input_var, types.int32)
            elif input_var.dtype == types.fp64:
                # Replace float64 input type with fp32.
                logging.warning("Input '" + input_var.name + "' is of dtype fp64. 64 bit float inputs are " +\
                               "not supported by ML program models. This input will be assigned a dtype " +\
                               "of fp32. No cast will be inserted; the previous dtype will be replaced.")
                _adjust_var_dtype_helper(input_var, types.fp32)
            elif input_var.dtype == types.fp16 \
                 and func.opset_version >= target.iOS16:
                pass  # do nothing, since fp16 is a valid input type for CoreML
            else:
                # This is some other dtype. Change the type to fp32 and add a cast.
                # This is only a limitation of main--other functions do not represent CoreML model inputs
                # and do not have the same limitation on input types.
                supported_dtypes = "{int32, fp32, fp64}" if func.opset_version < target.iOS16 else \
                                    "{int32, fp16, fp32, fp64}"
                msg = "\nInput '{}' is of dtype {}. The " +\
                               "CoreML runtime does not support inputs with this dtype " +\
                               "(supported dtypes are: {}). This input will be assigned a dtype of " +\
                               "fp32. A cast will be inserted at the beginning of the program to " +\
                               "convert the input to the originally defined dtype.\n"
                if input_var.dtype == types.fp16:
                    msg += "fp16 dtype input is supported if the function.opset_version is chosen to be at least " \
                           "iOS16/macOS13.\n"
                logging.warning(
                    msg.format(input_var.name, input_dtype_str,
                               supported_dtypes))

                # Cast back to the original dtype so downstream ops still see it.
                casted_input_var = mb.cast(x=input_var,
                                           dtype=input_dtype_str,
                                           before_op=first_op)
                func.replace_uses_of_var_after_op(
                    anchor_op=casted_input_var.op,
                    old_var=input_var,
                    new_var=casted_input_var)
                _adjust_var_dtype_helper(input_var, types.fp32)
    def value_inference(self):
        """
        Evaluate the cast at conversion time: return x's value converted to
        the numpy dtype named by self.dtype.val.

        Raises NotImplementedError for unsupported dtype names.
        """
        type_map = {
            "int32": np.int32,
            "int64": np.int64,
            "fp32": np.float32,
            "fp64": np.float64,
            # np.bool was an alias of builtin bool and was removed in
            # NumPy 1.24; np.bool_ is the supported spelling.
            "bool": np.bool_,
        }

        if self.dtype.val not in type_map.keys():
            raise NotImplementedError(
                "Parameter dtype of the cast operation can be one of the {}. "
                "Provided {}".format(type_map.keys(), self.dtype.val))

        if not types.is_tensor(self.x.sym_type):
            return self.x.val.astype(dtype=type_map[self.dtype.val])
        else:
            # Wrap in np.array so python scalars/lists also cast cleanly.
            return np.array(self.x.val).astype(dtype=type_map[self.dtype.val])
    def type_inference(self):
        """
        Infer the cast's output type: x's shape with the builtin dtype
        named by self.dtype.val (scalar inputs yield a scalar type).

        Raises NotImplementedError for unsupported dtype names.
        """
        type_map = {
            "int32": types.int32,
            "int64": types.int64,
            "fp32": types.fp32,
            "fp64": types.fp64,
            "bool": types.bool,
        }

        dtype_val = self.dtype.val
        if dtype_val not in type_map:
            raise NotImplementedError(
                "Parameter dtype of the cast operation can be one of the {}. "
                "Provided {}".format(type_map.keys(), dtype_val))

        target_dtype = type_map[dtype_val]
        if types.is_tensor(self.x.sym_type):
            # Shape is preserved; only the element type changes.
            return types.tensor(target_dtype, self.x.shape)
        return target_dtype
Esempio n. 17
0
def _load_value(context, value_spec):
    """
    Deserialize a proto Value spec into a concrete value.

    Only tensorType specs are supported. The raw bytes come either from the
    immediate value embedded in the spec or from the blob file referenced by
    it; they are then decoded according to the builtin dtype. Non-tensor
    (scalar) results are unwrapped into a python/numpy scalar.

    Raises TypeError for a non-Value spec, ValueError for specs with a
    docstring or an unsupported dtype, and NotImplementedError for
    non-tensor value types.
    """
    if not isinstance(value_spec, pm.Value):
        raise TypeError("Invalid Value spec object")

    if value_spec.docString:
        raise ValueError("Docstring would get lost in the process.")

    if value_spec.type.WhichOneof("type") == "tensorType":
        valuetype = proto_to_types(value_spec.type)

        is_tensor = types.is_tensor(valuetype)

        dtype = valuetype if not is_tensor else valuetype.get_primitive()
        shape = () if not is_tensor else valuetype.get_shape()

        if value_spec.WhichOneof("value") == "immediateValue":
            value = _load_immediate_value(value_spec.immediateValue)
        else:
            value = _load_file_value(context, value_spec.blobFileValue, dtype)

        if dtype in (types.fp16, types.int8, types.uint8, types.uint32):
            # These dtypes arrive as raw bytes; reinterpret and reshape.
            value = np.frombuffer(value, types.nptype_from_builtin(dtype)).reshape(
                shape
            )
        elif dtype == types.str and shape == ():
            # Scalar string: unwrap the single element.
            value = str(value[0])
        elif dtype in (types.fp32, types.str, types.bool, types.int32, types.int64):
            value = (
                np.array(value).astype(types.nptype_from_builtin(dtype)).reshape(shape)
            )
        else:
            raise ValueError("Invalid dtype for tensor value")
    else:
        raise NotImplementedError("Only value of tensorType implemented yet")

    if not is_tensor and not isinstance(value, str):
        # Unwrap rank-0 numpy arrays into a plain scalar of the builtin's numpy type.
        value = types.nptype_from_builtin(dtype)(value.item())

    return value
def _adjust_main_outputs(func):
    """
    Cast main-function outputs to dtypes supported by the CoreML runtime.

    Outputs that are tensors/scalars with a dtype other than fp32 or int32
    are renamed, cast to fp32, and the cast result takes over the original
    output name.
    """
    new_outputs = []
    for output_var in func.outputs:
        output_type = output_var.sym_type
        if (_types.is_tensor(output_type) or _types.is_scalar(output_type)) \
            and output_var.dtype != _types.fp32 \
            and output_var.dtype != _types.int32:
            output_dtype_str = _types.builtin_to_string(output_var.dtype)
            # Fixed two message defects: the var name was concatenated with
            # no separator ("Outputfoo ..."), and "convert"+"the" rendered
            # as "convertthe".
            _warnings.warn("Output '" + output_var.name + "' is of dType " + output_dtype_str + ". The " +\
                           "CoreML runtime does not support outputs with this dType (only int32 and " +\
                           "fp32 are supported for outputs). This output will be assigned a dType " +\
                           "of fp32. A cast will be inserted at the end of the program to convert " +\
                           "the original output dType to the dType supported by the CoreML runtime.")

            # Rename the original var so the cast can adopt its name.
            output_var_name = output_var.name
            output_var.set_name(output_var_name + "__pre__output__fp32__cast")
            # Convert the output to fp32, and add a cast.
            with func:
                output_var = _mb.cast(x=output_var, dtype="fp32")
                output_var.set_name(output_var_name)
        new_outputs.append(output_var)
    func.set_outputs(new_outputs)
def _adjust_main_inputs(func):
    """
    Adjust main-function input dtypes to ones the CoreML runtime supports.

    - non-int32 integer inputs are replaced in place with int32 (no cast);
    - fp64 inputs are replaced in place with fp32 (no cast);
    - any other unsupported dtype is replaced with fp32, with a cast back
      to the originally defined dtype inserted at the start of the program.
    """
    first_op = func.operations[0] if len(func.operations) > 0 else None
    for input_name, input_var in func.inputs.items():
        if (_types.is_tensor(input_var.sym_type) or _types.is_scalar(input_var.sym_type)) \
             and input_var.dtype != _types.fp32 \
             and input_var.dtype != _types.int32:
            input_dtype_str = _types.builtin_to_string(input_var.dtype)
            # Fixed: all three warnings concatenated the var name with no
            # separator ("Inputfoo ..."); the name is now quoted.
            if _types.is_int(input_var.dtype):
                # Replace non-int32 input type with int32.
                _warnings.warn("Input '" + input_var.name + "' is of dType " + input_dtype_str +\
                               ". Only integer variables of bit width 32 are supported by the CoreML runtime. " +\
                               "This input will be assigned a dType of int32. " +\
                               "No cast will be inserted; the previous dtype will be replaced.")
                _adjust_var_dtype_helper(input_var, _types.int32)
            elif input_var.dtype == _types.fp64:
                # Replace float64 input type with fp32.
                _warnings.warn("Input '" + input_var.name + "' is of dtype fp64. 64 bit float inputs are " +\
                               "not supported by ML program models. This input will be assigned a dType " +\
                               "of fp32. No cast will be inserted; the previous dtype will be replaced.")
                _adjust_var_dtype_helper(input_var, _types.fp32)
            else:
                # This is some other dType. Change the type to fp32 and add a cast.
                # This is only a limitation of main--other functions do not represent CoreML model inputs
                # and do not have the same limitation on input types.
                _warnings.warn("Input '" + input_var.name + "' is of dType " + input_dtype_str + ". The " +\
                               "CoreML runtime does not support inputs with this dType (only fp32 and " +\
                               "int32 inputs are supported). This input will be assigned a dType of " +\
                               "fp32. A cast will be inserted at the beginning of the program to " +\
                               "convert the input to the originally defined dType.")
                with func:
                    # Cast back to the original dtype so downstream ops still see it.
                    casted_input_var = _mb.cast(x=input_var,
                                                dtype=input_dtype_str,
                                                before_op=first_op)
                    func.replace_uses_of_var_after_op(
                        anchor_op=casted_input_var.op,
                        old_var=input_var,
                        new_var=casted_input_var)
                    _adjust_var_dtype_helper(input_var, _types.fp32)
Esempio n. 20
0
def _adjust_var(var):
    """
    Changes the dtype of the provided variable according
    to the rules outlined in the top level pass comment
    (see adjust_io_to_supported_types).
    """
    # Only tensor/scalar vars with an unsupported dtype need adjusting.
    if not (types.is_tensor(var.sym_type) or types.is_scalar(var.sym_type)):
        return
    if var.dtype in __RUNTIME_SUPPORTED_TYPES:
        return

    dtype_str = types.builtin_to_string(var.dtype)
    if types.is_int(var.dtype):
        # Replace non-int32 input type with int32.
        logging.warning("Input '" + var.name + "' is of dtype " + dtype_str +
                        ". Only integer variables of bit width 32 are supported by the CoreML runtime. "
                        "This input will be assigned a dtype of int32. "
                        "No cast will be inserted; the previous dtype will be replaced.")
        _adjust_var_dtype_helper(var, types.int32)
        return

    # This is some other unsupported dtype. Change the input type to fp32.
    logging.warning("Var " + var.name + " is of dtype " + dtype_str + ". The CoreML runtime "
                    "does not support this dtype (only fp16, fp32, bool, and int32 are supported). "
                    "This input will be assigned a dtype of fp32. No cast will be inserted; "
                    "the previous dtype will be replaced.")
    _adjust_var_dtype_helper(var, types.fp32)
Esempio n. 21
0
 def _is_compatible(self, v):
     # Accept list-typed vars, plus anything whose dtype is scalar or tensor.
     # NOTE(review): the scalar/tensor checks are applied to v.dtype rather
     # than v.sym_type (as sibling _is_compatible methods do) — confirm this
     # is intentional.
     return (types.is_list(v.sym_type) or types.is_scalar(v.dtype)
             or types.is_tensor(v.dtype))
Esempio n. 22
0
def _load_operation(context, op_spec):
    """
    Deserialize a proto Operation spec into a PyMIL operation and register
    its output vars in `context`.

    const / constexpr ops are rebuilt directly from their attributes. All
    other ops have their inputs resolved (by name, or from immediate list
    values), nested blocks recreated for control flow, the op instantiated
    via the builder, and each output type checked against the spec.

    Raises TypeError for a non-Operation spec, ValueError for invalid
    const/constexpr specs or op attributes, NotImplementedError for
    custom_layer ops and unsupported input bindings, and AssertionError on
    output count/type mismatches.
    """
    if not isinstance(op_spec, pm.Operation):
        raise TypeError("Invalid Operation spec object")

    op_type = op_spec.type
    if op_type == "const" or op_type.startswith("constexpr_"):
        if op_spec.blocks:
            raise ValueError("const / constexpr operation can't have any block")
        if op_spec.inputs:
            raise ValueError("const / constexpr operation can't have any input")

        inputs = {k: _load_value(context, v) for k, v in op_spec.attributes.items()}
        pymil_var = getattr(mb, op_type)(**inputs)
        context.register_var_with_name(op_spec.outputs[0].name, pymil_var)

    else:
        if op_type == "custom_layer":
            raise NotImplementedError(
                "Loading Custom Layer operation not yet implemented"
            )

        if op_spec.attributes:
            raise ValueError("Attributes on operation not supported")

        # The conversion steps of an operation proto -> PyMIL operation are as following:

        # (i)   Convert the input arguments:
        #       In most of the cases, the input variable is already created beforehand, hence we can
        #       directly access and get them through the TranscriptionContext.
        #       There are cases, though, the inputs are literal value. This could happens in the classify op spec.
        #       For that case, we directly create a constant variable.

        # (ii)  Create nested blocks for control flow operations:
        #       The Python functinoal input arguments for control flow ops cannot be recovered from milproto -> pymil conversion,
        #       for instance, the _body, _cond for mb.while_loop and _true_fn, _false_fn for mb.cond are not invertible
        #       Hence, here we directly create the nested blocks from the proto, and set them to mb.while_loop.blocks / mb.cond.blocks.
        #       Note that, when creating a block, PyMIL required an outer_op, which should be the control flow operation itself. However,
        #       in this approach we take, the outer_op hasn't been created at the time when the blocks produced. Here, we make a "dummy outer_op",
        #       which could pass the check in PyMIL, also it could provide enough information (such as visible variables in the blocks etc.)
        #       for the creation of the block.

        # (iii) Create PyMIL operation using inputs / blocks
        #       Note that for the control flow cases, we create dummy functional inputs, and use the exisiting block to create the op.

        # (iv)  Set the outer_op for control flow
        #       Once the operation is created, we replace the dummy outer_op with the legit one, to make it a valid PyMIL program

        inputs = {}
        for param_name, argument in op_spec.inputs.items():
            # `vars` collects the resolved Var(s) bound to this parameter.
            vars = []
            for binding in argument.arguments:
                binding_type = binding.WhichOneof("binding")
                if binding_type == "name":
                    vars.append(context.get_var_from_name(binding.name))
                elif binding_type == "value":
                    # We only support the list value for now (for the classifier use case)
                    value_spec = binding.value
                    assert value_spec.WhichOneof("value") == "immediateValue"
                    assert value_spec.immediateValue.WhichOneof("value") == "list"
                    list_value = _load_immediate_value(value_spec.immediateValue)
                    values = []
                    for value_spec in list_value:
                        values.append(_load_value(context, value_spec))
                    var = mb.const(val=mil_list(values))
                    vars.append(var)
                else:
                    raise NotImplementedError("Binding {} not yet implemented".format(binding_type))
            op_cls = _SSAOpRegistry._get_core_op_cls(op_type)
            # Single-var bindings collapse to a lone Var unless the op
            # declares the parameter as a tuple input.
            if len(vars) == 1 and not isinstance(
                op_cls.input_spec.input_types[param_name], TupleInputType
            ):
                inputs[param_name] = vars[0]
            else:
                inputs[param_name] = vars

        blocks = _create_nested_blocks(context, op_spec)
        _set_inputs_for_control_flow_op(inputs, blocks, op_type)

        output_var = getattr(mb, op_type)(**inputs)
        if not isinstance(output_var, (tuple, list)):
            output_var = [output_var]

        if len(output_var) != len(op_spec.outputs):
            raise AssertionError(
                "Mismatch between number of outputs in operation specification vs PyMIL outputs"
            )

        for spec, var in zip(op_spec.outputs, output_var):
            context.register_var_with_name(spec.name, var)

            pymil_type = var.sym_type
            proto_type = proto_to_types(spec.type)
            if not types.is_compatible_type(pymil_type, proto_type):
                # We allow a corner case where the pymil has an 0 rank tensor and the spec produces a scalar
                if types.is_tensor(pymil_type) and types.is_scalar(proto_type):
                    if pymil_type.get_primitive() == proto_type:
                        continue
                raise AssertionError(
                    "Mismatch between var types in specification vs PyMIL"
                )

        _set_outer_op_for_nested_blocks(blocks, output_var[0].op)
Esempio n. 23
0
 def shape(self):
     """Shape of this var: the tensor's shape, or () for non-tensor types."""
     sym_type = self._sym_type
     if not types.is_tensor(sym_type):
         return tuple()
     return sym_type.get_shape()
Esempio n. 24
0
 def _is_compatible(self, v):
     """Return True iff `v` is a tensor with boolean dtype."""
     if not types.is_tensor(v.sym_type):
         return False
     return v.dtype == types.bool
Esempio n. 25
0
 def dtype(self):
     """Primitive dtype of this var (the element type for tensors)."""
     sym_type = self._sym_type
     return sym_type.get_primitive() if types.is_tensor(sym_type) else sym_type
Esempio n. 26
0
 def _is_compatible(self, v):
     """Return True iff `v` is a tensor whose dtype is a supported int type."""
     if not types.is_tensor(v.sym_type):
         return False
     return v.dtype in SUPPORT_INT_TYPES
Esempio n. 27
0
 def _is_compatible(self, v):
     # We only support scalar string type.
     # NOTE(review): this returns True for tensors whose primitive is NOT
     # str, which appears to contradict the comment above — confirm whether
     # `!=` should be `==`, or whether non-str tensors are the intended
     # accepted inputs here.
     return types.is_tensor(v.sym_type) and \
         v.sym_type.get_primitive() != types.str
Esempio n. 28
0
def load(prog, weights_dir, resume_on_errors=False, **kwargs):
    """
    Serialize a MIL ``Program`` into an ML Program ``ml.Model`` proto.

    Runs the MIL backend passes, converts every function to proto form
    (writing weights to a blob file under ``weights_dir``), builds the
    model description (multi-array / image inputs, outputs, classifier
    fields), and attaches flexible-shape information for any symbolic
    input dimensions.

    Parameters
    ----------
    prog:
        MIL program; must contain a function named "main".
    weights_dir:
        Directory into which the weight blob file (_WEIGHTS_FILE_NAME)
        is written.
    resume_on_errors:
        NOTE(review): not referenced in this function body — presumably
        consumed elsewhere; confirm before relying on it.
    kwargs:
        Optional key read here: "classifier_config".

    Returns
    -------
    ``ml.Model`` proto with specificationVersion
    _SPECIFICATION_VERSION_IOS_15 and the program embedded in
    ``mlProgram``.

    Raises
    ------
    ValueError
        If "main" is missing, an image input is invalid (unknown name,
        non-tensor type, rank < 3, not channel-first), inputs have
        variadic rank, or a dict output has an unsupported key type.
    NotImplementedError
        For input/output var types not handled below.
    """
    if "main" not in prog.functions:
        raise ValueError("main function not found in program")

    mil_passes.mil_backend_passes(prog)

    # if user has specified "ClassifierConfig", then add the "classify" op to the prog
    classifier_config = kwargs.get("classifier_config", None)
    predicted_feature_name = None
    predicted_probabilities_name = None
    if classifier_config is not None:
        predicted_feature_name, predicted_probabilities_name = _add_classify_op(
            prog, classifier_config)

    input_types = prog.main_input_types
    weight_path = os.path.join(weights_dir, _WEIGHTS_FILE_NAME)
    blob_writer = BlobWriter(weight_path)

    # Convert every MIL function to its proto counterpart; weights are
    # streamed into the blob file as the functions are converted.
    function_protos = {}
    for func_name, func in prog.functions.items():
        function_protos[func_name] = convert_function(func, prog.parameters,
                                                      blob_writer)

    proto = pm.Program(
        version=1,
        functions=function_protos,
    )

    input_features = []
    output_features = []
    symbolic_inputs = []
    image_input_names = {
    }  # these are the model inputs marked as image by the user
    input_shape_map = {}

    # Index the user-provided input types by name, validating that every
    # ImageType actually names an input of the program.
    for input_type in input_types:
        if isinstance(input_type, ImageType):
            image_input_names[input_type.name] = input_type
            # error checking for input(s) marked as images
            if input_type.name not in list(
                    prog.functions["main"].inputs.keys()):
                msg = "Provided image input '{}' is not one of the inputs of the MIL program"
                raise ValueError(msg.format(input_type.name))
        input_shape_map[input_type.name] = input_type

    # Build one FeatureDescription per "main" input.
    for name, var in prog.functions["main"].inputs.items():
        input_feature_type = ft.FeatureType()

        # error checking for input(s) marked as images
        # an image input must be of type tensor in program proto
        # (since an image type does not exist in MIL program)
        if name in image_input_names and \
                not types.is_tensor(var.sym_type):
            raise ValueError(
                "For the image input, '{}', its type in the MIL program must be tensor. "
                "Instead it is {}.".format(name, var.sym_type.__type_info__()))

        if types.is_tensor(var.sym_type):
            shape = var.sym_type.get_shape()
            if any_variadic(shape):
                raise ValueError(
                    "Variable rank model inputs are not supported!")
            if any_symbolic(shape):
                symbolic_inputs.append(name)
                # We extract the default input shape given by user first
                if name in input_shape_map:
                    shape = input_shape_map[name].shape.default
                else:
                    logging.warning(
                        "Input shape not fully specified by enumerated shapes or range dim! 1 will be used for dimension not specified instead."
                    )
                # If no input shape is provided (ex. auto conversion of -1 in Tensorflow)
                shape = [1 if is_symbolic(d) else d for d in shape]

            if name not in image_input_names:
                # make a feature type of Type "multiArrayType"
                array_type = ft.ArrayFeatureType(
                    shape=shape,
                    dataType=cast_to_framework_io_dtype(var, False))
                input_feature_type.multiArrayType.CopyFrom(array_type)
            else:
                if len(shape) < 3:
                    raise ValueError(
                        "Image input, '{}', must have rank at least 3. Instead it has rank {}"
                        .format(name, len(shape)))
                # make a feature type of Type "imageType"
                input_type = image_input_names[name]
                if not input_type.channel_first:
                    raise ValueError(
                        "Image input, '{}', must be in the channel_first format"
                        .format(name))

                if input_type.color_layout == "G":
                    clr_space = ft.ImageFeatureType.ColorSpace.GRAYSCALE
                elif input_type.color_layout == "BGR":
                    clr_space = ft.ImageFeatureType.ColorSpace.BGR
                else:
                    clr_space = ft.ImageFeatureType.ColorSpace.RGB

                # Width/height come from the last two dims (channel-first).
                image_type = ft.ImageFeatureType(width=shape[-1],
                                                 height=shape[-2],
                                                 colorSpace=clr_space)
                input_feature_type.imageType.CopyFrom(image_type)

            input_features.append(
                ml.FeatureDescription(name=name, type=input_feature_type))
        elif types.is_scalar(var.sym_type):
            # Scalars become rank-1 multi-arrays of length 1.
            array_type = ft.ArrayFeatureType(
                shape=[1], dataType=cast_to_framework_io_dtype(var, False))
            input_feature_type.multiArrayType.CopyFrom(array_type)
            input_features.append(
                ml.FeatureDescription(name=var.name, type=input_feature_type))
        else:
            raise NotImplementedError()

    # Build one FeatureDescription per "main" output.
    for var in prog.functions["main"].outputs:
        output_feature_type = ft.FeatureType()
        if types.is_tensor(var.sym_type) or types.is_primitive(var.sym_type):
            dataType = None
            if classifier_config is None or var.name != predicted_feature_name:
                # Not a classifier output, make sure model output type matches with ML Program type.
                dataType = cast_to_framework_io_dtype(var, True)
            else:
                # Classifier outputs are set up separately, so default to fp32 for now.
                dataType = ft.ArrayFeatureType.ArrayDataType.FLOAT32

            # Output shape is intentionally left unset (shape=None).
            array_type = ft.ArrayFeatureType(shape=None, dataType=dataType)
            output_feature_type.multiArrayType.CopyFrom(array_type)
            output_features.append(
                ml.FeatureDescription(name=var.name, type=output_feature_type))
        elif (types.is_dict(var.sym_type)):
            output_feature_type.dictionaryType.MergeFromString(b"")
            keytype, valtype = var.sym_type.T
            if types.is_str(keytype):
                output_feature_type.dictionaryType.stringKeyType.MergeFromString(
                    b"")
            elif (keytype == types_int64):
                output_feature_type.dictionaryType.int64KeyType.MergeFromString(
                    b"")
            else:
                raise ValueError("Dictionary key type not supported.")
            output_features.append(
                ml.FeatureDescription(name=var.name, type=output_feature_type))
        else:
            raise NotImplementedError()

    # Model description
    desc = ml.ModelDescription(input=input_features, output=output_features)
    if classifier_config is not None:
        desc.predictedFeatureName = predicted_feature_name
        desc.predictedProbabilitiesName = predicted_probabilities_name

        # Manually edit output type of predictedFeatureName.
        # It doesn't use MLMultiArray and really uses a "primitive" type.
        for output in desc.output:
            if output.name == predicted_feature_name:
                if type(classifier_config.class_labels[0]) == int:
                    output.type.int64Type.MergeFromString(b"")
                else:
                    output.type.stringType.MergeFromString(b"")
                break

    # Create ML Model
    model = ml.Model(description=desc,
                     specificationVersion=_SPECIFICATION_VERSION_IOS_15)
    model.mlProgram.CopyFrom(proto)

    # Set symbolic shapes: for each input with symbolic dims, emit either
    # enumerated shapes or a lower/upper range (symbolic → (1, -1), i.e.
    # unbounded upper).
    for input_name in symbolic_inputs:
        input_type = input_shape_map.get(input_name, None)

        if isinstance(input_type, ImageType):
            if isinstance(input_type.shape, EnumeratedShapes):
                enumerated_shapes = []
                for s in input_type.shape.shapes:
                    enumerated_shapes.append(
                        NeuralNetworkImageSize(height=s.shape[-2],
                                               width=s.shape[-1]))
                add_enumerated_image_sizes(model,
                                           input_name,
                                           sizes=enumerated_shapes)
            else:
                img_range = NeuralNetworkImageSizeRange()
                H = input_type.shape.shape[-2]
                W = input_type.shape.shape[-1]

                if isinstance(H, RangeDim):
                    img_range.add_height_range((H.lower_bound, H.upper_bound))
                elif is_symbolic(H):
                    img_range.add_height_range((1, -1))
                else:
                    img_range.add_height_range((H, H))
                if isinstance(W, RangeDim):
                    img_range.add_width_range((W.lower_bound, W.upper_bound))
                elif is_symbolic(W):
                    img_range.add_width_range((1, -1))
                else:
                    img_range.add_width_range((W, W))

                update_image_size_range(model, input_name, img_range)
        elif isinstance(input_type, TensorType):
            if isinstance(input_type.shape, EnumeratedShapes):
                add_multiarray_ndshape_enumeration(
                    model, input_name,
                    [tuple(s.shape) for s in input_type.shape.shapes])
            else:
                lb = []
                ub = []
                for s in input_type.shape.shape:
                    if isinstance(s, RangeDim):
                        lb.append(s.lower_bound)
                        ub.append(s.upper_bound)
                    elif is_symbolic(s):
                        lb.append(1)
                        ub.append(-1)
                    else:
                        lb.append(s)
                        ub.append(s)
                set_multiarray_ndshape_range(model,
                                             input_name,
                                             lower_bounds=lb,
                                             upper_bounds=ub)
        elif input_type is None:
            # No user-provided type: derive the range from the program's
            # own symbolic shape.
            sym_type = prog.functions["main"].inputs[input_name].sym_type
            lb = []
            ub = []
            for s in sym_type.get_shape():
                if is_symbolic(s):
                    lb.append(1)
                    ub.append(-1)
                else:
                    lb.append(s)
                    ub.append(s)
            set_multiarray_ndshape_range(model,
                                         input_name,
                                         lower_bounds=lb,
                                         upper_bounds=ub)

    # Set optional inputs
    _set_optional_inputs(model, input_types)

    return model
Esempio n. 29
0
 def _is_compatible(self, v):
     # Any scalar or tensor dtype is acceptable here.
     dt = v.dtype
     return types.is_tensor(dt) or types.is_scalar(dt)
Esempio n. 30
0
def load(prog, **kwargs):
    """
    Convert a MIL ``Program`` into a NeuralNetwork protobuf spec.

    Runs the NN backend passes, builds a NeuralNetworkBuilder from the
    program's "main" inputs/outputs, converts all ops, then patches the
    proto for image inputs/outputs, classifier configuration, and
    user-specified / symbolic / optional inputs.

    Parameters
    ----------
    prog:
        MIL program; must contain exactly one function, named "main".
    kwargs:
        Optional keys read here: "skip_model_load" (bool) and
        "classifier_config".

    Returns
    -------
    The NeuralNetwork model proto spec.

    Raises
    ------
    ValueError
        If "main" is missing, the program has more than one function, or
        an output marked as ImageType is a scalar / has variadic rank.
    NotImplementedError
        For variadic-rank inputs, unsupported input/output var types, or
        symbolic image-output shapes.
    """
    if "main" not in prog.functions:
        msg = "main function not found in program {}"
        raise ValueError(msg.format(prog))
    if len(prog.functions) != 1:
        msg = ("Program must have exactly one `main` function to "
               "convert to NN. Program: {}")
        raise ValueError(msg.format(prog))

    nn_backend_passes(prog)
    input_types = prog.main_input_types
    output_types = prog.main_output_types

    # Collect builder inputs. Symbolic dims not pinned by the user get a
    # placeholder size of 1 here; their flexible ranges are applied later
    # via _set_symbolic_inputs.
    v1_inputs = []
    symbolic_inputs = {}
    for name, var in prog.functions["main"].inputs.items():
        if types.is_tensor(var.sym_type):
            sym_shape = var.sym_type.get_shape()
            if any_variadic(sym_shape):
                raise NotImplementedError("Variadic rank is not supported")
            if any_symbolic(sym_shape):
                user_specified = False
                for input_type in input_types:
                    if name == input_type.name:
                        sym_shape = input_type.shape.default
                        user_specified = True
                        break
                # Use dummy static shape, and will set it later.
                shape = [1 if is_symbolic(d) else d for d in sym_shape]
                if not user_specified:
                    symbolic_inputs[name] = sym_shape
            else:
                shape = sym_shape
            v1_inputs.append((name, Array(*shape)))
        elif types.is_scalar(var.sym_type):
            v1_inputs.append((name, Array(1)))
        else:
            raise NotImplementedError()

    v1_outputs = []
    for var in prog.functions["main"].outputs:
        if types.is_tensor(var.sym_type) or types.is_primitive(var.sym_type):
            # Disregard the output types
            v1_outputs.append((var.name, None))
        else:
            raise NotImplementedError()

    # create neural network builder
    builder = neural_network.NeuralNetworkBuilder(
        v1_inputs,
        v1_outputs,
        disable_rank5_shape_mapping=True,
        use_float_arraytype=True,
    )

    # const in V2 are added lazily to V1 by each op whenever needed.
    # `const_context` stores the const names we've added so far and avoid
    # adding a const more than once.
    # const_context: list[set of str] (const name for v1 & v2
    # (the same)). Note that in NN in outer layer is visible from the inner
    # layer, so the const_context is simply a stack of set.
    const_context = []
    # Iterate through ops and add to builder
    convert_ops(
        const_context,
        builder,
        prog.functions["main"].operations,
        prog.functions["main"].outputs,
    )

    proto = builder.spec
    # image input
    has_image_input = any([isinstance(s, ImageType) for s in input_types])
    if has_image_input:
        proto = _convert_to_image_input(proto,
                                        input_types,
                                        skip_model_load=kwargs.get(
                                            "skip_model_load", False))

    # image output
    if output_types is not None:
        assert len(output_types) == len(prog.functions["main"].outputs), \
                "number of mil program outputs do not match the number of outputs provided by the user"
        for i, output_proto_desc in enumerate(proto.description.output):
            output_var = prog.functions["main"].outputs[i]
            if isinstance(output_types[i], ImageType):
                # Bug fix: this branch previously read the stale loop
                # variable `var` (left over from the inputs loop above)
                # instead of `output_var`, so it validated and reported
                # the wrong variable.
                if not types.is_tensor(output_var.sym_type):
                    raise ValueError(
                        "Image output, '{}', is a scalar, but it should be a tensor of rank 4"
                        .format(output_var.name))
                shape = output_var.sym_type.get_shape()
                if any_variadic(shape):
                    raise ValueError(
                        "Variable rank model outputs, that are ImageTypes, are not supported"
                    )
                if any([is_symbolic(d) for d in shape]):
                    raise NotImplementedError(
                        "Image output '{}' has symbolic dimensions in its shape"
                        .format(output_var.name))
                _validate_image_input_output_shapes(
                    output_types[i].color_layout,
                    shape,
                    output_var.name,
                    is_input=False)
                clr_space = _get_colorspace_enum(output_types[i].color_layout)
                output_proto_desc.type.imageType.colorSpace = clr_space
                output_proto_desc.type.imageType.width = shape[-1]
                output_proto_desc.type.imageType.height = shape[-2]

    # classifier flag
    classifier_config = kwargs.get("classifier_config", None)
    if classifier_config is not None:
        # verify that classifier_config.predicted_probabilities_output if its exists.
        # And if its empty/None, fill it with the last non const op's output
        # this is done in "_get_probability_var_for_classifier()"
        probability_var = _get_probability_var_for_classifier(
            prog, classifier_config)
        if classifier_config.predicted_probabilities_output != probability_var.name:
            classifier_config.predicted_probabilities_output = probability_var.name
        # add classifier related fields to the proto spec
        proto = _convert_to_classifier(proto,
                                       classifier_config,
                                       skip_model_load=kwargs.get(
                                           "skip_model_load", False))

    _set_user_inputs(proto, input_types)
    _set_symbolic_inputs(proto, symbolic_inputs)
    _set_optional_inputs(proto, input_types)

    return proto