def _get_num_splits_and_sizes(self):
    """
    Return:
    - num_splits: int
    - sizes: list of int/symbols of length num_splits

    Raise ValueError if num_splits cannot be determined.
    """
    if self.num_splits is None and self.split_sizes is None:
        msg = (
            "At least one of num_splits and split_sizes "
            "must be specified in split op {}"
        )
        raise ValueError(msg.format(self.name))

    axis = self.axis.val

    if self.num_splits is not None:
        num_splits = self.num_splits.val
        if self.split_sizes is None:
            # Even split
            if (
                not is_symbolic(self.x.shape[axis])
                and self.x.shape[axis] % num_splits != 0
            ):
                msg = "num_splits {} does not divide split dim (length = {})"
                raise ValueError(msg.format(num_splits, self.x.shape[axis]))
            # Use integer division so sizes stay int/symbolic, not float.
            size = self.x.shape[axis] // num_splits
            return num_splits, [size] * num_splits

        # self.split_sizes is not None
        if self.split_sizes.sym_val is not None:
            return num_splits, self.split_sizes.sym_val

        # self.split_sizes.sym_val is None.
        sizes = [get_new_symbol() for _ in range(num_splits)]
        return num_splits, sizes

    # self.num_splits is None, self.split_sizes is not None
    if self.split_sizes.sym_val is not None:
        return len(self.split_sizes.sym_val), self.split_sizes.sym_val

    # self.split_sizes.sym_val is None
    if any_symbolic(self.split_sizes.shape):
        raise ValueError("Unable to determine number of splits")

    # split_sizes is a 1-D tensor; its static length is the number of splits
    # (len(self.split_sizes.shape) would be the rank, which is always 1 here).
    num_splits = self.split_sizes.shape[0]
    sizes = [get_new_symbol() for _ in range(num_splits)]
    return num_splits, sizes
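# A minimal standalone sketch (plain Python, made-up values) of the two size
# computations above: an even split divides the axis length evenly, while an
# explicit split_sizes list is used as-is with num_splits equal to its length.
axis_len = 12
num_splits = 3
assert axis_len % num_splits == 0
even_sizes = [axis_len // num_splits] * num_splits
assert even_sizes == [4, 4, 4]

split_sizes = [2, 4, 6]  # explicit-sizes branch
num_splits, sizes = len(split_sizes), split_sizes
assert (num_splits, sizes) == (3, [2, 4, 6])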
def type_inference(self):
    if self.x.rank < 3:
        raise ValueError(
            'input to the "upsample_bilinear" op must have rank at least 3'
        )

    ret_shape = list(self.x.shape)
    # Cast to int so concrete output dims are integers, not numpy floats.
    ret_shape[-1] = (
        int(np.floor(self.scale_factor_width.val * ret_shape[-1]))
        if not is_symbolic(ret_shape[-1])
        else get_new_symbol()
    )
    ret_shape[-2] = (
        int(np.floor(self.scale_factor_height.val * ret_shape[-2]))
        if not is_symbolic(ret_shape[-2])
        else get_new_symbol()
    )
    return types.tensor(self.x.dtype, ret_shape)
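# Shape-arithmetic sketch for the upsample above (standalone, numpy only; the
# concrete sizes are made up): each spatial dim is scaled and then floored.
import numpy as np

h, w = 10, 7
scale_h, scale_w = 1.5, 2.5
out_h = int(np.floor(scale_h * h))  # 15
out_w = int(np.floor(scale_w * w))  # 17
assert (out_h, out_w) == (15, 17)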
def test_builder_to_backend_symbolic(self, use_cpu_only, backend):
    s0 = get_new_symbol()
    s_len = get_new_symbol()

    input_placeholders = {
        "x": mb.placeholder(shape=(2, s0)),
        "shape": mb.placeholder(shape=(3,), dtype=types.int32),
        "shape2": mb.placeholder(shape=(s_len,), dtype=types.int32),
    }

    def build(x, shape, shape2):
        return [
            mb.reshape(x=x, shape=[2, -1]),
            mb.reshape(x=x, shape=[1, -1]),
            mb.reshape(x=x, shape=[2, 1, 1, -1]),
            mb.reshape(x=x, shape=shape),
            mb.reshape(x=x, shape=shape2),
        ]

    expected_output_types = [
        (2, s0, types.fp32),
        (1, 2 * s0, types.fp32),
        (2, 1, 1, s0, types.fp32),
        (UNK_SYM, UNK_SYM, UNK_SYM, types.fp32),
        (UNK_VARIADIC, types.fp32),
    ]
    expected_outputs = [
        np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32),
        np.array([[1, 2, 3, 4, 5, 6]], dtype=np.float32),
        np.array([[[[1.0, 2.0, 3.0]]], [[[4.0, 5.0, 6.0]]]], dtype=np.float32),
        np.array([[[1, 2, 3]], [[4, 5, 6]]], dtype=np.float32),
        np.array([[[1, 2, 3]], [[4, 5, 6]]], dtype=np.float32),
    ]
    input_values = {
        "x": np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32),
        # The shape inputs are int32 placeholders, so feed int32 values.
        "shape": np.array([2, 1, 3], dtype=np.int32),
        "shape2": np.array([2, 1, 3], dtype=np.int32),
    }

    run_compare_builder(
        build,
        input_placeholders,
        input_values,
        expected_output_types,
        expected_outputs,
        use_cpu_only=use_cpu_only,
        frontend_only=False,
        backend=backend,
    )
def test_cast_with_symbolic_value(self):
    input_shape = [get_new_symbol(), 1]
    input_placeholders = {
        "x": mb.placeholder(shape=input_shape),
    }

    def build(x):
        shape = mb.shape(x=x)
        return mb.cast(x=shape, dtype="int32")

    with Function(input_placeholders) as ssa_func:
        output_vars = build(**ssa_func.inputs)
        assert is_compatible_symbolic_vector(
            output_vars.sym_val, [get_new_symbol(), 1]
        )
def test_builder_to_backend_symbolic(self, use_cpu_only, backend, input_type):
    np_type = np.int32 if input_type == "int32" else np.float32
    mb_type = types.int32 if input_type == "int32" else types.fp32

    s0 = get_new_symbol()
    # Test variadic (rdar://59559656)
    input_placeholders = {
        "x": mb.placeholder(shape=(s0, 4, 5, 6), dtype=mb_type),
    }

    def build(x):
        return [mb.shape(x=x)]

    # Avoid shadowing the built-in `input`.
    input_val = np.random.rand(10, 4, 5, 6).astype(np_type)
    output = np.array([10, 4, 5, 6], dtype=np.int32)

    expected_output_types = (4, types.int32)
    expected_outputs = [output]
    input_values = {"x": input_val}

    run_compare_builder(
        build,
        input_placeholders,
        input_values,
        expected_output_types,
        expected_outputs,
        use_cpu_only=use_cpu_only,
        frontend_only=False,
        backend=backend,
    )
def type_inference(self):
    builtin_dtype = types.string_to_builtin(self.dtype.val)
    if builtin_dtype is None:
        raise ValueError("Unsupported dtype {}".format(self.dtype.val))

    # Replace string names with symbols.
    elem_shape_sym = []
    for s_var in self.elem_shape:
        # s is str or int
        s = s_var.val
        if s is None:
            msg = (
                "make_list elem_shape must be a tuple of consts. "
                "Tuple elem {} is not."
            )
            raise ValueError(msg.format(s_var.name))
        if isinstance(s, str):
            try:
                symbol = get_existing_symbol(s)
            except ValueError:
                # Must be a new symbol
                symbol = get_new_symbol(s)
            elem_shape_sym.append(symbol)
        else:
            elem_shape_sym.append(s)

    elem_type = types.tensor(builtin_dtype, elem_shape_sym)
    return types.list(
        elem_type,
        init_length=self.init_length.val,
        dynamic_length=self.dynamic_length.val,
    )
def test_builder_to_backend_symbolic(self, use_cpu_only, backend):
    s0 = get_new_symbol()

    val = np.array([[1.0, 2.0, -3.0], [4.0, -5.0, 6.0]], dtype=np.float32)
    input_placeholders = {"x": mb.placeholder(shape=(s0, 3))}
    input_values = {"x": val}

    def build(x):
        return mb.topk(x=x, k=2, axis=-1, ascending=True)

    expected_output_types = [
        (s0, 2, types.fp32),
        (s0, 2, types.int32),
    ]
    expected_outputs = [
        np.array([[-3.0, 1.0], [-5.0, 4.0]], dtype=np.float32),
        # The second output holds indices, so it is int32, not float32.
        np.array([[2, 0], [1, 0]], dtype=np.int32),
    ]

    run_compare_builder(
        build,
        input_placeholders,
        input_values,
        expected_output_types,
        expected_outputs,
        use_cpu_only=use_cpu_only,
        backend=backend,
    )
def test_builder_to_backend_symbolic(self, use_cpu_only, backend): s0 = get_new_symbol() # Test variadic (rdar://59559656) input_placeholders = { "x": mb.placeholder(shape=(s0, 4, 5, 6)), } def build(x): return [mb.flatten2d(x=x)] input = np.random.rand(10, 4, 5, 6) output = input.reshape(10, -1) expected_output_types = (s0, 120, types.fp32) expected_outputs = [output] input_values = {"x": input} run_compare_builder( build, input_placeholders, input_values, expected_output_types, expected_outputs, use_cpu_only=use_cpu_only, frontend_only=False, backend=backend, )
def type_inference(self):
    on_type = self.on_value.dtype
    off_type = self.off_value.dtype

    if on_type != off_type:
        raise TypeError(
            "Parameters on_value and off_value must have the same type."
        )

    if self.axis.val < -self.indices.rank - 1 or self.axis.val > self.indices.rank:
        raise IndexError(
            "Axis value {} is out of bounds for {} node {}".format(
                self.axis.val, self.op_type, self.name
            )
        )

    indices_shape = list(self.indices.shape)

    depth_value = self.one_hot_vector_size.sym_val
    if depth_value is None:
        depth_value = get_new_symbol()
    elif not is_symbolic(depth_value) and depth_value < 0:
        # Only compare against 0 when the depth is a concrete value.
        raise ValueError("Parameter one_hot_vector_size must be non-negative")

    retshape = indices_shape
    if self.axis.val < 0:
        cut = len(retshape) + self.axis.val + 1
    else:
        cut = self.axis.val
    retshape = retshape[0:cut] + [depth_value] + retshape[cut:]

    return types.tensor(on_type, retshape)
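# Sketch (plain Python, made-up values) of how the depth axis is spliced into
# the indices shape above: with indices shape (2, 3), depth 5, and axis -1,
# the one-hot output shape is (2, 3, 5).
indices_shape = [2, 3]
depth, axis = 5, -1
cut = len(indices_shape) + axis + 1 if axis < 0 else axis
retshape = indices_shape[:cut] + [depth] + indices_shape[cut:]
assert retshape == [2, 3, 5]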
def test_builder_to_backend_symbolic(self, use_cpu_only, backend):
    # TODO: variadic (rdar://59559656)
    s0 = get_new_symbol()

    val = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32)
    input_placeholders = {"x": mb.placeholder(shape=(s0, 3))}
    input_values = {"x": val}

    def build(x):
        return [
            mb.reduce_argmax(x=x, axis=1, keep_dims=True),
            mb.reduce_argmin(x=x, axis=0, keep_dims=True),
        ]

    expected_output_types = [(s0, 1, types.int32), (1, 3, types.int32)]
    expected_outputs = [
        np.array([[2], [2]], dtype=np.int32),
        # argmin over axis 0 with keep_dims=True has shape (1, 3).
        np.array([[0, 0, 0]], dtype=np.int32),
    ]

    run_compare_builder(
        build,
        input_placeholders,
        input_values,
        expected_output_types,
        expected_outputs,
        use_cpu_only=use_cpu_only,
        frontend_only=False,
        backend=backend,
    )
def test_builder_to_backend_symbolic(self, use_cpu_only, backend): s0 = get_new_symbol() input_placeholders = { "x": mb.placeholder(shape=(2, s0)), } def build(x): return [ mb.expand_dims(x=x, axes=[-1]), mb.expand_dims(x=x, axes=[1]), ] expected_output_types = [ (2, s0, 1, types.fp32), (2, 1, s0, types.fp32), ] expected_outputs = [ np.array([[[1], [2], [3]], [[4], [5], [6]]], dtype=np.float32), np.array([[[1, 2, 3]], [[4, 5, 6]]], dtype=np.float32), ] input_values = { "x": np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), } run_compare_builder( build, input_placeholders, input_values, expected_output_types, expected_outputs, use_cpu_only=use_cpu_only, frontend_only=False, backend=backend, )
def type_inference(self): in_shape = self.x.shape ret_shape = list(in_shape) pad = self.pad if len(pad.shape) != 1: raise ValueError("Pad should be a 1D tensor!") if self.mode and not self.mode.val in {'constant', 'reflect', 'replicate'}: raise ValueError("Pad mode should be one of {'constant', 'reflect', 'replicate'}") if pad.val is None: for i in range(self.pad.shape[0]//2): ret_shape[-self.pad.shape[0]//2+i] = get_new_symbol() else: pad = pad.val pad = pad.copy() if len(pad) % 2 != 0: raise ValueError("Number of elements in the argument Pad must be divisible by 2.") pad = pad.reshape(-1, 2) if pad.shape[0] > len(ret_shape): raise ValueError("Number of dimensions specified through pad must less than or equal to rank of input x") for i in range(len(pad)): ret_shape[-len(pad) + i] = ret_shape[-len(pad) + i] + pad[i][0] + pad[i][1] return types.tensor(self.x.dtype, tuple(ret_shape))
def test_builder_to_backend_symbolic(self, use_cpu_only, backend): s0 = get_new_symbol() val = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32) input_placeholders = {"x": mb.placeholder(shape=(s0, 3))} input_values = {"x": val} def build(x): return [ mb.reverse(x=x, axes=[1]), mb.reverse(x=x, axes=[0]), ] expected_output_types = [ (s0, 3, types.fp32), (s0, 3, types.fp32), ] expected_outputs = [ np.array([[3.0, 2.0, 1.0], [6.0, 5.0, 4.0]], dtype=np.float32), np.array([[4.0, 5.0, 6.0], [1.0, 2.0, 3.0]], dtype=np.float32), ] run_compare_builder( build, input_placeholders, input_values, expected_output_types, expected_outputs, use_cpu_only=use_cpu_only, backend=backend, )
def get_compat_shape(type1, type2):
    """
    For tensor types `type1`, `type2` of the same rank, return compat_shape
    (python list) where compat_shape[i] is an integer iff type1 and type2
    have the same integer shape on dim i, and a new symbol otherwise.

    Return None if `type1`, `type2` have different ranks or are non-tensor
    types.
    """
    if not types.is_tensor(type1) or not types.is_tensor(type2):
        return None

    s1 = type1.get_shape()
    s2 = type2.get_shape()

    if len(s1) != len(s2):
        return None

    compat_shape = []
    for d1, d2 in zip(s1, s2):
        if d1 != d2:
            compat_shape.append(get_new_symbol())
        else:
            compat_shape.append(d1)
    return compat_shape
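# Plain-Python mimic of get_compat_shape (a sketch, not the real helper:
# concrete dims only, with fresh symbols shown as the string "sym"):
# matching dims are kept, mismatched dims become symbols, and a rank
# mismatch returns None.
def compat_shape_demo(s1, s2):
    if len(s1) != len(s2):
        return None
    return [d1 if d1 == d2 else "sym" for d1, d2 in zip(s1, s2)]

assert compat_shape_demo((2, 5, 7), (2, 9, 7)) == [2, "sym", 7]
assert compat_shape_demo((2, 5), (2, 5, 7)) is None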
def type_inference(self): in_shape = self.x.shape ret_shape = list(in_shape) pad = self.pad if len(pad.shape) != 1: raise ValueError("Pad should be a 1D tensor!") if self.mode and not self.mode.val in { 'constant', 'reflect', 'replicate' }: raise ValueError( "Pad mode should be one of {'constant', 'reflect', 'replicate'}" ) if pad.val is None: for i in range(self.pad.shape[0] // 2): ret_shape[-self.pad.shape[0] // 2 + i] = get_new_symbol() else: pad = pad.val pad = pad.copy() pad = pad.reshape(-1, 2) for i in range(len(pad)): ret_shape[-len(pad) + i] = ret_shape[-len(pad) + i] + pad[i][0] + pad[i][1] return types.tensor(self.x.dtype, tuple(ret_shape))
def type_inference(self):
    if len(self.values) == 0:
        raise ValueError("Concat {} got 0 values".format(self.name))

    # Validate values have the same rank.
    rank = self.values[0].rank
    for v in self.values:
        if v.rank != rank:
            msg = "Input {} has rank {} != other inputs rank {}"
            raise ValueError(msg.format(v.name, v.rank, rank))

    # Check concat axis is within [-rank, rank).
    concat_axis = self.axis.val
    if concat_axis < 0:
        concat_axis += rank
    if rank > 0 and (concat_axis < 0 or concat_axis >= rank):
        msg = "In {} of op_type {}: axis out of bound for input (rank {})"
        raise ValueError(msg.format(self.name, self.op_type, rank))

    # Validate primitive types are compatible.
    dtype = self.values[0].dtype
    for v in self.values[1:]:
        new_dtype = promoted_primitive_type(v.dtype, dtype)
        if new_dtype is None:
            msg = "Incompatible primitive types concat: {} vs {}"
            raise ValueError(msg.format(v.dtype, dtype))
        dtype = new_dtype

    # Validate that non-axis dimensions match.
    retshape = list(self.values[0].shape)
    for v in self.values[1:]:
        for i in range(rank):
            if is_symbolic(retshape[i]) or is_symbolic(v.shape[i]):
                continue
            if i != concat_axis and retshape[i] != v.shape[i]:
                msg = 'Dimension mismatch in {} ("{}"): shapes {} vs. {}'
                raise ValueError(
                    msg.format(self.op_type, self.name, retshape, v.shape)
                )

    # Get the length of the concat dim.
    concat_dim_len = 0
    for v in self.values:
        if len(v.shape) == 0:
            taxis = 1
        else:
            taxis = v.shape[concat_axis]
        if is_symbolic(taxis):
            concat_dim_len = get_new_symbol()
            break
        concat_dim_len += taxis

    if len(retshape) == 0:
        retshape = [concat_dim_len]
    else:
        retshape[concat_axis] = concat_dim_len

    return types.tensor(dtype, retshape)
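# Sketch (plain Python, made-up shapes) of the concat-length rule above:
# non-axis dims must match, and the axis length is the sum of the inputs'
# axis lengths.
shapes = [(2, 3, 4), (2, 5, 4)]
axis = 1
out = list(shapes[0])
out[axis] = sum(s[axis] for s in shapes)
assert out == [2, 8, 4]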
def get_prelu_pattern():
    """
    Pattern:
        x1 = transpose(perm=(0,2,3,1))(x)
        y = a * relu(-1 * x1) + relu(x1)

    When x is rank 4 and "a" is of shape (C,), (1, C), (1, 1, C), or
    (1, 1, 1, C), this is equivalent to prelu with alpha = -a.flatten(),
    followed by a transpose with perm (0, 2, 3, 1).
    """

    @mb.program(
        input_specs=[
            mb.TensorSpec(
                shape=(
                    get_new_symbol(),
                    get_new_symbol(),
                    get_new_symbol(),
                    get_new_symbol(),
                )
            ),
        ]
    )
    def prelu_pattern(x):
        # The perm value can be anything; it is checked in the
        # "is_var_constraint_satisifed" method.
        x = mb.transpose(x=x, perm=[0, 1, 2, 3], name="transpose")
        return _prelu_pattern(x)

    return prelu_pattern
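# Numeric check (numpy only, made-up values) of the identity the docstring
# above relies on: a * relu(-x) + relu(x) equals an elementwise PReLU with
# alpha = -a.
import numpy as np

def relu(t):
    return np.maximum(t, 0.0)

x = np.array([-2.0, -0.5, 1.0, 3.0])
a = 0.25
lhs = a * relu(-1 * x) + relu(x)
prelu = np.where(x > 0, x, -a * x)  # PReLU with alpha = -a
assert np.allclose(lhs, prelu)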
def _create_placeholder(node):
    node.parse_from_attr()
    shape = []
    dtype = node.attr["dtype"]
    if types.is_tensor(node.datatype):
        shape = node.datatype.get_shape()
        # Replace unknown (None / negative) dims with fresh symbols.
        shape = tuple(get_new_symbol() if s is None or s < 0 else s for s in shape)
    return mb.placeholder(shape, dtype=dtype)
def __init__(self, shape, default=None):
    """
    The basic shape class to be set in InputType.

    Attributes
    ----------
    shape: list of (int), symbolic values, RangeDim object
        The valid shape of the input.
    default: tuple of int or None
        The default shape that is used for initializing the model, and
        set in the metadata of the model file. If None, then `shape` is
        used.
    """
    from coremltools.converters.mil.mil import get_new_symbol

    if not isinstance(shape, (list, tuple)):
        raise ValueError(
            "Shape should be list or tuple, got type {} instead".format(type(shape))
        )
    self.symbolic_shape = []
    shape = list(shape)
    for idx, s in enumerate(shape):
        if s is None or s == -1 or isinstance(s, RangeDim):
            sym = get_new_symbol()
            self.symbolic_shape.append(sym)
            if s is None or s == -1:
                shape[idx] = sym
        elif isinstance(s, (np.generic, six.integer_types)) or is_symbolic(s):
            self.symbolic_shape.append(s)
        else:
            raise ValueError(
                "Unknown type {} to build symbolic shape.".format(type(s))
            )

    self.shape = tuple(shape)
    if default is not None:
        if not isinstance(default, (list, tuple)):
            raise ValueError(
                "Default shape should be list or tuple, got type {} instead".format(
                    type(default)
                )
            )
        for idx, s in enumerate(default):
            if not isinstance(s, (np.generic, six.integer_types)) and not is_symbolic(s):
                raise ValueError(
                    "Default shape invalid, got error at index {} which is {}".format(
                        idx, s
                    )
                )
    else:
        default = []
        for idx, s in enumerate(self.shape):
            if isinstance(s, RangeDim):
                default.append(s.default)
            elif s is None or s == -1:
                default.append(self.symbolic_shape[idx])
            else:
                default.append(s)
    self.default = tuple(default)
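# Hedged usage sketch for the Shape class above via the public coremltools API
# (assuming this class backs ct.Shape, as in
# coremltools.converters.mil.input_types):
import coremltools as ct

s = ct.Shape(shape=(1, 3, ct.RangeDim(64, 512), -1), default=(1, 3, 224, 224))
# RangeDim and -1 entries become fresh symbols in s.shape, while s.default
# keeps the concrete values used to initialize the model.
assert s.default == (1, 3, 224, 224)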
def type_inference(self):
    if self.begin.rank != 1:
        raise ValueError(
            "begin should be 1-D tensor, got {}-D tensor instead".format(
                self.begin.rank
            )
        )
    if self.size.rank != 1:
        raise ValueError(
            "size should be 1-D tensor, got {}-D tensor instead".format(
                self.size.rank
            )
        )
    # begin.shape[0] and x.rank are already ints; calling len() on them,
    # as before, would raise a TypeError.
    if self.x.rank != self.begin.shape[0]:
        raise ValueError(
            "Length of begin {} doesn't match input rank {}.".format(
                self.begin.shape[0], self.x.rank
            )
        )
    if self.x.rank != self.size.shape[0]:
        raise ValueError(
            "Length of size {} doesn't match input rank {}.".format(
                self.size.shape[0], self.x.rank
            )
        )

    x_shape = self.x.shape
    ret_shape = []

    if self.size.sym_val is None:
        ret_shape = [get_new_symbol() for _ in range(self.x.rank)]
        return types.tensor(self.x.dtype, tuple(ret_shape))

    for idx, s in enumerate(self.size.sym_val):
        if is_symbolic(s):
            ret_shape.append(s)
        elif s != -1:
            ret_shape.append(s)
        elif self.begin.sym_val is not None:
            # size == -1 means "to the end of this dimension".
            ret_shape.append(x_shape[idx] - self.begin.sym_val[idx])
        else:
            ret_shape.append(get_new_symbol())

    return types.tensor(self.x.dtype, tuple(ret_shape))
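# Sketch (plain Python, made-up values) of the size resolution above:
# size[i] == -1 means "to the end of dim i", i.e. x_shape[i] - begin[i];
# other entries are taken as-is.
x_shape = [4, 10, 6]
begin = [0, 2, 1]
size = [4, -1, 3]
out = [x_shape[i] - begin[i] if s == -1 else s for i, s in enumerate(size)]
assert out == [4, 8, 3]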
def test_builder_to_backend_smoke(self, use_cpu_only, backend, is_symbolic):
    x = np.array(
        [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]],
        dtype=np.float32,
    ).reshape(1, 1, 4, 4)

    input_shape = list(x.shape)
    placeholder_input_shape = input_shape
    if is_symbolic:
        # Make the batch and channel dimensions symbolic.
        placeholder_input_shape[0] = get_new_symbol()
        placeholder_input_shape[1] = get_new_symbol()

    input_placeholder_dict = {"x": mb.placeholder(shape=placeholder_input_shape)}
    input_value_dict = {"x": x}

    def build(x):
        return mb.crop(x=x, crop_height=[0, 1], crop_width=[1, 1])

    expected_output_type = (
        placeholder_input_shape[0],
        placeholder_input_shape[1],
        3,
        2,
        types.fp32,
    )
    expected_output = (
        np.array([2, 3, 6, 7, 10, 11], dtype=np.float32).reshape(1, 1, 3, 2),
    )

    run_compare_builder(
        build,
        input_placeholder_dict,
        input_value_dict,
        expected_output_type,
        expected_output,
        use_cpu_only=use_cpu_only,
        frontend_only=False,
        backend=backend,
    )
def type_inference(self):
    if any_symbolic(self.shape.shape):
        # We can't infer any shape if shape has variable length.
        return types.tensor(self.x.dtype, (get_new_variadic_symbol(),))

    # shape has fixed length here.
    if self.shape.sym_val is None:
        shape = tuple([get_new_symbol() for _ in range(self.shape.shape[0])])
        return types.tensor(self.x.dtype, shape)

    t, _ = self._get_type_val()
    return t
def type_inference(self):
    if any_symbolic(self.shape.shape):
        # We can't infer any shape if shape has variable length. The element
        # type follows `value` in every branch (the first two branches
        # previously hard-coded fp32, inconsistent with the last).
        return types.tensor(self.value.dtype, (get_new_variadic_symbol(),))

    # shape has fixed length here.
    if self.shape.sym_val is None:
        ret_shape = tuple([get_new_symbol() for _ in range(self.shape.shape[0])])
        return types.tensor(self.value.dtype, ret_shape)

    return types.tensor(self.value.dtype, tuple(self.shape.sym_val.tolist()))
def test_builder_to_backend_smoke(self, use_cpu_only, backend, is_symbolic):
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)

    input_shape = x.shape
    if is_symbolic:
        input_shape = [get_new_symbol(), get_new_symbol()]

    input_placeholders = {"x": mb.placeholder(shape=input_shape)}
    input_values = {"x": x}

    def build(x):
        return [
            mb.transpose(x=x, perm=(0, 1)),
            mb.transpose(x=x, perm=(1, 0)),
            mb.transpose(x=x, perm=(-1, 0)),
            mb.transpose(x=x, perm=(-2, -1)),
        ]

    d0 = input_shape[0]
    d1 = input_shape[1]
    expected_output_types = [
        (d0, d1, types.fp32),
        (d1, d0, types.fp32),
        (d1, d0, types.fp32),
        (d0, d1, types.fp32),
    ]
    expected_outputs = [x, x.T, x.T, x]

    run_compare_builder(
        build,
        input_placeholders,
        input_values,
        expected_output_types,
        expected_outputs,
        use_cpu_only=use_cpu_only,
        frontend_only=False,
        backend=backend,
    )
def _get_type_val(self):
    x_type = self.x.dtype
    x_shape = self.x.shape
    x_vol = np.prod(x_shape)

    # shape is const, and thus sym_val is not None.
    sym_shape = self.shape.sym_val
    sym_shape = [get_new_symbol() if d == -1 else d for d in sym_shape]

    ret_shape = reshape.enforce_volumetric_constraint(x_vol, sym_shape)
    ret_val = None
    if self.x.val is not None and all(isscalar(a) for a in ret_shape):
        ret_val = reshape_with_symbol(self.x.val, ret_shape)
    return types.tensor(x_type, tuple(ret_shape)), ret_val
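# Sketch (plain Python) of the volumetric constraint assumed above: an unknown
# entry (here kept as -1 for simplicity) is solved so the output volume
# matches the input volume. The helper's behavior is inferred from context,
# not the real enforce_volumetric_constraint.
x_vol = 24  # e.g. input shape (2, 3, 4)
sym_shape = [2, -1, 4]
known = 1
for d in sym_shape:
    if d != -1:
        known *= d
resolved = [x_vol // known if d == -1 else d for d in sym_shape]
assert resolved == [2, 3, 4]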
def value_inference(self):
    is_all_rank_zero = all(v.rank == 0 for v in self.values)
    values = [
        v.sym_val if v.sym_val is not None else get_new_symbol()
        for v in self.values
    ]

    # Only rank-0 (scalar) inputs may be stacked symbolically.
    if any(is_symbolic(v) for v in values) and not is_all_rank_zero:
        return None

    return np.stack(values, self.axis.val)
def test_invalid_pattern3(self):
    '''
    Invalid pattern: one of the reduction dims is symbolic.
    '''

    @mb.program(input_specs=[mb.TensorSpec(shape=(3, get_new_symbol(), 6))])
    def prog(x):
        x1 = mb.reduce_sum(x=x, axes=[-1, -2], keep_dims=True)
        x1 = mb.real_div(x=x1, y=30.0)
        return x1

    pass_name = "common::fuse_reduce_mean"
    PASS_REGISTRY[pass_name](prog)
    assert get_op_types_in_program(prog) == ["reduce_sum", "real_div"]
def __init__(self, lower_bound=1, upper_bound=-1, default=None, symbol=None):
    """
    A class that can be used to give a range of accepted shapes.

    Parameters
    ----------
    lower_bound: (int)
        The minimum valid value for the shape.
    upper_bound: (int)
        The maximum valid value for the shape.
        Set to ``-1`` if there is no upper limit.
    default: (int) or None
        The default value used for initializing the model, and set in the
        input shape field of the model file.
        If set to ``None``, ``lower_bound`` is used as the default.
    symbol: (str)
        Optional symbol name for the dim. A symbol name is autogenerated
        if not specified.
    """
    if symbol is None:
        from coremltools.converters.mil.mil import get_new_symbol
        self.symbol = get_new_symbol()
    else:
        from coremltools.converters.mil.mil import Symbol
        self.symbol = Symbol(symbol)
    self.lower_bound = lower_bound
    self.upper_bound = upper_bound
    if default is None:
        self.default = lower_bound
    else:
        if default < lower_bound:
            raise ValueError(
                "Default value {} is less than minimum value ({}) for range".format(
                    default, lower_bound
                )
            )
        if upper_bound > 0 and default > upper_bound:
            raise ValueError(
                "Default value {} is greater than maximum value ({}) for range".format(
                    default, upper_bound
                )
            )
        self.default = default
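# Hedged usage sketch through the public API (assuming this class backs
# ct.RangeDim): the default falls back to lower_bound when not given, and a
# fresh symbol is minted unless `symbol` is passed.
import coremltools as ct

d = ct.RangeDim(lower_bound=32, upper_bound=256, default=64)
assert d.default == 64
assert ct.RangeDim(lower_bound=32).default == 32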