def scatter_add(g, self, dim, index, src):
    if symbolic_helper.is_caffe2_aten_fallback():
        return g.at("scatter", self, dim, index, src, overload_name="src")

    src_type = src.type().scalarType()
    src_sizes = symbolic_helper._get_tensor_sizes(src)
    index_sizes = symbolic_helper._get_tensor_sizes(index)

    if src_sizes != index_sizes:
        return symbolic_helper._unimplemented(
            "scatter_add",
            f"`index` ({index_sizes}) should have the same dimensionality as `src` ({src_sizes})",
        )

    src = symbolic_helper._maybe_get_scalar(src)
    if symbolic_helper._is_value(src):
        return g.op("ScatterElements", self, index, src, axis_i=dim, reduction_s="add")
    else:
        # Check if scalar "src" has same type as self (PyTorch allows different
        # type for scalar src (but not when src is tensor)). If not, insert Cast node.
        if self.type().scalarType() != src_type:
            src = g.op(
                "Cast",
                src,
                to_i=symbolic_helper.cast_pytorch_to_onnx[self.type().scalarType()],
            )
        return g.op(
            "ScatterElements",
            self,
            index,
            src,
            axis_i=dim,
            reduction_s="add",
        )
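For reference, ONNX ScatterElements with reduction="add" reproduces the accumulation of eager-mode scatter_add; a small illustrative snippet (the tensors below are made up):

import torch

self_t = torch.zeros(5)
index = torch.tensor([0, 1, 1, 3])
src = torch.tensor([1.0, 2.0, 3.0, 4.0])

# Each src[i] is accumulated into self_t[index[i]] along dim 0, which is the
# behaviour ScatterElements with reduction="add" reproduces in the graph.
out = self_t.scatter_add(0, index, src)
print(out)  # tensor([1., 5., 0., 4., 0.])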
def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse):
    output = g.op(
        "org.pytorch.aten::ATen",
        weight,
        indices,
        padding_idx,
        scale_grad_by_freq,
        sparse,
        operator_s="embedding",
    )
    indices_shape = _get_tensor_sizes(indices)
    if indices_shape is not None and hasattr(weight.type(), "with_sizes"):
        output_type = weight.type().with_sizes(
            indices_shape + [_get_tensor_dim_size(weight, 1)]
        )
        output.setType(output_type)
    return output
def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse):
    output = g.op(
        "com.microsoft::ATenOp",
        weight,
        indices,
        padding_idx,
        scale_grad_by_freq,
        sparse,
        name_s='aten::embedding',
    )
    indices_shape = _get_tensor_sizes(indices)
    if indices_shape is not None and hasattr(weight.type(), 'with_sizes'):
        output_type = weight.type().with_sizes(
            indices_shape + [_get_tensor_dim_size(weight, 1)])
        output.setType(output_type)
    return output
def prelu(g, self, weight):
    self_rank = symbolic_helper._get_tensor_rank(self)
    weight_sizes = symbolic_helper._get_tensor_sizes(weight)
    if self_rank is not None and self_rank > 2:
        weight = g.op("Unsqueeze", weight, axes_i=list(range(1, self_rank - 1)))
    elif self_rank == 0 and weight_sizes == [1]:
        # self and weight are both scalar but weight has rank == 1, squeeze weight.
        weight = symbolic_helper._squeeze_helper(g, weight, [0])
    if symbolic_helper._try_get_scalar_type(self):
        old_type, self, weight = _try_cast_integer_to_float(g, self, weight)
        return _cast_to_type(g, g.op("PRelu", self, weight), old_type)
    else:
        return g.op("PRelu", self, weight)
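The Unsqueeze above exists so that a per-channel slope of shape [C] lines up with the channel dimension of a higher-rank input; a rough eager-mode illustration of the broadcasting it sets up (shapes invented for the example):

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 4, 4)   # rank-4 input with 3 channels
weight = torch.randn(3)       # per-channel PReLU slope of shape [C]

# ONNX PRelu applies the slope elementwise, so the exporter unsqueezes the
# weight to [C, 1, 1] to broadcast it against dims 1..3 of the input.
w = weight.reshape(3, 1, 1)
out = torch.where(x >= 0, x, w * x)

assert torch.allclose(out, F.prelu(x, weight))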
def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse):
    custom_attributes_json = (
        '{'
        f'"padding_idx":{str(padding_idx)},'
        f'"scale_grad_by_freq":{str(scale_grad_by_freq).lower()},'
        f'"sparse":{str(sparse).lower()}'
        '}'
    )
    output = g.op(
        "com.microsoft::ATenOp",
        weight,
        indices,
        name_s='aten::embedding',
        custom_attributes_json_s=custom_attributes_json,
    )
    # do shape inference and set it via setType
    indices_shape = _get_tensor_sizes(indices)
    if indices_shape is not None and hasattr(weight.type(), 'with_sizes'):
        output_type = weight.type().with_sizes(
            indices_shape + [_get_tensor_dim_size(weight, 1)])
        output.setType(output_type)
    return output
def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse):
    custom_attributes_json = (
        "{"
        f'"padding_idx":{str(padding_idx)},'
        f'"scale_grad_by_freq":{str(scale_grad_by_freq).lower()},'
        f'"sparse":{str(sparse).lower()}'
        "}"
    )
    output = g.at(
        "embedding",
        weight,
        indices,
        custom_attributes_json_s=custom_attributes_json,
    )
    # do shape inference and set it via setType
    indices_shape = _get_tensor_sizes(indices)
    if indices_shape is not None and hasattr(weight.type(), "with_sizes"):
        output_type = weight.type().with_sizes(
            indices_shape + [_get_tensor_dim_size(weight, 1)])
        output.setType(output_type)
    return output
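Any of these embedding symbolics is meant to replace the default aten::embedding lowering at export time. A minimal, hypothetical registration sketch; the opset version is arbitrary, and it assumes a PyTorch version that lets torch.onnx.register_custom_op_symbolic override aten ops and that the helpers the symbolic relies on (_get_tensor_sizes, _get_tensor_dim_size) are in scope:

import torch

# Hypothetical registration: use one of the embedding symbolics above for
# every aten::embedding encountered during export.
torch.onnx.register_custom_op_symbolic("aten::embedding", embedding, 12)

model = torch.nn.Embedding(num_embeddings=10, embedding_dim=4)
indices = torch.tensor([[1, 2, 3]])

# The exported graph then contains the custom-domain node emitted by the
# symbolic (e.g. com.microsoft::ATenOp), so it targets runtimes that
# implement that contrib op, such as ONNX Runtime.
torch.onnx.export(model, (indices,), "embedding.onnx", opset_version=12)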
def repeat_interleave(g, self, repeats, dim=None):
    from torch.onnx.symbolic_opset9 import reshape

    input = self
    final_dim = dim
    # if dim is None flatten
    # By default, use the flattened input array, and return a flat output array
    if sym_help._is_none(dim):
        input = reshape(g, self, g.op("Constant", value_t=torch.tensor([-1])))
        dim = 0
    else:
        dim = sym_help._maybe_get_scalar(dim)

    repeats_dim = sym_help._get_tensor_rank(repeats)
    repeats_sizes = sym_help._get_tensor_sizes(repeats)
    input_sizes = sym_help._get_tensor_sizes(input)
    if repeats_dim is None:
        raise RuntimeError(
            'Unsupported: ONNX export of repeat_interleave for unknown '
            'repeats rank.')
    if repeats_sizes is None:
        raise RuntimeError(
            'Unsupported: ONNX export of repeat_interleave for unknown '
            'repeats size.')
    if input_sizes is None:
        raise RuntimeError(
            'Unsupported: ONNX export of repeat_interleave for unknown '
            'input size.')

    # Handle cases where dim is negative
    if dim < 0:
        dim += len(input_sizes)

    output_sizes = input_sizes.copy()
    perm_i = [0]
    for idx, input_size in enumerate(input_sizes):
        perm_i.append(idx + 1)
        if input_size is None:
            output_sizes[idx], input_sizes[idx] = 0, -1
    perm_i[0], perm_i[dim] = perm_i[dim], perm_i[0]

    # Cases when repeats is a single value tensor and dim has unknown input size
    if (repeats_dim == 0 or (repeats_dim == 1 and repeats_sizes[0] == 1)) \
            and output_sizes[dim] == 0:
        if not sym_help._is_tensor(repeats):
            repeats = g.op("Constant", value_t=torch.LongTensor(repeats))
        reps = sym_help._size_helper(g, input, dim)
        reps = unsqueeze(g, reps, 0)
        repeats = g.op("Expand", repeats, reps)
    # There are cases when the repeats are a 1-d tensor with multiple repeats, but dim
    # is provided along one of the dynamic axes. A simple example would be
    # input.shape -> [1, 1, *] where * represents the dynamic axes, and dim = 2.
    # Repeat interleaving can then be performed in pytorch when the value of * matches
    # the number of elements in repeats; for example if * -> 2, the number of repeats
    # should be 2 as well.
    else:
        return torch.onnx.symbolic_opset9.repeat_interleave(
            g, self, repeats, final_dim)

    reps_like = g.op("ConstantOfShape", g.op("Shape", repeats),
                     value_t=torch.tensor([1], dtype=torch.long))
    r_splits = split(g, repeats, reps_like, 0)
    i_splits = split(g, input, reps_like, dim)

    output_sizes[dim], input_sizes[dim] = -1, 1

    # Create a loop to iterate over each value along the dimension
    # and perform individual interleaving using the repeats tensor
    # Loop is of the following pattern
    # input (trip_count, cond)
    #   int trip_count = ...;
    #   bool cond = ...;
    #   for (int i=0; i < trip_count && cond; ++i) {
    #     cond = ...;
    #   }

    # Loop conditions
    loop_condition = g.op("Constant", value_t=torch.tensor(1))
    loop_condition = g.op("Cast", loop_condition, to_i=9)
    loop_len = reps
    loop = g.op("Loop", loop_len, loop_condition)

    # Loop inputs
    loop_block = _add_block(loop.node())
    block_input_iter = _add_input_to_block(loop_block)
    cond = _add_input_to_block(loop_block)

    r_split = loop_block.op("SequenceAt", r_splits, block_input_iter)
    i_split = loop_block.op("SequenceAt", i_splits, block_input_iter)

    i_split = unsqueeze(loop_block, i_split, dim + 1)
    r_concat = [
        loop_block.op("Constant", value_t=torch.LongTensor(input_sizes[:dim + 1])),
        r_split,
        loop_block.op("Constant", value_t=torch.LongTensor(input_sizes[dim + 1:]))
    ]
    r_concat = loop_block.op("Concat", *r_concat, axis_i=0)
    i_split = expand(loop_block, i_split, r_concat, None)
    i_split = reshape(loop_block, i_split,
                      g.op("Constant", value_t=torch.LongTensor(output_sizes)))

    # Loop outputs
    cond_out = loop_block.op("Cast", loop_condition, to_i=9)
    _add_output_to_block(loop_block, cond_out)
    _add_output_to_block(loop_block, i_split)

    loop_out = loop.node().output()

    # In this loop, the outputs are scan outputs and are concatenated along
    # the zero'th dimension (by default). In order to avoid this and concatenate
    # along the dimension provided, some post-processing is required
    loop_out = g.op("Transpose", loop_out, perm_i=perm_i)
    return reshape(g, loop_out,
                   g.op("Constant", value_t=torch.LongTensor(output_sizes)))
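For orientation, the behaviour this loop reproduces is eager-mode torch.repeat_interleave with a per-slice repeats vector; a tiny illustrative example (values made up):

import torch

x = torch.tensor([[1, 2],
                  [3, 4]])
repeats = torch.tensor([1, 3])

# Row 0 is emitted once and row 1 three times; the exported Loop builds the
# same result by expanding each slice along `dim` by its own repeat count.
out = torch.repeat_interleave(x, repeats, dim=0)
print(out)
# tensor([[1, 2],
#         [3, 4],
#         [3, 4],
#         [3, 4]])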
def repeat_interleave(g, self, repeats, dim=None, output_size=None):
    input = self
    final_dim = dim
    # if dim is None flatten
    # By default, use the flattened input array, and return a flat output array
    if sym_help._is_none(dim):
        input = sym_help._reshape_helper(
            g, self, g.op("Constant", value_t=torch.tensor([-1])))
        dim = 0
    else:
        dim = sym_help._maybe_get_scalar(dim)

    repeats_dim = sym_help._get_tensor_rank(repeats)
    repeats_sizes = sym_help._get_tensor_sizes(repeats)
    input_sizes = sym_help._get_tensor_sizes(input)
    if repeats_dim is None:
        raise RuntimeError(
            "Unsupported: ONNX export of repeat_interleave for unknown "
            "repeats rank.")
    if repeats_sizes is None:
        raise RuntimeError(
            "Unsupported: ONNX export of repeat_interleave for unknown "
            "repeats size.")
    if input_sizes is None:
        raise RuntimeError(
            "Unsupported: ONNX export of repeat_interleave for unknown "
            "input size.")

    # Handle cases where dim is negative
    if dim < 0:
        dim += len(input_sizes)

    output_sizes = input_sizes.copy()
    for idx, input_size in enumerate(input_sizes):
        if input_size is None:
            output_sizes[idx], input_sizes[idx] = 0, -1

    cond_dynamic_repeats = (repeats_dim == 1 and repeats_sizes[0] is None)
    # If input size is dynamic or repeats vector is dynamic
    if output_sizes[dim] == 0 or cond_dynamic_repeats:
        reps = sym_help._size_helper(g, input, dim)
        reps = unsqueeze(g, reps, 0)
        # Check if repeats vector is a single integer value
        # or a single dimension tensor with non-dynamic values
        if repeats_dim == 0 or (repeats_dim == 1 and repeats_sizes[0] == 1):
            if not sym_help._is_tensor(repeats):
                repeats = g.op("Constant", value_t=torch.LongTensor(repeats))
            repeats = g.op("Expand", repeats, reps)
        # Check if repeats is dynamic
        # As repeats is dynamic, we use a where node as a substitute for the if statement
        # If repeats_dim = 1, expand repeats otherwise use original tensor
        elif cond_dynamic_repeats:
            repeat_dim = sym_help._size_helper(
                g, repeats, g.op("Constant", value_t=torch.LongTensor([0])))
            repeat_cond = g.op("Equal", repeat_dim,
                               g.op("Constant", value_t=torch.LongTensor([1])))
            repeats = where(g, repeat_cond, g.op("Expand", repeats, reps), repeats)
    # There are cases when the repeats are a 1-d tensor with multiple repeats, but dim
    # is provided along one of the dynamic axes. A simple example would be
    # input.shape -> [1, 1, *] where * represents the dynamic axes, and dim = 2.
    # Repeat interleaving can then be performed in pytorch when the value of * matches
    # the number of elements in repeats; for example if * -> 2, the number of repeats
    # should be 2 as well.
    else:
        return torch.onnx.symbolic_opset9.repeat_interleave(
            g, self, repeats, final_dim)

    reps_like = g.op("ConstantOfShape", g.op("Shape", repeats),
                     value_t=torch.tensor([1], dtype=torch.long))
    r_splits = split(g, repeats, reps_like, 0)
    i_splits = split(g, input, reps_like, dim)

    output_sizes[dim], input_sizes[dim] = -1, 1

    # Create a loop to iterate over each value along the dimension
    # and perform individual interleaving using the repeats tensor
    # Loop is of the following pattern
    # input (trip_count, cond)
    #   int trip_count = ...;
    #   bool cond = ...;
    #   for (int i=0; i < trip_count && cond; ++i) {
    #     cond = ...;
    #   }

    # Loop conditions
    loop_condition = g.op("Constant", value_t=torch.tensor(1))
    loop_condition = g.op("Cast", loop_condition, to_i=9)
    loop_len = reps

    # Create an empty sequence to store final expansions
    final_splits = g.op("SequenceEmpty")
    loop = g.op("Loop", loop_len, loop_condition, final_splits)

    # Loop inputs
    loop_block = _add_block(loop.node())
    block_input_iter = _add_input_to_block(loop_block)
    cond = _add_input_to_block(loop_block)
    final_splits = _add_input_to_block(loop_block)

    r_split = loop_block.op("SequenceAt", r_splits, block_input_iter)
    i_split = loop_block.op("SequenceAt", i_splits, block_input_iter)

    i_split = unsqueeze(loop_block, i_split, dim + 1)
    r_concat = [
        loop_block.op("Constant", value_t=torch.LongTensor(input_sizes[:dim + 1])),
        r_split,
        loop_block.op("Constant", value_t=torch.LongTensor(input_sizes[dim + 1:]))
    ]
    r_concat = loop_block.op("Concat", *r_concat, axis_i=0)
    i_split = expand(loop_block, i_split, r_concat, None)
    i_split = sym_help._reshape_helper(
        loop_block, i_split,
        g.op("Constant", value_t=torch.LongTensor(output_sizes)))
    final_splits = loop_block.op("SequenceInsert", final_splits, i_split)

    # Loop outputs
    cond_out = loop_block.op("Cast", loop_condition, to_i=9)
    _add_output_to_block(loop_block, cond_out)
    _add_output_to_block(loop_block, final_splits)

    loop_out = loop.node().output()
    loop_out = g.op("ConcatFromSequence", loop_out, axis_i=dim)
    return loop_out
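A hedged export sketch that would route aten::repeat_interleave through the dynamic-shape branch above; the module, shapes, and opset are illustrative assumptions, not taken from the snippets:

import torch

class Tile(torch.nn.Module):
    def forward(self, x):
        # Per-slice repeats along dim 1; this is the pattern the symbolic
        # above lowers to Loop/SequenceInsert/ConcatFromSequence.
        return torch.repeat_interleave(x, torch.tensor([2, 3]), dim=1)

x = torch.randn(4, 2)
torch.onnx.export(
    Tile(), (x,), "repeat_interleave.onnx",
    opset_version=13,
    input_names=["x"],
    # Marking the repeat dimension as dynamic makes its size unknown at export
    # time, which sends the lowering down the loop-based branch; at runtime
    # that axis must still match the length of the repeats vector.
    dynamic_axes={"x": {1: "width"}},
)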