def __getitem_(g, self, i):
    """ONNX symbolic for indexing (``__getitem__``).

    Tensor lists lower to the ONNX ``SequenceAt`` op; any other input is
    delegated to the opset 9 implementation.
    """
    if not sym_help._is_tensor_list(self):
        # Plain tensor indexing: reuse the opset 9 lowering.
        from torch.onnx.symbolic_opset9 import __getitem_ as getitem
        return getitem(g, self, i)
    # SequenceAt requires that the input be a List of Tensors
    return g.op("SequenceAt", self, i)
def _len(g, self):
    """ONNX symbolic for ``len``.

    Sequence inputs (tensor lists or ``SplitToSequence`` outputs) map to
    ``SequenceLength``; for a plain tensor, the length is dimension 0 of its
    shape, squeezed down to a scalar.
    """
    is_sequence = symbolic_helper._is_tensor_list(self)
    if is_sequence or self.node().kind() == "onnx::SplitToSequence":
        return g.op("SequenceLength", self)
    # len(tensor) == tensor.shape[0]; squeeze the 1-element size tensor.
    dim0 = size(g, self, g.op("Constant", value_t=torch.LongTensor([0])))
    return symbolic_helper._squeeze_helper(g, dim0, [0])
def add(g, self, other, alpha=None):
    """ONNX symbolic for ``add``.

    When ``self`` is a tensor list, the tensors of ``other`` — which must be a
    statically known ``prim::ListConstruct`` — are appended one by one via
    ``SequenceInsert``. Otherwise this falls back to the opset 9 add.
    """
    if sym_help._is_value(self) and sym_help._is_tensor_list(self):
        # Only a statically constructed list can be unpacked at export time.
        if other.node().kind() != "prim::ListConstruct":
            return _unimplemented("add", "does not support adding dynamic tensor list to another")
        result = self
        for tensor in sym_help._unpack_list(other):
            result = g.op("SequenceInsert", result, tensor)
        return result
    return torch.onnx.symbolic_opset9.add(g, self, other, alpha)
def verify_inferred_shape(graph):
    """Raise ``RuntimeError`` if any node output in ``graph`` is missing an
    inferred type, scalar type, or shape."""
    # Check every node in graph has type properly assigned.
    for node in graph.nodes():
        for out in node.outputs():
            is_tensor = _is_tensor(out)
            if not (is_tensor or _is_tensor_list(out) or _is_none(out)):
                raise RuntimeError(
                    "Output of node is neither type Tensor nor type list of Tensor: ",
                    out)
            if is_tensor:
                if out.type().scalarType() is None:
                    raise RuntimeError(
                        "Output of node does not have type assigned", out)
                if out.type().dim() is None:
                    raise RuntimeError(
                        "Output of node does not have shape assigned", out)
def _len(g, self):
    """ONNX symbolic for ``len``: ``SequenceLength`` for sequence inputs,
    ``Size`` for plain tensors."""
    if _is_tensor_list(self):
        return g.op("SequenceLength", self)
    if self.node().kind() == "onnx::SplitToSequence":
        return g.op("SequenceLength", self)
    return g.op("Size", self)
def _len(g, self):
    """ONNX symbolic for ``len``.

    Sequences use ``SequenceLength``; for a plain tensor the length is the
    size along dim 0, squeezed from a 1-element tensor to a scalar.
    """
    is_sequence = _is_tensor_list(self)
    if is_sequence or self.node().kind() == "onnx::SplitToSequence":
        return g.op("SequenceLength", self)
    shape_dim0 = size(g, self, g.op("Constant", value_t=torch.LongTensor([0])))
    return g.op('Squeeze', shape_dim0, axes_i=[0])