def ctc_loss(
    logits: NodeInput,
    logit_length: NodeInput,
    labels: NodeInput,
    label_length: NodeInput,
    blank_index: Optional[NodeInput] = None,
    preprocess_collapse_repeated: bool = False,
    ctc_merge_repeated: bool = True,
    unique: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs CTCLoss.

    @param logits: 3-D tensor of logits.
    @param logit_length: 1-D tensor of lengths for each object from a batch.
    @param labels: 2-D tensor of labels for which likelihood is estimated using logits.
    @param label_length: 1-D tensor of length for each label sequence.
    @param blank_index: Scalar used to mark a blank index.
    @param preprocess_collapse_repeated: Flag for preprocessing labels before loss calculation.
    @param ctc_merge_repeated: Flag for merging repeated characters in a potential alignment.
    @param unique: Flag to find unique elements in a target.
    @param name: Optional output node name.
    @return The new node which performs CTCLoss.
    """
    if blank_index is not None:
        inputs = as_nodes(logits, logit_length, labels, label_length, blank_index)
    else:
        inputs = as_nodes(logits, logit_length, labels, label_length)

    attributes = {
        "preprocess_collapse_repeated": preprocess_collapse_repeated,
        "ctc_merge_repeated": ctc_merge_repeated,
        "unique": unique,
    }

    return _get_node_factory_opset4().create("CTCLoss", inputs, attributes)
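
# A minimal usage sketch for ctc_loss, with hypothetical shapes and values. It
# assumes plain numpy arrays and Python scalars are accepted as NodeInput and
# converted to Constant nodes by `as_nodes`, and that numpy is imported as `np`:
#
#     batch, time, classes = 2, 10, 5
#     logits = np.random.rand(batch, time, classes).astype(np.float32)
#     logit_length = np.array([10, 8], dtype=np.int32)   # per-object lengths
#     labels = np.zeros((batch, time), dtype=np.int32)   # padded label matrix
#     label_length = np.array([4, 3], dtype=np.int32)    # real label lengths
#     loss = ctc_loss(logits, logit_length, labels, label_length,
#                     blank_index=classes - 1)
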
def ctc_greedy_decoder_seq_len(
    data: NodeInput,
    sequence_length: NodeInput,
    blank_index: Optional[NodeInput] = None,
    merge_repeated: bool = True,
    classes_index_type: str = "i32",
    sequence_length_type: str = "i32",
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs CTCGreedyDecoderSeqLen.

    @param data: The input 3D tensor. Shape: [batch_size, seq_length, num_classes].
    @param sequence_length: Input 1D tensor with sequence length. Shape: [batch_size].
    @param blank_index: Scalar or 1D tensor that specifies the class index to use for
                        the blank class. Optional parameter. Default value is num_classes-1.
    @param merge_repeated: Flag for merging repeated labels during the CTC calculation.
    @param classes_index_type: Type of the output tensor with classes indices.
    @param sequence_length_type: Type of the output tensor with sequence lengths.
    @param name: Optional output node name.
    @return: The new node which performs CTCGreedyDecoderSeqLen.
    """
    if blank_index is not None:
        inputs = as_nodes(data, sequence_length, blank_index)
    else:
        inputs = as_nodes(data, sequence_length)

    attributes = {
        "merge_repeated": merge_repeated,
        "classes_index_type": classes_index_type,
        "sequence_length_type": sequence_length_type,
    }

    return _get_node_factory_opset6().create("CTCGreedyDecoderSeqLen", inputs, attributes)
def deformable_convolution(
    data: NodeInput,
    offsets: NodeInput,
    filters: NodeInput,
    strides: List[int],
    pads_begin: List[int],
    pads_end: List[int],
    dilations: List[int],
    mask: Optional[NodeInput] = None,
    auto_pad: str = "EXPLICIT",
    group: int = 1,
    deformable_group: int = 1,
    bilinear_interpolation_pad: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs deformable convolution operation.

    @param data: The node providing data batch tensor.
    @param offsets: The node providing offset tensor.
    @param filters: The node providing filters tensor.
    @param strides: The distance (in pixels) to slide the filter on the feature map
                    over the axes.
    @param pads_begin: The number of pixels to add to the beginning along each axis.
    @param pads_end: The number of pixels to add to the end along each axis.
    @param dilations: The distance in width and height between elements (weights) in the filter.
    @param mask: The node providing modulation scalar (mask) tensor.
    @param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid.
    @param group: The number of groups which both output and input should be split into.
    @param deformable_group: The number of groups which deformable values and output should be
                             split into along the channel axis.
    @param bilinear_interpolation_pad: The flag that determines the mode of bilinear
                                       interpolation execution.
    @param name: The optional new name for output node.
    @return New node performing deformable convolution operation.
    """
    if mask is None:
        inputs = as_nodes(data, offsets, filters)
    else:
        inputs = as_nodes(data, offsets, filters, mask)

    return _get_node_factory_opset8().create(
        "DeformableConvolution",
        inputs,
        {
            "strides": strides,
            "pads_begin": pads_begin,
            "pads_end": pads_end,
            "dilations": dilations,
            "auto_pad": auto_pad,
            "group": group,
            "deformable_group": deformable_group,
            "bilinear_interpolation_pad": bilinear_interpolation_pad,
        },
    )
def non_max_suppression(
    boxes: NodeInput,
    scores: NodeInput,
    max_output_boxes_per_class: Optional[NodeInput] = None,
    iou_threshold: Optional[NodeInput] = None,
    score_threshold: Optional[NodeInput] = None,
    soft_nms_sigma: Optional[NodeInput] = None,
    box_encoding: str = "corner",
    sort_result_descending: bool = True,
    output_type: str = "i64",
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs NonMaxSuppression.

    @param boxes: Tensor with box coordinates.
    @param scores: Tensor with box scores.
    @param max_output_boxes_per_class: Tensor specifying maximum number of boxes
                                       to be selected per class.
    @param iou_threshold: Tensor specifying intersection over union threshold.
    @param score_threshold: Tensor specifying minimum score to consider box for the processing.
    @param soft_nms_sigma: Tensor specifying the sigma parameter for Soft-NMS.
    @param box_encoding: Format of boxes data encoding.
    @param sort_result_descending: Flag that specifies whether to sort selected boxes
                                   across batches or not.
    @param output_type: Output element type.
    @param name: An optional name of the output node.
    @return: The new node which performs NonMaxSuppression.
    """
    if max_output_boxes_per_class is None:
        max_output_boxes_per_class = make_constant_node(0, np.int64)
    if iou_threshold is None:
        iou_threshold = make_constant_node(0, np.float32)
    if score_threshold is None:
        score_threshold = make_constant_node(0, np.float32)

    if soft_nms_sigma is None:
        inputs = as_nodes(
            boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold
        )
    else:
        inputs = as_nodes(
            boxes, scores, max_output_boxes_per_class, iou_threshold,
            score_threshold, soft_nms_sigma
        )

    attributes = {
        "box_encoding": box_encoding,
        "sort_result_descending": sort_result_descending,
        "output_type": output_type,
    }

    return _get_node_factory_opset5().create("NonMaxSuppression", inputs, attributes)
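
# A minimal usage sketch for non_max_suppression with hypothetical values; note
# that omitted thresholds default to zero-valued Constants, as the body above
# shows. Numpy inputs are assumed to be convertible to Constants by `as_nodes`:
#
#     boxes = np.array([[[0.0, 0.0, 1.0, 1.0],
#                        [0.0, 0.1, 1.0, 1.1]]], dtype=np.float32)  # [1, 2, 4]
#     scores = np.array([[[0.9, 0.8]]], dtype=np.float32)           # [1, 1, 2]
#     nms = non_max_suppression(boxes, scores,
#                               max_output_boxes_per_class=np.int64(1),
#                               iou_threshold=np.float32(0.5))
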
def roi_align(
    data: NodeInput,
    rois: NodeInput,
    batch_indices: NodeInput,
    pooled_h: int,
    pooled_w: int,
    sampling_ratio: int,
    spatial_scale: float,
    mode: str,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs ROIAlign.

    @param data: Input data.
    @param rois: RoIs (Regions of Interest) to pool over.
    @param batch_indices: Tensor with each element denoting the index of
                          the corresponding image in the batch.
    @param pooled_h: Height of the ROI output feature map.
    @param pooled_w: Width of the ROI output feature map.
    @param sampling_ratio: Number of bins over height and width to use to calculate
                           each output feature map element.
    @param spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates.
    @param mode: Method to perform pooling to produce output feature map elements.
    @param name: The optional name for the output node.
    @return The new node which performs ROIAlign.
    """
    inputs = as_nodes(data, rois, batch_indices)
    attributes = {
        "pooled_h": pooled_h,
        "pooled_w": pooled_w,
        "sampling_ratio": sampling_ratio,
        "spatial_scale": spatial_scale,
        "mode": mode,
    }
    return _get_node_factory_opset3().create("ROIAlign", inputs, attributes)
def scatter_elements_update(
    data: NodeInput,
    indices: NodeInput,
    updates: NodeInput,
    axis: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Return a node which produces a ScatterElementsUpdate operation.

    ScatterElementsUpdate creates a copy of the first input tensor with updated elements
    specified with second and third input tensors.

    For each entry in `updates`, the target index in `data` is obtained by combining
    the corresponding entry in `indices` with the index of the entry itself: the
    index-value for dimension equal to `axis` is obtained from the value of the
    corresponding entry in `indices` and the index-value for dimension not equal
    to `axis` is obtained from the index of the entry itself.

    @param data: The input tensor to be updated.
    @param indices: The tensor with indices of elements to update.
    @param updates: The tensor with update values.
    @param axis: The axis for scatter.
    @param name: An optional name of the output node.
    @return ScatterElementsUpdate node.
    """
    return _get_node_factory_opset3().create(
        "ScatterElementsUpdate", as_nodes(data, indices, updates, axis)
    )
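
# A worked example of the indexing rule described in the docstring above
# (hypothetical values; a plain int for `axis` is assumed to be accepted as
# NodeInput). With axis = 1, the entry updates[i][j] is written to
# data[i][indices[i][j]]: the position along `axis` comes from `indices`,
# every other position comes from the entry's own index.
#
#     data = np.zeros((2, 3), dtype=np.float32)
#     indices = np.array([[2, 0, 1], [1, 2, 0]], dtype=np.int64)
#     updates = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32)
#     node = scatter_elements_update(data, indices, updates, axis=1)
#     # Result: [[2.0, 3.0, 1.0],
#     #          [6.0, 4.0, 5.0]]
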
def mvn(
    data: Node,
    axes: Node,
    normalize_variance: bool,
    eps: float,
    eps_mode: str,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs MeanVarianceNormalization (MVN).

    @param data: The node with data tensor.
    @param axes: The node with axes to reduce on.
    @param normalize_variance: Denotes whether to perform variance normalization.
    @param eps: The number added to the variance to avoid division by zero
                when normalizing the value. Scalar value.
    @param eps_mode: Specifies how eps is applied (`inside_sqrt` or `outside_sqrt`).
    @param name: Optional output node name.
    @return The new node performing a MVN operation on input tensor.
    """
    inputs = as_nodes(data, axes)

    attributes = {
        "normalize_variance": normalize_variance,
        "eps": eps,
        "eps_mode": eps_mode,
    }

    return _get_node_factory_opset6().create("MVN", inputs, attributes)
def topk(
    data: NodeInput,
    k: NodeInput,
    axis: int,
    mode: str,
    sort: str,
    index_element_type: str = "i32",
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs TopK.

    @param data: Input data.
    @param k: K.
    @param axis: TopK Axis.
    @param mode: Compute TopK largest ('max') or smallest ('min').
    @param sort: Order of output elements (sort by: 'none', 'index' or 'value').
    @param index_element_type: Type of output tensor with indices.
    @param name: An optional name of the output node.
    @return The new node which performs TopK (both indices and values).
    """
    return _get_node_factory_opset3().create(
        "TopK",
        as_nodes(data, k),
        {
            "axis": axis,
            "mode": mode,
            "sort": sort,
            "index_element_type": index_element_type,
        },
    )
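
# A minimal usage sketch for topk (hypothetical values; a plain int for `k` is
# assumed to be accepted as NodeInput). The node has two outputs: output 0
# holds the selected values, output 1 their indices.
#
#     data = np.array([[3.0, 1.0, 2.0, 4.0]], dtype=np.float32)
#     node = topk(data, k=2, axis=1, mode="max", sort="value")
#     # node.output(0) -> values  [[4.0, 3.0]]
#     # node.output(1) -> indices [[3, 0]]
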
def random_uniform(
    output_shape: NodeInput,
    min_val: NodeInput,
    max_val: NodeInput,
    output_type: str,
    global_seed: int = 0,
    op_seed: int = 0,
) -> Node:
    """Return a node which generates sequence of random values from uniform distribution.

    @param output_shape: Tensor with shape of the output tensor.
    @param min_val: Tensor with the lower bound on the range of random values to generate.
    @param max_val: Tensor with the upper bound on the range of random values to generate.
    @param output_type: Specifies the output tensor type, possible values:
                        'i64', 'i32', 'f64', 'f32', 'f16', 'bf16'.
    @param global_seed: Specifies global seed value. Required to be a positive integer or 0.
    @param op_seed: Specifies operational seed value. Required to be a positive integer or 0.
    @return The new node which performs generation of random values from uniform distribution.
    """
    inputs = as_nodes(output_shape, min_val, max_val)

    if global_seed < 0:
        raise RuntimeError("global_seed should be positive or 0. Got: {}".format(global_seed))

    if op_seed < 0:
        raise RuntimeError("op_seed should be positive or 0. Got: {}".format(op_seed))

    attributes = {
        "output_type": output_type,
        "global_seed": global_seed,
        "op_seed": op_seed,
    }
    return _get_node_factory_opset8().create("RandomUniform", inputs, attributes)
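
# A minimal usage sketch for random_uniform (hypothetical values). The seeds
# are node attributes, so the same (global_seed, op_seed) pair reproduces the
# same tensor. Numpy inputs are assumed to be accepted as NodeInput:
#
#     shape = np.array([2, 3], dtype=np.int64)
#     node = random_uniform(shape, min_val=np.float32(0.0),
#                           max_val=np.float32(1.0), output_type="f32",
#                           global_seed=100, op_seed=10)
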
def hswish(data: NodeInput, name: Optional[str] = None) -> Node:
    """Return a node which performs HSwish (hard version of Swish).

    @param data: Tensor with input data of floating-point type.
    @param name: Optional output node name.
    @return The new node which performs HSwish.
    """
    return _get_node_factory_opset4().create("HSwish", as_nodes(data), {})
def softplus(data: NodeInput, name: Optional[str] = None) -> Node:
    """Apply SoftPlus operation on each element of input tensor.

    @param data: The tensor providing input data.
    @param name: Optional output node name.
    @return The new node with SoftPlus operation applied on each element.
    """
    return _get_node_factory_opset4().create("SoftPlus", as_nodes(data), {})
def hsigmoid(data: NodeInput, name: Optional[str] = None) -> Node:
    """Return a node which performs HSigmoid.

    @param data: Tensor with input data of floating-point type.
    @param name: Optional output node name.
    @return: The new node which performs HSigmoid.
    """
    return _get_node_factory_opset5().create("HSigmoid", as_nodes(data), {})
def matrix_nms(
    boxes: NodeInput,
    scores: NodeInput,
    sort_result_type: str = "none",
    sort_result_across_batch: bool = False,
    output_type: str = "i64",
    score_threshold: float = 0.0,
    nms_top_k: int = -1,
    keep_top_k: int = -1,
    background_class: int = -1,
    decay_function: str = "linear",
    gaussian_sigma: float = 2.0,
    post_threshold: float = 0.0,
    normalized: bool = True,
) -> Node:
    """Return a node which performs MatrixNms.

    @param boxes: Tensor with box coordinates.
    @param scores: Tensor with box scores.
    @param sort_result_type: Specifies order of output elements, possible values:
                             'class': sort selected boxes by class id (ascending),
                             'score': sort selected boxes by score (descending),
                             'none': do not guarantee the order.
    @param sort_result_across_batch: Specifies whether it is necessary to sort selected boxes
                                     across batches or not.
    @param output_type: Specifies the output tensor type, possible values: 'i64', 'i32'.
    @param score_threshold: Specifies minimum score to consider box for the processing.
    @param nms_top_k: Specifies maximum number of boxes to be selected per class,
                      -1 meaning to keep all boxes.
    @param keep_top_k: Specifies maximum number of boxes to be selected per batch element,
                       -1 meaning to keep all boxes.
    @param background_class: Specifies the background class id, -1 meaning to keep all classes.
    @param decay_function: Specifies decay function used to decay scores, possible values:
                           'gaussian', 'linear'.
    @param gaussian_sigma: Specifies gaussian_sigma parameter for gaussian decay_function.
    @param post_threshold: Specifies threshold to filter out boxes with low confidence score
                           after decaying.
    @param normalized: Specifies whether boxes are normalized or not.
    @return: The new node which performs MatrixNms.
    """
    inputs = as_nodes(boxes, scores)

    attributes = {
        "sort_result_type": sort_result_type,
        "sort_result_across_batch": sort_result_across_batch,
        "output_type": output_type,
        "score_threshold": score_threshold,
        "nms_top_k": nms_top_k,
        "keep_top_k": keep_top_k,
        "background_class": background_class,
        "decay_function": decay_function,
        "gaussian_sigma": gaussian_sigma,
        "post_threshold": post_threshold,
        "normalized": normalized,
    }

    return _get_node_factory_opset8().create("MatrixNms", inputs, attributes)
def idft(
    data: NodeInput,
    axes: NodeInput,
    signal_size: Optional[NodeInput] = None,
) -> Node:
    """Return a node which performs IDFT operation.

    @param data: Tensor with transformed data.
    @param axes: Tensor with axes to transform.
    @param signal_size: Tensor specifying signal size with respect to axes from the input 'axes'.
    @return: The new node which performs IDFT operation on the input data tensor.
    """
    if signal_size is None:
        inputs = as_nodes(data, axes)
    else:
        inputs = as_nodes(data, axes, signal_size)

    return _get_node_factory_opset7().create("IDFT", inputs)
def adaptive_avg_pool(data: NodeInput, output_shape: NodeInput) -> Node:
    """Return a node which performs AdaptiveAvgPool operation.

    @param data: The node providing input data.
    @param output_shape: The shape of spatial dimensions after the operation.
    @return: The new node performing AdaptiveAvgPool operation on the data.
    """
    inputs = as_nodes(data, output_shape)
    return _get_node_factory_opset8().create("AdaptiveAvgPool", inputs)
def rnn_sequence(
    X: NodeInput,
    initial_hidden_state: NodeInput,
    sequence_lengths: NodeInput,
    W: NodeInput,
    R: NodeInput,
    B: NodeInput,
    hidden_size: int,
    direction: str,
    activations: List[str] = None,
    activations_alpha: List[float] = None,
    activations_beta: List[float] = None,
    clip: float = 0.0,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs RNNSequence operation.

    @param X: The input tensor. Shape: [batch_size, seq_length, input_size].
    @param initial_hidden_state: The hidden state tensor.
                                 Shape: [batch_size, num_directions, hidden_size].
    @param sequence_lengths: Specifies real sequence lengths for each batch element.
                             Shape: [batch_size]. Integer type.
    @param W: Tensor with weights for matrix multiplication operation with input portion of data.
              Shape: [num_directions, hidden_size, input_size].
    @param R: The tensor with weights for matrix multiplication operation with hidden state.
              Shape: [num_directions, hidden_size, hidden_size].
    @param B: The sum of biases (weight and recurrence). Shape: [num_directions, hidden_size].
    @param hidden_size: Specifies hidden state size.
    @param direction: Specifies if the RNN is forward, reverse, or bidirectional.
    @param activations: The list of activation functions used inside recurrent cell.
    @param activations_alpha: The list of alpha parameters for activation functions.
    @param activations_beta: The list of beta parameters for activation functions.
    @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
    @param name: An optional name of the output node.
    @return: The new node represents RNNSequence. Node outputs count: 2.
    """
    if activations is None:
        activations = ["tanh"]
    if activations_alpha is None:
        activations_alpha = []
    if activations_beta is None:
        activations_beta = []

    inputs = as_nodes(X, initial_hidden_state, sequence_lengths, W, R, B)

    attributes = {
        "hidden_size": hidden_size,
        "direction": direction.lower(),
        "activations": activations,
        "activations_alpha": activations_alpha,
        "activations_beta": activations_beta,
        "clip": clip,
    }

    return _get_node_factory_opset5().create("RNNSequence", inputs, attributes)
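
# A shape-bookkeeping sketch for rnn_sequence (hypothetical sizes;
# num_directions is 1 for "forward"/"reverse" and 2 for "bidirectional"):
#
#     batch, seq_len, input_size, hidden = 2, 5, 4, 3
#     X = np.random.rand(batch, seq_len, input_size).astype(np.float32)
#     H0 = np.zeros((batch, 1, hidden), dtype=np.float32)
#     lens = np.array([5, 3], dtype=np.int32)
#     W = np.random.rand(1, hidden, input_size).astype(np.float32)
#     R = np.random.rand(1, hidden, hidden).astype(np.float32)
#     B = np.zeros((1, hidden), dtype=np.float32)
#     node = rnn_sequence(X, H0, lens, W, R, B, hidden_size=hidden,
#                         direction="forward")
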
def rnn_cell(
    X: NodeInput,
    initial_hidden_state: NodeInput,
    W: NodeInput,
    R: NodeInput,
    B: NodeInput,
    hidden_size: int,
    activations: List[str] = None,
    activations_alpha: List[float] = None,
    activations_beta: List[float] = None,
    clip: float = 0.0,
    name: Optional[str] = None,
) -> Node:
    """Perform RNNCell operation on tensor from input node.

    It follows notation and equations defined as in ONNX standard:
    https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN

    Note this class represents only a single *cell* and not the whole RNN *layer*.

    @param X: The input tensor with shape: [batch_size, input_size].
    @param initial_hidden_state: The hidden state tensor at current time step with shape:
                                 [batch_size, hidden_size].
    @param W: The weight tensor with shape: [hidden_size, input_size].
    @param R: The recurrence weight tensor with shape: [hidden_size, hidden_size].
    @param B: The sum of biases (weight and recurrence) with shape: [hidden_size].
    @param hidden_size: The number of hidden units for recurrent cell.
                        Specifies hidden state size.
    @param activations: The vector of activation functions used inside recurrent cell.
    @param activations_alpha: The vector of alpha parameters for activation functions
                              in order respective to activation list.
    @param activations_beta: The vector of beta parameters for activation functions
                             in order respective to activation list.
    @param clip: The value defining clipping range [-clip, clip] on input of
                 activation functions.
    @param name: Optional output node name.
    @return The new node performing a RNNCell operation on tensor from input node.
    """
    if activations is None:
        activations = ["tanh"]
    if activations_alpha is None:
        activations_alpha = []
    if activations_beta is None:
        activations_beta = []

    input_nodes = as_nodes(X, initial_hidden_state, W, R, B)

    attributes = {
        "hidden_size": hidden_size,
        "activations": activations,
        "activations_alpha": activations_alpha,
        "activations_beta": activations_beta,
        "clip": clip,
    }

    return _get_node_factory_opset3().create("RNNCell", input_nodes, attributes)
def loop(
    trip_count: NodeInput,
    execution_condition: NodeInput,
    inputs: List[Node],
    graph_body: GraphBody,
    slice_input_desc: List[TensorIteratorSliceInputDesc],
    merged_input_desc: List[TensorIteratorMergedInputDesc],
    invariant_input_desc: List[TensorIteratorInvariantInputDesc],
    body_output_desc: List[TensorIteratorBodyOutputDesc],
    concat_output_desc: List[TensorIteratorConcatOutputDesc],
    body_condition_output_idx: int,
    current_iteration_input_idx: int = -1,
    name: Optional[str] = None,
) -> Node:
    """Perform recurrent execution of the network described in the body, iterating through the data.

    @param trip_count: A scalar or 1D tensor with 1 element specifying
                       maximum number of iterations.
    @param execution_condition: A scalar or 1D tensor with 1 element
                                specifying whether to execute the first iteration or not.
    @param inputs: The inputs provided to the Loop operator.
    @param graph_body: The graph representing the body we execute.
    @param slice_input_desc: The descriptors describing sliced inputs, that is nodes
                             representing tensors we iterate through, processing single
                             data slice in one iteration.
    @param merged_input_desc: The descriptors describing merged inputs, that is nodes
                              representing variables with initial value at first iteration,
                              which may be changing through iterations.
    @param invariant_input_desc: The descriptors describing invariant inputs, that is nodes
                                 representing variable with persistent value through all
                                 iterations.
    @param body_output_desc: The descriptors describing body outputs from specified iteration.
    @param concat_output_desc: The descriptors describing specified output values through
                               all the iterations concatenated into one node.
    @param body_condition_output_idx: Determines the purpose of the corresponding result in
                                      the graph_body. This result will determine the dynamic
                                      exit condition. If the value of this result is False,
                                      then iterations stop.
    @param current_iteration_input_idx: Determines the purpose of the corresponding parameter
                                        in the graph_body. This parameter will be used as
                                        an iteration counter. Optional.
    @param name: An optional name of the output node.
    @return: The new node which performs Loop.
    """
    attributes = {
        "body": graph_body.serialize(),
        "input_descriptions": {
            "slice_input_desc": [desc.serialize() for desc in slice_input_desc],
            "merged_input_desc": [desc.serialize() for desc in merged_input_desc],
            "invariant_input_desc": [desc.serialize() for desc in invariant_input_desc],
        },
        "output_descriptions": {
            "body_output_desc": [desc.serialize() for desc in body_output_desc],
            "concat_output_desc": [desc.serialize() for desc in concat_output_desc],
        },
        "special_body_ports": {
            "body_condition_output_idx": body_condition_output_idx,
            "current_iteration_input_idx": current_iteration_input_idx,
        },
    }

    return _get_node_factory_opset5().create(
        "Loop", as_nodes(trip_count, execution_condition, *inputs), attributes
    )
def slice(
    data: NodeInput,
    start: NodeInput,
    stop: NodeInput,
    step: NodeInput,
    axes: Optional[NodeInput] = None,
) -> Node:
    """Return a node which generates Slice operation.

    @param data: The node providing input data.
    @param start: The node providing start indices (inclusively).
    @param stop: The node providing stop indices (exclusively).
    @param step: The node providing step values.
    @param axes: The optional node providing axes to slice, default [0, 1, ..., len(start)-1].
    @return The new node performing Slice operation.
    """
    if axes is None:
        inputs = as_nodes(data, start, stop, step)
    else:
        inputs = as_nodes(data, start, stop, step, axes)

    return _get_node_factory_opset8().create("Slice", inputs)
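
# A minimal usage sketch for slice (hypothetical values; numpy inputs are
# assumed to be accepted as NodeInput). With start=[1], stop=[4], step=[1] on
# the default axis 0, elements at indices 1, 2 and 3 are selected:
#
#     data = np.array([10, 11, 12, 13, 14], dtype=np.int32)
#     node = slice(data, start=np.array([1]), stop=np.array([4]),
#                  step=np.array([1]))
#     # Result: [11, 12, 13]
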
def einsum(inputs: List[Node], equation: str) -> Node:
    """Return a node which performs Einsum operation.

    @param inputs: The list of input nodes.
    @param equation: Einsum equation.
    @return: The new node performing Einsum operation on the inputs.
    """
    attributes = {"equation": equation}

    return _get_node_factory_opset7().create("Einsum", as_nodes(*inputs), attributes)
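
# A minimal usage sketch for einsum (hypothetical values). The equation
# "ij,jk->ik" expresses a plain matrix multiplication of the two inputs.
# Although the annotation says List[Node], raw numpy arrays are assumed to be
# accepted here too, since the list is expanded through `as_nodes`:
#
#     a = np.random.rand(2, 3).astype(np.float32)
#     b = np.random.rand(3, 4).astype(np.float32)
#     node = einsum([a, b], equation="ij,jk->ik")   # result shape: [2, 4]
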
def round(data: NodeInput, mode: str = "half_to_even", name: Optional[str] = None) -> Node:
    """Apply Round operation on each element of input tensor.

    @param data: The tensor providing input data.
    @param mode: Rule to round halfway cases. If set to 'half_to_even', halfway cases
                 round to the nearest even integer; if set to 'half_away_from_zero',
                 the result heads away from zero.
    @param name: An optional name of the output node.
    @return: The new node with Round operation applied on each element.
    """
    return _get_node_factory_opset5().create("Round", as_nodes(data), {"mode": mode.upper()})
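
# Worked halfway cases for the two rounding modes (input values are
# hypothetical; the expected outputs follow directly from the definitions):
#
#     x = np.array([-2.5, -1.5, 0.5, 1.5, 2.5], dtype=np.float32)
#     round(x, mode="half_to_even")         # -> [-2.0, -2.0, 0.0, 2.0, 2.0]
#     round(x, mode="half_away_from_zero")  # -> [-3.0, -2.0, 1.0, 2.0, 3.0]
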
def multiclass_nms(
    boxes: NodeInput,
    scores: NodeInput,
    sort_result_type: str = "none",
    sort_result_across_batch: bool = False,
    output_type: str = "i64",
    iou_threshold: float = 0.0,
    score_threshold: float = 0.0,
    nms_top_k: int = -1,
    keep_top_k: int = -1,
    background_class: int = -1,
    nms_eta: float = 1.0,
    normalized: bool = True,
) -> Node:
    """Return a node which performs MulticlassNms.

    @param boxes: Tensor with box coordinates.
    @param scores: Tensor with box scores.
    @param sort_result_type: Specifies order of output elements, possible values:
                             'class': sort selected boxes by class id (ascending),
                             'score': sort selected boxes by score (descending),
                             'none': do not guarantee the order.
    @param sort_result_across_batch: Specifies whether it is necessary to sort selected boxes
                                     across batches or not.
    @param output_type: Specifies the output tensor type, possible values: 'i64', 'i32'.
    @param iou_threshold: Specifies intersection over union threshold.
    @param score_threshold: Specifies minimum score to consider box for the processing.
    @param nms_top_k: Specifies maximum number of boxes to be selected per class,
                      -1 meaning to keep all boxes.
    @param keep_top_k: Specifies maximum number of boxes to be selected per batch element,
                       -1 meaning to keep all boxes.
    @param background_class: Specifies the background class id, -1 meaning to keep all classes.
    @param nms_eta: Specifies eta parameter for adaptive NMS, in closed range [0, 1.0].
    @param normalized: Specifies whether boxes are normalized or not.
    @return: The new node which performs MulticlassNms.
    """
    inputs = as_nodes(boxes, scores)

    attributes = {
        "sort_result_type": sort_result_type,
        "sort_result_across_batch": sort_result_across_batch,
        "output_type": output_type,
        "iou_threshold": iou_threshold,
        "score_threshold": score_threshold,
        "nms_top_k": nms_top_k,
        "keep_top_k": keep_top_k,
        "background_class": background_class,
        "nms_eta": nms_eta,
        "normalized": normalized,
    }

    return _get_node_factory_opset8().create("MulticlassNms", inputs, attributes)
def swish(
    data: NodeInput,
    beta: Optional[NodeInput] = None,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs Swish activation function: Swish(x, beta) = x * sigmoid(x * beta).

    @param data: Tensor with input data of floating-point type.
    @param beta: Scalar with beta value. Defaults to 1.0 when not provided.
    @param name: Optional output node name.
    @return The new node which performs Swish.
    """
    if beta is None:
        beta = make_constant_node(1.0, np.float32)
    return _get_node_factory_opset4().create("Swish", as_nodes(data, beta), {})
def reduce_l2(
    node: NodeInput,
    reduction_axes: NodeInput,
    keep_dims: bool = False,
    name: Optional[str] = None,
) -> Node:
    """L2-reduction operation on input tensor, eliminating the specified reduction axes.

    @param node: The tensor we want to reduce.
    @param reduction_axes: The axes to eliminate through the L2-reduction operation.
    @param keep_dims: If set to True, it holds axes that are used for reduction.
    @param name: Optional name for output node.
    @return The new node performing L2-reduction operation.
    """
    return _get_node_factory_opset4().create(
        "ReduceL2", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}
    )
def roll(
    data: NodeInput,
    shift: NodeInput,
    axes: NodeInput,
) -> Node:
    """Return a node which performs Roll operation.

    @param data: The node with data tensor.
    @param shift: The node with the tensor with numbers of places by which elements are shifted.
    @param axes: The node with the tensor with axes along which elements are shifted.
    @return The new node performing a Roll operation on the input tensor.
    """
    inputs = as_nodes(data, shift, axes)
    return _get_node_factory_opset7().create("Roll", inputs)
def lstm_cell(
    X: NodeInput,
    initial_hidden_state: NodeInput,
    initial_cell_state: NodeInput,
    W: NodeInput,
    R: NodeInput,
    B: NodeInput,
    hidden_size: int,
    activations: List[str] = None,
    activations_alpha: List[float] = None,
    activations_beta: List[float] = None,
    clip: float = 0.0,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs LSTMCell operation.

    @param X: The input tensor with shape: [batch_size, input_size].
    @param initial_hidden_state: The hidden state tensor with shape: [batch_size, hidden_size].
    @param initial_cell_state: The cell state tensor with shape: [batch_size, hidden_size].
    @param W: The weight tensor with shape: [4*hidden_size, input_size].
    @param R: The recurrence weight tensor with shape: [4*hidden_size, hidden_size].
    @param B: The bias tensor for gates with shape: [4*hidden_size].
    @param hidden_size: Specifies hidden state size.
    @param activations: The list of three activation functions for gates.
    @param activations_alpha: The list of alpha parameters for activation functions.
    @param activations_beta: The list of beta parameters for activation functions.
    @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
    @param name: An optional name of the output node.
    @return The new node represents LSTMCell. Node outputs count: 2.
    """
    if activations is None:
        activations = ["sigmoid", "tanh", "tanh"]
    if activations_alpha is None:
        activations_alpha = []
    if activations_beta is None:
        activations_beta = []

    node_inputs = as_nodes(X, initial_hidden_state, initial_cell_state, W, R, B)

    attributes = {
        "hidden_size": hidden_size,
        "activations": activations,
        "activations_alpha": activations_alpha,
        "activations_beta": activations_beta,
        "clip": clip,
    }
    return _get_node_factory_opset4().create("LSTMCell", node_inputs, attributes)
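
# A shape-bookkeeping sketch for lstm_cell (hypothetical sizes). The leading
# dimension of W, R and B is 4 * hidden_size because the four LSTM gates are
# stacked along it:
#
#     batch, input_size, hidden = 2, 4, 3
#     X = np.random.rand(batch, input_size).astype(np.float32)
#     H0 = np.zeros((batch, hidden), dtype=np.float32)
#     C0 = np.zeros((batch, hidden), dtype=np.float32)
#     W = np.random.rand(4 * hidden, input_size).astype(np.float32)
#     R = np.random.rand(4 * hidden, hidden).astype(np.float32)
#     B = np.zeros((4 * hidden,), dtype=np.float32)
#     node = lstm_cell(X, H0, C0, W, R, B, hidden_size=hidden)
#     # node.output(0) -> next hidden state, node.output(1) -> next cell state
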
def scatter_update(
    data: Node,
    indices: NodeInput,
    updates: NodeInput,
    axis: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Return a node which produces a ScatterUpdate operation.

    ScatterUpdate sets new values to slices from data addressed by indices.

    @param data: The input tensor to be updated.
    @param indices: The tensor with indices of the slices to be updated.
    @param updates: The tensor with update values.
    @param axis: The axis at which elements will be updated.
    @param name: An optional name of the output node.
    @return ScatterUpdate node.
    """
    return _get_node_factory_opset3().create(
        "ScatterUpdate", as_nodes(data, indices, updates, axis)
    )
def gelu(
    data: Node,
    approximation_mode: str,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs Gelu activation function.

    @param data: The node with data tensor.
    @param approximation_mode: Defines which approximation to use ('tanh' or 'erf').
    @param name: Optional output node name.
    @return The new node performing a Gelu activation with the input tensor.
    """
    inputs = as_nodes(data)

    attributes = {"approximation_mode": approximation_mode}

    return _get_node_factory_opset7().create("Gelu", inputs, attributes)
def gather(
    data: NodeInput,
    indices: NodeInput,
    axis: NodeInput,
    batch_dims: Optional[int] = 0,
) -> Node:
    """Return a node which performs Gather.

    @param data: N-D tensor with data for gathering.
    @param indices: N-D tensor with indices by which data is gathered.
    @param axis: Axis along which elements are gathered.
    @param batch_dims: Number of batch dimensions.
    @return: The new node which performs Gather.
    """
    inputs = as_nodes(data, indices, axis)
    attributes = {"batch_dims": batch_dims}
    return _get_node_factory_opset7().create("Gather", inputs, attributes)
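
# A minimal usage sketch for gather (hypothetical values; a plain int for
# `axis` is assumed to be accepted as NodeInput). With axis 0 the indices
# select whole rows of the data tensor:
#
#     data = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
#     indices = np.array([2, 0], dtype=np.int64)
#     node = gather(data, indices, axis=0)
#     # Result: [[5.0, 6.0],
#     #          [1.0, 2.0]]
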
def gather_nd(
    data: NodeInput,
    indices: NodeInput,
    batch_dims: Optional[int] = 0,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs GatherND.

    @param data: N-D tensor with data for gathering.
    @param indices: K-D tensor of tuples with indices by which data is gathered.
    @param batch_dims: Scalar value of batch dimensions.
    @param name: Optional output node name.
    @return: The new node which performs GatherND.
    """
    inputs = as_nodes(data, indices)
    attributes = {"batch_dims": batch_dims}
    return _get_node_factory_opset8().create("GatherND", inputs, attributes)
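
# A minimal usage sketch for gather_nd (hypothetical values). Each innermost
# row of `indices` is a coordinate tuple into `data`; here the tuples address
# individual scalars:
#
#     data = np.array([[1, 2], [3, 4]], dtype=np.float32)
#     indices = np.array([[0, 1], [1, 0]], dtype=np.int64)
#     node = gather_nd(data, indices)
#     # Result: [2.0, 3.0]
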