Example #1
def Selu(onnx_node, ng_inputs):  # type: (NodeWrapper, List[TensorOp]) -> Op
    # f(x) = gamma * (alpha * exp(x) - alpha) for x <= 0, f(x) = gamma * x for x > 0
    x = ng_inputs[0]
    alpha = onnx_node.get_attribute_value('alpha', 1.6732)
    gamma = onnx_node.get_attribute_value('gamma', 1.0507)

    return gamma * (ng.maximum(x, 0) + alpha *
                    (ng.exp(-ng.maximum(-x, 0)) - 1))
Example #2
def Selu(onnx_node, ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Apply the scaled exponential linear unit function to the input tensor elementwise.

    f(x) = gamma * (alpha * exp(x) - alpha) for x <= 0, f(x) = gamma * x for x > 0
    """
    x = ng_inputs[0]
    alpha = onnx_node.get_attribute_value('alpha', 1.6732)
    gamma = onnx_node.get_attribute_value('gamma', 1.0507)

    return (gamma * (ng.maximum(x, 0) + alpha * (ng.exp(ng.negative(ng.maximum(ng.negative(x), 0))) - 1)))
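
The single-expression form above avoids an explicit piecewise select. A quick NumPy check (a sketch for illustration, not part of the original converter) confirms it matches the piecewise SELU definition from the docstring:

import numpy as np

alpha, gamma = 1.6732, 1.0507
x = np.linspace(-5, 5, 101)

# Piecewise definition: gamma * alpha * (exp(x) - 1) for x <= 0, gamma * x for x > 0
piecewise = np.where(x > 0, gamma * x, gamma * alpha * (np.exp(x) - 1))

# The fused formulation used by the converter
fused = gamma * (np.maximum(x, 0) + alpha * (np.exp(-np.maximum(-x, 0)) - 1))

assert np.allclose(piecewise, fused)
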
Example #3
def Elu(onnx_node, ng_inputs):  # type: (NodeWrapper, List[TensorOp]) -> Op
    # f(x) = alpha * (exp(x) - 1.) for x < 0, f(x) = x for x >= 0
    x = ng_inputs[0]
    alpha = onnx_node.get_attribute_value('alpha', 1)

    if not alpha > 0:
        logger.warning(
            'Elu node (%s): alpha value should be positive, but is: %s',
            onnx_node.name, alpha)

    return ng.maximum(x, 0) + alpha * (ng.exp(-ng.maximum(-x, 0)) - 1)
Example #4
def Elu(onnx_node, ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Apply the exponential linear unit function to the input tensor elementwise.

    f(x) = alpha * (exp(x) - 1.) for x < 0, f(x) = x for x >= 0
    """
    x = ng_inputs[0]
    alpha = onnx_node.get_attribute_value('alpha', 1)

    if not alpha > 0:
        logger.warning('Elu node (%s): alpha value should be positive, but is: %s',
                       onnx_node.name, alpha)

    return (ng.maximum(x, 0) + alpha * (ng.exp(ng.negative(ng.maximum(ng.negative(x), 0))) - 1))
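
Note why both converters apply exp to -max(-x, 0) rather than to x directly: the argument is clamped to at most zero, so exp stays in (0, 1] and cannot overflow for large positive inputs. A small NumPy illustration (a sketch, not project code):

import numpy as np

x = np.array([1000.0])

# A naive piecewise form still evaluates exp on every element, so
# exp(1000.) overflows internally (RuntimeWarning) even though np.where
# discards that branch for positive x.
naive = np.where(x < 0, np.exp(x) - 1.0, x)

# The clamped form feeds exp an argument of min(x, 0) <= 0 and is safe.
safe = np.maximum(x, 0) + (np.exp(-np.maximum(-x, 0)) - 1.0)
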
Example #5
def PRelu(onnx_node,
          ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Apply the Parametric Relu function to the input tensor elementwise.

    f(x) = slope * x for x < 0, f(x) = x for x >= 0
    The slope parameter is passed to the node as its second input.
    """
    x, slope = ng_inputs
    if len(slope.shape) == 0:
        return ng.maximum(slope * x, x)
    elif slope.shape[0] == 1:
        slope = ng.broadcast_to(slope, [x.shape[0], 1])
        slope = ng.reshape(slope, [x.shape[0]])
        return ng.maximum(ng.broadcast_to(slope, x.shape, 0) * x, x)
    else:
        return ng.maximum(ng.broadcast_to(slope, x.shape, 1) * x, x)
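
For reference, a plain NumPy version of the same piecewise definition, assuming the channel axis is axis 1 for inputs with more than two dimensions (a sketch; the ng.broadcast_to calls above do this axis bookkeeping explicitly). As in the converters, max(slope * x, x) only matches the piecewise definition when slope <= 1:

import numpy as np

def prelu_reference(x, slope):
    # Reshape a per-channel slope so it broadcasts along axis 1
    # (assumed channel axis) of an N-dimensional input.
    if np.ndim(slope) > 0 and np.ndim(x) > 2:
        slope = np.reshape(slope, (1, -1) + (1,) * (x.ndim - 2))
    return np.maximum(slope * x, x)
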
Example #6
def clip_gradient_norm(grad_list, clip_norm=None):
    """
    Returns a scaling factor to apply to the gradients.

    The scaling factor is computed such that the global L2 norm of the
    scaled gradients across all layers will be less than or equal to the
    provided clip_norm value. The factor is at most 1, so it never scales
    up the gradients.

    Arguments:
        grad_list (list): List of gradient tensors, one per layer
        clip_norm (float, optional): Target norm for the gradients. If not provided
                                     the returned scale_factor will equal 1.

    Returns:
        Computed scale factor (float)
    """
    if clip_norm is None:
        return 1
    else:
        s = None
        for param in grad_list:
            term = ng.squared_L2(param, out_axes=None)
            if s is None:
                s = term
            else:
                s = s + term

        s = ng.sqrt(s)
        return clip_norm / ng.maximum(s, clip_norm)
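
The returned factor is clip_norm / max(s, clip_norm): it is exactly 1 when the global norm s already meets the target, and otherwise shrinks the gradients so their global norm equals clip_norm. A NumPy sanity check of that identity (a sketch, independent of ngraph):

import numpy as np

clip_norm = 5.0
grads = [np.random.randn(10), np.random.randn(3, 3)]

global_norm = np.sqrt(sum(np.sum(g ** 2) for g in grads))
scale = clip_norm / max(global_norm, clip_norm)

assert scale <= 1.0  # never scales up
scaled_norm = np.sqrt(sum(np.sum((scale * g) ** 2) for g in grads))
assert scaled_norm <= clip_norm + 1e-12
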
Example #7
def LeakyRelu(onnx_node,
              ng_inputs):  # type: (NodeWrapper, List[TensorOp]) -> Op
    alpha = onnx_node.get_attribute_value('alpha', 0.01)
    if not 0 <= alpha <= 1:
        logger.warning(
            'LeakyRelu node (%s): alpha value should be in range [0, 1], but is: %s',
            onnx_node.name, alpha)
    return ng.maximum(alpha * ng_inputs[0], ng_inputs[0])
Example #8
def PRelu(onnx_node, ng_inputs):  # type: (NodeWrapper, List[TensorOp]) -> Op
    slope = onnx_node.get_attribute_value('slope', 0.01)
    if not 0 <= slope <= 1:
        logger.warning(
            'PRelu node (%s): slope value should be in range [0, 1], but is: %s',
            onnx_node.name, slope)

    return ng.maximum(slope * ng_inputs[0], ng_inputs[0])
Example #9
def PRelu(onnx_node, ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Apply the Parametric Relu function to the input tensor elementwise.

    f(x) = slope * x for x < 0, f(x) = x for x >= 0
    The slope parameter is passed to the node as its second input.
    """
    x, slope = ng_inputs
    return ng.maximum(slope * x, x)
Example #10
def LeakyRelu(onnx_node, ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Apply the Leaky Relu function to the input tensor elementwise.

    f(x) = alpha * x for x < 0, f(x) = x for x >= 0
    """
    alpha = onnx_node.get_attribute_value('alpha', 0.01)
    if not 0 <= alpha <= 1:
        logger.warning('LeakyRelu node (%s): alpha value should be in range [0, 1], but is: %s',
                       onnx_node.name, alpha)
    return ng.maximum(alpha * ng_inputs[0], ng_inputs[0])
Example #11
    def ReLU(self, cntk_op, inputs):
        """
        Returns element-wise rectified linear of inputs[0].

        Arguments:
            cntk_op: CNTK operation to be imported.
            inputs: List of inputs to this node.

        Returns:
            A ngraph Op.
        """
        return ng.maximum(inputs[0], 0.).named(cntk_op.uid)
Example #12
    def __call__(self, x):
        """
        Returns the Rectified Linear activation

        Arguments:
            x (Tensor or optree): Input value

        Returns:
            Tensor or optree: output activation
        """
        return ng.maximum(x, 0) + self.slope * ng.minimum(0, x)
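
Unlike the max(alpha * x, x) form used by the ONNX importers above, the identity max(x, 0) + slope * min(0, x) reproduces the piecewise leaky ReLU for any slope, including slope > 1. A quick NumPy check (a sketch):

import numpy as np

x = np.linspace(-3, 3, 61)
for slope in (0.01, 0.5, 2.0):  # slope > 1 works with this identity
    lhs = np.maximum(x, 0) + slope * np.minimum(0, x)
    rhs = np.where(x >= 0, x, slope * x)
    assert np.allclose(lhs, rhs)
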
Example #13
    def __call__(self, x):
        """
        Returns the Exponential Linear activation

        Arguments:
            x (Tensor or optree): input value

        Returns:
            Tensor or optree: output activation
        """
        return ng.maximum(x, 0) + self.alpha * (ng.exp(ng.minimum(x, 0)) - 1)
Example #14
def Clip(onnx_node,
         ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Limit input tensor values within specified interval."""
    data = ng_inputs[0]
    data_elem_dtype = get_dtype(data.get_element_type())
    max_value = onnx_node.get_attribute_value('max',
                                              np.finfo(data_elem_dtype).max)
    min_value = onnx_node.get_attribute_value('min',
                                              np.finfo(data_elem_dtype).min)

    return ng.minimum(
        ng.maximum(data, ng.constant(min_value, data_elem_dtype)),
        ng.constant(max_value, data_elem_dtype))
Example #15
    def ReLU(self, cntk_op, inputs):
        """
        Returns element-wise rectified linear of inputs[0].

        Arguments:
            cntk_op: CNTK operation to be imported.
            inputs: List of inputs to this node.

        Returns:
            A ngraph Op.
        """
        assert len(inputs) == 1

        return ng.maximum(inputs[0], 0.).named(cntk_op.uid)
Example #16
    def Relu(self, tf_node, inputs):
        """
        Computes rectified linear: `max(features, 0)`.

        Arguments:
            tf_node: NodeDef object, the tensorflow node to convert.
            inputs: List of ngraph Ops as inputs to this node.

        Returns:
            A ngraph Op corresponding to the tensorflow node.

        Inputs to tf_node:
            features, name
        """
        return ng.maximum(inputs[0], 0.).named(tf_node.name)
Example #17
def HardSigmoid(
        onnx_node,
        ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Apply f(x) = max(0, min(1, alpha * x + beta)) function to tensor element-wise.

    :param onnx_node: The ONNX node representing this operation.
    :param ng_inputs: The input tensors.
    :return: The tensor with applied HardSigmoid operation.
    """
    data = ng_inputs[0]
    data_type = get_dtype(data.get_element_type()).type
    alpha = onnx_node.get_attribute_value('alpha', float(0.2))
    beta = onnx_node.get_attribute_value('beta', float(0.5))
    return ng.maximum(data_type(0),
                      ng.minimum(data_type(1), alpha * data + beta))
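
With the ONNX defaults alpha = 0.2 and beta = 0.5, this is a piecewise-linear approximation of the logistic sigmoid: 0 below x = -2.5, 1 above x = 2.5, and linear in between. A NumPy equivalent (a sketch):

import numpy as np

def hard_sigmoid_reference(x, alpha=0.2, beta=0.5):
    # max(0, min(1, alpha * x + beta)), clamped elementwise
    return np.clip(alpha * x + beta, 0.0, 1.0)
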
Example #18
def clip_gradient_value(grad, clip_value=None):
    """
    Element-wise clip a gradient tensor to between ``-clip_value`` and ``+clip_value``.

    Arguments:
        grad (Tensor): Gradient tensor for a single layer
        clip_value (float, optional): Value to element-wise clip gradients. Default: no clipping

    Returns:
        grad (Tensor): Clipped gradient tensor.
    """
    if clip_value is None:
        return grad
    else:
        return ng.minimum(ng.maximum(grad, -abs(clip_value)), abs(clip_value))
Example #19
    def Relu(self, c2_op, inputs):
        """
        Computes rectified linear: `max(features, 0)`.

        Arguments:
            c2_op: NodeDef object, the caffe2 node to convert.
            inputs: List of ngraph Ops as inputs to this node.

        Returns:
            A ngraph Op corresponding to the caffe2 node.

        Inputs to c2_op:
            features, name
        """
        assert 1 == len(inputs)
        return ng.maximum(inputs[0], 0.).named(c2_op.name)
Example #20
def test_clip(transformer_factory):
    H = ng.make_axis(length=5)
    W = ng.make_axis(length=4)
    axes = ng.make_axes([W, H])

    p_x = ng.placeholder(axes)
    x = (2 * rng.uniform(0, 1, axes) - 1) * 20
    clip_value = 10

    clip_func = ng.minimum(ng.maximum(p_x, -abs(clip_value)), abs(clip_value))

    # numpy results as expected results
    expected_result = np.clip(x, -abs(clip_value), abs(clip_value))

    with ExecutorFactory() as ex:
        costfunc = ex.executor(clip_func, p_x)
        result = costfunc(x)
        ng.testing.assert_allclose(result, expected_result)
Example #21
def clip_weight_value(weight, clip_value=None, min_value_override=None):
    """
    Element-wise clip a weight tensor to between ``min_value_override`` and ``clip_value``.

    Arguments:
        weight (Tensor): Weight tensor for a single layer
        clip_value (float, optional): Value to element-wise clip weights. Default: no clipping
        min_value_override (float, optional): Minimum value to element-wise clip
                                              weights. Default: -abs(clip_value)

    Returns:
        weight (Tensor): Clipped weight tensor.
    """
    if clip_value is None:
        return weight
    else:
        if min_value_override is None:
            min_value_override = -abs(clip_value)
        return ng.minimum(ng.maximum(weight, min_value_override),
                          abs(clip_value))
Example #22
def binary_op(op_str, a, b):

    if op_str == '+':
        return a + b
    elif op_str == 'Add':
        return ng.add(a, b)
    elif op_str == '-':
        return a - b
    elif op_str == 'Sub':
        return ng.subtract(a, b)
    elif op_str == '*':
        return a * b
    elif op_str == 'Mul':
        return ng.multiply(a, b)
    elif op_str == '/':
        return a / b
    elif op_str == 'Div':
        return ng.divide(a, b)
    elif op_str == 'Dot':
        return Dot(a, b)
    elif op_str == 'Equal':
        return ng.equal(a, b)
    elif op_str == 'Greater':
        return ng.greater(a, b)
    elif op_str == 'GreaterEq':
        return ng.greater_equal(a, b)
    elif op_str == 'Less':
        return ng.less(a, b)
    elif op_str == 'LessEq':
        return ng.less_equal(a, b)
    elif op_str == 'Maximum':
        return ng.maximum(a, b)
    elif op_str == 'Minimum':
        return ng.minimum(a, b)
    elif op_str == 'NotEqual':
        return ng.not_equal(a, b)
    elif op_str == 'Power':
        return ng.power(a, b)
Example #23
def binary_op(op_str, a, b):

    if op_str == "+":
        return a + b
    elif op_str == "Add":
        return ng.add(a, b)
    elif op_str == "-":
        return a - b
    elif op_str == "Sub":
        return ng.subtract(a, b)
    elif op_str == "*":
        return a * b
    elif op_str == "Mul":
        return ng.multiply(a, b)
    elif op_str == "/":
        return a / b
    elif op_str == "Div":
        return ng.divide(a, b)
    elif op_str == "Equal":
        return ng.equal(a, b)
    elif op_str == "Greater":
        return ng.greater(a, b)
    elif op_str == "GreaterEq":
        return ng.greater_equal(a, b)
    elif op_str == "Less":
        return ng.less(a, b)
    elif op_str == "LessEq":
        return ng.less_equal(a, b)
    elif op_str == "Maximum":
        return ng.maximum(a, b)
    elif op_str == "Minimum":
        return ng.minimum(a, b)
    elif op_str == "NotEqual":
        return ng.not_equal(a, b)
    elif op_str == "Power":
        return ng.power(a, b)
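
As written, an op_str that matches none of the branches falls off the end of the if/elif chain and the helper silently returns None. A dictionary dispatch (a sketch of one alternative, not the project's own helper) keeps the mapping compact and makes the failure explicit:

_BINARY_OPS = {
    '+': lambda a, b: a + b, 'Add': ng.add,
    '-': lambda a, b: a - b, 'Sub': ng.subtract,
    '*': lambda a, b: a * b, 'Mul': ng.multiply,
    '/': lambda a, b: a / b, 'Div': ng.divide,
    'Equal': ng.equal, 'NotEqual': ng.not_equal,
    'Greater': ng.greater, 'GreaterEq': ng.greater_equal,
    'Less': ng.less, 'LessEq': ng.less_equal,
    'Maximum': ng.maximum, 'Minimum': ng.minimum,
    'Power': ng.power,
}

def binary_op(op_str, a, b):
    try:
        return _BINARY_OPS[op_str](a, b)
    except KeyError:
        raise ValueError('unsupported binary op: {}'.format(op_str))
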
Example #24
def relu(x, name=None):
    return ng.maximum(x, 0.).named(name)
Example #25
# Axis with length of max question
REC = ng.make_axis(length=max_question, name='REC')
# Axis with length of hidden unit size
F = ng.make_axis(length=hidden_size, name='F')
# Axis with length of embedding size
F_embed = ng.make_axis(length=300, name='F_embed')
# Axis with length 1
dummy_axis = ng.make_axis(length=1, name='dummy_axis')
# Axis with length of answer span
span = ng.make_axis(length=2, name='span')

# Set up drop out layer
dropout_val = ng.slice_along_axis(inputs['dropout_val'], N, 0)
dropout_1 = Dropout_Modified(keep=dropout_val)
dropout_2 = Dropout_Modified(keep=dropout_val)
drop_pointer = ng.maximum(dropout_val, ng.constant(const=0.8, axes=[]))
dropout_3 = Dropout_Modified(keep=drop_pointer)
dropout_4 = Dropout_Modified(keep=drop_pointer)

# Constants required for masking
const_LSTM = ng.constant(axes=[F, dummy_axis], const=1)
const_loss = ng.constant(axes=[ax.Y, dummy_axis], const=1)
const_LSTM_embed = ng.constant(axes=[F_embed, dummy_axis], const=1)

# Create masks
reorder_para_mask = ng.axes_with_order(
    inputs['para_len'], axes=[dummy_axis, inputs['para_len'].axes[2], N])

reorder_ques_mask = ng.axes_with_order(
    inputs['question_len'],
    axes=[dummy_axis, inputs['question_len'].axes[2], N])
Example #26
def PRelu(onnx_node, ng_inputs):  # type: (NodeWrapper, List[TensorOp]) -> Op
    x, slope = ng_inputs
    x = ng.broadcast(x, x.axes + slope.axes)
    slope = ng.broadcast(slope, axes=x.axes)
    return ng.maximum(slope * x, x)
Example #27
def Relu(onnx_node, ng_inputs):  # type: (NodeWrapper, List[TensorOp]) -> Op
    return ng.maximum(ng_inputs[0], 0.)
Example #28
def Relu(onnx_node, ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Apply the Relu function, f(x) = max(0, x) to the input tensor elementwise."""
    return ng.maximum(ng_inputs[0], 0)