示例#1
0
def _convert_sub(converter: ChainerConverter,
                 c_op: "chainer.functions.math.basic_math.Sub"):
    """Convert a Chainer elementwise subtraction into the WebDNN graph."""
    minuend = converter.get_variable(c_op.inputs[0])
    subtrahend = converter.get_variable(c_op.inputs[1])

    # Both operands must satisfy the broadcasting rules before the op is built.
    check_broadcast_constraints(minuend, subtrahend)

    # Chainer functions hold outputs as weak references, hence the call.
    converter.set_variable(c_op.outputs[0](), minuend - subtrahend)
示例#2
0
def _convert_linear_function(
        converter: ChainerConverter,
        c_op: "chainer.functions.connection.linear.LinearFunction"):
    """Convert a Chainer LinearFunction (fully-connected layer) to WebDNN ops."""
    x = converter.get_variable(c_op.inputs[0])
    w = converter.get_variable(c_op.inputs[1])  # type: ConstantVariable

    # Collapse every non-batch axis of the input into a single channel axis.
    x_flat, = Reshape(None,
                      in_order=x.order,
                      out_order=OrderNC,
                      out_shape=[x.shape[0], mul(x.shape[1:])])(x)

    # Reinterpret the weight as NC, then transpose it into CN layout.
    w_nc, = ReinterpretAxis(None, in_order=w.order, out_order=OrderNC)(w)
    w_cn, = Transpose(None)(w_nc)
    w_cn.change_order(OrderCN)

    y, = Linear(None)(x_flat, w_cn)
    # Restore the caller-visible axes: the batch axis of x and the output
    # axis of w.
    y, = ReinterpretAxis(None,
                         in_order=y.order,
                         out_order=Order([x.order.axes[0],
                                          w.order.axes[0]]))(y)

    if len(c_op.inputs) == 3:
        # A third input means the layer carries a bias term.
        b = converter.get_variable(c_op.inputs[2])
        check_broadcast_constraints(y, b)
        y = y + b

    # Chainer functions hold outputs as weak references, hence the call.
    converter.set_variable(c_op.outputs[0](), y)
示例#3
0
def sub_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Convert TensorFlow's Sub operation into an elementwise subtraction."""
    minuend = converter.get_variable(tf_op.inputs[0])
    subtrahend = converter.get_variable(tf_op.inputs[1])

    # Operands must be broadcast-compatible before the op is constructed.
    check_broadcast_constraints(minuend, subtrahend)

    converter.set_variable(tf_op.outputs[0], minuend - subtrahend)
示例#4
0
def squared_difference_handler(converter: TensorFlowConverter,
                               tf_op: "tf.Operation"):
    """Convert TensorFlow's SquaredDifference: (x - y) ** 2 elementwise."""
    lhs = converter.get_variable(tf_op.inputs[0])
    rhs = converter.get_variable(tf_op.inputs[1])

    check_broadcast_constraints(lhs, rhs)

    diff = lhs - rhs
    converter.set_variable(tf_op.outputs[0], diff ** 2)
示例#5
0
def less_equal_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Convert TensorFlow's LessEqual operation."""
    x = converter.get_variable(tf_op.inputs[0])
    y = converter.get_variable(tf_op.inputs[1])

    check_broadcast_constraints(x, y)

    # x <= y is expressed by swapping the operands of GreaterEqual: y >= x.
    result, = GreaterEqual(None)(y, x)
    converter.set_variable(tf_op.outputs[0], result)
示例#6
0
def less_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Convert TensorFlow's Less operation."""
    lhs = converter.get_variable(tf_op.inputs[0])
    rhs = converter.get_variable(tf_op.inputs[1])

    check_broadcast_constraints(lhs, rhs)

    # lhs < rhs is rewritten as rhs > lhs.
    converter.set_variable(tf_op.outputs[0], rhs > lhs)
示例#7
0
def minimum_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Convert TensorFlow's Minimum via a comparison mask:
    min(x, y) = x * (1 - (x > y)) + y * (x > y).
    """
    x = converter.get_variable(tf_op.inputs[0])
    y = converter.get_variable(tf_op.inputs[1])

    check_broadcast_constraints(x, y)

    # mask is 1 where x > y (take y there) and 0 elsewhere (take x).
    mask, = Greater(None)(x, y)
    blended = x * (1 - mask) + y * mask
    converter.set_variable(tf_op.outputs[0], blended)
示例#8
0
def maximum_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Convert TensorFlow's Maximum via a comparison mask:
    max(x, y) = x * (x > y) + y * (1 - (x > y)).
    """
    x = converter.get_variable(tf_op.inputs[0])
    y = converter.get_variable(tf_op.inputs[1])

    check_broadcast_constraints(x, y)

    # mask is 1 where x wins the comparison and 0 where y does.
    mask = x > y
    converter.set_variable(tf_op.outputs[0], x * mask + y * (1 - mask))
示例#9
0
File: util.py  Project: newpouy/webdnn
    def handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
        """Generic binary-op handler: apply OperatorClass to the two inputs."""
        lhs = converter.get_variable(tf_op.inputs[0])
        rhs = converter.get_variable(tf_op.inputs[1])

        # Validate broadcasting compatibility before wiring the operator.
        check_broadcast_constraints(lhs, rhs)

        result, = OperatorClass(None)(lhs, rhs)
        converter.set_variable(tf_op.outputs[0], result)
示例#10
0
File: math.py  Project: zhangaz1/webdnn
def _convert_minimum(converter: ChainerConverter,
                     c_op: "chainer.functions.Minimum"):
    """Convert chainer.functions.minimum using a comparison mask."""
    x = converter.get_variable(c_op.inputs[0])
    y = converter.get_variable(c_op.inputs[1])

    check_broadcast_constraints(x, y)

    # mask == 1 where x > y; pick y there and x everywhere else.
    mask = x > y
    minimum = x * (1 - mask) + y * mask
    # Chainer functions hold outputs as weak references, hence the call.
    converter.set_variable(c_op.outputs[0](), minimum)
示例#11
0
def _convert_maximum(converter: ChainerConverter,
                     c_op: "chainer.functions.Maximum"):
    """Convert chainer.functions.maximum using an explicit Greater mask."""
    x = converter.get_variable(c_op.inputs[0])
    y = converter.get_variable(c_op.inputs[1])

    check_broadcast_constraints(x, y)

    # mask == 1 where x > y; keep x there and fall back to y elsewhere.
    mask, = Greater(None)(x, y)
    maximum = x * mask + y * (1 - mask)
    # Chainer functions hold outputs as weak references, hence the call.
    converter.set_variable(c_op.outputs[0](), maximum)
示例#12
0
File: util.py  Project: zhangaz1/webdnn
    def handler(converter: ChainerConverter, c_op: "chainer.Function"):
        """Generic binary-op handler: apply OperatorClass to the two inputs."""
        lhs = converter.get_variable(c_op.inputs[0])
        rhs = converter.get_variable(c_op.inputs[1])

        # Validate broadcasting compatibility before wiring the operator.
        check_broadcast_constraints(lhs, rhs)

        result, = OperatorClass(None)(lhs, rhs)

        # Each chainer function holds output variables as weak reference
        converter.set_variable(c_op.outputs[0](), result)
示例#13
0
def _convert_linear_function(
        converter: ChainerConverter,
        c_op: "chainer.functions.connection.linear.LinearFunction"):
    """Convert a Chainer LinearFunction to a Tensordot (+ optional bias)."""
    x = converter.get_variable(c_op.inputs[0])
    w = converter.get_variable(c_op.inputs[1])  # type: ConstantVariable

    # Contract every non-batch axis of x against the input axis of w.
    # NOTE(review): x.order.axes[1:] is a sequence while w.order.axes[1] is a
    # single axis — presumably Tensordot normalizes both forms; verify.
    y, = Tensordot(None, axes=[x.order.axes[1:], w.order.axes[1]])(x, w)

    if len(c_op.inputs) == 3:
        # A third input means the layer carries a bias term.
        b = converter.get_variable(c_op.inputs[2])
        check_broadcast_constraints(y, b)
        y = y + b

    # Chainer functions hold outputs as weak references, hence the call.
    converter.set_variable(c_op.outputs[0](), y)
示例#14
0
    def handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
        """Generic binary-op handler with a scalar fast path.

        When one operand is a single-element constant and a scalar variant of
        the operator exists, emit the scalar operator instead of the full
        elementwise one.
        """
        a = converter.get_variable(tf_op.inputs[0])
        b = converter.get_variable(tf_op.inputs[1])

        result = None
        if ScalarOperatorClass is not None:
            # Scalar operation if one of the input is constant and scalar
            if isinstance(a, ConstantVariable) and a.size == 1:
                result, = ScalarOperatorClass(None, value=float(a.data[0]))(b)
            elif isinstance(b, ConstantVariable) and b.size == 1:
                result, = ScalarOperatorClass(None, value=float(b.data[0]))(a)

        if result is None:
            # Fall back to the general elementwise operator.
            check_broadcast_constraints(a, b)

            result, = OperatorClass(None)(a, b)
        converter.set_variable(tf_op.outputs[0], result)
示例#15
0
def _convert_linear_function(
        converter: ChainerConverter,
        c_op: "chainer.functions.connection.linear.LinearFunction"):
    """Convert a Chainer LinearFunction using the variable helper methods."""
    x = converter.get_variable(c_op.inputs[0])
    w = converter.get_variable(c_op.inputs[1])  # type: ConstantVariable

    # Flatten all non-batch axes of x into one channel axis (NC layout).
    x_flat = x.reshape([x.shape[0], mul(x.shape[1:])], OrderNC)
    # View the weight as NC without moving any data.
    w_nc = w.reinterpret_axes(OrderNC)

    y, = Linear(None)(x_flat, w_nc)
    # Give the result the caller-visible axes: batch axis of x, output
    # axis of w.
    y = y.reinterpret_axes(Order([x.order.axes[0], w.order.axes[0]]))

    if len(c_op.inputs) == 3:
        # A third input means the layer carries a bias term.
        b = converter.get_variable(c_op.inputs[2])
        check_broadcast_constraints(y, b)
        y = y + b

    # Chainer functions hold outputs as weak references, hence the call.
    converter.set_variable(c_op.outputs[0](), y)
示例#16
0
def select_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Convert TensorFlow's Select: pick x1 where cond holds, else x2."""
    cond = converter.get_variable(tf_op.inputs[0])
    x1 = converter.get_variable(tf_op.inputs[1])
    x2 = converter.get_variable(tf_op.inputs[2])

    # All three operands must be mutually broadcast-compatible.
    check_broadcast_constraints(cond, x1)
    check_broadcast_constraints(cond, x2)
    check_broadcast_constraints(x1, x2)

    selected, = Select(None)(cond, x1, x2)
    converter.set_variable(tf_op.outputs[0], selected)