Example #1
def test_kernel_cache(transformer_factory):
    X = ng.make_axis(32)
    Y = ng.make_axis(32)
    C = ng.make_axis(16384)
    axes = ng.make_axes([
        X,
        Y
    ])
    bcast_axes = ng.make_axes([
        X,
        Y,
        C
    ])

    x_val = np.absolute(np.random.randn(*axes.lengths))
    y_val = np.absolute(np.random.randn(*bcast_axes.lengths))
    z_val = np.absolute(np.random.randn(*bcast_axes.lengths))

    x = ng.constant(x_val, axes)
    y = ng.constant(y_val, bcast_axes)
    z = ng.constant(z_val, bcast_axes)

    out = ng.add(ng.add(x, y), z)

    with executor(out) as ex:
        graph_val = ex()
    np_val = np.add(np.add(x_val.reshape(32, 32, 1), y_val), z_val)
    np.testing.assert_allclose(graph_val, np_val, rtol=1e-4)
Example #2
def test_kernel_cache(transformer_factory):
    X = ng.make_axis(32)
    Y = ng.make_axis(32)
    C = ng.make_axis(16384)
    axes = ng.make_axes([X, Y])
    bcast_axes = ng.make_axes([X, Y, C])

    # Limiting the maximum absolute value of tensor elements to 7.9.
    # See the description in function test_exit_condition above.

    is_flex = is_flex_factory(transformer_factory)
    clip_val = 7.9 if is_flex else 0

    x_val = rng.randn_abs_clip(axes, clip_max=clip_val)
    y_val = rng.randn_abs_clip(bcast_axes, clip_max=clip_val)
    z_val = rng.randn_abs_clip(bcast_axes, clip_max=clip_val)

    x = ng.constant(x_val, axes)
    y = ng.constant(y_val, bcast_axes)
    z = ng.constant(z_val, bcast_axes)

    out = ng.add(ng.add(x, y), z)

    with executor(out) as ex:
        graph_val = ex()
    np_val = np.add(np.add(x_val.reshape(32, 32, 1), y_val), z_val)
    ng.testing.assert_allclose(graph_val, np_val, rtol=1e-4, atol_multiplier=2)
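The flex variant above relies on rng.randn_abs_clip. A minimal NumPy sketch of what that helper is assumed to do, taking a plain shape tuple instead of an Axes object, with 0 meaning "no bound":

import numpy as np

def randn_abs_clip_sketch(shape, clip_min=0.0, clip_max=0.0):
    # |N(0, 1)| samples, optionally bounded from below and above
    vals = np.absolute(np.random.randn(*shape))
    if clip_max > 0:
        vals = np.minimum(vals, clip_max)
    if clip_min > 0:
        vals = np.maximum(vals, clip_min)
    return vals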
Example #3
def create_simple_if_with_two_outputs(condition_val):
    condition = ng.constant(condition_val, dtype=np.bool)

    # then_body
    X_t = ng.parameter([], np.float32, "X")
    Y_t = ng.parameter([], np.float32, "Y")
    Z_t = ng.parameter([], np.float32, "Z")

    add_t = ng.add(X_t, Y_t)
    mul_t = ng.multiply(Y_t, Z_t)
    then_body_res_1 = ng.result(add_t)
    then_body_res_2 = ng.result(mul_t)
    then_body = GraphBody([X_t, Y_t, Z_t], [then_body_res_1, then_body_res_2])
    then_body_inputs = [
        TensorIteratorInvariantInputDesc(1, 0),
        TensorIteratorInvariantInputDesc(2, 1),
        TensorIteratorInvariantInputDesc(3, 2)
    ]
    then_body_outputs = [
        TensorIteratorBodyOutputDesc(0, 0),
        TensorIteratorBodyOutputDesc(1, 1)
    ]

    # else_body
    X_e = ng.parameter([], np.float32, "X")
    Z_e = ng.parameter([], np.float32, "Z")
    W_e = ng.parameter([], np.float32, "W")

    add_e = ng.add(X_e, W_e)
    pow_e = ng.power(W_e, Z_e)
    else_body_res_1 = ng.result(add_e)
    else_body_res_2 = ng.result(pow_e)
    else_body = GraphBody([X_e, Z_e, W_e], [else_body_res_1, else_body_res_2])
    else_body_inputs = [
        TensorIteratorInvariantInputDesc(1, 0),
        TensorIteratorInvariantInputDesc(3, 1),
        TensorIteratorInvariantInputDesc(4, 2)
    ]
    else_body_outputs = [
        TensorIteratorBodyOutputDesc(0, 0),
        TensorIteratorBodyOutputDesc(1, 1)
    ]

    X = ng.constant(15.0, dtype=np.float32)
    Y = ng.constant(-5.0, dtype=np.float32)
    Z = ng.constant(4.0, dtype=np.float32)
    W = ng.constant(2.0, dtype=np.float32)
    if_node = ng.if_op(condition, [X, Y, Z, W], (then_body, else_body),
                       (then_body_inputs, else_body_inputs),
                       (then_body_outputs, else_body_outputs))
    return if_node
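For reference, with the constants above the two branches evaluate as follows (a quick check in plain Python):

X, Y, Z, W = 15.0, -5.0, 4.0, 2.0
then_outputs = (X + Y, Y * Z)   # (10.0, -20.0), returned when condition_val is True
else_outputs = (X + W, W ** Z)  # (17.0, 16.0), returned when condition_val is False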
Example #4
File: ops_nn.py  Project: rsumner31/ngraph
    def FC(self, c2_op, inputs):
        """
        Multiplies matrix `a` by matrix `b`. The inputs must be two-dimensional,
        the inner dimensions must match (possibly after transpose).

        Arguments:
            c2_op: OperatorDef object, the caffe2 node to convert.
            inputs: List of ngraph Ops as inputs to this node.

        Returns:
            A ngraph Op corresponding to the caffe2 node.

        Inputs to c2_op:
            a, b, transpose_a, transpose_b, a_is_sparse, b_is_sparse, name
        """
        # get inputs
        left, right, bias = inputs
        # check shape
        assert left.axes[1].length == right.axes[1].length
        # cast axis
        left_cast = ng.cast_axes(left, [left.axes[0], right.axes[1]])
        # add op
        dot_op = ng.dot(left_cast, right)
        # cast bias axis
        bias_cast = ng.cast_axes(bias, [dot_op.axes[-1]])
        # result op
        result_op = ng.add(dot_op, bias_cast)
        return result_op
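In NumPy terms the FC mapping above is a matmul against the transposed weights plus a bias; a minimal sketch with hypothetical shapes:

import numpy as np

left = np.random.randn(4, 3).astype(np.float32)   # activations, shape (batch, in)
right = np.random.randn(2, 3).astype(np.float32)  # weights in caffe2 (out, in) layout
bias = np.random.randn(2).astype(np.float32)
result = left @ right.T + bias                    # shape (batch, out)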
Example #5
def simple_if(condition_val):
    condition = ng.constant(condition_val, dtype=np.bool)
    # then_body
    X_t = ng.parameter([2], np.float32, "X")
    Y_t = ng.parameter([2], np.float32, "Y")

    then_mul = ng.multiply(X_t, Y_t)
    then_body_res_1 = ng.result(then_mul)
    then_body = GraphBody([X_t, Y_t], [then_body_res_1])
    then_body_inputs = [
        TensorIteratorInvariantInputDesc(1, 0),
        TensorIteratorInvariantInputDesc(2, 1)
    ]
    then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)]

    # else_body
    X_e = ng.parameter([2], np.float32, "X")
    Y_e = ng.parameter([2], np.float32, "Y")
    add_e = ng.add(X_e, Y_e)
    else_body_res_1 = ng.result(add_e)
    else_body = GraphBody([X_e, Y_e], [else_body_res_1])
    else_body_inputs = [
        TensorIteratorInvariantInputDesc(1, 0),
        TensorIteratorInvariantInputDesc(2, 1)
    ]
    else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)]

    X = ng.constant([3, 4], dtype=np.float32)
    Y = ng.constant([2, 1], dtype=np.float32)
    if_node = ng.if_op(condition, [X, Y], (then_body, else_body),
                       (then_body_inputs, else_body_inputs),
                       (then_body_outputs, else_body_outputs))
    relu = ng.relu(if_node)
    return relu
Example #6
def test_add_with_mul():

    element_type = Type.f32
    shape = Shape([2, 2])
    A = Parameter(element_type, shape)
    B = Parameter(element_type, shape)
    C = Parameter(element_type, shape)
    parameter_list = [A, B, C]
    function = Function([ng.multiply(ng.add(A, B), C)], parameter_list, 'test')
    backend = Backend.create(test.BACKEND_NAME)

    a = backend.create_tensor(element_type, shape)
    b = backend.create_tensor(element_type, shape)
    c = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(element_type, shape)

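    # each tensor holds 2x2 = 4 float32 values, so every write/read below moves 16 bytes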
    a.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 16)
    b.write(util.numpy_to_c(np.array([5, 6, 7, 8], dtype=np.float32)), 16)
    c.write(util.numpy_to_c(np.array([9, 10, 11, 12], dtype=np.float32)), 16)

    result_arr = np.array([0, 0, 0, 0], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 16)
    handle = backend.compile(function)
    handle.call([result], [a, b, c])
    result.read(util.numpy_to_c(result_arr), 16)

    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    b_arr = np.array([5, 6, 7, 8], dtype=np.float32)
    c_arr = np.array([9, 10, 11, 12], dtype=np.float32)
    result_arr_ref = (a_arr + b_arr) * c_arr

    assert np.allclose(result_arr, result_arr_ref)
Example #7
def create_function_with_memory(input_shape, data_type):
    input_data = ng.parameter(input_shape, name="input_data", dtype=data_type)
    rv = ng.read_value(input_data, "var_id_667")
    add = ng.add(rv, input_data, name="MemoryAdd")
    node = ng.assign(add, "var_id_667")
    res = ng.result(add, "res")
    func = Function(results=[res], sinks=[node], parameters=[input_data], name="name")
    caps = Function.to_capsule(func)
    return caps
Example #8
    def Eltwise(self, layer, inputs):
        """
        To support the Eltwise layer of caffe.

        Arguments:
            layer: Layer which needs to be mapped to an ngraph op
            inputs: input ops on which the current op depends
        Returns:
            ngraph output operation corresponding to the given layer
        """
        operation = layer.eltwise_param.operation

        if operation == caffe_pb2.EltwiseParameter.SUM:
            ax = inputs[0].axes
            out = ng.add(inputs[0], ng.cast_axes(inputs[1], ax))
            for inp in inputs[2:]:
                out = ng.add(out, ng.cast_axes(inp, ax))
            out.named = layer.name
            return out
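The SUM branch is a running elementwise sum over all inputs after axis-casting; in NumPy terms (a sketch):

import numpy as np

inputs = [np.ones((2, 3)), np.full((2, 3), 2.0), np.full((2, 3), 3.0)]
out = inputs[0] + inputs[1]
for inp in inputs[2:]:
    out = out + inp  # same accumulation as the loop above
assert np.array_equal(out, np.sum(inputs, axis=0))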
Example #9
def test_node_factory_wrapper_add():
    shape = [2, 2]
    dtype = np.int8
    parameter_a = ng.parameter(shape, dtype=dtype, name="A")
    parameter_b = ng.parameter(shape, dtype=dtype, name="B")

    node = ng.add(parameter_a, parameter_b, name="TestNode")

    assert node.get_type_name() == "Add"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [2, 2]
    assert node.friendly_name == "TestNode"
Example #10
def test_4d_elementwise(transformer_factory, input_axes):
    x_val = np.absolute(np.random.randn(*input_axes.lengths))
    y_val = np.absolute(np.random.randn(*input_axes.lengths))
    x = ng.constant(x_val, input_axes)
    y = ng.constant(y_val, input_axes)

    out = ng.add(x, y)

    with executor(out) as ex:
        graph_val = ex()
    np_val = np.add(x_val, y_val)
    np.testing.assert_allclose(graph_val, np_val, rtol=1e-4)
Example #11
def test_node_factory_wrapper_add():
    shape = [2, 2]
    dtype = np.int8
    parameter_a = ng.parameter(shape, dtype=dtype, name='A')
    parameter_b = ng.parameter(shape, dtype=dtype, name='B')

    node = ng.add(parameter_a, parameter_b, name='TestNode')

    assert node.get_type_name() == 'Add'
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [2, 2]
    assert node.name == 'TestNode'
Example #12
File: ops_binary.py  Project: ugiwgh/ngraph
    def Plus(self, cntk_op, inputs):
        """
        Returns input[0] + input[1] element-wise.

        Arguments:
            cntk_op: CNTK operation to be imported.
            inputs: List of inputs to this node.

        Returns:
            An ngraph Op.
        """
        cast_0, cast_1 = self._cast_for_binary_op(inputs)
        return ng.add(cast_0, cast_1).named(cntk_op.uid)
Example #13
def test_4d_chained(transformer_factory, input_axes):
    x_val = np.absolute(np.random.randn(*input_axes.lengths))
    y_val = np.absolute(np.random.randn(*input_axes.lengths))
    x = ng.constant(x_val, input_axes)
    y = ng.constant(y_val, input_axes)

    im = ng.reciprocal(x)
    out = ng.sum(ng.add(im, y), reduction_axes=input_axes[0])

    with executor(out) as ex:
        graph_val = ex()
    np_val = np.sum(np.add(np.reciprocal(x_val), y_val), 0)
    np.testing.assert_allclose(graph_val, np_val, rtol=1e-4)
Example #14
def test_4d_chained(transformer_factory, input_axes):

    # Limiting the maximum absolute value of tensor elements to 7.9.
    # See the description in function test_exit_condition above.

    # Limiting the minimum absolute value of tensor elements fed into the
    # reciprocal operation to 1/7.9.
    #
    # This is a consequence of the above and of flexpoint accuracy.
    # Very small numbers have poor absolute accuracy. When their reciprocal is
    # calculated, the result becomes very large and has even worse accuracy. If
    # small numbers were accepted as input to reciprocal in this test, the absolute
    # maximum value of the result would be unbounded, and so would the absolute
    # tolerance.
    # To be able to set atol in this test and have it pass, the minimum element of
    # the tensor fed into the reciprocal operation has to be limited.

    is_flex = is_flex_factory(transformer_factory)
    clip_val_max = 7.9 if is_flex else 0
    clip_val_min = 1.0 / 7.9 if is_flex else 0

    x_val = rng.randn_abs_clip(input_axes,
                               clip_min=clip_val_min,
                               clip_max=clip_val_max)
    y_val = rng.randn_abs_clip(input_axes, clip_max=clip_val_max)
    x = ng.constant(x_val, input_axes)
    y = ng.constant(y_val, input_axes)

    im = ng.reciprocal(x)
    out = ng.sum(ng.add(im, y), reduction_axes=input_axes[0])

    with executor(out) as ex:
        graph_val = ex()
    np_val = np.sum(np.add(np.reciprocal(x_val), y_val), 0)

    # atol_multiplier = 15 * x_val.shape[0]
    #
    # x_val.shape[0] is number elements added together in operation
    # ng.sum(X, reduction_axes=input_axes[0])
    #
    # 15 is calculated the following way:
    #
    # Input tensor has values from the range 1/7.9 - 7.9
    # For DEC=12 absolute error is equal to 0.5*2^-12 = 0.000122
    # 1/7.9 = 0.126582 with this error becomes 0.126704
    # Reciprocal of 1/7.9 is 7.9
    # Reciprocal of 1/7.9 + err = 7.892389
    # Absolute difference is 0.007611
    # It is 15.2 times larger than the atol limit 5e-4 from the Argon transformer
    ng.testing.assert_allclose(graph_val,
                               np_val,
                               rtol=1e-4,
                               atol_multiplier=15 * x_val.shape[0])
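The factor of 15 in the comment can be reproduced with a few lines of arithmetic:

err = 0.5 * 2 ** -12               # DEC=12 absolute error, ~0.000122
x = 1.0 / 7.9                      # smallest allowed input to reciprocal
diff = abs(1.0 / (x + err) - 7.9)  # ~0.0076, error after taking the reciprocal
ratio = diff / 5e-4                # ~15.2x the Argon transformer atol limit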
Example #15
def test_cputensor_add_constant(transformer_factory):
    """TODO."""
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_c = np.add(np_a, 2)

    a = ng.constant(np_a, [M, N])
    b = ng.constant(2)
    c = ng.add(a, b)
    with executor(c) as ex:
        result = ex()
    print(result)
    assert np.array_equal(result, np_c)
Example #16
    def construct_batchnorm_fprop_pattern(self):
        """
        Generate graph op that represents a pattern for batchnorm fprop operation.
        self.gamma * ((in_obj - xmean) * ng.reciprocal(ng.sqrt(xvar + self.eps))) + self.beta
        Returns:
               Single pattern that matches batchnorm fprop op
        """
        self.batchnorm_fprop_input_tensor_label = "in_obj"
        self.batchnorm_fprop_gamma_label = "gamma"
        self.batchnorm_fprop_beta_label = "beta"
        self.batchnorm_fprop_variance_label = "variance"
        self.batchnorm_fprop_epsilon_label = "epsilon"
        self.batchnorm_fprop_mean_label = "mean"

        # bind the labels to the ops that need to be updated in the dict
        in_obj = PatternLabelOp(self.batchnorm_fprop_input_tensor_label,
                                (lambda op: isinstance(op, ContiguousOp)))
        flatten_tensor = PatternSkipOp(in_obj,
                                       (lambda op: isinstance(op, Flatten)))
        gamma = PatternLabelOp(self.batchnorm_fprop_gamma_label,
                               (lambda op: isinstance(op, BroadcastOp)))
        beta = PatternLabelOp(self.batchnorm_fprop_beta_label,
                              (lambda op: isinstance(op, BroadcastOp)))
        variance = PatternLabelOp(self.batchnorm_fprop_variance_label,
                                  (lambda op: isinstance(op, Divide)))
        epsilon = PatternLabelOp(self.batchnorm_fprop_epsilon_label,
                                 (lambda op: isinstance(op, BroadcastOp)))
        mean = PatternLabelOp(self.batchnorm_fprop_mean_label,
                              (lambda op: isinstance(op, Divide)))

        # construct the fprop batchnorm pattern matching the computation graph
        # ng.sqrt(xvar + self.eps)
        SqrtofVarianceAndEps = ng.sqrt(ng.add(variance, epsilon))
        # ng.reciprocal(ng.sqrt(xvar + self.eps))
        reciprocal_op = ng.reciprocal(SqrtofVarianceAndEps)
        reciprocal_op_w_broadcast = ng.PatternSkipOp(reciprocal_op,
                                                     lambda op: isinstance(op, BroadcastOp))

        mean_bcast = ng.PatternSkipOp(mean, lambda op: isinstance(op, BroadcastOp))
        # (in_obj - xmean) * ng.reciprocal(ng.sqrt(xvar + self.eps))
        mul_op_1 = ng.multiply(ng.subtract(flatten_tensor, mean_bcast), reciprocal_op_w_broadcast)
        # self.gamma * ((in_obj - xmean) * ng.reciprocal(ng.sqrt(xvar + self.eps)))
        MultiplyGamma = ng.multiply(mul_op_1, gamma)
        # self.gamma * ((in_obj - xmean) * ng.reciprocal(ng.sqrt(xvar + self.eps))) + self.beta
        AddBeta = ng.Unflatten(ng.Add(MultiplyGamma, beta))
        return AddBeta
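The expression the pattern encodes can be checked numerically; a short NumPy sketch confirming that the reciprocal-of-sqrt form equals the canonical batchnorm normalization:

import numpy as np

x = np.random.randn(8, 4).astype(np.float32)
gamma, beta, eps = 1.5, 0.1, 1e-5
mean = x.mean(axis=0)
var = x.var(axis=0)
out = gamma * ((x - mean) * np.reciprocal(np.sqrt(var + eps))) + beta
ref = gamma * (x - mean) / np.sqrt(var + eps) + beta
assert np.allclose(out, ref)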
Example #17
def test_cputensor_fusion(transformer_factory):
    """TODO."""
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_b = np.array([[3, 2, 1]], dtype=np.float32)
    np_d = np.multiply(np_b, np.add(np_a, 2))

    a = ng.constant(np_a, [M, N])
    b = ng.constant(np_b, [M, N])
    c = ng.constant(2)
    d = ng.multiply(b, ng.add(a, c))

    with executor(d) as ex:
        result = ex()
    print(result)
    assert np.array_equal(result, np_d)
Example #18
def test_idempotent_axes_c():
    """
    Test axes transformations with autodiff, case c, with broadcast,
    slice, cast and dim-shuffle
    """
    with ExecutorFactory() as ex:
        axes = ng.make_axes([ng.make_axis(3), ng.make_axis(1)])
        result_axes = [ng.make_axis(length=axis.length) for axis in axes]

        # variable
        w = ng.variable(axes, initial_value=np.ones((3, 1)))

        # broadcast l / r, introducing dummy length 1 axes
        l = ng.broadcast(w, axes)
        r = ng.broadcast(w, axes)

        # slice
        axes_slice = [slice(None, None, None), slice(None, None, None)]
        l_sliced = ng.tensor_slice(l, axes_slice)
        r_sliced = ng.tensor_slice(r, axes_slice)

        # cast r
        r_sliced_casted = ng.cast_axes(r_sliced, axes)

        # perform add
        result = ng.add(l_sliced, r_sliced_casted)

        # cast / dimshuffle
        result = ng.cast_axes(result, result_axes)
        result = ng.axes_with_order(result, result_axes)

        # cost and grad
        cost = ng.sum(result, reduction_axes=result.axes)
        grad = ng.deriv(cost, w)

        grad_comp = ex.executor(grad)
        cost_comp = ex.executor(cost)

        cost_comp_ng = cost_comp()
        grad_comp_ng = grad_comp()
        grad_comp_np = np.ones((3, 1)) * 2.
        assert cost_comp_ng == 6.0
        assert np.array_equal(grad_comp_ng, grad_comp_np)
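The expected values follow directly: result = l + r = 2w, so for w of all ones cost = sum(2w) = 6.0 and d(cost)/dw = 2 everywhere. In NumPy:

import numpy as np

w = np.ones((3, 1))
cost = np.sum(w + w)          # 6.0, matching cost_comp_ng
grad = np.full((3, 1), 2.0)   # d(sum(2w))/dw = 2, matching grad_comp_np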
Example #19
def test_sink_function_ctor():
    input_data = ng.parameter([2, 2], name="input_data", dtype=np.float32)
    rv = ng.read_value(input_data, "var_id_667")
    add = ng.add(rv, input_data, name="MemoryAdd")
    node = ng.assign(add, "var_id_667")
    res = ng.result(add, "res")
    function = Function(results=[res], sinks=[node], parameters=[input_data], name="TestFunction")

    ordered_ops = function.get_ordered_ops()
    op_types = [op.get_type_name() for op in ordered_ops]
    assert op_types == ["Parameter", "ReadValue", "Add", "Assign", "Result"]
    assert len(function.get_ops()) == 5
    assert function.get_output_size() == 1
    assert function.get_output_op(0).get_type_name() == "Result"
    assert function.get_output_element_type(0) == input_data.get_element_type()
    assert list(function.get_output_shape(0)) == [2, 2]
    assert (function.get_parameters()[0].get_partial_shape()) == PartialShape([2, 2])
    assert len(function.get_parameters()) == 1
    assert len(function.get_results()) == 1
    assert function.get_friendly_name() == "TestFunction"
Example #20
def test_4d_elementwise(transformer_factory, input_axes):

    # Limiting the maximum absolute value of tensor elements to 7.9.
    # See the description in function test_exit_condition above.

    is_flex = is_flex_factory(transformer_factory)
    clip_val = 7.9 if is_flex else 0

    x_val = rng.randn_abs_clip(input_axes, clip_max=clip_val)
    y_val = rng.randn_abs_clip(input_axes, clip_max=clip_val)
    x = ng.constant(x_val, input_axes)
    y = ng.constant(y_val, input_axes)

    out = ng.add(x, y)

    with executor(out) as ex:
        graph_val = ex()

    np_val = np.add(x_val, y_val)

    ng.testing.assert_allclose(graph_val, np_val, rtol=1e-4)
Example #21
def test_idempotent_axes_b():
    """
    Test axes transformations with autodiff, case b, with broadcast applied
    to the same tensor
    """
    with ExecutorFactory() as ex:
        axes = ng.make_axes([ng.make_axis(3), ng.make_axis(1)])

        w = ng.variable(axes, initial_value=np.ones((3, 1)))
        l = ng.broadcast(w, axes)
        r = ng.broadcast(w, axes)
        result = ng.add(l, r)

        result = ng.cast_axes(result, axes)
        cost = ng.sum(result, reduction_axes=axes)
        grad = ng.deriv(cost, w)

        grad_comp = ex.executor(grad)
        cost_comp = ex.executor(cost)

        assert cost_comp() == 6.0
        assert np.array_equal(grad_comp(), np.ones((3, 1)) * 2.)
Example #22
def binary_op(op_str, a, b):

    if op_str == '+':
        return a + b
    elif op_str == 'Add':
        return ng.add(a, b)
    elif op_str == '-':
        return a - b
    elif op_str == 'Sub':
        return ng.subtract(a, b)
    elif op_str == '*':
        return a * b
    elif op_str == 'Mul':
        return ng.multiply(a, b)
    elif op_str == '/':
        return a / b
    elif op_str == 'Div':
        return ng.divide(a, b)
    elif op_str == 'Dot':
        return Dot(a, b)
    elif op_str == 'Equal':
        return ng.equal(a, b)
    elif op_str == 'Greater':
        return ng.greater(a, b)
    elif op_str == 'GreaterEq':
        return ng.greater_equal(a, b)
    elif op_str == 'Less':
        return ng.less(a, b)
    elif op_str == 'LessEq':
        return ng.less_equal(a, b)
    elif op_str == 'Maximum':
        return ng.maximum(a, b)
    elif op_str == 'Minimum':
        return ng.minimum(a, b)
    elif op_str == 'NotEqual':
        return ng.not_equal(a, b)
    elif op_str == 'Power':
        return ng.power(a, b)
Example #23
def test_set_argument():
    runtime = get_runtime()

    data1 = np.array([1, 2, 3])
    data2 = np.array([4, 5, 6])
    data3 = np.array([7, 8, 9])

    node1 = ng.constant(data1, dtype=np.float32)
    node2 = ng.constant(data2, dtype=np.float32)
    node3 = ng.constant(data3, dtype=np.float32)
    node_add = ng.add(node1, node2)

    # Original arguments
    computation = runtime.computation(node_add)
    output = computation()
    assert np.allclose(data1 + data2, output)

    # Arguments changed by set_argument
    node_add.set_argument(1, node3.output(0))
    output = computation()
    assert np.allclose(data1 + data3, output)

    # Arguments changed by set_argument
    node_add.set_argument(0, node3.output(0))
    output = computation()
    assert np.allclose(data3 + data3, output)

    # Arguments changed by set_argument(OutputVector)
    node_add.set_arguments([node2.output(0), node3.output(0)])
    output = computation()
    assert np.allclose(data2 + data3, output)

    # Arguments changed by set_arguments(NodeVector)
    node_add.set_arguments([node1, node2])
    output = computation()
    assert np.allclose(data1 + data2, output)
Example #24
def binary_op(op_str, a, b):

    if op_str == "+":
        return a + b
    elif op_str == "Add":
        return ng.add(a, b)
    elif op_str == "-":
        return a - b
    elif op_str == "Sub":
        return ng.subtract(a, b)
    elif op_str == "*":
        return a * b
    elif op_str == "Mul":
        return ng.multiply(a, b)
    elif op_str == "/":
        return a / b
    elif op_str == "Div":
        return ng.divide(a, b)
    elif op_str == "Equal":
        return ng.equal(a, b)
    elif op_str == "Greater":
        return ng.greater(a, b)
    elif op_str == "GreaterEq":
        return ng.greater_equal(a, b)
    elif op_str == "Less":
        return ng.less(a, b)
    elif op_str == "LessEq":
        return ng.less_equal(a, b)
    elif op_str == "Maximum":
        return ng.maximum(a, b)
    elif op_str == "Minimum":
        return ng.minimum(a, b)
    elif op_str == "NotEqual":
        return ng.not_equal(a, b)
    elif op_str == "Power":
        return ng.power(a, b)
Example #25
def test_add_with_mul():

    element_type = Type.f32
    shape = Shape([4])
    A = Parameter(element_type, shape)
    B = Parameter(element_type, shape)
    C = Parameter(element_type, shape)
    parameter_list = [A, B, C]
    function = Function([ng.multiply(ng.add(A, B), C)], parameter_list, "test")

    runtime = get_runtime()
    computation = runtime.computation(function, A, B, C)
    result = computation(
        np.array([1, 2, 3, 4], dtype=np.float32),
        np.array([5, 6, 7, 8], dtype=np.float32),
        np.array([9, 10, 11, 12], dtype=np.float32),
    )[0]

    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    b_arr = np.array([5, 6, 7, 8], dtype=np.float32)
    c_arr = np.array([9, 10, 11, 12], dtype=np.float32)
    result_arr_ref = (a_arr + b_arr) * c_arr

    assert np.allclose(result, result_arr_ref)
Example #26
def test_node_version():
    node = ng.add([1], [2])

    assert node.get_version() == 1
    assert node.version == 1
Example #27
def create_ngraph_function(args) -> Function:
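    # Note: this variant relies on a module-level shape_and_length helper;
    # Example #29 below defines the same helper inline.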
    weights = np.fromfile(args.model, dtype=np.float32)
    weights_offset = 0
    padding_begin = [0, 0]
    padding_end = [0, 0]

    # input
    input_shape = [64, 1, 28, 28]
    param_node = ngraph.parameter(input_shape, np.float32, 'Parameter')

    # convolution 1
    conv_1_kernel_shape, conv_1_kernel_length = shape_and_length([20, 1, 5, 5])
    conv_1_kernel = ngraph.constant(
        weights[0:conv_1_kernel_length].reshape(conv_1_kernel_shape))
    weights_offset += conv_1_kernel_length
    conv_1_node = ngraph.convolution(param_node, conv_1_kernel, [1, 1],
                                     padding_begin, padding_end, [1, 1])

    # add 1
    add_1_kernel_shape, add_1_kernel_length = shape_and_length([1, 20, 1, 1])
    add_1_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_1_kernel_length].reshape(add_1_kernel_shape))
    weights_offset += add_1_kernel_length
    add_1_node = ngraph.add(conv_1_node, add_1_kernel)

    # maxpool 1
    maxpool_1_node = ngraph.max_pool(add_1_node, [2, 2], padding_begin,
                                     padding_end, [2, 2], 'ceil', None)

    # convolution 2
    conv_2_kernel_shape, conv_2_kernel_length = shape_and_length(
        [50, 20, 5, 5])
    conv_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                conv_2_kernel_length].reshape(conv_2_kernel_shape))
    weights_offset += conv_2_kernel_length
    conv_2_node = ngraph.convolution(maxpool_1_node, conv_2_kernel, [1, 1],
                                     padding_begin, padding_end, [1, 1])

    # add 2
    add_2_kernel_shape, add_2_kernel_length = shape_and_length([1, 50, 1, 1])
    add_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_2_kernel_length].reshape(add_2_kernel_shape))
    weights_offset += add_2_kernel_length
    add_2_node = ngraph.add(conv_2_node, add_2_kernel)

    # maxpool 2
    maxpool_2_node = ngraph.max_pool(add_2_node, [2, 2], padding_begin,
                                     padding_end, [2, 2], 'ceil', None)

    # reshape 1
    reshape_1_dims, reshape_1_length = shape_and_length([2])
    # workaround to get int64 weights from float32 ndarray w/o unnecessary copying
    dtype_weights = np.frombuffer(weights[weights_offset:weights_offset +
                                          2 * reshape_1_length],
                                  dtype=np.int64)
    reshape_1_kernel = ngraph.constant(dtype_weights)
    weights_offset += 2 * reshape_1_length
    reshape_1_node = ngraph.reshape(maxpool_2_node, reshape_1_kernel, True)

    # matmul 1
    matmul_1_kernel_shape, matmul_1_kernel_length = shape_and_length(
        [500, 800])
    matmul_1_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                matmul_1_kernel_length].reshape(matmul_1_kernel_shape))
    weights_offset += matmul_1_kernel_length
    matmul_1_node = ngraph.matmul(reshape_1_node, matmul_1_kernel, False, True)

    # add 3
    add_3_kernel_shape, add_3_kernel_length = shape_and_length([1, 500])
    add_3_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_3_kernel_length].reshape(add_3_kernel_shape))
    weights_offset += add_3_kernel_length
    add_3_node = ngraph.add(matmul_1_node, add_3_kernel)

    # ReLU
    relu_node = ngraph.relu(add_3_node)

    # reshape 2
    reshape_2_kernel = ngraph.constant(dtype_weights)
    reshape_2_node = ngraph.reshape(relu_node, reshape_2_kernel, True)

    # matmul 2
    matmul_2_kernel_shape, matmul_2_kernel_length = shape_and_length([10, 500])
    matmul_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                matmul_2_kernel_length].reshape(matmul_2_kernel_shape))
    weights_offset += matmul_2_kernel_length
    matmul_2_node = ngraph.matmul(reshape_2_node, matmul_2_kernel, False, True)

    # add 4
    add_4_kernel_shape, add_4_kernel_length = shape_and_length([1, 10])
    add_4_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_4_kernel_length].reshape(add_4_kernel_shape))
    weights_offset += add_4_kernel_length
    add_4_node = ngraph.add(matmul_2_node, add_4_kernel)

    # softmax
    softmax_axis = 1
    softmax_node = ngraph.softmax(add_4_node, softmax_axis)

    # result
    result_node = ngraph.result(softmax_node)

    # nGraph function
    function = Function(result_node, [param_node], 'lenet')

    return function
Example #28
def test_tensor_iterator():
    from ngraph.utils.tensor_iterator_types import (
        GraphBody,
        TensorIteratorSliceInputDesc,
        TensorIteratorMergedInputDesc,
        TensorIteratorInvariantInputDesc,
        TensorIteratorBodyOutputDesc,
        TensorIteratorConcatOutputDesc,
    )

    #  Body parameters
    body_timestep = ng.parameter([], np.int32, "timestep")
    body_data_in = ng.parameter([1, 2, 2], np.float32, "body_in")
    body_prev_cma = ng.parameter([2, 2], np.float32, "body_prev_cma")
    body_const_one = ng.parameter([], np.int32, "body_const_one")

    # CMA = cumulative moving average
    prev_cum_sum = ng.multiply(ng.convert(body_timestep, "f32"), body_prev_cma)
    curr_cum_sum = ng.add(prev_cum_sum, ng.squeeze(body_data_in, [0]))
    elem_cnt = ng.add(body_const_one, body_timestep)
    curr_cma = ng.divide(curr_cum_sum, ng.convert(elem_cnt, "f32"))
    cma_hist = ng.unsqueeze(curr_cma, [0])

    # TI inputs
    data = ng.parameter([16, 2, 2], np.float32, "data")
    # Iterations count
    zero = ng.constant(0, dtype=np.int32)
    one = ng.constant(1, dtype=np.int32)
    initial_cma = ng.constant(np.zeros([2, 2], dtype=np.float32),
                              dtype=np.float32)
    iter_cnt = ng.range(zero, np.int32(16), np.int32(1))
    ti_inputs = [iter_cnt, data, initial_cma, one]

    graph_body = GraphBody(
        [body_timestep, body_data_in, body_prev_cma, body_const_one],
        [curr_cma, cma_hist])
    ti_slice_input_desc = [
        # timestep
        # input_idx, body_param_idx, start, stride, part_size, end, axis
        TensorIteratorSliceInputDesc(0, 0, 0, 1, 1, -1, 0),
        # data
        TensorIteratorSliceInputDesc(1, 1, 0, 1, 1, -1, 0),
    ]
    ti_merged_input_desc = [
        # body prev/curr_cma
        TensorIteratorMergedInputDesc(2, 2, 0),
    ]
    ti_invariant_input_desc = [
        # body const one
        TensorIteratorInvariantInputDesc(3, 3),
    ]

    # TI outputs
    ti_body_output_desc = [
        # final average
        TensorIteratorBodyOutputDesc(0, 0, -1),
    ]
    ti_concat_output_desc = [
        # history of cma
        TensorIteratorConcatOutputDesc(1, 1, 0, 1, 1, -1, 0),
    ]

    node = ng.tensor_iterator(
        ti_inputs,
        graph_body,
        ti_slice_input_desc,
        ti_merged_input_desc,
        ti_invariant_input_desc,
        ti_body_output_desc,
        ti_concat_output_desc,
    )

    assert node.get_type_name() == "TensorIterator"
    assert node.get_output_size() == 2
    # final average
    assert list(node.get_output_shape(0)) == [2, 2]
    # cma history
    assert list(node.get_output_shape(1)) == [16, 2, 2]
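The body ops implement the cumulative moving average recurrence cma_t = (t * cma_{t-1} + x_t) / (t + 1); a NumPy sketch verifying that after 16 steps the first output equals the plain mean:

import numpy as np

data = np.random.randn(16, 2, 2).astype(np.float32)
cma = np.zeros((2, 2), dtype=np.float32)
for t in range(16):
    cma = (t * cma + data[t]) / (t + 1)  # same recurrence as the TI body
assert np.allclose(cma, data.mean(axis=0), rtol=1e-4, atol=1e-6)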
Example #29
def create_ngraph_function(args: argparse.Namespace) -> ngraph.impl.Function:
    """Create a network on the fly from the source code using ngraph"""
    def shape_and_length(shape: list) -> typing.Tuple[list, int]:
        length = reduce(lambda x, y: x * y, shape)
        return shape, length

    weights = np.fromfile(args.model, dtype=np.float32)
    weights_offset = 0
    padding_begin = padding_end = [0, 0]

    # input
    input_shape = [64, 1, 28, 28]
    param_node = ngraph.parameter(input_shape, np.float32, 'Parameter')

    # convolution 1
    conv_1_kernel_shape, conv_1_kernel_length = shape_and_length([20, 1, 5, 5])
    conv_1_kernel = ngraph.constant(
        weights[0:conv_1_kernel_length].reshape(conv_1_kernel_shape))
    weights_offset += conv_1_kernel_length
    conv_1_node = ngraph.convolution(param_node, conv_1_kernel, [1, 1],
                                     padding_begin, padding_end, [1, 1])

    # add 1
    add_1_kernel_shape, add_1_kernel_length = shape_and_length([1, 20, 1, 1])
    add_1_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_1_kernel_length].reshape(add_1_kernel_shape), )
    weights_offset += add_1_kernel_length
    add_1_node = ngraph.add(conv_1_node, add_1_kernel)

    # maxpool 1
    maxpool_1_node = ngraph.max_pool(add_1_node, [2, 2], padding_begin,
                                     padding_end, [2, 2], 'ceil', None)

    # convolution 2
    conv_2_kernel_shape, conv_2_kernel_length = shape_and_length(
        [50, 20, 5, 5])
    conv_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                conv_2_kernel_length].reshape(conv_2_kernel_shape), )
    weights_offset += conv_2_kernel_length
    conv_2_node = ngraph.convolution(maxpool_1_node, conv_2_kernel, [1, 1],
                                     padding_begin, padding_end, [1, 1])

    # add 2
    add_2_kernel_shape, add_2_kernel_length = shape_and_length([1, 50, 1, 1])
    add_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_2_kernel_length].reshape(add_2_kernel_shape), )
    weights_offset += add_2_kernel_length
    add_2_node = ngraph.add(conv_2_node, add_2_kernel)

    # maxpool 2
    maxpool_2_node = ngraph.max_pool(add_2_node, [2, 2], padding_begin,
                                     padding_end, [2, 2], 'ceil', None)

    # reshape 1
    reshape_1_dims, reshape_1_length = shape_and_length([2])
    # workaround to get int64 weights from float32 ndarray w/o unnecessary copying
    dtype_weights = np.frombuffer(
        weights[weights_offset:weights_offset + 2 * reshape_1_length],
        dtype=np.int64,
    )
    reshape_1_kernel = ngraph.constant(dtype_weights)
    weights_offset += 2 * reshape_1_length
    reshape_1_node = ngraph.reshape(maxpool_2_node, reshape_1_kernel, True)

    # matmul 1
    matmul_1_kernel_shape, matmul_1_kernel_length = shape_and_length(
        [500, 800])
    matmul_1_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                matmul_1_kernel_length].reshape(matmul_1_kernel_shape), )
    weights_offset += matmul_1_kernel_length
    matmul_1_node = ngraph.matmul(reshape_1_node, matmul_1_kernel, False, True)

    # add 3
    add_3_kernel_shape, add_3_kernel_length = shape_and_length([1, 500])
    add_3_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_3_kernel_length].reshape(add_3_kernel_shape), )
    weights_offset += add_3_kernel_length
    add_3_node = ngraph.add(matmul_1_node, add_3_kernel)

    # ReLU
    relu_node = ngraph.relu(add_3_node)

    # reshape 2
    reshape_2_kernel = ngraph.constant(dtype_weights)
    reshape_2_node = ngraph.reshape(relu_node, reshape_2_kernel, True)

    # matmul 2
    matmul_2_kernel_shape, matmul_2_kernel_length = shape_and_length([10, 500])
    matmul_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                matmul_2_kernel_length].reshape(matmul_2_kernel_shape), )
    weights_offset += matmul_2_kernel_length
    matmul_2_node = ngraph.matmul(reshape_2_node, matmul_2_kernel, False, True)

    # add 4
    add_4_kernel_shape, add_4_kernel_length = shape_and_length([1, 10])
    add_4_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_4_kernel_length].reshape(add_4_kernel_shape), )
    weights_offset += add_4_kernel_length
    add_4_node = ngraph.add(matmul_2_node, add_4_kernel)

    # softmax
    softmax_axis = 1
    softmax_node = ngraph.softmax(add_4_node, softmax_axis)

    # result
    result_node = ngraph.result(softmax_node)
    return ngraph.impl.Function(result_node, [param_node], 'lenet')
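Both LeNet builders consume a single flat float32 blob, advancing weights_offset past each parameter in turn. A minimal sketch of that scheme, with hypothetical shapes and a stand-in blob:

import numpy as np
from functools import reduce

weights = np.random.randn(100).astype(np.float32)  # stand-in for the .bin contents
offset = 0

def next_blob(shape):
    global offset
    length = reduce(lambda x, y: x * y, shape)
    blob = weights[offset:offset + length].reshape(shape)
    offset += length
    return blob

kernel = next_blob([2, 1, 3, 3])  # consumes the first 18 values
bias = next_blob([1, 2, 1, 1])    # consumes the next 2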
Example #30
def Add(onnx_node, ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Perform element-wise binary addition."""
    left, right = broadcast_for_binary_operation(onnx_node, ng_inputs)
    return ng.add(left, right)
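Assuming the helper produces numpy-style (trailing-axis) broadcast shapes, the op then reduces to a plain elementwise sum; in NumPy terms:

import numpy as np

left = np.ones((2, 3, 4), dtype=np.float32)
right = np.arange(4, dtype=np.float32)
out = left + right  # broadcast over trailing axes, result shape (2, 3, 4)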