Example #1
def visit(self, op, input):
    self.computation.set_op_rank(op)
    # TODO: is treating TensorSizeOp as a constant okay?
    # Build a scalar constant whose value is the total size of the reduction axes
    constant_tensor = [op.reduction_axes.size]
    constant_op = Constant(Type.f32, Shape([]), constant_tensor)
    self.computation.register_cpp_op(op, constant_op)
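As a rough illustration (hypothetical values, assuming reduction_axes.size is the total element count across those axes): for reduction axes of lengths (2, 3) the visitor registers a rank-0 f32 constant holding 6.

# Hypothetical: reduction axes of lengths (2, 3) -> size 6
constant_tensor = [6]
constant_op = Constant(Type.f32, Shape([]), constant_tensor)  # scalar (rank-0) constant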
Example #2
def test_reduce():

    float_element_type = Type.f32

    AddParam1 = Parameter(float_element_type, Shape([]))
    AddParam2 = Parameter(float_element_type, Shape([]))
    constant_op = Constant(float_element_type, Shape([]), [0.])
    reduce_function = Function(NodeVector([Add(AddParam1, AddParam2)]),
                               [AddParam1, AddParam2], 'add')

    A = Parameter(float_element_type, Shape([2, 2, 2]))
    parameter_list = [A]

    function = Function(
        NodeVector([Reduce(A, constant_op, reduce_function, AxisSet({0}))]),
        parameter_list, 'test')
    backend, cf = make_backend_call_frame(function)

    a = backend.make_primary_tensor_view(float_element_type, Shape([2, 2, 2]))
    result = backend.make_primary_tensor_view(float_element_type, Shape([2, 2]))

    # 8 float32 values = 32 bytes
    a.write(util.numpy_to_c(np.arange(8, dtype=np.float32).reshape(2, 2, 2)),
            0, 32)

    result_arr = np.zeros((2, 2), dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 16)
    cf.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 0, 16)

    a_arr = np.arange(8).reshape(2, 2, 2)
    result_arr_ref = np.add.reduce(a_arr)

    assert np.allclose(result_arr, result_arr_ref)
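For reference, the expected result above can be checked with NumPy alone; np.add.reduce defaults to axis 0, so it sums the two 2x2 slices:

import numpy as np

a_arr = np.arange(8).reshape(2, 2, 2)
# a_arr[0] + a_arr[1] == [[4, 6], [8, 10]]
assert np.array_equal(np.add.reduce(a_arr), a_arr[0] + a_arr[1])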
Example #3
def visit(self, op, input):
    self.computation.set_op_rank(op)
    input_axes = list(input.axes.lengths)
    # Reciprocal is computed as a ones constant divided by the input
    constant_op = Constant(Type.f32, Shape(input_axes), [1])
    ngraph_cpp_reciprocal_op = constant_op \
        / self.computation.lookup_cpp_op(input)
    self.computation.register_cpp_op(op, ngraph_cpp_reciprocal_op)
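In effect this builds the reciprocal as ones / x; a NumPy sketch of the same idea (illustrative only, not ngraph API):

import numpy as np

x = np.array([1.0, 2.0, 4.0], dtype=np.float32)
ones = np.ones_like(x)                 # stands in for the ones Constant above
assert np.allclose(ones / x, 1.0 / x)  # [1.0, 0.5, 0.25]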
Example #4
def make_constant_node(value, dtype=None):  # type: (NumericData, NumericType) -> Constant
    """Return an ngraph Constant node with the specified value."""
    ndarray = get_ndarray(value)
    if dtype:
        element_type = get_element_type(dtype)
    else:
        element_type = get_element_type(ndarray.dtype)

    return Constant(element_type, Shape(ndarray.shape), ndarray.flatten().tolist())
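A possible usage sketch (assuming get_ndarray and get_element_type accept plain Python and NumPy inputs, as their names suggest):

import numpy as np

c1 = make_constant_node(np.array([[1, 2], [3, 4]], dtype=np.float32))  # dtype inferred
c2 = make_constant_node([1.0, 2.0, 3.0], dtype=np.float32)             # dtype forced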
Example #5
def test_constant():
    element_type = Type.f32
    parameter_list = []
    function = Function([Constant(element_type, Shape([3, 3]), list(range(9)))],
                        parameter_list, "test")

    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation()[0]

    expected = np.arange(9).reshape(3, 3)
    assert np.allclose(result, expected)
Example #6
def make_scalar_constant(elem_type, scalar, shape=None, axis_set=None):
    # type: (Type, float, Shape, AxisSet) -> Broadcast
    """Create a Constant node for a scalar value, broadcast to the given shape."""
    if shape is None:
        shape = Shape([])
    if axis_set is None:
        axis_set = AxisSet(set())
    scalar_shape = Shape([])  # type: Shape
    constant_op = Constant(elem_type, scalar_shape, [scalar])
    constant_broadcast = Broadcast(constant_op, shape, axis_set)
    return constant_broadcast
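For instance, a usage sketch that broadcasts 0.5 to a 2x3 tensor (the axis indices here assume both output axes are broadcast axes):

half = make_scalar_constant(Type.f32, 0.5,
                            shape=Shape([2, 3]),
                            axis_set=AxisSet({0, 1}))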
Example #7
def visit(self, op):
    # Can be visited in the most trivial computation, where only a variable is created
    if not self.computation.has_cpp_op(op):
        if op.is_constant:
            # FIXME: make tensors based on data type
            constant_op = Constant(Type.f32, Shape(list(op.axes.lengths)),
                                   list(self.flatten(op.const.tolist())))

            self.computation.register_cpp_op(op, constant_op)
        else:
            op_element_type = Parameter(Type.f32,
                                        Shape(list(op.axes.lengths)))
            self.computation.register_cpp_op(op, op_element_type)
            if not op.is_placeholder:
                self.computation.neon_variable_list.append(op)
Example #8
def visit(self, op):
    self.computation.set_op_rank(op)
    tensor = op.tensor
    if not self.computation.has_cpp_op(op):
        if tensor.is_constant:
            # FIXME: make tensors based on data type
            constant_op = Constant(
                Type.f32, Shape(list(tensor.axes.lengths)),
                list(self.flatten(tensor.const.tolist())))
            self.computation.register_cpp_op(tensor, constant_op)
        else:
            op_element_type = Parameter(Type.f32,
                                        Shape(list(tensor.axes.lengths)))
            self.computation.register_cpp_op(tensor, op_element_type)
            if not tensor.is_placeholder:
                self.computation.neon_variable_list.append(tensor)
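Both visitors rely on self.flatten to turn the nested lists from tensor.const.tolist() into a flat value list for Constant; a minimal recursive sketch of such a helper (hypothetical, the actual implementation is not shown here):

def flatten(self, container):
    """Yield leaf values from arbitrarily nested lists/tuples."""
    for item in container:
        if isinstance(item, (list, tuple)):
            for leaf in self.flatten(item):
                yield leaf
        else:
            yield item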
Example #9
def test_constant():

    element_type = Type.f32
    parameter_list = []
    function = Function(NodeVector([Constant(element_type, Shape([3, 3]), list(range(9)))]),
                        parameter_list, 'test')
    backend = Backend.create(pytest.config.getoption('backend'))

    result = backend.create_tensor(element_type, Shape([3, 3]))

    result_arr = np.zeros((3, 3), dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 36)
    backend.call(backend.compile(function), [result], [])
    result.read(util.numpy_to_c(result_arr), 0, 36)

    result_arr_ref = np.arange(9).reshape(3, 3)

    assert np.allclose(result_arr, result_arr_ref)
Example #10
def test_constant():

    element_type = Type.f32
    parameter_list = []
    function = Function(NodeVector([Constant(element_type, Shape([3, 3]), list(range(9)))]),
                        parameter_list, 'test')
    backend, cf = make_backend_call_frame(function)

    result = backend.make_primary_tensor_view(element_type, Shape([3, 3]))

    result_arr = np.zeros((3, 3), dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 36)
    cf.call([], [result])
    result.read(util.numpy_to_c(result_arr), 0, 36)

    result_arr_ref = np.arange(9).reshape(3, 3)

    assert np.allclose(result_arr, result_arr_ref)
Example #11
def visit(self, op, tensor):
    self.computation.set_op_rank(op)
    variable = tensor.tensor
    # Total element count across all axes of the tensor
    list_size = 1
    for x in list(tensor.axes.lengths):
        list_size *= x
    constant_list = [op.scalar] * list_size
    ngraph_constant_op = Constant(Type.f32,
                                  Shape(list(tensor.axes.lengths)),
                                  constant_list)
    if variable not in self.computation.variables_cpp_op:
        # treat 'op' as the rhs of an assignment for forwarding and lookup purposes
        self.computation.variables_cpp_op[variable] = \
            (self.computation.scopemark[op.tensor], op)
        self.computation.register_cpp_op(op,
                                         ngraph_constant_op,
                                         set_name=False)
    else:
        raise RuntimeError("Variable updated more than once!")
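The constant_list built above is simply the scalar repeated once per tensor element; e.g. for axes of lengths (2, 3) and op.scalar = 0.0:

lengths = (2, 3)
list_size = 1
for x in lengths:
    list_size *= x                   # 2 * 3 = 6 elements
constant_list = [0.0] * list_size    # six zeros, one per element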
Example #12
def test_constant():

    element_type = Type.f32
    parameter_list = []
    function = Function(
        [Constant(element_type, Shape([3, 3]), list(range(9)))],
        parameter_list, 'test')
    backend = Backend.create(test.BACKEND_NAME)

    result = backend.create_tensor(element_type, Shape([3, 3]))

    result_arr = np.zeros((3, 3), dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 36)
    handle = backend.compile(function)
    handle.call([result], [])
    result.read(util.numpy_to_c(result_arr), 36)

    result_arr_ref = np.arange(9).reshape(3, 3)

    assert np.allclose(result_arr, result_arr_ref)