Example 1
    def __init__(self, arr, ctx=None):
        """Construct the tensor object from a numpy array or tvm.nd.NDArray."""
        if isinstance(arr, _np.ndarray):
            # Copy the numpy array onto the requested device (CPU by default).
            ctx = ctx if ctx else _nd.cpu(0)
            self.__init_handle_by_constructor__(_vmobj.Tensor,
                                                _nd.array(arr, ctx=ctx))
        elif isinstance(arr, _nd.NDArray):
            # Already a TVM NDArray; wrap it directly.
            self.__init_handle_by_constructor__(_vmobj.Tensor, arr)
        else:
            raise RuntimeError("Unsupported type for tensor object.")
Example 2
# Imports assumed from the surrounding test module (TVM's sparse tests).
from collections import namedtuple
import numpy as np
import tvm
import tvm.contrib.sparse as tvmsp
import tvm.ndarray as _nd

def test_sparse_array_tuple():
    dtype, itype = 'float32', 'int32'
    stype = 'csr'
    target = 'llvm'
    ctx = tvm.context(target, 0)
    nr, nc, n = tvm.var('nr'), tvm.var('nc'), tvm.var('n')
    A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name='A', dtype=dtype)
    assert A.stype == 'csr'
    C = tvm.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter')
    s = tvm.create_schedule(C.op)
    _nr, _nc = 3, 5
    a = np.maximum(np.random.uniform(size=(_nr, _nc)).astype(dtype) - .6, 0.)
    # convert to sparse array tuple
    source_array = a
    ridx, cidx = np.nonzero(source_array)
    data = source_array[ridx, cidx]
    a_data = _nd.array(data, ctx)
    indices = np.nonzero(source_array)[1].astype(itype)
    a_indices = _nd.array(indices, ctx)
    indptr = [0] + np.apply_along_axis(
        np.count_nonzero, axis=1, arr=source_array).tolist()
    indptr = np.cumsum(np.array(indptr, itype)).astype(itype)
    a_indptr = _nd.array(indptr, ctx)
    a_init = (a_data, a_indices, a_indptr)
    # construct tvm sparse array with tuple
    a = tvmsp.array(a_init, shape=source_array.shape, ctx=ctx)
    assert a.data.dtype == a.dtype
    Ab = namedtuple('CSRBuffer', ['data', 'indices', 'indptr'])
    Ab.data = tvm.decl_buffer(a.data.shape, a.data.dtype, name='A_data')
    Ab.indices = tvm.decl_buffer(a.data.shape, a.data.dtype, name='A_indices')
    binds = {A.data: Ab.data, A.indices: Ab.indices}
    f = tvm.build(s, [nr, A.data, C], target, binds=binds)
    c = tvmsp.array(np.zeros((_nr, _nc), dtype), ctx)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data.shape[0], a.data, c.data)
    np.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2., rtol=1e-5)
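
The (data, indices, indptr) tuple built above is the standard CSR layout, so it can be cross-checked against scipy. A minimal sketch, assuming scipy is installed (it is not used by the test itself):

import numpy as np
import scipy.sparse as sp

dense = np.array([[0., 2., 0.],
                  [1., 0., 3.]], dtype='float32')
csr = sp.csr_matrix(dense)

# Rebuild the CSR pieces the same way the test does.
ridx, cidx = np.nonzero(dense)
data = dense[ridx, cidx]
indices = cidx.astype('int32')
indptr = np.cumsum(
    [0] + np.count_nonzero(dense, axis=1).tolist()).astype('int32')

assert np.array_equal(data, csr.data)
assert np.array_equal(indices, csr.indices)
assert np.array_equal(indptr, csr.indptr)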
Example 3
    def _run_debug(self):
        """Execute the node specified with index will be executed.
        Each debug output will be copied to the buffer
        Time consumed for each execution will be set as debug output.

        """
        self.debug_datum._time_list = [[float(t) * 1e-6]
                                       for t in self.run_individual(10, 1, 1)]
        for i, node in enumerate(self.debug_datum.get_graph_nodes()):
            num_outputs = self.debug_datum.get_graph_node_output_num(node)
            for j in range(num_outputs):
                out_tensor = self._get_output_by_layer(i, j)
                out_tensor = array(out_tensor)
                self.debug_datum._output_tensor_list.append(out_tensor)
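
For reference, a small sketch of how the per-node times collected above could be summarized; it assumes, as in the code above, that debug_datum._time_list holds one [seconds] entry per graph node and that each node dict carries a 'name' field:

def summarize_times(debug_datum):
    """Print nodes sorted by time, slowest first (times stored in seconds)."""
    rows = []
    for node, times in zip(debug_datum.get_graph_nodes(),
                           debug_datum._time_list):
        rows.append((node["name"], times[0] * 1e6))  # convert back to microseconds
    for name, usec in sorted(rows, key=lambda r: r[1], reverse=True):
        print("%-40s %10.2f us" % (name, usec))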
Example 4
def test_sparse_array_tuple():
    dtype, itype = 'float32', 'int32'
    stype = 'csr'
    target = 'llvm'
    ctx = tvm.context(target, 0)
    nr, nc, n = tvm.var('nr'), tvm.var('nc'), tvm.var('n')
    A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name='A', dtype=dtype)
    assert A.stype == 'csr'
    C = tvm.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter')
    s = tvm.create_schedule(C.op)
    _nr, _nc = 3, 5
    a = np.maximum(np.random.uniform(size=(_nr, _nc)).astype(dtype) - .6, 0.)
    # convert to sparse array tuple
    source_array = a
    ridx, cidx = np.nonzero(source_array)
    data = source_array[ridx, cidx]
    a_data = _nd.array(data, ctx)
    indices = np.nonzero(source_array)[1].astype(itype)
    a_indices = _nd.array(indices, ctx)
    indptr = [0] + np.apply_along_axis(
        np.count_nonzero, axis=1, arr=source_array).tolist()
    indptr = np.cumsum(np.array(indptr, itype)).astype(itype)
    a_indptr = _nd.array(indptr, ctx)
    a_init = (a_data, a_indices, a_indptr)
    # construct tvm sparse array with tuple
    a = tvmsp.array(a_init, shape=source_array.shape, ctx=ctx)
    assert a.data.dtype == a.dtype
    Ab = namedtuple('CSRBuffer', ['data', 'indices', 'indptr'])
    Ab.data = tvm.decl_buffer(a.data.shape, a.data.dtype, name='A_data')
    Ab.indices = tvm.decl_buffer(a.data.shape, a.data.dtype, name='A_indices')
    binds = {A.data: Ab.data, A.indices: Ab.indices}
    f = tvm.build(s, [nr, A.data, C], target, binds=binds)
    c = tvmsp.array(np.zeros((_nr, _nc), dtype), ctx)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data.shape[0], a.data, c.data)
    tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2., rtol=1e-5)
Example 5
    def set_params(self, params):
        """Set constant parameters for the model.

        Parameters
        ----------
        params : dict of str to NDArray or numpy.ndarray
            Input parameters to the graph that do not change
            during inference time. Used for constant folding.
        """
        inputs = {}
        for name, param in params.items():
            if isinstance(param, np.ndarray):
                param = _nd.array(param)
            inputs[name] = _expr.const(param)
        self._set_params_func(inputs)
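
A sketch of the expected call pattern; 'compiler' is a hypothetical object exposing set_params, and the parameter names are made up for illustration:

import numpy as np

params = {
    "conv1_weight": np.random.uniform(size=(16, 3, 3, 3)).astype("float32"),
    "conv1_bias": np.zeros((16,), dtype="float32"),
}
# Both numpy arrays and tvm.nd.NDArray values are accepted; numpy inputs
# are converted to NDArray before being wrapped as relay constants.
compiler.set_params(params)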
Example 6
    def _run_debug(self):
        """Execute the node specified with index will be executed.
        Each debug output will be copied to the buffer
        Time consumed for each execution will be set as debug output.

        """
        self.debug_datum._time_list = []

        for i, node in enumerate(self.debug_datum.get_graph_nodes()):
            start_time = datetime.now().time()
            time_stamp = self._debug_run(i)
            end_time = datetime.now().time()
            self.debug_datum._time_list.append([time_stamp, start_time, end_time])
            num_outputs = self.debug_datum.get_graph_node_output_num(node)
            for j in range(num_outputs):
                out_tensor = self._get_output_by_layer(i, j)
                out_tensor = array(out_tensor)
                self.debug_datum._output_tensor_list.append(out_tensor)
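
The start_time/end_time values recorded above are datetime.time objects, which cannot be subtracted directly. A minimal sketch of deriving a per-node wall-clock duration from them, assuming the run does not cross midnight:

from datetime import date, datetime

def elapsed_seconds(start_time, end_time):
    """Return end_time - start_time in seconds for two datetime.time values."""
    day = date.today()
    delta = datetime.combine(day, end_time) - datetime.combine(day, start_time)
    return delta.total_seconds()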
Example 7
    def _run_debug(self):
        """Execute the node spcified with index will be executed.
        Each debug output will be copied to the buffer
        Time consumed for each execuion will be set as debug output.

        """

        for i, node in enumerate(self.debug_datum.get_graph_nodes()):
            start_time = datetime.now().time()
            time_stamp = self._debug_run(i)
            end_time = datetime.now().time()
            self.debug_datum._time_list.append(
                [time_stamp, start_time, end_time])
            num_outputs = self.debug_datum.get_graph_node_output_num(node)
            for j in range(num_outputs):
                out_tensor = self._get_output_by_layer(i, j)
                out_tensor = array(out_tensor)
                self.debug_datum._output_tensor_list.append(out_tensor)
Example 8
def tensor_object(arr, ctx=_nd.cpu(0)):
    """Create a tensor object from source arr.

    Parameters
    ----------
    arr : numpy.ndarray or tvm.nd.NDArray
        The source array.

    ctx : TVMContext, optional
        The device context on which to create the array.

    Returns
    -------
    ret : TensorObject
        The created object.
    """
    if isinstance(arr, _np.ndarray):
        tensor = _vmobj.Tensor(_nd.array(arr, ctx))
    elif isinstance(arr, _nd.NDArray):
        tensor = _vmobj.Tensor(arr)
    else:
        raise RuntimeError("Unsupported type for tensor object.")
    return tensor
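
A sketch of the expected usage, reusing the _np/_nd aliases from the code above:

t1 = tensor_object(_np.arange(6, dtype="float32").reshape(2, 3))
t2 = tensor_object(_nd.array(_np.ones((2, 2), dtype="float32")))
try:
    tensor_object([1, 2, 3])  # plain Python lists are rejected
except RuntimeError as err:
    print("unsupported input:", err)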