Example #1
def test_sparse(ctx, inc, rng, allclose):
    scipy_sparse = pytest.importorskip("scipy.sparse")

    # -- prepare initial conditions on host
    if 0:  # pylint: disable=using-constant-test
        # diagonal matrix
        shape = (32, 32)
        s = min(shape[0], shape[1])
        data = list(range(s))
        ii = list(range(s))
        jj = list(range(s))[::-1]
        A = scipy_sparse.coo_matrix((data, (ii, jj)), shape=shape).tocsr()
        X = RA([np.arange(1, shape[1] + 1)])
        Y = RA([np.arange(1, shape[0] + 1)])
    else:
        # random sparse matrix
        shape = (500, 500)
        sparsity = 0.002
        mask = rng.uniform(size=shape) < sparsity
        ii, jj = mask.nonzero()
        assert len(ii) > 0
        data = rng.uniform(-1, 1, size=len(ii))
        A = scipy_sparse.coo_matrix((data, (ii, jj)), shape=shape).tocsr()
        X = RA([rng.uniform(-1, 1, size=shape[1])])
        Y = RA([rng.uniform(-1, 1, size=shape[0])])

    # -- prepare initial conditions on device
    queue = cl.CommandQueue(ctx)
    A_data = to_device(queue, A.data.astype(np.float32))
    A_indices = to_device(queue, A.indices.astype(np.int32))
    A_indptr = to_device(queue, A.indptr.astype(np.int32))
    clX = CLRA(queue, X)
    clY = CLRA(queue, Y)
    assert allclose(X, clX)
    assert allclose(Y, clY)

    # -- run cl computation
    plan = plan_sparse_dot_inc(queue,
                               A_indices,
                               A_indptr,
                               A_data,
                               clX,
                               clY,
                               inc=inc)
    plan()

    # -- ensure they match
    ref = (Y[0] if inc else 0) + A.dot(X[0])
    sim = clY[0]
    assert allclose(ref, sim, atol=1e-7)
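
A minimal NumPy sketch of what the CSR buffers handed to plan_sparse_dot_inc encode (the helper name here is illustrative, not part of the tested API): each row's nonzeros live in data[indptr[row]:indptr[row+1]], with their column indices in the matching slice of indices.

import numpy as np

def csr_dot_inc(data, indices, indptr, x, y, inc=True):
    # Row-wise CSR mat-vec; mirrors the reference `(Y if inc else 0) + A.dot(X)` above.
    out = y.copy() if inc else np.zeros_like(y)
    for row in range(len(indptr) - 1):
        start, end = indptr[row], indptr[row + 1]
        out[row] += np.dot(data[start:end], x[indices[start:end]])
    return out
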
Example #2
def test_reset(ctx, rng):
    # Yshapes = [(100,), (10, 17), (3, 3)]
    Yshapes = [(1000000, ), (1000, 1700), (3, 3)]
    values = rng.uniform(size=len(Yshapes)).astype(np.float32)

    queue = cl.CommandQueue(ctx)
    clY = CLRA(queue, RA([np.zeros(shape) for shape in Yshapes]))
    clvalues = to_device(queue, values)

    plan = plan_reset(queue, clY, clvalues)
    with Timer() as t:
        plan()

    print(t.duration)

    # with Timer() as t:
    #     for i in range(len(clY)):
    #         cl.enqueue_fill_buffer(
    #             queue, clY.cl_buf.data, values[i],
    #             clY.starts[i], clY.shape0s[i] * clY.shape1s[i])
    #     queue.finish()

    # print(t.duration)

    for y, v in zip(clY, values):
        assert np.all(y == v)
Example #3
def init_rng(queue, seed):

    work_items = queue.device.max_work_group_size
    ranluxcltab = to_device(queue, np.zeros(28 * work_items, dtype='int32'))

    text = """
        #include "pyopencl-ranluxcl.cl"

        ////////// MAIN FUNCTION //////////
        __kernel void init_rng(
            uint ins,
            __global ranluxcl_state_t *ranluxcltab
        )
        {
            ranluxcl_initialization(ins, ranluxcltab);
        }
        """

    textconf = dict()
    text = as_ascii(Template(text, output_encoding='ascii').render(**textconf))

    kernel = cl.Program(queue.context, text).build().init_rng
    gsize = (work_items,)
    lsize = None
    kernel(queue, gsize, lsize, np.uint32(seed), ranluxcltab.data)
    queue.finish()

    return ranluxcltab
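
A hedged usage sketch: init_rng just fills and returns a device buffer of per-work-item RANLUX states. The seed value and the idea of handing the buffer to a later sampling kernel are illustrative assumptions, not taken from the example.

# Illustrative only: the seed and the downstream kernel are assumptions.
queue = cl.CommandQueue(ctx)
ranluxcltab = init_rng(queue, seed=12345)
# ranluxcltab.data would then be passed as an extra __global argument to any
# kernel that also includes "pyopencl-ranluxcl.cl" and draws random numbers.
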
Example #4
def test_reset(rng):
    # Yshapes = [(100,), (10, 17), (3, 3)]
    Yshapes = [(1000000,), (1000, 1700), (3, 3)]
    values = rng.uniform(size=len(Yshapes)).astype(np.float32)

    queue = cl.CommandQueue(ctx)  # `ctx` is assumed to be a module-level OpenCL context here
    clY = CLRA(queue, RA([np.zeros(shape) for shape in Yshapes]))
    clvalues = to_device(queue, values)

    plan = plan_reset(queue, clY, clvalues)
    with Timer() as t:
        plan()

    print(t.duration)

    # with Timer() as t:
    #     for i in range(len(clY)):
    #         cl.enqueue_fill_buffer(
    #             queue, clY.cl_buf.data, values[i],
    #             clY.starts[i], clY.shape0s[i] * clY.shape1s[i])
    #     queue.finish()

    # print(t.duration)

    for y, v in zip(clY, values):
        assert np.all(y == v)
Example #5
    def cl_geometry_and_textconf(self, items, padding=4):
        p = self
        max_n_dots = max(len(p.geometry[ii]['dots']) for ii in items)
        n_structure_vars = 4 * max_n_dots + 5
        structure_vars_stride = int(padding *
                                    np.ceil(float(n_structure_vars) / padding))
        gstructure = np.zeros((len(items), structure_vars_stride),
                              dtype='int32')
        A_starts = p.A.starts
        X_starts = p.X.starts
        Y_starts = p.Y.starts
        Y_in_starts = p.Y_in.starts
        A_stride0s = p.A.stride0s
        A_shape1s = p.A.shape1s
        Y_shape0s = p.Y.shape0s

        for bbi, bb in enumerate(items):
            x_js_i = p.X_js[bb]
            A_js_i = p.A_js[bb]
            assert len(x_js_i) == len(A_js_i)
            for ii, (xi, ai) in enumerate(zip(x_js_i, A_js_i)):
                assert xi.size == 1 and ai.size == 1
                xi, ai = xi[0], ai[0]  # to ignore numpy DeprecationWarning
                gstructure[bbi, 0 * max_n_dots + ii] = X_starts[xi]
                gstructure[bbi, 1 * max_n_dots + ii] = A_starts[ai]
                gstructure[bbi, 2 * max_n_dots + ii] = A_stride0s[ai]
                gstructure[bbi, 3 * max_n_dots + ii] = A_shape1s[ai]
            # -- offset of output and input buffers
            gstructure[bbi, 4 * max_n_dots + 0] = Y_in_starts[bb]
            gstructure[bbi, 4 * max_n_dots + 1] = Y_starts[bb]
            # -- number of dots for bb
            gstructure[bbi, 4 * max_n_dots + 2] = len(A_js_i)
            # -- length of Y[bb]
            gstructure[bbi, 4 * max_n_dots + 3] = Y_shape0s[bb]
            gstructure[bbi, 4 * max_n_dots + 4] = bb
        cl_gstructure = to_device(p.queue, gstructure)

        textconf = {
            'n_structure_vars': n_structure_vars,
            'structure_vars_stride': structure_vars_stride,
            'x_starts': 'lstructure[0 * %s + ii]' % max_n_dots,
            'a_starts': 'lstructure[1 * %s + ii]' % max_n_dots,
            'a_s0': 'lstructure[2 * %s + ii]' % max_n_dots,
            'N_i': 'lstructure[3 * %s + ii]' % max_n_dots,
            'y_in_starts': 'lstructure[4 * %s + 0]' % max_n_dots,
            'y_offset': 'lstructure[4 * %s + 1]' % max_n_dots,
            'n_dot_products': 'lstructure[4 * %s + 2]' % max_n_dots,
            'y_len': 'lstructure[4 * %s + 3]' % max_n_dots,
            'bb': 'lstructure[4 * %s + 4]' % max_n_dots,
        }
        return cl_gstructure, textconf
Example #7
def float_cl_clra(queue, arg, cl_dtype, N):
    float_arg = None
    cl_arg = None
    clra_arg = None
    if isinstance(arg, CLRaggedArray):
        clra_arg = arg
        assert arg.dtype == cl_dtype
    elif isinstance(arg, float):
        float_arg = arg
    elif len(set(arg)) == 1:
        float_arg = arg[0]
    else:
        host_arg = np.asarray(arg, cl_dtype)
        assert host_arg.shape == (N, )
        cl_arg = to_device(queue, host_arg)
    return float_arg, cl_arg, clra_arg
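
The helper returns exactly one non-None slot depending on the argument. A small illustrative walk-through (queue and N are placeholders):

f, c, r = float_cl_clra(queue, 0.5, np.float32, N=3)              # f == 0.5, a scalar stays a float
f, c, r = float_cl_clra(queue, [0.5, 0.5, 0.5], np.float32, N=3)  # still a single float (uniform values)
f, c, r = float_cl_clra(queue, [0.1, 0.2, 0.3], np.float32, N=3)  # c is a length-N device array
# Passing a CLRaggedArray of the right dtype returns it unchanged in the third slot.
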
Example #9
def _plan_template(  # noqa: C901
    queue,
    name,
    core_text,
    declares="",
    tag=None,
    blockify=True,
    inputs=None,
    outputs=None,
    parameters=None,
):
    """Template for making a plan for vector nonlinearities.
    This template assumes that all inputs and outputs are vectors.
    Parameters
    ----------
    blockify : bool
        If true, divide the inputs up into blocks with a maximum size.
    inputs: dictionary of CLRaggedArrays
        Inputs to the function. RaggedArrays must be a list of vectors.
    outputs: dictionary of CLRaggedArrays
        Outputs of the function. RaggedArrays must be a list of vectors.
    parameters: dictionary of CLRaggedArrays
        Parameters to the function. Each RaggedArray element must be a vector
        of the same length of the inputs, or a scalar (to be broadcasted).
        Providing a float instead of a RaggedArray makes that parameter
        constant.
    """
    inputs = {} if inputs is None else inputs
    outputs = {} if outputs is None else outputs
    parameters = {} if parameters is None else parameters

    input0 = list(inputs.values())[0]  # input to use as reference for lengths

    # split parameters into static and updated params
    static_params = {}  # static params (hard-coded)
    params = {}  # variable params (updated)
    for k, v in parameters.items():
        if isinstance(v, CLRaggedArray):
            params[k] = v
        elif is_number(v):
            static_params[k] = ("float", float(v))
        else:
            raise ValueError(
                "Parameter %r must be CLRaggedArray or float (got %s)" %
                (k, type(v)))

    avars = {}
    bw_per_call = 0
    for vname, v in list(inputs.items()) + list(outputs.items()) + list(
            params.items()):
        assert vname not in avars, "Name clash"
        assert len(v) == len(input0)
        assert (v.shape0s == input0.shape0s).all()
        assert (v.stride0s == v.shape1s).all()  # rows contiguous
        assert (v.stride1s == 1).all()  # columns contiguous
        assert (v.shape1s == 1).all()  # vectors only

        offset = "%(name)s_starts[gind1]" % {"name": vname}
        avars[vname] = (v.ctype, offset)
        bw_per_call += v.nbytes

    ivars = {k: avars[k] for k in inputs}
    ovars = {k: avars[k] for k in outputs}
    pvars = {k: avars[k] for k in params}

    fn_name = str(name)
    textconf = dict(
        fn_name=fn_name,
        declares=declares,
        core_text=core_text,
        ivars=ivars,
        ovars=ovars,
        pvars=pvars,
        static_params=static_params,
    )

    text = """
    ////////// MAIN FUNCTION //////////
    __kernel void ${fn_name}(
% for name, [type, offset] in ivars.items():
        __global const int *${name}_starts,
        __global const ${type} *${name}_buf,
% endfor
% for name, [type, offset] in ovars.items():
        __global const int *${name}_starts,
        __global ${type} *${name}_buf,
% endfor
% for name, [type, offset] in pvars.items():
        __global const int *${name}_starts,
        __global const int *${name}_shape0s,
        __global const ${type} *${name}_buf,
% endfor
        __global const int *sizes
    )
    {
        const int gind0 = get_global_id(0);
        const int gind1 = get_global_id(1);
        if (gind1 >= ${N} || gind0 >= sizes[gind1])
            return;
% for name, [type, offset] in ivars.items():
        ${type} ${name} = ${name}_buf[${offset} + gind0];
% endfor
% for name, [type, offset] in ovars.items():
        ${type} ${name};
% endfor
% for name, [type, offset] in pvars.items():
        const ${type} ${name} = ${name}_buf[${offset} + gind0];
% endfor
% for name, [type, value] in static_params.items():
        const ${type} ${name} = ${value};
% endfor
        //////////////////////////////////////////////////
        //vvvvv USER DECLARATIONS BELOW vvvvv
        ${declares}
        //^^^^^ USER DECLARATIONS ABOVE ^^^^^
        //////////////////////////////////////////////////
        /////vvvvv USER COMPUTATIONS BELOW vvvvv
        ${core_text}
        /////^^^^^ USER COMPUTATIONS ABOVE ^^^^^
% for name, [type, offset] in ovars.items():
        ${name}_buf[${offset} + gind0] = ${name};
% endfor
    }
    """

    if blockify:
        # blockify to help with heterogeneous sizes

        # find best block size
        block_sizes = [16, 32, 64, 128, 256, 512, 1024]
        N = np.inf
        for block_size_i in block_sizes:
            sizes_i, inds_i, _ = blockify_vector(block_size_i, input0)
            if len(sizes_i) < N:
                N = len(sizes_i)
                block_size = block_size_i
                sizes = sizes_i
                inds = inds_i

        clsizes = to_device(queue, sizes)
        get_starts = lambda ras: [
            to_device(queue, starts)
            for starts in blockify_vectors(block_size, ras)[2]
        ]
        Istarts = get_starts(inputs.values())
        Ostarts = get_starts(outputs.values())
        Pstarts = get_starts(params.values())
        Pshape0s = [to_device(queue, x.shape0s[inds]) for x in params.values()]

        lsize = None
        gsize = (block_size, len(sizes))

        full_args = []
        for vstarts, v in zip(Istarts, inputs.values()):
            full_args.extend([vstarts, v.cl_buf])
        for vstarts, v in zip(Ostarts, outputs.values()):
            full_args.extend([vstarts, v.cl_buf])
        for vstarts, vshape0s, v in zip(Pstarts, Pshape0s, params.values()):
            full_args.extend([vstarts, vshape0s, v.cl_buf])
        full_args.append(clsizes)
    else:
        # Allocate more than enough kernels in a matrix
        lsize = None
        gsize = (input0.shape0s.max(), len(input0))

        full_args = []
        for v in inputs.values():
            full_args.extend([v.cl_starts, v.cl_buf])
        for v in outputs.values():
            full_args.extend([v.cl_starts, v.cl_buf])
        for vname, v in params.items():
            full_args.extend([v.cl_starts, v.cl_shape0s, v.cl_buf])
        full_args.append(input0.cl_shape0s)

    textconf["N"] = gsize[1]
    text = as_ascii(Template(text, output_encoding="ascii").render(**textconf))
    fns = cl.Program(queue.context, text).build()
    _fn = getattr(fns, fn_name)
    _fn.set_args(*[arr.data for arr in full_args])

    plan = Plan(queue, _fn, gsize, lsize=lsize, name=name, tag=tag)
    plan.full_args = tuple(full_args)  # prevent garbage-collection
    plan.bw_per_call = bw_per_call
    plan.description = "groups: %d; items: %d; items/group: %0.1f [%d, %d]" % (
        gsize[1],
        input0.sizes.sum(),
        input0.sizes.mean(),
        input0.sizes.min(),
        input0.sizes.max(),
    )
    return plan
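
A hypothetical invocation sketch (the plan name, variable names, and core text below are made up; only the call shape follows the template): every key in inputs/outputs/parameters becomes a same-named variable inside the generated kernel, and core_text is responsible for assigning each output.

# clX, clY: CLRaggedArrays of equal-length vectors (assumed to exist already).
plan = _plan_template(
    queue, "scale_fn",
    core_text="y = gain * x;",
    inputs={"x": clX},
    outputs={"y": clY},
    parameters={"gain": 3.0},  # a plain float becomes a hard-coded constant
)
plan()
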
Example #10
def block_impl(p, items):

    assert p.float_alpha == 1.0
    assert p.float_beta == 1.0
    assert p.float_gamma == 0.0

    if p.clra_alpha is not None:
        raise NotImplementedError()
    if p.clra_gamma is not None:
        raise NotImplementedError()
    if p.clra_beta is not None:
        raise NotImplementedError()
    if p.cl_alpha is not None:
        raise NotImplementedError()
    if p.cl_gamma is not None:
        raise NotImplementedError()
    if not all(s == 1 for s in p.A.stride1s):
        raise NotImplementedError()

    if p.A_js is None:
        # -- easy probably, but not done
        raise NotImplementedError()

    # --- blocking
    # We want to group the dot products into blocks, so that each workgroup
    # is computing a (block_y, block_x) region of a dot product. To do this,
    # we create a temporary output buffer, compute each block to a separate
    # region of this buffer, then reduce across the buffer in a separate kernel

    # block_y = 8
    block_y = 32
    # block_x = 32
    block_x = 128

    shape0s = []
    shape1s = []
    Astride0s = []
    Astride1s = []
    Astarts = []
    Xstride0s = []
    Xstarts = []
    Ybufstarts = []
    Ybufstart = 0

    Yshape0s_reduce = []
    Yinstride0s_reduce = []
    Yinstarts_reduce = []
    Ystride0s_reduce = []
    Ystarts_reduce = []
    Ybufinds_reduce = []
    bw_reduce = 0

    for n in items:
        assert p.Y_in.shape0s[n] == p.Y.shape0s[n]
        shape0n = p.Y.shape0s[n]

        for i in range(0, shape0n, block_y):
            shape0i = min(shape0n - i, block_y)

            Ybufind_reduce = []

            # loop over dot products outputting to same Y
            n_dots = len(p.A_js[n])
            assert len(p.A_js[n]) == len(p.X_js[n])
            for aj, xj in zip(p.A_js[n], p.X_js[n]):
                assert aj.size == 1 and xj.size == 1
                aj, xj = aj[0], xj[0]  # to ignore numpy DeprecationWarning

                assert p.A.shape0s[aj] == shape0n
                assert p.A.shape1s[aj] == p.X.shape0s[xj]
                assert p.X.shape1s[xj] == 1
                shape1n = p.A.shape1s[aj]

                for j in range(0, shape1n, block_x):
                    shape0s.append(shape0i)
                    shape1s.append(min(shape1n - j, block_x))
                    Astride0s.append(p.A.stride0s[aj])
                    Astride1s.append(p.A.stride1s[aj])
                    Astarts.append(p.A.starts[aj] + i * p.A.stride0s[aj] +
                                   j * p.A.stride1s[aj])
                    Xstride0s.append(p.X.stride0s[xj])
                    Xstarts.append(p.X.starts[xj] + j * p.X.stride0s[xj])

                    Ybufstarts.append(Ybufstart)
                    Ybufind_reduce.append(Ybufstart)
                    # Ybufstart += shape0s[-1]
                    Ybufstart += block_y  # keep good offset

            # --- Y-blocking for reduce
            Yshape0s_reduce.append(shape0i)
            Yinstride0s_reduce.append(p.Y_in.stride0s[n])
            Yinstarts_reduce.append(p.Y_in.starts[n] + i * p.Y_in.stride0s[n])
            Ystride0s_reduce.append(p.Y.stride0s[n])
            Ystarts_reduce.append(p.Y.starts[n] + i * p.Y.stride0s[n])
            Ybufinds_reduce.append(Ybufind_reduce)
            bw_reduce += shape0i * (len(Ybufind_reduce) +
                                    1) * p.Y.dtype.itemsize

    # --- create structure
    gstructure = np.column_stack([
        shape0s, shape1s, Astride0s, Astride1s, Astarts, Xstride0s, Xstarts,
        Ybufstarts
    ])
    cl_gstructure = to_device(p.queue, gstructure.astype(np.int32))

    # --- create Y buffer
    clYbuf = to_device(p.queue, np.zeros(Ybufstart, dtype=p.Y.dtype))

    lsize0 = 4
    # lsize0 = 8
    lsize0_log2 = int(np.log2(lsize0))
    assert 2**lsize0_log2 == lsize0

    lsize = (lsize0, block_y, 1)
    gsize = (lsize[0], lsize[1], gstructure.shape[0])
    assert np.prod(lsize) >= block_x

    textconf = dict(
        A=p.A,
        X=p.X,
        Ybuf=clYbuf,
        n_structure_vars=gstructure.shape[1],
        shape0='lstructure[0]',
        shape1='lstructure[1]',
        Astride0='lstructure[2]',
        Astride1='lstructure[3]',
        Astart='lstructure[4]',
        Xstride0='lstructure[5]',
        Xstart='lstructure[6]',
        Ybufstart='lstructure[7]',
        block_y=block_y,
        block_x=block_x,
        lsize0=lsize0,
        lsize0_log2=lsize0_log2,
    )

    full_args = (
        cl_gstructure,
        p.A.cl_buf,
        p.X.cl_buf,
        clYbuf,
    )

    source = """
    __kernel void fn(
        __global const int *gstructure,
        __global const ${A.ctype} *Adata,
        __global const ${X.ctype} *Xdata,
        __global ${Ybuf.ctype} *Ybufdata
        )
    {
        const int j = get_global_id(0);
        const int i = get_global_id(1);
        const int n = get_global_id(2);

        // load structure
        __local int lstructure[${n_structure_vars}];
        const int local_idx = get_local_id(0) + get_local_id(1)*get_local_size(0);
        if (local_idx < ${n_structure_vars})
            lstructure[local_idx] = gstructure[
                n * ${n_structure_vars} + local_idx];
        barrier(CLK_LOCAL_MEM_FENCE);

        __global const ${X.ctype} *x = Xdata + ${Xstart};
        __global ${Ybuf.ctype} *ybuf = Ybufdata + ${Ybufstart};

        // load x into local memory
        __local ${X.ctype} xlocal[${block_x}];
        if (local_idx < ${shape1})
            xlocal[local_idx] = x[local_idx*${Xstride0}];
        barrier(CLK_LOCAL_MEM_FENCE);

        __local ${Ybuf.ctype} sums[${block_y}][${lsize0}];
        sums[i][j] = 0;

        if (i < ${shape0}) {
            __global const ${A.ctype} *Ai = Adata + ${Astart} + i*${Astride0};
            for(int jj = j; jj < ${shape1}; jj += get_global_size(0)) {
                sums[i][j] += Ai[jj*${Astride1}] * xlocal[jj];
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);

    % for k in range(lsize0_log2 - 1, 0, -1):
        if (j < ${2**k})
            sums[i][j] += sums[i][${2**k} + j];
        barrier(CLK_LOCAL_MEM_FENCE);
    % endfor
        if (i < ${shape0} && j == 0)
            ybuf[i] = sums[i][0] + sums[i][1];
    }
    """

    source = Template(source, output_encoding='ascii').render(**textconf)
    kernel = cl.Program(p.queue.context, source).build().fn
    kernel.set_args(*[arr.data for arr in full_args])

    plan = Plan(
        p.queue,
        kernel,
        gsize,
        lsize,
        name='clra_gemv.block_impl',
        tag=p.tag,
        bw_per_call=bw_from_geometry(p.geometry, items),
        flops_per_call=flops_from_geometry(p.geometry, items),
    )
    plan.full_args = full_args  # prevent GC of the args
    plan.description = p.geometry_summary(items)
    plan.Ybuf = clYbuf

    # --- Reduce kernel
    align = False

    Nreduce = len(Yshape0s_reduce)
    clYshape0s_reduce = to_device(p.queue,
                                  np.array(Yshape0s_reduce, dtype=np.int32))
    clYinstride0s_reduce = to_device(
        p.queue, np.array(Yinstride0s_reduce, dtype=np.int32))
    clYinstarts_reduce = to_device(p.queue,
                                   np.array(Yinstarts_reduce, dtype=np.int32))
    clYstride0s_reduce = to_device(p.queue,
                                   np.array(Ystride0s_reduce, dtype=np.int32))
    clYstarts_reduce = to_device(p.queue,
                                 np.array(Ystarts_reduce, dtype=np.int32))
    clYbufinds_reduce = CLRaggedArray.from_arrays(p.queue,
                                                  Ybufinds_reduce,
                                                  dtype=np.int32,
                                                  align=align)
    assert len(clYbufinds_reduce) == Nreduce
    assert (clYbufinds_reduce.shape1s == 1).all()

    textconf_reduce = dict(
        Ybuf=clYbuf,
        Yin=p.Y_in,
        Y=p.Y,
    )

    full_args_reduce = (
        clYshape0s_reduce,
        clYbufinds_reduce.cl_shape0s,
        clYbufinds_reduce.cl_starts,
        clYbufinds_reduce.cl_buf,
        clYbuf,
        clYinstride0s_reduce,
        clYinstarts_reduce,
        p.Y_in.cl_buf,
        clYstride0s_reduce,
        clYstarts_reduce,
        p.Y.cl_buf,
    )

    lsize_reduce = None
    gsize_reduce = (block_y, Nreduce)

    source_reduce = """
    __kernel void reduce(
        __global const int *shape0s,
        __global const int *Ishape0s,
        __global const int *Istarts,
        __global const int *Idata,
        __global ${Ybuf.ctype} *Ybufdata,
        __global const int *Yinstride0s,
        __global const int *Yinstarts,
        __global ${Yin.ctype} *Yindata,
        __global const int *Ystride0s,
        __global const int *Ystarts,
        __global ${Y.ctype} *Ydata
    )
    {
        const int i = get_global_id(0);
        const int n = get_global_id(1);
        if (i >= shape0s[n])
            return;

        const int Ishape0 = Ishape0s[n];

        __global const int *Ybufstart = Idata + Istarts[n];
        __global ${Yin.ctype} *yin = Yindata + Yinstarts[n];
        __global ${Y.ctype} *y = Ydata + Ystarts[n];

        ${Y.ctype} sum = yin[i*Yinstride0s[n]];
        for (int j = 0; j < Ishape0; j++) {
            sum += Ybufdata[Ybufstart[j] + i];
        }

        y[i*Ystride0s[n]] = sum;
    }
    """

    source_reduce = Template(source_reduce,
                             output_encoding='ascii').render(**textconf_reduce)
    kernel_reduce = cl.Program(p.queue.context, source_reduce).build().reduce
    kernel_reduce.set_args(*[arr.data for arr in full_args_reduce])

    plan_reduce = Plan(
        p.queue,
        kernel_reduce,
        gsize_reduce,
        lsize_reduce,
        name='clra_gemv.block_impl_reduce',
        tag=p.tag,
        bw_per_call=bw_reduce,
    )
    plan_reduce.full_args = full_args_reduce  # prevent GC of the args
    # plan_reduce.description = p.geometry_summary(items)

    return [plan, plan_reduce]
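
A NumPy sketch of the block-then-reduce strategy described in the comments above (shapes and the helper name are illustrative): each (block_y, block_x) tile writes a partial result, and a second pass sums the partials for each row block on top of Y_in.

import numpy as np

def blocked_gemv(A, x, y_in, block_y=32, block_x=128):
    M, N = A.shape
    y = y_in.copy()  # float_beta == 1.0, float_gamma == 0.0 in this implementation
    for i in range(0, M, block_y):
        partials = []  # analogous to the per-tile regions of clYbuf
        for j in range(0, N, block_x):
            partials.append(A[i:i + block_y, j:j + block_x].dot(x[j:j + block_x]))
        y[i:i + block_y] += np.sum(partials, axis=0)  # the "reduce" kernel's job
    return y
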
Example #11
def ref_impl(p, items):
    """
    Return an OpenCL function to calculate elements `items` of
    gemv operation `p`.

    In this reference implementation, we create a work item
    per output number, or more specifically, a work grid
    of (max_y_len, len(items)).  Each work item loops over the
    dot products and the elements within each dot product to
    compute the output value Y[global_id(1)][global_id(0)].

    """

    if p.clra_alpha is not None:
        raise NotImplementedError()
    if p.clra_gamma is not None:
        raise NotImplementedError()
    cl_items = to_device(p.queue, np.asarray(items, dtype='int32'))
    if 0:
        if len(items) < 10:
            print('Falling back on reference implementation')
            p.print_geometry_summary(items, full=True)
        else:
            print('Falling back on reference implementation')
            p.print_geometry_summary(items)

    assert all(s == 1 for s in p.A.stride1s)
    assert all(s == 1 for s in p.X.stride1s)
    assert all(s == 1 for s in p.Y.stride0s)
    assert all(s == 1 for s in p.Y.stride1s)
    assert all(s == 1 for s in p.Y_in.stride0s)
    assert all(s == 1 for s in p.Y_in.stride1s)

    text = """
        __kernel void gemv_ref(
            __global int *items,
    % if cl_alpha is not None:
            __global ${cl_alpha.ctype} * alphas,
    % endif
    % if (A_js is not None):
            __global int *A_starts,
            __global int *A_shape1s,
            __global int *A_stride0s,
            __global ${A.cl_buf.ctype} *A_data,
            __global int *A_js_starts,
            __global int *A_js_shape0s,
            __global int *A_js_data,
            __global int *X_starts,
            __global int *X_stride0s,
            __global ${X.cl_buf.ctype} *X_data,
            __global int *X_js_starts,
            __global int *X_js_data,
    % endif
    % if cl_beta is not None:
            __global ${cl_beta.ctype} * betas,
    % endif
    % if clra_beta is not None:
            __global int *beta_starts,
            __global int *beta_data,
    % endif
    % if cl_gamma is not None:
            __global ${cl_gamma.ctype} * gammas,
    % endif
            __global int *Y_in_starts,
            __global ${Y_in.cl_buf.ctype} *Y_in_data,
            __global int *Y_starts,
            __global int *Y_shape0s,
            __global ${Y.cl_buf.ctype} *Y_data)
        {
            const int mm = get_global_id(0);
            const int bb = items[get_global_id(1)];
            const int M = Y_shape0s[bb];
            if (mm < M)
            {
                const int y_offset = Y_starts[bb];
                const int y_in_offset = Y_in_starts[bb];

    % if float_beta is not None:
                const ${Y.cl_buf.ctype} beta = ${float_beta};
    % elif cl_beta is not None:
                const ${cl_beta.ctype} beta = betas[bb];
    % elif clra_beta is not None:
                const int beta_offset = beta_starts[bb];
                const ${clra_beta.cl_buf.ctype} beta
                    = beta_data[beta_offset + mm];
    % endif

    % if float_gamma is not None:
                const ${Y.cl_buf.ctype} gamma = ${float_gamma};
    % elif cl_gamma is not None:
                const ${cl_gamma.ctype} gamma = gammas[bb];
    % endif

                Y_data[y_offset + mm] =
                    gamma + beta * Y_in_data[y_in_offset + mm];

    % if A_js is not None:
                const int n_dot_products = A_js_shape0s[bb];
                X_js_data += X_js_starts[bb];
                A_js_data += A_js_starts[bb];

                ${Y.cl_buf.ctype} y_sum = 0;
                for (int ii = 0; ii < n_dot_products; ++ii)
                {
                    const int x_ji = X_js_data[ii];
                    const int a_ji = A_js_data[ii];
                    const int N_i = A_shape1s[a_ji];
                    const int x_offset = X_starts[x_ji];
                    const int a_offset = A_starts[a_ji];
                    const int AsM = A_stride0s[a_ji];
                    const int XsM = X_stride0s[x_ji];

                    for (int nn = 0; nn < N_i; ++nn)
                    {
                        y_sum += X_data[x_offset + nn * XsM]
                                 * A_data[a_offset + mm * AsM + nn];
                    }
                }
        % if float_alpha is not None:
                Y_data[y_offset + mm] += ${float_alpha} * y_sum;
        % elif cl_alpha is not None:
                Y_data[y_offset + mm] += alphas[bb] * y_sum;
        % endif
    % endif
            }

        }
    """

    text = as_ascii(
        Template(text, output_encoding='ascii').render(**p.__dict__))

    gsize = (max(p.geometry[ii]['y_len'] for ii in items), len(items))
    lsize = None
    fn = cl.Program(p.queue.context, text).build().gemv_ref
    full_args = [cl_items]
    if p.cl_alpha is not None:
        full_args += [p.cl_alpha]
    if p.A_js is not None:
        full_args += [
            p.A.cl_starts,
            p.A.cl_shape1s,
            p.A.cl_stride0s,
            p.A.cl_buf,
            p.A_js.cl_starts,
            p.A_js.cl_shape0s,
            p.A_js.cl_buf,
            p.X.cl_starts,
            p.X.cl_stride0s,
            p.X.cl_buf,
            p.X_js.cl_starts,
            p.X_js.cl_buf,
        ]
    if p.cl_beta is not None:
        full_args += [p.cl_beta]
    elif p.clra_beta is not None:
        full_args += [p.clra_beta.cl_starts, p.clra_beta.cl_buf]

    if p.cl_gamma is not None:
        full_args += [p.cl_gamma]
    elif p.clra_gamma is not None:
        full_args += [p.clra_gamma.cl_starts, p.clra_gamma.cl_buf]

    full_args += [
        p.Y_in.cl_starts, p.Y_in.cl_buf, p.Y.cl_starts, p.Y.cl_shape0s,
        p.Y.cl_buf
    ]

    # print([str(arr.dtype)[0] for arr in full_args])
    fn.set_args(*[arr.data for arr in full_args])
    rval = Plan(p.queue,
                fn,
                gsize,
                lsize,
                name="clra_gemv.ref_impl",
                tag=p.tag,
                bw_per_call=bw_from_geometry(p.geometry, items),
                flops_per_call=flops_from_geometry(p.geometry, items))
    rval.full_args = full_args  # prevent GC of the args
    return rval
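
In plain NumPy, the value each work column of gemv_ref produces for an output block bb is roughly the following (a hedged restatement; the list-of-arrays bookkeeping stands in for the ragged-array geometry):

import numpy as np

def gemv_ref_numpy(A_list, X_list, A_js, X_js, Y_in, alpha=1.0, beta=1.0, gamma=0.0):
    Y = []
    for bb, y_in in enumerate(Y_in):
        acc = gamma + beta * np.asarray(y_in, dtype=float)
        for aj, xj in zip(A_js[bb], X_js[bb]):
            acc = acc + alpha * np.dot(A_list[aj], X_list[xj])
        Y.append(acc)
    return Y
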
Example #12
def plan_linear_synapse(queue, X, Y, A, B, Xbuf, Ybuf, tag=None):
    """
    Implements a filter of the form

        y[n+1] + a[0] y[n] + ... + a[i] y[n-i] = b[0] x[n] + ... + b[j] x[n-j]
    """
    N = len(X)
    assert len(Y) == N and len(A) == N and len(B) == N

    for arr in [X, Y, A, B, Xbuf, Ybuf]:
        assert (arr.shape1s == arr.stride0s).all()
        assert (arr.stride1s == 1).all()
    for arr in [X, Y, A, B]:  # vectors
        assert (arr.shape1s == 1).all()
    assert (X.shape0s == Y.shape0s).all()

    assert (B.shape0s >= 1).all()
    assert ((B.shape0s == 1) | (Xbuf.shape0s == B.shape0s)).all()
    assert (Xbuf.shape1s == X.shape0s).all()
    assert ((A.shape0s == 1) | (Ybuf.shape0s == A.shape0s)).all()
    assert (Ybuf.shape1s == Y.shape0s).all()

    assert X.ctype == Xbuf.ctype
    assert Y.ctype == Ybuf.ctype

    Xbufpos = to_device(queue, np.zeros(N, dtype='int32'))
    Ybufpos = to_device(queue, np.zeros(N, dtype='int32'))

    text = """
        ////////// MAIN FUNCTION //////////
        __kernel void linear_synapse(
            __global const int *shape0s,
            __global const int *Xstarts,
            __global const ${Xtype} *Xdata,
            __global const int *Ystarts,
            __global ${Ytype} *Ydata,
            __global const int *Ashape0s,
            __global const int *Astarts,
            __global const ${Atype} *Adata,
            __global const int *Bshape0s,
            __global const int *Bstarts,
            __global const ${Btype} *Bdata,
            __global const int *Xbufstarts,
            __global ${Xtype} *Xbufdata,
            __global const int *Ybufstarts,
            __global ${Ytype} *Ybufdata,
            __global int *Xbufpos,
            __global int *Ybufpos
        )
        {
            int i = get_global_id(0);
            const int k = get_global_id(1);
            __global const ${Xtype} *x = Xdata + Xstarts[k];
            __global ${Ytype} *y = Ydata + Ystarts[k];
            __global const ${Atype} *a = Adata + Astarts[k];
            __global const ${Btype} *b = Bdata + Bstarts[k];

            const int n = shape0s[k];
            const int na = Ashape0s[k];
            const int nb = Bshape0s[k];
            if (na == 0 && nb == 1) {
                for (; i < n; i += get_global_size(0))
                    y[i] = b[0] * x[i];
            } else if (na == 1 && nb == 1) {
                for (; i < n; i += get_global_size(0)) {
                    y[i] *= -a[0];
                    y[i] += b[0] * x[i];
                }
            } else {  // general filtering
                __global ${Xtype} *xbuf = Xbufdata + Xbufstarts[k];
                __global ${Ytype} *ybuf = Ybufdata + Ybufstarts[k];
                const int ix = Xbufpos[k];
                const int iy = Ybufpos[k];
                const int ix1 = (ix > 0) ? ix - 1 : nb - 1;
                const int iy1 = (iy > 0) ? iy - 1 : na - 1;

                ${Ytype} yi;
                int j, jj;
                for (; i < n; i += get_global_size(0)) {
                    yi = b[0] * x[i];
                    if (nb > 1) {
                        xbuf[ix*n + i] = x[i];  // copy input to buffer
                        for (j = 1; j < nb; j++) {
                            jj = (ix + j) % nb;
                            yi += b[j] * xbuf[jj*n + i];
                        }
                    }

                    if (na > 0) {
                        yi -= a[0] * y[i];
                        if (na > 1) {
                            for (j = 1; j < na; j++) {
                                jj = (iy + j) % na;
                                yi -= a[j] * ybuf[jj*n + i];
                            }
                            ybuf[iy1*n + i] = yi;  // copy output to buffer
                        }
                    }

                    y[i] = yi;
                }

                Xbufpos[k] = ix1;
                Ybufpos[k] = iy1;
            }
        }
        """

    textconf = dict(
        Xtype=X.ctype, Ytype=Y.ctype,
        Atype=A.ctype, Btype=B.ctype
    )
    text = as_ascii(Template(text, output_encoding='ascii').render(**textconf))

    full_args = (
        X.cl_shape0s,
        X.cl_starts,
        X.cl_buf,
        Y.cl_starts,
        Y.cl_buf,
        A.cl_shape0s,
        A.cl_starts,
        A.cl_buf,
        B.cl_shape0s,
        B.cl_starts,
        B.cl_buf,
        Xbuf.cl_starts,
        Xbuf.cl_buf,
        Ybuf.cl_starts,
        Ybuf.cl_buf,
        Xbufpos,
        Ybufpos,
    )
    _fn = cl.Program(queue.context, text).build().linear_synapse
    _fn.set_args(*[arr.data for arr in full_args])

    max_len = min(max(X.shape0s), queue.device.max_work_group_size)
    gsize = (max_len, N)
    lsize = (max_len, 1)
    rval = Plan(
        queue, _fn, gsize, lsize=lsize, name="cl_linear_synapse", tag=tag)
    rval.full_args = full_args     # prevent garbage-collection
    rval.bw_per_call = (
        X.nbytes + Y.nbytes + A.nbytes + B.nbytes + Xbuf.nbytes + Ybuf.nbytes)
    rval.description = (
        "groups: %d; items: %d; items/group: %0.1f [%d, %d]" %
        (len(Y), Y.sizes.sum(), Y.sizes.mean(), Y.sizes.min(), Y.sizes.max()))
    return rval
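
A pure-Python sketch of the general-filtering branch, following the difference equation in the docstring (the circular xbuf/ybuf indexing is replaced by plain newest-first history lists; names are illustrative):

def linear_synapse_step(x, a, b, x_hist, y_hist):
    # y[n+1] = sum_j b[j] * x[n-j] - sum_i a[i] * y[n-i]
    x_hist = [x] + x_hist[:len(b) - 1]                      # newest input first
    y_new = sum(bj * xj for bj, xj in zip(b, x_hist))
    y_new -= sum(ai * yi for ai, yi in zip(a, y_hist))
    y_hist = [y_new] + y_hist[:len(a) - 1]                  # newest output first
    return y_new, x_hist, y_hist
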
Example #13
def block_impl(p, items):

    if p.clra_alpha is not None:
        raise NotImplementedError()
    if p.clra_gamma is not None:
        raise NotImplementedError()
    if p.clra_beta is not None:
        raise NotImplementedError()
    if p.cl_alpha is not None:
        raise NotImplementedError()
    if p.cl_beta is not None:
        raise NotImplementedError()
    if p.cl_gamma is not None:
        raise NotImplementedError()
    if not all(s == 1 for s in p.A.stride1s):
        raise NotImplementedError()

    if p.A_js is None:
        # -- easy probably, but not done
        raise NotImplementedError()

    # --- blocking
    # We want to group the dot products into blocks, so that each workgroup
    # is computing a (block_y, block_x) region of a dot product. To do this,
    # we create a temporary output buffer, compute each block to a separate
    # region of this buffer, then reduce across the buffer in a separate kernel

    # block_y = 8
    block_y = 32
    # block_x = 32
    block_x = 128

    shape0s = []
    shape1s = []
    Astride0s = []
    Astride1s = []
    Astarts = []
    Xstride0s = []
    Xstarts = []
    Ybufstarts = []
    Ybufstart = 0

    Yshape0s_reduce = []
    Yinstride0s_reduce = []
    Yinstarts_reduce = []
    Ystride0s_reduce = []
    Ystarts_reduce = []
    Ybufinds_reduce = []
    bw_reduce = 0

    for n in items:
        assert p.Y_in.shape0s[n] == p.Y.shape0s[n]
        shape0n = p.Y.shape0s[n]

        for i in range(0, shape0n, block_y):
            shape0i = min(shape0n - i, block_y)

            Ybufind_reduce = []

            # loop over dot products outputting to same Y
            assert len(p.A_js[n]) == len(p.X_js[n])
            for aj, xj in zip(p.A_js[n], p.X_js[n]):
                assert aj.size == 1 and xj.size == 1
                aj, xj = aj[0], xj[0]  # to ignore numpy DeprecationWarning

                assert p.A.shape0s[aj] == shape0n
                assert p.A.shape1s[aj] == p.X.shape0s[xj]
                assert p.X.shape1s[xj] == 1
                shape1n = p.A.shape1s[aj]

                for j in range(0, shape1n, block_x):
                    shape0s.append(shape0i)
                    shape1s.append(min(shape1n - j, block_x))
                    Astride0s.append(p.A.stride0s[aj])
                    Astride1s.append(p.A.stride1s[aj])
                    Astarts.append(p.A.starts[aj] +
                                   i*p.A.stride0s[aj] + j*p.A.stride1s[aj])
                    Xstride0s.append(p.X.stride0s[xj])
                    Xstarts.append(p.X.starts[xj] + j*p.X.stride0s[xj])

                    Ybufstarts.append(Ybufstart)
                    Ybufind_reduce.append(Ybufstart)
                    # Ybufstart += shape0s[-1]
                    Ybufstart += block_y  # keep good offset

            # --- Y-blocking for reduce
            Yshape0s_reduce.append(shape0i)
            Yinstride0s_reduce.append(p.Y_in.stride0s[n])
            Yinstarts_reduce.append(p.Y_in.starts[n] + i*p.Y_in.stride0s[n])
            Ystride0s_reduce.append(p.Y.stride0s[n])
            Ystarts_reduce.append(p.Y.starts[n] + i*p.Y.stride0s[n])
            Ybufinds_reduce.append(Ybufind_reduce)
            bw_reduce += shape0i*(len(Ybufind_reduce) + 1) * p.Y.dtype.itemsize

    # --- create structure
    gstructure = np.column_stack([shape0s, shape1s, Astride0s, Astride1s,
                                  Astarts, Xstride0s, Xstarts, Ybufstarts])
    cl_gstructure = to_device(p.queue, gstructure.astype(np.int32))

    # --- create Y buffer
    clYbuf = to_device(p.queue, np.zeros(Ybufstart, dtype=p.Y.dtype))

    lsize0 = 4
    # lsize0 = 8
    lsize0_log2 = int(np.log2(lsize0))
    assert 2**lsize0_log2 == lsize0

    lsize = (lsize0, block_y, 1)
    gsize = (lsize[0], lsize[1], gstructure.shape[0])
    assert np.prod(lsize) >= block_x

    textconf = dict(
        A=p.A,
        X=p.X,
        Ybuf=clYbuf,
        n_structure_vars=gstructure.shape[1],
        shape0='lstructure[0]',
        shape1='lstructure[1]',
        Astride0='lstructure[2]',
        Astride1='lstructure[3]',
        Astart='lstructure[4]',
        Xstride0='lstructure[5]',
        Xstart='lstructure[6]',
        Ybufstart='lstructure[7]',
        block_y=block_y,
        block_x=block_x,
        lsize0=lsize0,
        lsize0_log2=lsize0_log2,
        float_alpha=p.float_alpha,
    )

    full_args = (
        cl_gstructure,
        p.A.cl_buf,
        p.X.cl_buf,
        clYbuf,
    )

    text = """
    __kernel void fn(
        __global const int *gstructure,
        __global const ${A.ctype} *Adata,
        __global const ${X.ctype} *Xdata,
        __global ${Ybuf.ctype} *Ybufdata
        )
    {
        const int j = get_global_id(0);
        const int i = get_global_id(1);
        const int n = get_global_id(2);

        // load structure
        __local int lstructure[${n_structure_vars}];
        const int local_idx =
            get_local_id(0) + get_local_id(1)*get_local_size(0);
        if (local_idx < ${n_structure_vars})
            lstructure[local_idx] = gstructure[
                n * ${n_structure_vars} + local_idx];
        barrier(CLK_LOCAL_MEM_FENCE);

        __global const ${X.ctype} *x = Xdata + ${Xstart};
        __global ${Ybuf.ctype} *ybuf = Ybufdata + ${Ybufstart};

        // load x into local memory
        __local ${X.ctype} xlocal[${block_x}];
        if (local_idx < ${shape1})
            xlocal[local_idx] = x[local_idx*${Xstride0}];
        barrier(CLK_LOCAL_MEM_FENCE);

        __local ${Ybuf.ctype} sums[${block_y}][${lsize0}];
        sums[i][j] = 0;

        if (i < ${shape0}) {
            __global const ${A.ctype} *Ai = Adata + ${Astart} + i*${Astride0};
            for(int jj = j; jj < ${shape1}; jj += get_global_size(0)) {
                sums[i][j] += Ai[jj*${Astride1}] * xlocal[jj];
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);

    % for k in range(lsize0_log2 - 1, 0, -1):
        if (j < ${2**k})
            sums[i][j] += sums[i][${2**k} + j];
        barrier(CLK_LOCAL_MEM_FENCE);
    % endfor

        if (i < ${shape0} && j == 0)
            ybuf[i] = ${float_alpha} * (sums[i][0] + sums[i][1]);
    }
    """

    text = as_ascii(Template(text, output_encoding='ascii').render(**textconf))
    kernel = cl.Program(p.queue.context, text).build().fn
    kernel.set_args(*[arr.data for arr in full_args])

    plan = Plan(p.queue, kernel, gsize, lsize,
                name='clra_gemv.block_impl',
                tag=p.tag,
                bw_per_call=bw_from_geometry(p.geometry, items),
                flops_per_call=flops_from_geometry(p.geometry, items),
                )
    plan.full_args = full_args  # prevent GC of the args
    plan.description = p.geometry_summary(items)
    plan.Ybuf = clYbuf

    # --- Reduce kernel
    align = False

    Nreduce = len(Yshape0s_reduce)
    clYshape0s_reduce = to_device(
        p.queue, np.array(Yshape0s_reduce, dtype=np.int32))
    clYinstride0s_reduce = to_device(
        p.queue, np.array(Yinstride0s_reduce, dtype=np.int32))
    clYinstarts_reduce = to_device(
        p.queue, np.array(Yinstarts_reduce, dtype=np.int32))
    clYstride0s_reduce = to_device(
        p.queue, np.array(Ystride0s_reduce, dtype=np.int32))
    clYstarts_reduce = to_device(
        p.queue, np.array(Ystarts_reduce, dtype=np.int32))
    clYbufinds_reduce = CLRaggedArray.from_arrays(
        p.queue, Ybufinds_reduce, dtype=np.int32, align=align)
    assert len(clYbufinds_reduce) == Nreduce
    assert (clYbufinds_reduce.shape1s == 1).all()

    textconf_reduce = dict(
        Ybuf=clYbuf,
        Yin=p.Y_in,
        Y=p.Y,
        float_beta=p.float_beta,
        float_gamma=p.float_gamma,
    )

    full_args_reduce = (
        clYshape0s_reduce,
        clYbufinds_reduce.cl_shape0s,
        clYbufinds_reduce.cl_starts,
        clYbufinds_reduce.cl_buf,
        clYbuf,
        clYinstride0s_reduce,
        clYinstarts_reduce,
        p.Y_in.cl_buf,
        clYstride0s_reduce,
        clYstarts_reduce,
        p.Y.cl_buf,
    )

    lsize_reduce = None
    gsize_reduce = (block_y, Nreduce)

    text_reduce = """
    __kernel void reduce(
        __global const int *shape0s,
        __global const int *Ishape0s,
        __global const int *Istarts,
        __global const int *Idata,
        __global ${Ybuf.ctype} *Ybufdata,
        __global const int *Yinstride0s,
        __global const int *Yinstarts,
        __global ${Yin.ctype} *Yindata,
        __global const int *Ystride0s,
        __global const int *Ystarts,
        __global ${Y.ctype} *Ydata
    )
    {
        const int i = get_global_id(0);
        const int n = get_global_id(1);
        if (i >= shape0s[n])
            return;

        const int Ishape0 = Ishape0s[n];

        __global const int *Ybufstart = Idata + Istarts[n];
        __global ${Yin.ctype} *yin = Yindata + Yinstarts[n];
        __global ${Y.ctype} *y = Ydata + Ystarts[n];

        ${Y.ctype} sum = ${float_beta} * yin[i*Yinstride0s[n]];
        for (int j = 0; j < Ishape0; j++) {
            sum += Ybufdata[Ybufstart[j] + i];
        }

        y[i*Ystride0s[n]] = sum + ${float_gamma};
    }
    """

    text_reduce = as_ascii(Template(
        text_reduce, output_encoding='ascii').render(**textconf_reduce))
    kernel_reduce = cl.Program(p.queue.context, text_reduce).build().reduce
    kernel_reduce.set_args(*[arr.data for arr in full_args_reduce])

    plan_reduce = Plan(p.queue, kernel_reduce, gsize_reduce, lsize_reduce,
                       name='clra_gemv.block_impl_reduce', tag=p.tag)
    plan_reduce.full_args = full_args_reduce  # prevent GC of the args
    plan_reduce.bw_per_call = bw_reduce
    # plan_reduce.description = p.geometry_summary(items)

    return [plan, plan_reduce]
Example #15
def plan_probes(queue, periods, X, Y, tag=None):
    """
    Parameters
    ----------
    periods : array-like of ints
        The period (in time-steps) of each probe
    """
    assert len(X) == len(Y)
    assert len(X) == len(periods)
    assert X.ctype == Y.ctype
    N = len(X)

    # N.B.  X[i].shape = (M, N)
    #       Y[i].shape = (buf_len, M * N)
    for arr in [X, Y]:
        assert (arr.stride1s == 1).all()
    assert (X.shape0s * X.shape1s == Y.shape1s).all()
    assert (X.stride0s == X.shape1s).all()
    assert (X.stride1s == 1).all()
    assert (Y.stride0s == Y.shape1s).all()
    assert (Y.stride1s == 1).all()

    periods = np.asarray(periods, dtype='float32')
    cl_periods = to_device(queue, periods)
    cl_countdowns = to_device(queue, periods - 1)
    cl_bufpositions = to_device(queue, np.zeros(N, dtype='int32'))

    text = """
        ////////// MAIN FUNCTION //////////
        __kernel void probes(
            __global ${Ctype} *countdowns,
            __global int *bufpositions,
            __global const ${Ptype} *periods,
            __global const int *Xstarts,
            __global const int *Xshape0s,
            __global const int *Xshape1s,
            __global const ${Xtype} *Xdata,
            __global const int *Ystarts,
            __global ${Ytype} *Ydata
        )
        {
            const int n = get_global_id(1);
            const ${Ctype} countdown = countdowns[n];

            if (countdown <= 0) {
                const int n_dims = Xshape0s[n] * Xshape1s[n];
                __global const ${Xtype} *x = Xdata + Xstarts[n];
                const int bufpos = bufpositions[n];

                __global ${Ytype} *y = Ydata + Ystarts[n] + bufpos * n_dims;

                for (int ii = get_global_id(0);
                         ii < n_dims;
                         ii += get_global_size(0))
                {
                    y[ii] = x[ii];
                }
                // This should *not* cause deadlock because
                // all local threads are guaranteed to be
                // in this branch together.
                barrier(CLK_LOCAL_MEM_FENCE);
                if (get_global_id(0) == 0)
                {
                    countdowns[n] = countdown + periods[n] - 1;
                    bufpositions[n] = bufpos + 1;
                }
            }
            else
            {
                barrier(CLK_LOCAL_MEM_FENCE);
                if (get_global_id(0) == 0)
                {
                    countdowns[n] = countdown - 1;
                }
            }
        }
        """

    textconf = dict(N=N,
                    Xtype=X.ctype,
                    Ytype=Y.ctype,
                    Ctype=cl_countdowns.ctype,
                    Ptype=cl_periods.ctype)
    text = as_ascii(Template(text, output_encoding='ascii').render(**textconf))

    full_args = (
        cl_countdowns,
        cl_bufpositions,
        cl_periods,
        X.cl_starts,
        X.cl_shape0s,
        X.cl_shape1s,
        X.cl_buf,
        Y.cl_starts,
        Y.cl_buf,
    )
    _fn = cl.Program(queue.context, text).build().probes
    _fn.set_args(*[arr.data for arr in full_args])

    max_len = min(queue.device.max_work_group_size, max(X.shape0s))
    gsize = (max_len, N,)
    lsize = (max_len, 1)
    rval = Plan(queue, _fn, gsize, lsize=lsize, name="cl_probes", tag=tag)
    rval.full_args = full_args     # prevent garbage-collection
    rval.cl_bufpositions = cl_bufpositions
    rval.Y = Y
    rval.bw_per_call = (X.nbytes + Y.nbytes + cl_periods.nbytes +
                        cl_countdowns.nbytes + cl_bufpositions.nbytes)
    rval.description = (
        "groups: %d; items: %d; items/group: %0.1f [%d, %d]" %
        (len(Y), Y.sizes.sum(), Y.sizes.mean(), Y.sizes.min(), Y.sizes.max()))
    return rval
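
The kernel's control flow boils down to a per-probe countdown; a hedged Python restatement of one simulator step for a single probe (names are illustrative):

def probe_step(countdown, bufpos, period, x, ybuf):
    if countdown <= 0:
        ybuf[bufpos, :] = x.ravel()          # record this sample into the probe buffer
        return countdown + period - 1, bufpos + 1
    return countdown - 1, bufpos
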
Example #16
def Array(self, val, dtype=np.float32):
    return to_device(self.queue, np.asarray(val, dtype=dtype))