Example #1
import opencl as cl
import clyther.array as ca
from ctypes import c_float
import numpy as np


def main_ufunc():

    # Create an OpenCL context on the GPU.
    ctx = cl.Context(device_type=cl.Device.GPU)

    size = 10
    a = ca.arange(ctx, size, ctype=c_float)
    # b is reshaped to a column so the ufunc broadcasts to a (size, size) result.
    b = ca.arange(ctx, size, ctype=c_float).reshape([size, 1])

    # add is assumed to be a binary ufunc defined elsewhere in the example file
    # (see the @ca.binary_ufunc decorator in Example #4).
    o1 = add(a, b)

    # Map the device buffer into host memory to inspect the result.
    with o1.map() as view:
        print view

    with a.map() as view:
        print np.sum(view)

    # Reduce a to a single value on the device.
    result = add.reduce(a)

    result.queue.finish()

    with a.map() as view:
        print view
        print view.sum()

    print result.item()
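Note: add is not defined in the snippet above. Judging from the @ca.binary_ufunc decorator used in Example #4 below, it is presumably declared along these lines (a sketch, not code from the original file), which would give it both the element-wise call and the reduce method used here:

@ca.binary_ufunc
def add(x, y):
    return x + y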
Example #2
import opencl as cl
import clyther.array as ca


def main_reduce():
    ctx = cl.Context(device_type=cl.Device.GPU)

    # Reduction built from the binary add ufunc (defined elsewhere in the file).
    sum = add.reduce

    # for size in range(250, 258):
    size = 1027

    a = ca.arange(ctx, size, ctype=cl.cl_int)

    result = sum(a)

    # Compare NumPy's host-side sum of the mapped view with the device-side reduction.
    with a.map() as view:
        print size, view.sum(), result.item()
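The commented-out line suggests the example was originally run over a range of sizes. A hypothetical sketch of that variant, reconstructed from the comment and assuming ctx and sum are set up as above:

# Check the reduction against NumPy for several non-power-of-two sizes.
for size in range(250, 258):
    a = ca.arange(ctx, size, ctype=cl.cl_int)
    result = sum(a)
    with a.map() as view:
        print size, view.sum(), result.item()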
Example #3
import opencl as cl
import clyther.array as ca
from ctypes import c_float
import numpy as np


def main():
    ctx = cl.Context(device_type=cl.Device.GPU)
    queue = cl.Queue(ctx)

    npa = np.arange(1.0 * 12.0, dtype=c_float)
    a = ca.arange(ctx, 12, ctype=c_float)

    # Allocate an output buffer and broadcast it to the shape of the expression.
    out = ca.empty_like(a[:])
    output = cl.broadcast(out, a[:].shape)

    # Evaluate the element-wise expression on the device in a single pass.
    ca.blitz(queue, lambda: a[:] + a[:] + 1, out=output)

    # NumPy reference printout; note that this stencil does not match the blitz
    # expression above and appears to be left over from an earlier variant.
    print npa[1:] + npa[:-1]

    with out.map() as view:
        print view
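For comparison, the expression handed to ca.blitz above corresponds to this plain host-side NumPy computation (a minimal sketch using only NumPy):

import numpy as np

npa = np.arange(12, dtype=np.float32)
print npa + npa + 1   # host-side equivalent of the blitz expression a[:] + a[:] + 1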
Example #4
'''
Created on Dec 15, 2011

@author: sean
'''

import opencl as cl
import clyther as cly
import clyther.array as ca
from ctypes import c_float
import numpy as np

# Always have to create a context.
ctx = cl.Context()

# Create an array.
a = ca.arange(ctx, 12)

# Multiplication is not defined for these arrays out of the box.
try:
    c = a * 2
except TypeError as err:
    print 'Expected:', err


# Define an element-wise binary ufunc that runs on the device.
@ca.binary_ufunc
def multuply(x, y):
    return x * y

c = multuply(a, 2)

# Map the result back to the host and print it.
with c.map() as arr:
    print arr
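Binary ufuncs in the earlier examples also expose a reduce method (add.reduce), so the same presumably works for the freshly defined ufunc. An untested sketch:

# Product of all elements, analogous to add.reduce(a) in Example #1.
prod = multuply.reduce(a)
prod.queue.finish()
print prod.item()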