def init_dev(dev):
    """Initialize device *dev* and make it pygpu's default context.

    Parameters
    ----------
    dev : str
        Device string understood by ``pygpu.init`` (e.g. ``'cuda0'``,
        ``'opencl0:0'``).

    Side effects: sets the module-global ``pygpu_activated`` flag and,
    when ``config.print_active_device`` is true, reports the chosen
    device on stderr.
    """
    global pygpu_activated
    context = pygpu.init(dev)
    pygpu.set_default_context(context)
    pygpu_activated = True
    if config.print_active_device:
        # Py3-compatible print function (the original used the Py2-only
        # ``print >> sys.stderr`` statement). On Py2 this requires
        # ``from __future__ import print_function``, which the sibling
        # variants of this function in this file already rely on.
        print("Using device %s: %s" % (dev, context.devname), file=sys.stderr)
def init_dev(dev):
    """Initialize device *dev* and make it pygpu's default context.

    Parameters
    ----------
    dev : str
        Device string understood by ``pygpu.init`` (e.g. ``'cuda0'``,
        ``'opencl0:0'``).

    Side effects: sets the module-global ``pygpu_activated`` flag,
    records the chosen device on the function object
    (``init_dev.device``), and, when ``config.print_active_device`` is
    true, reports the device on stderr.
    """
    global pygpu_activated
    context = pygpu.init(dev)
    pygpu.set_default_context(context)
    pygpu_activated = True
    if config.print_active_device:
        # Py3-compatible print function (the original used the Py2-only
        # ``print >> sys.stderr`` statement). On Py2 this requires
        # ``from __future__ import print_function``, which the sibling
        # variants of this function in this file already rely on.
        print("Using device %s: %s" % (dev, context.devname), file=sys.stderr)
    # remember the active device
    init_dev.device = dev
def init_dev(dev):
    """Select *dev* as the default gpuarray device.

    First verifies that the linked libgpuarray/pygpu exposes the API
    version this code was built against, then initializes the device,
    installs it as pygpu's default context, flips the module-global
    ``pygpu_activated`` flag, optionally reports the device on stderr,
    and records it on the function object as ``init_dev.device``.
    """
    expected = (-10000, 0)
    actual = pygpu.gpuarray.api_version()
    if actual != expected:
        # Mismatch means Theano and libgpuarray/pygpu were built
        # against different ABIs; refuse to continue.
        raise RuntimeError("Wrong API version for gpuarray:", actual,
                           "Make sure Theano and libgpuarray/pygpu "
                           "are in sync.")
    global pygpu_activated
    ctx = pygpu.init(dev)
    pygpu.set_default_context(ctx)
    pygpu_activated = True
    if config.print_active_device:
        print("Using device %s: %s" % (dev, ctx.devname), file=sys.stderr)
    # remember the active device
    init_dev.device = dev
def init_dev(dev):
    """Make *dev* the default gpuarray device.

    Checks the libgpuarray/pygpu API version before touching the
    device; on success installs the new context as pygpu's default,
    sets the module-global ``pygpu_activated`` flag, optionally prints
    the active device to stderr and stores it in ``init_dev.device``.
    """
    version = pygpu.gpuarray.api_version()
    if version != (-10000, 0):
        raise RuntimeError(
            "Wrong API version for gpuarray:",
            version,
            "Make sure Theano and libgpuarray/pygpu are in sync.")
    global pygpu_activated
    new_ctx = pygpu.init(dev)
    pygpu.set_default_context(new_ctx)
    pygpu_activated = True
    if config.print_active_device:
        print("Using device %s: %s" % (dev, new_ctx.devname),
              file=sys.stderr)
    # remember the active device
    init_dev.device = dev
oper = 'out = ({odt}) {}(a)'.format(c_func, odt=c_dtype_out) preamble_tpl = mako.template.Template(meta['oper_preamble_tpl']) preamble = preamble_tpl.render(idt=c_dtype_in, odt=c_dtype_out) elif meta['oper_fmt'] is not None: # Case 3: custom implementation with `oper` template oper = meta['oper_fmt'].format(idt=c_dtype_in, odt=c_dtype_out) preamble_tpl = mako.template.Template(meta['oper_preamble_tpl']) preamble = preamble_tpl.render(idt=c_dtype_in, odt=c_dtype_out) else: # Case 4: not implemented raise NotImplementedError('ufunc {!r} not implemented'.format(name)) # --- Generate and run GpuElemwise kernel --- # a_arg = as_argument(a, 'a', read=True) args = [arg('out', out.dtype, write=True), a_arg] ker = GpuElemwise(context, oper, args, preamble=preamble) ker(out, a) return out # %% Test import pygpu ctx = pygpu.init('cuda') pygpu.set_default_context(ctx)
def init_dev(dev):
    """Initialize device *dev* and register it as pygpu's default context.

    Also flips the module-global ``pygpu_activated`` flag so callers
    can tell a GPU context is live.
    """
    global pygpu_activated
    new_context = pygpu.init(dev)
    pygpu.set_default_context(new_context)
    pygpu_activated = True
""" pygpu as backend target for npbackend. """ import numpy as np from .. import bhc from .._util import dtype_name import time import os import pygpu from pygpu.array import gpuarray as elemary from . import target_numpy cxt_string = os.environ.get("GPUARRAY_DEVICE", "opencl0:0") cxt = pygpu.init(cxt_string) # cxt = pygpu.init("cuda0") pygpu.set_default_context(cxt) class Base(target_numpy.Base): """base array handle""" def __init__(self, size, dtype): self.clary = pygpu.empty((size,), dtype=dtype, cls=elemary) super(Base, self).__init__(size, dtype) class View(target_numpy.View): """array view handle""" def __init__(self, ndim, start, shape, strides, base): super(View, self).__init__(ndim, start, shape, strides, base)
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with the ASTRA Toolbox. If not, see <http://www.gnu.org/licenses/>. # # ----------------------------------------------------------------------- import astra import numpy as np import pygpu import pylab # Initialize pygpu ctx = pygpu.init('cuda') pygpu.set_default_context(ctx) vol_geom = astra.create_vol_geom(128, 128, 128) angles = np.linspace(0, 2 * np.pi, 180, False) proj_geom = astra.create_proj_geom('cone', 1.0, 1.0, 128, 192, angles, 1000, 0) # Create a simple hollow cube phantom, as a pygpu gpuarray vol_gpuarr = pygpu.gpuarray.zeros(astra.functions.geom_size(vol_geom), dtype='float32') vol_gpuarr[17:113, 17:113, 17:113] = 1 vol_gpuarr[33:97, 33:97, 33:97] = 0 # Create a pygpu gpuarray for the output projection data proj_gpuarr = pygpu.gpuarray.zeros(astra.functions.geom_size(proj_geom), dtype='float32') # Create the astra GPULink objects and create astra data3d objects from them z, y, x = proj_gpuarr.shape
""" pygpu as backend target for npbackend. """ import numpy as np from .. import bhc from .._util import dtype_name import time import os import pygpu from pygpu.array import gpuarray as elemary from . import target_numpy cxt_string = os.environ.get("GPUARRAY_DEVICE", "opencl0:0") cxt = pygpu.init(cxt_string) #cxt = pygpu.init("cuda0") pygpu.set_default_context(cxt) class Base(target_numpy.Base): """base array handle""" def __init__(self, size, dtype): self.clary = pygpu.empty((size,), dtype=dtype, cls=elemary) super(Base, self).__init__(size, dtype) class View(target_numpy.View): """array view handle""" def __init__(self, ndim, start, shape, strides, base): super(View, self).__init__(ndim, start, shape, strides, base) self.clary = pygpu.gpuarray.from_gpudata(base.clary.gpudata, offset=self.start,\ dtype=base.dtype, shape=shape, strides=self.strides, writable=True, base=base.clary, cls=elemary) def views2clary(views):