Example 1
def setCudaDevice( devN = None, usingAnimation=False  ):
  global CUDA_initialized
  if CUDA_initialized: return
  import pycuda.autoinit
  nDevices = cuda.Device.count()
  print "\nAvailable Devices:"
  for i in range(nDevices):
    dev = cuda.Device( i )
    print "  Device {0}: {1}".format( i, dev.name() )
  devNumber = 0
  if nDevices > 1:
    if devN is None:
      devNumber = int(raw_input("Select device number: "))  
    else:
      devNumber = devN 
  cuda.Context.pop()  # Disable the previous CUDA context (the one created by pycuda.autoinit)
  dev = cuda.Device( devNumber )
  if usingAnimation:
    import pycuda.gl as cuda_gl
    cuda_gl.make_context(dev)
  else:
    dev.make_context()
  print "Using device {0}: {1}".format( devNumber, dev.name() ) 
  CUDA_initialized = True
  return dev
Example 2
def setCudaDevice(devN=None, usingAnimation=False):
    global CUDA_initialized
    if CUDA_initialized: return
    import pycuda.autoinit
    nDevices = cuda.Device.count()
    print("\nAvailable Devices:")
    for i in range(nDevices):
        dev = cuda.Device(i)
        print("  Device {0}: {1}".format(i, dev.name()))
    devNumber = 0
    if nDevices > 1:
        if devN is None:
            devNumber = int(input("Select device number: "))
        else:
            devNumber = devN
    cuda.Context.pop()  # Disable the previous CUDA context (the one created by pycuda.autoinit)
    dev = cuda.Device(devNumber)
    if usingAnimation:
        import pycuda.gl as cuda_gl
        cuda_gl.make_context(dev)
    else:
        dev.make_context()
    print("Using device {0}: {1}".format(devNumber, dev.name()))
    CUDA_initialized = True
    return dev
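
Both versions of setCudaDevice above depend on two module-level names that the snippets do not show: a CUDA_initialized flag and pycuda.driver imported as cuda. A minimal, hypothetical calling module, assuming the definition from Example 2 is pasted in where indicated, could look like this:

import pycuda.driver as cuda   # the `cuda` name used inside setCudaDevice

CUDA_initialized = False       # module-level guard checked by setCudaDevice

# ... setCudaDevice defined exactly as in Example 2 ...

dev = setCudaDevice(devN=0)    # select device 0 without the interactive prompt
# the returned pycuda.driver.Device now has an active context for allocations and kernels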
Example 3
 def cuda_gl_init(self):
     cuda.init()
     if bool(glutMainLoopEvent):
         # print "----SELECT COMPUTING DEVICE (NVIDIA GPU)----"
         # num_gpu = cuda.Device.count()
         # for i in range(0,num_gpu):
         #     tmp_dev = cuda.Device(i)
         #     print "device_id = ",i,tmp_dev.name()
         # id_gpu = raw_input("Select GPU: ")
         # id_gpu = int(id_gpu)
         curr_gpu = cuda.Device(self.id_selected_gpu)
         print "you have selected ", curr_gpu.name()
         self.ctx_gl = cudagl.make_context(curr_gpu, flags=cudagl.graphics_map_flags.NONE)
     else:
         import pycuda.gl.autoinit
         curr_gpu = cudagl.autoinit.device
         self.ctx_gl = cudagl.make_context(curr_gpu, flags=cudagl.graphics_map_flags.NONE)
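
Example 3 assumes the surrounding module has already imported glutMainLoopEvent (a freeglut extension exposed by PyOpenGL) and, more importantly, that an OpenGL context already exists, since cudagl.make_context creates a CUDA context that shares resources with the current GL context. A minimal, hypothetical setup that satisfies those preconditions, not taken from the original project:

from OpenGL.GLUT import glutInit, glutInitDisplayMode, glutCreateWindow, GLUT_RGBA, GLUT_DOUBLE
import pycuda.driver as cuda
import pycuda.gl as cudagl

glutInit()
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE)
glutCreateWindow(b"pycuda / OpenGL interop")   # a GL context now exists

cuda.init()
ctx = cudagl.make_context(cuda.Device(0))      # CUDA context sharing that GL context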
Example 4
 def make_context(self):
     start = monotonic()
     cf = driver.ctx_flags
     if self.opengl:
         from pycuda import gl
         self.context = gl.make_context(self.device)
     else:
         self.context = self.device.make_context(flags=cf.SCHED_YIELD
                                                 | cf.MAP_HOST)
     end = monotonic()
     self.context.pop()
     log("cuda context allocation took %ims", 1000 * (end - start))
Example 5
from __future__ import absolute_import
import pycuda.driver as cuda
import pycuda.gl as cudagl

cuda.init()
assert cuda.Device.count() >= 1

from pycuda.tools import make_default_context
context = make_default_context(lambda dev: cudagl.make_context(dev))
device = context.get_device()

import atexit
atexit.register(context.pop)
Example 6
import pycuda.driver as cuda
import pycuda.gl as cudagl
import pycuda.tools

cuda.init()
assert cuda.Device.count() >= 1

device = pycuda.tools.get_default_device()
context = cudagl.make_context(device)

import atexit
atexit.register(context.pop)
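
Examples 5 and 6 read like two revisions of the body of pycuda.gl.autoinit itself, so application code rarely needs to spell this out: importing that module performs cuda.init(), creates a GL-sharing context on a default device, and registers context.pop() with atexit. A minimal sketch, assuming an OpenGL context has already been created (for instance via glutCreateWindow):

import pycuda.gl.autoinit                # must run after a GL context exists

device = pycuda.gl.autoinit.device       # the selected pycuda.driver.Device
context = pycuda.gl.autoinit.context     # the active, GL-sharing CUDA context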