Code Example #1
File: __init__.py  Project: tsingjinyun/Theano
def use(device,
        force=False,
        default_to_move_computation_to_gpu=True,
        move_shared_float32_to_gpu=True,
        enable_cuda=True,
        test_driver=True):
    """
    Errors and warnings about CUDA should be displayed only when this
    function is called.  We need to be able to load this module just
    to check whether it is available!

    :param device: string "cpu", "gpu", "gpuN" (N is the device number to use)
    :param force: Will always raise an exception if we can't use the gpu.
    :param default_to_move_computation_to_gpu: If gpu init succeeded, enable by
                                               default optimizations to move
                                               computations to the gpu
    :param move_shared_float32_to_gpu: If gpu init succeeded, put new shared
                                       variables in float32 on the gpu.
    :param enable_cuda: If the gpu is correctly enabled,
                        set the variable cuda_enabled to True.
    """
    global cuda_enabled, cuda_initialization_error_message
    if force and not cuda_available and device.startswith('gpu'):
        if not nvcc_compiler.is_nvcc_available():
            raise EnvironmentError("You forced the use of gpu device '%s', but"
                                   " nvcc was not found. Set it in your PATH "
                                   "environment variable or set the Theano "
                                   "flags 'cuda.root' to its directory"
                                   "" % device)
        else:
            raise EnvironmentError("You forced the use of gpu device %s, "
                                   "but CUDA initialization failed "
                                   "with error:\n%s" %
                                   (device, cuda_initialization_error_message))
    elif not nvcc_compiler.is_nvcc_available():
        _logger.error('nvcc compiler not found on $PATH.'
                      ' Check your nvcc installation and try again.')
        return
    elif not cuda_available:
        error_addendum = ""
        try:
            if cuda_initialization_error_message:
                error_addendum = (" (error: %s)" %
                                  cuda_initialization_error_message)
        except NameError:
            # cuda_initialization_error_message is not available b/c compilation failed
            pass
        _logger.warning('CUDA is installed, but device %s is not available %s',
                        device, error_addendum)
        return

    if device == 'gpu':
        pass
    elif device.startswith('gpu'):
        device = int(device[3:])
    elif device == 'cpu':
        device = -1
    else:
        raise ValueError("Invalid device identifier", device)
    if use.device_number is None:
        # No successful call to use() has been made yet
        if device != 'gpu' and device < 0:
            return

        # Check whether PyCUDA has already initialized the GPU context.
        pycuda_init_dev = False
        if config.pycuda.init:
            import theano.misc.pycuda_init
            pycuda_init_dev = theano.misc.pycuda_init.pycuda_available

        try:
            if (device != 'gpu') and not pycuda_init_dev:
                assert isinstance(device, int)
                gpu_init(device)
                use.device_number = device
                assert active_device_number() == device
            else:
                # This means the driver should select the GPU.  As we
                # need to get the device number now, we force the
                # selection of the GPU by the driver now and then we
                # query the active GPU.  If we check the active GPU
                # before the device is initialized, we will always
                # receive 0, even if another device is selected later.
                cuda_ndarray.cuda_ndarray.CudaNdarray.zeros((2, 3))
                use.device_number = active_device_number()
                # This is needed to initialize the cublas handle.
                gpu_init(use.device_number)

            if test_driver:
                import theano.sandbox.cuda.tests.test_driver
                theano.sandbox.cuda.tests.test_driver.test_nvidia_driver1()
            if device_properties(use.device_number)["warpSize"] != 32:
                raise ValueError("Your GPU has a warpSize != 32. Currently"
                                 " we have code that depends on this. Email"
                                 " the Theano mailing list to tell us about"
                                 " this new GPU as we don't know any with"
                                 " this property")

            if config.print_active_device:
                print("Using gpu device %d: %s" %
                      (active_device_number(), active_device_name()),
                      file=sys.stderr)
            if device_properties(use.device_number)['regsPerBlock'] < 16384:
                # We would try to use too many registers per block in many
                # places when there are only 8k registers per multiprocessor.
                _logger.warning(
                    "You are probably using an old GPU, that Theano"
                    " does not support."
                    " This means GPU code will most likely be slow AND may"
                    " crash when we try to use features"
                    " that your GPU does not support.")

        except (EnvironmentError, ValueError, RuntimeError) as e:
            _logger.error(("ERROR: Not using GPU."
                           " Initialisation of device %s failed:\n%s"),
                          str(device), e)
            cuda_enabled = False
            if force:
                e.args += (("You asked to force this device and it failed."
                            " No fallback to the cpu or other gpu device."), )
                raise

    elif use.device_number != device and device != 'gpu':
        _logger.warning(("Ignoring call to use(%s), GPU number %i "
                         "is already in use."), str(device), use.device_number)

    if move_shared_float32_to_gpu:
        handle_shared_float32(True)

    if enable_cuda:
        cuda_enabled = True

    if default_to_move_computation_to_gpu:
        optdb.add_tags('gpu_opt', 'fast_compile', 'fast_run', 'inplace')
        optdb.add_tags('gpu_after_fusion', 'fast_run', 'inplace')

    if force:
        try:
            # In case the device is just 'gpu', check that the driver
            # initialized it correctly.
            cuda_ndarray.cuda_ndarray.CudaNdarray.zeros((5, 5))
        except (Exception, NameError) as e:
            # NameError when no gpu present as cuda_ndarray is not loaded.
            e.args += ("ERROR: GPU forced but failed. ", )
            raise
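
For orientation, here is a minimal sketch of how this use() function is typically invoked from user code. The module path matches the file shown above, but the device string "gpu0" and the force=True argument are illustrative only.

# Illustrative only: select a specific GPU at runtime instead of relying on
# the THEANO_FLAGS "device" setting. Assumes the old CUDA backend shown above
# is installed and a CUDA-capable device is present.
import theano.sandbox.cuda

# force=True makes initialization failures raise instead of silently falling
# back to the CPU; "gpu0" means the first CUDA device, as parsed above.
theano.sandbox.cuda.use("gpu0", force=True)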
Code Example #2
File: __init__.py  Project: 317070/Theano
def use(device,
        force=False,
        default_to_move_computation_to_gpu=True,
        move_shared_float32_to_gpu=True,
        enable_cuda=True,
        test_driver=True):
    """
    Errors and warnings about CUDA should be displayed only when this
    function is called.  We need to be able to load this module just
    to check whether it is available!

    :param device: string "cpu", "gpu", "gpuN" (N is the device number to use)
    :param force: Will always raise an exception if we can't use the gpu.
    :param default_to_move_computation_to_gpu: If gpu init succeeded, enable by
                                               default optimizations to move
                                               computations to the gpu
    :param move_shared_float32_to_gpu: If gpu init succeeded, put new shared
                                       variables in float32 on the gpu.
    :param enable_cuda: If the gpu is correctly enabled,
                        set the variable cuda_enabled to True.
    """
    global cuda_enabled, cuda_initialization_error_message
    if force and not cuda_available and device.startswith('gpu'):
        if not nvcc_compiler.is_nvcc_available():
            raise EnvironmentError("You forced the use of gpu device '%s', but"
                                   " nvcc was not found. Set it in your PATH "
                                   "environment variable or set the Theano "
                                   "flags 'cuda.root' to its directory"
                                   "" % device)
        else:
            raise EnvironmentError("You forced the use of gpu device %s, "
                                   "but CUDA initialization failed "
                                   "with error:\n%s" % (
                device, cuda_initialization_error_message))
    elif not nvcc_compiler.is_nvcc_available():
        _logger.error('nvcc compiler not found on $PATH.'
              ' Check your nvcc installation and try again.')
        return
    elif not cuda_available:
        error_addendum = ""
        try:
            if cuda_initialization_error_message:
                error_addendum = (" (error: %s)" %
                                  cuda_initialization_error_message)
        except NameError:
            # cuda_initialization_error_message is not available
            # because compilation failed.
            pass
        _logger.warning('CUDA is installed, but device %s is not available %s',
                device, error_addendum)
        return

    if device == 'gpu':
        pass
    elif device.startswith('gpu'):
        device = int(device[3:])
    elif device == 'cpu':
        device = -1
    else:
        raise ValueError("Invalid device identifier", device)
    if use.device_number is None:
        # No successful call to use() has been made yet
        if device != 'gpu' and device < 0:
            return

        # Check whether PyCUDA has already initialized the GPU context.
        pycuda_init_dev = False
        if config.pycuda.init:
            import theano.misc.pycuda_init
            pycuda_init_dev = theano.misc.pycuda_init.pycuda_available

        try:
            if (device != 'gpu') and not pycuda_init_dev:
                assert isinstance(device, int)
                gpu_init(device)
                use.device_number = device
                assert active_device_number() == device
            else:
                # This means the driver should select the GPU.  As we
                # need to get the device number now, we force the
                # selection of the GPU by the driver now and then we
                # query the active GPU.  If we check the active GPU
                # before the device is initialized, we will always
                # receive 0, even if another device is selected later.
                cuda_ndarray.cuda_ndarray.CudaNdarray.zeros((2, 3))
                use.device_number = active_device_number()
                # This is needed to initialize the cublas handle.
                gpu_init(use.device_number)

            if test_driver:
                import theano.sandbox.cuda.tests.test_driver
                theano.sandbox.cuda.tests.test_driver.test_nvidia_driver1()
            if device_properties(use.device_number)["warpSize"] != 32:
                raise ValueError("Your GPU has a warpSize != 32. Currently"
                                 " we have code that depends on this. Email"
                                 " the Theano mailing list to tell us about"
                                 " this new GPU as we don't know any with"
                                 " this property")

            if config.print_active_device:
                print >> sys.stderr, "Using gpu device %d: %s" % (
                        active_device_number(), active_device_name())
            if device_properties(use.device_number)['regsPerBlock'] < 16384:
                # We would try to use too many registers per block in many
                # places when there are only 8k registers per multiprocessor.
                _logger.warning(
                        "You are probably using an old GPU, that Theano"
                        " does not support."
                        " This means GPU code will most likely be slow AND may"
                        " crash when we try to use features"
                        " that your GPU does not support.")

        except (EnvironmentError, ValueError, RuntimeError), e:
            _logger.error(("ERROR: Not using GPU."
                           " Initialisation of device %s failed:\n%s"),
                          str(device), e)
            cuda_enabled = False
            if force:
                e.args += (("You asked to force this device and it failed."
                            " No fallback to the cpu or other gpu device."),)
                raise
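
The move_shared_float32_to_gpu step (handle_shared_float32(True)) is what makes float32 shared variables created after a successful use() call live on the GPU. A rough, hedged sketch of the observable effect; the exact shared-variable type belongs to this old backend and is only asserted in comments.

# Illustrative only: after use('gpu0') has succeeded, newly created float32
# shared variables are backed by GPU storage rather than plain numpy arrays.
import numpy
import theano
import theano.sandbox.cuda

theano.sandbox.cuda.use("gpu0")  # as in the snippets; enables handle_shared_float32
x = theano.shared(numpy.zeros((3, 4), dtype="float32"))
print(type(x))               # expected: a CUDA-backed shared variable
print(x.get_value().shape)   # get_value() still returns a host numpy array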
Code Example #3
File: __init__.py  Project: tsingjinyun/Theano
             """If True, always initialize PyCUDA when Theano want to
           initilize the GPU.  Currently, we must always initialize
           PyCUDA before Theano do it.  Setting this flag to True,
           ensure that, but always import PyCUDA.  It can be done
           manually by importing theano.misc.pycuda_init before theano
           initialize the GPU device.
             """,
             BoolParam(False),
             in_c_key=False)

AddConfigVar('cublas.lib', """Name of the cuda blas library for the linker.""",
             StrParam('cublas'))

# is_nvcc_available is called here to initialize global vars in the
# nvcc_compiler module.
nvcc_compiler.is_nvcc_available()

# Compile cuda_ndarray.cu
# This requires that nvcc (part of CUDA) is installed. If it is not, a warning
# is printed and this module will not work properly (we set `cuda_available`
# to False).

# This variable is True by default, and set to False if nvcc is not
# available, there is no cuda card, or something goes wrong when
# trying to initialize cuda.
cuda_available = True

# Global variable to avoid displaying the same warning multiple times.
cuda_warning_is_displayed = False

# This variable is set to True when we enable cuda (i.e. when use() is called).
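
Because these AddConfigVar calls register ordinary Theano flags, the options above (pycuda.init, cublas.lib, and the cuda.root flag mentioned in the error messages) are normally set through THEANO_FLAGS rather than by editing this module. A hedged sketch, with example values only:

# Illustrative only: Theano flags must be set before the first `import theano`.
# The flag names come from the snippets above; the values are examples.
import os

os.environ["THEANO_FLAGS"] = ",".join([
    "device=gpu0",        # which device use() should initialize
    "pycuda.init=True",   # initialize PyCUDA before Theano touches the GPU
    "cublas.lib=cublas",  # name of the CUDA BLAS library for the linker
])

import theano  # imported after setting the flags, on purpose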
Code Example #4
AddConfigVar(
    'pycuda.init', """If True, always initialize PyCUDA when Theano wants to
           initialize the GPU.  Currently, we must always initialize
           PyCUDA before Theano does it.  Setting this flag to True
           ensures that, but it always imports PyCUDA.  It can be done
           manually by importing theano.misc.pycuda_init before Theano
           initializes the GPU device.
             """, BoolParam(False))

if config.cuda.root == "AUTO":
    # set nvcc_path correctly and get the version
    nvcc_compiler.set_cuda_root()

# is_nvcc_available is called here to initialize global vars in the
# nvcc_compiler module.
nvcc_compiler.is_nvcc_available()

# Compile cuda_ndarray.cu
# This requires that nvcc (part of CUDA) is installed. If it is not, a warning
# is printed and this module will not work properly (we set `cuda_available`
# to False).

# This variable is True by default, and set to False if nvcc is not
# available, there is no cuda card, or something goes wrong when
# trying to initialize cuda.
cuda_available = True

# Global variable to avoid displaying the same warning multiple times.
cuda_warning_is_displayed = False

# This variable is set to True when we enable cuda (i.e. when use() is called).
Code Example #5
File: __init__.py  Project: 317070/Theano
AddConfigVar('pycuda.init',
        """If True, always initialize PyCUDA when Theano wants to
           initialize the GPU.  Currently, we must always initialize
           PyCUDA before Theano does it.  Setting this flag to True
           ensures that, but it always imports PyCUDA.  It can be done
           manually by importing theano.misc.pycuda_init before Theano
           initializes the GPU device.
             """,
        BoolParam(False),
        in_c_key=False)

AddConfigVar('cublas.lib',
        """Name of the cuda blas library for the linker.""",
        StrParam('cublas'))

# is_nvcc_available is called here to initialize global vars in the
# nvcc_compiler module.
nvcc_compiler.is_nvcc_available()

# Compile cuda_ndarray.cu
# This requires that nvcc (part of CUDA) is installed. If it is not, a warning
# is printed and this module will not work properly (we set `cuda_available`
# to False).

# This variable is True by default, and set to False if nvcc is not
# available, there is no cuda card, or something goes wrong when
# trying to initialize cuda.
cuda_available = True

# Global variable to avoid displaying the same warning multiple times.
cuda_warning_is_displayed = False

# This variable is set to True when we enable cuda (i.e. when use() is called).
Code Example #6
File: __init__.py  Project: jey/Theano
        """If True, always initialize PyCUDA when Theano want to
           initilize the GPU.  Currently, we must always initialize
           PyCUDA before Theano do it.  Setting this flag to True,
           ensure that, but always import PyCUDA.  It can be done
           manually by importing theano.misc.pycuda_init before theano
           initialize the GPU device.
             """,
        BoolParam(False))

if config.cuda.root == "AUTO":
    # set nvcc_path correctly and get the version
    nvcc_compiler.set_cuda_root()

# is_nvcc_available is called here to initialize global vars in the
# nvcc_compiler module.
nvcc_compiler.is_nvcc_available()

# Compile cuda_ndarray.cu
# This requires that nvcc (part of CUDA) is installed. If it is not, a warning
# is printed and this module will not work properly (we set `cuda_available`
# to False).

# This variable is True by default, and set to False if nvcc is not
# available, there is no cuda card, or something goes wrong when
# trying to initialize cuda.
cuda_available = True

# Global variable to avoid displaying the same warning multiple times.
cuda_warning_is_displayed = False

# This variable is set to True when we enable cuda (i.e. when use() is called).
Code Example #7
def use(device,
        force=False,
        default_to_move_computation_to_gpu=True,
        move_shared_float32_to_gpu=True,
        enable_cuda=True,
        test_driver=True):
    """
    Errors and warnings about CUDA should be displayed only when this
    function is called.  We need to be able to load this module just
    to check whether it is available!

    :param device: string "cpu", "gpu", "gpuN" (N is the device number to use)
    :param force: Will always raise an exception if we can't use the gpu.
    :param default_to_move_computation_to_gpu: If gpu init succeeded, enable by
                                               default optimizations to move
                                               computations to the gpu
    :param move_shared_float32_to_gpu: If gpu init succeeded, put new shared
                                       variables in float32 on the gpu.
    :param enable_cuda: If the gpu is correctly enabled,
                        set the variable cuda_enabled to True.
    """
    global cuda_enabled, cuda_initialization_error_message
    if force and not cuda_available and device.startswith('gpu'):
        if not nvcc_compiler.is_nvcc_available():
            raise EnvironmentError("You forced the use of gpu device '%s', but"
                                   " nvcc was not found. Set it in your PATH "
                                   "environment variable or set the Theano "
                                   "flags 'cuda.root' to its directory"
                                   "" % device)
        else:
            raise EnvironmentError("You forced the use of gpu device %s, "
                                   "but CUDA initialization failed "
                                   "with error:\n%s" %
                                   (device, cuda_initialization_error_message))
    elif not nvcc_compiler.is_nvcc_available():
        _logger.error('nvcc compiler not found on $PATH.'
                      ' Check your nvcc installation and try again.')
        return
    elif not cuda_available:
        error_addendum = ""
        try:
            if cuda_initialization_error_message:
                error_addendum = (" (error: %s)" %
                                  cuda_initialization_error_message)
        except NameError:
            # cuda_initialization_error_message is not available b/c compilation failed
            pass
        _logger.warning('CUDA is installed, but device %s is not available %s',
                        device, error_addendum)
        return

    if device == 'gpu':
        pass
    elif device.startswith('gpu'):
        device = int(device[3:])
    elif device == 'cpu':
        device = -1
    else:
        raise ValueError("Invalid device identifier", device)
    if use.device_number is None:
        # No successful call to use() has been made yet
        if device != 'gpu' and device < 0:
            return
        if device in [None, ""]:
            device = 0
        try:
            if device != 'gpu':
                assert isinstance(device, int)
                gpu_init(device)
                use.device_number = device
            else:
                # This means the driver should select the GPU.  As we
                # need to get the device number now, we force the
                # selection of the GPU by the driver now and then we
                # query the active GPU.  If we check the active GPU
                # before the device is initialized, we will always
                # receive 0, even if another device is selected later.
                cuda_ndarray.cuda_ndarray.CudaNdarray.zeros((2, 3))
                use.device_number = active_device_number()

            if test_driver:
                import theano.sandbox.cuda.tests.test_driver
                theano.sandbox.cuda.tests.test_driver.test_nvidia_driver1()

            if move_shared_float32_to_gpu:
                handle_shared_float32(True)

            if enable_cuda:
                cuda_enabled = True
            print >> sys.stderr, "Using gpu device %d: %s" % (
                active_device_number(), active_device_name())
        except (EnvironmentError, ValueError, RuntimeError), e:
            _logger.error(("ERROR: Not using GPU."
                           " Initialisation of device %s failed:\n%s"),
                          str(device), e)
            cuda_enabled = False
            if force:
                e.args += (("You asked to force this device and it failed."
                            " No fallback to the cpu or other gpu device."), )
                raise
Code Example #8
File: __init__.py  Project: NicolasBouchard/Theano
def use(device,
        force=False,
        default_to_move_computation_to_gpu=True,
        move_shared_float32_to_gpu=True,
        enable_cuda=True,
        test_driver=True):
    """
    Errors and warnings about CUDA should be displayed only when this
    function is called.  We need to be able to load this module just
    to check whether it is available!

    :param device: string "cpu", "gpu", "gpuN" (N is the device number to use)
    :param force: Will always raise an exception if we can't use the gpu.
    :param default_to_move_computation_to_gpu: If gpu init succeeded, enable by
                                               default optimizations to move
                                               computations to the gpu
    :param move_shared_float32_to_gpu: If gpu init succeeded, put new shared
                                       variables in float32 on the gpu.
    :param enable_cuda: If the gpu is correctly enabled,
                        set the variable cuda_enabled to True.
    """
    global cuda_enabled, cuda_initialization_error_message
    if force and not cuda_available and device.startswith('gpu'):
        if not nvcc_compiler.is_nvcc_available():
            raise EnvironmentError("You forced the use of gpu device '%s', but"
                                   " nvcc was not found. Set it in your PATH "
                                   "environment variable or set the Theano "
                                   "flags 'cuda.root' to its directory"
                                   "" % device)
        else:
            raise EnvironmentError("You forced the use of gpu device %s, "
                                   "but CUDA initialization failed "
                                   "with error:\n%s" % (
                device, cuda_initialization_error_message))
    elif not nvcc_compiler.is_nvcc_available():
        _logger.error('nvcc compiler not found on $PATH.'
              ' Check your nvcc installation and try again.')
        return
    elif not cuda_available:
        error_addendum = ""
        try:
            if cuda_initialization_error_message:
                error_addendum = (" (error: %s)" %
                                  cuda_initialization_error_message)
        except NameError:
            # cuda_initialization_error_message is not available
            # because compilation failed.
            pass
        _logger.warning('CUDA is installed, but device %s is not available %s',
                device, error_addendum)
        return

    if device == 'gpu':
        pass
    elif device.startswith('gpu'):
        device = int(device[3:])
    elif device == 'cpu':
        device = -1
    else:
        raise ValueError("Invalid device identifier", device)
    if use.device_number is None:
        # No successful call to use() has been made yet
        if device != 'gpu' and device < 0:
            return
        if device in [None, ""]:
            device = 0
        try:
            if device != 'gpu':
                assert isinstance(device, int)
                gpu_init(device)
                use.device_number = device
            else:
                # This means the driver should select the GPU.  As we
                # need to get the device number now, we force the
                # selection of the GPU by the driver now and then we
                # query the active GPU.  If we check the active GPU
                # before the device is initialized, we will always
                # receive 0, even if another device is selected later.
                cuda_ndarray.cuda_ndarray.CudaNdarray.zeros((2, 3))
                use.device_number = active_device_number()

            if test_driver:
                import theano.sandbox.cuda.tests.test_driver
                theano.sandbox.cuda.tests.test_driver.test_nvidia_driver1()

            if move_shared_float32_to_gpu:
                handle_shared_float32(True)

            if enable_cuda:
                cuda_enabled = True
            print >> sys.stderr, "Using gpu device %d: %s" % (
                active_device_number(), active_device_name())
        except (EnvironmentError, ValueError), e:
            _logger.error(("ERROR: Not using GPU."
                " Initialisation of device %i failed:\n%s"),
                device, e)
            cuda_enabled = False
            if force:
                e.args += (("You asked to force this device and it failed."
                            " No fallback to the cpu or other gpu device."),)
                raise
Code Example #9
File: __init__.py  Project: HaniAlmousli/Theano
def use(device,
        force=False,
        default_to_move_computation_to_gpu=True,
        move_shared_float32_to_gpu=True,
        enable_cuda=True):
    """
    Errors and warnings about CUDA should be displayed only when this function is called.
    We need to be able to load this module just to check whether it is available!
    """
    global cuda_enabled, cuda_initialization_error_message
    if force and not cuda_available and device.startswith('gpu'):
        if not nvcc_compiler.is_nvcc_available():
            raise EnvironmentError("You forced the use of gpu device '%s', but "
                                   "nvcc was not found. Set it in your PATH "
                                   "environment variable or set the Theano "
                                   "flags 'cuda.root' to its directory" % device)
        else:
            raise EnvironmentError("You forced the use of gpu device %s, "
                                   "but CUDA initialization failed "
                                   "with error:\n%s" % (
                device, cuda_initialization_error_message))
    elif not nvcc_compiler.is_nvcc_available():
        _logger.error('nvcc compiler not found on $PATH.'
              ' Check your nvcc installation and try again.')
        return
    elif not cuda_available:
        error_addendum = ""
        try:
            if cuda_initialization_error_message:
                error_addendum = " (error: %s)" % cuda_initialization_error_message
        except NameError: # cuda_initialization_error_message is not available b/c compilation failed
            pass
        _logger.warning('CUDA is installed, but device %s is not available %s',
                device, error_addendum)
        return

    if device == 'gpu':
        pass
    elif device.startswith('gpu'):
        device = int(device[3:])
    elif device == 'cpu':
        device = -1
    else:
        raise ValueError("Invalid device identifier", device)
    if use.device_number is None:
        # No successful call to use() has been made yet
        if device != 'gpu' and device < 0:
            return
        if device in [None, ""]:
            device = 0
        try:
            if device != 'gpu':
                gpu_init(device)

            if move_shared_float32_to_gpu:
                handle_shared_float32(True)
            use.device_number = device

            if enable_cuda:
                cuda_enabled = True
            print >> sys.stderr, "Using gpu device %d: %s" % (active_device_number(), active_device_name())
        except (EnvironmentError, ValueError), e:
            _logger.error(("ERROR: Not using GPU."
                " Initialisation of device %i failed:\n%s"),
                device, e)
            cuda_enabled = False
            if force:
                e.args += (("You asked to force this device and it failed."
                            " No fallback to the cpu or other gpu device."),)
                raise
Code Example #10
File: __init__.py  Project: thiboeri/Theano
def use(device,
        force=False,
        default_to_move_computation_to_gpu=True,
        move_shared_float32_to_gpu=True,
        enable_cuda=True):
    """
    Errors and warnings about CUDA should be displayed only when this function is called.
    We need to be able to load this module just to check whether it is available!
    """
    global cuda_enabled, cuda_initialization_error_message
    if force and not cuda_available and device.startswith('gpu'):
        if not nvcc_compiler.is_nvcc_available():
            raise EnvironmentError(
                "You forced the use of gpu device '%s', but "
                "nvcc was not found. Set it in your PATH "
                "environment variable or set the Theano "
                "flags 'cuda.root' to its directory" % device)
        else:
            raise EnvironmentError("You forced the use of gpu device %s, "
                                   "but CUDA initialization failed "
                                   "with error:\n%s" %
                                   (device, cuda_initialization_error_message))
    elif not nvcc_compiler.is_nvcc_available():
        _logger.error('nvcc compiler not found on $PATH.'
                      ' Check your nvcc installation and try again.')
        return
    elif not cuda_available:
        error_addendum = ""
        try:
            if cuda_initialization_error_message:
                error_addendum = " (error: %s)" % cuda_initialization_error_message
        except NameError:  # cuda_initialization_error_message is not available b/c compilation failed
            pass
        _logger.warning('CUDA is installed, but device %s is not available %s',
                        device, error_addendum)
        return

    if device == 'gpu':
        pass
    elif device.startswith('gpu'):
        device = int(device[3:])
    elif device == 'cpu':
        device = -1
    else:
        raise ValueError("Invalid device identifier", device)
    if use.device_number is None:
        # No successful call to use() has been made yet
        if device != 'gpu' and device < 0:
            return
        if device in [None, ""]:
            device = 0
        try:
            if device != 'gpu':
                gpu_init(device)

            if move_shared_float32_to_gpu:
                handle_shared_float32(True)
            use.device_number = device

            if enable_cuda:
                cuda_enabled = True
            print >> sys.stderr, "Using gpu device %d: %s" % (
                active_device_number(), active_device_name())
        except (EnvironmentError, ValueError), e:
            _logger.error(("ERROR: Not using GPU."
                           " Initialisation of device %i failed:\n%s"), device,
                          e)
            cuda_enabled = False
            if force:
                e.args += (("You asked to force this device and it failed."
                            " No fallback to the cpu or other gpu device."), )
                raise
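
For reference, the warpSize and regsPerBlock checks near the end of the newer snippets rely on the backend's device_properties() helper. A hedged sketch of inspecting those same properties directly; the function and key names are taken from the code above, and the import path is assumed to be this package.

# Illustrative only: query the properties that use() checks, via the helpers
# referenced in the snippets above.
from theano.sandbox.cuda import (active_device_name, active_device_number,
                                 device_properties, use)

use("gpu0")  # initialize the device first, as in the examples above
props = device_properties(active_device_number())
print("device:", active_device_number(), active_device_name())
print("warpSize:", props["warpSize"])          # use() insists this is 32
print("regsPerBlock:", props["regsPerBlock"])  # < 16384 triggers the old-GPU warning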