Example #1
from theano.gof.utils import hash_from_file
from theano.gof.cmodule import (std_libs, std_lib_dirs,
                                std_include_dirs, dlimport,
                                Compiler,
                                get_lib_extension)
from theano.misc.windows import output_subprocess_Popen

_logger = logging.getLogger("theano.sandbox.cuda.nvcc_compiler")

from theano.configparser import (config, AddConfigVar, StrParam,
                                 BoolParam, ConfigParam)

AddConfigVar('nvcc.compiler_bindir',
             "If defined, nvcc compiler driver will seek g++ and gcc"
             " in this directory",
             StrParam(""),
             in_c_key=False)

user_provided_cuda_root = True


def default_cuda_root():
    global user_provided_cuda_root
    v = os.getenv('CUDA_ROOT', "")
    user_provided_cuda_root = False
    if v:
        return v
    return find_cuda_root()

AddConfigVar('cuda.root',
        """directory with bin/, lib/, include/ for cuda utilities.
Example #2
from theano.gof.cc import hash_from_file
from theano.gof.cmodule import (std_libs, std_lib_dirs, std_include_dirs,
                                dlimport, get_lib_extension)
from theano.gof.python25 import any
from theano.misc.windows import output_subprocess_Popen

_logger = logging.getLogger("theano.sandbox.cuda.nvcc_compiler")
_logger.setLevel(logging.WARN)

from theano.configparser import (config, AddConfigVar, StrParam, BoolParam,
                                 ConfigParam)

AddConfigVar('nvcc.compiler_bindir',
             "If defined, nvcc compiler driver will seek g++ and gcc"
             " in this directory",
             StrParam(""),
             in_c_key=False)

user_provided_cuda_root = True


def default_cuda_root():
    global user_provided_cuda_root
    v = os.getenv('CUDA_ROOT', "")
    user_provided_cuda_root = False
    if v:
        return v
    return find_cuda_root()


AddConfigVar('cuda.root',
Example #3
    return p


compiledir_format_dict['short_platform'] = short_platform()
compiledir_format_keys = ", ".join(sorted(compiledir_format_dict.keys()))
default_compiledir_format = ("compiledir_%(short_platform)s-%(processor)s-"
                             "%(python_version)s-%(python_bitwidth)s")

AddConfigVar("compiledir_format",
             textwrap.fill(
                 textwrap.dedent("""\
                 Format string for platform-dependent compiled
                 module subdirectory (relative to base_compiledir).
                 Available keys: %s. Defaults to %r.
             """ % (compiledir_format_keys, default_compiledir_format))),
             StrParam(default_compiledir_format, allow_override=False),
             in_c_key=False)


def default_compiledirname():
    formatted = config.compiledir_format % compiledir_format_dict
    safe = re.sub(r"[\(\)\s,]+", "_", formatted)
    return safe
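
# For illustration, with hypothetical dictionary values (the real
# compiledir_format_dict is built earlier in this module), the format string is
# expanded and then sanitized into a safe directory name:
#
#     fmt = "compiledir_%(short_platform)s-%(processor)s"
#     d = {'short_platform': 'Windows-7-6.1.7601 (SP1)', 'processor': 'AMD64'}
#     re.sub(r"[\(\)\s,]+", "_", fmt % d)
#     # -> 'compiledir_Windows-7-6.1.7601_SP1_-AMD64'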


def filter_base_compiledir(path):
    # Expand '~' in path
    return os.path.expanduser(str(path))


def filter_compiledir(path):
Example #4
from theano.configparser import config, AddConfigVar, StrParam, BoolParam
import nvcc_compiler

_logger_name = 'theano.sandbox.cuda'
_logger = logging.getLogger(_logger_name)
_logger.setLevel(logging.WARNING)

AddConfigVar(
    'cuda.root', """directory with bin/, lib/, include/ for cuda utilities.
        This directory is included via -L and -rpath when linking
        dynamically compiled modules.  If AUTO and nvcc is in the
        path, one of nvcc's parent directories will be used.  Otherwise
        /usr/local/cuda will be used.  Leave empty to prevent extra
        linker directives.  Default: environment variable "CUDA_ROOT"
        or else "AUTO".
        """, StrParam(os.getenv('CUDA_ROOT', "AUTO")))

AddConfigVar(
    'pycuda.init', """If True, always initialize PyCUDA when Theano want to
           initilize the GPU.  Currently, we must always initialize
           PyCUDA before Theano do it.  Setting this flag to True,
           ensure that, but always import PyCUDA.  It can be done
           manually by importing theano.misc.pycuda_init before theano
           initialize the GPU device.
             """, BoolParam(False))

if config.cuda.root == "AUTO":
    # set nvcc_path correctly and get the version
    nvcc_compiler.set_cuda_root()

# is_nvcc_available called here to initialize global vars in
Example #5
            home = os.getenv('USERPROFILE')
    assert home is not None
    return home


# On Windows we should avoid writing temporary files to a directory that is
# part of the roaming part of the user profile. Instead we use the local part
# of the user profile, when available.
if sys.platform == 'win32' and os.getenv('LOCALAPPDATA') is not None:
    default_base_compiledir = os.path.join(os.getenv('LOCALAPPDATA'), 'Theano')
else:
    default_base_compiledir = os.path.join(get_home_dir(), '.theano')

AddConfigVar('base_compiledir',
             "arch-independent cache directory for compiled modules",
             StrParam(default_base_compiledir, allow_override=False))

AddConfigVar(
    'compiledir', "arch-dependent cache directory for compiled modules",
    ConfigParam(os.path.join(os.path.expanduser(config.base_compiledir),
                             default_compiledirname()),
                filter=filter_compiledir,
                allow_override=False))


def print_compiledir_content():
    def flatten(a):
        if isinstance(a, (tuple, list, set)):
            l = []
            for item in a:
                l.extend(flatten(item))
Example #6
from theano.configparser import AddConfigVar, StrParam

AddConfigVar('pthreads.inc_dir', "location of pthread.h", StrParam(""))

AddConfigVar('pthreads.lib_dir', "location of library implementing pthreads",
             StrParam(""))

AddConfigVar(
    'pthreads.lib',
    'name of the library that implements pthreads (e.g. "pthreadVC2" if using pthreadVC2.dll/.lib from pthreads-win32)',
    StrParam(""))
Example #7
File: mode.py  Project: yubow/Theano
import numpy

import theano
from theano import gof
import theano.gof.vm
from theano.configparser import config, AddConfigVar, StrParam
from theano.compile.ops import register_view_op_c_code, _output_guard

_logger = logging.getLogger('theano.compile.mode')

AddConfigVar(
    'optimizer_excluding',
    ("When using the default mode, we will remove optimizer with these "
     "tags. Separate tags with ':'."),
    StrParam("", allow_override=False),
    in_c_key=False)
AddConfigVar(
    'optimizer_including',
    ("When using the default mode, we will add optimizer with these tags. "
     "Separate tags with ':'."),
    StrParam("", allow_override=False),
    in_c_key=False)
AddConfigVar(
    'optimizer_requiring',
    ("When using the default mode, we will require optimizer with these "
     "tags. Separate tags with ':'."),
    StrParam("", allow_override=False),
    in_c_key=False)

Example #8

    for dir in s.split(os.path.pathsep):
        if os.path.exists(os.path.join(dir, "nvcc")):
            return os.path.split(dir)[0]
    return ''


AddConfigVar('cuda.root',
             """directory with bin/, lib/, include/ for cuda utilities.
       This directory is included via -L and -rpath when linking
       dynamically compiled modules.  If AUTO and nvcc is in the
       path, one of nvcc's parent directories will be used.  Otherwise
       /usr/local/cuda will be used.  Leave empty to prevent extra
       linker directives.  Default: environment variable "CUDA_ROOT"
       or else "AUTO".
       """,
             StrParam(default_cuda_root),
             in_c_key=False)


def filter_nvcc_flags(s):
    assert isinstance(s, str)
    flags = [flag for flag in s.split(' ') if flag]
    if any([f for f in flags if not f.startswith("-")]):
        raise ValueError(
            "Theano nvcc.flags support only parameter/value pairs without"
            " space between them. e.g.: '--machine 64' is not supported,"
            " but '--machine=64' is supported. Please add the '=' symbol."
            " nvcc.flags value is '%s'" % s)
    return ' '.join(flags)
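
A quick illustration of the rule filter_nvcc_flags enforces (a sketch, assuming the
function above is in scope):

# '='-joined parameter/value pairs pass through; extra spaces are collapsed.
assert filter_nvcc_flags("--machine=64  -O3") == "--machine=64 -O3"

# A space-separated pair is rejected, because the bare value '64' does not
# start with '-'.
try:
    filter_nvcc_flags("--machine 64")
except ValueError:
    pass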

Example #9
AddConfigVar(
    'device',
    ("Default device for computations. If gpu*, change the default to try "
     "to move computation to it and to put shared variable of float32 "
     "on it. Do not use upper case letters, only lower case even if "
     "NVIDIA use capital letters."),
    DeviceParam('cpu', allow_override=False),
    in_c_key=False,
)

AddConfigVar('gpuarray.init_device',
             """
             Device to initialize for gpuarray use without moving
             computations automatically.
             """,
             StrParam(''),
             in_c_key=False)

AddConfigVar(
    'init_gpu_device',
    ("Initialize the gpu device to use, works only if device=cpu. "
     "Unlike 'device', setting this option will NOT move computations, "
     "nor shared variables, to the specified GPU. "
     "It can be used to run GPU-specific tests on a particular GPU."),
    EnumStr('',
            'gpu',
            'gpu0',
            'gpu1',
            'gpu2',
            'gpu3',
            'gpu4',
Example #10
from theano.configparser import config, AddConfigVar, StrParam
from theano.gof.python25 import any
try:
    from nose.plugins.skip import SkipTest
except ImportError:

    class SkipTest(Exception):
        """
        Skip this test
        """


AddConfigVar(
    'unittests.rseed',
    "Seed to use for randomized unit tests. Special value 'random' means using a seed of None.",
    StrParam(666),
    in_c_key=False)


def fetch_seed(pseed=None):
    """
    Returns the seed to use for running the unit tests.
    If an explicit seed is given, it will be used for seeding numpy's rng.
    If not, it will use config.unittests.rseed (its default value is 666).
    If config.unittests.rseed is set to "random", it will seed the rng with None,
    which is equivalent to seeding with a random seed.

    Useful for seeding RandomState objects.
    >>> rng = numpy.random.RandomState(unittest_tools.fetch_seed())
    """
Example #11
    for dir in s.split(os.path.pathsep):
        if os.path.exists(os.path.join(dir, "nvcc")):
            return os.path.dirname(os.path.abspath(dir))
    return ''

AddConfigVar(
    'cuda.root',
    """directory with bin/, lib/, include/ for cuda utilities.
       This directory is included via -L and -rpath when linking
       dynamically compiled modules.  If AUTO and nvcc is in the
       path, one of nvcc's parent directories will be used.  Otherwise
       /usr/local/cuda will be used.  Leave empty to prevent extra
       linker directives.  Default: environment variable "CUDA_ROOT"
       or else "AUTO".
       """,
    StrParam(default_cuda_root),
    in_c_key=False)


def filter_nvcc_flags(s):
    assert isinstance(s, str)
    flags = [flag for flag in s.split(' ') if flag]
    if any([f for f in flags if not f.startswith("-")]):
        raise ValueError(
            "Theano nvcc.flags support only parameter/value pairs without"
            " space between them. e.g.: '--machine 64' is not supported,"
            " but '--machine=64' is supported. Please add the '=' symbol."
            " nvcc.flags value is '%s'" % s)
    return ' '.join(flags)

AddConfigVar('nvcc.flags',
Example #12
import theano
from theano.gof.cc import hash_from_file
from theano.gof.cmodule import (std_libs, std_lib_dirs, std_include_dirs,
                                dlimport, get_lib_extension, local_bitwidth)
from theano.gof.python25 import any

_logger = logging.getLogger("theano.sandbox.cuda.nvcc_compiler")
_logger.setLevel(logging.WARN)

from theano.configparser import (config, AddConfigVar, StrParam, BoolParam,
                                 ConfigParam)

AddConfigVar(
    'nvcc.compiler_bindir',
    "If defined, nvcc compiler driver will seek g++ and gcc"
    " in this directory", StrParam(""))

AddConfigVar('cuda.nvccflags',
             "DEPRECATED, use nvcc.flags instead",
             StrParam("", allow_override=False),
             in_c_key=False)

if config.cuda.nvccflags != '':
    warnings.warn('Configuration variable cuda.nvccflags is deprecated. '
                  'Please use nvcc.flags instead. You provided value: %s' %
                  config.cuda.nvccflags)


def filter_nvcc_flags(s):
    assert isinstance(s, str)
    flags = [flag for flag in s.split(' ') if flag]
Example #13
import sys
import warnings

from theano.gof.cc import hash_from_file
from theano.gof.cmodule import (std_libs, std_lib_dirs, std_include_dirs,
                                dlimport, get_lib_extension, local_bitwidth)

_logger = logging.getLogger("theano.sandbox.cuda.nvcc_compiler")
_logger.setLevel(logging.WARN)

from theano.configparser import config, AddConfigVar, StrParam, BoolParam

AddConfigVar(
    'nvcc.compiler_bindir',
    "If defined, nvcc compiler driver will seek g++ and gcc"
    " in this directory", StrParam(""))

AddConfigVar('cuda.nvccflags',
             "DEPRECATED, use nvcc.flags instead",
             StrParam("", allow_override=False),
             in_c_key=False)

if config.cuda.nvccflags != '':
    warnings.warn('Configuration variable cuda.nvccflags is deprecated. '
                  'Please use nvcc.flags instead. You provided value: %s' %
                  config.cuda.nvccflags)

AddConfigVar('nvcc.flags', "Extra compiler flags for nvcc",
             StrParam(config.cuda.nvccflags))

AddConfigVar('nvcc.fastmath', "", BoolParam(False))
Example #14
AddConfigVar(
    'device',
    ("Default device for computations. If gpu*, change the default to try "
     "to move computation to it and to put shared variable of float32 "
     "on it. Do not use upper case letters, only lower case even if "
     "NVIDIA use capital letters."),
    DeviceParam('cpu', allow_override=False),
    in_c_key=False,
)

AddConfigVar('gpuarray.init_device',
             """
             Device to initialize for gpuarray use without moving
             computations automatically.
             """,
             StrParam(''),
             in_c_key=False)

AddConfigVar(
    'init_gpu_device',
    ("Initialize the gpu device to use, works only if device=cpu. "
     "Unlike 'device', setting this option will NOT move computations, "
     "nor shared variables, to the specified GPU. "
     "It can be used to run GPU-specific tests on a particular GPU."),
    EnumStr('',
            'gpu',
            'gpu0',
            'gpu1',
            'gpu2',
            'gpu3',
            'gpu4',
Example #15
AddConfigVar(
    'device',
    ("Default device for computations. If gpu*, change the default to try "
     "to move computation to it and to put shared variable of float32 "
     "on it. Do not use upper case letters, only lower case even if "
     "NVIDIA use capital letters."),
    DeviceParam('cpu', allow_override=False),
    in_c_key=False,)

AddConfigVar('gpuarray.init_device',
             """
             Device to initialize for gpuarray use without moving
             computations automatically.
             """,
             StrParam(''),
             in_c_key=False)

AddConfigVar(
    'init_gpu_device',
    ("Initialize the gpu device to use, works only if device=cpu. "
     "Unlike 'device', setting this option will NOT move computations, "
     "nor shared variables, to the specified GPU. "
     "It can be used to run GPU-specific tests on a particular GPU."),
    EnumStr('', 'gpu',
            'gpu0', 'gpu1', 'gpu2', 'gpu3',
            'gpu4', 'gpu5', 'gpu6', 'gpu7',
            'gpu8', 'gpu9', 'gpu10', 'gpu11',
            'gpu12', 'gpu13', 'gpu14', 'gpu15',
            allow_override=False),
    in_c_key=False)
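
A sketch of the usual way to choose these at run time (assuming a standard Theano
install; THEANO_FLAGS must be set before theano is imported):

import os

# Keep graphs on the CPU but pre-initialize gpu1, e.g. for GPU-specific tests;
# unlike device=gpu1, init_gpu_device does not move computations or shared
# variables to the GPU.
os.environ['THEANO_FLAGS'] = 'device=cpu,init_gpu_device=gpu1'

import theano  # THEANO_FLAGS is read when theano is first imported
print(theano.config.device, theano.config.init_gpu_device)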
Example #16
_logger = logging.getLogger("theano.tests.unittest_tools")


def good_seed_param(seed):
    if seed == "random":
        return True
    try:
        int(seed)
    except Exception:
        return False
    return True
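
# Sketch of what the validator above accepts:
#   good_seed_param("random")  -> True   (special value)
#   good_seed_param("666")     -> True   (any value int() can parse)
#   good_seed_param("abc")     -> False  (int() raises, so it is rejected)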


AddConfigVar('unittests.rseed', "Seed to use for randomized unit tests. "
             "Special value 'random' means using a seed of None.",
             StrParam(666, is_valid=good_seed_param),
             in_c_key=False)


def fetch_seed(pseed=None):
    """
    Returns the seed to use for running the unit tests.
    If an explicit seed is given, it will be used for seeding numpy's rng.
    If not, it will use config.unittests.rseed (its default value is 666).
    If config.unittests.rseed is set to "random", it will seed the rng with None,
    which is equivalent to seeding with a random seed.

    Useful for seeding RandomState objects.
    >>> rng = numpy.random.RandomState(unittest_tools.fetch_seed())
    """
Example #17
_logger_name = 'theano.sandbox.cuda'
_logger = logging.getLogger(_logger_name)

AddConfigVar('pycuda.init',
             """If True, always initialize PyCUDA when Theano want to
           initilize the GPU.  Currently, we must always initialize
           PyCUDA before Theano do it.  Setting this flag to True,
           ensure that, but always import PyCUDA.  It can be done
           manually by importing theano.misc.pycuda_init before theano
           initialize the GPU device.
             """,
             BoolParam(False),
             in_c_key=False)

AddConfigVar('cublas.lib', """Name of the cuda blas library for the linker.""",
             StrParam('cublas'))

# is_nvcc_available called here to initialize global vars in
# nvcc_compiler module
nvcc_compiler.is_nvcc_available()

# Compile cuda_ndarray.cu
# This requires that nvcc (part of cuda) is installed. If it is not, a warning
# is printed and this module will not work properly (we set `cuda_available`
# to False).

# This variable is True by default, and set to False if nvcc is not
# available, there is no cuda card, or something goes wrong when
# trying to initialize cuda.
cuda_available = True
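
Downstream code and tests typically guard on this flag; roughly (a sketch, not the
exact code from this module):

from nose.plugins.skip import SkipTest
from theano.sandbox import cuda

if not cuda.cuda_available:
    raise SkipTest("CUDA is not available; skipping this GPU test")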
Example #18
import subprocess
import sys
import warnings

from theano.gof.cmodule import (std_libs, std_lib_dirs, std_include_dirs,
                                dlimport, get_lib_extension, local_bitwidth)

_logger = logging.getLogger("theano.sandbox.cuda.nvcc_compiler")
_logger.setLevel(logging.WARN)

from theano.configparser import config, AddConfigVar, StrParam, BoolParam

AddConfigVar(
    'nvcc.compiler_bindir',
    "If defined, nvcc compiler driver will seek g++ and gcc in this directory",
    StrParam(""))

AddConfigVar('cuda.nvccflags',
             "DEPRECATED, use nvcc.flags instead",
             StrParam("", allow_override=False),
             in_c_key=False)

if config.cuda.nvccflags != '':
    warnings.warn('Configuration variable cuda.nvccflags is deprecated. '
                  'Please use nvcc.flags instead. You provided value: %s' %
                  config.cuda.nvccflags)

AddConfigVar('nvcc.flags', "Extra compiler flags for nvcc",
             StrParam(config.cuda.nvccflags))

AddConfigVar('nvcc.fastmath', "", BoolParam(False))