Exemplo n.º 1
0
    ("Initialize the gpu device to use, works only if device=cpu. "
     "Unlike 'device', setting this option will NOT move computations, "
     "nor shared variables, to the specified GPU. "
     "It can be used to run GPU-specific tests on a particular GPU."),
    EnumStr('', 'gpu',
            'gpu0', 'gpu1', 'gpu2', 'gpu3',
            'gpu4', 'gpu5', 'gpu6', 'gpu7',
            'gpu8', 'gpu9', 'gpu10', 'gpu11',
            'gpu12', 'gpu13', 'gpu14', 'gpu15',
            allow_override=False),
    in_c_key=False)

# When True, failing to use the requested device is a hard error
# (the value itself cannot be overridden: allow_override=False).
AddConfigVar('force_device',
             "Raise an error if we can't use the specified device",
             BoolParam(False, allow_override=False),
             in_c_key=False)

# Informational flag: report which device ended up active once the GPU
# device has been initialized.  (Help text grammar fixed: "at when" ->
# "when".)
AddConfigVar(
    'print_active_device',
    "Print the active device when the GPU device is initialized.",
    BoolParam(True, allow_override=False),
    in_c_key=False)


# This flag determines whether or not to raise error/warning message if
# there is a CPU Op in the computational graph.
AddConfigVar(
    'assert_no_cpu_op',
    "Raise an error/warning if there is a CPU op in the computational graph.",
    EnumStr('ignore', 'warn', 'raise', 'pdb', allow_override=True),
Exemplo n.º 2
0
    return f


# Module-level logger shared by this CUDA sandbox module.
_logger_name = 'theano.sandbox.cuda'
_logger = logging.getLogger(_logger_name)

# Help text rewritten: fixed typo "initilize" and the broken grammar of
# the original sentences; the flag's behavior is unchanged.
AddConfigVar('pycuda.init',
             """If True, always initialize PyCUDA when Theano wants to
             initialize the GPU.  Currently, PyCUDA must always be
             initialized before Theano initializes the GPU.  Setting
             this flag to True ensures that, but it always imports
             PyCUDA.  This can be done manually by importing
             theano.misc.pycuda_init before Theano initializes the
             GPU device.
             """,
             BoolParam(False),
             in_c_key=False)

# Name passed to the linker for the CUDA BLAS library.
AddConfigVar(
    'cublas.lib',
    """Name of the cuda blas library for the linker.""",
    StrParam('cublas'))

# is_nvcc_available is called here for its side effect: it initializes
# global variables in the nvcc_compiler module.
nvcc_compiler.is_nvcc_available()

# Compile cuda_ndarray.cu
# This need that nvcc (part of cuda) is installed. If it is not, a warning is
# printed and this module will not be working properly (we set `cuda_available`
# to False).

# This variable is True by default, and set to False if nvcc is not
Exemplo n.º 3
0
            'gpu6',
            'gpu7',
            'gpu8',
            'gpu9',
            'gpu10',
            'gpu11',
            'gpu12',
            'gpu13',
            'gpu14',
            'gpu15',
            allow_override=False),
    in_c_key=False)

AddConfigVar(
    'force_device',
    "Raise an error if we can't use the specified device",
    # Default off; allow_override=False means the value is fixed.
    BoolParam(False, allow_override=False),
    in_c_key=False)

# Report the active device once GPU initialization completes.
# (Help text grammar fixed: "at when" -> "when".)
AddConfigVar('print_active_device',
             "Print the active device when the GPU device is initialized.",
             BoolParam(True, allow_override=False),
             in_c_key=False)

# Reaction to CPU Ops appearing in the computational graph:
# 'ignore' (the first entry, hence the default), 'warn', 'raise',
# or drop into 'pdb'.
AddConfigVar('assert_no_cpu_op',
             "Raise an error/warning if there is a CPU op in the computational graph.",
             EnumStr('ignore', 'warn', 'raise', 'pdb', allow_override=True),
             in_c_key=False)
Exemplo n.º 4
0
import time

import numpy

import theano
from theano.configparser import AddConfigVar, BoolParam, IntParam

# Wall-clock timestamp taken at import time; presumably used to measure
# import/compile overhead elsewhere in the profiler — confirm against callers.
import_time = time.time()
config = theano.config

# Profile objects appended here are printed at process exit
# (the atexit registration is not visible in this chunk — confirm).
_atexit_print_list = []
# Destination stream for the at-exit profile summaries.
_atexit_print_file = sys.stderr

# Whether the profiler times each individual thunk.
AddConfigVar(
    'profiling.time_thunks',
    """Time individual thunks when profiling""",
    BoolParam(True),
    in_c_key=False)

AddConfigVar(
    'profiling.n_apply',
    "Number of Apply instances to print by default",
    # Validator: the count must be strictly positive.
    IntParam(20, lambda value: value > 0),
    in_c_key=False)

AddConfigVar(
    'profiling.n_ops',
    "Number of Ops to print by default",
    # Validator: the count must be strictly positive.
    IntParam(20, lambda value: value > 0),
    in_c_key=False)

AddConfigVar('profiling.min_memory_size',
             """For the memory profile, do not print Apply nodes if the size
             of their outputs (in bytes) is lower than this threshold""",
Exemplo n.º 5
0
                if config.warn.identify_1pexp_bug:
                    warnings.warn(
                        'Although your current code is fine, please note that '
                        'Theano versions prior to 0.5 (more specifically, '
                        'prior to commit 7987b51 on 2011-12-18) may have '
                        'yielded an incorrect result. To remove this warning, '
                        'either set the `warn.identify_1pexp_bug` config '
                        'option to False, or `warn.ignore_bug_before` to at '
                        'least \'0.4.1\'.')
    return None


# Warning switch for the historical is_1pexp bug; defaults based on the
# version cutoff computed by warn_default('0.4.1').
AddConfigVar(
    'warn.identify_1pexp_bug',
    'Warn if Theano versions prior to 7987b51 (2011-12-18) could have '
    'yielded a wrong result due to a bug in the is_1pexp function',
    BoolParam(theano.configdefaults.warn_default('0.4.1')),
    in_c_key=False)


def is_exp(var):
    """
    Match a variable with either of the `exp(x)` or `-exp(x)` patterns.

    :param var: The Variable to analyze.

    :return: A pair (b, x) with `b` a boolean set to True if `var` is of the
    form `-exp(x)` and False if `var` is of the form `exp(x)`. If `var` cannot
    be cast into either form, then return `None`.
    """
    neg = False
    neg_info = is_neg(var)
Exemplo n.º 6
0
        This directory is included via -L and -rpath when linking
        dynamically compiled modules.  If AUTO and nvcc is in the
        path, it will use one of nvcc parent directory.  Otherwise
        /usr/local/cuda will be used.  Leave empty to prevent extra
        linker directives.  Default: environment variable "CUDA_ROOT"
        or else "AUTO".
        """, StrParam(os.getenv('CUDA_ROOT', "AUTO")))

# Help text rewritten: fixed typo "initilize" and the broken grammar of
# the original sentences; the flag's behavior is unchanged.
AddConfigVar(
    'pycuda.init', """If True, always initialize PyCUDA when Theano wants to
           initialize the GPU.  Currently, PyCUDA must always be
           initialized before Theano initializes the GPU.  Setting
           this flag to True ensures that, but it always imports
           PyCUDA.  This can be done manually by importing
           theano.misc.pycuda_init before Theano initializes the
           GPU device.
             """, BoolParam(False))

# If the user left cuda.root at "AUTO", derive the CUDA install root
# from the nvcc location (otherwise /usr/local/cuda is used, per the
# cuda.root help text above).
if config.cuda.root == "AUTO":
    # set nvcc_path correctly and get the version
    nvcc_compiler.set_cuda_root()

# is_nvcc_available is called here for its side effect: it initializes
# global variables in the nvcc_compiler module.
nvcc_compiler.is_nvcc_available()

# Compile cuda_ndarray.cu
# This need that nvcc (part of cuda) is installed. If it is not, a warning is
# printed and this module will not be working properly (we set `cuda_available`
# to False).

# This variable is True by default, and set to False if nvcc is not
Exemplo n.º 7
0
import sys
import time

import numpy

import theano
from theano.configparser import AddConfigVar, BoolParam

# Wall-clock timestamp taken at import time; presumably used to measure
# import/compile overhead elsewhere in the profiler — confirm against callers.
import_time = time.time()
config = theano.config

# Profile objects appended here are printed at process exit by
# _atexit_print_fn (the atexit registration is not visible here — confirm).
_atexit_print_list = []
# Destination stream for the at-exit profile summaries.
_atexit_print_file = sys.stderr

# Whether the profiler times each individual thunk.
AddConfigVar(
    'profiling.time_thunks',
    """Time individual thunks when profiling""",
    BoolParam(True))


def _atexit_print_fn():
    """Print ProfileStat objects in _atexit_print_list to _atexit_print_file
    """
    printed = 0
    for ps in _atexit_print_list:
        if ps.fct_callcount or ps.compile_time > 0:
            ps.summary(file=_atexit_print_file)
            printed += 1
        else:
            print 'Skipping empty Profile'
    if printed > 1:
        # Make a global profile
        cum = copy.copy(_atexit_print_list[0])
Exemplo n.º 8
0
"""
import logging
import sys
import time
import link
from theano.gof.python25 import all

import theano
config = theano.config

# NOTE(review): this import rebinds `config`, shadowing the
# theano.config binding two lines above; presumably both refer to the
# same object — confirm, and consider dropping one of the two.
from theano.configparser import config, AddConfigVar, BoolParam

# Module-level logger named after this module.
logger = logging.getLogger(__name__)

# Runtime profiling switches for the VM.
AddConfigVar(
    'profile',
    "If VM should collect profile information",
    BoolParam(False))
AddConfigVar(
    'profile_optimizer',
    "If VM should collect optimizer profile information",
    BoolParam(False))

# Module-level alias for link.raise_with_op.
raise_with_op = link.raise_with_op


class VM(object):
    """
    A VM object evaluates a Theano program with its __call__ method.

    Attributes:

    call_counts - list of integers, one for each thunk. call_count[i] is the
        number of times thunks[i] was called in the course of computations
Exemplo n.º 9
0
    return f


_logger_name = 'theano.sandbox.cuda'
_logger = logging.getLogger(_logger_name)

# Help text rewritten: fixed typo "initilize" and the broken grammar of
# the original sentences; the flag's behavior is unchanged.
AddConfigVar('pycuda.init',
             """If True, always initialize PyCUDA when Theano wants to
             initialize the GPU.  Currently, PyCUDA must always be
             initialized before Theano initializes the GPU.  Setting
             this flag to True ensures that, but it always imports
             PyCUDA.  This can be done manually by importing
             theano.misc.pycuda_init before Theano initializes the
             GPU device.
             """,
             BoolParam(False),
             in_c_key=False)

# Name passed to the linker for the CUDA BLAS library.
AddConfigVar(
    'cublas.lib',
    """Name of the cuda blas library for the linker.""",
    StrParam('cublas'))

AddConfigVar('lib.cumem',
             """Do we enable cumem or not.""",
             # The two allocators must not be mixed, so overriding
             # this flag is disallowed.
             BoolParam(False, allow_override=False),
             in_c_key=False)

# is_nvcc_available is called here for its side effect: it initializes
# global variables in the nvcc_compiler module.
nvcc_compiler.is_nvcc_available()
Exemplo n.º 10
0
            'gpu6',
            'gpu7',
            'gpu8',
            'gpu9',
            'gpu10',
            'gpu11',
            'gpu12',
            'gpu13',
            'gpu14',
            'gpu15',
            allow_override=False),
    in_c_key=False)

AddConfigVar(
    'force_device',
    "Raise an error if we can't use the specified device",
    # Off by default; not overridable (allow_override=False).
    BoolParam(False, allow_override=False),
    in_c_key=False)

# Do not add FAST_RUN_NOGC to this list (nor any other ALL CAPS shortcut).
# The way to get FAST_RUN_NOGC is with the flag 'linker=c|py_nogc'.
# The old all capital letter way of working is deprecated as it is not
# scalable.
# Also, please be careful not to modify the first item in the enum when adding
# new modes, since it is the default mode.
AddConfigVar(
    'mode',
    "Default compilation mode",
    # The first enum entry is the default mode — keep it first.
    EnumStr('Mode', 'ProfileMode', 'DebugMode', 'FAST_RUN',
            'FAST_COMPILE', 'PROFILE_MODE', 'DEBUG_MODE'),
    in_c_key=False)

# Test whether or not gcc is present: disable C code if it is not.
Exemplo n.º 11
0
def filter_nvcc_flags(s):
    """Validate and normalize a space-separated string of nvcc flags.

    :param s: string of nvcc flags separated by spaces.

    :return: the flags re-joined with single spaces, with empty entries
        (caused by repeated spaces) removed.

    :raises ValueError: if any flag does not start with '-', which
        happens when a parameter and its value are separated by a space
        (e.g. '--machine 64') instead of '=' (e.g. '--machine=64').
    """
    assert isinstance(s, str)
    flags = [flag for flag in s.split(' ') if flag]
    # Use a generator so any() short-circuits without building an
    # intermediate list (the original wrapped a list comprehension).
    if any(not flag.startswith("-") for flag in flags):
        raise ValueError(
            "Theano nvcc.flags support only parameter/value pairs without"
            " space between them. e.g.: '--machine 64' is not supported,"
            " but '--machine=64' is supported. Please add the '=' symbol."
            " nvcc.flags value is '%s'" % s)
    return ' '.join(flags)


AddConfigVar(
    'nvcc.flags',
    "Extra compiler flags for nvcc",
    # filter_nvcc_flags rejects flags whose value is separated by a
    # space instead of '='.
    ConfigParam(config.cuda.nvccflags, filter_nvcc_flags))

# NOTE(review): empty help text; presumably toggles nvcc fast-math
# compilation — confirm before documenting further.
AddConfigVar('nvcc.fastmath', "", BoolParam(False))

# Command used to invoke nvcc; presumably updated elsewhere once the
# real location is known — confirm.
nvcc_path = 'nvcc'
# nvcc release version; filled in by is_nvcc_available() (its
# set_version helper assigns the global).
nvcc_version = None


def is_nvcc_available():
    """Return True iff the nvcc compiler is found."""
    def set_version():
        p = subprocess.Popen([nvcc_path, '--version'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        p.wait()
        s = p.stdout.readlines()[-1].split(',')[1].strip().split()
        assert s[0] == 'release'
        global nvcc_version