Example #1
import logging
import warnings

from theano.gof.cc import hash_from_file
from theano.gof.cmodule import (std_libs, std_lib_dirs,
                                std_include_dirs, dlimport,
                                get_lib_extension)
from theano.gof.python25 import any
from theano.misc.windows import call_subprocess_Popen

_logger = logging.getLogger("theano.sandbox.cuda.nvcc_compiler")
_logger.setLevel(logging.WARN)

from theano.configparser import (config, AddConfigVar, StrParam,
                                 BoolParam, ConfigParam)

AddConfigVar('nvcc.compiler_bindir',
             "If defined, nvcc compiler driver will seek g++ and gcc"
             " in this directory",
             StrParam(""),
             in_c_key=False)

AddConfigVar('cuda.nvccflags',
        "DEPRECATED, use nvcc.flags instead",
        StrParam("", allow_override=False),
        in_c_key=False)

if config.cuda.nvccflags != '':
    warnings.warn('Configuration variable cuda.nvccflags is deprecated. '
            'Please use nvcc.flags instead. You provided value: %s'
            % config.cuda.nvccflags)


def filter_nvcc_flags(s):
Example #2

from theano.configparser import AddConfigVar, EnumStr


def floatX_convert(s):
    if s == "32":
        return "float32"
    elif s == "64":
        return "float64"
    elif s == "16":
        return "float16"
    else:
        return s

AddConfigVar('floatX',
             "Default floating-point precision for python casts.\n"
             "\n"
             "Note: float16 support is experimental, use at your own risk.",
             EnumStr('float64', 'float32', 'float16',
                     convert=floatX_convert,),
             )
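A quick self-contained check of the convert hook above: it expands the
shorthand bit widths users may pass (e.g. THEANO_FLAGS=floatX=32) and passes
already-valid values through untouched. It assumes only floatX_convert as
defined in this example.

def _check_floatX_convert():
    # shorthand forms are expanded to full dtype names
    assert floatX_convert("32") == "float32"
    assert floatX_convert("64") == "float64"
    assert floatX_convert("16") == "float16"
    # anything else is returned as-is, for EnumStr to validate
    assert floatX_convert("float32") == "float32"

_check_floatX_convert()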

AddConfigVar('warn_float64',
             "Do an action when a tensor variable with float64 dtype is"
             " created. They can't be run on the GPU with the current(old)"
             " gpu back-end and are slow with gamer GPUs.",
             EnumStr('ignore', 'warn', 'raise', 'pdb'),
             in_c_key=False,
             )

AddConfigVar('cast_policy',
             'Rules for implicit type casting',
             EnumStr('custom', 'numpy+floatX',
Example #3
        name = (kwargs and kwargs.pop('name')) or local_opt.__name__
        gpu_optimizer.register(name, local_opt, 'fast_run', 'fast_compile',
                               'gpu', *tags)
        return local_opt

    return f


_logger_name = 'theano.sandbox.cuda'
_logger = logging.getLogger(_logger_name)

AddConfigVar('pycuda.init',
             """If True, always initialize PyCUDA when Theano want to
           initilize the GPU.  Currently, we must always initialize
           PyCUDA before Theano do it.  Setting this flag to True,
           ensure that, but always import PyCUDA.  It can be done
           manually by importing theano.misc.pycuda_init before theano
           initialize the GPU device.
             """,
             BoolParam(False),
             in_c_key=False)
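As the help text above says, the manual alternative is a plain import before
the GPU device is initialized. A minimal sketch, assuming PyCUDA and the old
CUDA backend are installed:

# Import for its side effect: PyCUDA gets initialized before Theano
# touches the GPU.
import theano.misc.pycuda_init  # noqa: F401
import theano.sandbox.cuda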

AddConfigVar('cublas.lib', """Name of the cuda blas library for the linker.""",
             StrParam('cublas'))

#is_nvcc_available called here to initialize global vars in
#nvcc_compiler module
nvcc_compiler.is_nvcc_available()

# Compile cuda_ndarray.cu
# This need that nvcc (part of cuda) is installed. If it is not, a warning is
# printed and this module will not be working properly (we set `cuda_available`
Example #4
    sr = '-'.join(sp)
    p = p.replace(r, sr)

    return p


compiledir_format_dict['short_platform'] = short_platform()
compiledir_format_keys = ", ".join(sorted(compiledir_format_dict.keys()))
default_compiledir_format = ("compiledir_%(short_platform)s-%(processor)s-"
                             "%(python_version)s-%(python_bitwidth)s")

AddConfigVar("compiledir_format",
             textwrap.fill(
                 textwrap.dedent("""\
                 Format string for platform-dependent compiled
                 module subdirectory (relative to base_compiledir).
                 Available keys: %s. Defaults to %r.
             """ % (compiledir_format_keys, default_compiledir_format))),
             StrParam(default_compiledir_format, allow_override=False),
             in_c_key=False)


def default_compiledirname():
    formatted = config.compiledir_format % compiledir_format_dict
    safe = re.sub(r"[\(\)\s,]+", "_", formatted)
    return safe


def filter_base_compiledir(path):
    # Expand '~' in path
    return os.path.expanduser(str(path))
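A small standalone illustration of the two helpers above, with a hypothetical
format string and dict standing in for the real compiledir_format_dict:

import os
import re

fmt = "compiledir_%(short_platform)s-%(processor)s"   # hypothetical format
d = {"short_platform": "Linux-3.2--generic", "processor": "x86_64"}
formatted = fmt % d
# the same sanitization default_compiledirname() applies
print(re.sub(r"[\(\)\s,]+", "_", formatted))
# '~' expansion, as done by filter_base_compiledir
print(os.path.expanduser("~/.theano"))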
Example #5
import sys
import time

import numpy

import theano
from theano.configparser import AddConfigVar, BoolParam, IntParam

import_time = time.time()
config = theano.config

_atexit_print_list = []
_atexit_print_file = sys.stderr

AddConfigVar('profiling.time_thunks',
             """Time individual thunks when profiling""",
             BoolParam(True),
             in_c_key=False)

AddConfigVar('profiling.n_apply',
             "Number of Apply instances to print by default",
             IntParam(20, lambda i: i > 0),
             in_c_key=False)

AddConfigVar('profiling.n_ops',
             "Number of Ops to print by default",
             IntParam(20, lambda i: i > 0),
             in_c_key=False)

AddConfigVar('profiling.min_memory_size',
             """For the memory profile, do not print Apply nodes if the size
             of their outputs (in bytes) is lower than this threshold""",
Example #6
import logging

from theano.configparser import AddConfigVar, StrParam

_logger = logging.getLogger("theano.tests.unittest_tools")


def good_seed_param(seed):
    if seed == "random":
        return True
    try:
        int(seed)
    except Exception:
        return False
    return True
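The validator above accepts the literal string 'random' or anything parsable
as an int; a quick check using only the function defined in this example:

assert good_seed_param("random")
assert good_seed_param("123") and good_seed_param(666)
assert not good_seed_param("not-a-seed")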


AddConfigVar('unittests.rseed', "Seed to use for randomized unit tests. "
             "Special value 'random' means using a seed of None.",
             StrParam(666, is_valid=good_seed_param),
             in_c_key=False)


def fetch_seed(pseed=None):
    """
    Returns the seed to use for running the unit tests.
    If an explicit seed is given, it will be used for seeding numpy's rng.
    If not, it will use config.unittests.rseed (its default value is 666).
    If config.unittests.rseed is set to "random", it will seed the rng with None,
    which is equivalent to seeding with a random seed.

    Useful for seeding RandomState objects.
    >>> rng = numpy.random.RandomState(unittest_tools.fetch_seed())
    """
Example #7
import logging
import os

import theano
from theano.compile import optdb
from theano.gof.cmodule import get_lib_extension
from theano.gof.compilelock import get_lock, release_lock
from theano.configparser import config, AddConfigVar, StrParam, BoolParam
import nvcc_compiler

_logger_name = 'theano.sandbox.cuda'
_logger = logging.getLogger(_logger_name)
_logger.setLevel(logging.WARNING)

AddConfigVar(
    'cuda.root', """directory with bin/, lib/, include/ for cuda utilities.
        This directory is included via -L and -rpath when linking
        dynamically compiled modules.  If AUTO and nvcc is in the
        path, it will use one of nvcc's parent directories.  Otherwise
        /usr/local/cuda will be used.  Leave empty to prevent extra
        linker directives.  Default: environment variable "CUDA_ROOT"
        or else "AUTO".
        """, StrParam(os.getenv('CUDA_ROOT', "AUTO")))

AddConfigVar(
    'pycuda.init', """If True, always initialize PyCUDA when Theano want to
           initilize the GPU.  Currently, we must always initialize
           PyCUDA before Theano do it.  Setting this flag to True,
           ensure that, but always import PyCUDA.  It can be done
           manually by importing theano.misc.pycuda_init before theano
           initialize the GPU device.
             """, BoolParam(False))

if config.cuda.root == "AUTO":
Example #8
        name = (kwargs and kwargs.pop('name')) or local_opt.__name__
        gpu_optimizer.register(name, local_opt, 'fast_run', 'fast_compile',
                               'gpu', *tags, **kwargs)
        return local_opt

    return f


_logger_name = 'theano.sandbox.cuda'
_logger = logging.getLogger(_logger_name)

AddConfigVar('pycuda.init',
             """If True, always initialize PyCUDA when Theano want to
           initilize the GPU.  Currently, we must always initialize
           PyCUDA before Theano do it.  Setting this flag to True,
           ensure that, but always import PyCUDA.  It can be done
           manually by importing theano.misc.pycuda_init before theano
           initialize the GPU device.
             """,
             BoolParam(False),
             in_c_key=False)

AddConfigVar('cublas.lib', """Name of the cuda blas library for the linker.""",
             StrParam('cublas'))

AddConfigVar(
    'lib.cumem',
    """Do we enable cumem or not.""",
    # We should not mix both allocator, so we can't override
    BoolParam(False, allow_override=False),
    in_c_key=False)
Example #9
from __future__ import print_function
import collections
import logging

from six.moves import StringIO
import numpy as np

import theano
from theano.configparser import config, AddConfigVar, BoolParam, EnumStr
import theano.tensor as T
import theano.sandbox.cuda as cuda
from theano.compile import Mode

AddConfigVar('NanGuardMode.nan_is_error',
             "Default value for nan_is_error",
             BoolParam(True),
             in_c_key=False)

AddConfigVar('NanGuardMode.inf_is_error',
             "Default value for inf_is_error",
             BoolParam(True),
             in_c_key=False)

AddConfigVar('NanGuardMode.big_is_error',
             "Default value for big_is_error",
             BoolParam(True),
             in_c_key=False)

AddConfigVar('NanGuardMode.action',
             "What NanGuardMode does when it finds a problem",
             EnumStr('raise', 'warn', 'pdb'),
Example #10
import theano.tensor as T
from theano.configparser import config, AddConfigVar, StrParam
from theano.gof.python25 import any
try:
    from nose.plugins.skip import SkipTest
except ImportError:

    class SkipTest(Exception):
        """
        Skip this test
        """


AddConfigVar(
    'unittests.rseed',
    "Seed to use for randomized unit tests. Special value 'random' means using a seed of None.",
    StrParam(666),
    in_c_key=False)


def fetch_seed(pseed=None):
    """
    Returns the seed to use for running the unit tests.
    If an explicit seed is given, it will be used for seeding numpy's rng.
    If not, it will use config.unittests.rseed (its default value is 666).
    If config.unittests.rseed is set to "random", it will seed the rng with None,
    which is equivalent to seeding with a random seed.

    Useful for seeding RandomState objects.
    >>> rng = numpy.random.RandomState(unittest_tools.fetch_seed())
    """
Example #11
import logging
import sys
import time
import link
import traceback
from theano.gof.python25 import all

import theano

from theano.configparser import config, AddConfigVar, BoolParam

logger = logging.getLogger(__name__)

AddConfigVar('profile', "If VM should collect profile information",
             BoolParam(False))

raise_with_op = link.raise_with_op


class VM(object):
    """
    A VM object evaluates a Theano program with its __call__ method.

    Attributes:

    call_counts - list of integers, one for each thunk. call_count[i] is the
        number of times thunks[i] was called in the course of computations
        performed by call_with_timers().

    call_times - list of floats, one for each thunk. call_times[i] is the amount
Example #12
import os
import logging
import subprocess
import sys

from theano.configparser import (AddConfigVar, BoolParam, ConfigParam, EnumStr,
                                 IntParam, FloatParam, StrParam,
                                 TheanoConfigParser)

_logger = logging.getLogger('theano.configdefaults')

config = TheanoConfigParser()

AddConfigVar(
    'floatX',
    "Default floating-point precision for python casts",
    EnumStr('float64', 'float32'),
)

AddConfigVar(
    'cast_policy',
    "Rules for implicit type casting",
    EnumStr(
        'custom',
        'numpy+floatX',
        # The 'numpy' policy was originally planned to provide a smooth
        # transition from numpy. It was meant to behave the same as
        # numpy+floatX, but keeping float64 when numpy would. However
        # the current implementation of some cast mechanisms makes it
        # a bit more complex to add than what was expected, so it is
        # currently not available.
Example #13
import logging
import warnings

import numpy

import theano
from theano.gof.cc import hash_from_file
from theano.gof.cmodule import (std_libs, std_lib_dirs, std_include_dirs,
                                dlimport, get_lib_extension, local_bitwidth)
from theano.gof.python25 import any

_logger = logging.getLogger("theano.sandbox.cuda.nvcc_compiler")
_logger.setLevel(logging.WARN)

from theano.configparser import (config, AddConfigVar, StrParam, BoolParam,
                                 ConfigParam)

AddConfigVar(
    'nvcc.compiler_bindir',
    "If defined, nvcc compiler driver will seek g++ and gcc"
    " in this directory", StrParam(""))

AddConfigVar('cuda.nvccflags',
             "DEPRECATED, use nvcc.flags instead",
             StrParam("", allow_override=False),
             in_c_key=False)

if config.cuda.nvccflags != '':
    warnings.warn('Configuration variable cuda.nvccflags is deprecated. '
                  'Please use nvcc.flags instead. You provided value: %s' %
                  config.cuda.nvccflags)


def filter_nvcc_flags(s):
    assert isinstance(s, str)
Example #14
import collections
import logging

import numpy as np

import theano
from theano.configparser import config, AddConfigVar, BoolParam
import theano.tensor as T
import theano.sandbox.cuda as cuda
from theano.compile import Mode

AddConfigVar('NanGuardMode.nan_is_error',
             "Default value for nan_is_error",
             BoolParam(True),
             in_c_key=False)

AddConfigVar('NanGuardMode.inf_is_error',
             "Default value for inf_is_error",
             BoolParam(True),
             in_c_key=False)

AddConfigVar('NanGuardMode.big_is_error',
             "Default value for big_is_error",
             BoolParam(True),
             in_c_key=False)

logger = logging.getLogger("theano.compile.nanguardmode")


def flatten(l):
    """
Example #15
    def f(local_opt):
        name = (kwargs and kwargs.pop('name')) or local_opt.__name__
        gpu_optimizer.register(name, local_opt, 'fast_run', 'fast_compile',
                               'gpu', *tags, **kwargs)
        return local_opt
    return f


_logger_name = 'theano.sandbox.cuda'
_logger = logging.getLogger(_logger_name)

AddConfigVar('pycuda.init',
        """If True, always initialize PyCUDA when Theano want to
           initilize the GPU.  Currently, we must always initialize
           PyCUDA before Theano do it.  Setting this flag to True,
           ensure that, but always import PyCUDA.  It can be done
           manually by importing theano.misc.pycuda_init before theano
           initialize the GPU device.
             """,
        BoolParam(False),
        in_c_key=False)

AddConfigVar('cublas.lib',
        """Name of the cuda blas library for the linker.""",
        StrParam('cublas'))

AddConfigVar('lib.cnmem',
             """Do we enable CNMeM or not (a faster CUDA memory allocator).

             The parameter represent the start size (in MB or % of
             total GPU memory) of the memory pool.
Example #16
"""
VMs that run Theano graph computations.
"""
import logging
import sys
import time
import link
from theano.gof.python25 import all

import theano
from theano.configparser import config, AddConfigVar, BoolParam

logger = logging.getLogger(__name__)

AddConfigVar('profile', "If VM should collect profile information",
             BoolParam(False))
AddConfigVar('profile_optimizer',
             "If VM should collect optimizer profile information",
             BoolParam(False))

raise_with_op = link.raise_with_op


class VM(object):
    """
    A VM object evaluates a Theano program with its __call__ method.

    Attributes:

    call_counts - list of integers, one for each thunk. call_count[i] is the
        number of times thunks[i] was called in the course of computations
Example #17
                # function would incorrectly identify it as (1 + exp(x)).
                if config.warn.identify_1pexp_bug:
                    warnings.warn(
                        'Although your current code is fine, please note that '
                        'Theano versions prior to 0.5 (more specifically, '
                        'prior to commit 7987b51 on 2011-12-18) may have '
                        'yielded an incorrect result. To remove this warning, '
                        'either set the `warn.identify_1pexp_bug` config '
                        'option to False, or `warn.ignore_bug_before` to at '
                        'least \'0.4.1\'.')
    return None


AddConfigVar('warn.identify_1pexp_bug',
        'Warn if Theano versions prior to 7987b51 (2011-12-18) could have '
        'yielded a wrong result due to a bug in the is_1pexp function',
        BoolParam(theano.configdefaults.warn_default('0.4.1')),
        in_c_key=False)


def is_exp(var):
    """
    Match a variable with either of the `exp(x)` or `-exp(x)` patterns.

    :param var: The Variable to analyze.

    :return: A pair (b, x) with `b` a boolean set to True if `var` is of the
    form `-exp(x)` and False if `var` is of the form `exp(x)`. If `var` cannot
    be cast into either form, then return `None`.
    """
    neg = False
Example #18
import logging

import unify
import toolbox
import op
import theano
from theano import config
from theano.gof.python25 import any, all, deque
from theano.configparser import AddConfigVar, BoolParam, config

#if sys.version_info[:2] >= (2,5):
#  from collections import defaultdict

_logger = logging.getLogger('theano.gof.opt')

AddConfigVar(
    'time_seq_optimizer',
    "Should SeqOptimizer print the time taked by each of its optimizer",
    BoolParam(False),
    in_c_key=False)

import destroyhandler as dh
import traceback

_optimizer_idx = [0]


def _list_of_nodes(env):
    return list(graph.io_toposort(env.inputs, env.outputs))


class Optimizer(object):
    """WRITEME
Example #19
import logging
import warnings
from textwrap import dedent

import numpy

import theano
from theano import gof
import theano.gof.vm
from theano.configparser import config, AddConfigVar, StrParam
from theano.compile.ops import register_view_op_c_code, _output_guard

_logger = logging.getLogger('theano.compile.mode')

AddConfigVar(
    'optimizer_excluding',
    ("When using the default mode, we will remove optimizer with these "
     "tags. Separate tags with ':'."),
    StrParam("", allow_override=False),
    in_c_key=False)
AddConfigVar(
    'optimizer_including',
    ("When using the default mode, we will add optimizer with these tags. "
     "Separate tags with ':'."),
    StrParam("", allow_override=False),
    in_c_key=False)
AddConfigVar(
    'optimizer_requiring',
    ("When using the default mode, we will require optimizer with these "
     "tags. Separate tags with ':'."),
    StrParam("", allow_override=False),
    in_c_key=False)
Example #20
import logging
import os

import theano
from theano.compile import optdb
from theano.gof.cmodule import get_lib_extension
from theano.gof.compilelock import get_lock, release_lock
from theano.configparser import config, AddConfigVar, StrParam
import nvcc_compiler

_logger_name = 'theano.sandbox.cuda'
_logger = logging.getLogger(_logger_name)
_logger.setLevel(logging.WARNING)

AddConfigVar(
    'cuda.root', """directory with bin/, lib/, include/ for cuda utilities.
        This directory is included via -L and -rpath when linking
        dynamically compiled modules.  If AUTO and nvcc is in the
        path, it will use one of nvcc's parent directories.  Otherwise
        /usr/local/cuda will be used.  Leave empty to prevent extra
        linker directives.  Default: environment variable "CUDA_ROOT"
        or else "AUTO".
        """, StrParam(os.getenv('CUDA_ROOT', "AUTO")))

if config.cuda.root == "AUTO":
    # set nvcc_path correctly and get the version
    nvcc_compiler.set_cuda_root()

#is_nvcc_available called here to initialize global vars in
#nvcc_compiler module
nvcc_compiler.is_nvcc_available()

# Compile cuda_ndarray.cu
# This need that nvcc (part of cuda) is installed. If it is not, a warning is
Example #21
            # failure is a mystery, it has been seen on some Windows system.
            home = os.getenv('USERPROFILE')
    assert home is not None
    return home


# On Windows we should avoid writing temporary files to a directory that is
# part of the roaming part of the user profile. Instead we use the local part
# of the user profile, when available.
if sys.platform == 'win32' and os.getenv('LOCALAPPDATA') is not None:
    default_base_compiledir = os.path.join(os.getenv('LOCALAPPDATA'), 'Theano')
else:
    default_base_compiledir = os.path.join(get_home_dir(), '.theano')

AddConfigVar('base_compiledir',
             "arch-independent cache directory for compiled modules",
             StrParam(default_base_compiledir, allow_override=False))

AddConfigVar(
    'compiledir', "arch-dependent cache directory for compiled modules",
    ConfigParam(os.path.join(os.path.expanduser(config.base_compiledir),
                             default_compiledirname()),
                filter=filter_compiledir,
                allow_override=False))


def print_compiledir_content():
    def flatten(a):
        if isinstance(a, (tuple, list, set)):
            l = []
            for item in a:
Example #22
import os
import logging
import subprocess

from theano.configparser import (AddConfigVar, BoolParam, ConfigParam, EnumStr,
                                 IntParam, StrParam, TheanoConfigParser)
from theano.misc.cpucount import cpuCount
from theano.misc.windows import call_subprocess_Popen

_logger = logging.getLogger('theano.configdefaults')

config = TheanoConfigParser()

AddConfigVar(
    'floatX',
    "Default floating-point precision for python casts",
    EnumStr('float64', 'float32'),
)

AddConfigVar(
    'cast_policy',
    "Rules for implicit type casting",
    EnumStr(
        'custom',
        'numpy+floatX',
        # The 'numpy' policy was originally planned to provide a smooth
        # transition from numpy. It was meant to behave the same as
        # numpy+floatX, but keeping float64 when numpy would. However
        # the current implementation of some cast mechanisms makes it
        # a bit more complex to add than what was expected, so it is
        # currently not available.
Example #23
import logging
import os

from theano.gof import local_bitwidth
from theano.gof.utils import hash_from_file
from theano.gof.cmodule import (std_libs, std_lib_dirs,
                                std_include_dirs, dlimport,
                                Compiler,
                                get_lib_extension)
from theano.misc.windows import output_subprocess_Popen

_logger = logging.getLogger("theano.sandbox.cuda.nvcc_compiler")

from theano.configparser import (config, AddConfigVar, StrParam,
                                 BoolParam, ConfigParam)

AddConfigVar('nvcc.compiler_bindir',
             "If defined, nvcc compiler driver will seek g++ and gcc"
             " in this directory",
             StrParam(""),
             in_c_key=False)

user_provided_cuda_root = True


def default_cuda_root():
    global user_provided_cuda_root
    v = os.getenv('CUDA_ROOT', "")
    user_provided_cuda_root = False
    if v:
        return v
    return find_cuda_root()

AddConfigVar('cuda.root',
Example #24
import copy
import sys
import time

import numpy

import theano
from theano.configparser import AddConfigVar, BoolParam

import_time = time.time()
config = theano.config

_atexit_print_list = []
_atexit_print_file = sys.stderr

AddConfigVar('profiling.time_thunks',
             """Time individual thunks when profiling""", BoolParam(True))


def _atexit_print_fn():
    """Print ProfileStat objects in _atexit_print_list to _atexit_print_file
    """
    printed = 0
    for ps in _atexit_print_list:
        if ps.fct_callcount or ps.compile_time > 0:
            ps.summary(file=_atexit_print_file)
            printed += 1
        else:
            print('Skipping empty Profile')
    if printed > 1:
        # Make a global profile
        cum = copy.copy(_atexit_print_list[0])
Example #25
import sys

import numpy

from theano.gof.python25 import DefaultOrderedDict
from theano.misc.ordered_set import OrderedSet
from theano.compat.six import StringIO
from theano.gof import opt
from theano.configparser import AddConfigVar, FloatParam
from theano import config
AddConfigVar('optdb.position_cutoff',
             'Where to stop earlier during optimization. It represents the'
             ' position of the optimizer at which to stop.',
             FloatParam(numpy.inf),
             in_c_key=False)
AddConfigVar('optdb.max_use_ratio',
             'A ratio that prevents infinite loops in EquilibriumOptimizer.',
             FloatParam(5),
             in_c_key=False)


class DB(object):
    def __hash__(self):
        if not hasattr(self, '_optimizer_idx'):
            self._optimizer_idx = opt._optimizer_idx[0]
            opt._optimizer_idx[0] += 1
        return self._optimizer_idx

    def __init__(self):
        self.__db__ = DefaultOrderedDict(OrderedSet)
        self._names = set()
Example #26
import logging
import os
import sys
import time
import warnings

from theano.configparser import (config, AddConfigVar, BoolParam, ConfigParam,
                                 _config_var_list)

import theano.gof.cmodule

from theano.compat import defaultdict

logger = logging.getLogger(__name__)

AddConfigVar('profile',
             "If VM should collect profile information",
             BoolParam(False),
             in_c_key=False)
AddConfigVar('profile_optimizer',
             "If VM should collect optimizer profile information",
             BoolParam(False),
             in_c_key=False)
AddConfigVar('profile_memory',
             "If VM should collect memory profile information and print it",
             BoolParam(False),
             in_c_key=False)


def filter_vm_lazy(val):
    if val == 'False' or val is False:
        return False
    elif val == 'True' or val is True:
Example #27
import time

import theano
from theano.gof.link import WrapLinker
from theano.compile.mode import (Mode, register_mode, predefined_modes,
                                 predefined_linkers, predefined_optimizers)
from theano.gof.python25 import any
from theano import gof
from theano.configparser import config, AddConfigVar, IntParam, BoolParam
from theano.compile.function_module import FunctionMaker
run_cthunk = None  # Will be imported only when needed.

from profiling import ProfileStats

import_time = time.time()

AddConfigVar('ProfileMode.n_apply_to_print',
             "Number of apply instances to print by default",
             IntParam(15, lambda i: i > 0),
             in_c_key=False)

AddConfigVar('ProfileMode.n_ops_to_print',
             "Number of ops to print by default",
             IntParam(20, lambda i: i > 0),
             in_c_key=False)

AddConfigVar('ProfileMode.min_memory_size',
             """For the memory profile, do not print apply nodes if the size
 of their outputs (in bytes) is lower then this threshold""",
             IntParam(1024, lambda i: i >= 0),
             in_c_key=False)

AddConfigVar('ProfileMode.profile_memory',
             """Enable profiling of memory used by Theano functions""",
Example #28
import logging

from theano.configparser import AddConfigVar, BoolParam, config

_logger_name = 'theano.sandbox.gpuarray'
_logger = logging.getLogger(_logger_name)

error = _logger.error
info = _logger.info

pygpu_activated = False
try:
    import pygpu
    import pygpu.gpuarray
except ImportError:
    pygpu = None

AddConfigVar('gpuarray.sync',
             """If True, every op will make sure its work is done before
                returning.  Setting this to True will slow down execution,
                but give much more accurate results in profiling.""",
             BoolParam(False),
             in_c_key=True)

# This is for documentation not to depend on the availability of pygpu
from type import (GpuArrayType, GpuArrayVariable, GpuArrayConstant,
                  GpuArraySharedVariable, gpuarray_shared_constructor)
import opt


def init_dev(dev):
    global pygpu_activated
    context = pygpu.init(dev)
    pygpu.set_default_context(context)
    pygpu_activated = True
    if config.print_active_device:
Example #29
import time
import logging

from contextlib import contextmanager

from theano import config
from theano.configparser import AddConfigVar, IntParam

_logger = logging.getLogger("theano.gof.compilelock")
# If the user provided a logging level, we don't want to override it.
if _logger.level == logging.NOTSET:
    # INFO will show the "Refreshing lock" messages
    _logger.setLevel(logging.INFO)

AddConfigVar('compile.wait',
             """Time to wait before retrying to aquire the compile lock.""",
             IntParam(5, lambda i: i > 0, allow_override=False),
             in_c_key=False)


def _timeout_default():
    return config.compile.wait * 24
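With the default compile.wait of 5 seconds, the helper above yields a
120-second timeout; per the compile.timeout help text registered just below,
running processes then refresh their lock every half timeout period:

wait = 5                # config.compile.wait default
timeout = wait * 24     # _timeout_default() -> 120 seconds
refresh = timeout // 2  # lock refreshed every 60 seconds
assert (timeout, refresh) == (120, 60)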


AddConfigVar('compile.timeout',
             """In seconds, time that a process will wait before deciding to
override an existing lock. An override only happens when the existing
lock is held by the same owner *and* has not been 'refreshed' by this
owner for more than this period. Refreshes are done every half timeout
period for running processes.""",
             IntParam(_timeout_default, lambda i: i >= 0,
                      allow_override=False),
Example #30
from theano.configparser import AddConfigVar, StrParam

AddConfigVar('pthreads.inc_dir', "location of pthread.h", StrParam(""))

AddConfigVar('pthreads.lib_dir', "location of library implementing pthreads",
             StrParam(""))

AddConfigVar(
    'pthreads.lib',
    'name of the library that implements pthreads (e.g. "pthreadVC2" if using pthreadVC2.dll/.lib from pthreads-win32)',
    StrParam(""))