Example #1
def compile_gpu_func(nan_is_error, inf_is_error, big_is_error):
    """ compile utility function used by contains_nan and contains_inf
    """
    global f_gpumin, f_gpumax, f_gpuabsmax
    if not cuda.cuda_available:
        return
    guard_input = cuda.fvector("nan_guard")
    cuda_compile_failed = False
    if (nan_is_error or inf_is_error) and f_gpumin is None:
        try:
            f_gpumin = theano.function([guard_input], T.min(guard_input), mode="FAST_RUN")
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if inf_is_error and not cuda_compile_failed and f_gpumax is None:
        try:
            f_gpumax = theano.function([guard_input], T.max(guard_input), mode="FAST_RUN")
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if big_is_error and not cuda_compile_failed and f_gpuabsmax is None:
        try:
            f_gpuabsmax = theano.function([guard_input], T.max(T.abs_(guard_input)), mode="FAST_RUN")
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
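Once compiled, these cached functions reduce a CudaNdarray on the GPU so that only a scalar has to be inspected on the host. A minimal sketch of how they would be consumed (mirroring the do_check_on logic in the NanGuardMode examples below; `value` is assumed to be a CudaNdarray and the three globals to be already compiled):

import numpy as np

def value_is_suspect(value):
    # Flatten to a vector, reduce on the GPU, then test the scalar host-side.
    flat = value.reshape(value.size)
    return (np.isnan(f_gpumin(flat)) or               # NaN propagates through min
            np.isinf(f_gpumin(flat)) or np.isinf(f_gpumax(flat)) or
            f_gpuabsmax(flat) > 1e10)                 # "big value" threshold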
Example #2
    def test_output_broadcast_cuda(self):
        from theano.sandbox import cuda
        if not cuda.cuda_available:
            raise SkipTest("Optional package Cuda disabled")

        v = cuda.fvector('v')
        c, r = VecAsRowAndCol()(v)
        f = theano.function([v], [c, r])

        v_val = cuda.CudaNdarray(self.rng.randn(5).astype('float32'))
        f(v_val)
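The point of this test is that the two outputs of VecAsRowAndCol (a helper op defined in Theano's test suite) carry different broadcastable patterns built from the same vector. A hedged follow-up check, assuming `c` is the column view and `r` the row view:

c_val, r_val = f(v_val)
assert c_val.shape == (5, 1)  # assumed: the length-5 vector as a column
assert r_val.shape == (1, 5)  # assumed: the same vector as a row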
Example #3
def test_nvidia_driver3():
    """ Test that the gpu device is initialized by theano when
        we build a function with gpu op.

        The driver should always be tested during theano initialization
        of the gpu device
    """
    var = cuda.fvector()
    f = theano.function([var], var + 1, mode=mode_with_gpu)
    topo = f.maker.env.toposort()
    assert any([isinstance(node.op, cuda.GpuElemwise) for node in topo])
    assert theano.sandbox.cuda.use.device_number is not None
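Note that `f.maker.env` is the older spelling of what Example #4 below accesses as `f.maker.fgraph`; the attribute was renamed in later Theano releases. A version-tolerant lookup, assuming one of the two names is present:

graph = f.maker.fgraph if hasattr(f.maker, 'fgraph') else f.maker.env
topo = graph.toposort()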
Example #4
def test_nvidia_driver3():
    """ Test that the gpu device is initialized by theano when
        we build a function with gpu op.

        The driver should always be tested during theano initialization
        of the gpu device
    """
    var = cuda.fvector()
    f = theano.function([var], var + 1, mode=mode_with_gpu,
                        profile=False)
    topo = f.maker.fgraph.toposort()
    assert any([isinstance(node.op, cuda.GpuElemwise) for node in topo])
    assert theano.sandbox.cuda.use.device_number is not None
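Both versions of this test rely on a module-level `mode_with_gpu`. In Theano's CUDA test files it is typically derived from the default compilation mode with the GPU optimizations enabled; a sketch of that assumed setup:

import theano
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')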
Example #5
    def test_output_broadcast_cuda(self):
        from theano.sandbox import cuda
        if not cuda.cuda_available:
            raise SkipTest("Optional package Cuda disabled")
        if cuda.use.device_number is None:
            # We would normally register VecAsRowAndCol as a GPU op, but we
            # don't want to do that here, as it would disable other
            # tests in this file. So we manually init the GPU if
            # needed, to remove the warning.
            cuda.use("gpu",
                     force=True,
                     default_to_move_computation_to_gpu=False,
                     move_shared_float32_to_gpu=False,
                     enable_cuda=False)
        v = cuda.fvector('v')
        c, r = VecAsRowAndCol()(v)
        f = theano.function([v], [c, r])

        v_val = cuda.CudaNdarray(self.rng.randn(5).astype('float32'))
        f(v_val)
Example #7
def compile_gpu_func(nan_is_error, inf_is_error, big_is_error):
    """ compile utility function used by contains_nan and contains_inf
    """
    global f_gpumin, f_gpumax, f_gpuabsmax
    if not cuda.cuda_available:
        return
    guard_input = cuda.fvector('nan_guard')
    cuda_compile_failed = False
    if (nan_is_error or inf_is_error) and f_gpumin is None:
        try:
            f_gpumin = theano.function(
                [guard_input], T.min(guard_input),
                mode='FAST_RUN'
            )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if inf_is_error and not cuda_compile_failed and f_gpumax is None:
        try:
            f_gpumax = theano.function(
                [guard_input], T.max(guard_input),
                mode='FAST_RUN'
            )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if big_is_error and not cuda_compile_failed and f_gpuabsmax is None:
        try:
            f_gpuabsmax = theano.function(
                [guard_input], T.max(T.abs_(guard_input)),
                mode='FAST_RUN'
            )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
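Because compile_gpu_func assigns through `global` and only compiles a function when its slot `is None`, the module is assumed to declare the three caches up front:

# Module-level caches, each compiled at most once by compile_gpu_func.
f_gpumin = None
f_gpumax = None
f_gpuabsmax = None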
Example #8
    def test_vector(self):
        x = cuda.fvector()
        y = numpy.zeros(7, dtype='float32')
        assert y.size == theano.function([x], x.size)(y)
Example #9
    def __init__(self, nan_is_error, inf_is_error, big_is_error=True):
        if cuda.cuda_available:
            self.guard_input = cuda.fvector('nan_guard')
            if nan_is_error or inf_is_error:
                self.gpumin = theano.function(
                    [self.guard_input], T.min(self.guard_input),
                    mode='FAST_RUN'
                )
            if inf_is_error:
                self.gpumax = theano.function(
                    [self.guard_input], T.max(self.guard_input),
                    mode='FAST_RUN'
                )
            if big_is_error:
                self.gpuabsmax = theano.function(
                    [self.guard_input], T.max(T.abs_(self.guard_input)),
                    mode='FAST_RUN'
                )

        def do_check_on(var, nd, f, is_input):
            """
            Checks `var` for NaNs / Infs. If detected, raises an exception
            and / or prints information about `nd`, `f`, and `is_input` to
            help the user determine the cause of the invalid values.

            Parameters
            ----------
            var : numpy.ndarray
                The value to be checked.
            nd : theano.gof.Apply
                The Apply node being executed
            f : callable
                The thunk for the apply node
            is_input : bool
                If True, `var` is an input to `nd`.
                If False, it is an output.
            """
            error = False
            if nan_is_error:
                err = False
                if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
                    err = np.isnan(self.gpumin(var.reshape(var.size)))
                else:
                    err = contains_nan(var)
                if err:
                    logger.error('NaN detected')
                    error = True
            if inf_is_error:
                err = False
                if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
                    err = (np.isinf(self.gpumin(var.reshape(var.size))) or
                           np.isinf(self.gpumax(var.reshape(var.size))))
                else:
                    err = contains_inf(var)
                if err:
                    logger.error('Inf detected')
                    error = True
            if big_is_error:
                err = False
                if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
                    err = (self.gpuabsmax(var.reshape(var.size)) > 1e10)
                elif isinstance(var, theano.gof.type.CDataType._cdata_type):
                    err = False
                elif isinstance(var, np.random.mtrand.RandomState):
                    err = False
                else:
                    err = (np.abs(var).max() > 1e10)
                if err:
                    logger.error('Big value detected')
                    error = True
            if error:
                if is_input:
                    logger.error('In an input')
                else:
                    logger.error('In an output')
                logger.error('Inputs: ')
                for ivar, ival in zip(nd.inputs, f.inputs):
                    logger.error('var')
                    logger.error(ivar)
                    logger.error(theano.printing.min_informative_str(ivar))
                    logger.error('val')
                    logger.error(ival)
                logger.error('Node:')
                logger.error(nd)
                assert False

        def nan_check(i, node, fn):
            """
            Runs `fn` while checking its inputs and outputs for NaNs / Infs

            Parameters
            ----------
            i : currently ignored (TODO: determine why it is here or remove)
            node : theano.gof.Apply
                The Apply node currently being executed
            fn : callable
                The thunk to execute for this Apply node
            """
            inputs = fn.inputs
            # TODO: figure out why individual inputs are themselves lists
            # sometimes
            for x in flatten(inputs):
                do_check_on(x, node, fn, True)
            fn()
            outputs = fn.outputs
            for j, x in enumerate(flatten(outputs)):
                do_check_on(x, node, fn, False)

        wrap_linker = theano.gof.WrapLinker([theano.gof.OpWiseCLinker()],
                                            nan_check)
        super(NanGuardMode, self).__init__(wrap_linker,
                                           optimizer=theano.config.optimizer)
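A NanGuardMode instance is used like any other compilation mode: pass it as `mode=` when building a function, and the wrapped linker then runs nan_check around every Apply node. A minimal usage sketch (variable names are illustrative):

import numpy as np
import theano
import theano.tensor as T

x = T.fvector('x')
f = theano.function([x], x * 2,
                    mode=NanGuardMode(nan_is_error=True, inf_is_error=True))
f(np.asarray([1.0, 2.0], dtype='float32'))     # passes the guards
f(np.asarray([1.0, np.inf], dtype='float32'))  # logs 'Inf detected', then fails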
Example #11
    def __init__(self, nan_is_error, inf_is_error, big_is_error=True):
        if cuda.cuda_available:
            self.guard_input = cuda.fvector('nan_guard')
            if nan_is_error or inf_is_error:
                self.gpumin = theano.function([self.guard_input],
                                              T.min(self.guard_input),
                                              mode='FAST_RUN')
            if inf_is_error:
                self.gpumax = theano.function([self.guard_input],
                                              T.max(self.guard_input),
                                              mode='FAST_RUN')
            if big_is_error:
                self.gpuabsmax = theano.function([self.guard_input],
                                                 T.max(T.abs_(
                                                     self.guard_input)),
                                                 mode='FAST_RUN')

        def do_check_on(var, nd, f, is_input):
            """
            Checks `var` for NaNs / Infs. If detected, raises an exception
            and / or prints information about `nd`, `f`, and `is_input` to
            help the user determine the cause of the invalid values.

            Parameters
            ----------
            var : numpy.ndarray
                The value to be checked.
            nd : theano.gof.Apply
                The Apply node being executed
            f : callable
                The thunk for the apply node
            is_input : bool
                If True, `var` is an input to `nd`.
                If False, it is an output.
            """
            error = False
            if nan_is_error:
                err = False
                if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
                    err = np.isnan(self.gpumin(var.reshape(var.size)))
                else:
                    err = contains_nan(var)
                if err:
                    logger.error('NaN detected')
                    error = True
            if inf_is_error:
                err = False
                if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
                    err = (np.isinf(self.gpumin(var.reshape(var.size))) or
                           np.isinf(self.gpumax(var.reshape(var.size))))
                else:
                    err = contains_inf(var)
                if err:
                    logger.error('Inf detected')
                    error = True
            if big_is_error:
                err = False
                if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
                    err = (self.gpuabsmax(var.reshape(var.size)) > 1e10)
                else:
                    err = (np.abs(var).max() > 1e10)
                if err:
                    logger.error('Big value detected')
                    error = True
            if error:
                if is_input:
                    logger.error('In an input')
                else:
                    logger.error('In an output')
                logger.error('Inputs: ')
                for ivar, ival in zip(nd.inputs, f.inputs):
                    logger.error('var')
                    logger.error(ivar)
                    logger.error(theano.printing.min_informative_str(ivar))
                    logger.error('val')
                    logger.error(ival)
                logger.error('Node:')
                logger.error(nd)
                assert False

        def nan_check(i, node, fn):
            """
            Runs `fn` while checking its inputs and outputs for NaNs / Infs

            Parameters
            ----------
            i : currently ignored (TODO: determine why it is here or remove)
            node : theano.gof.Apply
                The Apply node currently being executed
            fn : callable
                The thunk to execute for this Apply node
            """
            inputs = fn.inputs
            # TODO: figure out why individual inputs are themselves lists sometimes
            for x in flatten(inputs):
                do_check_on(x, node, fn, True)
            fn()
            outputs = fn.outputs
            for j, x in enumerate(flatten(outputs)):
                do_check_on(x, node, fn, False)

        wrap_linker = theano.gof.WrapLinkerMany([theano.gof.OpWiseCLinker()],
                                                [nan_check])
        super(NanGuardMode, self).__init__(wrap_linker,
                                           optimizer=theano.config.optimizer)
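The main structural difference from Example #9 is the wrapper class: theano.gof.WrapLinker takes a single callback, while WrapLinkerMany takes a list of callbacks, which is why the argument is written `[nan_check]` here. Side by side, as used in the two examples:

# Example #9: a single callback.
wrap_linker = theano.gof.WrapLinker([theano.gof.OpWiseCLinker()], nan_check)
# Example #11: a list of callbacks, each run for every Apply node.
wrap_linker = theano.gof.WrapLinkerMany([theano.gof.OpWiseCLinker()], [nan_check])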
Example #12
    def __init__(self,
                 nan_is_error=None,
                 inf_is_error=None,
                 big_is_error=None,
                 optimizer=None,
                 linker=None):
        self.provided_optimizer = optimizer
        cuda_compile_failed = False
        if nan_is_error is None:
            nan_is_error = config.NanGuardMode.nan_is_error
        if inf_is_error is None:
            inf_is_error = config.NanGuardMode.inf_is_error
        if big_is_error is None:
            big_is_error = config.NanGuardMode.big_is_error

        assert nan_is_error or inf_is_error or big_is_error

        if cuda.cuda_available:
            self.guard_input = cuda.fvector('nan_guard')
            if nan_is_error or inf_is_error:
                try:
                    self.gpumin = theano.function([self.guard_input],
                                                  T.min(self.guard_input),
                                                  mode='FAST_RUN')
                except RuntimeError:
                    # This can happen if cuda is available, but the
                    # device is in exclusive mode and used by another
                    # process.
                    cuda_compile_failed = True
            if inf_is_error and not cuda_compile_failed:
                try:
                    self.gpumax = theano.function([self.guard_input],
                                                  T.max(self.guard_input),
                                                  mode='FAST_RUN')
                except RuntimeError:
                    # This can happen if cuda is available, but the
                    # device is in exclusive mode and used by another
                    # process.
                    cuda_compile_failed = True
            if big_is_error and not cuda_compile_failed:
                try:
                    self.gpuabsmax = theano.function(
                        [self.guard_input],
                        T.max(T.abs_(self.guard_input)),
                        mode='FAST_RUN')
                except RuntimeError:
                    # This can happen if cuda is available, but the
                    # device is in exclusive mode and used by another
                    # process.
                    cuda_compile_failed = True

        def do_check_on(var, nd, f, is_input):
            """
            Checks `var` for NaNs / Infs. If detected, raises an exception
            and / or prints information about `nd`, `f`, and `is_input` to
            help the user determine the cause of the invalid values.

            Parameters
            ----------
            var : numpy.ndarray
                The value to be checked.
            nd : theano.gof.Apply
                The Apply node being executed.
            f : callable
                The thunk for the apply node.
            is_input : bool
                If True, `var` is an input to `nd`.
                If False, it is an output.

            """
            error = False
            if nan_is_error:
                err = False
                if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
                    if not isinstance(
                            nd.op,
                            # It stores ints in a float container
                            theano.sandbox.rng_mrg.GPU_mrg_uniform):
                        err = np.isnan(self.gpumin(var.reshape(var.size)))
                else:
                    err = contains_nan(var)
                if err:
                    logger.error('NaN detected')
                    error = True
            if inf_is_error:
                err = False
                if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
                    err = (np.isinf(self.gpumin(var.reshape(var.size)))
                           or np.isinf(self.gpumax(var.reshape(var.size))))
                else:
                    err = contains_inf(var)
                if err:
                    logger.error('Inf detected')
                    error = True
            if big_is_error:
                err = False
                if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
                    err = (self.gpuabsmax(var.reshape(var.size)) > 1e10)
                elif isinstance(var, theano.gof.type.CDataType._cdata_type):
                    err = False
                elif isinstance(var, np.random.mtrand.RandomState):
                    err = False
                else:
                    err = (np.abs(var).max() > 1e10)
                if err:
                    logger.error('Big value detected')
                    error = True
            if error:
                if is_input:
                    logger.error('In an input')
                else:
                    logger.error('In an output')
                logger.error('Inputs: ')
                for ivar, ival in zip(nd.inputs, f.inputs):
                    logger.error('var')
                    logger.error(ivar)
                    logger.error(theano.printing.min_informative_str(ivar))
                    logger.error('val')
                    logger.error(ival)
                logger.error('Node:')
                logger.error(nd)
                assert False

        def nan_check(i, node, fn):
            """
            Runs `fn` while checking its inputs and outputs for NaNs / Infs.

            Parameters
            ----------
            i :
                Currently ignored.
                TODO: determine why it is here or remove.
            node : theano.gof.Apply
                The Apply node currently being executed.
            fn : callable
                The thunk to execute for this Apply node.

            """
            inputs = fn.inputs
            # TODO: figure out why individual inputs are themselves lists
            # sometimes
            for x in flatten(inputs):
                do_check_on(x, node, fn, True)
            fn()
            outputs = fn.outputs
            for j, x in enumerate(flatten(outputs)):
                do_check_on(x, node, fn, False)

        wrap_linker = theano.gof.WrapLinker([theano.gof.OpWiseCLinker()],
                                            nan_check)
        super(NanGuardMode, self).__init__(wrap_linker,
                                           optimizer=self.provided_optimizer)
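This final variant pulls its defaults from Theano's config, so the guard can be driven by flags rather than constructor arguments, and it tolerates an exclusive-mode GPU by catching the compile-time RuntimeError. A sketch of both ways of enabling it (the Python call follows the signature above; treat the exact THEANO_FLAGS syntax as an assumption):

# Explicit construction, overriding the config defaults:
mode = NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=False)

# Or via the environment, relying on the config.NanGuardMode.* defaults:
#   THEANO_FLAGS='mode=NanGuardMode,NanGuardMode.big_is_error=False' python train.py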