Example #1
def test_jax_compile_ops():
    x = theano.compile.ops.DeepCopyOp()(tt.as_tensor_variable(1.1))
    x_fg = theano.gof.FunctionGraph([], [x])

    compare_jax_and_py(x_fg, [])

    x_np = np.zeros((20, 3))
    x = theano.compile.ops.Shape()(tt.as_tensor_variable(x_np))
    x_fg = theano.gof.FunctionGraph([], [x])

    compare_jax_and_py(x_fg, [], must_be_device_array=False)

    x = theano.compile.ops.Shape_i(1)(tt.as_tensor_variable(x_np))
    x_fg = theano.gof.FunctionGraph([], [x])

    compare_jax_and_py(x_fg, [], must_be_device_array=False)

    x = theano.compile.ops.SpecifyShape()(tt.as_tensor_variable(x_np), (20, 3))
    x_fg = theano.gof.FunctionGraph([], [x])

    compare_jax_and_py(x_fg, [])

    with theano.change_flags(compute_test_value="off"):
        x = theano.compile.ops.SpecifyShape()(tt.as_tensor_variable(x_np),
                                              (2, 3))
        x_fg = theano.gof.FunctionGraph([], [x])

        with pytest.raises(AssertionError):
            compare_jax_and_py(x_fg, [])

    x_np = np.zeros((20, 1, 1))
    x = theano.compile.ops.Rebroadcast((0, False), (1, True),
                                       (2, False))(tt.as_tensor_variable(x_np))
    x_fg = theano.gof.FunctionGraph([], [x])

    compare_jax_and_py(x_fg, [])

    with theano.change_flags(compute_test_value="off"):
        x = theano.compile.ops.Rebroadcast(
            (0, True), (1, False), (2, False))(tt.as_tensor_variable(x_np))
        x_fg = theano.gof.FunctionGraph([], [x])

        with pytest.raises(ValueError):
            compare_jax_and_py(x_fg, [])

    x = theano.compile.ops.ViewOp()(tt.as_tensor_variable(x_np))
    x_fg = theano.gof.FunctionGraph([], [x])

    compare_jax_and_py(x_fg, [])
Example #2
def test_badoptimization_opt_err():
    # This variant of test_badoptimization() replaces the working code
    # with a new apply node that will raise an error.
    @gof.local_optimizer([theano.tensor.add])
    def insert_bigger_b_add(node):
        if node.op == theano.tensor.add:
            inputs = list(node.inputs)
            if inputs[-1].owner is None:
                inputs[-1] = theano.tensor.concatenate(
                    (inputs[-1], inputs[-1]))
                return [node.op(*inputs)]
        return False

    @gof.local_optimizer([theano.tensor.add])
    def insert_bad_dtype(node):
        if node.op == theano.tensor.add:
            inputs = list(node.inputs)
            if inputs[-1].owner is None:

                return [node.outputs[0].astype("float32")]
        return False

    edb = gof.EquilibriumDB()
    edb.register("insert_bigger_b_add", insert_bigger_b_add, "all")
    opt = edb.query("+all")
    edb2 = gof.EquilibriumDB()
    edb2.register("insert_bad_dtype", insert_bad_dtype, "all")
    opt2 = edb2.query("+all")

    a = theano.tensor.dvector()
    b = theano.tensor.dvector()

    f = theano.function([a, b], a + b, mode=debugmode.DebugMode(optimizer=opt))
    with pytest.raises(ValueError, match=r"insert_bigger_b_add"):
        f(
            [1.0, 2.0, 3.0],
            [2, 3, 4],
        )

    # Test that an opt that makes an illegal change still gets the error from gof.
    with pytest.raises(theano.gof.toolbox.BadOptimization,
                       match=r"insert_bad_dtype") as einfo:
        with theano.change_flags(on_opt_error="raise"):
            f2 = theano.function(
                [a, b],
                a + b,
                mode=debugmode.DebugMode(optimizer=opt2, stability_patience=1),
            )
        f2(
            [1.0, 2.0, 3.0],
            [2, 3, 4],
        )

    # Test that we can reraise the error with an extended message
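    # (`reraise` here presumably comes from six, for Python 2/3 compatibility)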
    with pytest.raises(theano.gof.toolbox.BadOptimization):
        e = einfo.value
        new_e = e.__class__("TTT" + str(e))
        exc_type, exc_value, exc_trace = sys.exc_info()
        exc_value = new_e
        reraise(e.__class__, exc_value, exc_trace)
Example #3
    def optimizer_3d(self,
                     input_shapes,
                     direction,
                     include_tags,
                     exclude_tags,
                     op,
                     border_mode='valid',
                     subsample=(1, 1, 1),
                     filter_dilation=(1, 1, 1)):
        inp1 = theano.shared(
            np.random.random(input_shapes[0]).astype(theano.config.floatX))
        inp2 = theano.shared(
            np.random.random(input_shapes[1]).astype(theano.config.floatX))
        if direction == 0:
            conv_op = abstract_conv.conv3d(inp1,
                                           inp2,
                                           input_shapes[0],
                                           input_shapes[1],
                                           border_mode=border_mode,
                                           subsample=subsample,
                                           filter_dilation=filter_dilation)

        if direction == 1:
            conv_op = abstract_conv.conv3d_grad_wrt_weights(
                inp1,
                inp2,
                input_shapes[2],
                input_shapes[0],
                border_mode=border_mode,
                subsample=subsample,
                filter_dilation=filter_dilation)

        if direction == 2:
            conv_op = abstract_conv.conv3d_grad_wrt_inputs(
                inp1,
                inp2,
                input_shapes[2],
                input_shapes[1],
                border_mode=border_mode,
                subsample=subsample,
                filter_dilation=filter_dilation)

        theano.config.metaopt.optimizer_including = include_tags
        theano.config.metaopt.optimizer_excluding = exclude_tags
        mode = mode_with_gpu.including('conv_meta')

        ref_func = theano.function([], conv_op, mode=mode_with_gpu)
        # The meta-optimizer compiles a new function. It needs to know
        # the current linker, but that information is not available,
        # so it uses the default mode.
        with theano.change_flags(mode=mode):
            conv_func = theano.function([], conv_op, mode=mode)
        if op is not None:
            assert any(
                isinstance(node.op, op)
                for node in conv_func.maker.fgraph.toposort()
            )
        utt.assert_allclose(conv_func(), ref_func())
Example #4
    def test_lifter_with_shared_var(self):
        x = tensor.lscalar('x')
        y = gpuarray_shared_constructor(np.asarray(1, dtype='float32'),
                                        target=test_ctx_name)
        z = tensor.constant(2.)

        a = theano.ifelse.ifelse(x, y, z)
        with theano.change_flags(on_opt_error='raise'):
            theano.function([x], [a], mode=mode_with_gpu)
Example #5
def test_filter_float_subclass():
    """Make sure `TensorType.filter` can handle `float` subclasses."""
    with change_flags(floatX="float64"):
        test_type = TensorType("float64", broadcastable=[])

        nan = np.array([np.nan], dtype="float64")[0]
        assert isinstance(nan, float) and not isinstance(nan, np.ndarray)

        filtered_nan = test_type.filter(nan)
        assert isinstance(filtered_nan, np.ndarray)

    with change_flags(floatX="float32"):
        # Try again, except this time `nan` isn't a `float`
        test_type = TensorType("float32", broadcastable=[])

        nan = np.array([np.nan], dtype="float32")[0]
        assert isinstance(nan, np.floating) and not isinstance(nan, np.ndarray)

        filtered_nan = test_type.filter(nan)
        assert isinstance(filtered_nan, np.ndarray)
Example #6
def test_filter_float_subclass():
    """Make sure `Scalar.filter` can handle `float` subclasses."""
    with change_flags(floatX="float64"):
        test_type = Scalar("float64")

        nan = np.array([np.nan], dtype="float64")[0]
        assert isinstance(nan, float)

        filtered_nan = test_type.filter(nan)
        assert isinstance(filtered_nan, float)

    with change_flags(floatX="float32"):
        # Try again, except this time `nan` isn't a `float`
        test_type = Scalar("float32")

        nan = np.array([np.nan], dtype="float32")[0]
        assert isinstance(nan, np.floating)

        filtered_nan = test_type.filter(nan)
        assert isinstance(filtered_nan, np.floating)
Example #7
def test_overflow_cpu():
    # run with THEANO_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32
    rng = MRG_RandomStreams(np.random.randint(1234))
    fct = rng.uniform
    with change_flags(compute_test_value='off'):
        # should raise error as the size overflows
        sizes = [(2**31, ), (2**32, ), (2**15, 2**16,), (2, 2**15, 2**15)]
        rng_mrg_overflow(sizes, fct, config.mode, should_raise_error=True)
    # should not raise error
    sizes = [(2**5, ), (2**5, 2**5), (2**5, 2**5, 2**5)]
    rng_mrg_overflow(sizes, fct, config.mode, should_raise_error=False)
    # should support int32 sizes
    sizes = [(np.int32(2**10), ),
             (np.int32(2), np.int32(2**10), np.int32(2**10))]
    rng_mrg_overflow(sizes, fct, config.mode, should_raise_error=False)
Example #8
    def _get_func(self):
        """
        Return a function that makes a value from an integer.

        The integer value is assumed to be a valid pointer for the
        type and no check is done to ensure that.
        """
        from theano.scalar import get_scalar_type

        if self._fn is None:
            with change_flags(compute_test_value='off'):
                v = get_scalar_type('int64')()
                self._fn = theano.function([v], _make_cdata(self)(v),
                                           mode=theano.Mode(optimizer=None),
                                           profile=False)
        return self._fn
Example #9
def test_RandomVariable_floatX():
    test_rv_op = RandomVariable(
        "normal",
        0,
        [0, 0],
        "floatX",
        inplace=True,
    )

    assert test_rv_op.dtype == "floatX"

    assert test_rv_op(0, 1).dtype == config.floatX

    new_floatX = "float64" if config.floatX == "float32" else "float32"

    with change_flags(floatX=new_floatX):
        assert test_rv_op(0, 1).dtype == new_floatX
Example #10
def test_not_useless_scalar_gpuelemwise():
    # We don't want to move an elemwise on a scalar to the GPU when the
    # result will not be used on the GPU!

    with theano.change_flags(warn_float64='ignore'):
        X = tensor.fmatrix()
        x = np.random.randn(32, 32).astype(np.float32)
        m1 = theano.shared(np.random.randn(32, 32).astype(np.float32))
        loss = (X - tensor.dot(X, m1)).norm(L=2)
        lr = theano.shared(np.asarray(.001, dtype=np.float32))
        grad = tensor.grad(loss, m1)

        train = theano.function(inputs=[X], updates=[(m1, m1 - lr * grad)],
                                mode=mode_with_gpu)
        train(x)
        topo = train.maker.fgraph.toposort()
        gemms = [app for app in topo if isinstance(app.op, GpuGemm)]
        assert len(gemms) == 2
        assert isinstance(gemms[1].inputs[1].owner.op, tensor.Elemwise)
Example #11
def set_theano_flags():
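    # Presumably a pytest fixture body (the @pytest.fixture decorator is not
    # part of this snippet); cxx="" disables the C compiler entirely.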
    with theano.change_flags(cxx="", compute_test_value="ignore"):
        yield
Example #12
def set_theano_flags():
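    # Fixture body as above, but compute_test_value="raise" turns a missing
    # test value into an error at graph-construction time.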
    with change_flags(cxx="", compute_test_value="raise"):
        yield
Example #13
def test_validate_input_types_gpuarray_backend():
    with change_flags(compute_test_value="raise"):
        rstate = np.zeros((7, 6), dtype="int32")
        rstate = gpuarray_shared_constructor(rstate)
        rng_mrg.mrg_uniform.new(rstate, ndim=None, dtype="float32", size=(3, ))
Example #14
def test_badoptimization_opt_err():
    # This variant of test_badoptimization() replaces the working code
    # with a new apply node that will raise an error.
    @gof.local_optimizer([theano.tensor.add])
    def insert_bigger_b_add(node):
        if node.op == theano.tensor.add:
            inputs = list(node.inputs)
            if inputs[-1].owner is None:
                inputs[-1] = theano.tensor.concatenate(
                    (inputs[-1], inputs[-1]))
                return [node.op(*inputs)]
        return False

    @gof.local_optimizer([theano.tensor.add])
    def insert_bad_dtype(node):
        if node.op == theano.tensor.add:
            inputs = list(node.inputs)
            if inputs[-1].owner is None:

                return [node.outputs[0].astype('float32')]
        return False

    edb = gof.EquilibriumDB()
    edb.register('insert_bigger_b_add', insert_bigger_b_add, 'all')
    opt = edb.query('+all')
    edb2 = gof.EquilibriumDB()
    edb2.register('insert_bad_dtype', insert_bad_dtype, 'all')
    opt2 = edb2.query('+all')

    a = theano.tensor.dvector()
    b = theano.tensor.dvector()

    f = theano.function([a, b], a + b, mode=debugmode.DebugMode(optimizer=opt))
    try:
        f(
            [1.0, 2.0, 3.0],
            [2, 3, 4],
        )
    except ValueError as e:
        assert 'insert_bigger_b_add' in exc_message(e)
    else:
        assert False

    # Test that an opt that makes an illegal change still gets the error from gof.
    try:
        with theano.change_flags(on_opt_error='raise'):
            f2 = theano.function([a, b],
                                 a + b,
                                 mode=debugmode.DebugMode(
                                     optimizer=opt2, stability_patience=1))
        f2(
            [1.0, 2.0, 3.0],
            [2, 3, 4],
        )
    except theano.gof.toolbox.BadOptimization as e:
        assert 'insert_bad_dtype' in str(e)
        # Test that we can reraise the error with an extended message
        try:
            new_e = e.__class__("TTT" + str(e))
            exc_type, exc_value, exc_trace = sys.exc_info()
            exc_value = new_e
            reraise(e.__class__, exc_value, exc_trace)
        except theano.gof.toolbox.BadOptimization as e:
            pass
        else:
            assert False
    else:
        assert False
Example #15
class GeneratorOp(Op):
    """
    Generator Op is designed for storing python generators inside theano graph.

    __call__ creates TensorVariable
        It has 2 new methods
        - var.set_gen(gen): sets new generator
        - var.set_default(value): sets new default value (None erases default value)

    If generator is exhausted, variable will produce default value if it is not None,
    else raises `StopIteration` exception that can be caught on runtime.

    Parameters
    ----------
    gen: generator that implements __next__ (py3) or next (py2) method
        and yields np.arrays with same types
    default: np.array with the same type as generator produces
    """

    __props__ = ("generator", )

    def __init__(self, gen, default=None):
        super().__init__()
        if not isinstance(gen, GeneratorAdapter):
            gen = GeneratorAdapter(gen)
        self.generator = gen
        self.set_default(default)

    def make_node(self, *inputs):
        gen_var = self.generator.make_variable(self)
        return theano.Apply(self, [], [gen_var])

    def perform(self, node, inputs, output_storage, params=None):
        if self.default is not None:
            output_storage[0][0] = next(self.generator, self.default)
        else:
            output_storage[0][0] = next(self.generator)

    def do_constant_folding(self, node):
        return False

    __call__ = change_flags(compute_test_value="off")(Op.__call__)

    def set_gen(self, gen):
        if not isinstance(gen, GeneratorAdapter):
            gen = GeneratorAdapter(gen)
        if not gen.tensortype == self.generator.tensortype:
            raise ValueError("New generator should yield the same type")
        self.generator = gen

    def set_default(self, value):
        if value is None:
            self.default = None
        else:
            value = np.asarray(value, self.generator.tensortype.dtype)
            t1 = (False, ) * value.ndim
            t2 = self.generator.tensortype.broadcastable
            if not t1 == t2:
                raise ValueError(
                    "Default value should have the same type as generator")
            self.default = value
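
For orientation, here is a minimal usage sketch (not from the source): it assumes GeneratorOp and the GeneratorAdapter it references are importable as defined above, and uses a hypothetical data_batches generator; set_gen/set_default are called on the Op instance, where this snippet defines them.

import numpy as np
import theano

def data_batches():
    # hypothetical generator yielding fixed-shape floatX batches
    while True:
        yield np.random.randn(10).astype(theano.config.floatX)

op = GeneratorOp(data_batches())
x = op()                              # __call__ builds the TensorVariable
f = theano.function([], x.sum())
f()                                   # each call consumes one yielded batch
f()

op.set_gen(data_batches())            # swap in a generator of the same type
op.set_default(np.zeros(10, dtype=theano.config.floatX))  # used once exhausted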
Example #16
def test_logp():

    hmm_model_env = create_test_hmm()
    M_tt = hmm_model_env["M_tt"]
    N_tt = hmm_model_env["N_tt"]
    mus_tt = hmm_model_env["mus_tt"]
    sigmas_tt = hmm_model_env["sigmas_tt"]
    Y_rv = hmm_model_env["Y_rv"]
    S_rv = hmm_model_env["S_rv"]
    S_in = hmm_model_env["S_in"]
    Gamma_rv = hmm_model_env["Gamma_rv"]
    rng_tt = hmm_model_env["rng_tt"]

    Y_obs = Y_rv.clone()
    Y_obs.name = "Y_obs"
    # `S_in` includes `S_0_rv` (and `pi_0_rv`), unlike `S_rv`
    S_obs = S_in.clone()
    S_obs.name = "S_obs"
    Gamma_obs = Gamma_rv.clone()
    Gamma_obs.name = "Gamma_obs"

    test_point = {
        mus_tt: mus_tt.tag.test_value,
        N_tt: N_tt.tag.test_value,
        Gamma_obs: Gamma_rv.tag.test_value,
        Y_obs: Y_rv.tag.test_value,
        S_obs: S_in.tag.test_value,
    }

    def logp_scan_fn(s_t, s_tm1, y_t, mus_t, sigma_t, Gamma_t):
        gamma_t = Gamma_t[s_tm1]
        log_s_t = pm.Categorical.dist(gamma_t).logp(s_t)
        mu_t = mus_t[s_t]
        log_y_t = pm.Normal.dist(mu_t, sigma_t).logp(y_t)
        gamma_t.name = "gamma_t"
        log_y_t.name = "logp(y_t)"
        log_s_t.name = "logp(s_t)"
        mu_t.name = "mu[S_t]"
        return log_s_t, log_y_t

    (true_S_logp, true_Y_logp), scan_updates = theano.scan(
        fn=logp_scan_fn,
        sequences=[{
            "input": S_obs,
            "taps": [0, -1]
        }, Y_obs, mus_tt, sigmas_tt],
        non_sequences=[Gamma_obs],
        outputs_info=[{}, {}],
        strict=True,
        name="scan_rv",
    )

    # Make sure there are no `RandomVariable` nodes among our
    # expected/true log-likelihood graph.
    assert not vars_to_rvs(true_S_logp)
    assert not vars_to_rvs(true_Y_logp)

    true_S_logp_val = true_S_logp.eval(test_point)
    true_Y_logp_val = true_Y_logp.eval(test_point)

    #
    # Now, compute the log-likelihoods
    #
    logps = logp(Y_rv)

    S_logp = logps[S_in][1]
    Y_logp = logps[Y_rv][1]

    # from theano.printing import debugprint as tt_dprint

    # There shouldn't be any `RandomVariable`s here either
    assert not vars_to_rvs(S_logp[1])
    assert not vars_to_rvs(Y_logp[1])

    assert N_tt in tt_inputs([S_logp])
    assert mus_tt in tt_inputs([S_logp])
    assert logps[S_in][0] in tt_inputs([S_logp])
    assert logps[Y_rv][0] in tt_inputs([S_logp])
    assert logps[Gamma_rv][0] in tt_inputs([S_logp])

    new_test_point = {
        mus_tt: mus_tt.tag.test_value,
        N_tt: N_tt.tag.test_value,
        logps[Gamma_rv][0]: Gamma_rv.tag.test_value,
        logps[Y_rv][0]: Y_rv.tag.test_value,
        logps[S_in][0]: S_in.tag.test_value,
    }

    with theano.change_flags(on_unused_input="warn"):
        S_logp_val = S_logp.eval(new_test_point)
        Y_logp_val = Y_logp.eval(new_test_point)

    assert np.array_equal(true_S_logp_val, S_logp_val)
    assert np.array_equal(Y_logp_val, true_Y_logp_val)
Example #17
def test_SwitchingProcess():

    np.random.seed(2023532)

    test_states = np.r_[2, 0, 1, 2, 0, 1]
    test_dists = [
        pm.Constant.dist(0),
        pm.Poisson.dist(100.0),
        pm.Poisson.dist(1000.0)
    ]
    test_dist = SwitchingProcess.dist(test_dists, test_states)
    assert np.array_equal(test_dist.shape, test_states.shape)

    test_sample = test_dist.random()
    assert test_sample.shape == (test_states.shape[0], )
    assert np.all(test_sample[test_states == 0] == 0)
    assert np.all(0 < test_sample[test_states == 1])
    assert np.all(test_sample[test_states == 1] < 1000)
    assert np.all(100 < test_sample[test_states == 2])

    test_mus = np.r_[100, 100, 500, 100, 100, 100]
    test_dists = [
        pm.Constant.dist(0),
        pm.Poisson.dist(test_mus),
        pm.Poisson.dist(10000.0),
    ]
    test_dist = SwitchingProcess.dist(test_dists, test_states)
    assert np.array_equal(test_dist.shape, test_states.shape)

    test_sample = test_dist.random()
    assert test_sample.shape == (test_states.shape[0], )
    assert np.all(200 < test_sample[2] < 600)
    assert np.all(0 < test_sample[5] < 200)
    assert np.all(5000 < test_sample[test_states == 2])

    test_dists = [
        pm.Constant.dist(0),
        pm.Poisson.dist(100.0),
        pm.Poisson.dist(1000.0)
    ]
    test_dist = SwitchingProcess.dist(test_dists, test_states)
    for i in range(len(test_dists)):
        test_logp = test_dist.logp(
            np.tile(test_dists[i].mode.eval(), test_states.shape)).eval()
        assert test_logp[test_states != i].max() < test_logp[test_states == i].min()

    # Try a continuous mixture
    test_states = np.r_[2, 0, 1, 2, 0, 1]
    test_dists = [
        pm.Normal.dist(0.0, 1.0),
        pm.Normal.dist(100.0, 1.0),
        pm.Normal.dist(1000.0, 1.0),
    ]
    test_dist = SwitchingProcess.dist(test_dists, test_states)
    assert np.array_equal(test_dist.shape, test_states.shape)

    test_sample = test_dist.random()
    assert test_sample.shape == (test_states.shape[0], )
    assert np.all(test_sample[test_states == 0] < 10)
    assert np.all(50 < test_sample[test_states == 1])
    assert np.all(test_sample[test_states == 1] < 150)
    assert np.all(900 < test_sample[test_states == 2])

    # Make sure we can use a large number of distributions in the mixture
    test_states = np.ones(50)
    test_dists = [pm.Constant.dist(i) for i in range(50)]
    test_dist = SwitchingProcess.dist(test_dists, test_states)
    assert np.array_equal(test_dist.shape, test_states.shape)

    with pytest.raises(TypeError):
        SwitchingProcess.dist([1], test_states)

    with theano.change_flags(compute_test_value="off"):
        # Test for the case when a default can't be computed
        test_dist = pm.Poisson.dist(tt.scalar())

        # Confirm that there's no default
        with pytest.raises(AttributeError):
            test_dist.default()

        # Let it try to sample using `Distribution.random` and fail
        with pytest.raises(ValueError):
            SwitchingProcess.dist([test_dist], test_states)

    # Evaluate multiple observed state sequences in an extreme case
    test_states = tt.imatrix("states")
    test_states.tag.test_value = np.zeros((10, 4)).astype("int32")
    test_dist = SwitchingProcess.dist(
        [pm.Constant.dist(0), pm.Constant.dist(1)], test_states)
    test_obs = np.tile(np.arange(4), (10, 1)).astype("int32")
    test_logp = test_dist.logp(test_obs)
    exp_logp = np.tile(
        np.array([0.0] + [-np.inf] * 3, dtype=theano.config.floatX), (10, 1))
    assert np.array_equal(test_logp.tag.test_value, exp_logp)
Example #18
def set_theano_flags():
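    # Fixture body (decorator not shown); every graph built inside the test
    # must then be able to compute test values, or an error is raised.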
    with theano.change_flags(compute_test_value="raise"):
        yield