def test_badoptimization_opt_err():
    # This variant of test_badoptimization() replaces the working code
    # with a new apply node that will raise an error.
    @gof.local_optimizer([theano.tensor.add])
    def insert_bigger_b_add(fgraph, node):
        if node.op == theano.tensor.add:
            inputs = list(node.inputs)
            if inputs[-1].owner is None:
                inputs[-1] = theano.tensor.concatenate((inputs[-1], inputs[-1]))
                return [node.op(*inputs)]
        return False

    @gof.local_optimizer([theano.tensor.add])
    def insert_bad_dtype(fgraph, node):
        if node.op == theano.tensor.add:
            inputs = list(node.inputs)
            if inputs[-1].owner is None:
                return [node.outputs[0].astype("float32")]
        return False

    edb = gof.EquilibriumDB()
    edb.register("insert_bigger_b_add", insert_bigger_b_add, "all")
    opt = edb.query("+all")

    edb2 = gof.EquilibriumDB()
    edb2.register("insert_bad_dtype", insert_bad_dtype, "all")
    opt2 = edb2.query("+all")

    a = theano.tensor.dvector()
    b = theano.tensor.dvector()

    f = theano.function([a, b], a + b, mode=debugmode.DebugMode(optimizer=opt))
    with pytest.raises(ValueError, match=r"insert_bigger_b_add"):
        f([1.0, 2.0, 3.0], [2, 3, 4])

    # Test that an optimization that makes an illegal change still gets the
    # error reported by gof.
    with pytest.raises(
        theano.gof.toolbox.BadOptimization, match=r"insert_bad_dtype"
    ) as einfo:
        with config.change_flags(on_opt_error="raise"):
            f2 = theano.function(
                [a, b],
                a + b,
                mode=debugmode.DebugMode(optimizer=opt2, stability_patience=1),
            )
        f2([1.0, 2.0, 3.0], [2, 3, 4])

    # Test that we can re-raise the error with an extended message
    with pytest.raises(theano.gof.toolbox.BadOptimization):
        e = einfo.value
        new_e = e.__class__("TTT" + str(e))
        exc_type, exc_value, exc_trace = sys.exc_info()
        exc_value = new_e
        raise exc_value.with_traceback(exc_trace)
def test_get_substream_rstates():
    with config.change_flags(compute_test_value="raise"):
        n_streams = 100
        dtype = "float32"
        rng = MRG_RandomStream(np.random.randint(2147462579))
        rng.get_substream_rstates(n_streams, dtype)
def test_identical_constant_args(self):
    x = MyVariable("x")
    y = Constant(MyType(), 2, name="y")
    z = Constant(MyType(), 2, name="z")
    with config.change_flags(compute_test_value="off"):
        e1 = op1(y, z)
    g = FunctionGraph([x, y, z], [e1])
    MergeOptimizer().optimize(g)
    strg = str(g)
    assert strg == "FunctionGraph(Op1(y, y))" or strg == "FunctionGraph(Op1(z, z))"
def test_filter_float_subclass():
    """Make sure `Scalar.filter` can handle `float` subclasses."""
    with config.change_flags(floatX="float64"):
        test_type = Scalar("float64")

        nan = np.array([np.nan], dtype="float64")[0]
        assert isinstance(nan, float)

        filtered_nan = test_type.filter(nan)
        assert isinstance(filtered_nan, float)

    with config.change_flags(floatX="float32"):
        # Try again, except this time `nan` isn't a `float`
        test_type = Scalar("float32")

        nan = np.array([np.nan], dtype="float32")[0]
        assert isinstance(nan, np.floating)

        filtered_nan = test_type.filter(nan)
        assert isinstance(filtered_nan, np.floating)
def test_filter_float_subclass():
    """Make sure `TensorType.filter` can handle `float` subclasses."""
    with config.change_flags(floatX="float64"):
        test_type = TensorType("float64", broadcastable=[])

        nan = np.array([np.nan], dtype="float64")[0]
        # `np.float64` scalars are `float` subclasses, not `ndarray`s
        assert isinstance(nan, float) and not isinstance(nan, np.ndarray)

        filtered_nan = test_type.filter(nan)
        assert isinstance(filtered_nan, np.ndarray)

    with config.change_flags(floatX="float32"):
        # Try again, except this time `nan` isn't a `float`
        test_type = TensorType("float32", broadcastable=[])

        nan = np.array([np.nan], dtype="float32")[0]
        assert isinstance(nan, np.floating) and not isinstance(nan, np.ndarray)

        filtered_nan = test_type.filter(nan)
        assert isinstance(filtered_nan, np.ndarray)
def test_compute_flag(self):
    x = tt.matrix("x")
    y = tt.matrix("y")
    y.tag.test_value = np.random.rand(4, 5).astype(config.floatX)

    # should skip computation of test value
    with config.change_flags(compute_test_value="off"):
        z = tt.dot(x, y)
        assert not hasattr(z.tag, "test_value")

    # should fail when asked by user
    with pytest.raises(ValueError), config.change_flags(compute_test_value="raise"):
        tt.dot(x, y)

    # test that a warning is raised if required
    with warnings.catch_warnings(), config.change_flags(compute_test_value="warn"):
        warnings.simplefilter("error", UserWarning)
        with pytest.raises(UserWarning):
            tt.dot(x, y)
def test_VMLinker_no_cxx():
    from importlib import reload
    from unittest.mock import patch

    with config.change_flags(cxx=""):

        with pytest.raises(MissingGXX):
            import theano.link.c.cvm

            reload(theano.link.c.cvm)

        with patch.dict("sys.modules", {"theano.link.c.cvm": None}):
            linker = VMLinker(allow_gc=False, use_cloop=True)
            a = tensor.scalar()
            with pytest.raises(ModuleNotFoundError):
                _ = function([a], a, mode=Mode(optimizer=None, linker=linker))
def _get_func(self):
    """
    Return a function that makes a value from an integer.

    The integer value is assumed to be a valid pointer for the
    type and no check is done to ensure that.
    """
    from theano.scalar import get_scalar_type

    if self._fn is None:
        with config.change_flags(compute_test_value="off"):
            v = get_scalar_type("int64")()
            self._fn = theano.function(
                [v],
                _make_cdata(self)(v),
                mode=theano.Mode(optimizer=None),
                profile=False,
            )
    return self._fn
def test_get_test_values_success():
    """Tests that `get_test_values` returns values when available (and the debugger is on)."""

    for mode in ["ignore", "warn", "raise"]:
        with config.change_flags(compute_test_value=mode):
            x = tt.vector()
            x.tag.test_value = np.zeros((4,), dtype=config.floatX)
            y = np.zeros((5, 5))

            iters = 0

            for x_val, y_val in op.get_test_values(x, y):
                assert x_val.shape == (4,)
                assert y_val.shape == (5, 5)
                iters += 1

            assert iters == 1
def test_overflow_cpu():
    # run with THEANO_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32
    rng = MRG_RandomStream(np.random.randint(1234))
    fct = rng.uniform
    with config.change_flags(compute_test_value="off"):
        # should raise error as the size overflows
        sizes = [
            (2 ** 31,),
            (2 ** 32,),
            (2 ** 15, 2 ** 16),
            (2, 2 ** 15, 2 ** 15),
        ]
        rng_mrg_overflow(sizes, fct, config.mode, should_raise_error=True)

    # should not raise error
    sizes = [(2 ** 5,), (2 ** 5, 2 ** 5), (2 ** 5, 2 ** 5, 2 ** 5)]
    rng_mrg_overflow(sizes, fct, config.mode, should_raise_error=False)

    # should support int32 sizes
    sizes = [(np.int32(2 ** 10),), (np.int32(2), np.int32(2 ** 10), np.int32(2 ** 10))]
    rng_mrg_overflow(sizes, fct, config.mode, should_raise_error=False)
def test_validate_input_types_gpuarray_backend():
    with config.change_flags(compute_test_value="raise"):
        rstate = np.zeros((7, 6), dtype="int32")
        rstate = gpuarray_shared_constructor(rstate)
        rng_mrg.mrg_uniform.new(rstate, ndim=None, dtype="float32", size=(3,))
if size != 2:
    stderr.write(
        "mpiexec failed to create a world with two nodes.\n"
        "Closing with success message."
    )
    stdout.write("True")
    exit(0)

shape = (2, 2)
dtype = "float32"

scheduler = sort_schedule_fn(*mpi_cmps)
mode = theano.Mode(optimizer=None, linker=theano.OpWiseCLinker(schedule=scheduler))

with config.change_flags(compute_test_value="off"):
    if rank == 0:
        x = theano.tensor.matrix("x", dtype=dtype)
        y = x + 1
        send_request = send(y, 1, 11)

        z = recv(shape, dtype, 1, 12)

        f = theano.function([x], [send_request, z], mode=mode)

        xx = np.random.rand(*shape).astype(dtype)
        expected = (xx + 1) * 2

        _, zz = f(xx)

        same = np.linalg.norm(zz - expected) < 0.001
def set_theano_flags():
    with config.change_flags(compute_test_value="raise"):
        yield