    def test_default_dtype(self):
        random = RandomStreams(utt.fetch_seed())
        low = tensor.dscalar()
        high = tensor.dscalar()

        # Should not silently downcast from low and high
        out0 = random.uniform(low=low, high=high, size=(42, ))
        assert out0.dtype == "float64"
        f0 = function([low, high], out0)
        val0 = f0(-2.1, 3.1)
        assert val0.dtype == "float64"

        # Should downcast, since asked explicitly
        out1 = random.uniform(low=low, high=high, size=(42, ), dtype="float32")
        assert out1.dtype == "float32"
        f1 = function([low, high], out1)
        val1 = f1(-1.1, 1.1)
        assert val1.dtype == "float32"

        # Should use floatX
        lowf = tensor.fscalar()
        highf = tensor.fscalar()
        outf = random.uniform(low=lowf, high=highf, size=(42, ))
        assert outf.dtype == config.floatX
        ff = function([lowf, highf], outf)
        valf = ff(np.float32(-0.1), np.float32(0.3))
        assert valf.dtype == config.floatX
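For reference, the dtype rule this test pins down can be demonstrated outside the test harness. A minimal standalone sketch, assuming the Theano-era import paths (the RandomStreams location differs in Aesara); the names are illustrative only:

import theano
import theano.tensor as tensor
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=234)
low, high = tensor.dscalars("low", "high")
# dtype is inferred from low/high unless passed explicitly
draw = srng.uniform(low=low, high=high, size=(3,))
assert draw.dtype == "float64"
sample = theano.function([low, high], draw)(-1.0, 1.0)
assert sample.dtype == "float64"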
Example #2
    def test_param_allow_downcast_floatX(self):
        a = tensor.fscalar("a")
        b = tensor.fscalar("b")
        c = tensor.fscalar("c")

        f = pfunc(
            [
                In(a, allow_downcast=True),
                In(b, allow_downcast=False),
                In(c, allow_downcast=None),
            ],
            (a + b + c),
        )

        # If the values can be accurately represented, everything is OK
        assert np.all(f(0, 0, 0) == 0)

        # If allow_downcast is True, idem
        assert np.allclose(f(0.1, 0, 0), 0.1)

        # If allow_downcast is False, nope
        with pytest.raises(TypeError):
            f(0, 0.1, 0)

        # If allow_downcast is None, it should work iff floatX=float32
        if config.floatX == "float32":
            assert np.allclose(f(0, 0, 0.1), 0.1)
        else:
            with pytest.raises(TypeError):
                f(0, 0, 0.1)
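The same per-input behaviour can be shown in isolation. A minimal sketch, assuming theano.function accepts In(...) wrappers the same way pfunc does in Theano-era releases; the names are illustrative:

import theano
import theano.tensor as tensor
from theano import In

x = tensor.fscalar("x")
loose = theano.function([In(x, allow_downcast=True)], x * 2)
strict = theano.function([In(x, allow_downcast=False)], x * 2)

loose(0.1)   # lossy downcast float64 -> float32 is accepted
strict(0)    # fine: 0 is exactly representable as float32
try:
    strict(0.1)  # rejected: 0.1 would lose precision in float32
except TypeError:
    pass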
Example #3
    def test_copy_delete_updates(self):
        w = tt.iscalar("w")
        x = tt.fscalar("x")
        # Shared variables for the test; one of them will receive an update
        y = aesara.shared(value=1, name="y")
        z = aesara.shared(value=2, name="z")
        out = x + y + z

        # Test for different linkers
        for mode in ["FAST_RUN", "FAST_COMPILE"]:
            ori = aesara.function([x], out, mode=mode, updates={z: z * 2})
            cpy = ori.copy(delete_updates=True)

            # With the update deleted, z stays at 2, so the result is always
            # 1 (x) + 1 (y) + 2 (z) == 4, however many times the copy is called
            assert cpy(1) == 4
            assert cpy(1) == 4
            assert cpy(1) == 4

        # Check that inputs left unused after delete_updates (implicit as
        # well as explicit ones) are ignored: copying here must not raise.
        for mode in ["FAST_RUN", "FAST_COMPILE"]:
            ori = aesara.function([x], x, mode=mode, updates={z: z * 2})
            cpy = ori.copy(delete_updates=True)

            ori = aesara.function([x, w], x, mode=mode, updates={z: z + w})
            cpy = ori.copy(delete_updates=True)
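Outside the test, delete_updates gives a "frozen" view of a stateful function. A short sketch under the same Aesara API used above (acc and step are illustrative names):

import aesara
import aesara.tensor as tt

x = tt.fscalar("x")
acc = aesara.shared(0.0, name="acc")
step = aesara.function([x], acc + x, updates={acc: acc + x})
frozen = step.copy(delete_updates=True)

frozen(1.0)  # returns 1.0 and leaves acc at 0.0
frozen(1.0)  # still 1.0: the copy never applies the update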
Example #4
    def test_copy_share_memory(self):
        x = tt.fscalar("x")
        # Shared variables for the test; one of them will receive an update
        y = aesara.shared(value=1)
        z = aesara.shared(value=2)
        out = tt.tanh((x + y + 2) / (x + z - 0.2) ** 2)

        # Test for different linkers
        for mode in ["FAST_RUN", "FAST_COMPILE"]:
            ori = aesara.function([x], [out], mode=mode, updates={z: z + 1})
            cpy = ori.copy(share_memory=True)

            # Check that storage is shared between the two functions
            storage_map_ori = ori.fn.storage_map
            storage_map_cpy = cpy.fn.storage_map
            fgraph_cpy = cpy.maker.fgraph

            # Assert that intermediate and constant storage is shared
            # (input and output storage is not shared)
            i_o_variables = fgraph_cpy.inputs + fgraph_cpy.outputs
            ori_storages = storage_map_ori.values()
            shared_storages = [
                val
                for key, val in storage_map_cpy.items()
                if key not in i_o_variables or isinstance(key, tt.Constant)
            ]
            for storage in shared_storages:
                assert any(storage is s for s in ori_storages)

            # Assert that storage for shared variables without updates is shared
            for (inp, _1, _2), here, there in zip(
                ori.indices, ori.input_storage, cpy.input_storage
            ):
                assert here.data is there.data
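In short, share_memory=True makes copying cheap by reusing the original's intermediate buffers. A minimal sketch under the same assumptions as the test (with the caveat, implied by the shared storage, that the two functions should not run concurrently):

import aesara
import aesara.tensor as tt

x = tt.fvector("x")
f = aesara.function([x], tt.tanh(x) ** 2)
g = f.copy(share_memory=True)
# f and g compute the same graph; intermediate storage is shared,
# so calling both at the same time (e.g. from threads) is unsafe.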
Example #5
    def test_allow_downcast_floatX(self):
        a = tensor.fscalar("a")
        b = tensor.fvector("b")

        f = pfunc([a, b], (a + b), allow_input_downcast=True)
        g = pfunc([a, b], (a + b), allow_input_downcast=False)
        h = pfunc([a, b], (a + b), allow_input_downcast=None)

        # If the values can be accurately represented, OK
        assert np.all(f(0, [0]) == 0)
        assert np.all(g(0, [0]) == 0)
        assert np.all(h(0, [0]) == 0)

        # For the vector: OK iff allow_input_downcast is True
        assert np.allclose(f(0, [0.1]), 0.1)
        with pytest.raises(TypeError):
            g(0, [0.1])
        with pytest.raises(TypeError):
            h(0, [0.1])

        # For the scalar: OK if allow_input_downcast is True,
        # or None and floatX==float32
        assert np.allclose(f(0.1, [0]), 0.1)
        with pytest.raises(TypeError):
            g(0.1, [0])
        if config.floatX == "float32":
            assert np.allclose(h(0.1, [0]), 0.1)
        else:
            with pytest.raises(TypeError):
                h(0.1, [0])
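allow_input_downcast is the function-wide counterpart of the per-input In(..., allow_downcast=...) flag shown earlier. A condensed sketch, assuming aesara.function accepts the flag the same way pfunc does:

import aesara
import aesara.tensor as tt

v = tt.fvector("v")
lossy = aesara.function([v], v.sum(), allow_input_downcast=True)
exact = aesara.function([v], v.sum(), allow_input_downcast=False)

lossy([0.1, 0.2])  # float64 entries are silently cast to float32
try:
    exact([0.1, 0.2])  # rejected: 0.1 would lose precision in float32
except TypeError:
    pass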
Example #6
    def setup_method(self):
        super().setup_method()

        # Sample computation that involves tensors with different numbers
        # of dimensions
        self.input1 = tt.fmatrix()
        self.input2 = tt.fscalar()
        self.output = tt.dot((self.input1 - self.input2),
                             (self.input1 - self.input2).transpose())

        # Declare the conditional breakpoint
        self.breakpointOp = PdbBreakpoint("Sum of output too high")
        self.condition = tt.gt(self.output.sum(), 1000)
        (
            self.monitored_input1,
            self.monitored_input2,
            self.monitored_output,
        ) = self.breakpointOp(self.condition, self.input1, self.input2,
                              self.output)
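For context, PdbBreakpoint is an identity-like op: it forwards each monitored variable unchanged, and drops into pdb with those values bound whenever the condition evaluates truthy. A minimal sketch, assuming the historical theano.breakpoint location of the op (the module path varies between Theano and Aesara releases):

import numpy as np
import theano
import theano.tensor as tt
from theano.breakpoint import PdbBreakpoint  # location is release-dependent

x = tt.fmatrix("x")
bp = PdbBreakpoint("Sum of x too high")
condition = tt.gt(x.sum(), 1000)
monitored_x = bp(condition, x)  # one pass-through output per monitored input
f = theano.function([x], monitored_x * 2)
f(np.zeros((2, 2), dtype="float32"))  # condition is False: pdb is never entered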