Example #1
def test_deprecated_kwargs():
    with pytest.warns(DeprecationWarning, match=".*broadcastable.*"):
        res = aesara.shared(np.array([[1.0]]), broadcastable=(True, False))

    assert res.type.shape == (1, None)
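
A non-deprecated spelling of the same construction, a minimal sketch assuming the shared tensor constructor accepts a `shape` keyword (which is what the deprecation message above points to):

import numpy as np
import aesara

# Assumption: recent Aesara takes `shape` in place of the deprecated
# `broadcastable` keyword; (1, None) corresponds to broadcastable=(True, False).
res = aesara.shared(np.array([[1.0]]), shape=(1, None))
assert res.type.shape == (1, None)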
Example #2
 def test_explicit_shared_input(self):
     # This is not a test of the In class per se, but the In class relies
     # on the fact that shared variables cannot be explicit inputs
     a = aesara.shared(1.0)
     with pytest.raises(TypeError):
         aesara.function([a], a + 1)
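
For contrast, the supported pattern: a shared variable is an implicit input, so it is simply referenced in the graph rather than listed in the input list. A minimal sketch:

import aesara

a = aesara.shared(1.0)
# No explicit inputs are needed; the shared variable's current value is used.
f = aesara.function([], a + 1)
assert f() == 2.0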
Example #3
def test_scalar_shared_options():
    # Simple test to make sure we do not lose that functionality.
    aesara.shared(value=0.0, name="lk", borrow=True)
    aesara.shared(value=np.float32(0.0), name="lk", borrow=True)
Example #4
def test_get_vector_length():
    x = aesara.shared(np.array((2, 3, 4, 5)))
    assert get_vector_length(x) == 4
Example #5
 def test_setsubtensor2(self):
     tv = np.asarray(self.rng.uniform(size=(10, )), aesara.config.floatX)
     t = aesara.shared(tv)
     out = aesara.tensor.subtensor.set_subtensor(t[:4], self.x[:4])
     self.check_rop_lop(out, (10, ))
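
The same set_subtensor call outside the Rop/Lop fixture, as a minimal self-contained sketch (the names t and x below are illustrative, not the fixture's):

import numpy as np
import aesara
import aesara.tensor as at
from aesara.tensor.subtensor import set_subtensor

t = aesara.shared(np.zeros(10, dtype=aesara.config.floatX))
x = at.vector("x")
# set_subtensor is functional: it returns a new variable in which t[:4]
# has been replaced by x[:4]; the shared variable t itself is not modified.
out = set_subtensor(t[:4], x[:4])
f = aesara.function([x], out)
result = f(np.arange(10).astype(x.dtype))
assert np.array_equal(result, [0, 1, 2, 3, 0, 0, 0, 0, 0, 0])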
Example #6
def test_compile_pymc_with_updates():
    x = aesara.shared(0)
    f = compile_pymc([], x, updates={x: x + 1})
    assert f() == 0
    assert f() == 1
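
compile_pymc is a PyMC wrapper; the update semantics exercised here are those of aesara.function itself: outputs are computed from the value a shared variable holds before the update, and the update is applied afterwards. A minimal sketch without the PyMC layer:

import aesara

x = aesara.shared(0)
f = aesara.function([], x, updates={x: x + 1})
assert f() == 0              # value before the update
assert f() == 1
assert x.get_value() == 2    # the update has been applied twice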
Example #7
    def test_swap_SharedVariable(self):
        i = tt.iscalar()
        x_list = aesara.shared(value=np.random.rand(10).astype(config.floatX))

        x = tt.scalar("x")
        # SharedVariables for the test; z and m get updates below
        y = aesara.shared(value=1, name="y")
        z = aesara.shared(value=2, name="z")
        m = aesara.shared(value=0, name="m")

        # SharedVariables used as replacements
        y_rpl = aesara.shared(value=3, name="y_rpl")
        z_rpl = aesara.shared(value=4, name="z_rpl")
        swap = {y: y_rpl, z: z_rpl}
        map_SV = {"y_rpl": y_rpl, "z_rpl": z_rpl}

        out = x + y + z + m

        # Test for different linkers
        # for mode in ["FAST_RUN","FAST_COMPILE"]:
        second_time = False
        for mode in ["FAST_RUN", "FAST_COMPILE"]:
            ori = aesara.function(
                [i],
                [out],
                mode=mode,
                updates=[(z, z + 1), (m, m + 2)],
                givens={x: x_list[i]},
            )
            cpy = ori.copy(swap=swap)

            # run the functions several times
            ori(1), cpy(1), cpy(2)

            # assert that the shared variables are updated correctly
            # across the original and copied functions
            if not second_time:
                # m should be updated 3 times
                assert m.get_value() == 6
                # z should be updated once
                assert z.get_value() == 3
                # z_rpl should be updated twice
                assert z_rpl.get_value() == 6
                # y and y_rpl should not be updated
                assert y_rpl.get_value() == 3
                assert y.get_value() == 1
            elif second_time:
                # on the second pass, each shared variable has been updated twice as much
                assert m.get_value() == 12
                assert z.get_value() == 4
                assert z_rpl.get_value() == 8
                assert y_rpl.get_value() == 3

            # test the copied function:
            # 1. swapped SharedVariables -> the replacement shared variables
            #    share storage with the copy
            # 2. updatable SharedVariables -> their values did update
            names = map_SV.keys()
            for key in cpy.fn.storage_map:
                if key.name in names:
                    assert (
                        map_SV[key.name].container.storage[0]
                        == cpy.fn.storage_map[key][0]
                    )

            second_time = True
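
The behaviour under test reduces to this: Function.copy(swap=...) redirects both the graph and its updates to the replacement shared variables, so the original and the copy keep separate state. A minimal sketch without the givens/mode machinery above:

import aesara

s = aesara.shared(1, name="s")
s_rpl = aesara.shared(10, name="s_rpl")

f = aesara.function([], s + 1, updates=[(s, s + 1)])
g = f.copy(swap={s: s_rpl})

assert f() == 2               # computed from s == 1, then s becomes 2
assert g() == 11              # the copy reads and updates s_rpl instead
assert s.get_value() == 2
assert s_rpl.get_value() == 11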
Example #8
 def test_get_size(self):
     rng = np.random.RandomState(12)
     rng_a = shared(rng)
     shape_info = random_state_type.get_shape_info(rng_a)
     size = random_state_type.get_size(shape_info)
     assert size == sys.getsizeof(rng.get_state(legacy=False))
Example #9
def test_debugprint_mitsot():
    def fn(a_m2, a_m1, b_m2, b_m1):
        return a_m1 + a_m2, b_m1 + b_m2

    a0 = aesara.shared(np.arange(2, dtype="int64"))
    b0 = aesara.shared(np.arange(2, dtype="int64"))

    (a, b), _ = aesara.scan(
        fn,
        outputs_info=[
            {
                "initial": a0,
                "taps": [-2, -1]
            },
            {
                "initial": b0,
                "taps": [-2, -1]
            },
        ],
        n_steps=5,
    )

    final_result = a + b
    output_str = debugprint(final_result, file="str", print_op_info=True)
    lines = output_str.split("\n")

    expected_output = """Elemwise{add,no_inplace} [id A]
    |Subtensor{int64::} [id B]
    | |for{cpu,scan_fn}.0 [id C] (outer_out_mit_sot-0)
    | | |TensorConstant{5} [id D] (n_steps)
    | | |IncSubtensor{Set;:int64:} [id E] (outer_in_mit_sot-0)
    | | | |AllocEmpty{dtype='int64'} [id F]
    | | | | |Elemwise{add,no_inplace} [id G]
    | | | |   |TensorConstant{5} [id D]
    | | | |   |Subtensor{int64} [id H]
    | | | |     |Shape [id I]
    | | | |     | |Subtensor{:int64:} [id J]
    | | | |     |   |<TensorType(int64, (None,))> [id K]
    | | | |     |   |ScalarConstant{2} [id L]
    | | | |     |ScalarConstant{0} [id M]
    | | | |Subtensor{:int64:} [id J]
    | | | |ScalarFromTensor [id N]
    | | |   |Subtensor{int64} [id H]
    | | |IncSubtensor{Set;:int64:} [id O] (outer_in_mit_sot-1)
    | |   |AllocEmpty{dtype='int64'} [id P]
    | |   | |Elemwise{add,no_inplace} [id Q]
    | |   |   |TensorConstant{5} [id D]
    | |   |   |Subtensor{int64} [id R]
    | |   |     |Shape [id S]
    | |   |     | |Subtensor{:int64:} [id T]
    | |   |     |   |<TensorType(int64, (None,))> [id U]
    | |   |     |   |ScalarConstant{2} [id V]
    | |   |     |ScalarConstant{0} [id W]
    | |   |Subtensor{:int64:} [id T]
    | |   |ScalarFromTensor [id X]
    | |     |Subtensor{int64} [id R]
    | |ScalarConstant{2} [id Y]
    |Subtensor{int64::} [id Z]
    |for{cpu,scan_fn}.1 [id C] (outer_out_mit_sot-1)
    |ScalarConstant{2} [id BA]

    Inner graphs:

    for{cpu,scan_fn}.0 [id C] (outer_out_mit_sot-0)
    >Elemwise{add,no_inplace} [id BB] (inner_out_mit_sot-0)
    > |*1-<TensorType(int64, ())> [id BC] -> [id E] (inner_in_mit_sot-0-1)
    > |*0-<TensorType(int64, ())> [id BD] -> [id E] (inner_in_mit_sot-0-0)
    >Elemwise{add,no_inplace} [id BE] (inner_out_mit_sot-1)
    > |*3-<TensorType(int64, ())> [id BF] -> [id O] (inner_in_mit_sot-1-1)
    > |*2-<TensorType(int64, ())> [id BG] -> [id O] (inner_in_mit_sot-1-0)

    for{cpu,scan_fn}.1 [id C] (outer_out_mit_sot-1)
    >Elemwise{add,no_inplace} [id BB] (inner_out_mit_sot-0)
    >Elemwise{add,no_inplace} [id BE] (inner_out_mit_sot-1)"""

    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()
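
The key detail for the comparison is that debugprint(..., file="str") returns the formatted text instead of printing it, so the expected output can be checked line by line. A minimal sketch, assuming debugprint is imported from aesara.printing:

import numpy as np
import aesara
from aesara.printing import debugprint

a = aesara.shared(np.arange(2, dtype="int64"), name="a")
# file="str" makes debugprint return the text rather than write to stdout.
text = debugprint(a + 1, file="str")
assert isinstance(text, str)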
Example #10
        def run_case(self, testname, inputs):
            inputs_ref = [aesara.shared(inp) for inp in inputs]
            inputs_tst = [aesara.shared(inp) for inp in inputs]

            try:
                node_ref = safe_make_node(self.op, *inputs_ref)
                node_tst = safe_make_node(self.op, *inputs_tst)
            except Exception as exc:
                err_msg = ("Test %s::%s: Error occurred while making "
                           "a node with inputs %s") % (self.gpu_op, testname,
                                                       inputs)
                exc.args += (err_msg, )
                raise

            try:
                f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu)
                f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu)
            except Exception as exc:
                err_msg = ("Test %s::%s: Error occurred while trying to "
                           "make a Function") % (self.gpu_op, testname)
                exc.args += (err_msg, )
                raise

            self.assertFunctionContains1(f_tst, self.gpu_op)

            ref_e = None
            try:
                expecteds = f_ref()
            except Exception as exc:
                ref_e = exc

            try:
                variables = f_tst()
            except Exception as exc:
                if ref_e is None:
                    err_msg = ("Test %s::%s: exception when calling the "
                               "Function") % (self.gpu_op, testname)
                    exc.args += (err_msg, )
                    raise
                else:
                    # if we raised an exception of the same type we're good.
                    if isinstance(exc, type(ref_e)):
                        return
                    else:
                        err_msg = (
                            "Test %s::%s: exception raised during test "
                            "call was not the same as the reference "
                            "call (got: %s, expected %s)" %
                            (self.gpu_op, testname, type(exc), type(ref_e)))
                        exc.args += (err_msg, )
                        raise

            for i, (variable, expected) in enumerate(zip(variables,
                                                         expecteds)):
                condition = (
                    variable.dtype != expected.dtype
                    or variable.shape != expected.shape
                    or not TensorType.values_eq_approx(variable, expected))
                assert not condition, ("Test %s::%s: Output %s gave the wrong "
                                       "value. With inputs %s, expected %s "
                                       "(dtype %s), got %s (dtype %s)." % (
                                           self.op,
                                           testname,
                                           i,
                                           inputs,
                                           expected,
                                           expected.dtype,
                                           variable,
                                           variable.dtype,
                                       ))

            for description, check in self.checks.items():
                assert check(
                    inputs,
                    variables), ("Test %s::%s: Failed check: %s "
                                 "(inputs were %s, outputs were %s)") % (
                                     self.op, testname, description, inputs,
                                     variables)
Example #11
    def test_neibs_wrap_centered_step_manual(self):

        expected1 = [
            [24, 20, 21, 4, 0, 1, 9, 5, 6],
            [21, 22, 23, 1, 2, 3, 6, 7, 8],
            [23, 24, 20, 3, 4, 0, 8, 9, 5],
            [9, 5, 6, 14, 10, 11, 19, 15, 16],
            [6, 7, 8, 11, 12, 13, 16, 17, 18],
            [8, 9, 5, 13, 14, 10, 18, 19, 15],
            [19, 15, 16, 24, 20, 21, 4, 0, 1],
            [16, 17, 18, 21, 22, 23, 1, 2, 3],
            [18, 19, 15, 23, 24, 20, 3, 4, 0],
        ]
        expected2 = [
            [24, 20, 21, 4, 0, 1, 9, 5, 6],
            [22, 23, 24, 2, 3, 4, 7, 8, 9],
            [14, 10, 11, 19, 15, 16, 24, 20, 21],
            [12, 13, 14, 17, 18, 19, 22, 23, 24],
        ]
        expected3 = [
            [19, 15, 16, 24, 20, 21, 4, 0, 1, 9, 5, 6, 14, 10, 11],
            [17, 18, 19, 22, 23, 24, 2, 3, 4, 7, 8, 9, 12, 13, 14],
            [9, 5, 6, 14, 10, 11, 19, 15, 16, 24, 20, 21, 4, 0, 1],
            [7, 8, 9, 12, 13, 14, 17, 18, 19, 22, 23, 24, 2, 3, 4],
        ]
        expected4 = [
            [23, 24, 20, 21, 22, 3, 4, 0, 1, 2, 8, 9, 5, 6, 7],
            [21, 22, 23, 24, 20, 1, 2, 3, 4, 0, 6, 7, 8, 9, 5],
            [13, 14, 10, 11, 12, 18, 19, 15, 16, 17, 23, 24, 20, 21, 22],
            [11, 12, 13, 14, 10, 16, 17, 18, 19, 15, 21, 22, 23, 24, 20],
        ]
        expected5 = [
            [24, 20, 21, 4, 0, 1, 9, 5, 6],
            [22, 23, 24, 2, 3, 4, 7, 8, 9],
            [9, 5, 6, 14, 10, 11, 19, 15, 16],
            [7, 8, 9, 12, 13, 14, 17, 18, 19],
            [19, 15, 16, 24, 20, 21, 4, 0, 1],
            [17, 18, 19, 22, 23, 24, 2, 3, 4],
        ]
        expected6 = [
            [24, 20, 21, 4, 0, 1, 9, 5, 6],
            [21, 22, 23, 1, 2, 3, 6, 7, 8],
            [23, 24, 20, 3, 4, 0, 8, 9, 5],
            [14, 10, 11, 19, 15, 16, 24, 20, 21],
            [11, 12, 13, 16, 17, 18, 21, 22, 23],
            [13, 14, 10, 18, 19, 15, 23, 24, 20],
        ]

        # TODO test discontinuous image

        for shp_idx, (shape, neib_shape, neib_step, expected) in enumerate([
            [(7, 8, 5, 5), (3, 3), (2, 2), expected1],
            [(7, 8, 5, 5), (3, 3), (3, 3), expected2],
            [(7, 8, 5, 5), (5, 3), (3, 3), expected3],
            [(7, 8, 5, 5), (3, 5), (3, 3), expected4],
            [(80, 90, 5, 5), (3, 3), (2, 3), expected5],
            [(1025, 9, 5, 5), (3, 3), (3, 2), expected6],
            [(1, 1, 5, 1035), (3, 3), (3, 3), None],
            [(1, 1, 1045, 5), (3, 3), (3, 3), None],
        ]):

            for dtype in self.dtypes:

                images = shared(
                    np.asarray(np.arange(np.prod(shape)).reshape(shape),
                               dtype=dtype))
                neib_shape = at.as_tensor_variable(neib_shape)
                neib_step = at.as_tensor_variable(neib_step)
                expected = np.asarray(expected)

                f = function(
                    [],
                    images2neibs(images,
                                 neib_shape,
                                 neib_step,
                                 mode="wrap_centered"),
                    mode=self.mode,
                )
                neibs = f()

                if expected.size > 1:
                    for i in range(shape[0] * shape[1]):
                        assert np.allclose(
                            neibs[i * expected.shape[0]:(i + 1) *
                                  expected.shape[0], :],
                            expected + 25 * i,
                        ), "wrap_centered"

                assert self.op in [
                    type(node.op) for node in f.maker.fgraph.toposort()
                ]
Example #12
 def test_dot(self):
     insh = self.in_shape[0]
     vW = np.asarray(self.rng.uniform(size=(insh, insh)),
                     aesara.config.floatX)
     W = aesara.shared(vW)
     self.check_rop_lop(dot(self.x, W), self.in_shape)
Example #13
 def test_join(self):
     tv = np.asarray(self.rng.uniform(size=(10, )), aesara.config.floatX)
     t = aesara.shared(tv)
     out = aet.join(0, self.x, t)
     self.check_rop_lop(out, (self.in_shape[0] + 10, ))
Example #14
def test_time_varying_model():

    np.random.seed(1039)

    data = gen_toy_data()

    formula_str = "1 + C(weekday)"
    X_df = patsy.dmatrix(formula_str, data, return_type="dataframe")
    X_np = X_df.values

    xi_shape = X_np.shape[1]

    xi_0_true = np.array([2.0, -2.0, 2.0, -2.0, 2.0, -2.0, 2.0]).reshape(xi_shape, 1)
    xi_1_true = np.array([2.0, -2.0, 2.0, -2.0, 2.0, -2.0, 2.0]).reshape(xi_shape, 1)

    xis_rv_true = np.stack([xi_0_true, xi_1_true], axis=1)

    with pm.Model(**TV_CONFIG) as sim_model:
        _ = create_dirac_zero_hmm(
            X_np, mu=1000, xis=xis_rv_true, observed=np.zeros(X_np.shape[0])
        )

    sim_point = pm.sample_prior_predictive(samples=1, model=sim_model)

    y_t = sim_point["Y_t"].squeeze().astype(int)

    split = int(len(y_t) * 0.7)

    train_y, test_V = y_t[:split], sim_point["V_t"].squeeze()[split:]
    train_X, test_X = X_np[:split, :], X_np[split:, :]

    X = shared(train_X, name="X", borrow=True)
    Y = shared(train_y, name="y_t", borrow=True)

    with pm.Model() as model:
        xis_rv = pm.Normal("xis", 0, 10, shape=xis_rv_true.shape)
        _ = create_dirac_zero_hmm(X, 1000, xis_rv, Y)

    number_of_draws = 500

    with model:
        steps = [
            FFBSStep([model.V_t]),
            pm.NUTS(
                vars=[
                    model.gamma_0,
                    model.Gamma,
                ],
                target_accept=0.90,
            ),
        ]

    with model:
        posterior_trace = pm.sample(
            draws=number_of_draws,
            step=steps,
            random_seed=100,
            return_inferencedata=True,
            chains=1,
            cores=1,
            progressbar=True,
            idata_kwargs={"dims": {"Y_t": ["date"], "V_t": ["date"]}},
        )

    # Update the shared variable values
    Y.set_value(np.ones(test_X.shape[0], dtype=Y.dtype))
    X.set_value(test_X)

    model.V_t.distribution.shape = (test_X.shape[0],)

    hdi_data = az.hdi(posterior_trace, hdi_prob=0.95, var_names=["xis"]).to_dataframe()
    hdi_data = hdi_data.unstack(level="hdi")

    xis_true_flat = xis_rv_true.squeeze().flatten()
    check_idx = ~np.in1d(
        np.arange(len(xis_true_flat)), np.arange(3, len(xis_true_flat), step=4)
    )
    assert np.all(
        xis_true_flat[check_idx] <= hdi_data["xis", "higher"].values[check_idx]
    )
    assert np.all(
        xis_true_flat[check_idx] >= hdi_data["xis", "lower"].values[check_idx]
    )

    trace = posterior_trace.posterior.drop_vars(["Gamma", "V_t"])

    with aesara.config.change_flags(compute_test_value="off"):
        adds_pois_ppc = pm.sample_posterior_predictive(
            trace, var_names=["V_t", "Y_t", "Gamma"], model=model
        )

    assert (np.abs(adds_pois_ppc["V_t"] - test_V) / test_V.shape[0]).mean() < 1e-2
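
The out-of-sample step above works because a compiled function reads the current contents of its shared variables, so set_value changes what later calls see without recompiling. A minimal sketch of that mechanism on its own (names here are illustrative):

import numpy as np
import aesara

data = aesara.shared(np.arange(4.0), name="data")
f = aesara.function([], data.sum())

assert f() == 6.0                  # 0 + 1 + 2 + 3
data.set_value(np.ones(10))        # swap in new data; no recompilation needed
assert f() == 10.0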
Example #15
def aevb_initial():
    return aesara.shared(np.random.rand(3, 7).astype("float32"))
Example #16
    def test_get_shape_info(self):
        rng = np.random.RandomState(12)
        rng_a = shared(rng)

        assert isinstance(random_state_type.get_shape_info(rng_a),
                          np.random.RandomState)
Example #17
 class _approx:
     params = (aesara.shared(np.asarray([1, 2, 3])), )
Example #18
import numpy as np
import aesara
import aesara.tensor as aet

aesara.config.floatX = 'float32'

rng = np.random

N = 400
feats = 784
D = (rng.randn(N, feats).astype(aesara.config.floatX),
     rng.randint(size=N, low=0, high=2).astype(aesara.config.floatX))
training_steps = 10000

# Declare Aesara symbolic variables
x = aesara.shared(D[0], name="x")
y = aesara.shared(D[1], name="y")
w = aesara.shared(rng.randn(feats).astype(aesara.config.floatX), name="w")
b = aesara.shared(np.asarray(0., dtype=aesara.config.floatX), name="b")
x.tag.test_value = D[0]
y.tag.test_value = D[1]
#print "Initial model:"
#print w.get_value(), b.get_value()

# Construct Aesara expression graph
p_1 = 1 / (1 + aet.exp(-aet.dot(x, w) - b))  # Probability of having a one
prediction = p_1 > 0.5  # The prediction, thresholded to 0 or 1
xent = -y * aet.log(p_1) - (1 - y) * aet.log(1 - p_1)  # Cross-entropy
cost = aet.cast(xent.mean(), 'float32') + \
    0.01 * (w ** 2).sum()  # The cost to optimize
gw, gb = aet.grad(cost, [w, b])
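
The listing stops right after the gradients are computed. A minimal sketch of the usual next step (compiling a training function with plain SGD updates and running it), assuming the variables defined above; the 0.1 learning rate is illustrative:

# x and y are shared variables, so the training function takes no explicit inputs.
train = aesara.function(
    inputs=[],
    outputs=[prediction, xent],
    updates=[(w, w - 0.1 * gw), (b, b - 0.1 * gb)],
)
predict = aesara.function(inputs=[], outputs=prediction)

for _ in range(training_steps):
    pred, err = train()

# print("Final model:")
# print(w.get_value(), b.get_value())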
Example #19
 def test_setsubtensor1(self):
     tv = np.asarray(self.rng.uniform(size=(3, )), aesara.config.floatX)
     t = aesara.shared(tv)
     out = aesara.tensor.subtensor.set_subtensor(self.x[:3], t)
     self.check_rop_lop(out, self.in_shape)