def test_argsort():
    # Set up
    rng = np.random.RandomState(seed=utt.fetch_seed())
    m_val = rng.rand(3, 2)
    v_val = rng.rand(4)

    # Example 1
    a = tensor.dmatrix()
    w = argsort(a)
    f = aesara.function([a], w)
    gv = f(m_val)
    gt = np.argsort(m_val)
    utt.assert_allclose(gv, gt)

    # Example 2
    a = tensor.dmatrix()
    axis = tensor.lscalar()
    w = argsort(a, axis)
    f = aesara.function([a, axis], w)
    for axis_val in 0, 1:
        gv = f(m_val, axis_val)
        gt = np.argsort(m_val, axis_val)
        utt.assert_allclose(gv, gt)

    # Example 3
    a = tensor.dvector()
    w2 = argsort(a)
    f = aesara.function([a], w2)
    gv = f(v_val)
    gt = np.argsort(v_val)
    utt.assert_allclose(gv, gt)

    # Example 4
    a = tensor.dmatrix()
    axis = tensor.lscalar()
    l = argsort(a, axis, "mergesort")
    f = aesara.function([a, axis], l)
    for axis_val in 0, 1:
        gv = f(m_val, axis_val)
        gt = np.argsort(m_val, axis_val)
        utt.assert_allclose(gv, gt)

    # Example 5
    a = tensor.dmatrix()
    axis = tensor.lscalar()
    a1 = ArgSortOp("mergesort", [])
    a2 = ArgSortOp("quicksort", [])
    # All of the assertions below should hold
    assert a1 != a2
    assert a1 == ArgSortOp("mergesort", [])
    assert a2 == ArgSortOp("quicksort", [])

    # Example 6: Testing axis=None
    a = tensor.dmatrix()
    w2 = argsort(a, None)
    f = aesara.function([a], w2)
    gv = f(m_val)
    gt = np.argsort(m_val, None)
    utt.assert_allclose(gv, gt)
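The equality checks in Example 5 hold because Aesara Ops use structural equality: two instances compare equal when they are of the same class and agree on the attributes listed in `__props__`. A minimal sketch with a hypothetical one-prop op (not the real `ArgSortOp` definition):

from aesara.graph.op import Op

class MySortOp(Op):
    # __props__ drives __eq__ and __hash__: same class + same props => equal
    __props__ = ("kind",)

    def __init__(self, kind):
        self.kind = kind

assert MySortOp("mergesort") == MySortOp("mergesort")
assert MySortOp("mergesort") != MySortOp("quicksort")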
Example #2
    def test_pickle(self):
        x = dmatrix("x")
        y = dmatrix("y")

        m = mul(x, y)

        s = pickle.dumps(m)
        m2 = pickle.loads(s)

        assert m2.owner.op == m.owner.op
Example #3
    def test_infer_shape(self):
        x = tensor.dmatrix()
        y = tensor.dmatrix()

        self._compile_and_check(
            [x, y],
            [self.op_class()(x, y)],
            [np.random.rand(5, 6), np.random.rand(5, 6)],
            self.op_class,
        )
Example #4
    def test_infer_shape(self):
        x = tensor.dmatrix()
        y = tensor.dmatrix()

        # adapt the next call to match the op under test

        self._compile_and_check(
            [x, y],
            [self.op_class()(x, y)],
            [np.random.rand(5, 6), np.random.rand(5, 6)],
            self.op_class,
        )
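For reference, the `infer_shape` method that `_compile_and_check` exercises only maps input shapes to an output shape; no values are computed. A minimal sketch for an elementwise-style binary op (assuming the recent Aesara signature that takes `fgraph`; older Theano versions omit it):

    def infer_shape(self, fgraph, node, input_shapes):
        # the output has the same shape as the first input
        return [input_shapes[0]]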
Example #5
    def test_borrow_output(self):
        a = tt.dmatrix()
        f = function([a], Out(a, borrow=False))
        o = np.ones((3, 3))
        assert o is not f(o)  # function no longer permits aliasing outputs to inputs

        f = function([a], Out(a * 4, borrow=False))
        o = np.ones((3, 3))
        four = f(o)
        assert np.all(four == 4)
        f(o + 0.1)  # should not clobber the memory used to store four
        assert np.all(four == 4)

        f = function(
            [a], Out(a * 4, borrow=True), mode=aesara.Mode("c|py_nogc", "fast_run")
        )
        o = np.ones((3, 3))
        four = f(o)
        assert np.all(four == 4)
        f(o + 0.1)  # should clobber the memory used to store four
        if aesara.config.cxx:
            assert not np.all(four == 4)
        else:
            # The Elemwise.perform method doesn't reuse memory,
            # as some NumPy versions don't support that correctly.
            assert np.all(four == 4)
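A practical consequence of `Out(borrow=True)`: the returned array may alias storage that the next call overwrites, so a caller who needs the value later should copy it immediately. A minimal sketch, reusing the setup above:

four = f(o).copy()  # detach the result from the function's internal storage
f(o + 0.1)          # may clobber the internal buffer, but not our copy
assert np.all(four == 4)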
Example #6
def test_copy():
    x = tt.dmatrix("x")
    data = np.random.rand(5, 5)
    y = x.copy(name="y")
    f = aesara.function([x], y)
    assert_equal(f(data), data)
    assert_string_equal(y.name, "y")
Example #7
    def test_shared(self):

        # CHECK: two functions (f1 and f2) can share w
        w = shared(np.random.rand(2, 2), "w")
        wval = w.get_value(borrow=False)

        x = dmatrix()
        out1 = w + x
        out2 = w * x
        f1 = pfunc([x], [out1])
        f2 = pfunc([x], [out2])
        xval = np.random.rand(2, 2)
        assert np.all(f1(xval) == xval + wval)
        assert np.all(f2(xval) == xval * wval)

        # CHECK: updating a shared value
        f3 = pfunc([x], out1, updates=[(w, (w - 1))])
        # f3 changes the value of w
        assert np.all(f3(xval) == xval + wval)
        # this same value is read by f1
        assert np.all(f1(xval) == xval + (wval - 1))

        w.set_value(w.get_value(borrow=True) * 10, borrow=True)
        # this same value is read by f1
        assert np.all(f1(xval) == xval + w.get_value(borrow=True))
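The interleaving above works because `updates` are applied only after the outputs have been computed from the shared variable's pre-call value. A minimal sketch of that ordering, using a hypothetical counter:

c = shared(0)
step = pfunc([], c, updates=[(c, c + 1)])
assert step() == 0  # returns the old value...
assert step() == 1  # ...then the update takes effect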
Example #8
    def test_None(self):
        a = tensor.dmatrix()
        l = sort(a, None)
        f = aesara.function([a], l)
        gv = f(self.m_val)
        gt = np.sort(self.m_val, None)
        utt.assert_allclose(gv, gt)
Example #9
    def test_unary(self, method, exp_type, cm):
        x = at.dmatrix("x")
        x = sparse.csr_from_dense(x)

        method_to_call = getattr(x, method)

        if cm is None:
            cm = pytest.warns(UserWarning, match=".*converted to dense.*")

        if exp_type == SparseTensorType:
            exp_res_type = csr_matrix
        else:
            exp_res_type = np.ndarray

        with cm:
            z = method_to_call()

        if not isinstance(z, tuple):
            z_outs = (z,)
        else:
            z_outs = z

        assert all(isinstance(out.type, exp_type) for out in z_outs)

        f = aesara.function([x], z, on_unused_input="ignore")

        res = f([[1.1, 0.0, 2.0], [-1.0, 0.0, 0.0]])

        if not isinstance(res, list):
            res_outs = [res]
        else:
            res_outs = res

        assert all(isinstance(out, exp_res_type) for out in res_outs)
Example #10
    def test_simple_2d(self):
        # Increment or set part of a tensor by a scalar, using a full slice and
        # a partial slice whose bound depends on a scalar.

        a = tt.dmatrix()
        increment = tt.dscalar()
        sl1 = slice(None)
        sl2_end = tt.lscalar()
        sl2 = slice(sl2_end)

        for do_set in [False, True]:

            if do_set:
                res = tt.set_subtensor(a[sl1, sl2], increment)
            else:
                res = tt.inc_subtensor(a[sl1, sl2], increment)

            f = aesara.function([a, increment, sl2_end], res)

            val_a = np.ones((5, 5))
            val_inc = 2.3
            val_sl2_end = 2

            result = f(val_a, val_inc, val_sl2_end)

            expected_result = np.copy(val_a)
            if do_set:
                expected_result[:, :val_sl2_end] = val_inc
            else:
                expected_result[:, :val_sl2_end] += val_inc

            utt.assert_allclose(result, expected_result)
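For context, `set_subtensor` and `inc_subtensor` are the functional counterparts of NumPy's in-place `a[idx] = v` and `a[idx] += v`: each returns a new symbolic variable and leaves `a` itself untouched. A minimal sketch:

b = tt.set_subtensor(a[0], 0.0)  # like a[0] = 0.0, but returns a new variable
c = tt.inc_subtensor(a[0], 1.0)  # like a[0] += 1.0, but returns a new variable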
Example #11
def test_numpy_method(fct):
    # This type of code is used frequently by PyMC3 users
    x = tt.dmatrix("x")
    data = np.random.rand(5, 5)
    x.tag.test_value = data
    y = fct(x)
    f = aesara.function([x], y)
    utt.assert_allclose(np.nan_to_num(f(data)), np.nan_to_num(fct(data)))
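Note that `x.tag.test_value` only takes effect while test values are enabled; `aesara.config.compute_test_value` defaults to "off". A minimal sketch of enabling them for a block (assuming the standard `change_flags` context manager):

with aesara.config.change_flags(compute_test_value="raise"):
    y = fct(x)  # test values are now propagated and checked eagerly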
Example #12
    def test_optimizations_preserved(self):
        a = tt.dvector()  # the a is for 'anonymous' (un-named).
        x = tt.dvector("x")
        s = tt.dvector("s")
        xm = tt.dmatrix("x")
        sm = tt.dmatrix("s")

        f = function(
            [a, x, s, xm, sm],
            ((a.T.T) * (tt.dot(xm, (sm.T.T.T)) + x).T * (x / x) + s),
        )
        old_default_mode = config.mode
        old_default_opt = config.optimizer
        old_default_link = config.linker
        try:
            try:
                str_f = pickle.dumps(f, protocol=-1)
                config.mode = "Mode"
                config.linker = "py"
                config.optimizer = "None"
                g = pickle.loads(str_f)
                # print g.maker.mode
                # print compile.mode.default_mode
            except NotImplementedError as e:
                if str(e).startswith("DebugMode is not pickl"):
                    g = "ok"
        finally:
            config.mode = old_default_mode
            config.optimizer = old_default_opt
            config.linker = old_default_link

        if g == "ok":
            return

        assert f.maker is not g.maker
        assert f.maker.fgraph is not g.maker.fgraph
        tf = f.maker.fgraph.toposort()
        tg = g.maker.fgraph.toposort()
        assert len(tf) == len(tg)
        for nf, ng in zip(tf, tg):
            assert nf.op == ng.op
            assert len(nf.inputs) == len(ng.inputs)
            assert len(nf.outputs) == len(ng.outputs)
            assert [i.type for i in nf.inputs] == [i.type for i in ng.inputs]
            assert [i.type for i in nf.outputs] == [i.type for i in ng.outputs]
Example #13
    def test4(self):
        a = tensor.dmatrix()
        axis = tensor.scalar()
        l = sort(a, axis, "mergesort")
        f = aesara.function([a, axis], l)
        for axis_val in 0, 1:
            gv = f(self.m_val, axis_val)
            gt = np.sort(self.m_val, axis_val)
            utt.assert_allclose(gv, gt)
Example #14
    def test_bug_complext_10_august_09(self):
        v0 = dmatrix()
        v1 = basic._convert_to_complex128(v0)

        inputs = [v0]
        outputs = [v1]
        f = function(inputs, outputs)
        i = np.zeros((2, 2))
        assert (f(i) == np.zeros((2, 2))).all()
Example #15
    def test_getitem(self):
        x = at.dmatrix("x")
        x = sparse.csr_from_dense(x)

        z = x[:, :2]
        assert isinstance(z.type, SparseTensorType)

        f = aesara.function([x], z)
        exp_res = f([[1.1, 0.0, 2.0], [-1.0, 0.0, 0.0]])
        assert isinstance(exp_res, csr_matrix)
Example #16
def test_empty_list_indexing():
    ynp = np.zeros((2, 2))[:, []]
    znp = np.zeros((2, 2))[:, ()]
    data = [[0, 0], [0, 0]]
    x = tt.dmatrix("x")
    y = x[:, []]
    z = x[:, ()]
    fy = aesara.function([x], y)
    fz = aesara.function([x], z)
    assert_equal(fy(data).shape, ynp.shape)
    assert_equal(fz(data).shape, znp.shape)
Example #17
    def test_1arg(self):
        x = dmatrix("x")

        @as_op(dmatrix, dvector)
        def cumprod(x):
            return np.cumprod(x)

        fn = function([x], cumprod(x))
        r = fn([[1.5, 5], [2, 2]])
        r0 = np.array([1.5, 7.5, 15.0, 30.0])

        assert np.allclose(r, r0), (r, r0)
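A caveat worth noting: an Op created with `as_op` only defines `perform`, so it runs in pure Python and provides no gradient. A minimal sketch of the failure mode, assuming `pytest` is available and using a generic exception since the exact error type is not pinned down here:

with pytest.raises(Exception):
    aesara.grad(cumprod(x).sum(), x)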
Example #18
    def test_shuffle_row_elements(self):
        # Test that RandomStreams.shuffle_row_elements generates the right results
        # Check over two calls to see if the random state is correctly updated.

        # On matrices, for each row, the elements of that row should be shuffled.
        # Note that this differs from np.random.shuffle, where all the elements
        # of the matrix are shuffled.
        random = RandomStreams(utt.fetch_seed())
        m_input = tensor.dmatrix()
        f = function([m_input],
                     random.shuffle_row_elements(m_input),
                     updates=random.updates())

        # Generate the elements to be shuffled
        val_rng = np.random.RandomState(utt.fetch_seed() + 42)
        in_mval = val_rng.uniform(-2, 2, size=(20, 5))
        fn_mval0 = f(in_mval)
        fn_mval1 = f(in_mval)
        assert not np.all(in_mval == fn_mval0)
        assert not np.all(in_mval == fn_mval1)
        assert not np.all(fn_mval0 == fn_mval1)

        rng_seed = np.random.RandomState(utt.fetch_seed()).randint(2**30)
        rng = np.random.RandomState(int(rng_seed))
        numpy_mval0 = in_mval.copy()
        numpy_mval1 = in_mval.copy()
        for row in numpy_mval0:
            rng.shuffle(row)
        for row in numpy_mval1:
            rng.shuffle(row)

        assert np.all(numpy_mval0 == fn_mval0)
        assert np.all(numpy_mval1 == fn_mval1)

        # On vectors, the behaviour is the same as np.random.shuffle, except
        # that it does not operate in place: it returns a shuffled copy.
        random1 = RandomStreams(utt.fetch_seed())
        v_input = tensor.dvector()
        f1 = function([v_input], random1.shuffle_row_elements(v_input))

        in_vval = val_rng.uniform(-3, 3, size=(12, ))
        fn_vval = f1(in_vval)
        numpy_vval = in_vval.copy()
        vrng = np.random.RandomState(int(rng_seed))
        vrng.shuffle(numpy_vval)
        assert np.all(numpy_vval == fn_vval)

        # Trying to shuffle a vector with a function built for matrices,
        # or vice versa, raises a TypeError
        with pytest.raises(TypeError):
            f1(in_mval)
        with pytest.raises(TypeError):
            f(in_vval)
Example #19
    def test_repeat(self):
        x = at.dmatrix("x")
        x = sparse.csr_from_dense(x)

        with pytest.warns(UserWarning, match=".*converted to dense.*"):
            z = x.repeat(2, axis=1)

        assert isinstance(z.type, DenseTensorType)

        f = aesara.function([x], z)
        exp_res = f([[1.1, 0.0, 2.0], [-1.0, 0.0, 0.0]])
        assert isinstance(exp_res, np.ndarray)
Example #20
    def test_no_aliasing_1(self):
        # B is a shared variable and A is updated with B's contents;
        # since B is being updated as well, we don't need to copy anything
        # to avoid aliasing shared variables.
        A = self.shared(np.zeros((2, 2)) + 0.5)
        B = self.shared(np.zeros((2, 2)) - 0.5)
        C = tensor.dmatrix()
        f = pfunc([C], [], updates=[(A, B), (B, C)])
        z = np.zeros((2, 2))
        f(z)
        assert not np.may_share_memory(data_of(A), data_of(B))
        # Aesara tries to maintain its own memory space.
        assert not np.may_share_memory(z, data_of(B))
        assert np.all(data_of(B) == z)
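`data_of` above is assumed to be the usual test-suite helper that peeks at a shared variable's underlying storage without copying, roughly:

def data_of(shared_var):
    # return the ndarray actually held in the shared variable's container
    return shared_var.container.storage[0]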
Example #21
def test_broadcast_arrays():
    x, y = aet.dvector(), aet.dmatrix()
    x_bcast, y_bcast = broadcast_arrays(x, y)

    py_mode = Mode("py", None)
    bcast_fn = function([x, y], [x_bcast, y_bcast], mode=py_mode)

    x_val = np.array([1.0], dtype=np.float64)
    y_val = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64)
    x_bcast_val, y_bcast_val = bcast_fn(x_val, y_val)
    x_bcast_exp, y_bcast_exp = np.broadcast_arrays(x_val, y_val)

    assert np.array_equal(x_bcast_val, x_bcast_exp)
    assert np.array_equal(y_bcast_val, y_bcast_exp)
Example #22
    def test_wrong_input(self):
        # Make sure errors are raised when image and kernel are not 4D tensors

        with pytest.raises(Exception):
            self.validate((3, 2, 8, 8), (4, 2, 5, 5),
                          "valid",
                          input=tt.dmatrix())
        with pytest.raises(Exception):
            self.validate((3, 2, 8, 8), (4, 2, 5, 5),
                          "valid",
                          filters=tt.dvector())
        with pytest.raises(Exception):
            self.validate((3, 2, 8, 8), (4, 2, 5, 5),
                          "valid",
                          input=tt.dtensor3())
Example #23
def test_multinomial_dtypes():
    p = tensor.dmatrix()
    u = tensor.dvector()
    m = multinomial.MultinomialFromUniform("auto")(p, u)
    assert m.dtype == "float64", m.dtype

    p = tensor.fmatrix()
    u = tensor.fvector()
    m = multinomial.MultinomialFromUniform("auto")(p, u)
    assert m.dtype == "float32", m.dtype

    p = tensor.fmatrix()
    u = tensor.fvector()
    m = multinomial.MultinomialFromUniform("float64")(p, u)
    assert m.dtype == "float64", m.dtype
Example #24
    def test_2arg(self):
        x = dmatrix("x")
        x.tag.test_value = np.zeros((2, 2))
        y = dvector("y")
        y.tag.test_value = [0, 0, 0, 0]

        @as_op([dmatrix, dvector], dvector)
        def cumprod_plus(x, y):
            return np.cumprod(x) + y

        fn = function([x, y], cumprod_plus(x, y))
        r = fn([[1.5, 5], [2, 2]], [1, 100, 2, 200])
        r0 = np.array([2.5, 107.5, 17.0, 230.0])

        assert np.allclose(r, r0), (r, r0)
Example #25
    def test_potential_output_aliasing_induced_by_updates(self):

        A = self.shared(np.zeros((2, 2)))
        B = self.shared(np.zeros((2, 2)))
        C = np.zeros((2, 2))
        D = tensor.dmatrix()
        DD = D + 5

        f = pfunc([D], [], updates=[(A, D), (B, D)])
        f(C)

        assert not np.may_share_memory(data_of(A), data_of(B))
        f = pfunc([D], [], updates=[(A, D[:]), (B, D)])
        f(C)
        assert not np.may_share_memory(data_of(A), data_of(B))
        f = pfunc([D], [], updates=[(A, (D + 5)), (B, D[:])])
        f(C)
        assert not np.may_share_memory(data_of(A), data_of(B))

        f = pfunc([D], [], updates=[(A, (D + 5)), (B, D)])
        f(C)
        assert not np.may_share_memory(data_of(A), data_of(B))

        f = pfunc([D], DD, updates=[(A, DD[:1]), (B, DD)])
        R = f(C)
        assert not np.may_share_memory(data_of(A), data_of(B))
        assert not np.may_share_memory(R, data_of(B))
        assert not np.may_share_memory(R, data_of(A))

        f = pfunc([D], DD, updates=[(A, DD[:1]), (B, (DD[:1] * 2))])
        R = f(C)
        assert not np.may_share_memory(data_of(A), data_of(B))
        assert not np.may_share_memory(R, data_of(B))
        assert not np.may_share_memory(R, data_of(A))

        f = pfunc([D], (DD * 4),
                  updates=[(A, (DD[:1] * 3)), (B, (DD[:1] * 2))])
        R = f(C)
        assert not np.may_share_memory(data_of(A), data_of(B))
        assert not np.may_share_memory(R, data_of(B))
        assert not np.may_share_memory(R, data_of(A))

        f = pfunc([D], (DD * 4),
                  updates=[(A, (DD[:1] * 3)), (B, (DD[:1] * 3))])
        R = f(C)
        assert not np.may_share_memory(data_of(A), data_of(B))
        assert not np.may_share_memory(R, data_of(B))
        assert not np.may_share_memory(R, data_of(A))
Example #26
    def test_infer_shape(self):

        adscal = at.dscalar()
        bdscal = at.dscalar()
        adscal_val = np.random.random()
        bdscal_val = np.random.random() + 1
        out = assert_op(adscal, bdscal)
        self._compile_and_check([adscal, bdscal], [out],
                                [adscal_val, bdscal_val], Assert)

        admat = at.dmatrix()
        admat_val = np.random.random((3, 4))
        adscal_val += 1
        out = assert_op(admat, adscal, bdscal)
        self._compile_and_check([admat, adscal, bdscal], [out],
                                [admat_val, adscal_val, bdscal_val], Assert)
Example #27
def test_Sparse_convert_variable():
    x = dmatrix(name="x")
    y = sp_matrix("csc", dtype="float64", name="y")
    z = sp_matrix("csr", dtype="float64", name="z")

    assert y.type.convert_variable(z) is None

    # TODO FIXME: This is a questionable result, because `x.type` is associated
    # with a dense `Type`, but, since `TensorType` is a base class of `Sparse`,
    # we would need to add sparse/dense logic to `TensorType`, and we don't
    # want to do that.
    assert x.type.convert_variable(y) is y

    # TODO FIXME: We should be able to do this.
    with pytest.raises(NotImplementedError):
        y.type.convert_variable(x)
Example #28
    def test_maxpool(self):
        # generate flattened images
        maxpoolshps = ((2, 2), (3, 3), (4, 4), (5, 5), (6, 6))
        imval = np.random.rand(4, 5, 10, 10)

        images = tensor.dmatrix()
        for maxpoolshp in maxpoolshps:

            # symbolic stuff
            output, outshp = sp.max_pool(images, imval.shape[1:], maxpoolshp)
            f = function(
                [
                    images,
                ],
                [
                    output,
                ],
            )
            output_val = f(imval.reshape(imval.shape[0], -1))

            # numeric verification
            my_output_val = np.zeros((
                imval.shape[0],
                imval.shape[1],
                imval.shape[2] // maxpoolshp[0],
                imval.shape[3] // maxpoolshp[1],
            ))
            assert np.prod(my_output_val.shape[1:]) == np.prod(
                np.r_[imval.shape[1], outshp])

            for n in range(imval.shape[0]):
                for k in range(imval.shape[1]):
                    for i in range(imval.shape[2] // maxpoolshp[0]):
                        for j in range(imval.shape[3] // maxpoolshp[1]):
                            ii, jj = i * maxpoolshp[0], j * maxpoolshp[1]
                            patch = imval[n, k, ii:ii + maxpoolshp[0],
                                          jj:jj + maxpoolshp[1]]
                            my_output_val[n, k, i, j] = np.max(patch)
            my_output_val = my_output_val.reshape(imval.shape[0], -1)
            assert np.all(output_val == my_output_val)

            def mp(input):
                output, outshp = sp.max_pool(input, imval.shape[1:],
                                             maxpoolshp)
                return output

            utt.verify_grad(mp, [imval.reshape(imval.shape[0], -1)])
Example #29
    def test_borrow_input(self):
        # Tests that the contract for io.In is respected. When borrow=False, it should be
        # impossible for outputs to be aliased to the input variables provided by the user,
        # either through a view-map or a destroy map. New tests should be added in the future
        # when borrow=True is implemented.

        a = tt.dmatrix()
        aval = np.random.rand(3, 3)

        # when borrow=False, test that a destroy map cannot alias output to input
        f = aesara.function([In(a, borrow=False)], Out(a + 1, borrow=True))
        assert np.all(f(aval) == aval + 1)
        assert not np.may_share_memory(aval, f(aval))

        # when borrow=False, test that a viewmap cannot alias output to input
        f = aesara.function([In(a, borrow=False)], Out(a[0, :], borrow=True))
        assert np.all(f(aval) == aval[0, :])
        assert not np.may_share_memory(aval, f(aval))
Example #30
    def test_no_make_node(self):
        class DoubleOp(Op):
            """An Op without make_node"""

            __props__ = ()

            itypes = [tt.dmatrix]
            otypes = [tt.dmatrix]

            def perform(self, node, inputs, outputs):
                inp = inputs[0]
                output = outputs[0]
                output[0] = inp * 2

        x_input = tt.dmatrix("x_input")
        f = aesara.function([x_input], DoubleOp()(x_input))
        inp = np.random.rand(5, 4)
        out = f(inp)
        assert np.allclose(inp * 2, out)
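For comparison, the `itypes`/`otypes` shortcut above is roughly what an explicit `make_node` spells out by hand. A minimal sketch (assuming `Apply` from `aesara.graph.basic`):

from aesara.graph.basic import Apply

class DoubleOpExplicit(Op):
    """The same Op, with make_node written out instead of itypes/otypes."""

    __props__ = ()

    def make_node(self, x):
        x = tt.as_tensor_variable(x)
        return Apply(self, [x], [x.type()])

    def perform(self, node, inputs, outputs):
        outputs[0][0] = inputs[0] * 2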