Example #1
def test_jax_BatchedDot():
    # tensor3 . tensor3
    a = tensor3("a")
    a.tag.test_value = (np.linspace(-1, 1,
                                    10 * 5 * 3).astype(config.floatX).reshape(
                                        (10, 5, 3)))
    b = tensor3("b")
    b.tag.test_value = (np.linspace(1, -1,
                                    10 * 3 * 2).astype(config.floatX).reshape(
                                        (10, 3, 2)))
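    # `BatchedDot` shares the leading batch axis and contracts the inner ones:
    # (10, 5, 3) x (10, 3, 2) -> (10, 5, 2)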
    out = aet_blas.BatchedDot()(a, b)
    fgraph = FunctionGraph([a, b], [out])
    compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])

    # A dimension mismatch should raise a TypeError for compatibility
    inputs = [get_test_value(a)[:-1], get_test_value(b)]
    opts = OptimizationQuery(include=[None], exclude=["cxx_only", "BlasOpt"])
    jax_mode = Mode(JAXLinker(), opts)
    aesara_jax_fn = function(fgraph.inputs, fgraph.outputs, mode=jax_mode)
    with pytest.raises(TypeError):
        aesara_jax_fn(*inputs)

    # matrix . matrix
    a = matrix("a")
    a.tag.test_value = np.linspace(-1, 1, 5 * 3).astype(config.floatX).reshape(
        (5, 3))
    b = matrix("b")
    b.tag.test_value = np.linspace(1, -1, 5 * 3).astype(config.floatX).reshape(
        (5, 3))
    out = aet_blas.BatchedDot()(a, b)
    fgraph = FunctionGraph([a, b], [out])
    compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
Example #2
    def test_non_zero_init(self):
        # Test the case where the initial value for the nitsot output is non-zero

        input1 = tensor3()
        input2 = tensor3()
        input3 = tensor3()

        W = aesara.shared(np.random.normal(size=(4, 5)).astype(config.floatX))
        U = aesara.shared(np.random.normal(size=(6, 7)).astype(config.floatX))

        def inner_fct(seq1, seq2, seq3, previous_output):
            temp1 = dot(seq1, W) + seq3
            temp2 = dot(seq2, U)
            dot_output = dot(temp1, temp2)
            return previous_output + dot_output

        init = aet.as_tensor_variable(np.random.normal(size=(3, 7)))
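        # each scan step yields dot((3, 5), (5, 7)) = (3, 7), matching the
        # shape of this non-zero initial value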

        # Compile the function twice, once with the optimization and once
        # without
        opt_mode = mode.including("scan")
        h, _ = aesara.scan(
            inner_fct,
            sequences=[input1, input2, input3],
            outputs_info=init,
            mode=opt_mode,
        )
        output = h[-1]
        f_opt = aesara.function([input1, input2, input3],
                                output,
                                mode=opt_mode)

        no_opt_mode = mode.excluding("scanOp_pushout_output")
        h, _ = aesara.scan(
            inner_fct,
            sequences=[input1, input2, input3],
            outputs_info=init,
            mode=no_opt_mode,
        )
        output = h[-1]
        f_no_opt = aesara.function([input1, input2, input3],
                                   output,
                                   mode=no_opt_mode)

        # Ensure that the optimization has been applied for f_opt
        # TODO

        # Compare the outputs of the 2 functions
        input1_value = np.random.random((2, 3, 4)).astype(config.floatX)
        input2_value = np.random.random((2, 5, 6)).astype(config.floatX)
        input3_value = np.random.random((2, 3, 5)).astype(config.floatX)

        output_opt = f_opt(input1_value, input2_value, input3_value)
        output_no_opt = f_no_opt(input1_value, input2_value, input3_value)

        utt.assert_allclose(output_opt, output_no_opt)
Example #3
    def test_basic(self):
        # Reported in https://github.com/Theano/Theano/issues/5730
        x = tensor3()
        y = tensor3()
        z = batched_dot(x, y[:, 0, :, np.newaxis])
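        # `y[:, 0, :, np.newaxis]` has shape (32, 600, 1), so the batched dot
        # of the (32, 19, 600) input yields a (32, 19, 1) result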
        f = aesara.function([x, y], z, mode=mode_with_gpu)
        x_num = np.arange(32 * 19 * 600, dtype=config.floatX).reshape(
            (32, 19, 600))
        y_num = np.arange(7 * 32 * 600, dtype=config.floatX).reshape(
            (32, 7, 600))
        f(x_num, y_num)
        assert f.maker.fgraph.toposort()[-2].op.inplace
Example #4
    def test_cum_op(self):
        x = tensor3("x")
        a = np.random.random((3, 5, 2)).astype(config.floatX)

        # Test axis out of bounds
        with pytest.raises(ValueError):
            cumsum(x, axis=3)
        with pytest.raises(ValueError):
            cumsum(x, axis=-4)
        with pytest.raises(ValueError):
            cumprod(x, axis=3)
        with pytest.raises(ValueError):
            cumprod(x, axis=-4)

        f = aesara.function([x], [cumsum(x), cumprod(x)])
        s, p = f(a)
        assert np.allclose(np.cumsum(a), s)  # Test axis=None
        assert np.allclose(np.cumprod(a), p)  # Test axis=None

        for axis in range(-len(a.shape), len(a.shape)):
            f = aesara.function([x],
                                [cumsum(x, axis=axis),
                                 cumprod(x, axis=axis)])
            s, p = f(a)
            assert np.allclose(np.cumsum(a, axis=axis), s)
            assert np.allclose(np.cumprod(a, axis=axis), p)
Example #5
    def test_infer_shape(self, mode):
        op_class = partial(self.op_class, mode=mode)
        x = tensor3("x")
        a = np.random.random((3, 5, 2)).astype(aesara.config.floatX)

        for axis in range(-len(a.shape), len(a.shape)):
            self._compile_and_check([x], [op_class(axis=axis)(x)], [a], GpuCumOp)
Example #6
def test_NanGuardMode():
    # Tests that NanGuardMode works by intentionally feeding in numpy.inf and
    # numpy.nan values. A working implementation should be able to capture all
    # the abnormalities.
    rng = np.random.default_rng(2482)
    x = matrix()
    w = shared(rng.standard_normal((5, 7)).astype(config.floatX))
    y = dot(x, w)

    fun = function([x], y, mode=NanGuardMode(nan_is_error=True, inf_is_error=True))
    a = rng.standard_normal((3, 5)).astype(config.floatX)

    with pytest.warns(RuntimeWarning):
        infa = np.tile((np.asarray(100.0) ** 1000000).astype(config.floatX), (3, 5))

    nana = np.tile(np.asarray(np.nan).astype(config.floatX), (3, 5))

    biga = np.tile(np.asarray(1e20).astype(config.floatX), (3, 5))
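    # NanGuardMode's `big_is_error` defaults to True, so the finite 1e20
    # values are expected to be flagged as well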

    fun(a)  # normal values

    # Temporarily silence logger
    _logger = logging.getLogger("aesara.compile.nanguardmode")
    try:
        _logger.propagate = False
        with pytest.raises(AssertionError):
            fun(infa)  # INFs
        with pytest.raises(AssertionError), pytest.warns(RuntimeWarning):
            fun(nana)  # NANs
        with pytest.raises(AssertionError):
            fun(biga)  # big values
    finally:
        _logger.propagate = True

    # slices
    a = rng.standard_normal((3, 4, 5)).astype(config.floatX)

    with pytest.warns(RuntimeWarning):
        infa = np.tile((np.asarray(100.0) ** 1000000).astype(config.floatX), (3, 4, 5))

    nana = np.tile(np.asarray(np.nan).astype(config.floatX), (3, 4, 5))

    biga = np.tile(np.asarray(1e20).astype(config.floatX), (3, 4, 5))

    x = tensor3()
    y = x[:, at.arange(2), at.arange(2), None]
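    # advanced indexing pairs the two `arange`s, selecting x[:, i, i] and
    # appending a broadcastable trailing axis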
    fun = function([x], y, mode=NanGuardMode(nan_is_error=True, inf_is_error=True))
    fun(a)  # normal values
    try:
        _logger.propagate = False
        with pytest.raises(AssertionError):
            fun(infa)  # INFs
        with pytest.raises(AssertionError), pytest.warns(RuntimeWarning):
            fun(nana)  # NANs
        with pytest.raises(AssertionError):
            fun(biga)  # big values
    finally:
        _logger.propagate = True
Example #7
    def setup_method(self):
        super().setup_method()
        self.A = tensor4("A", dtype=config.floatX)
        self.B = tensor3("B", dtype=config.floatX)
        self.a = np.random.rand(4, 6, 8, 3).astype(config.floatX)
        self.b = np.random.rand(2, 15, 30).astype(config.floatX)
        self.b1 = np.random.rand(30, 2, 15).astype(
            config.floatX
        )  # for ind=1 since we need prod(b1.shape[:ind]) == prod(b1.shape[ind:])
Example #8
    def test_infer_shape(self):
        x = tensor3("x")
        a = np.random.random((3, 5, 2)).astype(config.floatX)

        # Test axis=None
        self._compile_and_check([x], [self.op(x)], [a], self.op_class)

        for axis in range(-len(a.shape), len(a.shape)):
            self._compile_and_check([x], [cumsum(x, axis=axis)], [a], self.op_class)
Example #9
    def test_m1(self):
        t = tensor3()
        rng = np.random.RandomState(seed=utt.fetch_seed())
        val = rng.uniform(size=(3, 4, 5)).astype(config.floatX)
        for out in [
            t.reshape([-1]),
            t.reshape([-1, 5]),
            t.reshape([5, -1]),
            t.reshape([5, -1, 3]),
        ]:
            self._compile_and_check([t], [out], [val], self.op)
Example #10
    def setup_method(self):
        super().setup_method()
        self.op_class = SearchsortedOp
        self.op = SearchsortedOp()

        self.x = vector("x")
        self.v = tensor3("v")

        self.a = 30 * np.random.random(50).astype(config.floatX)
        self.b = 30 * np.random.random((8, 10, 5)).astype(config.floatX)
        self.idx_sorted = np.argsort(self.a).astype("int32")
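        # np.argsort yields the permutation that sorts `a`, suitable for the
        # `sorter` argument of np.searchsorted, which SearchsortedOp mirrors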
Example #11
    def test_multiple_out_crash(self):
        # This test failed up to commit 2faeb62c38
        p0 = self.shared(np.asarray(np.random.random([4, 8]),
                                    dtype=self.dtype))
        p1 = self.shared(np.asarray(np.random.random(8), dtype=self.dtype))
        p2 = self.shared(np.asarray(np.random.random([8, 3]),
                                    dtype=self.dtype))
        p3 = self.shared(np.asarray(np.random.random(3), dtype=self.dtype))
        p = [p0, p1, p2, p3]

        # in my code these vars are the result of applying scan
        ften0 = tensor3("ft0", dtype=self.dtype)
        fmat1 = matrix("fm1", dtype=self.dtype)
        ften2 = tensor3("ft2", dtype=self.dtype)
        fmat3 = matrix("fm3", dtype=self.dtype)

        # then I keep only the last iteration
        fsub0 = ften0[-1]
        fsub1 = fmat1[-1]
        fsub2 = ften2[-1]
        fsub3 = fmat3[-1]

        fsub = [fsub0, fsub1, fsub2, fsub3]

        acc = at.constant(1, "int8") >= 0
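        # the condition is a constant True, so at run time `ifelse` always
        # selects the `fsub` branch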

        new_positions = ifelse(acc, fsub, p)

        new_updates = [(p[0], new_positions[0])]

        f = function([ften0, fmat1, ften2, fmat3], [],
                     updates=new_updates,
                     mode=self.mode)
        self.assertFunctionContains1(f, self.get_ifelse(4))

        i1 = np.asarray(np.random.random([19, 4, 8]), dtype=self.dtype)
        i2 = np.asarray(np.random.random([19, 8]), dtype=self.dtype)
        i3 = np.asarray(np.random.random([19, 8, 3]), dtype=self.dtype)
        i4 = np.asarray(np.random.random([19, 3]), dtype=self.dtype)

        f(i1, i2, i3, i4)
Example #12
    def test_invalid_softmax_expressions(self, f):
        # Test that graphs are not rewritten into a softmax when a dimshuffle
        # swaps or adds extra dimensions, or when more than one but not all
        # axes are summed over (which is not allowed by the Softmax Op but is
        # otherwise valid)
        c = tensor3("c")
        out = f(c)
        f = aesara.function([c], out, mode=self.mode)

        f_ops = [n.op for n in f.maker.fgraph.toposort()]
        assert len(f_ops) > 1
        assert not any(isinstance(op, Softmax) for op in f_ops)
Example #13
    def test_correct_answer(self):
        a = matrix()
        b = matrix()

        x = tensor3()
        y = tensor3()

        A = np.cast[aesara.config.floatX](np.random.rand(5, 3))
        B = np.cast[aesara.config.floatX](np.random.rand(7, 2))
        X = np.cast[aesara.config.floatX](np.random.rand(5, 6, 1))
        Y = np.cast[aesara.config.floatX](np.random.rand(1, 9, 3))

        make_list((3.0, 4.0))
        c = make_list((a, b))
        z = make_list((x, y))
        fc = aesara.function([a, b], c)
        fz = aesara.function([x, y], z)
        for m, n in zip(fc(A, B), [A, B]):
            assert (m == n).all()
        for m, n in zip(fz(X, Y), [X, Y]):
            assert (m == n).all()
Example #14
    def test_savemem_opt_0_step(self):
        """
        Test a case where the savemem optimization has the opportunity to
        lower the number of steps of a Scan to 0. It tests that the
        optimization doesn't do so since Scan nodes with 0
        steps are not currently supported and doing so would result in a
        crash during the function execution.
        """

        def inner_scan_step(x_t_t, h_tm1, w):
            return dot(h_tm1, w) + x_t_t

        def outer_scan_step(x_t, w):
            h, _ = scan(
                inner_scan_step,
                sequences=[x_t[1:]],
                outputs_info=[x_t[0]],
                non_sequences=[w],
                strict=True,
                name="the_inner_scan",
            )
            return h

        def get_outputs(x, w):
            features, _ = scan(
                outer_scan_step,
                sequences=[x],
                non_sequences=[w],
                strict=True,
                name="the_outer_scan",
            )

            return_val = grad(features.sum(), w)
            return return_val

        # Compile the aesara function
        x = tensor3("x")
        w = matrix("w")
        f = function(inputs=[x, w], outputs=get_outputs(x, w), mode=self.mode)

        # Test the function to ensure it returns valid results
        x_value = (
            np.random.default_rng(utt.fetch_seed())
            .random((2, 2, 3))
            .astype(config.floatX)
        )
        w_value = (
            np.random.default_rng(utt.fetch_seed()).random((3, 3)).astype(config.floatX)
        )
        expected_output = np.tile(x_value[:, 0].sum(0), (3, 1)).transpose()

        output = f(x_value, w_value)
        utt.assert_allclose(output, expected_output)
Example #15
    def test_perform_3d(self):
        rng = np.random.default_rng(43)
        a = rng.random((3, 3, 3)).astype(config.floatX)
        x = tensor3()
        y = scalar()
        f = function([x, y], fill_diagonal(x, y))
        val = np.cast[config.floatX](rng.random() + 10)
        out = f(a, val)
        # We can't use np.fill_diagonal as it is bugged.
        assert out[0, 0, 0] == val
        assert out[1, 1, 1] == val
        assert out[2, 2, 2] == val
        assert (out == val).sum() == min(a.shape)
Example #16
def test_nonstandard_shapes():
    a = tensor3(config.floatX)
    a.tag.test_value = np.random.random((2, 3, 4)).astype(config.floatX)
    b = tensor3(config.floatX)
    b.tag.test_value = np.random.random((2, 3, 4)).astype(config.floatX)

    tl = make_list([a, b])
    tl_shape = shape(tl)
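    # the list length becomes the leading dimension of the reported shape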
    assert np.array_equal(tl_shape.get_test_value(), (2, 2, 3, 4))

    # There's no `FunctionGraph`, so it should return a `Subtensor`
    tl_shape_i = shape_i(tl, 0)
    assert isinstance(tl_shape_i.owner.op, Subtensor)
    assert tl_shape_i.get_test_value() == 2

    tl_fg = FunctionGraph([a, b], [tl], features=[ShapeFeature()])
    tl_shape_i = shape_i(tl, 0, fgraph=tl_fg)
    assert not isinstance(tl_shape_i.owner.op, Subtensor)
    assert tl_shape_i.get_test_value() == 2

    none_shape = shape(NoneConst)
    assert np.array_equal(none_shape.get_test_value(), [])
Example #17
    def test_basic_keepdims(self, axis):
        c = tensor3()
        p_y = exp(c) / exp(c).sum(axis=axis, keepdims=True)
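        # exp(c) / exp(c).sum(axis, keepdims=True) is exactly the pattern the
        # softmax rewrite looks for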

        # test that function contains softmax and no div.
        f = aesara.function([c], p_y, mode=self.mode)

        assert check_stack_trace(f, ops_to_check=Softmax)

        f_ops = [n.op for n in f.maker.fgraph.toposort()]

        assert len(f_ops) == 1
        assert isinstance(f_ops[0], Softmax)

        c_val = self.rng.random((3, 4, 5)).astype(config.floatX)
        assert np.allclose(f(c_val), sp.softmax(c_val, axis=axis))
Example #18
def valid_axis_tester(Op):
    with pytest.raises(TypeError):
        Op(1.5)

    x = [tensor3()] * Op.nin
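    # the inputs are 3d, so valid axes fall in [-3, 2]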
    with does_not_raise():
        Op(2)(*x)

    with pytest.raises(ValueError):
        Op(3)(*x)

    with does_not_raise():
        Op(-3)(*x)

    with pytest.raises(ValueError):
        Op(-4)(*x)
Example #19
    def test_perform(self):
        x = matrix()
        y = scalar()
        f = function([x, y], fill_diagonal(x, y))
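        # `fill_diagonal` writes `val` along the main diagonal; for
        # rectangular inputs the diagonal length is min(a.shape)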
        for shp in [(8, 8), (5, 8), (8, 5)]:
            a = np.random.rand(*shp).astype(config.floatX)
            val = np.cast[config.floatX](np.random.rand())
            out = f(a, val)
            # We can't use np.fill_diagonal as it is bugged.
            assert np.allclose(np.diag(out), val)
            assert (out == val).sum() == min(a.shape)

        # Test with a 3d tensor
        a = np.random.rand(3, 3, 3).astype(config.floatX)
        x = tensor3()
        y = scalar()
        f = function([x, y], fill_diagonal(x, y))
        val = np.cast[config.floatX](np.random.rand() + 10)
        out = f(a, val)
        # We can't use np.fill_diagonal as it is bugged.
        assert out[0, 0, 0] == val
        assert out[1, 1, 1] == val
        assert out[2, 2, 2] == val
        assert (out == val).sum() == min(a.shape)
Example #20
    def invalid_input_func():
        A = tensor3("A", dtype="float64")
        GpuCholesky(lower=True, inplace=False)(A)
Example #21
    def test_machine_translation(self):
        # This test case comes from https://github.com/rizar/scan-grad-speed and
        # is an example of actual computation done with scan in the context of
        # machine translation
        #
        # 'dim' has been reduced from 1000 to 5 to make the test run faster

        # Parameters from an actual machine translation run
        batch_size = 80
        seq_len = 50
        dim = 5

        # Weight matrices
        U = aesara.shared(
            np.random.normal(size=(dim, dim),
                             scale=0.0001).astype(config.floatX))
        U.name = "U"
        V = aesara.shared(U.get_value())
        V.name = "V"
        W = aesara.shared(U.get_value())
        W.name = "W"

        # Variables and their values
        x = tensor3("x")
        x_value = np.random.normal(size=(seq_len, batch_size, dim),
                                   scale=0.0001).astype(config.floatX)

        ri = tensor3("ri")
        ri_value = x_value

        zi = tensor3("zi")
        zi_value = x_value

        init = aet.alloc(np.cast[config.floatX](0), batch_size, dim)
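        # initial hidden state: a (batch_size, dim) matrix of zeros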

        def rnn_step1(
            # sequences
            x,
            ri,
            zi,
            # outputs_info
            h,
        ):
            pre_r = ri + h.dot(U)
            pre_z = zi + h.dot(V)
            r = nnet.sigmoid(pre_r)
            z = nnet.sigmoid(pre_z)

            after_r = r * h
            pre_h = x + after_r.dot(W)
            new_h = tanh(pre_h)

            res_h = z * new_h + (1 - z) * h
            return res_h

        # Compile the function twice, once with the optimization and once
        # without
        opt_mode = mode.including("scan")
        h, _ = aesara.scan(
            rnn_step1,
            sequences=[x, ri, zi],
            n_steps=seq_len,
            outputs_info=init,
            name="fpass1",
            mode=opt_mode,
        )
        cost = h[-1].sum()
        grad1 = grad(cost, [U, V, W])
        f_opt = aesara.function(inputs=[x, ri, zi],
                                outputs=grad1,
                                mode=opt_mode)

        no_opt_mode = mode.excluding("scanOp_pushout_output")
        h, _ = aesara.scan(
            rnn_step1,
            sequences=[x, ri, zi],
            n_steps=seq_len,
            outputs_info=init,
            name="fpass1",
            mode=no_opt_mode,
        )
        cost = h[-1].sum()
        grad1 = grad(cost, [U, V, W])
        f_no_opt = aesara.function(inputs=[x, ri, zi],
                                   outputs=grad1,
                                   mode=no_opt_mode)

        # Validate that the optimization has been applied
        scan_node_grad = [
            node for node in f_opt.maker.fgraph.toposort()
            if isinstance(node.op, Scan)
        ][1]

        for output in scan_node_grad.op.outputs:
            assert not (
                isinstance(output.owner.op, Elemwise)
                and any(
                    i.owner is not None and isinstance(i.owner.op, Dot)
                    for i in output.owner.inputs
                )
            )

        # Compare the outputs of the two functions on the same input data.
        f_opt_output = f_opt(x_value, ri_value, zi_value)
        f_no_opt_output = f_no_opt(x_value, ri_value, zi_value)
        utt.assert_allclose(f_opt_output, f_no_opt_output)
Example #22
    def _run(self, num_features, num_timesteps, batch_size, mode):
        # determine shapes of inputs and targets depending on the batch size
        if batch_size == 1:
            inputs_size = (num_timesteps, num_features)
            targets_size = (num_timesteps, 1)
        else:
            inputs_size = (num_timesteps, batch_size, num_features)
            targets_size = (num_timesteps, batch_size, 1)

        # make inputs and targets shared variables
        inputs = aesara.shared(self.rng.uniform(size=inputs_size).astype(
            config.floatX),
                               borrow=True)
        targets = aesara.shared(self.rng.uniform(size=targets_size).astype(
            config.floatX),
                                borrow=True)

        # create symbolic inputs and targets variables
        if batch_size == 1:
            x = matrix("inputs")
            t = matrix("targets")
        else:
            x = tensor3("inputs")
            t = tensor3("targets")
        x.tag.test_value = inputs.get_value(borrow=True)
        t.tag.test_value = targets.get_value(borrow=True)

        # create a set of parameters for a simple RNN
        W_xh = aesara.shared(
            (0.01 * self.rng.uniform(size=(num_features, 10))).astype(
                config.floatX),
            borrow=True,
        )
        W_hh = aesara.shared(
            (0.01 * self.rng.uniform(size=(10, 10))).astype(config.floatX),
            borrow=True)
        W_hy = aesara.shared(
            (0.01 * self.rng.uniform(size=(10, 1))).astype(config.floatX),
            borrow=True)
        b_h = aesara.shared(np.zeros(10).astype(config.floatX), borrow=True)
        b_y = aesara.shared(np.zeros(1).astype(config.floatX), borrow=True)

        params = [W_xh, W_hh, W_hy, b_h, b_y]

        # recurrent function
        def step(x_t, h_tm1):
            h = tanh(dot(h_tm1, W_hh) + dot(x_t, W_xh) + b_h)
            return h

        # build recurrent graph
        if batch_size == 1:
            h_0 = aet.alloc(0.0, 10).astype(config.floatX)
        else:
            h_0 = aet.alloc(0.0, batch_size, 10).astype(config.floatX)
        h, updates = aesara.scan(step, sequences=[x], outputs_info=[h_0])
        # network output
        y = dot(h, W_hy) + b_y

        # Create Gauss-Newton-Matrix object. Not really of any use here, but I
        # need it for Hessian-Free optimization.
        gn = GaussNewtonMatrix(y)

        # compute MSE
        cost = ((t - y)**2).sum(axis=1).mean()

        # Compute the cost at some other point in the parameter
        # space. Not really of any use here, but this is how I do it
        # during certain iterations of CG in the HF algorithm. There,
        # it's in fact `pi + current update proposal`.  For simplicity,
        # I just multiply by 2 here.
        cost_ = aesara.clone_replace(cost,
                                     replace={pi: 2 * pi
                                              for pi in params})

        # Compute Gauss-Newton-Matrix times some vector `v` which is `p` in CG,
        # but for simplicity, I just take the parameters vector because it's
        # already there.
        Gv = gn(v=params, cost=cost, parameters=params, damp=aet.constant(1.0))

        # compile Aesara function
        f = aesara.function([], [cost_] + Gv,
                            givens={
                                x: inputs,
                                t: targets
                            },
                            mode=mode)
        # execute
        f()
Example #23
    op_types = [
        type(node.op) for node in aesara.graph.basic.io_toposort([x, i], [z])
    ]
    assert op_types[-1] == AdvancedSubtensor


def test_print_constant():
    c = aesara.tensor.constant(1, name="const")
    assert str(c) == "const{1}"
    d = aesara.tensor.constant(1)
    assert str(d) == "TensorConstant{1}"


@pytest.mark.parametrize(
    "x, indices, new_order",
    [
        (tensor3(), (np.newaxis, slice(None), np.newaxis),
         ("x", 0, "x", 1, 2)),
        (cscalar(), (np.newaxis, ), ("x", )),
        (matrix(), (np.newaxis, ), ("x", 0, 1)),
        (matrix(), (np.newaxis, np.newaxis), ("x", "x", 0, 1)),
        (matrix(), (np.newaxis, slice(None)), ("x", 0, 1)),
        (matrix(), (np.newaxis, slice(None), slice(None)), ("x", 0, 1)),
        (matrix(), (np.newaxis, np.newaxis, slice(None)), ("x", "x", 0, 1)),
        (matrix(), (slice(None), np.newaxis), (0, "x", 1)),
        (matrix(), (slice(None), slice(None), np.newaxis), (0, 1, "x")),
        (
            matrix(),
            (np.newaxis, slice(None), np.newaxis, slice(None), np.newaxis),
            ("x", 0, "x", 1, "x"),
        ),
    ],