Example #1
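The snippet below assumes the imports from its original module; the
`aesara.sandbox.rng_mrg` path is an assumption based on the names used:

    import numpy as np

    import aesara
    from aesara import function, tensor
    from aesara.sandbox import rng_mrg
    from aesara.sandbox.rng_mrg import DotModulo
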
def multMatVect(v, A, m1, B, m2):
    """
    Multiply the first half of v by A with a modulo of m1 and the second half
    by B with a modulo of m2, returning the concatenated results.

    Parameters
    ----------
    v : ndarray
        Vector of six integers; `v[:3]` is multiplied by `A`, `v[3:]` by `B`.
    A, B : ndarray
        3x3 int64 matrices.
    m1, m2 : int
        Moduli for the two halves.

    Notes
    -----
    The inputs of dot_modulo are written directly into its storage because
    passing them explicitly takes more time than running the function's
    C code.

    """
    if multMatVect.dot_modulo is None:
        A_sym = tensor.lmatrix("A")
        s_sym = tensor.ivector("s")
        m_sym = tensor.iscalar("m")
        A2_sym = tensor.lmatrix("A2")
        s2_sym = tensor.ivector("s2")
        m2_sym = tensor.iscalar("m2")
        o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
        multMatVect.dot_modulo = function(
            [A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o, profile=False)

    # Call the compiled Aesara function this way to bypass its Python-level
    # overhead.
    f = multMatVect.dot_modulo
    f.input_storage[0].storage[0] = A
    f.input_storage[1].storage[0] = v[:3]
    f.input_storage[2].storage[0] = m1
    f.input_storage[3].storage[0] = B
    f.input_storage[4].storage[0] = v[3:]
    f.input_storage[5].storage[0] = m2
    f.fn()
    r = f.output_storage[0].storage[0]

    return r


# The compiled function is cached on the function object; initialize the
# cache so the `is None` check above works on the first call.
multMatVect.dot_modulo = None

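A minimal usage sketch (not part of the original snippet): it assumes `numpy`
as `np` and the `multMatVect` defined above; the moduli are illustrative
values only.

    i32max = np.iinfo(np.int32).max
    A = np.random.randint(0, i32max, (3, 3)).astype("int64")
    B = np.random.randint(0, i32max, (3, 3)).astype("int64")
    v = np.random.randint(0, i32max, 6).astype("int32")
    m1 = np.int32(2147483647)  # hypothetical modulus for the first half
    m2 = np.int32(2147462579)  # hypothetical modulus for the second half

    # r[:3] is A @ v[:3] mod m1; r[3:] is B @ v[3:] mod m2.
    r = multMatVect(v, A, m1, B, m2)
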
def test_multMatVect():
    A1 = tensor.lmatrix("A1")
    s1 = tensor.ivector("s1")
    m1 = tensor.iscalar("m1")
    A2 = tensor.lmatrix("A2")
    s2 = tensor.ivector("s2")
    m2 = tensor.iscalar("m2")

    g0 = rng_mrg.DotModulo()(A1, s1, m1, A2, s2, m2)
    f0 = aesara.function([A1, s1, m1, A2, s2, m2], g0)

    i32max = np.iinfo(np.int32).max

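    # The symbolic names are reused below for concrete NumPy values; `f0`
    # is already compiled, so shadowing them is harmless.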
    A1 = np.random.randint(0, i32max, (3, 3)).astype("int64")
    s1 = np.random.randint(0, i32max, 3).astype("int32")
    m1 = np.asarray(np.random.randint(i32max), dtype="int32")
    A2 = np.random.randint(0, i32max, (3, 3)).astype("int64")
    s2 = np.random.randint(0, i32max, 3).astype("int32")
    m2 = np.asarray(np.random.randint(i32max), dtype="int32")

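    # Write the inputs directly into the compiled function's input storage,
    # bypassing the Python-level argument handling (same trick as above).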
    f0.input_storage[0].storage[0] = A1
    f0.input_storage[1].storage[0] = s1
    f0.input_storage[2].storage[0] = m1
    f0.input_storage[3].storage[0] = A2
    f0.input_storage[4].storage[0] = s2
    f0.input_storage[5].storage[0] = m2

    r_a1 = rng_mrg.matVecModM(A1, s1, m1)
    r_a2 = rng_mrg.matVecModM(A2, s2, m2)
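    # Run the compiled thunk directly and read the result from the output
    # storage.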
    f0.fn()
    r_b = f0.output_storage[0].storage[0]

    assert np.allclose(r_a1, r_b[:3])
    assert np.allclose(r_a2, r_b[3:])
Example #3
    def test_correct_solution(self):
        x = tensor.lmatrix()
        y = tensor.lmatrix()
        z = tensor.lscalar()
        b = aesara.tensor.nlinalg.lstsq()(x, y, z)
        f = function([x, y, z], b)
        TestMatrix1 = np.asarray([[2, 1], [3, 4]])
        TestMatrix2 = np.asarray([[17, 20], [43, 50]])
        TestScalar = np.asarray(1)
        # lstsq returns several outputs; m[0] is the least-squares solution.
        m = f(TestMatrix1, TestMatrix2, TestScalar)
        assert np.allclose(TestMatrix2, np.dot(TestMatrix1, m[0]))
Example #4
    def test_dot(self):
        x = at.lmatrix("x")
        y = at.lmatrix("y")
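        # Re-bind x and y to CSR sparse variables built from the dense inputs.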
        x = sparse.csr_from_dense(x)
        y = sparse.csr_from_dense(y)

        z = x.__dot__(y)
        assert isinstance(z.type, SparseTensorType)

        f = aesara.function([x, y], z)
        res = f(
            [[1, 0, 2], [-1, 0, 0]],
            [[-1], [2], [1]],
        )
        assert isinstance(res, csr_matrix)
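For reference, the same product can be checked against a dense NumPy dot (a
sketch that is not part of the original test; it assumes `numpy` as `np` and
reuses `res` from above):

    dense = np.dot([[1, 0, 2], [-1, 0, 0]], [[-1], [2], [1]])
    assert np.array_equal(res.toarray(), dense)
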
Example #5
    def test_blocksparse_grad_merge(self):
        b = tensor.fmatrix()
        h = tensor.ftensor3()
        iIdx = tensor.lmatrix()
        oIdx = tensor.lmatrix()

        W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()
        W = gpuarray_shared_constructor(W_val, context=test_ctx_name)

        o = gpu_sparse_block_gemv(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
        gW = aesara.grad(o.sum(), W)

        lr = np.asarray(0.05, dtype="float32")

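        # Plain gradient-descent update; the optimizer is expected to merge
        # the `lr` scaling into the block-sparse op (checked below).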
        upd = W - lr * gW

        f1 = aesara.function([h, iIdx, b, oIdx], updates=[(W, upd)], mode=mode_with_gpu)

        # Make sure the lr update was merged.
        assert isinstance(f1.maker.fgraph.outputs[0].owner.op, GpuSparseBlockOuter)

        # Exclude the merge optimizations.
        mode = mode_with_gpu.excluding("local_merge_blocksparse_alpha")
        mode = mode.excluding("local_merge_blocksparse_output")

        f2 = aesara.function([h, iIdx, b, oIdx], updates=[(W, upd)], mode=mode)

        # Make sure the lr update is not merged.
        assert not isinstance(f2.maker.fgraph.outputs[0].owner.op, GpuSparseBlockOuter)

        f2(h_val, iIdx_val, b_val, oIdx_val)
        W_ref = W.get_value()

        # Reset the shared variable before running the merged version.
        W.set_value(W_val)
        f1(h_val, iIdx_val, b_val, oIdx_val)
        W_opt = W.get_value()

        utt.assert_allclose(W_ref, W_opt)
Example #6
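The excerpt omits the pytest parametrization that supplies `method` and
`exp_type`; a plausible, purely hypothetical shape (assuming `pytest` and
Aesara's `SparseTensorType`/`TensorType` are imported):

    @pytest.mark.parametrize(
        "method, exp_type",
        [
            ("__add__", SparseTensorType),  # hypothetical pairing
            ("__lt__", TensorType),         # hypothetical pairing
        ],
    )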
    def test_binary(self, method, exp_type):
        x = at.lmatrix("x")
        y = at.lmatrix("y")
        x = sparse.csr_from_dense(x)
        y = sparse.csr_from_dense(y)

        method_to_call = getattr(x, method)

        if exp_type == SparseTensorType:
            exp_res_type = csr_matrix
            # No warning is expected, so use `ExitStack()` as a do-nothing
            # context manager.
            cm = ExitStack()
        else:
            exp_res_type = np.ndarray
            cm = pytest.warns(UserWarning, match=".*converted to dense.*")

        with cm:
            z = method_to_call(y)

        # Normalize single outputs to a tuple for uniform checking.
        if not isinstance(z, tuple):
            z_outs = (z,)
        else:
            z_outs = z

        assert all(isinstance(out.type, exp_type) for out in z_outs)

        f = aesara.function([x, y], z)
        res = f(
            [[1, 0, 2], [-1, 0, 0]],
            [[1, 1, 2], [1, 4, 1]],
        )

        if not isinstance(res, list):
            res_outs = [res]
        else:
            res_outs = res

        assert all(isinstance(out, exp_res_type) for out in res_outs)