Example #1
    def test_doc(self):
        # Ensure the code given in pfunc.txt works as expected

        # Example #1.
        a = lscalar()
        b = shared(1)
        f1 = pfunc([a], (a + b))
        f2 = pfunc([In(a, value=44)], a + b, updates={b: b + 1})
        assert b.get_value() == 1
        assert f1(3) == 4
        assert f2(3) == 4
        assert b.get_value() == 2
        assert f1(3) == 5
        b.set_value(0)
        assert f1(3) == 3

        # Example #2.
        a = lscalar()
        b = shared(7)
        f1 = pfunc([a], a + b)
        f2 = pfunc([a], a * b)
        assert f1(5) == 12
        b.set_value(8)
        assert f1(5) == 13
        assert f2(4) == 32
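        # Hedged sketch (not from pfunc.txt): In(a, value=44) gives `a` a
        # default, so the compiled function may be called with no argument.
        a = lscalar()
        b = shared(0)
        f3 = pfunc([In(a, value=44)], a + b)
        assert f3() == 44  # the default value is used
        assert f3(1) == 1  # an explicit argument overrides it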
Example #2
def test_argsort():
    # Set up
    rng = np.random.default_rng(seed=utt.fetch_seed())
    m_val = rng.random((3, 2))
    v_val = rng.random((4))

    # Example 1
    a = dmatrix()
    w = argsort(a)
    f = aesara.function([a], w)
    gv = f(m_val)
    gt = np.argsort(m_val)
    utt.assert_allclose(gv, gt)

    # Example 2
    a = dmatrix()
    axis = lscalar()
    w = argsort(a, axis)
    f = aesara.function([a, axis], w)
    for axis_val in 0, 1:
        gv = f(m_val, axis_val)
        gt = np.argsort(m_val, axis_val)
        utt.assert_allclose(gv, gt)

    # Example 3
    a = dvector()
    w2 = argsort(a)
    f = aesara.function([a], w2)
    gv = f(v_val)
    gt = np.argsort(v_val)
    utt.assert_allclose(gv, gt)

    # Example 4
    a = dmatrix()
    axis = lscalar()
    l = argsort(a, axis, "mergesort")
    f = aesara.function([a, axis], l)
    for axis_val in 0, 1:
        gv = f(m_val, axis_val)
        gt = np.argsort(m_val, axis_val)
        utt.assert_allclose(gv, gt)

    # Example 5
    a = dmatrix()
    axis = lscalar()
    a1 = ArgSortOp("mergesort", [])
    a2 = ArgSortOp("quicksort", [])
    # All of the comparisons below should hold
    assert a1 != a2
    assert a1 == ArgSortOp("mergesort", [])
    assert a2 == ArgSortOp("quicksort", [])

    # Example 6: Testing axis=None
    a = dmatrix()
    w2 = argsort(a, None)
    f = aesara.function([a], w2)
    gv = f(m_val)
    gt = np.argsort(m_val, None)
    utt.assert_allclose(gv, gt)
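    # Hedged sanity check (pure NumPy, not part of the original test): taking
    # along the argsort indices reproduces np.sort, which is the invariant
    # the graphs above rely on.
    idx = np.argsort(m_val, axis=-1)
    assert np.array_equal(
        np.take_along_axis(m_val, idx, axis=-1), np.sort(m_val, axis=-1)
    )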
Example #3
 def test_infer_shape(self):
     x = lscalar()
     self._compile_and_check([x], [self.op(x)],
                             [np.random.randint(3, 51, size=())],
                             self.op_class)
     self._compile_and_check([x], [self.op(x)], [0], self.op_class)
     self._compile_and_check([x], [self.op(x)], [1], self.op_class)
Example #4
def test_shape_basic():
    s = shape([])
    assert s.type.broadcastable == (True, )

    s = shape([10])
    assert s.type.broadcastable == (True, )

    s = shape(lscalar())
    assert s.type.broadcastable == (False, )

    class MyType(Type):
        def filter(self, *args, **kwargs):
            raise NotImplementedError()

        def __eq__(self, other):
            return isinstance(other, MyType) and other.thingy == self.thingy

    s = shape(Variable(MyType()))
    assert s.type.broadcastable == (False, )

    s = shape(np.array(1))
    assert np.array_equal(eval_outputs([s]), [])

    s = shape(np.ones((5, 3)))
    assert np.array_equal(eval_outputs([s]), [5, 3])

    s = shape(np.ones(2))
    assert np.array_equal(eval_outputs([s]), [2])

    s = shape(np.ones((5, 3, 10)))
    assert np.array_equal(eval_outputs([s]), [5, 3, 10])
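    # Hedged sketch (assumption): the same information is available through a
    # compiled function on a purely symbolic input; `dmatrix` and `aesara`
    # are assumed to be imported alongside the names used above.
    x = dmatrix()
    f = aesara.function([x], shape(x))
    assert np.array_equal(f(np.ones((4, 7))), [4, 7])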
Example #5
 def test_partial_shapes(self):
     x = matrix()
     s1 = lscalar()
     y = specify_shape(x, (s1, None))
     f = aesara.function([x, s1], y, mode=self.mode)
     assert f(np.zeros((2, 5), dtype=config.floatX), 2).shape == (2, 5)
     assert f(np.zeros((3, 5), dtype=config.floatX), 3).shape == (3, 5)
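     # Hedged sketch (assumption): a value whose first dimension does not
     # match s1 should fail at call time; recent Aesara versions raise an
     # AssertionError here. `pytest` is assumed to be imported.
     with pytest.raises(AssertionError):
         f(np.zeros((2, 5), dtype=config.floatX), 3)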
Example #6
 def test_perform(self):
     x = lscalar()
     f = function([x], self.op(x))
     M = np.random.randint(3, 51, size=())
     assert np.allclose(f(M), np.bartlett(M))
     assert np.allclose(f(0), np.bartlett(0))
     assert np.allclose(f(-1), np.bartlett(-1))
     b = np.array([17], dtype="uint8")
     assert np.allclose(f(b[0]), np.bartlett(b[0]))
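     # Hedged note as code (not in the original test): np.bartlett returns an
     # empty array for M < 1, so the f(0) and f(-1) comparisons above cover
     # the degenerate cases.
     assert np.bartlett(0).size == 0
     assert np.bartlett(-1).size == 0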
Example #7
 def test_infer_shape(self):
     x = lscalar()
     self._compile_and_check(
         [x],
         [self.op(x)],
         [np.random.default_rng().integers(3, 51, size=())],
         self.op_class,
     )
     self._compile_and_check([x], [self.op(x)], [0], self.op_class)
     self._compile_and_check([x], [self.op(x)], [1], self.op_class)
Example #8
 def test_correct_solution(self):
     x = lmatrix()
     y = lmatrix()
     z = lscalar()
     b = aesara.tensor.nlinalg.lstsq()(x, y, z)
     f = function([x, y, z], b)
     TestMatrix1 = np.asarray([[2, 1], [3, 4]])
     TestMatrix2 = np.asarray([[17, 20], [43, 50]])
     TestScalar = np.asarray(1)
     m = f(TestMatrix1, TestMatrix2, TestScalar)
     assert np.allclose(TestMatrix2, np.dot(TestMatrix1, m[0]))
Example #9
 def make_node(self, x, y, rcond):
     x = as_tensor_variable(x)
     y = as_tensor_variable(y)
     rcond = as_tensor_variable(rcond)
     return Apply(
         self,
         [x, y, rcond],
         [
             matrix(),   # least-squares solution
             dvector(),  # sums of squared residuals
             lscalar(),  # effective rank of `x`
             dvector(),  # singular values of `x`
         ],
     )
Example #10
    def test_op(self):
        n = lscalar()
        f = aesara.function([self.p, n], multinomial(n, self.p))

        _n = 5
        tested = f(self._p, _n)
        assert tested.shape == self._p.shape
        assert np.allclose(np.floor(tested.todense()), tested.todense())
        assert tested[2, 1] == _n

        n = lvector()
        f = aesara.function([self.p, n], multinomial(n, self.p))

        _n = np.asarray([1, 2, 3, 4], dtype="int64")
        tested = f(self._p, _n)
        assert tested.shape == self._p.shape
        assert np.allclose(np.floor(tested.todense()), tested.todense())
        assert tested[2, 1] == _n[2]
Example #11
    def test_downcast_dtype(self):
        # Test that the gradient of a cost with respect to a float32
        # variable is not upcast to float64.
        # x has dtype float32, regardless of the value of floatX.
        x = fscalar("x")
        y = x * 2
        z = lscalar("z")

        c = y + z
        dc_dx, dc_dy, dc_dz, dc_dc = grad(c, [x, y, z, c])
        # The dtype of dc_dy and dc_dz can be either float32 or float64,
        # that might depend on floatX, but is not specified.
        assert dc_dc.dtype in ("float32", "float64")
        assert dc_dz.dtype in ("float32", "float64")
        assert dc_dy.dtype in ("float32", "float64")

        # When the output gradient of y is passed to op.grad, it should
        # be downcast to float32, so dc_dx should also be float32.
        assert dc_dx.dtype == "float32"
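        # Hedged sketch (assumption, not in the original test): with a pure
        # float64 graph there is nothing to downcast, so the gradient dtype
        # stays float64. `dscalar` is assumed imported alongside `fscalar`.
        v = dscalar("v")
        assert grad(v * 2, v).dtype == "float64"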
Example #12
def test_Gpujoin_inplace():
    # Test that GpuJoin works inplace.
    #
    # Several inputs are passed to GpuJoin, but all except one of them are
    # empty. In this case GpuJoin should operate inplace, and the output
    # should be a view of the non-empty input.
    s = lscalar()
    data = np.array([3, 4, 5], dtype=aesara.config.floatX)
    x = gpuarray_shared_constructor(data, borrow=True)
    z = aet.zeros((s, ))

    join = GpuJoin(view=0)
    c = join(0, x, z)

    f = aesara.function([s], aesara.Out(c, borrow=True))
    if not isinstance(mode_with_gpu, aesara.compile.debugmode.DebugMode):
        assert x.get_value(borrow=True, return_internal_type=True) is f(0)
    assert np.allclose(f(0), [3, 4, 5])
Example #13
    def test_default_updates_expressions(self):
        x = shared(0)
        y = shared(1)
        a = lscalar("a")

        z = a * x
        x.default_update = x + y

        f1 = pfunc([a], z)
        f1(12)
        assert x.get_value() == 1

        f2 = pfunc([a], z, no_default_updates=True)
        assert f2(7) == 7
        assert x.get_value() == 1

        f3 = pfunc([a], z, no_default_updates=[x])
        assert f3(9) == 9
        assert x.get_value() == 1
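        # Hedged sketch continuing the state above (not in the original
        # test): without no_default_updates, each call applies
        # x.default_update again, so x keeps accumulating y.
        f4 = pfunc([a], z)
        assert f4(5) == 5          # z == a * x with x == 1
        assert x.get_value() == 2  # the default update added y == 1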
Example #14
    def check_updates(linker_name):
        x = lscalar("input")
        y = shared(np.asarray(1, "int64"), name="global")
        f = function(
            [x],
            [x, x + 34],
            updates=[(y, x + 1)],
            mode=Mode(optimizer=None, linker=linker_name),
        )
        g = function(
            [x],
            [x - 6],
            updates=[(y, y + 3)],
            mode=Mode(optimizer=None, linker=linker_name),
        )

        assert f(3, output_subset=[]) == []
        assert y.get_value() == 4
        assert g(30, output_subset=[0]) == [24]
        assert g(40, output_subset=[]) == []
        assert y.get_value() == 10
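        # Hedged sketch (assumption): output_subset indexes the declared
        # outputs, so [1] returns only x + 34 while the update on y still
        # runs, exactly as with the empty subset above.
        assert f(3, output_subset=[1]) == [37]
        assert y.get_value() == 4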
Example #15
    def test_default_updates_input(self):
        x = shared(0)
        y = shared(1)
        if PYTHON_INT_BITWIDTH == 32:
            a = iscalar("a")
        else:
            a = lscalar("a")

        x.default_update = y
        y.default_update = y + a

        f1 = pfunc([], x, no_default_updates=True)
        f1()
        assert x.get_value() == 0
        assert y.get_value() == 1

        f2 = pfunc([], x, no_default_updates=[x])
        f2()
        assert x.get_value() == 0
        assert y.get_value() == 1

        f3 = pfunc([], x, no_default_updates=[y])
        f3()
        assert x.get_value() == 1
        assert y.get_value() == 1

        f4 = pfunc([a], x)
        f4(2)
        assert x.get_value() == 1
        assert y.get_value() == 3

        f5 = pfunc([], x, updates={y: (y - 1)})
        f5()
        assert x.get_value() == 3
        assert y.get_value() == 2

        # a is needed as input if y.default_update is used
        with pytest.raises(MissingInputError):
            pfunc([], x)
Example #16
    def test_scalar_shapes(self):
        with pytest.raises(ValueError, match="will never match"):
            specify_shape(vector(), shape=())
        with pytest.raises(ValueError, match="will never match"):
            specify_shape(matrix(), shape=[])

        x = scalar()
        y = specify_shape(x, shape=())
        f = aesara.function([x], y, mode=self.mode)
        assert f(15) == 15

        x = vector()
        s = lscalar()
        y = specify_shape(x, shape=s)
        f = aesara.function([x, s], y, mode=self.mode)
        assert f([15], 1) == [15]

        x = vector()
        s = as_tensor_variable(1, dtype=np.int64)
        y = specify_shape(x, shape=s)
        f = aesara.function([x], y, mode=self.mode)
        assert f([15]) == [15]
Example #17
 def test_duplicate_inputs(self):
     x = lscalar("x")
     with pytest.raises(UnusedInputError):
         function([x, x, x], x)
Example #18
 def make_node(self, *args):
     # `args` must already be Aesara variables at this point.
     return Apply(op=self, inputs=args, outputs=[lscalar()])
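
# Hedged sketch: a minimal, self-contained Op built around the make_node
# pattern above. The class name, perform body, and import paths are
# assumptions for a recent Aesara, not taken from the source.
import numpy as np

from aesara.graph.basic import Apply
from aesara.graph.op import Op
from aesara.tensor import as_tensor_variable, lscalar


class CountInputs(Op):
    def make_node(self, *args):
        # Coerce the inputs so they are guaranteed to be Aesara variables.
        args = [as_tensor_variable(a) for a in args]
        return Apply(op=self, inputs=args, outputs=[lscalar()])

    def perform(self, node, inputs, output_storage):
        # Store the number of runtime inputs as an int64 scalar.
        output_storage[0][0] = np.int64(len(inputs))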
Example #19
def test_jax_scan_multiple_output():
    """Test a scan implementation of a SEIR model.

    SEIR model definition:
    S[t+1] = S[t] - B[t]
    E[t+1] = E[t] + B[t] - C[t]
    I[t+1] = I[t] + C[t] - D[t]

    B[t] ~ Binom(S[t], beta)
    C[t] ~ Binom(E[t], gamma)
    D[t] ~ Binom(I[t], delta)
    """
    def binomln(n, k):
        return gammaln(n + 1) - gammaln(k + 1) - gammaln(n - k + 1)

    def binom_log_prob(n, p, value):
        return binomln(n, value) + value * log(p) + (n - value) * log(1 - p)

    # sequences
    aet_C = ivector("C_t")
    aet_D = ivector("D_t")
    # outputs_info (initial conditions)
    st0 = lscalar("s_t0")
    et0 = lscalar("e_t0")
    it0 = lscalar("i_t0")
    logp_c = scalar("logp_c")
    logp_d = scalar("logp_d")
    # non_sequences
    beta = scalar("beta")
    gamma = scalar("gamma")
    delta = scalar("delta")

    # TODO: Use random streams when their JAX conversions are implemented.
    # trng = aesara.tensor.random.RandomStream(1234)

    def seir_one_step(ct0, dt0, st0, et0, it0, logp_c, logp_d, beta, gamma,
                      delta):
        # bt0 = trng.binomial(n=st0, p=beta)
        bt0 = st0 * beta
        bt0 = bt0.astype(st0.dtype)

        logp_c1 = binom_log_prob(et0, gamma, ct0).astype(logp_c.dtype)
        logp_d1 = binom_log_prob(it0, delta, dt0).astype(logp_d.dtype)

        st1 = st0 - bt0
        et1 = et0 + bt0 - ct0
        it1 = it0 + ct0 - dt0
        return st1, et1, it1, logp_c1, logp_d1

    (st, et, it, logp_c_all, logp_d_all), _ = scan(
        fn=seir_one_step,
        sequences=[aet_C, aet_D],
        outputs_info=[st0, et0, it0, logp_c, logp_d],
        non_sequences=[beta, gamma, delta],
    )
    st.name = "S_t"
    et.name = "E_t"
    it.name = "I_t"
    logp_c_all.name = "C_t_logp"
    logp_d_all.name = "D_t_logp"

    out_fg = FunctionGraph(
        [aet_C, aet_D, st0, et0, it0, logp_c, logp_d, beta, gamma, delta],
        [st, et, it, logp_c_all, logp_d_all],
    )

    s0, e0, i0 = 100, 50, 25
    logp_c0 = np.array(0.0, dtype=config.floatX)
    logp_d0 = np.array(0.0, dtype=config.floatX)
    beta_val, gamma_val, delta_val = [
        np.array(val, dtype=config.floatX)
        for val in [0.277792, 0.135330, 0.108753]
    ]
    C = np.array([3, 5, 8, 13, 21, 26, 10, 3], dtype=np.int32)
    D = np.array([1, 2, 3, 7, 9, 11, 5, 1], dtype=np.int32)

    test_input_vals = [
        C,
        D,
        s0,
        e0,
        i0,
        logp_c0,
        logp_d0,
        beta_val,
        gamma_val,
        delta_val,
    ]
    compare_jax_and_py(out_fg, test_input_vals)
Example #20
 def test_duplicate_inputs(self):
     x = lscalar("x")
     with pytest.raises(aesara.compile.UnusedInputError):
         aesara.function([x, x, x], x)
Example #21
def test_mlp():
    """
    Demonstrate stochastic gradient descent optimization for a multilayer
    perceptron

    This is demonstrated on MNIST.

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
    gradient

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: the path of the MNIST dataset file from
                         http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz


    """
    datasets = gen_data()

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    batch_size = 100  # size of the minibatch

    # compute number of minibatches for training, validation and testing
    # n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    # n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    # n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    # print '... building the model'

    # allocate symbolic variables for the data
    index = lscalar()  # index to a [mini]batch
    x = matrix("x")  # the data is presented as rasterized images
    y = ivector("y")  # the labels are presented as 1D vector of
    # [int] labels

    rng = np.random.RandomState(1234)

    # construct the MLP class
    classifier = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=500, n_out=10)

    # the cost we minimize during training is the negative log likelihood of
    # the model.
    # We take the mean of the cost over each minibatch.
    cost = classifier.negative_log_likelihood(y).mean()

    # compute the gradient of cost with respect to theta (stored in params)
    # the resulting gradients will be stored in a list gparams
    gparams = []
    for param in classifier.params:
        gparam = grad(cost, param)
        gparams.append(gparam)

    # Some optimizations needed are tagged with 'fast_run'
    # TODO: refine that and include only those
    mode = aesara.compile.get_default_mode().including("fast_run")

    updates2 = OrderedDict()

    updates2[classifier.hiddenLayer.params[0]] = grad(
        cost, classifier.hiddenLayer.params[0])
    train_model = aesara.function(
        inputs=[index],
        updates=updates2,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size],
        },
        mode=mode,
    )
    # print 'MODEL 1'
    # aesara.printing.debugprint(train_model, print_type=True)
    assert any(
        isinstance(i.op, CrossentropySoftmax1HotWithBiasDx)
        for i in train_model.maker.fgraph.toposort()
    )

    # Even without FeatureShape
    train_model = aesara.function(
        inputs=[index],
        updates=updates2,
        mode=mode.excluding("ShapeOpt"),
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size],
        },
    )
    # print
    # print 'MODEL 2'
    # aesara.printing.debugprint(train_model, print_type=True)
    assert any(
        isinstance(i.op, CrossentropySoftmax1HotWithBiasDx)
        for i in train_model.maker.fgraph.toposort()
    )