def test_uniform_vector(self):
        random = RandomStreams(utt.fetch_seed())
        low = tensor.dvector()
        high = tensor.dvector()
        out = random.uniform(low=low, high=high)
        assert out.ndim == 1
        f = function([low, high], out)

        low_val = [0.1, 0.2, 0.3]
        high_val = [1.1, 2.2, 3.3]
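        # RandomStreams presumably derives each stream's seed this same way,
        # so this NumPy RNG should reproduce the stream's samples exactly.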
        seed_gen = np.random.RandomState(utt.fetch_seed())
        numpy_rng = np.random.RandomState(int(seed_gen.randint(2**30)))

        # Arguments of size (3,)
        val0 = f(low_val, high_val)
        numpy_val0 = numpy_rng.uniform(low=low_val, high=high_val)
        assert np.all(val0 == numpy_val0)

        # arguments of size (2,)
        val1 = f(low_val[:-1], high_val[:-1])
        numpy_val1 = numpy_rng.uniform(low=low_val[:-1], high=high_val[:-1])
        assert np.all(val1 == numpy_val1)

        # Specifying the size explicitly
        g = function([low, high], random.uniform(low=low,
                                                 high=high,
                                                 size=(3, )))
        val2 = g(low_val, high_val)
        numpy_rng = np.random.RandomState(int(seed_gen.randint(2**30)))
        numpy_val2 = numpy_rng.uniform(low=low_val, high=high_val, size=(3, ))
        assert np.all(val2 == numpy_val2)
        with pytest.raises(ValueError):
            g(low_val[:-1], high_val[:-1])

    def test_normal_vector(self):
        random = RandomStreams(utt.fetch_seed())
        avg = tensor.dvector()
        std = tensor.dvector()
        out = random.normal(avg=avg, std=std)
        assert out.ndim == 1
        f = function([avg, std], out)

        avg_val = [1, 2, 3]
        std_val = [0.1, 0.2, 0.3]
        seed_gen = np.random.RandomState(utt.fetch_seed())
        numpy_rng = np.random.RandomState(int(seed_gen.randint(2**30)))

        # Arguments of size (3,)
        val0 = f(avg_val, std_val)
        numpy_val0 = numpy_rng.normal(loc=avg_val, scale=std_val)
        assert np.allclose(val0, numpy_val0)

        # arguments of size (2,)
        val1 = f(avg_val[:-1], std_val[:-1])
        numpy_val1 = numpy_rng.normal(loc=avg_val[:-1], scale=std_val[:-1])
        assert np.allclose(val1, numpy_val1)

        # Specifying the size explicitly
        g = function([avg, std], random.normal(avg=avg, std=std, size=(3, )))
        val2 = g(avg_val, std_val)
        numpy_rng = np.random.RandomState(int(seed_gen.randint(2**30)))
        numpy_val2 = numpy_rng.normal(loc=avg_val, scale=std_val, size=(3, ))
        assert np.allclose(val2, numpy_val2)
        with pytest.raises(ValueError):
            g(avg_val[:-1], std_val[:-1])

    def test3(self):
        a = tensor.dvector()
        w2 = sort(a)
        f = aesara.function([a], w2)
        gv = f(self.v_val)
        gt = np.sort(self.v_val)
        utt.assert_allclose(gv, gt)
    def test_fail(self):
        # Test that conv2d fails for dimensions other than 2 or 3.

        with pytest.raises(Exception):
            conv.conv2d(tt.dtensor4(), tt.dtensor3())
        with pytest.raises(Exception):
            conv.conv2d(tt.dtensor3(), tt.dvector())

def test_argsort():
    # Set up
    rng = np.random.RandomState(seed=utt.fetch_seed())
    m_val = rng.rand(3, 2)
    v_val = rng.rand(4)

    # Example 1
    a = tensor.dmatrix()
    w = argsort(a)
    f = aesara.function([a], w)
    gv = f(m_val)
    gt = np.argsort(m_val)
    utt.assert_allclose(gv, gt)

    # Example 2
    a = tensor.dmatrix()
    axis = tensor.lscalar()
    w = argsort(a, axis)
    f = aesara.function([a, axis], w)
    for axis_val in 0, 1:
        gv = f(m_val, axis_val)
        gt = np.argsort(m_val, axis_val)
        utt.assert_allclose(gv, gt)

    # Example 3
    a = tensor.dvector()
    w2 = argsort(a)
    f = aesara.function([a], w2)
    gv = f(v_val)
    gt = np.argsort(v_val)
    utt.assert_allclose(gv, gt)

    # Example 4
    a = tensor.dmatrix()
    axis = tensor.lscalar()
    l = argsort(a, axis, "mergesort")
    f = aesara.function([a, axis], l)
    for axis_val in 0, 1:
        gv = f(m_val, axis_val)
        gt = np.argsort(m_val, axis_val)
        utt.assert_allclose(gv, gt)

    # Example 5
    a = tensor.dmatrix()
    axis = tensor.lscalar()
    a1 = ArgSortOp("mergesort", [])
    a2 = ArgSortOp("quicksort", [])
    # All of the assertions below should hold
    assert a1 != a2
    assert a1 == ArgSortOp("mergesort", [])
    assert a2 == ArgSortOp("quicksort", [])

    # Example 6: Testing axis=None
    a = tensor.dmatrix()
    w2 = argsort(a, None)
    f = aesara.function([a], w2)
    gv = f(m_val)
    gt = np.argsort(m_val, None)
    utt.assert_allclose(gv, gt)
def test_stickbreaking_accuracy():
    val = np.array([-30])
    x = at.dvector("x")
    x.tag.test_value = val
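    # backward followed by forward should compose to the identity map, even
    # for an extreme input like -30.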
    identity_f = aesara.function([x],
                                 tr.stick_breaking.forward(
                                     tr.stick_breaking.backward(x)))
    close_to(val, identity_f(val), tol)
def test_simplex_accuracy():
    val = np.array([-30])
    x = at.dvector("x")
    x.tag.test_value = val
    identity_f = aesara.function([x],
                                 tr.simplex.forward(x,
                                                    tr.simplex.backward(x, x)))
    close_to(val, identity_f(val), tol)
    def test_optimizations_preserved(self):
        a = tt.dvector()  # the a is for 'anonymous' (un-named).
        x = tt.dvector("x")
        s = tt.dvector("s")
        xm = tt.dmatrix("x")
        sm = tt.dmatrix("s")

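        # The repeated transposes and (x / x) are no-ops the optimizer should
        # remove; unpickling must preserve the optimized graph.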
        f = function(
            [a, x, s, xm, sm],
            ((a.T.T) * (tt.dot(xm, (sm.T.T.T)) + x).T * (x / x) + s),
        )
        old_default_mode = config.mode
        old_default_opt = config.optimizer
        old_default_link = config.linker
        try:
            str_f = pickle.dumps(f, protocol=-1)
            config.mode = "Mode"
            config.linker = "py"
            config.optimizer = "None"
            g = pickle.loads(str_f)
            # print g.maker.mode
            # print compile.mode.default_mode
        except NotImplementedError as e:
            # In Python 3 the exception message lives in e.args
            if e.args[0].startswith("DebugMode is not pickl"):
                g = "ok"
        finally:
            config.mode = old_default_mode
            config.optimizer = old_default_opt
            config.linker = old_default_link

        if g == "ok":
            return

        assert f.maker is not g.maker
        assert f.maker.fgraph is not g.maker.fgraph
        tf = f.maker.fgraph.toposort()
        tg = g.maker.fgraph.toposort()
        assert len(tf) == len(tg)
        for nf, ng in zip(tf, tg):
            assert nf.op == ng.op
            assert len(nf.inputs) == len(ng.inputs)
            assert len(nf.outputs) == len(ng.outputs)
            assert [i.type for i in nf.inputs] == [i.type for i in ng.inputs]
            assert [i.type for i in nf.outputs] == [i.type for i in ng.outputs]
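
    # Fragment of a linker benchmark: build_graph (defined elsewhere)
    # presumably chains steps_a ops on x, and the loop times 500 calls
    # on a large input to measure per-call overhead.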
        def time_linker(name, linker):
            steps_a = 10
            x = tensor.dvector()
            a = build_graph(x, steps_a)

            f_a = function([x], a, mode=Mode(optimizer=None, linker=linker()))
            inp = np.random.rand(1000000)
            for i in range(500):
                f_a(inp)
    def __init__(
        self,
        input=None,
        target=None,
        n_input=1,
        n_hidden=1,
        n_output=1,
        lr=1e-3,
        **kw,
    ):
        super().__init__(**kw)

        if input is None:
            input = tensor.dvector("input")
        if target is None:
            target = tensor.dvector("target")

        self.input = input
        self.target = target
        self.lr = shared(lr, "learning_rate")
        self.w1 = shared(np.zeros((n_hidden, n_input)), "w1")
        self.w2 = shared(np.zeros((n_output, n_hidden)), "w2")
        # print self.lr.type

        self.hidden = sigmoid(tensor.dot(self.w1, self.input))
        self.output = tensor.dot(self.w2, self.hidden)
        self.cost = tensor.sum((self.output - self.target) ** 2)

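        # Plain SGD updates: step each weight matrix against its cost
        # gradient, scaled by the shared learning rate.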
        self.sgd_updates = {
            self.w1: self.w1 - self.lr * tensor.grad(self.cost, self.w1),
            self.w2: self.w2 - self.lr * tensor.grad(self.cost, self.w2),
        }

        self.sgd_step = pfunc(
            params=[self.input, self.target],
            outputs=[self.output, self.cost],
            updates=self.sgd_updates,
        )

        self.compute_output = pfunc([self.input], self.output)

        self.output_from_hidden = pfunc([self.hidden], self.output)
    def test_on_real_input(self):
        x = dvector()
        rng = np.random.RandomState(23)
        xval = rng.randn(10)
        assert np.all(0 == aesara.function([x], imag(x))(xval))
        assert np.all(xval == aesara.function([x], real(x))(xval))

        x = imatrix()
        xval = np.asarray(rng.randn(3, 3) * 100, dtype="int32")
        assert np.all(0 == aesara.function([x], imag(x))(xval))
        assert np.all(xval == aesara.function([x], real(x))(xval))

    def test_vector_arguments(self):
        random = RandomStreams(utt.fetch_seed())
        low = tensor.dvector()
        out = random.uniform(low=low, high=1)
        assert out.ndim == 1
        f = function([low], out)

        seed_gen = np.random.RandomState(utt.fetch_seed())
        numpy_rng = np.random.RandomState(int(seed_gen.randint(2**30)))
        val0 = f([-5, 0.5, 0, 1])
        val1 = f([0.9])
        numpy_val0 = numpy_rng.uniform(low=[-5, 0.5, 0, 1], high=1)
        numpy_val1 = numpy_rng.uniform(low=[0.9], high=1)
        assert np.all(val0 == numpy_val0)
        assert np.all(val1 == numpy_val1)

        high = tensor.vector()
        outb = random.uniform(low=low, high=high)
        assert outb.ndim == 1
        fb = function([low, high], outb)

        numpy_rng = np.random.RandomState(int(seed_gen.randint(2**30)))
        val0b = fb([-4.0, -2], [-1, 0])
        val1b = fb([-4.0], [-1])
        numpy_val0b = numpy_rng.uniform(low=[-4.0, -2], high=[-1, 0])
        numpy_val1b = numpy_rng.uniform(low=[-4.0], high=[-1])
        assert np.all(val0b == numpy_val0b)
        assert np.all(val1b == numpy_val1b)
        with pytest.raises(ValueError):
            fb([-4.0, -2], [-1, 0, 1])
        # TODO: do we want that?
        # with pytest.raises(ValueError):
        #    fb([-4., -2], [-1])

        size = tensor.lvector()
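        # size is a symbolic vector here, so ndim=1 is needed to fix the
        # output's rank at graph-construction time.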
        outc = random.uniform(low=low, high=high, size=size, ndim=1)
        fc = function([low, high, size], outc)

        numpy_rng = np.random.RandomState(int(seed_gen.randint(2**30)))
        val0c = fc([-4.0, -2], [-1, 0], [2])
        val1c = fc([-4.0], [-1], [1])
        numpy_val0c = numpy_rng.uniform(low=[-4.0, -2], high=[-1, 0])
        numpy_val1c = numpy_rng.uniform(low=[-4.0], high=[-1])
        assert np.all(val0c == numpy_val0c)
        assert np.all(val1c == numpy_val1c)

        with pytest.raises(ValueError):
            fc([-4.0, -2], [-1, 0], [1, 2])
        with pytest.raises(ValueError):
            fc([-4.0, -2], [-1, 0], [2, 1])
        with pytest.raises(ValueError):
            fc([-4.0, -2], [-1, 0], [1])
        with pytest.raises(ValueError):
            fc([-4.0, -2], [-1], [1])

def test_scan_debugprint1():
    k = tensor.iscalar("k")
    A = tensor.dvector("A")

    # Symbolic description of the result
    result, updates = aesara.scan(
        fn=lambda prior_result, A: prior_result * A,
        outputs_info=tensor.ones_like(A),
        non_sequences=A,
        n_steps=k,
    )

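    # Each scan step multiplies the previous result by A, so result[-1]
    # holds A ** k, computed elementwise.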
    final_result = result[-1]
    output_str = aesara.printing.debugprint(final_result, file="str")
    lines = output_str.split("\n")

    expected_output = """Subtensor{int64} [id A] ''
     |Subtensor{int64::} [id B] ''
     | |for{cpu,scan_fn} [id C] ''
     | | |k [id D]
     | | |IncSubtensor{Set;:int64:} [id E] ''
     | | | |AllocEmpty{dtype='float64'} [id F] ''
     | | | | |Elemwise{add,no_inplace} [id G] ''
     | | | | | |k [id D]
     | | | | | |Subtensor{int64} [id H] ''
     | | | | |   |Shape [id I] ''
     | | | | |   | |Rebroadcast{0} [id J] ''
     | | | | |   |   |InplaceDimShuffle{x,0} [id K] ''
     | | | | |   |     |Elemwise{second,no_inplace} [id L] ''
     | | | | |   |       |A [id M]
     | | | | |   |       |InplaceDimShuffle{x} [id N] ''
     | | | | |   |         |TensorConstant{1.0} [id O]
     | | | | |   |Constant{0} [id P]
     | | | | |Subtensor{int64} [id Q] ''
     | | | |   |Shape [id R] ''
     | | | |   | |Rebroadcast{0} [id J] ''
     | | | |   |Constant{1} [id S]
     | | | |Rebroadcast{0} [id J] ''
     | | | |ScalarFromTensor [id T] ''
     | | |   |Subtensor{int64} [id H] ''
     | | |A [id M]
     | |Constant{1} [id U]
     |Constant{-1} [id V]

    Inner graphs of the scan ops:

    for{cpu,scan_fn} [id C] ''
     >Elemwise{mul,no_inplace} [id W] ''
     > |<TensorType(float64, vector)> [id X] -> [id E]
     > |A_copy [id Y] -> [id M]"""

    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()

    def test_shuffle_row_elements(self):
        # Test that RandomStreams.shuffle_row_elements generates the right results
        # Check over two calls to see if the random state is correctly updated.

        # On matrices, for each row, the elements of that row should be shuffled.
        # Note that this differs from np.random.shuffle, where all the elements
        # of the matrix are shuffled.
        random = RandomStreams(utt.fetch_seed())
        m_input = tensor.dmatrix()
        f = function([m_input],
                     random.shuffle_row_elements(m_input),
                     updates=random.updates())

        # Generate the elements to be shuffled
        val_rng = np.random.RandomState(utt.fetch_seed() + 42)
        in_mval = val_rng.uniform(-2, 2, size=(20, 5))
        fn_mval0 = f(in_mval)
        fn_mval1 = f(in_mval)
        assert not np.all(in_mval == fn_mval0)
        assert not np.all(in_mval == fn_mval1)
        assert not np.all(fn_mval0 == fn_mval1)

        rng_seed = np.random.RandomState(utt.fetch_seed()).randint(2**30)
        rng = np.random.RandomState(int(rng_seed))
        numpy_mval0 = in_mval.copy()
        numpy_mval1 = in_mval.copy()
        for row in numpy_mval0:
            rng.shuffle(row)
        for row in numpy_mval1:
            rng.shuffle(row)

        assert np.all(numpy_mval0 == fn_mval0)
        assert np.all(numpy_mval1 == fn_mval1)

        # On vectors, the behaviour is the same as np.random.shuffle,
        # except that it does not work in place, but returns a shuffled vector.
        random1 = RandomStreams(utt.fetch_seed())
        v_input = tensor.dvector()
        f1 = function([v_input], random1.shuffle_row_elements(v_input))

        in_vval = val_rng.uniform(-3, 3, size=(12, ))
        fn_vval = f1(in_vval)
        numpy_vval = in_vval.copy()
        vrng = np.random.RandomState(int(rng_seed))
        vrng.shuffle(numpy_vval)
        assert np.all(numpy_vval == fn_vval)

        # Trying to shuffle a vector with function that should shuffle
        # matrices, or vice versa, raises a TypeError
        with pytest.raises(TypeError):
            f1(in_mval)
        with pytest.raises(TypeError):
            f(in_vval)
def test_broadcast_arrays():
    x, y = aet.dvector(), aet.dmatrix()
    x_bcast, y_bcast = broadcast_arrays(x, y)

    py_mode = Mode("py", None)
    bcast_fn = function([x, y], [x_bcast, y_bcast], mode=py_mode)

    x_val = np.array([1.0], dtype=np.float64)
    y_val = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64)
    x_bcast_val, y_bcast_val = bcast_fn(x_val, y_val)
    x_bcast_exp, y_bcast_exp = np.broadcast_arrays(x_val, y_val)

    assert np.array_equal(x_bcast_val, x_bcast_exp)
    assert np.array_equal(y_bcast_val, y_bcast_exp)

def test_pydotprint_long_name():
    # This is a REALLY PARTIAL TEST.
    # It prints a graph in which variable and apply nodes have long names
    # that differ, but shortened names that do not.
    # We should not merge those nodes in the dot graph.
    x = tensor.dvector()
    mode = aesara.compile.mode.get_default_mode().excluding("fusion")
    f = aesara.function([x], [x * 2, x + x], mode=mode)
    f([1, 2, 3, 4])

    aesara.printing.pydotprint(f, max_label_size=5, print_output_file=False)
    aesara.printing.pydotprint([x * 2, x + x],
                               max_label_size=5,
                               print_output_file=False)

    def test_wrong_input(self):
        # Make sure errors are raised when image and kernel are not 4D tensors

        with pytest.raises(Exception):
            self.validate((3, 2, 8, 8), (4, 2, 5, 5),
                          "valid",
                          input=tt.dmatrix())
        with pytest.raises(Exception):
            self.validate((3, 2, 8, 8), (4, 2, 5, 5),
                          "valid",
                          filters=tt.dvector())
        with pytest.raises(Exception):
            self.validate((3, 2, 8, 8), (4, 2, 5, 5),
                          "valid",
                          input=tt.dtensor3())
    def test_2arg(self):
        x = dmatrix("x")
        x.tag.test_value = np.zeros((2, 2))
        y = dvector("y")
        y.tag.test_value = [0, 0, 0, 0]

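        # as_op wraps a plain NumPy function as an op with the declared
        # input and output types.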
        @as_op([dmatrix, dvector], dvector)
        def cumprod_plus(x, y):
            return np.cumprod(x) + y

        fn = function([x, y], cumprod_plus(x, y))
        r = fn([[1.5, 5], [2, 2]], [1, 100, 2, 200])
        r0 = np.array([2.5, 107.5, 17.0, 230.0])

        assert np.allclose(r, r0), (r, r0)

def test_multinomial_dtypes():
    p = tensor.dmatrix()
    u = tensor.dvector()
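    # With "auto", the output dtype follows the dtype of p and u.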
    m = multinomial.MultinomialFromUniform("auto")(p, u)
    assert m.dtype == "float64", m.dtype

    p = tensor.fmatrix()
    u = tensor.fvector()
    m = multinomial.MultinomialFromUniform("auto")(p, u)
    assert m.dtype == "float32", m.dtype

    p = tensor.fmatrix()
    u = tensor.fvector()
    m = multinomial.MultinomialFromUniform("float64")(p, u)
    assert m.dtype == "float64", m.dtype

    def test_multiple_inplace(self):
        skip_if_blas_ldflags_empty()
        x = tensor.dmatrix("x")
        y = tensor.dvector("y")
        z = tensor.dvector("z")
        f = aesara.function(
            [x, y, z], [tensor.dot(y, x), tensor.dot(z, x)], mode=mode_blas_opt
        )
        vx = np.random.rand(3, 3)
        vy = np.random.rand(3)
        vz = np.random.rand(3)
        out = f(vx, vy, vz)
        assert np.allclose(out[0], np.dot(vy, vx))
        assert np.allclose(out[1], np.dot(vz, vx))
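        # Presumably both dots compile to in-place gemv nodes, each writing
        # into its own AllocEmpty output buffer, hence exactly two of them.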
        assert (
            len(
                [
                    n
                    for n in f.maker.fgraph.apply_nodes
                    if isinstance(n.op, tensor.AllocEmpty)
                ]
            )
            == 2
        )
def test_functions():
    xvals = list(map(np.atleast_1d, [0.01, 0.1, 2, 100, 10000]))

    x = aet.dvector("x")
    x.tag.test_value = xvals[0]

    p = aet.iscalar("p")
    p.tag.test_value = 1

    gammaln = function([x], ps.gammaln(x))
    psi = function([x], ps.psi(x))
    function([x, p], ps.multigammaln(x, p))
    for x in xvals:
        check_vals(gammaln, ss.gammaln, x)
    for x in xvals[1:]:
        check_vals(psi, ss.psi, x)
def t_multigamma():
    xvals = list(map(np.atleast_1d, [0, 0.1, 2, 100]))

    x = aet.dvector("x")
    x.tag.test_value = xvals[0]

    p = aet.iscalar("p")
    p.tag.test_value = 1

    multigammaln = function([x, p], ps.multigammaln(x, p))

    def ssmultigammaln(a, b):
        return ss.multigammaln(a[0], b)

    for p in [0, 1, 2, 3, 4, 100]:
        for x in xvals:
            check_vals(multigammaln, ssmultigammaln, x, p)
    def test_param_mutable(self):
        a = tensor.dvector()
        a_out = a * 2  # assuming the op which makes this "in place" triggers

        # using mutable=True will let fip change the value in aval
        fip = pfunc([In(a, mutable=True)], [a_out], mode="FAST_RUN")
        aval = np.random.rand(10)
        aval2 = aval.copy()
        assert np.all(fip(aval) == (aval2 * 2))
        assert not np.all(aval == aval2)

        # using mutable=False should leave the input untouched
        f = pfunc([In(a, mutable=False)], [a_out], mode="FAST_RUN")
        aval = np.random.rand(10)
        aval2 = aval.copy()
        assert np.all(f(aval) == (aval2 * 2))
        assert np.all(aval == aval2)
    def test_param_strict(self):

        a = tensor.dvector()
        b = shared(7)
        out = a + b

        f = pfunc([In(a, strict=False)], [out])
        # works, rand generates float64 by default
        f(np.random.rand(8))
        # works, casting is allowed
        f(np.array([1, 2, 3, 4], dtype="int32"))

        f = pfunc([In(a, strict=True)], [out])
        # fails, f expects float64
        with pytest.raises(TypeError):
            f(np.array([1, 2, 3, 4], dtype="int32"))
    def test_infer_shape(self):
        x = dmatrix("x")
        x.tag.test_value = np.zeros((2, 2))
        y = dvector("y")
        y.tag.test_value = [0, 0, 0, 0]

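        # The infer_shape callback lets shape inference answer shape queries
        # without evaluating the op; here the output shape equals y's shape.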
        def infer_shape(node, shapes):
            x, y = shapes
            return [y]

        @as_op([dmatrix, dvector], dvector, infer_shape)
        def cumprod_plus(x, y):
            return np.cumprod(x) + y

        self._compile_and_check(
            [x, y],
            [cumprod_plus(x, y)],
            [[[1.5, 5], [2, 2]], [1, 100, 2, 200]],
            cumprod_plus.__class__,
            warn=False,
        )

    def test_broadcast_arguments(self):
        random = RandomStreams(utt.fetch_seed())
        low = tensor.dvector()
        high = tensor.dcol()
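        # Broadcasting an (n,) low against an (m, 1) column high should
        # yield an (m, n) matrix of draws.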
        out = random.uniform(low=low, high=high)
        assert out.ndim == 2
        f = function([low, high], out)

        rng_seed = np.random.RandomState(utt.fetch_seed()).randint(2**30)
        numpy_rng = np.random.RandomState(int(rng_seed))
        val0 = f([-5, 0.5, 0, 1], [[1.0]])
        val1 = f([0.9], [[1.0], [1.1], [1.5]])
        val2 = f([-5, 0.5, 0, 1], [[1.0], [1.1], [1.5]])

        numpy_val0 = numpy_rng.uniform(low=[-5, 0.5, 0, 1], high=[1.0])
        numpy_val1 = numpy_rng.uniform(low=[0.9], high=[[1.0], [1.1], [1.5]])
        numpy_val2 = numpy_rng.uniform(low=[-5, 0.5, 0, 1],
                                       high=[[1.0], [1.1], [1.5]])

        assert np.all(val0 == numpy_val0)
        assert np.all(val1 == numpy_val1)
        assert np.all(val2 == numpy_val2)
def test_merge_opt_runtime():
    # In the original merge optimization, the following graph caused the
    # MergeOptimizer to exhibit really bad (quadratic? exponential?)
    # performance.
    #
    # Ironically, there is actually no merging to do in this graph.

    x = tt.dvector()
    r = x
    for i in range(50):
        r = r + r / 10

    t = time.time()
    aesara.function([x], r, mode="FAST_COMPILE")
    # FAST_RUN runs the in-place optimizer, which requires a lot of
    # toposorting, which is actually pretty slow at the moment.  This
    # test was designed to test MergeOptimizer... so I'm leaving
    # toposort optimizations for a later date.
    dt = time.time() - t

    # it should never take longer than 5 seconds to compile this graph
    assert dt < 5.0, dt
    def test_broadcast_arguments(self):
        rng_R = random_state_type()
        low = tensor.dvector()
        high = tensor.dcol()
        post_r, out = uniform(rng_R, low=low, high=high)
        assert out.ndim == 2
        f = compile.function([rng_R, low, high], [post_r, out],
                             accept_inplace=True)

        rng_state0 = np.random.RandomState(utt.fetch_seed())
        numpy_rng = np.random.RandomState(utt.fetch_seed())
        post0, val0 = f(rng_state0, [-5, 0.5, 0, 1], [[1.0]])
        post1, val1 = f(post0, [0.9], [[1.0], [1.1], [1.5]])
        post2, val2 = f(post1, [-5, 0.5, 0, 1], [[1.0], [1.1], [1.5]])

        numpy_val0 = numpy_rng.uniform(low=[-5, 0.5, 0, 1], high=[1.0])
        numpy_val1 = numpy_rng.uniform(low=[0.9], high=[[1.0], [1.1], [1.5]])
        numpy_val2 = numpy_rng.uniform(low=[-5, 0.5, 0, 1],
                                       high=[[1.0], [1.1], [1.5]])

        assert np.all(val0 == numpy_val0), (val0, numpy_val0)
        assert np.all(val1 == numpy_val1)
        assert np.all(val2 == numpy_val2)

def test_pydotprint_cond_highlight():
    # This is a REALLY PARTIAL TEST.
    # I wrote it to help debug stuff.
    x = tensor.dvector()
    f = aesara.function([x], x * 2)
    f([1, 2, 3, 4])

    s = StringIO()
    new_handler = logging.StreamHandler(s)
    new_handler.setLevel(logging.DEBUG)
    orig_handler = aesara.logging_default_handler

    aesara.aesara_logger.removeHandler(orig_handler)
    aesara.aesara_logger.addHandler(new_handler)
    try:
        aesara.printing.pydotprint(f,
                                   cond_highlight=True,
                                   print_output_file=False)
    finally:
        aesara.aesara_logger.addHandler(orig_handler)
        aesara.aesara_logger.removeHandler(new_handler)

    assert (s.getvalue() == "pydotprint: cond_highlight is set but there"
            " is no IfElse node in the graph\n")
    def test_convolution(self):
        #        print '\n\n*************************************************'
        #        print '           TEST CONVOLUTION'
        #        print '*************************************************'

        # fixed parameters
        bsize = 10  # batch size
        imshp = (28, 28)
        kshp = (5, 5)
        nkern = 5
        ssizes = ((1, 1), (2, 2), (3, 3), (4, 4))
        convmodes = ("full", "valid")

        # symbolic stuff
        bias = tensor.dvector()
        kerns = tensor.dmatrix()
        input = tensor.dmatrix()
        rng = np.random.RandomState(3423489)
        filters = rng.randn(nkern, np.prod(kshp))
        biasvals = rng.randn(nkern)

        for mode in ("FAST_COMPILE", "FAST_RUN"):
            ttot, ntot = 0, 0
            for conv_mode in convmodes:
                for ss in ssizes:

                    output, outshp = sp.convolve(kerns,
                                                 kshp,
                                                 nkern,
                                                 input,
                                                 imshp,
                                                 ss,
                                                 bias=bias,
                                                 mode=conv_mode)
                    f = function([kerns, bias, input], output, mode=mode)

                    # now test with real values
                    img2d = np.arange(
                        bsize * np.prod(imshp)).reshape((bsize, ) + imshp)
                    img1d = img2d.reshape(bsize, -1)

                    # create filters (need to be flipped to use convolve2d)
                    filtersflipped = np.zeros((nkern, ) + kshp)
                    for k in range(nkern):
                        it = reversed(filters[k, :])
                        for i in range(kshp[0]):
                            for j in range(kshp[1]):
                                filtersflipped[k, i, j] = next(it)

                    # compute output with convolve2d
                    if conv_mode == "valid":
                        fulloutshp = np.array(imshp) - np.array(kshp) + 1
                    else:
                        fulloutshp = np.array(imshp) + np.array(kshp) - 1
                    ntime1 = time.time()
                    refout = np.zeros((bsize, ) + tuple(fulloutshp) +
                                      (nkern, ))
                    for b in range(bsize):
                        for n in range(nkern):
                            refout[b, ...,
                                   n] = convolve2d(img2d[b, :, :],
                                                   filtersflipped[n, ...],
                                                   conv_mode)
                    ntot += time.time() - ntime1

                    # subsample the reference output and flatten each image
                    bench1 = refout[:, 0::ss[0],
                                    0::ss[1], :].reshape(bsize, -1, nkern)
                    bench1 += biasvals.reshape(1, 1, nkern)

                    # swap the last two dimensions (output needs to be nkern x outshp)
                    bench1 = np.swapaxes(bench1, 1, 2)
                    ttime1 = time.time()
                    out1 = f(filters, biasvals, img1d)
                    ttot += time.time() - ttime1
                    temp = bench1.flatten() - out1.flatten()

                    assert (temp < 1e-5).all()