Example #1
    def test_xent_thing_int32(self):
        x = matrix("x")
        y = lvector("y")
        yi = aet.cast(y, "int32")
        expressions = [
            aet_sum(-log(softmax(x)[aet.arange(yi.shape[0]), yi])),
            -aet_sum(log(softmax(x)[aet.arange(yi.shape[0]), yi])),
            -aet_sum(log(softmax(x))[aet.arange(yi.shape[0]), yi]),
            aet_sum(-log(softmax(x))[aet.arange(yi.shape[0]), yi]),
        ]

        for expr in expressions:
            fgraph = FunctionGraph([x, y], [expr])
            optdb.query(OPT_FAST_RUN).optimize(fgraph)

            ops = [node.op for node in fgraph.toposort()]
            assert len(ops) == 5
            assert crossentropy_softmax_argmax_1hot_with_bias in ops
            assert not [1 for o in ops if isinstance(o, AdvancedSubtensor)]

            # Also verify the gradient wrt x
            fgraph = FunctionGraph([x, y], [grad(expr, x)])
            optdb.query(OPT_FAST_RUN).optimize(fgraph)

            ops = [node.op for node in fgraph.toposort()]
            assert len(ops) == 3
            assert crossentropy_softmax_1hot_with_bias_dx in ops
            assert softmax_legacy in ops
            assert softmax_grad_legacy not in ops
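
For reference, a small NumPy sketch (illustrative names only) of the equivalence these four test expressions rely on: indexing -log(softmax(x)) at the target labels gives the same quantity as the one-hot cross-entropy.

import numpy as np

# Indexing -log(softmax(x)) at the target labels equals the one-hot
# cross-entropy; this is what the fused op computes in one pass.
rng = np.random.default_rng(0)
x = rng.normal(size=(4, 3))
y = np.array([0, 2, 1, 1])
sm = np.exp(x) / np.exp(x).sum(axis=1, keepdims=True)
picked = -np.log(sm)[np.arange(y.shape[0]), y]
one_hot = np.eye(3)[y]
assert np.allclose(picked.sum(), -(one_hot * np.log(sm)).sum())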
Example #2
    def test_scipy_paper_example2(self):
        """ This just sees if things compile well and if they run """
        rng = numpy.random

        x = matrix()
        y = vector()
        w = shared(rng.randn(100))
        b = shared(np.zeros(()))

        # Construct Aesara expression graph
        p_1 = 1 / (1 + exp(-dot(x, w) - b))
        xent = -y * log(p_1) - (1 - y) * log(1 - p_1)
        prediction = p_1 > 0.5
        cost = xent.mean() + 0.01 * (w ** 2).sum()
        gw, gb = grad(cost, [w, b])

        # Compile expressions to functions
        train = function(
            inputs=[x, y],
            outputs=[prediction, xent],
            updates=[(w, w - 0.1 * gw), (b, b - 0.1 * gb)],
        )
        function(inputs=[x], outputs=prediction)

        N = 4
        feats = 100
        D = (rng.randn(N, feats), rng.randint(size=4, low=0, high=2))
        training_steps = 10
        for i in range(training_steps):
            pred, err = train(D[0], D[1])
Example #3
    def test_check_isfinite(self):
        x = vector()
        f = aesara.function([x], (x + 2) * 5, mode="DEBUG_MODE")
        g = aesara.function([x], log(x), mode="DEBUG_MODE")

        # this should work
        f(np.log([3, 4, 5]).astype(config.floatX))

        # If TensorType.filter_checks_isfinite were True, these calls would
        # raise a ValueError when filtering the inputs; since it is not,
        # DebugMode checks internally and raises InvalidValueError instead.
        with pytest.raises(InvalidValueError):
            f(np.log([3, -4, 5]).astype(config.floatX))
        with pytest.raises(InvalidValueError):
            f((np.asarray([0, 1.0, 0]) / 0).astype(config.floatX))
        with pytest.raises(InvalidValueError):
            f((np.asarray([1.0, 1.0, 1.0]) / 0).astype(config.floatX))

        # generating an invalid value internally should trigger
        # InvalidValueError
        with pytest.raises(InvalidValueError):
            g(np.asarray([3, -4, 5], dtype=config.floatX))

        # this should disable the exception
        TensorType.filter_checks_isfinite = False
        aesara.compile.mode.predefined_modes[
            "DEBUG_MODE"].check_isfinite = False
        # insert several Inf
        f(np.asarray(np.asarray([1.0, 1.0, 1.0]) / 0, dtype=config.floatX))
Example #4
File: ops.py Project: ricardoV94/aesara
def local_log_pow(fgraph, node):
    if node.op == log:
        (x, ) = node.inputs
        if x.owner and x.owner.op == aet_pow:
            base, exponent = x.owner.inputs
            # TODO: reason to be careful with dtypes?
            return [exponent * log(base)]
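
A quick NumPy sanity check (illustrative only) of the identity this rewrite applies, which holds for positive bases.

import numpy as np

# log(base ** exponent) == exponent * log(base) for positive bases; a rewrite
# like the one above assumes log is only taken where the identity is valid.
base, exponent = np.array([2.0, 3.0, 10.0]), 4.0
assert np.allclose(np.log(base ** exponent), exponent * np.log(base))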
Example #5
def test_GpuCrossentropySoftmaxArgmax1HotWithBias():
    # This is a basic test for GpuCrossentropySoftmaxArgmax1HotWithBias.
    # We check that we loop when there are too many threads.

    n_in = 1000
    batch_size = 4097
    n_out = 1250

    if not isinstance(mode_with_gpu, aesara.compile.debugmode.DebugMode):
        n_in = 4098
        n_out = 4099

    y = lvector("y")

    b = fvector("b")

    # We precompute the dot product with a big shape beforehand so that the
    # GpuCrossentropySoftmax1HotWithBiasDx test does not fail with the error
    # "the launch timed out and was terminated" on GPU cards that are not
    # powerful enough. The big shape is needed to check the corner case.
    dot_result = fmatrix("dot_result")

    xx = np.asarray(np.random.rand(batch_size, n_in), dtype=np.float32)
    yy = np.ones((batch_size, ), dtype="int32")
    b_values = np.zeros((n_out, ), dtype="float32")
    W_values = np.asarray(np.random.rand(n_in, n_out), dtype="float32")

    dot_value = np.asarray(np.dot(xx, W_values), dtype="float32")
    del W_values
    p_y_given_x = aesara.tensor.nnet.softmax(dot_result + b)
    y_pred = argmax(p_y_given_x, axis=-1)
    loss = -mean(log(p_y_given_x)[aet.arange(y.shape[0]), y])
    dW = grad(loss, dot_result)
    classify = aesara.function(inputs=[y, b, dot_result],
                               outputs=[loss, y_pred, dW],
                               mode=mode_without_gpu)
    classify_gpu = aesara.function(inputs=[y, b, dot_result],
                                   outputs=[loss, y_pred, dW],
                                   mode=mode_with_gpu)

    assert any([
        isinstance(node.op,
                   aesara.tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias)
        for node in classify.maker.fgraph.toposort()
    ])
    assert any([
        isinstance(node.op, GpuCrossentropySoftmaxArgmax1HotWithBias)
        for node in classify_gpu.maker.fgraph.toposort()
    ])

    out = classify(yy, b_values, dot_value)
    gout = classify_gpu(yy, b_values, dot_value)

    assert len(out) == len(gout) == 3
    utt.assert_allclose(out[0], gout[0])
    utt.assert_allclose(out[2], gout[2], atol=3e-6)
    utt.assert_allclose(out[1], gout[1])
Example #6
File: ops.py Project: ricardoV94/aesara
def local_log_prod_sqr(fgraph, node):
    if node.op == log:
        (x, ) = node.inputs
        if x.owner and isinstance(x.owner.op, Prod):
            # we cannot always make this substitution because
            # the prod might include negative terms
            p = x.owner.inputs[0]

            # p is the matrix we're reducing with prod
            if is_positive(p):
                return [log(p).sum(axis=x.owner.op.axis)]
Example #7
    def test_local_logsoftmax_opt(self, axis):
        # Test the Logsoftmax substitution
        #
        # Check that log(softmax(x)) is substituted with LogSoftmax(x). Note
        # that only the forward pass is checked (the gradient is not).

        x = matrix("x")
        sm = softmax(x, axis=axis)
        logsm = log(sm)
        f = aesara.function([x], logsm)
        assert isinstance(f.maker.fgraph.outputs[0].owner.op, LogSoftmax)
        assert check_stack_trace(f, ops_to_check=LogSoftmax)
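
To see why this substitution is worth making, here is an illustrative NumPy sketch (not part of the test): the naive log(softmax(x)) underflows for extreme logits, while the shifted log-softmax form stays finite.

import numpy as np

# Naive log(softmax(x)) produces nan/-inf for extreme logits, whereas
# x - max(x) - log(sum(exp(x - max(x)))) remains finite.
x = np.array([[1000.0, 0.0, -1000.0]])
naive = np.log(np.exp(x) / np.exp(x).sum(axis=1, keepdims=True))
shifted = x - x.max(axis=1, keepdims=True)
stable = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
print(naive)   # [[ nan -inf -inf]] plus runtime warnings
print(stable)  # [[ 0. -1000. -2000.]]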
Example #8
def local_log_prod_sqr(fgraph, node):
    """
    This utilizes a boolean `positive` tag on matrices.
    """
    if node.op == log:
        (x,) = node.inputs
        if x.owner and isinstance(x.owner.op, Prod):
            # we cannot always make this substitution because
            # the prod might include negative terms
            p = x.owner.inputs[0]

            # p is the matrix we're reducing with prod
            if getattr(p.tag, "positive", None) is True:
                return [log(p).sum(axis=x.owner.op.axis)]
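
A short NumPy check (illustrative only) of the identity behind this rewrite: for strictly positive inputs, the log of a product equals the sum of the logs.

import numpy as np

# For a strictly positive matrix p, log(prod(p, axis)) == sum(log(p), axis),
# which is what the rewrite returns when the `positive` tag says it is safe.
rng = np.random.default_rng(0)
p = rng.random((3, 4)) + 0.1
assert np.allclose(np.log(p.prod(axis=1)), np.log(p).sum(axis=1))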
Example #9
    def test_log1msigm_to_softplus(self):
        x = matrix()

        out = log(1 - sigmoid(x))
        f = aesara.function([x], out, mode=self.m)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 2
        assert isinstance(topo[0].op.scalar_op, ScalarSoftplus)
        assert isinstance(topo[1].op.scalar_op, aesara.scalar.Neg)
        # assert check_stack_trace(f, ops_to_check='all')
        f(np.random.rand(54, 11).astype(config.floatX))

        # Same test with a flatten
        out = log(1 - aet.flatten(sigmoid(x)))
        f = aesara.function([x], out, mode=self.m)

        # assert check_stack_trace(f, ops_to_check='all')
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 3
        assert aet.is_flat(topo[0].outputs[0])
        assert isinstance(topo[1].op.scalar_op, ScalarSoftplus)
        assert isinstance(topo[2].op.scalar_op, aesara.scalar.Neg)
        f(np.random.rand(54, 11).astype(config.floatX))

        # Same test with a reshape
        out = log(1 - sigmoid(x).reshape([x.size]))
        f = aesara.function([x], out, mode=self.m)
        topo = f.maker.fgraph.toposort()
        # assert len(topo) == 3
        assert any(isinstance(node.op, Reshape) for node in topo)
        assert any(
            isinstance(
                getattr(node.op, "scalar_op", None),
                ScalarSoftplus,
            ) for node in topo)
        f(np.random.rand(54, 11).astype(config.floatX))
Example #10
    def test_log1pexp_to_softplus(self):
        m = aesara.config.mode
        if m == "FAST_COMPILE":
            m = "FAST_RUN"

        x = vector()

        out = log(1 + exp(x))
        f = aesara.function([x], out, mode=self.m)

        # Fix ticket #4581 first
        # assert check_stack_trace(f, ops_to_check='all')
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op.scalar_op, ScalarSoftplus)
        f(np.random.rand(54).astype(config.floatX))
Example #11
    def test_logsigm_to_softplus(self):
        x = vector()

        out = log(sigmoid(x))
        f = aesara.function([x], out, mode=self.m)

        # Fix ticket #4581 first
        # assert check_stack_trace(
        #     f, ops_to_check=(aesara.scalar.Neg,
        #                      ScalarSoftplus))
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 3
        assert isinstance(topo[0].op.scalar_op, aesara.scalar.Neg)
        assert isinstance(topo[1].op.scalar_op, ScalarSoftplus)
        assert isinstance(topo[2].op.scalar_op, aesara.scalar.Neg)
        f(np.random.rand(54).astype(config.floatX))
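
The softplus rewrites exercised in the tests above rest on two sigmoid identities; a small NumPy sketch checking them on moderate values (illustrative, not part of the test suite):

import numpy as np

# With softplus(x) = log(1 + exp(x)):
#   log(sigmoid(x))     == -softplus(-x)
#   log(1 - sigmoid(x)) == -softplus(x)
x = np.linspace(-10.0, 10.0, 101)
sigmoid = 1.0 / (1.0 + np.exp(-x))
assert np.allclose(np.log(sigmoid), -np.log1p(np.exp(-x)))
assert np.allclose(np.log(1.0 - sigmoid), -np.log1p(np.exp(x)))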
Example #12
def test_jax_logp():

    mu = vector("mu")
    mu.tag.test_value = np.r_[0.0, 0.0].astype(config.floatX)
    tau = vector("tau")
    tau.tag.test_value = np.r_[1.0, 1.0].astype(config.floatX)
    sigma = vector("sigma")
    sigma.tag.test_value = (1.0 / get_test_value(tau)).astype(config.floatX)
    value = vector("value")
    value.tag.test_value = np.r_[0.1, -10].astype(config.floatX)

    logp = (-tau * (value - mu)**2 + log(tau / np.pi / 2.0)) / 2.0
    conditions = [sigma > 0]
    alltrue = aet_all([aet_all(1 * val) for val in conditions])
    normal_logp = aet.switch(alltrue, logp, -np.inf)

    fgraph = FunctionGraph([mu, tau, sigma, value], [normal_logp])

    compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
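
As a sanity check of the closed-form expression used for logp above, a short sketch comparing it against scipy.stats.norm.logpdf (assuming SciPy is available; the names here are illustrative):

import numpy as np
from scipy import stats

# Normal log-density parameterized by precision tau, i.e. sigma = 1/sqrt(tau):
#   logp = (-tau * (value - mu)**2 + log(tau / (2 * pi))) / 2
mu, tau, value = 0.0, 1.0, np.array([0.1, -10.0])
logp = (-tau * (value - mu) ** 2 + np.log(tau / np.pi / 2.0)) / 2.0
assert np.allclose(logp, stats.norm.logpdf(value, loc=mu, scale=1.0 / np.sqrt(tau)))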
Example #13
    def test_grad_log1msigm(self):
        # At some point, this returned nan, because (1 - sigm(x)) was
        # on both the numerator and the denominator of a fraction,
        # but the two nodes in question had not been merged.
        x = matrix("x")
        lr = scalar("lr")

        s = sigmoid(x)
        l = log(1 - s)
        c = l.mean()
        ux = x - lr * aesara.grad(c, x)

        # Before the optimization, inf and NaN will be produced in the graph,
        # and DebugMode will complain. Everything is fine afterwards.
        mode = self.get_mode()
        if not isinstance(mode, aesara.compile.debugmode.DebugMode):
            f = aesara.function([x, lr], ux, mode=mode)
            ux_v = f([[50]], 0.1)
            assert not np.isnan(ux_v)
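
A NumPy sketch (illustrative only) of the numerical issue the comment describes: the gradient simplifies to -sigmoid(x), but the unsimplified form divides by 1 - sigmoid(x), which underflows to zero for large inputs.

import numpy as np

# d/dx log(1 - sigmoid(x)) simplifies to -sigmoid(x), but evaluating
# sigmoid'(x) / (1 - sigmoid(x)) directly turns into 0 / 0 once
# 1 - sigmoid(x) underflows for large x.
x = np.float64(50.0)
s = 1.0 / (1.0 + np.exp(-x))               # rounds to exactly 1.0 in float64
naive_grad = -(s * (1.0 - s)) / (1.0 - s)  # 0 / 0 -> nan
simplified_grad = -s                       # finite, approximately -1
print(naive_grad, simplified_grad)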
Example #14
def test_stabilize_log_softmax():
    mode = aesara.compile.mode.get_default_mode()
    mode = mode.including("local_log_softmax", "specialize")

    x = matrix()
    y = softmax(x)
    z = log(y)

    f = aesara.function([x], z, mode=mode)
    assert check_stack_trace(f, ops_to_check="all")

    # check that the softmax has been optimized out
    for node in f.maker.fgraph.toposort():
        assert not isinstance(node.op, y.owner.op.__class__)

    # call the function so debug mode can verify the optimized
    # version matches the unoptimized version
    rng = np.random.default_rng([2012, 8, 22])
    f(np.cast[config.floatX](rng.random((2, 3))))
Example #15
    def test_logsoftmax_grad_true_div_elemwise(self):
        # Checks that the gradient of an expression similar to a log(softmax)
        # but with a different elemwise operation than true_div is not
        # optimized.

        x = matrix("x")
        y = log(softmax(x))
        g = grad(y.sum(), x)

        softmax_grad_node = g.owner
        assert softmax_grad_node.op == softmax_grad_legacy
        true_div_node = softmax_grad_node.inputs[0].owner
        assert true_div_node.op == true_div

        # We replace the elemwise true_div op by an elemwise add.
        new_g = softmax_grad_legacy(add(*true_div_node.inputs),
                                    softmax_grad_node.inputs[1])

        fgraph = FunctionGraph([x], [new_g])
        optdb.query(OPT_FAST_RUN).optimize(fgraph)

        assert softmax_grad_legacy in [n.op for n in fgraph.toposort()]
Example #16
    def negative_log_likelihood(self, y):
        r"""Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.

        .. math::

            \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
            \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
                \ell (\theta=\{W,b\}, \mathcal{D})

        :type y: TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label

        Note: we use the mean instead of the sum so that
              the learning rate is less dependent on the batch size
        """
        # y.shape[0] is (symbolically) the number of rows in y, i.e., the
        # number of examples (call it n) in the minibatch.
        # aet.arange(y.shape[0]) is a symbolic vector that will contain
        # [0, 1, 2, ..., n-1].
        # aet.log(self.p_y_given_x) is a matrix of log-probabilities (call it
        # LP) with one row per example and one column per class.
        # LP[aet.arange(y.shape[0]), y] is a vector v containing
        # [LP[0, y[0]], LP[1, y[1]], ..., LP[n-1, y[n-1]]], and
        # aet.mean(LP[aet.arange(y.shape[0]), y]) is the mean (across
        # minibatch examples) of the elements in v, i.e., the mean
        # log-likelihood across the minibatch.
        return -mean(log(self.p_y_given_x)[aet.arange(y.shape[0]), y])
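
An illustrative NumPy sketch of the indexing trick described in the comments above (toy numbers, not part of the model):

import numpy as np

# LP[arange(n), y] picks, for each row, the log-probability of that row's
# correct class; the negative mean of that vector is the loss.
LP = np.log(np.array([[0.7, 0.2, 0.1],
                      [0.1, 0.8, 0.1]]))
y = np.array([0, 1])
picked = LP[np.arange(y.shape[0]), y]   # [log 0.7, log 0.8]
print(-picked.mean())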
Example #17
    def test_crossentropy_softmax_1hot_with_bias_dxcale_cost(self):
        x = matrix("x")
        y = lvector("y")
        a = scalar("a")

        def validate_grad_graph(func):
            # The graph of the gradient should not have softmaxgrad anymore
            has_cx1hotdx = False
            has_softmax = False
            has_softmaxdx = False
            for node in func.maker.fgraph.toposort():
                if node.op == crossentropy_softmax_1hot_with_bias_dx:
                    has_cx1hotdx = True
                if node.op == softmax_legacy:
                    has_softmax = True
                if node.op == softmax_grad_legacy:
                    has_softmaxdx = True

            assert has_cx1hotdx
            assert has_softmax
            assert not has_softmaxdx

        # Cases to test
        expressions = [
            a * aet_sum(-log(softmax(x)[aet.arange(y.shape[0]), y])),
            -a * aet_sum(log(softmax(x)[aet.arange(y.shape[0]), y])),
            a * (-aet_sum(log(softmax(x)[aet.arange(y.shape[0]), y]))),
            a * aet_sum(log(softmax(x)[aet.arange(y.shape[0]), y])),
            a * aet_sum(-log(softmax(x))[aet.arange(y.shape[0]), y]),
            -a * aet_sum(log(softmax(x))[aet.arange(y.shape[0]), y]),
            a * (-aet_sum(log(softmax(x))[aet.arange(y.shape[0]), y])),
            a * aet_sum(log(softmax(x))[aet.arange(y.shape[0]), y]),
            a * mean(-log(softmax(x)[aet.arange(y.shape[0]), y])),
            -a * mean(log(softmax(x)[aet.arange(y.shape[0]), y])),
            a * (-mean(log(softmax(x)[aet.arange(y.shape[0]), y]))),
            a * mean(log(softmax(x)[aet.arange(y.shape[0]), y])),
            a * mean(-log(softmax(x))[aet.arange(y.shape[0]), y]),
            -a * mean(log(softmax(x))[aet.arange(y.shape[0]), y]),
            a * (-mean(log(softmax(x))[aet.arange(y.shape[0]), y])),
            a * mean(log(softmax(x))[aet.arange(y.shape[0]), y]),
        ]

        for expr in expressions:
            fgraph = FunctionGraph([x, y, a], [expr])
            optdb.query(OPT_FAST_RUN).optimize(fgraph)

            assert 5 <= len(fgraph.toposort()) <= 10

            ops = {node.op for node in fgraph.toposort()}
            assert crossentropy_softmax_argmax_1hot_with_bias in ops
            assert softmax_legacy not in ops

            # Verify the gradient wrt x
            fgraph = FunctionGraph([x, y, a], [grad(expr, x)])
            optdb.query(OPT_FAST_RUN).optimize(fgraph)

            assert 3 <= len(fgraph.toposort()) <= 6

            ops = {node.op for node in fgraph.toposort()}
            assert crossentropy_softmax_1hot_with_bias_dx in ops
            assert softmax_legacy in ops
            assert softmax_grad_legacy not in ops

            # Verify the gradient when providing output gradient
            fgraph = FunctionGraph(
                [x, y, a], [grad(expr, x, known_grads={expr: a * x.sum()})])
            optdb.query(OPT_FAST_RUN).optimize(fgraph)

            assert 6 <= len(fgraph.toposort()) <= 8

            ops = {node.op for node in fgraph.toposort()}
            assert crossentropy_softmax_1hot_with_bias_dx in ops
            assert softmax_legacy in ops
            assert softmax_grad_legacy not in ops
Example #18
    def test_get_rid_of_advanced_indexing_version_of_xent(self):
        x = matrix("x")
        b = vector("b")
        y = lvector("y")

        # Basic case
        expressions = [
            aet_sum(-log(softmax(x)[aet.arange(y.shape[0]), y])),
            -aet_sum(log(softmax(x)[aet.arange(y.shape[0]), y])),
            -aet_sum(log(softmax(x))[aet.arange(y.shape[0]), y]),
            aet_sum(-log(softmax(x))[aet.arange(y.shape[0]), y]),
        ]
        for expr in expressions:

            fgraph = FunctionGraph([x, y], [expr])
            optdb.query(OPT_FAST_RUN).optimize(fgraph)

            ops = [node.op for node in fgraph.toposort()]
            assert len(ops) == 4
            assert crossentropy_softmax_argmax_1hot_with_bias in ops
            assert not [1 for o in ops if isinstance(o, AdvancedSubtensor)]

            # Also verify the gradient wrt x
            fgraph = FunctionGraph([x, y], [grad(expr, x)])
            optdb.query(OPT_FAST_RUN).optimize(fgraph)

            ops = [node.op for node in fgraph.toposort()]
            assert len(ops) == 2
            assert crossentropy_softmax_1hot_with_bias_dx in ops
            assert softmax_legacy in ops
            assert softmax_grad_legacy not in ops

        # Test that a biased softmax is optimized correctly
        bias_expressions = [
            aet_sum(-log(softmax(x + b)[aet.arange(y.shape[0]), y])),
            -aet_sum(log(softmax(b + x)[aet.arange(y.shape[0]), y])),
            -aet_sum(log(softmax(x + b))[aet.arange(y.shape[0]), y]),
            aet_sum(-log(softmax(b + x))[aet.arange(y.shape[0]), y]),
        ]

        for expr in bias_expressions:
            fgraph = FunctionGraph([x, b, y], [expr, x])
            optdb.query(OPT_FAST_RUN).optimize(fgraph)

            ops = [node.op for node in fgraph.toposort()]
            assert len(ops) == 2  # [big_op, sum]
            assert crossentropy_softmax_argmax_1hot_with_bias in ops

            fgraph = FunctionGraph([x, b, y], [grad(expr, x)])
            optdb.query(OPT_FAST_RUN).optimize(fgraph)

            ops = [node.op for node in fgraph.toposort()]
            assert len(ops) == 2
            assert crossentropy_softmax_1hot_with_bias_dx in ops
            assert softmax_with_bias in ops
            assert softmax_grad_legacy not in ops

        # Test that using "mean" instead of sum works, too
        mean_expressions = [
            mean(-log(softmax(x)[aet.arange(y.shape[0]), y])),
            -mean(log(softmax(x)[aet.arange(y.shape[0]), y])),
            -mean(log(softmax(x))[aet.arange(y.shape[0]), y]),
            mean(-log(softmax(x))[aet.arange(y.shape[0]), y]),
        ]

        for expr in mean_expressions:

            fgraph = FunctionGraph([x, y], [expr])
            optdb.query(OPT_FAST_RUN).optimize(fgraph)

            ops = [node.op for node in fgraph.toposort()]
            assert len(ops) == 6
            assert crossentropy_softmax_argmax_1hot_with_bias in ops
            assert not [1 for o in ops if isinstance(o, AdvancedSubtensor)]

            fgraph = FunctionGraph([x, y], [grad(expr, x)])
            optdb.query(OPT_FAST_RUN).optimize(fgraph)

            ops = [node.op for node in fgraph.toposort()]
            assert len(ops) == 5
            # there's an extra dimshuffle in there
            # but I can't think of a good rule to get rid of it
            assert crossentropy_softmax_1hot_with_bias_dx in ops
            assert softmax_legacy in ops
            assert softmax_grad_legacy not in ops

        mean_bias_expressions = [
            mean(-log(softmax(x + b)[aet.arange(y.shape[0]), y])),
            -mean(log(softmax(b + x)[aet.arange(y.shape[0]), y])),
            -mean(log(softmax(x + b))[aet.arange(y.shape[0]), y]),
            mean(-log(softmax(b + x))[aet.arange(y.shape[0]), y]),
        ]

        for expr in mean_bias_expressions:

            fgraph = FunctionGraph([x, b, y], [expr])
            optdb.query(OPT_FAST_RUN).optimize(fgraph)

            ops = [node.op for node in fgraph.toposort()]
            assert len(ops) == 4
            assert crossentropy_softmax_argmax_1hot_with_bias in ops
            assert not [1 for o in ops if isinstance(o, AdvancedSubtensor)]

            fgraph = FunctionGraph([x, b, y], [grad(expr, x)])
            optdb.query(OPT_FAST_RUN).optimize(fgraph)

            ops = [node.op for node in fgraph.toposort()]
            assert len(ops) == 5
            assert crossentropy_softmax_1hot_with_bias_dx in ops
            assert softmax_with_bias in ops
            assert softmax_grad_legacy not in ops
Example #19
def myfunc(x):
    sm = softmax(x, axis=axis)
    logsm = log(sm)
    return logsm
Example #20
def test_test_value_op():

    x = log(np.ones((5, 5)))
    v = op.get_test_value(x)

    assert np.allclose(v, np.zeros((5, 5)))
Example #21
    def normal(
        self,
        size,
        avg=0.0,
        std=1.0,
        ndim=None,
        dtype=None,
        nstreams=None,
        truncate=False,
        **kwargs,
    ):
        """
        Sample a tensor of values from a normal distribution.

        Parameters
        ----------
        size : int_vector_like
            Array dimensions for the output tensor.
        avg : float_like, optional
            The mean value for the truncated normal to sample from (defaults to 0.0).
        std : float_like, optional
            The standard deviation for the truncated normal to sample from (defaults to 1.0).
        truncate : bool, optional
            Truncates the normal distribution at 2 standard deviations if True (defaults to False).
            When this flag is set, the standard deviation of the result will be less than the one specified.
        ndim : int, optional
            The number of dimensions for the output tensor (defaults to None).
            This argument is necessary if the size argument is ambiguous on the number of dimensions.
        dtype : str, optional
            The data-type for the output tensor. If not specified,
            the dtype is inferred from avg and std, but it is at least as precise as floatX.
        kwargs
            Other keyword arguments for random number generation (see uniform).

        Returns
        -------
        samples : TensorVariable
            An Aesara tensor of samples randomly drawn from a normal distribution.

        """
        size = _check_size(size)
        avg = undefined_grad(as_tensor_variable(avg))
        std = undefined_grad(as_tensor_variable(std))

        if dtype is None:
            dtype = aes.upcast(config.floatX, avg.dtype, std.dtype)

        avg = at.cast(avg, dtype=dtype)
        std = at.cast(std, dtype=dtype)

        # Generate an even number of uniform samples. Do manual constant
        # folding to reduce the optimizer's work.
        if isinstance(size, Constant):
            n_odd_samples = size.prod(dtype="int64")
        else:
            n_odd_samples = prod(size, dtype="int64")
        n_even_samples = n_odd_samples + n_odd_samples % 2
        uniform = self.uniform(
            (n_even_samples, ),
            low=0.0,
            high=1.0,
            ndim=1,
            dtype=dtype,
            nstreams=nstreams,
            **kwargs,
        )

        # Box-Muller transform
        u1 = uniform[:n_even_samples // 2]
        u2 = uniform[n_even_samples // 2:]
        r = sqrt(-2.0 * log(u1))
        theta = np.array(2.0 * np.pi, dtype=dtype) * u2
        cos_theta, sin_theta = cos(theta), sin(theta)
        z0 = r * cos_theta
        z1 = r * sin_theta

        if truncate:
            # use valid samples
            to_fix0 = (z0 < -2.0) | (z0 > 2.0)
            to_fix1 = (z1 < -2.0) | (z1 > 2.0)
            z0_valid = z0[at.nonzero(~to_fix0)]
            z1_valid = z1[at.nonzero(~to_fix1)]

            # re-sample invalid samples
            to_fix0 = at.nonzero(to_fix0)[0]
            to_fix1 = at.nonzero(to_fix1)[0]
            n_fix_samples = to_fix0.size + to_fix1.size
            lower = at.constant(1.0 / np.e**2, dtype=dtype)
            u_fix = self.uniform(
                (n_fix_samples, ),
                low=lower,
                high=1.0,
                ndim=1,
                dtype=dtype,
                nstreams=nstreams,
                **kwargs,
            )
            r_fix = sqrt(-2.0 * log(u_fix))
            z0_fixed = r_fix[:to_fix0.size] * cos_theta[to_fix0]
            z1_fixed = r_fix[to_fix0.size:] * sin_theta[to_fix1]

            # pack everything together to a useful result
            norm_samples = at.join(0, z0_valid, z0_fixed, z1_valid, z1_fixed)
        else:
            norm_samples = at.join(0, z0, z1)
        if isinstance(n_odd_samples, Variable):
            samples = norm_samples[:n_odd_samples]
        elif n_odd_samples % 2 == 1:
            samples = norm_samples[:-1]
        else:
            samples = norm_samples
        samples = reshape(samples, newshape=size, ndim=ndim)
        samples *= std
        samples += avg

        return samples
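
A minimal NumPy sketch of the Box-Muller step used above (illustrative only), mapping pairs of uniform samples to standard normal samples:

import numpy as np

# Two uniforms u1, u2 in (0, 1] map to two independent standard normals.
rng = np.random.default_rng(42)
n = 100_000
u1 = 1.0 - rng.random(n)          # shift [0, 1) to (0, 1] so log(u1) is finite
u2 = rng.random(n)
r = np.sqrt(-2.0 * np.log(u1))
theta = 2.0 * np.pi * u2
z = np.concatenate([r * np.cos(theta), r * np.sin(theta)])
print(z.mean(), z.std())          # close to 0 and 1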
Example #22
def binom_log_prob(n, p, value):
    return binomln(n, value) + value * log(p) + (n - value) * log(1 - p)
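
A short sketch checking this formula against scipy.stats.binom.logpmf, assuming binomln(n, k) denotes the log binomial coefficient (the helper itself is not shown here; SciPy is assumed to be available):

import numpy as np
from scipy import special, stats

def binomln(n, k):
    # Log of the binomial coefficient, via log-gamma.
    return special.gammaln(n + 1) - special.gammaln(k + 1) - special.gammaln(n - k + 1)

n, p, value = 10, 0.3, np.arange(11)
logpmf = binomln(n, value) + value * np.log(p) + (n - value) * np.log(1 - p)
assert np.allclose(logpmf, stats.binom.logpmf(value, n, p))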