Example #1
#!/usr/bin/env python
# Aesara tutorial
# Solution to Exercise in section 'Loop'

import numpy as np

import aesara
import aesara.tensor as aet


# 1. First example

aesara.config.warn__subtensor_merge_bug = False

k = aet.iscalar("k")
A = aet.vector("A")


def inner_fct(prior_result, A):
    return prior_result * A

# Symbolic description of the result
result, updates = aesara.scan(fn=inner_fct,
                              outputs_info=aet.ones_like(A),
                              non_sequences=A, n_steps=k)

# Scan has provided us with A ** 1 through A ** k.  Keep only the last
# value. Scan notices this and does not waste memory saving them.
final_result = result[-1]

power = aesara.function(inputs=[A, k], outputs=final_result,
                        updates=updates)

print(power(list(range(10)), 2))
Example #2
import numpy as np

import aesara
import aesara.tensor as tt

rng = np.random

N = 400
feats = 784
D = (
    rng.randn(N, feats).astype(aesara.config.floatX),
    rng.randint(size=N, low=0, high=2).astype(aesara.config.floatX),
)
training_steps = 10000

# Declare Aesara symbolic variables
x = tt.matrix("x")
y = tt.vector("y")
w = aesara.shared(rng.randn(feats).astype(aesara.config.floatX), name="w")
b = aesara.shared(np.asarray(0.0, dtype=aesara.config.floatX), name="b")
x.tag.test_value = D[0]
y.tag.test_value = D[1]
# print "Initial model:"
# print w.get_value(), b.get_value()

# Construct Aesara expression graph
p_1 = 1 / (1 + tt.exp(-tt.dot(x, w) - b))  # Probability of having a one
prediction = p_1 > 0.5  # The prediction that is done: 0 or 1
xent = -y * tt.log(p_1) - (1 - y) * tt.log(1 - p_1)  # Cross-entropy
cost = xent.mean() + 0.01 * (w**2).sum()  # The cost to optimize
gw, gb = tt.grad(cost, [w, b])

# Compile expressions to functions
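The original snippet is cut off here. A plausible continuation, modeled on the standard Aesara logistic-regression tutorial (the 0.1 learning rate and the simple training loop are assumptions, not part of the scraped example):

train = aesara.function(
    inputs=[x, y],
    outputs=[prediction, xent],
    # assumed plain gradient-descent updates with learning rate 0.1
    updates=[(w, w - 0.1 * gw), (b, b - 0.1 * gb)],
)
predict = aesara.function(inputs=[x], outputs=prediction)

for i in range(training_steps):
    pred, err = train(D[0], D[1])

print("accuracy:", np.mean(predict(D[0]) == D[1]))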
Example #3
    def test_multiple_functions(self):
        a = tt.scalar()  # the a is for 'anonymous' (un-named).
        x, s = tt.scalars("xs")
        v = tt.vector("v")

        # put in some inputs
        list_of_things = [s, x, v]

        # some derived thing, whose inputs aren't all in the list
        list_of_things.append(a * x + s)

        f1 = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=0.0, update=s + a * x, mutable=True),
            ],
            s + a * x,
        )
        list_of_things.append(f1)

        # now put in a function sharing container with the previous one
        f2 = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=f1.container[s], update=s + a * x, mutable=True),
            ],
            s + a * x,
        )
        list_of_things.append(f2)

        assert isinstance(f2.container[s].storage, list)
        assert f2.container[s].storage is f1.container[s].storage

        # now put in a function with non-scalar
        v_value = np.asarray([2, 3, 4.0], dtype=config.floatX)
        f3 = function([x, In(v, value=v_value)], x + v)
        list_of_things.append(f3)

        # try to pickle the entire things
        try:
            saved_format = pickle.dumps(list_of_things, protocol=-1)
            new_list_of_things = pickle.loads(saved_format)
        except NotImplementedError as e:
            if e.args[0].startswith("DebugMode is not picklable"):
                return
            else:
                raise

        # now test our recovered new_list_of_things
        # it should be totally unrelated to the original
        # it should be interdependent in the same way as the original

        ol = list_of_things
        nl = new_list_of_things

        for i in range(4):
            assert nl[i] != ol[i]
            assert nl[i].type == ol[i].type
            assert nl[i].type is not ol[i].type

        # see if the implicit input got stored
        assert ol[3].owner.inputs[1] is s
        assert nl[3].owner.inputs[1] is not s
        assert nl[3].owner.inputs[1].type == s.type

        # moving on to the functions...
        for i in range(4, 7):
            assert nl[i] != ol[i]

        # looking at function number 1, input 's'
        assert nl[4][nl[0]] is not ol[4][ol[0]]
        assert nl[4][nl[0]] == ol[4][ol[0]]
        assert nl[4](3) == ol[4](3)

        # looking at function number 2, input 's'
        # make sure it's shared with the first function
        assert ol[4].container[ol[0]].storage is ol[5].container[ol[0]].storage
        assert nl[4].container[nl[0]].storage is nl[5].container[nl[0]].storage
        assert nl[5](3) == ol[5](3)
        assert nl[4].value[nl[0]] == 6

        assert np.all(nl[6][nl[2]] == np.asarray([2, 3.0, 4]))
Example #4
    def test_grad_override(self, cls_ofg):
        x, y = tt.vectors("xy")

        def go(inps, gs):
            x, y = inps
            (g, ) = gs
            return [g * y * 2, g * x * 1.5]

        dedz = tt.vector("dedz")
        op_mul_grad = cls_ofg([x, y, dedz], go([x, y], [dedz]))

        op_mul = cls_ofg([x, y], [x * y], grad_overrides=go)
        op_mul2 = cls_ofg([x, y], [x * y], grad_overrides=op_mul_grad)

        # single override case (function or OfG instance)
        xx, yy = tt.vector("xx"), tt.vector("yy")
        for op in [op_mul, op_mul2]:
            zz = tt.sum(op(xx, yy))
            dx, dy = tt.grad(zz, [xx, yy])
            fn = function([xx, yy], [dx, dy])
            xv = np.random.rand(16).astype(config.floatX)
            yv = np.random.rand(16).astype(config.floatX)
            dxv, dyv = fn(xv, yv)
            assert np.allclose(yv * 2, dxv)
            assert np.allclose(xv * 1.5, dyv)

        # list override case
        def go1(inps, gs):
            x, w, b = inps
            g = gs[0]
            return g * w * 2

        def go2(inps, gs):
            x, w, b = inps
            g = gs[0]
            return g * x * 1.5

        w, b = tt.vectors("wb")
        # we make the 3rd gradient default (no override)
        op_linear = cls_ofg([x, w, b], [x * w + b],
                            grad_overrides=[go1, go2, "default"])
        xx, ww, bb = tt.vector("xx"), tt.vector("yy"), tt.vector("bb")
        zz = tt.sum(op_linear(xx, ww, bb))
        dx, dw, db = tt.grad(zz, [xx, ww, bb])
        fn = function([xx, ww, bb], [dx, dw, db])
        xv = np.random.rand(16).astype(config.floatX)
        wv = np.random.rand(16).astype(config.floatX)
        bv = np.random.rand(16).astype(config.floatX)
        dxv, dwv, dbv = fn(xv, wv, bv)
        assert np.allclose(wv * 2, dxv)
        assert np.allclose(xv * 1.5, dwv)
        assert np.allclose(np.ones(16, dtype=config.floatX), dbv)

        # NullType and DisconnectedType
        op_linear2 = cls_ofg(
            [x, w, b],
            [x * w + b],
            grad_overrides=[go1, NullType()(),
                            DisconnectedType()()],
        )
        zz2 = tt.sum(op_linear2(xx, ww, bb))
        dx2, dw2, db2 = tt.grad(
            zz2,
            [xx, ww, bb],
            return_disconnected="Disconnected",
            disconnected_inputs="ignore",
            null_gradients="return",
        )
        assert isinstance(dx2.type, tt.TensorType)
        assert dx2.ndim == 1
        assert isinstance(dw2.type, NullType)
        assert isinstance(db2.type, DisconnectedType)
Example #5
def test_get_test_values_exc():
    """Tests that `get_test_values` raises an exception when debugger is set to raise and a value is missing."""

    with pytest.raises(TestValueError):
        x = tt.vector()
        assert op.get_test_values(x) == []
Example #6
File: test_model.py  Project: kc611/pymc3
    def test_no_extra(self):
        a = at.vector("a")
        a.tag.test_value = np.zeros(3, dtype=a.dtype)
        f_grad = ValueGradFunction([a.sum()], [a], {}, mode="FAST_COMPILE")
        assert f_grad._extra_vars == []
Example #7
def test_unify_Variable():
    x_at = at.vector("x")
    y_at = at.vector("y")

    z_at = x_at + y_at

    # `Variable`, `Variable`
    s = unify(z_at, z_at)
    assert s == {}

    # These `Variable`s have no owners
    v1 = MyType()()
    v2 = MyType()()

    assert v1 != v2

    s = unify(v1, v2)
    assert s is False

    op_lv = var()
    z_pat_et = etuple(op_lv, x_at, y_at)

    # `Variable`, `ExpressionTuple`
    s = unify(z_at, z_pat_et, {})

    assert op_lv in s
    assert s[op_lv] == z_at.owner.op

    res = reify(z_pat_et, s)

    assert isinstance(res, ExpressionTuple)
    assert equal_computations([res.evaled_obj], [z_at])

    z_et = etuple(at.add, x_at, y_at)

    # `ExpressionTuple`, `ExpressionTuple`
    s = unify(z_et, z_pat_et, {})

    assert op_lv in s
    assert s[op_lv] == z_et[0]

    res = reify(z_pat_et, s)

    assert isinstance(res, ExpressionTuple)
    assert equal_computations([res.evaled_obj], [z_et.evaled_obj])

    # `ExpressionTuple`, `Variable`
    s = unify(z_et, x_at, {})
    assert s is False

    # This `Op` doesn't expand into an `ExpressionTuple`
    op1_np = CustomOpNoProps(1)

    q_at = op1_np(x_at, y_at)

    a_lv = var()
    b_lv = var()
    # `Variable`, `ExpressionTuple`
    s = unify(q_at, etuple(op1_np, a_lv, b_lv))

    assert s[a_lv] == x_at
    assert s[b_lv] == y_at
Example #8
def test_get_test_values_no_debugger():
    """Tests that `get_test_values` returns `[]` when debugger is off."""

    x = tt.vector()
    assert op.get_test_values(x) == []
Example #9
                            accept_inplace=True)
    py_res = aesara_py_fn(*inputs)

    if len(fgraph.outputs) > 1:
        for j, p in zip(numba_res, py_res):
            assert_fn(j, p)
    else:
        assert_fn(numba_res, py_res)

    return numba_res


@pytest.mark.parametrize(
    "inputs, input_vals, output_fn",
    [(
        [aet.vector() for i in range(4)],
        [np.random.randn(100).astype(config.floatX) for i in range(4)],
        lambda x, y, x1, y1: (x + y) * (x1 + y1) * y,
    )],
)
def test_Elemwise(inputs, input_vals, output_fn):
    out_fg = FunctionGraph(inputs, [output_fn(*inputs)])
    compare_numba_and_py(out_fg, input_vals)


@pytest.mark.parametrize(
    "inputs, input_values",
    [
        (
            [scalar("x"), scalar("y")],
            [
Example #10
import aesara
import aesara.tensor as tt


k = tt.iscalar("k")
A = tt.vector("A")


def inner_fct(prior_result, A):
    return prior_result * A


# Symbolic description of the result
result, updates = aesara.scan(
    fn=inner_fct, outputs_info=tt.ones_like(A), non_sequences=A, n_steps=k
)

# Scan has provided us with A**1 through A**k.  Keep only the last
# value. Scan notices this and does not waste memory saving them.
final_result = result[-1]

power = aesara.function(inputs=[A, k], outputs=final_result, updates=updates)

print(power(list(range(10)), 2))
# [  0.   1.   4.   9.  16.  25.  36.  49.  64.  81.]
Example #11
    def test_function_name(self):
        x = tensor.vector("x")
        func = aesara.function([x], x + 1.0)

        regex = re.compile(os.path.basename(".*test_function_name.pyc?:14"))
        assert regex.match(func.name) is not None
Example #12
import numpy as np

import aesara
import aesara.tensor as at

aesara.config.floatX = "float32"

rng = np.random.default_rng(428)

N = 400
feats = 784
D = (
    rng.standard_normal((N, feats)).astype(aesara.config.floatX),
    rng.integers(size=N, low=0, high=2).astype(aesara.config.floatX),
)
training_steps = 10000

# Declare Aesara symbolic variables
x = at.matrix("x")
y = at.vector("y")
w = aesara.shared(rng.standard_normal(feats).astype(aesara.config.floatX),
                  name="w")
b = aesara.shared(np.asarray(0.0, dtype=aesara.config.floatX), name="b")
x.tag.test_value = D[0]
y.tag.test_value = D[1]
# print "Initial model:"
# print w.get_value(), b.get_value()

# Construct Aesara expression graph
p_1 = 1 / (1 + at.exp(-at.dot(x, w) - b))  # Probability of having a one
prediction = p_1 > 0.5  # The prediction that is done: 0 or 1
xent = -y * at.log(p_1) - (1 - y) * at.log(1 - p_1)  # Cross-entropy
cost = at.cast(xent.mean(),
               "float32") + 0.01 * (w**2).sum()  # The cost to optimize
gw, gb = at.grad(cost, [w, b])
Example #13
import numpy as np

import aesara
import aesara.tensor as aet

aesara.config.floatX = 'float32'

rng = np.random

N = 400
feats = 784
D = (rng.randn(N, feats).astype(aesara.config.floatX),
     rng.randint(size=N, low=0, high=2).astype(aesara.config.floatX))
training_steps = 10000

# Declare Aesara symbolic variables
x = aet.matrix("x")
y = aet.vector("y")
w = aesara.shared(rng.randn(feats).astype(aesara.config.floatX), name="w")
b = aesara.shared(np.asarray(0., dtype=aesara.config.floatX), name="b")
x.tag.test_value = D[0]
y.tag.test_value = D[1]
#print "Initial model:"
#print w.get_value(), b.get_value()

# Construct Aesara expression graph
p_1 = 1 / (1 + aet.exp(-aet.dot(x, w) - b))  # Probability of having a one
prediction = p_1 > 0.5  # The prediction that is done: 0 or 1
xent = -y * aet.log(p_1) - (1 - y) * aet.log(1 - p_1)  # Cross-entropy
cost = aet.cast(xent.mean(), 'float32') + \
       0.01 * (w ** 2).sum()  # The cost to optimize
gw, gb = aet.grad(cost, [w, b])
Example #14
#!/usr/bin/env python
# Aesara tutorial
# Solution to Exercise in section 'Loop'

import numpy as np

import aesara
import aesara.tensor as tt

# 1. First example

aesara.config.warn.subtensor_merge_bug = False

k = tt.iscalar("k")
A = tt.vector("A")


def inner_fct(prior_result, A):
    return prior_result * A


# Symbolic description of the result
result, updates = aesara.scan(fn=inner_fct,
                              outputs_info=tt.ones_like(A),
                              non_sequences=A,
                              n_steps=k)

# Scan has provided us with A ** 1 through A ** k.  Keep only the last
# value. Scan notices this and does not waste memory saving them.
final_result = result[-1]
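The snippet stops here; the full tutorial script (compare Example #10) continues by compiling and calling the function:

# Continuation as shown in Example #10 (not part of this truncated snippet)
power = aesara.function(inputs=[A, k], outputs=final_result,
                        updates=updates)

print(power(list(range(10)), 2))
# [  0.   1.   4.   9.  16.  25.  36.  49.  64.  81.]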
Example #15
    def make_node(self, *inputs):
        return Apply(self, list(inputs), [at.vector()])
Example #16
def find_constrained_prior(
    distribution: pm.Distribution,
    lower: float,
    upper: float,
    init_guess: Dict[str, float],
    mass: float = 0.95,
    fixed_params: Optional[Dict[str, float]] = None,
) -> Dict[str, float]:
    """
    Find optimal parameters to get `mass` % of probability
    of `pm_dist` between `lower` and `upper`.
    Note: only works for one- and two-parameter distributions, as there
    are exactly two constraints. Fix some combination of parameters
    if you want to use it on >=3-parameter distributions.

    Parameters
    ----------
    distribution : pm.Distribution
        PyMC distribution you want to set a prior on.
        Needs to have a ``logcdf`` method implemented in PyMC.
    lower : float
        Lower bound to get `mass` % of probability of `pm_dist`.
    upper : float
        Upper bound to get `mass` % of probability of `pm_dist`.
    init_guess : Dict[str, float]
        Initial guess for ``scipy.optimize.least_squares`` to find the
        optimal parameters of `pm_dist` fitting the interval constraint.
        Must be a dictionary with the name of the PyMC distribution's
        parameter as keys and the initial guess as values.
    mass : float, default 0.95
        Share of the probability mass we want between ``lower`` and ``upper``.
        Defaults to 95%.
    fixed_params : Dict[str, float], optional, default None
        Only used when `pm_dist` has at least three parameters.
        Dictionary of fixed parameters, so that there are only 2 to optimize.
        For instance, for a StudentT, you fix nu to a constant and get the
        optimized mu and sigma.

    Returns
    -------
    The optimized distribution parameters as a dictionary with the parameters'
    name as key and the optimized value as value.

    Examples
    --------
    .. code-block:: python

        # get parameters obeying constraints
        opt_params = pm.find_constrained_prior(
            pm.Gamma, lower=0.1, upper=0.4, mass=0.75, init_guess={"alpha": 1, "beta": 10}
        )

        # use these parameters to draw random samples
        samples = pm.Gamma.dist(**opt_params, size=100).eval()

        # use these parameters in a model
        with pm.Model():
            x = pm.Gamma('x', **opt_params)

        # specify fixed values before optimization
        opt_params = pm.find_constrained_prior(
            pm.StudentT,
            lower=0,
            upper=1,
            init_guess={"mu": 5, "sigma": 2},
            fixed_params={"nu": 7},
        )
    """
    assert 0.01 <= mass <= 0.99, (
        "This function optimizes the mass of the given distribution +/- "
        f"1%, so `mass` has to be between 0.01 and 0.99. You provided {mass}.")

    # exit when any parameter is not scalar:
    if np.any(np.asarray(distribution.rv_op.ndims_params) != 0):
        raise NotImplementedError(
            "`pm.find_constrained_prior` does not work with non-scalar parameters yet.\n"
            "Feel free to open a pull request on PyMC repo if you really need this feature."
        )

    dist_params = aet.vector("dist_params")
    params_to_optim = {
        arg_name: dist_params[i]
        for arg_name, i in zip(init_guess.keys(), range(len(init_guess)))
    }

    if fixed_params is not None:
        params_to_optim.update(fixed_params)

    dist = distribution.dist(**params_to_optim)

    try:
        logcdf_lower = pm.logcdf(dist, pm.floatX(lower))
        logcdf_upper = pm.logcdf(dist, pm.floatX(upper))
    except AttributeError:
        raise AttributeError(
            f"You cannot use `find_constrained_prior` with {distribution} -- it doesn't have a logcdf "
            "method yet.\nOpen an issue or, even better, a pull request on PyMC repo if you really "
            "need it.")

    cdf_error = (pm.math.exp(logcdf_upper) - pm.math.exp(logcdf_lower)) - mass
    cdf_error_fn = pm.aesaraf.compile_pymc([dist_params],
                                           cdf_error,
                                           allow_input_downcast=True)

    jac: Union[str, Callable]
    try:
        aesara_jac = pm.gradient(cdf_error, [dist_params])
        jac = pm.aesaraf.compile_pymc([dist_params],
                                      aesara_jac,
                                      allow_input_downcast=True)
    # when PyMC cannot compute the gradient
    except (NotImplementedError, NullTypeGradError):
        jac = "2-point"

    opt = optimize.least_squares(cdf_error_fn,
                                 x0=list(init_guess.values()),
                                 jac=jac)
    if not opt.success:
        raise ValueError("Optimization of parameters failed.")

    # save optimal parameters
    opt_params = {
        param_name: param_value
        for param_name, param_value in zip(init_guess.keys(), opt.x)
    }
    if fixed_params is not None:
        opt_params.update(fixed_params)

    # check mass in interval is not too far from `mass`
    opt_dist = distribution.dist(**opt_params)
    mass_in_interval = (pm.math.exp(pm.logcdf(opt_dist, upper)) -
                        pm.math.exp(pm.logcdf(opt_dist, lower))).eval()
    if (np.abs(mass_in_interval - mass)) > 0.01:
        warnings.warn(
            f"Final optimization has {(mass_in_interval if mass_in_interval.ndim < 1 else mass_in_interval[0])* 100:.0f}% of probability mass between "
            f"{lower} and {upper} instead of the requested {mass * 100:.0f}%.\n"
            "You may need to use a more flexible distribution, change the fixed parameters in the "
            "`fixed_params` dictionary, or provide better initial guesses.")

    return opt_params
Example #17
    def test_searchsortedOp_on_float_sorter(self):
        sorter = tt.vector("sorter", dtype="float32")
        with pytest.raises(TypeError):
            searchsorted(self.x, self.v, sorter=sorter)
Example #18
def test_get_test_values_ignore():
    """Tests that `get_test_values` returns `[]` when debugger is set to "ignore" and some values are missing."""

    x = tt.vector()
    assert op.get_test_values(x) == []
Example #19
    def test_exp_over_1_plus_exp(self):
        m = self.get_mode(excluding=["local_elemwise_fusion"])

        x = tt.vector()
        data = np.random.rand(54).astype(config.floatX)

        backup = config.warn.identify_1pexp_bug
        config.warn.identify_1pexp_bug = False
        try:
            # tests exp_over_1_plus_exp
            f = aesara.function([x], tt.exp(x) / (1 + tt.exp(x)), mode=m)
            assert [node.op for node in f.maker.fgraph.toposort()] == [sigmoid]
            f(data)
            f = aesara.function([x], tt.exp(x) / (2 + tt.exp(x)), mode=m)
            assert [node.op for node in f.maker.fgraph.toposort()] != [sigmoid]
            f(data)
            f = aesara.function([x], tt.exp(x) / (1 - tt.exp(x)), mode=m)
            assert [node.op for node in f.maker.fgraph.toposort()] != [sigmoid]
            f(data)
            f = aesara.function([x], tt.exp(x + 1) / (1 + tt.exp(x)), mode=m)
            assert [node.op for node in f.maker.fgraph.toposort()] != [sigmoid]
            f(data)

            # tests inv_1_plus_exp
            f = aesara.function([x],
                                tt.fill(x, 1.0) / (1 + tt.exp(-x)),
                                mode=m)
            # todo: solve issue #4589 first
            # assert check_stack_trace(f, ops_to_check=sigmoid)
            assert [node.op for node in f.maker.fgraph.toposort()] == [sigmoid]
            f(data)
            f = aesara.function([x],
                                tt.fill(x, 1.0) / (2 + tt.exp(-x)),
                                mode=m)
            assert [node.op for node in f.maker.fgraph.toposort()] != [sigmoid]
            f(data)
            f = aesara.function([x],
                                tt.fill(x, 1.0) / (1 - tt.exp(-x)),
                                mode=m)
            assert [node.op for node in f.maker.fgraph.toposort()] != [sigmoid]
            f(data)
            f = aesara.function([x],
                                tt.fill(x, 1.1) / (1 + tt.exp(-x)),
                                mode=m)
            assert [node.op for node in f.maker.fgraph.toposort()] != [sigmoid]
            f(data)

            # tests inv_1_plus_exp with neg
            f = aesara.function([x],
                                tt.fill(x, -1.0) / (1 + tt.exp(-x)),
                                mode=m)
            # todo: solve issue #4589 first
            # assert check_stack_trace(
            #     f, ops_to_check=[sigmoid, neg_inplace])
            assert [node.op for node in f.maker.fgraph.toposort()] == [
                sigmoid,
                neg_inplace,
            ]
            f(data)
            f = aesara.function([x],
                                tt.fill(x, -1.0) / (1 - tt.exp(-x)),
                                mode=m)
            assert [node.op for node in f.maker.fgraph.toposort()] != [
                sigmoid,
                neg_inplace,
            ]
            f(data)
            f = aesara.function([x],
                                tt.fill(x, -1.0) / (2 + tt.exp(-x)),
                                mode=m)
            assert [node.op for node in f.maker.fgraph.toposort()] != [
                sigmoid,
                neg_inplace,
            ]
            f(data)
            f = aesara.function([x],
                                tt.fill(x, -1.1) / (1 + tt.exp(-x)),
                                mode=m)
            assert [node.op for node in f.maker.fgraph.toposort()] != [
                sigmoid,
                neg_inplace,
            ]
            f(data)

            # tests double inv_1_plus_exp with neg
            # (-1)(exp(x)) / (1+exp(x))(1+exp(-x))
            # = (-1)/(1+exp(-x)) * exp(x)/(1+exp(x))
            # = - (sigm(x) * sigm(x))
            f = aesara.function(
                [x],
                (tt.fill(x, -1.0) * tt.exp(x)) / ((1 + tt.exp(x)) *
                                                  (1 + tt.exp(-x))),
                mode=m,
            )
            # todo: solve issue #4589 first
            # assert check_stack_trace(f, ops_to_check=[sigmoid, tt.mul])
            assert [node.op for node in f.maker.fgraph.toposort()
                    ] == [sigmoid, tt.mul]
            f(data)
            f = aesara.function(
                [x],
                (tt.fill(x, -1.1) * tt.exp(x)) / ((1 + tt.exp(x)) *
                                                  (1 + tt.exp(-x))),
                mode=m,
            )
            assert [node.op for node in f.maker.fgraph.toposort()] != [
                sigmoid,
                tt.mul,
                neg_inplace,
            ]
            f(data)
            f = aesara.function(
                [x],
                (tt.fill(x, -1.0) * tt.exp(x)) / ((2 + tt.exp(x)) *
                                                  (1 + tt.exp(-x))),
                mode=m,
            )
            assert [node.op for node in f.maker.fgraph.toposort()] != [
                sigmoid,
                tt.mul,
                neg_inplace,
            ]
            f(data)
            f = aesara.function(
                [x],
                (tt.fill(x, -1.0) * tt.exp(x)) / ((1 + tt.exp(x)) *
                                                  (2 + tt.exp(-x))),
                mode=m,
            )
            assert [node.op for node in f.maker.fgraph.toposort()] != [
                sigmoid,
                tt.mul,
                neg_inplace,
            ]
            f(data)
            f = aesara.function(
                [x],
                (tt.fill(x, -1.0) * tt.exp(x)) / ((1 + tt.exp(x)) *
                                                  (1 + tt.exp(x))),
                mode=m,
            )
            assert [node.op for node in f.maker.fgraph.toposort()] != [
                sigmoid,
                tt.mul,
                neg_inplace,
            ]
            f(data)
            f = aesara.function(
                [x],
                (tt.fill(x, -1.0) * tt.exp(x)) / ((1 + tt.exp(x)) *
                                                  (2 + tt.exp(-x))),
                mode=m,
            )
            assert [node.op for node in f.maker.fgraph.toposort()] != [
                sigmoid,
                tt.mul,
                neg_inplace,
            ]
            f(data)

        finally:
            # Restore config option.
            config.warn.identify_1pexp_bug = backup
Example #20
    def test_pickle(self):
        v = tt.vector()
        func = FunctionGraph([v], [v + 1])

        s = pickle.dumps(func)
        pickle.loads(s)
Example #21
    def test_wrong_shape(self):
        a = tt.vector()
        b = tt.matrix()

        with pytest.raises(TypeError):
            make_list((a, b))
Example #22
def augment_system(ode_func, n_states, n_theta):
    """
    Function to create augmented system.

    Take a function which specifies a set of differential equations and return
    a compiled function which allows for computation of gradients of the
    differential equation's solution with respect to the parameters.

    Uses float64 even if floatX=float32, because the scipy integrator always uses float64.

    Parameters
    ----------
    ode_func: function
        Differential equation.  Returns array-like.
    n_states: int
        Number of rows of the sensitivity matrix. (n_states)
    n_theta: int
        Number of ODE parameters

    Returns
    -------
    system: function
        Augmented system of differential equations.
    """

    # Present state of the system
    t_y = aet.vector("y", dtype="float64")
    t_y.tag.test_value = np.ones((n_states, ), dtype="float64")
    # Parameter(s).  Should be vector to allow for generalization to multiparameter
    # systems of ODEs.  Is m dimensional because it includes all initial conditions as well as ode parameters
    t_p = aet.vector("p", dtype="float64")
    t_p.tag.test_value = np.ones((n_states + n_theta, ), dtype="float64")
    # Time.  Allow for non-autonomous systems of ODEs to be analyzed
    t_t = aet.scalar("t", dtype="float64")
    t_t.tag.test_value = 2.459

    # Present state of the gradients:
    # Will always be 0 unless the parameter is the initial condition
    # Entry i,j is partial of y[i] wrt to p[j]
    dydp_vec = aet.vector("dydp", dtype="float64")
    dydp_vec.tag.test_value = make_sens_ic(n_states, n_theta, "float64")

    dydp = dydp_vec.reshape((n_states, n_states + n_theta))

    # Get symbolic representation of the ODEs by passing tensors for y, t and theta
    yhat = ode_func(t_y, t_t, t_p[n_states:])
    # Stack the results of the ode_func into a single tensor variable
    if not isinstance(yhat, (list, tuple)):
        yhat = (yhat, )
    t_yhat = aet.stack(yhat, axis=0)

    # Now compute gradients
    J = aet.jacobian(t_yhat, t_y)

    Jdfdy = aet.dot(J, dydp)

    grad_f = aet.jacobian(t_yhat, t_p)

    # This is the time derivative of dydp
    ddt_dydp = (Jdfdy + grad_f).flatten()

    system = aesara.function(inputs=[t_y, t_t, t_p, dydp_vec],
                             outputs=[t_yhat, ddt_dydp],
                             on_unused_input="ignore")

    return system
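A hypothetical usage sketch (the decay function and all numeric values below are assumptions, not part of the original source): for a single-state exponential decay dy/dt = -theta * y, the compiled augmented system can be evaluated directly.

# Hypothetical example: one state and one ODE parameter (n_states=1, n_theta=1)
def decay(y, t, p):
    # dy/dt = -theta * y, returned as a list with one expression per state
    return [-p[0] * y[0]]

system = augment_system(decay, n_states=1, n_theta=1)

# Evaluate the augmented right-hand side at y = [1.0], t = 0.0, with
# p = [y0, theta] = [1.0, 0.5] and a flattened sensitivity state dydp.
yhat, ddt_dydp = system(
    np.array([1.0]),       # current state y
    0.0,                   # time t
    np.array([1.0, 0.5]),  # initial condition followed by the ODE parameter
    np.array([1.0, 0.0]),  # dydp, shape (n_states * (n_states + n_theta),)
)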
Example #23
    string = []
    for comp in state_types:
        substring = make_str(state, comp, m)
        string.append(substring.center(text_width))
    return '|'.join(string)


if __name__ == '__main__':

    float_dtype = np.float64
    verbosity = -1
    hess_type = [False, 4]
    Ftol = 1.0E-8
    Stol = 1.0E-3

    x_dev = T.vector('x_dev')
    lambda_dev = T.vector('lda_dev')

    print('Testing unconstrained problems...')

    # make a blacklist of problem states that are non-sensical
    state_blacklist = [
        ['BOTH', 'NULL', None, None, None, None, None, None, None, None],
        ['BOTH', 'auto-diff', None, None, None, None, None, None, None, None],
        [
            'BOTH', 'precompiled', 'auto-diff', None, None, None, None, None,
            None, None
        ],
        [
            'BOTH', 'precompiled', None, 'auto-diff', None, None, None, None,
            None, None