Example #1
 def L(self):
     if self.batched:
         L = at.zeros((self.ddim, self.ddim, self.bdim))
         L = at.set_subtensor(L[self.tril_indices],
                              self.params_dict["L_tril"].T)
         L = L.dimshuffle(2, 0, 1)
     else:
         L = at.zeros((self.ddim, self.ddim))
         L = at.set_subtensor(L[self.tril_indices],
                              self.params_dict["L_tril"])
     return L
Example #2
 def L(self):
     if self.batched:
         L = at.zeros((self.ddim, self.ddim, self.bdim))
         L = at.set_subtensor(L[self.tril_indices],
                              self.params_dict["L_tril"].T)
         L = L.dimshuffle(2, 0, 1)
     else:
         L = at.zeros((self.ddim, self.ddim))
         L = at.set_subtensor(L[self.tril_indices],
                              self.params_dict["L_tril"])
     Ld = L[..., np.arange(self.ddim), np.arange(self.ddim)]
     L = at.set_subtensor(Ld, rho2sigma(Ld))
     return L
Example #3
File: test_opt.py Project: mgorny/aesara
 def test_pregreedy_optimizer(self):
     W = at.zeros((5, 4))
     bv = at.zeros((5,))
     bh = at.zeros((4,))
     v = matrix("v")
     (bv_t, bh_t), _ = scan(
         lambda _: [bv, bh], sequences=v, outputs_info=[None, None]
     )
     chain, _ = scan(
         lambda x: dot(dot(x, W) + bh_t, W.T) + bv_t,
         outputs_info=v,
         n_steps=2,
     )
     # TODO FIXME: Make this a real test and assert something.
     function([v], chain)(np.zeros((3, 5), dtype=config.floatX))
Example #4
def test_normal_infer_shape():
    M_aet = iscalar("M")
    M_aet.tag.test_value = 3
    sd_aet = scalar("sd")
    sd_aet.tag.test_value = np.array(1.0, dtype=config.floatX)

    test_params = [
        ([aet.as_tensor_variable(np.array(1.0, dtype=config.floatX)),
          sd_aet], None),
        (
            [
                aet.as_tensor_variable(np.array(1.0, dtype=config.floatX)),
                sd_aet
            ],
            (M_aet, ),
        ),
        (
            [
                aet.as_tensor_variable(np.array(1.0, dtype=config.floatX)),
                sd_aet
            ],
            (2, M_aet),
        ),
        ([aet.zeros((M_aet, )), sd_aet], None),
        ([aet.zeros((M_aet, )), sd_aet], (M_aet, )),
        ([aet.zeros((M_aet, )), sd_aet], (2, M_aet)),
        ([aet.zeros((M_aet, )), aet.ones((M_aet, ))], None),
        ([aet.zeros((M_aet, )), aet.ones((M_aet, ))], (2, M_aet)),
        (
            [
                np.array([[-1, 20], [300, -4000]], dtype=config.floatX),
                np.array([[1e-6, 2e-6]], dtype=config.floatX),
            ],
            (3, 2, 2),
        ),
        (
            [
                np.array([1], dtype=config.floatX),
                np.array([10], dtype=config.floatX)
            ],
            (1, 2),
        ),
    ]
    for args, size in test_params:
        rv = normal(*args, size=size)
        rv_shape = tuple(normal._infer_shape(size or (), args, None))
        assert tuple(get_test_value(rv_shape)) == tuple(
            get_test_value(rv).shape)
Example #5
File: test_opt.py Project: mgorny/aesara
def test_inner_replace_dot():
    """
    This tests that rewrites are applied to the inner graph; in particular,
    that BLAS-based rewrites remove the original dot product.

    This was previously a test with a name that implied it was testing the
    `Scan` push-out rewrites, but it wasn't testing that at all, because the
    rewrites were never being applied.
    """
    W = matrix("W")
    h = matrix("h")

    mode = get_default_mode().including("scan")  # .excluding("BlasOpt")

    o, _ = scan(
        lambda hi, him1, W: (hi, dot(hi + him1, W)),
        outputs_info=[at.zeros([h.shape[1]]), None],
        sequences=[h],
        non_sequences=[W],
        mode=mode,
    )

    f = function([W, h], o, mode=mode)

    scan_nodes = [x for x in f.maker.fgraph.toposort() if isinstance(x.op, Scan)]
    assert len(scan_nodes) == 1
    scan_op = scan_nodes[0].op
    assert not any(isinstance(n.op, Dot) for n in scan_op.fn.maker.fgraph.apply_nodes)
Example #6
    def test_bounded_dist(self):
        with pm.Model() as model:
            BoundedNormal = pm.Bound(pm.Normal, lower=0.0)
            x = BoundedNormal("x", mu=aet.zeros((3, 1)), sd=1 * aet.ones((3, 1)), shape=(3, 1))

        with model:
            prior_trace = pm.sample_prior_predictive(5)
            assert prior_trace["x"].shape == (5, 3, 1)
Example #7
 def jacobian_det(self, y_):
     y = y_.T
     Km1 = y.shape[0] + 1
     sy = aet.sum(y, 0, keepdims=True)
     r = aet.concatenate([y + sy, aet.zeros(sy.shape)])
     sr = logsumexp(r, 0, keepdims=True)
     d = aet.log(Km1) + (Km1 * sy) - (Km1 * sr)
     return aet.sum(d, 0).T
Example #8
def expand_packed_triangular(n, packed, lower=True, diagonal_only=False):
    r"""Convert a packed triangular matrix into a two dimensional array.

    Triangular matrices can be stored with better space efficiency by
    storing the non-zero values in a one-dimensional array. We number
    the elements by row like this (for lower or upper triangular matrices):

        [[0 - - -]     [[0 1 2 3]
         [1 2 - -]      [- 4 5 6]
         [3 4 5 -]      [- - 7 8]
         [6 7 8 9]]     [- - - 9]

    Parameters
    ----------
    n: int
        The number of rows of the triangular matrix.
    packed: aesara.vector
        The matrix in packed format.
    lower: bool, default=True
        If true, assume that the matrix is lower triangular.
    diagonal_only: bool
        If true, return only the diagonal of the matrix.
    """
    if packed.ndim != 1:
        raise ValueError("Packed triangular is not one dimensional.")
    if not isinstance(n, int):
        raise TypeError("n must be an integer")

    if diagonal_only and lower:
        diag_idxs = np.arange(1, n + 1).cumsum() - 1
        return packed[diag_idxs]
    elif diagonal_only and not lower:
        diag_idxs = np.arange(2, n + 2)[::-1].cumsum() - n - 1
        return packed[diag_idxs]
    elif lower:
        out = at.zeros((n, n), dtype=aesara.config.floatX)
        idxs = np.tril_indices(n)
        return at.set_subtensor(out[idxs], packed)
    elif not lower:
        out = at.zeros((n, n), dtype=aesara.config.floatX)
        idxs = np.triu_indices(n)
        return at.set_subtensor(out[idxs], packed)
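
A minimal usage sketch, not part of the source file: it assumes `numpy`, `aesara`, and `aesara.tensor as at` are imported and that `expand_packed_triangular` above is in scope.

import numpy as np
import aesara
import aesara.tensor as at

# Pack the ten lower-triangular entries of a 4x4 matrix row by row,
# following the numbering shown in the docstring above.
packed = at.as_tensor_variable(np.arange(10, dtype=aesara.config.floatX))
L = expand_packed_triangular(4, packed, lower=True)
print(L.eval())
# [[0. 0. 0. 0.]
#  [1. 2. 0. 0.]
#  [3. 4. 5. 0.]
#  [6. 7. 8. 9.]]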
Example #9
def default_moment(rv, size, *rv_inputs, rv_name=None, has_fallback=False, ndim_supp=0):
    if ndim_supp == 0:
        return at.zeros(size, dtype=rv.dtype)
    elif has_fallback:
        return at.zeros_like(rv)
    else:
        raise TypeError(
            "Cannot safely infer the size of a multivariate random variable's moment. "
            f"Please provide a moment function when instantiating the {rv_name} "
            "random variable."
        )
Example #10
    def jacobian_det(self, rv_var, rv_value):
        if rv_var.broadcastable[-1]:
            # If this variable is just a bunch of scalars/degenerate
            # Dirichlets, we can't transform it
            return at.ones_like(rv_value)

        y = rv_value.T
        Km1 = y.shape[0] + 1
        sy = at.sum(y, 0, keepdims=True)
        r = at.concatenate([y + sy, at.zeros(sy.shape)])
        sr = logsumexp(r, 0, keepdims=True)
        d = at.log(Km1) + (Km1 * sy) - (Km1 * sr)
        return at.sum(d, 0).T
Example #11
def test_alltrue_scalar():
    assert alltrue_scalar([]).eval()
    assert alltrue_scalar([True]).eval()
    assert alltrue_scalar([at.ones(10)]).eval()
    assert alltrue_scalar([at.ones(10), 5 * at.ones(101)]).eval()
    assert alltrue_scalar([np.ones(10), 5 * at.ones(101)]).eval()
    assert alltrue_scalar([np.ones(10), True, 5 * at.ones(101)]).eval()
    assert alltrue_scalar([np.array([1, 2, 3]), True, 5 * at.ones(101)]).eval()

    assert not alltrue_scalar([False]).eval()
    assert not alltrue_scalar([at.zeros(10)]).eval()
    assert not alltrue_scalar([True, False]).eval()
    assert not alltrue_scalar([np.array([0, -1]), at.ones(60)]).eval()
    assert not alltrue_scalar([np.ones(10), False, 5 * at.ones(101)]).eval()
Example #12
    def test_inplace(self):
        """Make sure that in-place optimizations are *not* performed on the output of a ``BroadcastTo``."""
        a = aet.zeros((5, ))
        d = aet.vector("d")
        c = aet.set_subtensor(a[np.r_[0, 1, 3]], d)
        b = broadcast_to(c, (5, ))
        q = b[np.r_[0, 1, 3]]
        e = aet.set_subtensor(q, np.r_[0, 0, 0])

        opts = Query(include=["inplace"])
        py_mode = Mode("py", opts)
        e_fn = function([d], e, mode=py_mode)

        advincsub_node = e_fn.maker.fgraph.outputs[0].owner
        assert isinstance(advincsub_node.op, AdvancedIncSubtensor1)
        assert isinstance(advincsub_node.inputs[0].owner.op, BroadcastTo)

        assert advincsub_node.op.inplace is False
Example #13
def test_Gpujoin_inplace():
    # Test that GpuJoin works in place.
    #
    # This tests the case where several inputs are passed to GpuJoin but
    # all except one of them are empty. In this case GpuJoin should work
    # in place, and the output should be a view of the non-empty input.
    s = tt.lscalar()
    data = np.array([3, 4, 5], dtype=aesara.config.floatX)
    x = gpuarray_shared_constructor(data, borrow=True)
    z = tt.zeros((s,))

    join = GpuJoin(view=0)
    c = join(0, x, z)

    f = aesara.function([s], aesara.Out(c, borrow=True))
    if not isinstance(mode_with_gpu, aesara.compile.DebugMode):
        assert x.get_value(borrow=True, return_internal_type=True) is f(0)
    assert np.allclose(f(0), [3, 4, 5])
Example #14
 def test_neibs_half_step_by_valid(self):
     neib_shapes = ((3, 3), (3, 5), (5, 3))
     for shp_idx, (shape, neib_step) in enumerate([
         [(7, 8, 5, 5), (1, 1)],
         [(7, 8, 5, 5), (2, 2)],
         [(7, 8, 5, 5), (4, 4)],
         [(7, 8, 5, 5), (1, 4)],
         [(7, 8, 5, 5), (4, 1)],
         [(80, 90, 5, 5), (1, 2)],
         [(1025, 9, 5, 5), (2, 1)],
         [(1, 1, 5, 1037), (2, 4)],
         [(1, 1, 1045, 5), (4, 2)],
     ]):
         for neib_shape in neib_shapes:
             for dtype in self.dtypes:
                 x = aesara.shared(
                     np.random.standard_normal(shape).astype(dtype))
                 extra = (neib_shape[0] // 2, neib_shape[1] // 2)
                 padded_shape = (
                     x.shape[0],
                     x.shape[1],
                     x.shape[2] + 2 * extra[0],
                     x.shape[3] + 2 * extra[1],
                 )
                 padded_x = at.zeros(padded_shape)
                 padded_x = at.set_subtensor(
                     padded_x[:, :, extra[0]:-extra[0], extra[1]:-extra[1]],
                     x)
                 x_using_valid = images2neibs(padded_x,
                                              neib_shape,
                                              neib_step,
                                              mode="valid")
                 x_using_half = images2neibs(x,
                                             neib_shape,
                                             neib_step,
                                             mode="half")
                 f_valid = aesara.function([],
                                           x_using_valid,
                                           mode="FAST_RUN")
                 f_half = aesara.function([], x_using_half, mode=self.mode)
                 unittest_tools.assert_allclose(f_valid(), f_half())
Example #15
 def test_neibs_full_step_by_valid(self):
     for shp_idx, (shape, neib_step, neib_shapes) in enumerate([
         [(7, 8, 5, 5), (1, 1), ((3, 3), (3, 5), (5, 3))],
         [(7, 8, 5, 5), (2, 2), ((3, 3), (3, 5), (5, 3))],
         [(7, 8, 6, 6), (3, 3), ((2, 2), (2, 5), (5, 2))],
         [(7, 8, 6, 6), (1, 3), ((2, 2), (2, 5), (5, 2))],
         [(7, 8, 6, 6), (3, 1), ((2, 2), (2, 5), (5, 2))],
         [(80, 90, 5, 5), (1, 2), ((3, 3), (3, 5), (5, 3))],
         [(1025, 9, 5, 5), (2, 1), ((3, 3), (3, 5), (5, 3))],
         [(1, 1, 11, 1037), (2, 3), ((3, 3), (5, 3))],
         [(1, 1, 1043, 11), (3, 2), ((3, 3), (3, 5))],
     ]):
         for neib_shape in neib_shapes:
             for dtype in self.dtypes:
                 x = aesara.shared(
                     np.random.standard_normal(shape).astype(dtype))
                 extra = (neib_shape[0] - 1, neib_shape[1] - 1)
                 padded_shape = (
                     x.shape[0],
                     x.shape[1],
                     x.shape[2] + 2 * extra[0],
                     x.shape[3] + 2 * extra[1],
                 )
                 padded_x = at.zeros(padded_shape)
                 padded_x = at.set_subtensor(
                     padded_x[:, :, extra[0]:-extra[0], extra[1]:-extra[1]],
                     x)
                 x_using_valid = images2neibs(padded_x,
                                              neib_shape,
                                              neib_step,
                                              mode="valid")
                 x_using_full = images2neibs(x,
                                             neib_shape,
                                             neib_step,
                                             mode="full")
                 f_valid = aesara.function([],
                                           x_using_valid,
                                           mode="FAST_RUN")
                 f_full = aesara.function([], x_using_full, mode=self.mode)
                 unittest_tools.assert_allclose(f_valid(), f_full())
Example #16
def test_bound():
    logp = at.ones((10, 10))
    cond = at.ones((10, 10))
    assert np.all(bound(logp, cond).eval() == logp.eval())

    logp = at.ones((10, 10))
    cond = at.zeros((10, 10))
    assert np.all(bound(logp, cond).eval() == (-np.inf * logp).eval())

    logp = at.ones((10, 10))
    cond = True
    assert np.all(bound(logp, cond).eval() == logp.eval())

    logp = at.ones(3)
    cond = np.array([1, 0, 1])
    assert not np.all(bound(logp, cond).eval() == 1)
    assert np.prod(bound(logp, cond).eval()) == -np.inf

    logp = at.ones((2, 3))
    cond = np.array([[1, 1, 1], [1, 0, 1]])
    assert not np.all(bound(logp, cond).eval() == 1)
    assert np.prod(bound(logp, cond).eval()) == -np.inf
Example #17
 def backward(self, y):
     x = aet.zeros(y.shape)
     x = aet.inc_subtensor(x[..., 0], y[..., 0])
     x = aet.inc_subtensor(x[..., 1:], aet.exp(y[..., 1:]))
     return aet.cumsum(x, axis=-1)
Example #18
 def forward(self, rv_var, rv_value):
     y = at.zeros(rv_value.shape)
     y = at.inc_subtensor(y[..., 0], rv_value[..., 0])
     y = at.inc_subtensor(y[..., 1:], at.log(rv_value[..., 1:] - rv_value[..., :-1]))
     return y
Example #19
 def jacobian_det(self, rv_var, rv_value):
     y = at.zeros(rv_value.shape)
     return at.sum(y, axis=-1)
Example #20
 def logdet(self):
     return aet.zeros((self.z0.shape[0], ))
Example #21
def test_alltrue_shape():
    vals = [True, at.ones(10), at.zeros(5)]

    assert alltrue_scalar(vals).eval().shape == ()
Example #22
 def jacobian_det(self, rv_var, rv_value):
     return at.zeros(rv_value.shape)
Example #23
@pytest.mark.parametrize(
    "M, sd, size",
    [
        (at.as_tensor_variable(np.array(1.0, dtype=config.floatX)), sd_at, ()),
        (
            at.as_tensor_variable(np.array(1.0, dtype=config.floatX)),
            sd_at,
            (M_at, ),
        ),
        (
            at.as_tensor_variable(np.array(1.0, dtype=config.floatX)),
            sd_at,
            (2, M_at),
        ),
        (at.zeros((M_at, )), sd_at, ()),
        (at.zeros((M_at, )), sd_at, (M_at, )),
        (at.zeros((M_at, )), sd_at, (2, M_at)),
        (at.zeros((M_at, )), at.ones((M_at, )), ()),
        (at.zeros((M_at, )), at.ones((M_at, )), (2, M_at)),
        (
            create_aesara_param(
                np.array([[-1, 20], [300, -4000]], dtype=config.floatX)),
            create_aesara_param(np.array([[1e-6, 2e-6]], dtype=config.floatX)),
            (3, 2, 2),
        ),
        (
            create_aesara_param(np.array([1], dtype=config.floatX)),
            create_aesara_param(np.array([10], dtype=config.floatX)),
            (1, 2),
        ),
Example #24
 def jacobian_det(self, x):
     return aet.zeros(x.shape)
Example #25
def test_TransMatConjugateStep_subtensors():

    # Confirm that Dirichlet/non-Dirichlet mixed rows can be
    # parsed
    with pm.Model():
        d_0_rv = pm.Dirichlet("p_0", np.r_[1, 1], shape=2)
        d_1_rv = pm.Dirichlet("p_1", np.r_[1, 1], shape=2)

        p_0_rv = at.as_tensor([0, 0, 1])
        p_1_rv = at.zeros(3)
        p_1_rv = at.set_subtensor(p_1_rv[[0, 2]], d_0_rv)
        p_2_rv = at.zeros(3)
        p_2_rv = at.set_subtensor(p_2_rv[[1, 2]], d_1_rv)

        P_tt = at.stack([p_0_rv, p_1_rv, p_2_rv])
        P_rv = pm.Deterministic("P_tt", at.shape_padleft(P_tt))
        DiscreteMarkovChain("S_t", P_rv, np.r_[1, 0, 0], shape=(10, ))

        transmat = TransMatConjugateStep(P_rv)

    assert transmat.row_remaps == {0: 1, 1: 2}
    exp_slices = {0: np.r_[0, 2], 1: np.r_[1, 2]}
    assert exp_slices.keys() == transmat.row_slices.keys()
    assert all(
        np.array_equal(transmat.row_slices[i], exp_slices[i])
        for i in exp_slices.keys())

    # Same thing, just with some manipulations of the transition matrix
    with pm.Model():
        d_0_rv = pm.Dirichlet("p_0", np.r_[1, 1], shape=2)
        d_1_rv = pm.Dirichlet("p_1", np.r_[1, 1], shape=2)

        p_0_rv = at.as_tensor([0, 0, 1])
        p_1_rv = at.zeros(3)
        p_1_rv = at.set_subtensor(p_1_rv[[0, 2]], d_0_rv)
        p_2_rv = at.zeros(3)
        p_2_rv = at.set_subtensor(p_2_rv[[1, 2]], d_1_rv)

        P_tt = at.horizontal_stack(p_0_rv[..., None], p_1_rv[..., None],
                                   p_2_rv[..., None])
        P_rv = pm.Deterministic("P_tt", at.shape_padleft(P_tt.T))
        DiscreteMarkovChain("S_t", P_rv, np.r_[1, 0, 0], shape=(10, ))

        transmat = TransMatConjugateStep(P_rv)

    assert transmat.row_remaps == {0: 1, 1: 2}
    exp_slices = {0: np.r_[0, 2], 1: np.r_[1, 2]}
    assert exp_slices.keys() == transmat.row_slices.keys()
    assert all(
        np.array_equal(transmat.row_slices[i], exp_slices[i])
        for i in exp_slices.keys())

    # Use an observed `DiscreteMarkovChain` and check the conjugate results
    with pm.Model():
        d_0_rv = pm.Dirichlet("p_0", np.r_[1, 1], shape=2)
        d_1_rv = pm.Dirichlet("p_1", np.r_[1, 1], shape=2)

        p_0_rv = at.as_tensor([0, 0, 1])
        p_1_rv = at.zeros(3)
        p_1_rv = at.set_subtensor(p_1_rv[[0, 2]], d_0_rv)
        p_2_rv = at.zeros(3)
        p_2_rv = at.set_subtensor(p_2_rv[[1, 2]], d_1_rv)

        P_tt = at.horizontal_stack(p_0_rv[..., None], p_1_rv[..., None],
                                   p_2_rv[..., None])
        P_rv = pm.Deterministic("P_tt", at.shape_padleft(P_tt.T))
        DiscreteMarkovChain("S_t",
                            P_rv,
                            np.r_[1, 0, 0],
                            shape=(4, ),
                            observed=np.r_[0, 1, 0, 2])

        transmat = TransMatConjugateStep(P_rv)
Example #26
 def jacobian_det(self, x):
     y = aet.zeros(x.shape)
     return aet.sum(y, axis=-1)
Example #27
from pymc.tests.checks import close_to
from pymc.tests.helpers import verify_grad


@pytest.mark.parametrize(
    "conditions, succeeds",
    [
        ([], True),
        ([True], True),
        ([at.ones(10)], True),
        ([at.ones(10), 5 * at.ones(101)], True),
        ([np.ones(10), 5 * at.ones(101)], True),
        ([np.ones(10), True, 5 * at.ones(101)], True),
        ([np.array([1, 2, 3]), True, 5 * at.ones(101)], True),
        ([False], False),
        ([at.zeros(10)], False),
        ([True, False], False),
        ([np.array([0, -1]), at.ones(60)], False),
        ([np.ones(10), False, 5 * at.ones(101)], False),
    ],
)
def test_check_parameters(conditions, succeeds):
    ret = check_parameters(1, *conditions, msg="parameter check msg")
    if succeeds:
        assert ret.eval()
    else:
        with pytest.raises(ParameterValueError, match="^parameter check msg$"):
            ret.eval()


def test_check_parameters_shape():
Example #28
File: conv3d2d.py Project: mgorny/aesara
def conv3d(signals,
           filters,
           signals_shape=None,
           filters_shape=None,
           border_mode="valid"):
    """
    Convolve spatio-temporal filters with a movie.

    It flips the filters.

    Parameters
    ----------
    signals
        Timeseries of images whose pixels have color channels.
        Shape: [Ns, Ts, C, Hs, Ws].
    filters
        Spatio-temporal filters.
        Shape: [Nf, Tf, C, Hf, Wf].
    signals_shape
        None or a tuple/list with the shape of signals.
    filters_shape
        None or a tuple/list with the shape of filters.
    border_mode
        One of 'valid', 'full' or 'half'.

    Notes
    -----
    Another way to define signals: (batch, time, in channel, row, column)
    Another way to define filters: (out channel, time, in channel, row, column)

    See Also
    --------
    Someone made a script that shows how to swap the axes between
    both 3d convolution implementations in Aesara. See the last
    `attachment <https://groups.google.com/d/msg/aesara-users/1S9_bZgHxVw/0cQR9a4riFUJ>`_

    """

    if isinstance(border_mode, str):
        border_mode = (border_mode, border_mode, border_mode)

    if signals_shape is None:
        _signals_shape_5d = signals.shape
    else:
        _signals_shape_5d = signals_shape

    if filters_shape is None:
        _filters_shape_5d = filters.shape
    else:
        _filters_shape_5d = filters_shape

    Ns, Ts, C, Hs, Ws = _signals_shape_5d
    Nf, Tf, C, Hf, Wf = _filters_shape_5d

    _signals_shape_4d = (Ns * Ts, C, Hs, Ws)
    _filters_shape_4d = (Nf * Tf, C, Hf, Wf)

    if border_mode[1] != border_mode[2]:
        raise NotImplementedError("height and width bordermodes must match")
    conv2d_signal_shape = _signals_shape_4d
    conv2d_filter_shape = _filters_shape_4d
    if signals_shape is None:
        conv2d_signal_shape = None
    if filters_shape is None:
        conv2d_filter_shape = None

    out_4d = aesara.tensor.nnet.conv2d(
        signals.reshape(_signals_shape_4d),
        filters.reshape(_filters_shape_4d),
        input_shape=conv2d_signal_shape,
        filter_shape=conv2d_filter_shape,
        border_mode=border_mode[1],
    )  # ignoring border_mode[2]

    # compute the intended output size
    if border_mode[1] == "valid":
        Hout = Hs - Hf + 1
        Wout = Ws - Wf + 1
    elif border_mode[1] == "full":
        Hout = Hs + Hf - 1
        Wout = Ws + Wf - 1
    elif border_mode[1] == "half":
        Hout = Hs - (Hf % 2) + 1
        Wout = Ws - (Wf % 2) + 1
    elif border_mode[1] == "same":
        raise NotImplementedError()
    else:
        raise ValueError("invalid border mode", border_mode[1])

    # reshape the temporary output to restore its original size
    out_tmp = out_4d.reshape((Ns, Ts, Nf, Tf, Hout, Wout))

    # now sum out along the Tf to get the output
    # but we have to sum on a diagonal through the Tf and Ts submatrix.
    if Tf == 1:
        # for Tf==1, no sum along Tf, the Ts-axis of the output is unchanged!
        out_5d = out_tmp.reshape((Ns, Ts, Nf, Hout, Wout))
    else:
        # for some types of convolution, pad out_tmp with zeros
        if border_mode[0] == "valid":
            Tpad = 0
        elif border_mode[0] == "full":
            Tpad = Tf - 1
        elif border_mode[0] == "half":
            Tpad = Tf // 2
        elif border_mode[0] == "same":
            raise NotImplementedError()
        else:
            raise ValueError("invalid border mode", border_mode[0])

        if Tpad == 0:
            out_5d = diagonal_subtensor(out_tmp, 1, 3).sum(axis=3)
        else:
            # pad out_tmp with zeros before summing over the diagonal
            out_tmp_padded = at.zeros(dtype=out_tmp.dtype,
                                      shape=(Ns, Ts + 2 * Tpad, Nf, Tf, Hout,
                                             Wout))
            out_tmp_padded = aesara.tensor.subtensor.set_subtensor(
                out_tmp_padded[:, Tpad:(Ts + Tpad), :, :, :, :], out_tmp)
            out_5d = diagonal_subtensor(out_tmp_padded, 1, 3).sum(axis=3)

    return out_5d
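
A minimal usage sketch, not part of conv3d2d.py: the shapes are hypothetical, and it assumes the module's own imports (`numpy as np`, `aesara`, `aesara.tensor as at`) plus the `conv3d` function above.

import numpy as np
import aesara
import aesara.tensor as at

signals = at.tensor5("signals")  # (Ns, Ts, C, Hs, Ws)
filters = at.tensor5("filters")  # (Nf, Tf, C, Hf, Wf)
out = conv3d(signals, filters, border_mode="valid")

f = aesara.function([signals, filters], out)
s = np.random.standard_normal((2, 4, 3, 8, 8)).astype(aesara.config.floatX)
w = np.random.standard_normal((5, 2, 3, 3, 3)).astype(aesara.config.floatX)
# "valid" gives Hout = Hs - Hf + 1, Wout = Ws - Wf + 1, and a time axis of
# length Ts - Tf + 1, so the result should be (2, 3, 5, 6, 6).
print(f(s, w).shape)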
Example #29
    if str(x.dtype).startswith("float"):
        x = floatX(x)
    return x


"""
Aesara derivative functions
"""


def gradient1(f, v):
    """flat gradient of f wrt v"""
    return at.flatten(grad(f, v, disconnected_inputs="warn"))


empty_gradient = at.zeros(0, dtype="float32")


def gradient(f, vars=None):
    if vars is None:
        vars = cont_inputs(f)

    if vars:
        return at.concatenate([gradient1(f, v) for v in vars], axis=0)
    else:
        return empty_gradient
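
A minimal usage sketch of `gradient`, not part of the source file; it assumes `np` is numpy and `at` is aesara.tensor, as in this module.

x = at.vector("x")
y = at.scalar("y")
f = (x ** 2).sum() + y ** 2
g = gradient(f, [x, y])  # flattened per-variable gradients, concatenated
print(g.eval({x: np.array([1.0, 2.0, 3.0], dtype=x.dtype),
              y: np.array(2.0, dtype=y.dtype)}))
# -> [2. 4. 6. 4.]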


def jacobian1(f, v):
    """jacobian of f wrt v"""
    f = at.flatten(f)
Example #30
 def forward(self, x):
     y = aet.zeros(x.shape)
     y = aet.inc_subtensor(y[..., 0], x[..., 0])
     y = aet.inc_subtensor(y[..., 1:], aet.log(x[..., 1:] - x[..., :-1]))
     return y