Example #1
def lift_rv_shapes(node):
    """Lift `RandomVariable`'s shape-related parameters.

    In other words, this will broadcast the distribution parameters so that
    they include the extra dimensions added by the `size` parameter.

    For example, ``normal([0.0, 1.0], 5.0, size=(3, 2))`` becomes
    ``normal([[0., 1.], [0., 1.], [0., 1.]], [[5., 5.], [5., 5.], [5., 5.]])``.

    """

    if not isinstance(node.op, RandomVariable):
        return False

    rng, size, dtype, *dist_params = node.inputs

    dist_params = broadcast_params(dist_params, node.op.ndims_params)

    dist_params = [
        broadcast_to(
            p, (tuple(size) + tuple(p.shape)) if node.op.ndim_supp > 0 else size
        )
        for p in dist_params
    ]

    return node.op.make_node(rng, None, dtype, *dist_params)
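
# A standalone NumPy sketch of the broadcast described in the docstring
# above (illustrative only: the real `lift_rv_shapes` rewrites a symbolic
# Aesara graph rather than concrete arrays).
import numpy as np

loc, scale, size = np.array([0.0, 1.0]), np.array(5.0), (3, 2)

# First broadcast the parameters against each other, then up to `size`.
loc, scale = np.broadcast_arrays(loc, scale)
loc = np.broadcast_to(loc, size)
scale = np.broadcast_to(scale, size)

assert loc.shape == scale.shape == (3, 2)
assert np.array_equal(loc, [[0.0, 1.0]] * 3)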
Example #2
    def rng_fn(cls, rng, mean, cov, size):

        if mean.ndim > 1 or cov.ndim > 2:
            # Neither SciPy nor NumPy implements parameter broadcasting for
            # multivariate normals (or any other multivariate distribution),
            # so we need to implement it here
            mean, cov = broadcast_params([mean, cov], cls.ndims_params)
            size = tuple(size or ())

            if size:
                if (0 < mean.ndim - 1 <= len(size)
                        and size[-mean.ndim + 1:] != mean.shape[:-1]):
                    raise ValueError(
                        "shape mismatch: objects cannot be broadcast to a single shape"
                    )
                mean = np.broadcast_to(mean, size + mean.shape[-1:])
                cov = np.broadcast_to(cov, size + cov.shape[-2:])

            res = np.empty(mean.shape)
            for idx in np.ndindex(mean.shape[:-1]):
                m = mean[idx]
                c = cov[idx]
                res[idx] = safe_multivariate_normal(m, c, rng=rng)
            return res
        else:
            return safe_multivariate_normal(mean, cov, size=size, rng=rng)
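
# A self-contained sketch of the batched sampling loop above, substituting
# NumPy's `Generator.multivariate_normal` for `safe_multivariate_normal`
# (assumed here to be a numerically-safeguarded wrapper around the same
# draw).
import numpy as np

rng = np.random.default_rng(123)
mean = np.zeros((4, 3))                      # four 3-dimensional means
cov = np.broadcast_to(np.eye(3), (4, 3, 3))  # one covariance per mean

res = np.empty(mean.shape)
for idx in np.ndindex(mean.shape[:-1]):
    res[idx] = rng.multivariate_normal(mean[idx], cov[idx])

assert res.shape == (4, 3)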
Example #3
def local_rv_size_lift(fgraph, node):
    """Lift the ``size`` parameter in a ``RandomVariable``.

    In other words, this will broadcast the distribution parameters by adding
    the extra dimensions implied by the ``size`` parameter, and remove the
    ``size`` parameter in the process.

    For example, ``normal(0, 1, size=(1, 2))`` becomes
    ``normal([[0, 0]], [[1, 1]], size=())``.

    """

    if not isinstance(node.op, RandomVariable):
        return

    rng, size, dtype, *dist_params = node.inputs

    dist_params = broadcast_params(dist_params, node.op.ndims_params)

    if get_vector_length(size) == 0:
        return

    dist_params = [
        broadcast_to(
            p, (tuple(size) + tuple(p.shape)) if node.op.ndim_supp > 0 else size
        )
        for p in dist_params
    ]

    new_node = node.op.make_node(rng, None, dtype, *dist_params)

    if config.compute_test_value != "off":
        compute_test_value(new_node)

    return new_node.outputs
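
# Sketch of the identity this rewrite exploits: drawing with scalar
# parameters and an explicit `size` is equivalent in distribution to drawing
# with pre-broadcast parameters and an empty `size`.
import numpy as np

a = np.random.default_rng(0).normal(0.0, 1.0, size=(1, 2))
b = np.random.default_rng(0).normal(np.zeros((1, 2)), np.ones((1, 2)))

assert a.shape == b.shape == (1, 2)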
Example #4
    def rng_fn(cls, rng, n, p, size):
        if n.ndim > 0 or p.ndim > 1:
            n, p = broadcast_params([n, p], cls.ndims_params)
            size = tuple(size or ())

            if size:
                n = np.broadcast_to(n, size + n.shape)
                p = np.broadcast_to(p, size + p.shape)

            res = np.empty(p.shape)
            for idx in np.ndindex(p.shape[:-1]):
                res[idx] = rng.multinomial(n[idx], p[idx])
            return res
        else:
            return rng.multinomial(n, p, size=size)
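
# Standalone sketch of the loop above.  Note that this variant prepends
# `size` as pure replication dimensions to the broadcast parameter shapes
# (contrast with Example #5, where `size` must already end with the batch
# shape).  `Generator.multinomial` is assumed to accept only one parameter
# set per call, as older NumPy releases did.
import numpy as np

rng = np.random.default_rng(0)
n = np.broadcast_to(10, (2,))            # one trial count per batch entry
p = np.array([[0.2, 0.8], [0.5, 0.5]])   # one probability vector per entry

res = np.empty(p.shape)
for idx in np.ndindex(p.shape[:-1]):
    res[idx] = rng.multinomial(n[idx], p[idx])

assert res.shape == (2, 2)
assert (res.sum(axis=-1) == n).all()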
Example #5
    def rng_fn(cls, rng, n, p, size):
        if n.ndim > 0 or p.ndim > 1:
            n, p = broadcast_params([n, p], cls.ndims_params)
            size = tuple(size or ())

            if size:
                if (0 < p.ndim - 1 <= len(size)
                        and size[-p.ndim + 1:] != p.shape[:-1]):
                    raise ValueError(
                        "shape mismatch: objects cannot be broadcast to a single shape"
                    )
                n = np.broadcast_to(n, size)
                p = np.broadcast_to(p, size + p.shape[-1:])

            res = np.empty(p.shape, dtype=cls.dtype)
            for idx in np.ndindex(p.shape[:-1]):
                res[idx] = rng.multinomial(n[idx], p[idx])
            return res
        else:
            return rng.multinomial(n, p, size=size)
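
# Sketch of the shape check above: the trailing entries of `size` must match
# the parameters' batch shape, otherwise the draw cannot be broadcast and
# `rng_fn` raises.
import numpy as np

p = np.array([[0.2, 0.8], [0.5, 0.5]])  # batch shape (2,), event shape (2,)
size = (3,)                             # does not end with the batch shape

assert 0 < p.ndim - 1 <= len(size)
assert size[-p.ndim + 1:] != p.shape[:-1]  # mismatch -> ValueError above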
Example #6
    def rng_fn(cls, rng, mean, cov, size):

        if mean.ndim > 1 or cov.ndim > 2:
            # Neither SciPy nor NumPy implements parameter broadcasting for
            # multivariate normals (or many other multivariate distributions),
            # so we have to implement a quick-and-dirty version here
            mean, cov = broadcast_params([mean, cov], cls.ndims_params)
            size = tuple(size or ())

            if size:
                mean = np.broadcast_to(mean, size + mean.shape)
                cov = np.broadcast_to(cov, size + cov.shape)

            res = np.empty(mean.shape)
            for idx in np.ndindex(mean.shape[:-1]):
                m = mean[idx]
                c = cov[idx]
                res[idx] = safe_multivariate_normal(m, c, rng=rng)
            return res
        else:
            return safe_multivariate_normal(mean, cov, size=size, rng=rng)
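
# NumPy sketch of the broadcast in this variant: `size` is prepended as pure
# replication dimensions to the full (batch + event) parameter shapes,
# unlike Example #2, which expects `size` to subsume the batch dimensions.
import numpy as np

mean = np.zeros((4, 3))                      # batch (4,), event (3,)
cov = np.broadcast_to(np.eye(3), (4, 3, 3))
size = (2,)

mean = np.broadcast_to(mean, size + mean.shape)  # -> (2, 4, 3)
cov = np.broadcast_to(cov, size + cov.shape)     # -> (2, 4, 3, 3)

assert mean.shape == (2, 4, 3) and cov.shape == (2, 4, 3, 3)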
Example #7
def local_subtensor_rv_lift(fgraph, node):
    """Lift ``*Subtensor`` `Op`s up to a `RandomVariable`'s parameters.

    In a fashion similar to `local_dimshuffle_rv_lift`, the indexed dimensions
    need to be separated into distinct replication-space and (independent)
    parameter-space ``*Subtensor``s.

    The replication-space ``*Subtensor`` can be used to determine a
    sub/super-set of the replication-space and, thus, a "smaller"/"larger"
    ``size`` tuple.  The parameter-space ``*Subtensor`` is simply lifted and
    applied to the `RandomVariable`'s distribution parameters.

    Consider the following example graph:
    ``normal(mu, std, size=(d1, d2, d3))[idx1, idx2, idx3]``.  The
    ``*Subtensor`` `Op` requests indices ``idx1``, ``idx2``, and ``idx3``,
    which correspond to all three ``size`` dimensions.  Now, depending on the
    broadcasted dimensions of ``mu`` and ``std``, this ``*Subtensor`` `Op`
    could be reducing the ``size`` parameter and/or subsetting the independent
    ``mu`` and ``std`` parameters.  Only once the dimensions are properly
    separated into the two replication/parameter subspaces can we determine how
    the ``*Subtensor`` indices are distributed.
    For instance, ``normal(mu, std, size=(d1, d2, d3))[idx1, idx2, idx3]``
    could become ``normal(mu, std, size=output_shape)`` when
    ``mu.shape == std.shape == ()``, since scalar parameters make every
    indexed dimension a pure replication dimension (``output_shape`` being
    the shape of the indexed result).

    ``normal`` is a rather simple case, because it's univariate.  Multivariate
    cases require a mapping between the parameter space and the image of the
    random variable.  This may not always be possible, but for many common
    distributions it is.  For example, the dimensions of the multivariate
    normal's image can be mapped directly to each dimension of its parameters.
    We use these mappings to change a graph like ``multivariate_normal(mu, Sigma)[idx1]``
    into ``multivariate_normal(mu[idx1], Sigma[idx1, idx1])``.

    Also, there's the important matter of "advanced" indexing, which may not
    only subset an array, but also broadcast it to a larger size.

    """

    st_op = node.op

    if not isinstance(st_op, (AdvancedSubtensor, AdvancedSubtensor1, Subtensor)):
        return False

    base_rv = node.inputs[0]

    rv_node = base_rv.owner
    if not (rv_node and isinstance(rv_node.op, RandomVariable)):
        return False

    # If no one else is using the underlying `RandomVariable`, then we can
    # do this; otherwise, the graph would be internally inconsistent.
    if not all(
        (n == node or isinstance(n.op, Shape)) for n, i in fgraph.clients[base_rv]
    ):
        return False

    rv_op = rv_node.op
    rng, size, dtype, *dist_params = rv_node.inputs

    # TODO: Remove this once the multi-dimensional changes described below are
    # in place.
    if rv_op.ndim_supp > 0:
        return False

    idx_list = getattr(st_op, "idx_list", None)
    if idx_list:
        cdata = get_idx_list(node.inputs, idx_list)
    else:
        cdata = node.inputs[1:]

    st_indices, st_is_bool = zip(
        *tuple(
            (as_index_variable(i), getattr(i, "dtype", None) == "bool") for i in cdata
        )
    )

    # We need to separate dimensions into replications and independents
    num_ind_dims = None
    if len(dist_params) == 1:
        num_ind_dims = dist_params[0].ndim
    else:
        # When there is more than one distribution parameter, assume that all
        # of them will broadcast to the maximum number of dimensions
        num_ind_dims = max(d.ndim for d in dist_params)

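    # Dimensions before this index belong purely to the replication (i.e.
    # `size`) space; indices at or past it apply to the parameters
    # themselves.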
    reps_ind_split_idx = base_rv.ndim - (num_ind_dims + rv_op.ndim_supp)

    if len(st_indices) > reps_ind_split_idx:
        # These are the indices that need to be applied to the parameters
        ind_indices = tuple(st_indices[reps_ind_split_idx:])

        # We need to broadcast the parameters before applying the `*Subtensor*`
        # with these indices, because the indices could be referencing broadcast
        # dimensions that don't exist (yet)
        bcast_dist_params = broadcast_params(dist_params, rv_op.ndims_params)

        # TODO: For multidimensional distributions, we need a map that tells us
        # which dimensions of the parameters need to be indexed.
        #
        # For example, `multivariate_normal` would have the following:
        # `RandomVariable.param_to_image_dims = ((0,), (0, 1))`
        #
        # I.e. the first parameter's (i.e. mean's) first dimension maps directly to
        # the dimension of the RV's image, and its second parameter's
        # (i.e. covariance's) first and second dimensions map directly to the
        # dimension of the RV's image.

        args_lifted = tuple(p[ind_indices] for p in bcast_dist_params)
    else:
        # In this case, no indexing is applied to the parameters; only the
        # `size` parameter is affected.
        args_lifted = dist_params

    # TODO: Could use `ShapeFeature` info.  We would need to be sure that
    # `node` isn't in the results, though.
    # if hasattr(fgraph, "shape_feature"):
    #     output_shape = fgraph.shape_feature.shape_of(node.outputs[0])
    # else:
    output_shape = indexed_result_shape(base_rv.shape, st_indices)

    size_lifted = (
        output_shape if rv_op.ndim_supp == 0 else output_shape[: -rv_op.ndim_supp]
    )

    # Boolean indices can actually change the `size` value (compared to just
    # *which* dimensions of `size` are used).
    if any(st_is_bool):
        size_lifted = tuple(
            tt_sum(idx) if is_bool else s
            for s, is_bool, idx in zip(
                size_lifted, st_is_bool, st_indices[: (reps_ind_split_idx + 1)]
            )
        )

    new_node = rv_op.make_node(rng, size_lifted, dtype, *args_lifted)
    _, new_rv = new_node.outputs

    # Calling `Op.make_node` directly circumvents test value computations, so
    # we need to compute the test values manually
    if config.compute_test_value != "off":
        compute_test_value(new_node)

    return [new_rv]
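
# A NumPy sketch of the replication/parameter split described in the
# docstring: with parameters of shape (3,) and ``size=(2, 3)``, the leading
# output dimension is pure replication, so indexing it only shrinks `size`,
# while indexing the trailing dimension amounts to indexing the parameters
# themselves.
import numpy as np

rng = np.random.default_rng(0)
mu, std = np.arange(3.0), np.ones(3)
draw = rng.normal(mu, std, size=(2, 3))

# Indexing the replication dimension: equivalent to a draw with size=(3,)
assert draw[0].shape == rng.normal(mu, std, size=(3,)).shape
# Indexing the parameter dimension: equivalent to indexing mu and std
assert draw[:, 1:].shape == rng.normal(mu[1:], std[1:], size=(2, 2)).shape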
Example #8
def test_broadcast_params():

    ndims_params = [0, 0]

    mean = np.array([0, 1, 2])
    cov = np.array(1e-6)
    params = [mean, cov]
    res = broadcast_params(params, ndims_params)
    assert np.array_equal(res[0], mean)
    assert np.array_equal(res[1], np.broadcast_to(cov, (3, )))

    ndims_params = [1, 2]

    mean = np.r_[1, 2, 3]
    cov = np.stack([np.eye(3) * 1e-5, np.eye(3) * 1e-4])
    params = [mean, cov]
    res = broadcast_params(params, ndims_params)
    assert np.array_equal(res[0], np.broadcast_to(mean, (2, 3)))
    assert np.array_equal(res[1], cov)

    mean = np.stack([np.r_[0, 0, 0], np.r_[1, 1, 1]])
    cov = np.arange(3 * 3).reshape((3, 3))
    params = [mean, cov]
    res = broadcast_params(params, ndims_params)
    assert np.array_equal(res[0], mean)
    assert np.array_equal(res[1], np.broadcast_to(cov, (2, 3, 3)))

    mean = np.stack([np.r_[0, 0, 0], np.r_[1, 1, 1]])
    cov = np.stack([
        np.arange(3 * 3).reshape((3, 3)),
        np.arange(3 * 3).reshape((3, 3)) * 10
    ])
    params = [mean, cov]
    res = broadcast_params(params, ndims_params)
    assert np.array_equal(res[0], mean)
    assert np.array_equal(res[1], cov)

    mean = np.array([[1, 2, 3]])
    cov = np.stack([np.eye(3) * 1e-5, np.eye(3) * 1e-4])
    params = [mean, cov]
    res = broadcast_params(params, ndims_params)
    assert np.array_equal(res[0], np.array([[1, 2, 3], [1, 2, 3]]))
    assert np.array_equal(res[1], cov)

    mean = np.array([[0], [10], [100]])
    cov = np.diag(np.array([1e-6]))
    params = [mean, cov]
    res = broadcast_params(params, ndims_params)
    assert np.array_equal(res[0], mean)
    assert np.array_equal(res[1], np.broadcast_to(cov, (3, 1, 1)))

    # Try it in Aesara
    with config.change_flags(compute_test_value="raise"):
        mean = tensor(config.floatX, [False, True])
        mean.tag.test_value = np.array([[0], [10], [100]], dtype=config.floatX)
        cov = matrix()
        cov.tag.test_value = np.diag(np.array([1e-6], dtype=config.floatX))
        params = [mean, cov]
        res = broadcast_params(params, ndims_params)
        assert np.array_equal(res[0].get_test_value(), mean.get_test_value())
        assert np.array_equal(res[1].get_test_value(),
                              np.broadcast_to(cov.get_test_value(), (3, 1, 1)))
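
# For reference, a minimal NumPy-only sketch of the behaviour these tests
# expect from `broadcast_params` (the real implementation also handles
# symbolic Aesara inputs; `broadcast_params_sketch` is a hypothetical name):
import numpy as np

def broadcast_params_sketch(params, ndims_params):
    # Split each parameter into leading batch dimensions and trailing core
    # dimensions, broadcast the batch dimensions against one another, and
    # leave the core dimensions untouched.
    params = [np.asarray(p) for p in params]
    batch_shapes = [p.shape[: p.ndim - nd] for p, nd in zip(params, ndims_params)]
    batch_shape = np.broadcast_shapes(*batch_shapes)
    return [
        np.broadcast_to(p, batch_shape + p.shape[p.ndim - nd:])
        for p, nd in zip(params, ndims_params)
    ]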