Example #1
def local_rv_size_lift(fgraph, node):
    """Lift the ``size`` parameter in a ``RandomVariable``.

    In other words, this will broadcast the distribution parameters by adding
    the extra dimensions implied by the ``size`` parameter, and remove the
    ``size`` parameter in the process.

    For example, ``normal(0, 1, size=(1, 2))`` becomes
    ``normal([[0, 0]], [[1, 1]], size=())``.

    """

    if not isinstance(node.op, RandomVariable):
        return

    rng, size, dtype, *dist_params = node.inputs

    dist_params = broadcast_params(dist_params, node.op.ndims_params)

    if get_vector_length(size) > 0:
        dist_params = [
            broadcast_to(p,
                         (tuple(size) +
                          tuple(p.shape)) if node.op.ndim_supp > 0 else size)
            for p in dist_params
        ]
    else:
        return

    new_node = node.op.make_node(rng, None, dtype, *dist_params)

    if config.compute_test_value != "off":
        compute_test_value(new_node)

    return new_node.outputs
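
A quick numpy-only sketch of the broadcasting this rewrite performs (the names below are purely illustrative, not part of the library): for the docstring example ``normal(0, 1, size=(1, 2))``, the parameters pick up the shape implied by ``size``, after which ``size`` itself can be dropped.

import numpy as np

mu, sigma, size = 0.0, 1.0, (1, 2)
print(np.broadcast_to(mu, size))     # [[0. 0.]]  -> the lifted ``mu``
print(np.broadcast_to(sigma, size))  # [[1. 1.]]  -> the lifted ``sigma``
# With the parameters carrying the full shape, ``size=()`` adds no information.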
Example #2
def lift_rv_shapes(node):
    """Lift `RandomVariable`'s shape-related parameters.

    In other words, this will broadcast the distribution parameters so that
    they include the extra dimensions added by the `size` parameter.

    For example, ``normal([0.0, 1.0], 5.0, size=(3, 2))`` becomes
    ``normal([[0., 1.], [0., 1.], [0., 1.]], [[5., 5.], [5., 5.], [5., 5.]])``.

    """

    if not isinstance(node.op, RandomVariable):
        return False

    rng, size, dtype, *dist_params = node.inputs

    dist_params = broadcast_params(dist_params, node.op.ndims_params)

    if get_vector_length(size) > 0:
        dist_params = [
            broadcast_to(p,
                         (tuple(size) +
                          tuple(p.shape)) if node.op.ndim_supp > 0 else size)
            for p in dist_params
        ]

    new_node = node.op.make_node(rng, None, dtype, *dist_params)

    if config.compute_test_value != "off":
        compute_test_value(new_node)

    return new_node
Example #3
File: aesaraf.py  Project: bwengals/pymc3
def change_rv_size(
    rv: TensorVariable,
    new_size: PotentialShapeType,
    expand: Optional[bool] = False,
) -> TensorVariable:
    """Change or expand the size of a `RandomVariable`.

    Parameters
    ==========
    rv
        The old `RandomVariable` output.
    new_size
        The new size.
    expand:
        Expand the existing size by `new_size`.

    """
    # Check the dimensionality of the `new_size` kwarg
    new_size_ndim = np.ndim(new_size)
    if new_size_ndim > 1:
        raise ShapeError("The `new_size` must be ≤1-dimensional.",
                         actual=new_size_ndim)
    elif new_size_ndim == 0:
        new_size = (new_size, )

    # Extract the RV node that is to be resized, together with its inputs, name and tag
    assert rv.owner.op is not None
    if isinstance(rv.owner.op, SpecifyShape):
        rv = rv.owner.inputs[0]
    rv_node = rv.owner
    rng, size, dtype, *dist_params = rv_node.inputs
    name = rv.name
    tag = rv.tag

    if expand:
        shape = tuple(rv_node.op._infer_shape(size, dist_params))
        size = shape[:len(shape) - rv_node.op.ndim_supp]
        new_size = tuple(new_size) + tuple(size)

    # Make sure the new size is a tensor. This dtype-aware conversion helps
    # to not unnecessarily pick up a `Cast` in some cases (see #4652).
    new_size = at.as_tensor(new_size, ndim=1, dtype="int64")

    new_rv_node = rv_node.op.make_node(rng, new_size, dtype, *dist_params)
    new_rv = new_rv_node.outputs[-1]
    new_rv.name = name
    for k, v in tag.__dict__.items():
        new_rv.tag.__dict__.setdefault(k, v)

    # Update "traditional" rng default_update, if that was set for old RV
    default_update = getattr(rng, "default_update", None)
    if default_update is not None and default_update is rv_node.outputs[0]:
        rng.default_update = new_rv_node.outputs[0]

    if config.compute_test_value != "off":
        compute_test_value(new_rv_node)

    return new_rv
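
A minimal usage sketch, assuming an Aesara/PyMC v4-era environment in which this `change_rv_size` is available as defined above and `aesara.tensor` is importable; the variable names are illustrative only.

import aesara.tensor as at

rv = at.random.normal(0.0, 1.0, size=(2,))       # a (2,)-sized normal RV
expanded = change_rv_size(rv, (5,), expand=True)
print(expanded.eval().shape)                      # (5, 2): new size prepended
resized = change_rv_size(rv, (3, 2))              # expand=False replaces the size
print(resized.eval().shape)                       # (3, 2)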
Example #4
def change_rv_size(
    rv_var: TensorVariable,
    new_size: PotentialShapeType,
    expand: Optional[bool] = False,
) -> TensorVariable:
    """Change or expand the size of a `RandomVariable`.

    Parameters
    ==========
    rv_var
        The `RandomVariable` output.
    new_size
        The new size.
    expand:
        Expand the existing size by `new_size`.

    """
    # Check the dimensionality of the `new_size` kwarg
    new_size_ndim = np.ndim(new_size)
    if new_size_ndim > 1:
        raise ShapeError("The `new_size` must be ≤1-dimensional.",
                         actual=new_size_ndim)
    elif new_size_ndim == 0:
        new_size = (new_size, )

    # Extract the RV node that is to be resized, together with its inputs, name and tag
    if isinstance(rv_var.owner.op, SpecifyShape):
        rv_var = rv_var.owner.inputs[0]
    rv_node = rv_var.owner
    rng, size, dtype, *dist_params = rv_node.inputs
    name = rv_var.name
    tag = rv_var.tag

    if expand:
        if rv_node.op.ndim_supp == 0 and at.get_vector_length(size) == 0:
            size = rv_node.op._infer_shape(size, dist_params)
        new_size = tuple(new_size) + tuple(size)

    # Make sure the new size is a tensor. This dtype-aware conversion helps
    # to not unnecessarily pick up a `Cast` in some cases (see #4652).
    new_size = at.as_tensor(new_size, ndim=1, dtype="int64")

    new_rv_node = rv_node.op.make_node(rng, new_size, dtype, *dist_params)
    rv_var = new_rv_node.outputs[-1]
    rv_var.name = name
    for k, v in tag.__dict__.items():
        rv_var.tag.__dict__.setdefault(k, v)

    if config.compute_test_value != "off":
        compute_test_value(new_rv_node)

    return rv_var
Example #5
def local_dimshuffle_rv_lift(fgraph, node):
    """Lift `DimShuffle`s through `RandomVariable` `Op`s.

    For example, ``normal(mu, std).T == normal(mu.T, std.T)``.

    The basic idea behind this optimization is that we need to separate the
    `DimShuffle`ing into independent `DimShuffle`s that each occur in two
    distinct sub-spaces: the parameters and ``size`` (i.e. replications)
    sub-spaces.

    If a `DimShuffle` exchanges dimensions across those two sub-spaces, then we
    don't do anything.

    Otherwise, if the `DimShuffle` only exchanges dimensions within each of
    those sub-spaces, we can break it apart and apply the parameter-space
    `DimShuffle` to the `RandomVariable`'s distribution parameters, and then
    apply the replications-space `DimShuffle` to the `RandomVariable`'s
    ``size`` tuple.  The latter is a particularly simple rearranging of a
    tuple, but the former requires a little more work.
    """

    ds_op = node.op

    if not isinstance(ds_op, DimShuffle):
        return False

    base_rv = node.inputs[0]
    rv_node = base_rv.owner

    if not (
        rv_node and isinstance(rv_node.op, RandomVariable) and rv_node.op.ndim_supp == 0
    ):
        return False

    # If no one else is using the underlying `RandomVariable`, then we can
    # do this; otherwise, the graph would be internally inconsistent.
    if not all(
        (n == node or isinstance(n.op, Shape)) for n, i in fgraph.clients[base_rv]
    ):
        return False

    rv_op = rv_node.op
    rng, size, dtype, *dist_params = rv_node.inputs

    # We need to know the dimensions that were *not* added by the `size`
    # parameter (i.e. the dimensions corresponding to independent variates with
    # different parameter values)
    num_ind_dims = None
    if len(dist_params) == 1:
        num_ind_dims = dist_params[0].ndim
    else:
        # When there is more than one distribution parameter, assume that all
        # of them will broadcast to the maximum number of dimensions
        num_ind_dims = max(d.ndim for d in dist_params)

    # If the indices in `ds_new_order` are entirely within the replication
    # indices group or the independent variates indices group, then we can apply
    # this optimization.

    ds_new_order = ds_op.new_order
    # Create a map from old index order to new/`DimShuffled` index order
    dim_orders = [(n, d) for n, d in enumerate(ds_new_order) if isinstance(d, int)]

    # Find the index at which the replications/independents split occurs
    reps_ind_split_idx = len(dim_orders) - (num_ind_dims + rv_op.ndim_supp)

    ds_reps_new_dims = dim_orders[:reps_ind_split_idx]
    ds_ind_new_dims = dim_orders[reps_ind_split_idx:]
    ds_only_in_ind = ds_ind_new_dims and all(
        d >= reps_ind_split_idx for n, d in ds_ind_new_dims
    )

    if ds_only_in_ind:

        # Update the `size` array to reflect the `DimShuffle`d dimensions,
        # since the trailing dimensions in `size` represent the independent
        # variates dimensions (for univariate distributions, at least)
        new_size = (
            [constant(1, dtype="int64") if o == "x" else size[o] for o in ds_new_order]
            if get_vector_length(size) > 0
            else size
        )

        # Compute the new axes parameter(s) for the `DimShuffle` that will be
        # applied to the `RandomVariable` parameters (they need to be offset)
        rv_params_new_order = [
            d - reps_ind_split_idx if isinstance(d, int) else d
            for d in ds_new_order[ds_ind_new_dims[0][0] :]
        ]

        # Lift the `DimShuffle`s into the parameters
        # NOTE: The parameters might not be broadcasted against each other, so
        # we can only apply the parts of the `DimShuffle` that are relevant.
        new_dist_params = []
        for d in dist_params:
            if d.ndim < len(ds_ind_new_dims):
                _rv_params_new_order = [
                    o
                    for o in rv_params_new_order
                    if (isinstance(o, int) and o < d.ndim) or o == "x"
                ]
            else:
                _rv_params_new_order = rv_params_new_order

            new_dist_params.append(
                type(ds_op)(d.type.broadcastable, _rv_params_new_order)(d)
            )
        new_node = rv_op.make_node(rng, new_size, dtype, *new_dist_params)

        if config.compute_test_value != "off":
            compute_test_value(new_node)

        return [new_node.outputs[1]]

    ds_only_in_reps = ds_reps_new_dims and all(
        d < reps_ind_split_idx for n, d in ds_reps_new_dims
    )

    if ds_only_in_reps:
        # Update the `size` array to reflect the `DimShuffle`d dimensions.
        # There should be no need to `DimShuffle` now.
        new_size = [
            constant(1, dtype="int64") if o == "x" else size[o] for o in ds_new_order
        ]

        new_node = rv_op.make_node(rng, new_size, dtype, *dist_params)

        if config.compute_test_value != "off":
            compute_test_value(new_node)

        return [new_node.outputs[1]]

    return False
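
The replication/parameter sub-space split that this rewrite reasons about can be sketched with plain numpy (illustrative names, not library API): for a univariate RV the trailing ``num_ind_dims`` output dimensions come from the parameters, and a `DimShuffle` confined to those dimensions can be pushed onto the parameters themselves.

import numpy as np

mu = np.arange(6.0).reshape(3, 2)         # parameter dims -> num_ind_dims = 2
size = (4, 3, 2)                           # one leading replication dim
reps_ind_split_idx = len(size) - mu.ndim   # = 1; dims >= 1 are parameter dims

new_order = (0, 2, 1)                      # swaps the two parameter dims only
lhs = np.broadcast_to(mu, size).transpose(new_order)  # DimShuffle after the RV
rhs = np.broadcast_to(mu.T, (4, 2, 3))                # DimShuffle lifted onto mu
assert np.array_equal(lhs, rhs)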
Example #6
def local_subtensor_rv_lift(fgraph, node):
    """Lift ``*Subtensor`` `Op`s up to a `RandomVariable`'s parameters.

    In a fashion similar to `local_dimshuffle_rv_lift`, the indexed dimensions
    need to be separated into distinct replication-space and (independent)
    parameter-space ``*Subtensor``s.

    The replication-space ``*Subtensor`` can be used to determine a
    sub/super-set of the replication-space and, thus, a "smaller"/"larger"
    ``size`` tuple.  The parameter-space ``*Subtensor`` is simply lifted and
    applied to the `RandomVariable`'s distribution parameters.

    Consider the following example graph:
    ``normal(mu, std, size=(d1, d2, d3))[idx1, idx2, idx3]``.  The
    ``*Subtensor`` `Op` requests indices ``idx1``, ``idx2``, and ``idx3``,
    which correspond to all three ``size`` dimensions.  Now, depending on the
    broadcasted dimensions of ``mu`` and ``std``, this ``*Subtensor`` `Op`
    could be reducing the ``size`` parameter and/or subsetting the independent
    ``mu`` and ``std`` parameters.  Only once the dimensions are properly
    separated into the two replication/parameter subspaces can we determine how
    the ``*Subtensor`` indices are distributed.
    For instance, ``normal(mu, std, size=(d1, d2, d3))[idx1, idx2, idx3]``
    could become ``normal(mu[idx1], std[idx2], size=np.shape(idx1) + np.shape(idx2) + np.shape(idx3))``
    if ``mu.shape == std.shape == ()``.

    ``normal`` is a rather simple case, because it's univariate.  Multivariate
    cases require a mapping between the parameter space and the image of the
    random variable.  This may not always be possible, but for many common
    distributions it is.  For example, the dimensions of the multivariate
    normal's image can be mapped directly to each dimension of its parameters.
    We use these mappings to change a graph like ``multivariate_normal(mu, Sigma)[idx1]``
    into ``multivariate_normal(mu[idx1], Sigma[idx1, idx1])``.  Notice how the
    index is applied along both dimensions of the covariance, since both of its
    dimensions map to the variable's image.

    Also, there's the important matter of "advanced" indexing, which may not
    only subset an array, but also broadcast it to a larger size.

    """

    st_op = node.op

    if not isinstance(st_op, (AdvancedSubtensor, AdvancedSubtensor1, Subtensor)):
        return False

    base_rv = node.inputs[0]

    rv_node = base_rv.owner
    if not (rv_node and isinstance(rv_node.op, RandomVariable)):
        return False

    # If no one else is using the underlying `RandomVariable`, then we can
    # do this; otherwise, the graph would be internally inconsistent.
    if not all(
        (n == node or isinstance(n.op, Shape)) for n, i in fgraph.clients[base_rv]
    ):
        return False

    rv_op = rv_node.op
    rng, size, dtype, *dist_params = rv_node.inputs

    # TODO: Remove this once the multi-dimensional changes described below are
    # in place.
    if rv_op.ndim_supp > 0:
        return False

    rv_op = base_rv.owner.op
    rng, size, dtype, *dist_params = base_rv.owner.inputs

    idx_list = getattr(st_op, "idx_list", None)
    if idx_list:
        cdata = get_idx_list(node.inputs, idx_list)
    else:
        cdata = node.inputs[1:]

    st_indices, st_is_bool = zip(
        *tuple(
            (as_index_variable(i), getattr(i, "dtype", None) == "bool") for i in cdata
        )
    )

    # We need to separate dimensions into replications and independents
    num_ind_dims = None
    if len(dist_params) == 1:
        num_ind_dims = dist_params[0].ndim
    else:
        # When there is more than one distribution parameter, assume that all
        # of them will broadcast to the maximum number of dimensions
        num_ind_dims = max(d.ndim for d in dist_params)

    reps_ind_split_idx = base_rv.ndim - (num_ind_dims + rv_op.ndim_supp)

    if len(st_indices) > reps_ind_split_idx:
        # These are the indices that need to be applied to the parameters
        ind_indices = tuple(st_indices[reps_ind_split_idx:])

        # We need to broadcast the parameters before applying the `*Subtensor*`
        # with these indices, because the indices could be referencing broadcast
        # dimensions that don't exist (yet)
        bcast_dist_params = broadcast_params(dist_params, rv_op.ndims_params)

        # TODO: For multidimensional distributions, we need a map that tells us
        # which dimensions of the parameters need to be indexed.
        #
        # For example, `multivariate_normal` would have the following:
        # `RandomVariable.param_to_image_dims = ((0,), (0, 1))`
        #
        # I.e. the first parameter's (i.e. mean's) first dimension maps directly to
        # the dimension of the RV's image, and its second parameter's
        # (i.e. covariance's) first and second dimensions map directly to the
        # dimension of the RV's image.

        args_lifted = tuple(p[ind_indices] for p in bcast_dist_params)
    else:
        # In this case, no indexing is applied to the parameters; only the
        # `size` parameter is affected.
        args_lifted = dist_params

    # TODO: Could use `ShapeFeature` info.  We would need to be sure that
    # `node` isn't in the results, though.
    # if hasattr(fgraph, "shape_feature"):
    #     output_shape = fgraph.shape_feature.shape_of(node.outputs[0])
    # else:
    output_shape = indexed_result_shape(base_rv.shape, st_indices)

    size_lifted = (
        output_shape if rv_op.ndim_supp == 0 else output_shape[: -rv_op.ndim_supp]
    )

    # Boolean indices can actually change the `size` value (compared to just
    # *which* dimensions of `size` are used).
    if any(st_is_bool):
        size_lifted = tuple(
            tt_sum(idx) if is_bool else s
            for s, is_bool, idx in zip(
                size_lifted, st_is_bool, st_indices[: (reps_ind_split_idx + 1)]
            )
        )

    new_node = rv_op.make_node(rng, size_lifted, dtype, *args_lifted)
    _, new_rv = new_node.outputs

    # Calling `Op.make_node` directly circumvents test value computations, so
    # we need to compute the test values manually
    if config.compute_test_value != "off":
        compute_test_value(new_node)

    return [new_rv]
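
The replication-space versus parameter-space distinction for indexing can likewise be sketched with numpy alone (illustrative, univariate case): an index into a parameter dimension can be lifted onto the parameter, while an index into a replication dimension only shrinks ``size``.

import numpy as np

mu = np.array([0.0, 1.0, 2.0])      # parameter-space shape (3,)
size = (5, 3)                        # one replication dim + one parameter dim
full = np.broadcast_to(mu, size)

# Parameter-space index: push it onto ``mu`` itself.
assert np.array_equal(full[:, 0:2], np.broadcast_to(mu[0:2], (5, 2)))
# Replication-space index: only the ``size`` entry changes.
assert np.array_equal(full[1:4], np.broadcast_to(mu, (3, 3)))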
Example #7
def logpt(
    var: TensorVariable,
    rv_values: Optional[Union[TensorVariable, Dict[TensorVariable,
                                                   TensorVariable]]] = None,
    *,
    jacobian: bool = True,
    scaling: bool = True,
    transformed: bool = True,
    sum: bool = True,
    **kwargs,
) -> TensorVariable:
    """Create a measure-space (i.e. log-likelihood) graph for a random variable
    or a list of random variables at a given point.

    The input `var` determines which log-likelihood graph is used and
    `rv_value` is that graph's input parameter.  For example, if `var` is
    the output of a ``NormalRV`` ``Op``, then the output is a graph of the
    density function for `var` set to the value `rv_value`.

    Parameters
    ==========
    var
        The `RandomVariable` output that determines the log-likelihood graph.
        Can also be a list of variables. The final log-likelihood graph will
        be the sum total of all individual log-likelihood graphs of variables
        in the list.
    rv_values
        A variable, or ``dict`` of variables, that represents the value of
        `var` in its log-likelihood.  If no `rv_value` is provided,
        ``var.tag.value_var`` will be checked and, when available, used.
    jacobian
        Whether or not to include the Jacobian term.
    scaling
        A scaling term to apply to the generated log-likelihood graph.
    transformed
        Apply transforms.
    sum
        Sum the log-likelihood.

    """
    # TODO: In future when we drop support for tag.value_var most of the following
    # logic can be removed and logpt can just be a wrapper function that calls aeppl's
    # joint_logprob directly.

    # If var is not a list make it one.
    if not isinstance(var, list):
        var = [var]

    # If no values were provided and a variable in `var` is an RV, it is
    # assumed that its tagged value variable or observation is the value
    # variable for that particular RV.
    if rv_values is None:
        rv_values = {}
        for _var in var:
            if isinstance(_var.owner.op, RandomVariable):
                rv_value_var = getattr(_var.tag, "observations",
                                       getattr(_var.tag, "value_var", _var))
                rv_values[_var] = rv_value_var
    elif not isinstance(rv_values, Mapping):
        # Else if we're given a single value and a single variable we assume a mapping among them.
        rv_values = ({
            var[0]:
            at.as_tensor_variable(rv_values).astype(var[0].type)
        } if len(var) == 1 else {})

    # The filtering of the logp graph below is based on the value variables
    # provided to this function, so warn if none were given.
    if not rv_values:
        warnings.warn(
            "No value variables provided; the logp will be an empty graph")

    if scaling:
        rv_scalings = {}
        for _var in var:
            rv_value_var = getattr(_var.tag, "observations",
                                   getattr(_var.tag, "value_var", _var))
            rv_scalings[rv_value_var] = _get_scaling(
                getattr(_var.tag, "total_size", None), rv_value_var.shape,
                rv_value_var.ndim)

    # Aeppl needs all rv-values pairs, not just that of the requested var.
    # Hence we iterate through the graph to collect them.
    tmp_rvs_to_values = rv_values.copy()
    transform_map = {}
    for node in io_toposort(graph_inputs(var), var):
        try:
            curr_vars = [node.default_output()]
        except ValueError:
            curr_vars = node.outputs
        for curr_var in curr_vars:
            rv_value_var = getattr(curr_var.tag, "observations",
                                   getattr(curr_var.tag, "value_var", None))
            if rv_value_var is None:
                continue
            rv_value = rv_values.get(curr_var, rv_value_var)
            tmp_rvs_to_values[curr_var] = rv_value
            # Along with value variables we also check for transforms if any.
            if hasattr(rv_value_var.tag, "transform") and transformed:
                transform_map[rv_value] = rv_value_var.tag.transform

    transform_opt = TransformValuesOpt(transform_map)
    temp_logp_var_dict = factorized_joint_logprob(tmp_rvs_to_values,
                                                  extra_rewrites=transform_opt,
                                                  use_jacobian=jacobian,
                                                  **kwargs)

    # aeppl returns the logpt for every single value term we provided to it. This includes
    # the extra values we plugged in above so we need to filter those out.
    logp_var_dict = {}
    for value_var, _logp in temp_logp_var_dict.items():
        if value_var in rv_values.values():
            logp_var_dict[value_var] = _logp

    # If it's an empty dictionary the logp is None
    if not logp_var_dict:
        logp_var = None
    else:
        # Otherwise apply appropriate scalings and at.add and/or at.sum the
        # graphs accordingly.
        if scaling:
            for _value in logp_var_dict.keys():
                if _value in rv_scalings:
                    logp_var_dict[_value] *= rv_scalings[_value]

        if len(logp_var_dict) == 1:
            logp_var_dict = tuple(logp_var_dict.values())[0]
            if sum:
                logp_var = at.sum(logp_var_dict)
            else:
                logp_var = logp_var_dict
        else:
            if sum:
                logp_var = at.sum(
                    [at.sum(factor) for factor in logp_var_dict.values()])
            else:
                logp_var = at.add(*logp_var_dict.values())

        # Recompute test values for the changes introduced by the replacements
        # above.
        if config.compute_test_value != "off":
            for node in io_toposort(graph_inputs((logp_var, )), (logp_var, )):
                compute_test_value(node)

    return logp_var
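
A hypothetical usage sketch, assuming a PyMC v4-era environment where this `logpt`, `pymc` (with the Aesara/aeppl backend), and `aesara.tensor` are all importable; the variable names are illustrative.

import aesara.tensor as at
import pymc as pm   # assumed v4-era package providing Normal.dist

rv = pm.Normal.dist(0.0, 1.0)              # a standard-normal RandomVariable
value = at.scalar("value")                 # the point at which to evaluate
logp_graph = logpt(rv, {rv: value}, sum=False)
print(logp_graph.eval({value: 0.5}))       # ≈ -1.0439 = log N(0.5 | 0, 1)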
Example #8
def logcdfpt(
    var: TensorVariable,
    rv_values: Optional[Union[TensorVariable, Dict[TensorVariable, TensorVariable]]] = None,
    *,
    scaling: bool = True,
    sum: bool = True,
    **kwargs,
) -> TensorVariable:
    """Create a measure-space (i.e. log-cdf) graph for a random variable at a given point.

    Parameters
    ==========
    var
        The `RandomVariable` output that determines the log-CDF graph.
    rv_values
        A variable, or ``dict`` of variables, that represents the value of
        `var` in its log-CDF.  If no `rv_value` is provided,
        ``var.tag.value_var`` will be checked and, when available, used.
    scaling
        A scaling term to apply to the generated log-CDF graph.
    sum
        Sum the log-CDF.

    """
    if not isinstance(rv_values, Mapping):
        rv_values = {var: rv_values} if rv_values is not None else {}

    rv_var, rv_value_var = extract_rv_and_value_vars(var)

    rv_value = rv_values.get(rv_var, rv_value_var)

    if rv_var is not None and rv_value is None:
        raise ValueError(f"No value variable specified or associated with {rv_var}")

    if rv_value is not None:
        rv_value = at.as_tensor(rv_value)

        if rv_var is not None:
            # Make sure that the value is compatible with the random variable
            rv_value = rv_var.type.filter_variable(rv_value.astype(rv_var.dtype))

        if rv_value_var is None:
            rv_value_var = rv_value

    rv_node = rv_var.owner

    rng, size, dtype, *dist_params = rv_node.inputs

    # Here, we plug the actual random variable into the log-likelihood graph,
    # because we want a log-likelihood graph that only contains
    # random variables.  This is important, because a random variable's
    # parameters can contain random variables themselves.
    # Ultimately, with a graph containing only random variables and
    # "deterministics", we can simply replace all the random variables with
    # their value variables and be done.
    tmp_rv_values = rv_values.copy()
    tmp_rv_values[rv_var] = rv_var

    logp_var = _logcdf(rv_node.op, rv_var, tmp_rv_values, *dist_params, **kwargs)

    transform = getattr(rv_value_var.tag, "transform", None) if rv_value_var else None

    # Replace random variables with their value variables
    replacements = rv_values.copy()
    replacements.update({rv_var: rv_value, rv_value_var: rv_value})

    (logp_var,), _ = rvs_to_value_vars(
        (logp_var,),
        apply_transforms=False,
        initial_replacements=replacements,
    )

    if sum:
        logp_var = at.sum(logp_var)

    if scaling:
        logp_var *= _get_scaling(
            getattr(rv_var.tag, "total_size", None), rv_value.shape, rv_value.ndim
        )

    # Recompute test values for the changes introduced by the replacements
    # above.
    if config.compute_test_value != "off":
        for node in io_toposort(graph_inputs((logp_var,)), (logp_var,)):
            compute_test_value(node)

    if rv_var.name is not None:
        logp_var.name = f"__logp_{rv_var.name}"

    return logp_var
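
As a point of reference for what the resulting graph computes, the log-CDF of a standard normal at 0.5 can be checked directly with SciPy (SciPy is used here only for comparison, it is not part of the snippet above).

from scipy import stats

print(stats.norm.logcdf(0.5))   # ≈ -0.3689; the log-CDF a Normal(0, 1) graph
                                # should yield when evaluated at 0.5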
Example #9
def logpt(
    var: TensorVariable,
    rv_values: Optional[Union[TensorVariable, Dict[TensorVariable,
                                                   TensorVariable]]] = None,
    *,
    jacobian: bool = True,
    scaling: bool = True,
    transformed: bool = True,
    cdf: bool = False,
    sum: bool = False,
    **kwargs,
) -> TensorVariable:
    """Create a measure-space (i.e. log-likelihood) graph for a random variable at a given point.

    The input `var` determines which log-likelihood graph is used and
    `rv_value` is that graph's input parameter.  For example, if `var` is
    the output of a ``NormalRV`` ``Op``, then the output is a graph of the
    density function for `var` set to the value `rv_value`.

    Parameters
    ==========
    var
        The `RandomVariable` output that determines the log-likelihood graph.
    rv_values
        A variable, or ``dict`` of variables, that represents the value of
        `var` in its log-likelihood.  If no `rv_value` is provided,
        ``var.tag.value_var`` will be checked and, when available, used.
    jacobian
        Whether or not to include the Jacobian term.
    scaling
        A scaling term to apply to the generated log-likelihood graph.
    transformed
        Apply transforms.
    cdf
        Return the log cumulative distribution.
    sum
        Sum the log-likelihood.

    """
    if not isinstance(rv_values, Mapping):
        rv_values = {var: rv_values} if rv_values is not None else {}

    rv_var, rv_value_var = extract_rv_and_value_vars(var)

    rv_value = rv_values.get(rv_var, rv_value_var)

    if rv_var is not None and rv_value is None:
        raise ValueError(
            f"No value variable specified or associated with {rv_var}")

    if rv_value is not None:
        rv_value = at.as_tensor(rv_value)

        if rv_var is not None:
            # Make sure that the value is compatible with the random variable
            rv_value = rv_var.type.filter_variable(
                rv_value.astype(rv_var.dtype))

        if rv_value_var is None:
            rv_value_var = rv_value

    if rv_var is None:
        if var.owner is not None:
            return _logp(
                var.owner.op,
                var,
                rv_values,
                *var.owner.inputs,
                jacobian=jacobian,
                scaling=scaling,
                transformed=transformed,
                cdf=cdf,
                sum=sum,
            )

        return at.zeros_like(var)

    rv_node = rv_var.owner

    rng, size, dtype, *dist_params = rv_node.inputs

    # Here, we plug the actual random variable into the log-likelihood graph,
    # because we want a log-likelihood graph that only contains
    # random variables.  This is important, because a random variable's
    # parameters can contain random variables themselves.
    # Ultimately, with a graph containing only random variables and
    # "deterministics", we can simply replace all the random variables with
    # their value variables and be done.
    tmp_rv_values = rv_values.copy()
    tmp_rv_values[rv_var] = rv_var

    if not cdf:
        logp_var = _logp(rv_node.op, rv_var, tmp_rv_values, *dist_params,
                         **kwargs)
    else:
        logp_var = _logcdf(rv_node.op, rv_var, tmp_rv_values, *dist_params,
                           **kwargs)

    transform = getattr(rv_value_var.tag, "transform",
                        None) if rv_value_var else None

    if transform and transformed and not cdf and jacobian:
        transformed_jacobian = transform.jacobian_det(rv_var, rv_value)
        if transformed_jacobian:
            if logp_var.ndim > transformed_jacobian.ndim:
                logp_var = logp_var.sum(axis=-1)
            logp_var += transformed_jacobian

    # Replace random variables with their value variables
    replacements = rv_values.copy()
    replacements.update({rv_var: rv_value, rv_value_var: rv_value})

    (logp_var, ), _ = rvs_to_value_vars(
        (logp_var, ),
        apply_transforms=transformed and not cdf,
        initial_replacements=replacements,
    )

    if sum:
        logp_var = at.sum(logp_var)

    if scaling:
        logp_var *= _get_scaling(getattr(rv_var.tag, "total_size", None),
                                 rv_value.shape, rv_value.ndim)

    # Recompute test values for the changes introduced by the replacements
    # above.
    if config.compute_test_value != "off":
        for node in io_toposort(graph_inputs((logp_var, )), (logp_var, )):
            compute_test_value(node)

    if rv_var.name is not None:
        logp_var.name = "__logp_%s" % rv_var.name

    return logp_var