Example #1
0
def test_distribution_constraint1():
    """
    Test the xor distribution.
    """
    d = Xor()
    ad = get_abstract_dist(d)
    A, b = distribution_constraint([0], [1], ad)
    expected_A = np.array([
        [0, 0, 1, 1, -1, -1, 0, 0],
        [0, 0, -1, -1, 1, 1, 0, 0],
    ])
    expected_b = np.array([0] * 8)
    assert np.all(A == expected_A)
    assert np.all(b == expected_b)
Example #2
0
def test_distribution_constraint1():
    """
    Test the xor distribution.
    """
    dist = Xor()
    abstract = get_abstract_dist(dist)
    A, b = distribution_constraint([0], [1], abstract)
    # Rows come in +/- pairs for the paired-distribution constraint.
    row = [0, 0, 1, 1, -1, -1, 0, 0]
    true_A = np.array([row, [-x for x in row]])
    true_b = np.zeros(8, dtype=int)
    assert (A == true_A).all()
    assert (b == true_b).all()
Example #3
0
def isolate_zeros_generic(dist, rvs):
    """
    Determines if there are any elements of the optimization vector that must
    be zero.

    If p(marginal) = 0, then every component of the joint that contributes to
    that marginal probability must be exactly zero for all feasible solutions.

    Parameters
    ----------
    dist : distribution
        A dense, linear-base distribution to scan for structural zeros.

    rvs : sequence of sequences
        Each inner sequence specifies one marginal distribution whose zero
        probabilities pin joint elements to zero.

    Returns
    -------
    variables : Bunch
        With attributes ``nonzero`` (list of indexes free to vary) and
        ``zero`` (array of indexes forced to zero).

    """
    assert dist.is_dense()
    assert dist.get_base() == 'linear'

    # Translate user-specified rvs into internal indexes.
    rvs_, indexes = parse_rvs(dist, set(flatten(rvs)), unique=True, sort=True)
    rvs = [[indexes[rvs_.index(rv)] for rv in subrv] for subrv in rvs]

    d = get_abstract_dist(dist)
    n_elements = d.n_elements

    zero_elements = np.zeros(n_elements, dtype=int)
    cache = {}
    pmf = dist.pmf

    for subrvs in rvs:
        marray = d.parameter_array(subrvs, cache=cache)
        for idx in marray:
            p = pmf[idx].sum()
            if np.isclose(p, 0):
                # Every joint element contributing to a zero marginal must
                # itself be zero; tally the sparse index set densely.
                bvec = np.zeros(n_elements, dtype=int)
                bvec[idx] = 1
                zero_elements += bvec

    zero = np.nonzero(zero_elements)[0]
    zeroset = set(zero)
    nonzero = [i for i in range(n_elements) if i not in zeroset]
    variables = Bunch(nonzero=nonzero, zero=zero)

    return variables
Example #4
0
def isolate_zeros_generic(dist, rvs):
    """
    Identify elements of the optimization vector that are forced to zero.

    Whenever a marginal probability vanishes, every joint probability that
    feeds into that marginal must be zero in any feasible solution.

    """
    assert dist.is_dense()
    assert dist.get_base() == 'linear'

    rvs_, indexes = parse_rvs(dist, set(flatten(rvs)), unique=True, sort=True)
    rvs = [[indexes[rvs_.index(rv)] for rv in subrv] for subrv in rvs]

    d = get_abstract_dist(dist)
    n_variables = d.n_variables
    n_elements = d.n_elements

    pmf = dist.pmf
    cache = {}
    forced = np.zeros(n_elements, dtype=int)

    for subrvs in rvs:
        for idx in d.parameter_array(subrvs, cache=cache):
            # Densify the sparse index set into an indicator vector.
            mask = np.zeros(n_elements, dtype=int)
            mask[idx] = 1
            if np.isclose(pmf[idx].sum(), 0):
                forced += mask

    zero = np.nonzero(forced)[0]
    zero_set = set(zero)
    nonzero = [j for j in range(n_elements) if j not in zero_set]

    return Bunch(nonzero=nonzero, zero=zero)
Example #5
0
def isolate_zeros(dist, k):
    """
    Determines if there are any elements of the optimization vector that must
    be zero.

    If :math:`p(marginal) = 0`, then every component of the joint that
    contributes to that marginal probability must be exactly zero for all
    feasible solutions.

    """
    assert dist.is_dense()
    assert dist.get_base() == 'linear'

    d = get_abstract_dist(dist)
    n_elements = d.n_elements

    pmf = dist.pmf
    cache = {}
    counts = np.zeros(n_elements, dtype=int)

    if k > 0:
        for subrvs in itertools.combinations(range(d.n_variables), k):
            for idx in d.parameter_array(subrvs, cache=cache):
                # Dense indicator for this sparse marginal slice.
                indicator = np.zeros(n_elements, dtype=int)
                indicator[idx] = 1
                if np.isclose(pmf[idx].sum(), 0):
                    counts += indicator

    zero = np.nonzero(counts)[0]
    forced = set(zero)
    nonzero = [i for i in range(n_elements) if i not in forced]

    return Bunch(nonzero=nonzero, zero=zero)
Example #6
0
def isolate_zeros(dist, k):
    """
    Determines if there are any elements of the optimization vector that must
    be zero.

    If p(marginal) = 0, then every component of the joint that contributes to
    that marginal probability must be exactly zero for all feasible solutions.

    """
    assert dist.is_dense()
    assert dist.get_base() == 'linear'

    d = get_abstract_dist(dist)
    n_variables = d.n_variables
    n_elements = d.n_elements

    pmf = dist.pmf
    param_cache = {}
    tally = np.zeros(n_elements, dtype=int)

    if k > 0:
        all_rvs = range(n_variables)
        for combo in itertools.combinations(all_rvs, k):
            marray = d.parameter_array(combo, cache=param_cache)
            for sparse_idx in marray:
                # Expand the sparse index set to a dense indicator vector.
                dense = np.zeros(n_elements, dtype=int)
                dense[sparse_idx] = 1
                if np.isclose(pmf[sparse_idx].sum(), 0):
                    tally += dense

    zero = np.nonzero(tally)[0]
    zero_lookup = set(zero)
    nonzero = [elem for elem in range(n_elements) if elem not in zero_lookup]
    variables = Bunch(nonzero=nonzero, zero=zero)

    return variables
Example #7
0
def marginal_constraints_generic(dist, rvs, rv_mode=None,
                                 with_normalization=True):
    """
    Returns `A` and `b` in `A x = b`, for a system of marginal constraints.

    In general, the resulting matrix `A` will not have full rank.

    Parameters
    ----------
    dist : distribution
        The distribution used to calculate the marginal constraints.

    rvs : sequence
        A sequence whose elements are also sequences.  Each inner sequence
        specifies a marginal distribution as a set of random variables from
        `dist`. The inner sequences need not be pairwise mutually exclusive
        with one another. A random variable can only appear once within
        each inner sequence, but it can occur in multiple inner sequences.

    rv_mode : str, None
        Specifies how to interpret the elements of `rvs`. Valid options
        are: {'indices', 'names'}. If equal to 'indices', then the elements
        of `rvs` are interpreted as random variable indices. If equal to
        'names', the elements are interpreted as random variable names.
        If `None`, then the value of `dist._rv_mode` is consulted.

    with_normalization : bool
        If True, prepend the normalization constraint `sum(x) = 1`.

    Returns
    -------
    A : ndarray, shape (p, q)
        Constraint matrix; one row per marginal outcome (plus the
        normalization row when requested).

    b : ndarray, shape (p,)
        The right-hand side of the linear equality constraints.

    """
    assert dist.is_dense()
    assert dist.get_base() == 'linear'

    # PEP 8 (E731): use a def rather than a lambda bound to a name.
    def parse(rv):
        return parse_rvs(dist, rv, rv_mode=rv_mode, unique=True, sort=True)[1]

    # potential inclusion: include implied constraints
    # rvs = set().union(*[set(r for r in powerset(rv) if r) for rv in rvs])
    indexes = [parse(rv) for rv in rvs]

    pmf = dist.pmf

    d = get_abstract_dist(dist)

    A = []
    b = []

    # Begin with the normalization constraint.
    if with_normalization:
        A.append(np.ones(d.n_elements))
        b.append(1)

    # Now add all the marginal constraints.
    cache = {}
    for rvec in indexes:
        for idx in d.parameter_array(rvec, cache=cache):
            bvec = np.zeros(d.n_elements)
            bvec[idx] = 1
            A.append(bvec)
            b.append(pmf[idx].sum())

    A = np.asarray(A, dtype=float)
    b = np.asarray(b, dtype=float)

    return A, b
Example #8
0
def marginal_constraints(dist, k, normalization=True, source_marginal=False):
    """
    Builds the k-way marginal constraints.

    The last random variable is treated as the target; all other random
    variables are sources. Each constraint ties together (k-1) sources and
    the target, demanding that:

        p( source_{k-1}, target ) = q( source_{k-1}, target )

    for every (k-1)-combination of the sources, so any candidate
    distribution q must reproduce these marginals of the true
    distribution p.

    For unique information, k=2 is used, but more general constraints are
    supported.

    """
    assert dist.is_dense()
    assert dist.get_base() == 'linear'

    pmf = dist.pmf

    d = get_abstract_dist(dist)
    n_elements = d.n_elements
    n_variables = d.n_variables

    # Linear equality constraints; they are not independent of one another.
    A = []
    b = []

    # Normalization: \sum_i q_i = 1
    if normalization:
        A.append(np.ones(n_elements))
        b.append(1)

    # Partition the random variables into sources and the (last) target.
    rvs = range(n_variables)
    target_rvs = tuple(rvs[-1:])
    source_rvs = tuple(rvs[:-1])

    # `k` may also be a pair (k, source_rvs) restricting the sources.
    try:
        k, source_rvs = k
        source_rvs = tuple(source_rvs)
    except TypeError:
        pass

    assert k >= 1
    submarginal_size = k - 1

    cache = {}

    def add_rows(rvec):
        # Append one constraint row per outcome of the marginal over rvec.
        for idx in d.parameter_array(rvec, cache=cache):
            row = np.zeros(n_elements)
            row[idx] = 1
            A.append(row)
            b.append(pmf[idx].sum())

    # p( source_{k-1}, target ) = q( source_{k-1}, target )
    for subrvs in itertools.combinations(source_rvs, submarginal_size):
        add_rows(subrvs + target_rvs)

    if source_marginal:
        add_rows(source_rvs)

    A = np.asarray(A, dtype=float)
    b = np.asarray(b, dtype=float)
    return A, b
Example #9
0
def extra_constraints(dist, k):
    """
    Builds a list of additional constraints using the specific properties
    of the random variables. The goal is to determine if there are any
    (input, output) pairs such that p(input, output) = q(input, output)
    for all q in the feasible set.

    This can happen in a few ways:

    1. If any marginal probability is zero, then all joint probabilities which
       contributed to that marginal probability must also be zero.

    2. Now suppose we want to find out when p({x_i}, y) = q({x_i}, y).

       For every (k-1)-subset, g_k(x), we have:

          p(g_k(x), y) = q(g_k(x), y)

       So we have:

          p(y) = q(y)
          p(g_k(x) | y) = q(g_k(x) | y)    provided k > 1.

       If k = 1, we cannot proceed.

       Now suppose that for every i, we had:

          p(x_i | y) = \delta(x_i, f_i(y))

       Then, it follows that:

          q(x_i | y) = \delta(x_i, f_i(y))

       as well since we match all k-way marginals with k >= 2. E.g., For k=4,
       we have: p( x_1, x_2, x_3, y ) = q( x_1, x_2, x_3, y ) which implies
       that p( x_1 | y ) = q( x_1 | y). So generally, we have:

          p({x_i} | y) = q({x_i} | y)

       for each i. Then, since p(y) = q(y), we also have:

          p({x_i}, y) = q({x_i}, y).

       Note, we do not require that y be some function of the x_i.

    Returns
    -------
    variables : Bunch
        With attributes `free`, `fixed`, `fixed_values`, `fixed_nonzeros`,
        `zeros`, and `nonzeros` partitioning the optimization vector.

    """
    assert dist.is_dense()
    assert dist.get_base() == 'linear'

    d = get_abstract_dist(dist)
    n_variables = d.n_variables
    n_elements = d.n_elements

    rvs = range(n_variables)
    target_rvs = tuple(rvs[-1:])
    source_rvs = tuple(rvs[:-1])

    try:
        # Ignore source restrictions.
        k, _ = k
    except TypeError:
        pass

    assert k >= 1
    submarginal_size = k - 1

    ### Find values that are fixed at zero

    # This finds marginal probabilities that are zero and infers that the
    # joint probabilities that contributed to it are also zero.
    zero_elements = np.zeros(n_elements, dtype=int)
    cache = {}
    pmf = dist.pmf
    for subrvs in itertools.combinations(source_rvs, submarginal_size):
        # Use a fresh name here; the original reused `rvs` and shadowed
        # the outer random-variable range.
        marg_rvs = subrvs + target_rvs
        marray = d.parameter_array(marg_rvs, cache=cache)
        for idx in marray:
            p = pmf[idx].sum()
            if np.isclose(p, 0):
                # Convert the sparse nonzero elements to a dense boolean array
                bvec = np.zeros(n_elements, dtype=int)
                bvec[idx] = 1
                zero_elements += bvec

    ### Find values that match the original joint.

    # First identify each p(input_i | output) = 1
    determined = defaultdict(lambda: [0] * len(source_rvs))

    # If there is no source rv because k=1, then nothing can be determined.
    if submarginal_size:
        for i, source_rv in enumerate(source_rvs):
            md, cdists = dist.condition_on(target_rvs, rvs=[source_rv])
            for target_outcome, cdist in zip(md.outcomes, cdists):
                # cdist is dense
                if np.isclose(cdist.pmf, 1).sum() == 1:
                    # Then p(source_rv | target_rvs) = 1
                    determined[target_outcome][i] = 1

    # An outcome is fully determined only if every source is determined.
    is_determined = {outcome: all(det) for outcome, det in determined.items()}

    # Need to find joint indexes j for which all p(a_i | b) = 1.
    # For these j, p_j = q_j.
    determined = {}
    for i, (outcome, p) in enumerate(dist.zipped()):
        if not (p > 0):
            continue
        target = dist._outcome_ctor([outcome[t] for t in target_rvs])
        if is_determined.get(target, False):
            determined[i] = p

    ###

    fixed = {}
    zeros = []
    nonzeros = []
    for i, is_zero in enumerate(zero_elements):
        if is_zero:
            fixed[i] = 0
            zeros.append(i)
        else:
            nonzeros.append(i)
    for i, p in determined.items():
        fixed[i] = p

    if fixed:
        fixed = sorted(fixed.items())
        fixed_indexes, fixed_values = list(zip(*fixed))
        fixed_indexes = list(fixed_indexes)
        fixed_values = list(fixed_values)
    else:
        fixed_indexes = []
        fixed_values = []

    # Hoist the set constructions: the originals rebuilt the set on every
    # comprehension iteration, which was accidentally quadratic.
    fixed_set = set(fixed_indexes)
    free = [i for i in range(n_elements) if i not in fixed_set]
    nonzero_set = set(nonzeros)
    fixed_nonzeros = [i for i in fixed_indexes if i in nonzero_set]

    # all indexes   = free + fixed
    # all indexes   = nonzeros + zeros
    # fixed         = fixed_nonzeros + zeros
    # nonzero >= free
    variables = Bunch(
        free=free,
        fixed=fixed_indexes,
        fixed_values=fixed_values,
        fixed_nonzeros=fixed_nonzeros,
        zeros=zeros,
        nonzeros=nonzeros)

    return variables
Example #10
0
def marginal_constraints(dist, k, normalization=True, source_marginal=False):
    """
    Builds the k-way marginal constraints.

    This assumes the target random variable is the last one; every other
    random variable is a source. Each constraint involves (k-1) sources
    together with the target, requiring:

        p( source_{k-1}, target ) = q( source_{k-1}, target )

    for each (k-1)-combination of sources, so a candidate distribution q
    must match the true distribution p on those marginals.

    For unique information, k=2 is used, but we allow more general
    constraints.

    """
    assert dist.is_dense()
    assert dist.get_base() == 'linear'

    probs = dist.pmf

    abstract = get_abstract_dist(dist)
    n_vars = abstract.n_variables
    n_elem = abstract.n_elements

    # Linear equality constraints (not an independent set).
    rows = []
    rhs = []

    # Normalization: \sum_i q_i = 1
    if normalization:
        rows.append(np.ones(n_elem))
        rhs.append(1)

    all_rvs = range(n_vars)
    targets = tuple(all_rvs[-1:])
    sources = tuple(all_rvs[:-1])

    # Allow `k` to be a pair (k, sources) restricting the source set.
    try:
        k, sources = k
        sources = tuple(sources)
    except TypeError:
        pass

    assert k >= 1
    sub_size = k - 1

    # p( source_{k-1}, target ) = q( source_{k-1}, target )
    cache = {}
    for combo in itertools.combinations(sources, sub_size):
        for idx in abstract.parameter_array(combo + targets, cache=cache):
            # Dense indicator row for this sparse marginal outcome.
            row = np.zeros(n_elem)
            row[idx] = 1
            rows.append(row)
            rhs.append(probs[idx].sum())

    if source_marginal:
        for idx in abstract.parameter_array(sources, cache=cache):
            row = np.zeros(n_elem)
            row[idx] = 1
            rows.append(row)
            rhs.append(probs[idx].sum())

    A = np.asarray(rows, dtype=float)
    b = np.asarray(rhs, dtype=float)
    return A, b
Example #11
0
def extra_constraints(dist, k):
    """
    Builds a list of additional constraints using the specific properties
    of the random variables. The goal is to determine if there are any
    (input, output) pairs such that p(input, output) = q(input, output)
    for all q in the feasible set.

    This can happen in a few ways:

    1. If any marginal probability is zero, then all joint probabilities which
       contributed to that marginal probability must also be zero.

    2. Now suppose we want to find out when p({x_i}, y) = q({x_i}, y).

       For every (k-1)-subset, g_k(x), we have:

          p(g_k(x), y) = q(g_k(x), y)

       So we have:

          p(y) = q(y)
          p(g_k(x) | y) = q(g_k(x) | y)    provided k > 1.

       If k = 1, we cannot proceed.

       Now suppose that for every i, we had:

          p(x_i | y) = \delta(x_i, f_i(y))

       Then, it follows that:

          q(x_i | y) = \delta(x_i, f_i(y))

       as well since we match all k-way marginals with k >= 2. E.g., For k=4,
       we have: p( x_1, x_2, x_3, y ) = q( x_1, x_2, x_3, y ) which implies
       that p( x_1 | y ) = q( x_1 | y). So generally, we have:

          p({x_i} | y) = q({x_i} | y)

       for each i. Then, since p(y) = q(y), we also have:

          p({x_i}, y) = q({x_i}, y).

       Note, we do not require that y be some function of the x_i.

    Returns
    -------
    variables : Bunch
        With attributes `free`, `fixed`, `fixed_values`, `fixed_nonzeros`,
        `zeros`, and `nonzeros` partitioning the optimization vector.

    """
    assert dist.is_dense()
    assert dist.get_base() == 'linear'

    d = get_abstract_dist(dist)
    n_variables = d.n_variables
    n_elements = d.n_elements

    rvs = range(n_variables)
    target_rvs = tuple(rvs[-1:])
    source_rvs = tuple(rvs[:-1])

    try:
        # Ignore source restrictions.
        k, _ = k
    except TypeError:
        pass

    assert k >= 1
    submarginal_size = k - 1

    ### Find values that are fixed at zero

    # This finds marginal probabilities that are zero and infers that the
    # joint probabilities that contributed to it are also zero.
    zero_elements = np.zeros(n_elements, dtype=int)
    cache = {}
    pmf = dist.pmf
    for subrvs in itertools.combinations(source_rvs, submarginal_size):
        # Fresh name: the original reused `rvs`, shadowing the outer range.
        marg_rvs = subrvs + target_rvs
        marray = d.parameter_array(marg_rvs, cache=cache)
        for idx in marray:
            p = pmf[idx].sum()
            if np.isclose(p, 0):
                # Convert the sparse nonzero elements to a dense boolean array
                bvec = np.zeros(n_elements, dtype=int)
                bvec[idx] = 1
                zero_elements += bvec

    ### Find values that match the original joint.

    # First identify each p(input_i | output) = 1
    determined = defaultdict(lambda: [0] * len(source_rvs))

    # If there is no source rv because k=1, then nothing can be determined.
    if submarginal_size:
        for i, source_rv in enumerate(source_rvs):
            md, cdists = dist.condition_on(target_rvs, rvs=[source_rv])
            for target_outcome, cdist in zip(md.outcomes, cdists):
                # cdist is dense
                if np.isclose(cdist.pmf, 1).sum() == 1:
                    # Then p(source_rv | target_rvs) = 1
                    determined[target_outcome][i] = 1

    # An outcome is fully determined only when every source is determined.
    is_determined = {outcome: all(det) for outcome, det in determined.items()}

    # Need to find joint indexes j for which all p(a_i | b) = 1.
    # For these j, p_j = q_j.
    determined = {}
    for i, (outcome, p) in enumerate(dist.zipped()):
        if not (p > 0):
            continue
        target = dist._outcome_ctor([outcome[t] for t in target_rvs])
        if is_determined.get(target, False):
            determined[i] = p

    ###

    fixed = {}
    zeros = []
    nonzeros = []
    for i, is_zero in enumerate(zero_elements):
        if is_zero:
            fixed[i] = 0
            zeros.append(i)
        else:
            nonzeros.append(i)
    for i, p in determined.items():
        fixed[i] = p

    if fixed:
        fixed = sorted(fixed.items())
        fixed_indexes, fixed_values = list(zip(*fixed))
        fixed_indexes = list(fixed_indexes)
        fixed_values = list(fixed_values)
    else:
        fixed_indexes = []
        fixed_values = []

    # Build each membership set once; the originals reconstructed the set
    # on every comprehension iteration (accidentally quadratic).
    fixed_set = set(fixed_indexes)
    free = [i for i in range(n_elements) if i not in fixed_set]
    nonzero_set = set(nonzeros)
    fixed_nonzeros = [i for i in fixed_indexes if i in nonzero_set]

    # all indexes   = free + fixed
    # all indexes   = nonzeros + zeros
    # fixed         = fixed_nonzeros + zeros
    # nonzero >= free
    variables = Bunch(
        free=free,
        fixed=fixed_indexes,
        fixed_values=fixed_values,
        fixed_nonzeros=fixed_nonzeros,
        zeros=zeros,
        nonzeros=nonzeros)

    return variables
Example #12
0
def marginal_constraints(dist, m, with_normalization=True):
    """
    Returns `A` and `b` in `A x = b`, for a system of marginal constraints.

    The resulting matrix `A` is not guaranteed to have full rank.

    Parameters
    ----------
    dist : distribution
        The distribution from which the marginal constraints are constructed.

    m : int
        The size of the marginals to constrain. When `m=2`, pairwise marginals
        are constrained to equal the pairwise marginals in `pmf`. When `m=3`,
        three-way marginals are constrained to equal those in `pmf`.

    with_normalization : bool
        If true, include a constraint for normalization.

    Returns
    -------
    A : array-like, shape (p, q)
        The matrix defining the marginal equality constraints and also the
        normalization constraint. The number of rows is:
            p = C(n_variables, m) * n_symbols ** m + 1
        where C() is the choose formula. The number of columns is:
            q = n_symbols ** n_variables

    b : array-like, (p,)
        The RHS of the linear equality constraints.

    """
    assert dist.is_dense()
    assert dist.get_base() == 'linear'

    pmf = dist.pmf
    d = get_abstract_dist(dist)

    if m > d.n_variables:
        raise ValueError(
            "Cannot constrain {0}-way marginals"
            " with only {1} random variables.".format(m, d.n_variables))

    rows = []
    rhs = []

    # The normalization constraint comes first.
    if with_normalization:
        rows.append(np.ones(d.n_elements))
        rhs.append(1)

    # Then one constraint per outcome of each m-way marginal.
    if m > 0:
        cache = {}
        for rvs in itertools.combinations(range(d.n_variables), m):
            for idx in d.parameter_array(rvs, cache=cache):
                row = np.zeros(d.n_elements)
                row[idx] = 1
                rows.append(row)
                rhs.append(pmf[idx].sum())

    A = np.asarray(rows, dtype=float)
    b = np.asarray(rhs, dtype=float)

    return A, b
Example #13
0
def marginal_constraints_generic(dist, rvs, rv_mode=None,
                                 with_normalization=True):
    """
    Returns `A` and `b` in `A x = b`, for a system of marginal constraints.

    In general, the resulting matrix `A` will not have full rank.

    Parameters
    ----------
    dist : distribution
        The distribution used to calculate the marginal constraints.

    rvs : sequence
        A sequence whose elements are also sequences.  Each inner sequence
        specifies a marginal distribution as a set of random variables from
        `dist`. The inner sequences need not be pairwise mutually exclusive
        with one another. A random variable can only appear once within
        each inner sequence, but it can occur in multiple inner sequences.

    rv_mode : str, None
        Specifies how to interpret the elements of `rvs`. Valid options
        are: {'indices', 'names'}. If equal to 'indices', then the elements
        of `rvs` are interpreted as random variable indices. If equal to
        'names', the elements are interpreted as random variable names.
        If `None`, then the value of `dist._rv_mode` is consulted.

    with_normalization : bool
        If True, prepend the normalization constraint `sum(x) = 1`.

    Returns
    -------
    A : ndarray
        Constraint matrix; one row per marginal outcome (plus the
        normalization row when requested).

    b : ndarray
        The right-hand side of the linear equality constraints.

    """
    assert dist.is_dense()
    assert dist.get_base() == 'linear'

    # A named def instead of an assigned lambda (PEP 8, E731).
    def parse(rv):
        return parse_rvs(dist, rv, rv_mode=rv_mode, unique=True, sort=True)[1]

    # potential inclusion: include implied constraints
    # rvs = set().union(*[set(r for r in powerset(rv) if r) for rv in rvs])
    indexes = [parse(rv) for rv in rvs]

    pmf = dist.pmf

    d = get_abstract_dist(dist)

    A = []
    b = []

    # Begin with the normalization constraint.
    if with_normalization:
        A.append(np.ones(d.n_elements))
        b.append(1)

    # Now add all the marginal constraints.
    cache = {}
    for rvec in indexes:
        for idx in d.parameter_array(rvec, cache=cache):
            bvec = np.zeros(d.n_elements)
            bvec[idx] = 1
            A.append(bvec)
            b.append(pmf[idx].sum())

    A = np.asarray(A, dtype=float)
    b = np.asarray(b, dtype=float)

    return A, b