Example #1
    def get_outer_approx(self, algorithm=None):
        """Generate an outer approximation.

        :parameter algorithm: a :class:`str` denoting the algorithm used:
            ``None``, ``'linvac'``, ``'irm'``, ``'imrm'``, or ``'lpbelfunc'``
        :rtype: :class:`~improb.lowprev.lowprob.LowProb`

        This method replaces the lower probability :math:`\underline{P}` by
        a lower probability :math:`\underline{R}` determined by the
        ``algorithm`` argument:

        ``None``
            returns the original lower probability.

            >>> pspace = PSpace('abc')
            >>> lprob = LowProb(pspace,
            ...             lprob={'ab': .5, 'ac': .5, 'bc': .5},
            ...             number_type='fraction')
            >>> lprob.extend()
            >>> print(lprob)
                  : 0
            a     : 0
              b   : 0
                c : 0
            a b   : 1/2
            a   c : 1/2
              b c : 1/2
            a b c : 1
            >>> lprob == lprob.get_outer_approx()
            True

        ``'linvac'``
            replaces the imprecise part :math:`\underline{Q}` by the vacuous
            lower probability :math:`\underline{R}=\min` to generate a simple
            outer approximation.
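
            A minimal standalone sketch of the idea (the ``linvac_lower``
            helper below is hypothetical and not part of improb): with a
            mixing coefficient :math:`\epsilon`, every event other than the
            whole space gets its precise probability scaled by
            :math:`1-\epsilon`, while the whole space keeps lower
            probability one.

            >>> from fractions import Fraction
            >>> def linvac_lower(prob, omega, eps, event):
            ...     if set(event) == set(omega):
            ...         return 1
            ...     return (1 - eps) * sum(prob[x] for x in event)
            >>> prob = {'a': Fraction(1, 5), 'b': Fraction(1, 2),
            ...         'c': Fraction(3, 10)}
            >>> eps = Fraction(1, 10)
            >>> linvac_lower(prob, 'abc', eps, 'ab')
            Fraction(63, 100)
            >>> linvac_lower(prob, 'abc', eps, 'abc')
            1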

        ``'irm'``
            replaces :math:`\underline{P}` by a completely monotone lower
            probability :math:`\underline{R}` obtained with the IRM algorithm
            of Hall & Lawry [#hall2004]_. The Moebius transform of a lower
            probability that is not completely monotone contains negative
            belief assignments. For every event that carries such a negative
            assignment, the approximation removes the negative mass and
            compensates by proportionally reducing the positive masses
            assigned to the events below it; for details, see the paper.

            The following example illustrates the procedure:

            >>> pspace = PSpace('abc')
            >>> lprob = LowProb(pspace,
            ...             lprob={'ab': .5, 'ac': .5, 'bc': .5},
            ...             number_type='fraction')
            >>> lprob.extend()
            >>> print(lprob)
                  : 0
            a     : 0
              b   : 0
                c : 0
            a b   : 1/2
            a   c : 1/2
              b c : 1/2
            a b c : 1
            >>> lprob.is_completely_monotone()
            False
            >>> print(lprob.mobius)
                  : 0
            a     : 0
              b   : 0
                c : 0
            a b   : 1/2
            a   c : 1/2
              b c : 1/2
            a b c : -1/2
            >>> belfunc = lprob.get_outer_approx('irm')
            >>> print(belfunc.mobius)
                  : 0
            a     : 0
              b   : 0
                c : 0
            a b   : 1/3
            a   c : 1/3
              b c : 1/3
            a b c : 0
            >>> print(belfunc)
                  : 0
            a     : 0
              b   : 0
                c : 0
            a b   : 1/3
            a   c : 1/3
              b c : 1/3
            a b c : 1
            >>> belfunc.is_completely_monotone()
            True
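
            In this small example the compensation can be checked by hand,
            mirroring the formula used in the implementation below: the
            negative assignment at :math:`\{a,b,c\}` is :math:`-1/2` and the
            positive mass directly below it totals :math:`3/2`, so each mass
            of :math:`1/2` is scaled by :math:`1 + (-1/2)/(3/2) = 2/3` down to
            :math:`1/3`:

            >>> from fractions import Fraction
            >>> masses = 3 * [Fraction(1, 2)]
            >>> negmass = Fraction(1) - sum(masses)
            >>> [m * (1 + negmass / sum(masses)) for m in masses]
            [Fraction(1, 3), Fraction(1, 3), Fraction(1, 3)]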

            The next is Example 2 from Hall & Lawry's 2004 paper [#hall2004]_:

            >>> pspace = PSpace('ABCD')
            >>> lprob = LowProb(pspace, lprob={'': 0, 'ABCD': 1,
            ...                                'A': .0895, 'B': .2743,
            ...                                'C': .2668, 'D': .1063,
            ...                                'AB': .3947, 'AC': .4506,
            ...                                'AD': .2959, 'BC': .5837,
            ...                                'BD': .4835, 'CD': .4079,
            ...                                'ABC': .7248, 'ABD': .6224,
            ...                                'ACD': .6072, 'BCD': .7502})
            >>> lprob.is_avoiding_sure_loss()
            True
            >>> lprob.is_coherent()
            False
            >>> lprob.is_completely_monotone()
            False
            >>> belfunc = lprob.get_outer_approx('irm')
            >>> belfunc.is_completely_monotone()
            True
            >>> print(lprob)
                    : 0.0
            A       : 0.0895
              B     : 0.2743
                C   : 0.2668
                  D : 0.1063
            A B     : 0.3947
            A   C   : 0.4506
            A     D : 0.2959
              B C   : 0.5837
              B   D : 0.4835
                C D : 0.4079
            A B C   : 0.7248
            A B   D : 0.6224
            A   C D : 0.6072
              B C D : 0.7502
            A B C D : 1.0
            >>> print(belfunc)
                    : 0.0
            A       : 0.0895
              B     : 0.2743
                C   : 0.2668
                  D : 0.1063
            A B     : 0.375789766751
            A   C   : 0.405080300695
            A     D : 0.259553087227
              B C   : 0.560442004097
              B   D : 0.43812301076
                C D : 0.399034985143
            A B C   : 0.710712071543
            A B   D : 0.603365864737
            A   C D : 0.601068373065
              B C D : 0.7502
            A B C D : 1.0
            >>> print(lprob.mobius)
                    : 0.0
            A       : 0.0895
              B     : 0.2743
                C   : 0.2668
                  D : 0.1063
            A B     : 0.0309
            A   C   : 0.0943
            A     D : 0.1001
              B C   : 0.0426
              B   D : 0.1029
                C D : 0.0348
            A B C   : -0.0736
            A B   D : -0.0816
            A   C D : -0.0846
              B C D : -0.0775
            A B C D : 0.1748
            >>> print(belfunc.mobius)
                    : 0.0
            A       : 0.0895
              B     : 0.2743
                C   : 0.2668
                  D : 0.1063
            A B     : 0.0119897667507
            A   C   : 0.0487803006948
            A     D : 0.0637530872268
              B C   : 0.019342004097
              B   D : 0.0575230107598
                C D : 0.0259349851432
            A B C   : 3.33066907388e-16
            A B   D : -1.11022302463e-16
            A   C D : -1.11022302463e-16
              B C D : 0.0
            A B C D : 0.0357768453276
            >>> sum(lprev for (lprev, uprev)
            ...           in (lprob - belfunc).itervalues())/(2 ** len(pspace))
            0.013595658498933991

            .. note::

                This algorithm is *not* invariant under permutation of the
                possibility space.

            .. warning::

                The lower probability must be defined for all events. If
                needed, call :meth:`~improb.lowprev.lowpoly.LowPoly.extend`
                first.

        ``'imrm'``
            replaces :math:`\underline{P}` by a completely monotone lower
            probability :math:`\underline{R}` that is obtained using an as yet
            unpublished algorithm by Quaeghebeur.

            We apply it to Example 2 from Hall & Lawry's 2004 paper
            [#hall2004]_:

            >>> pspace = PSpace('ABCD')
            >>> lprob = LowProb(pspace, lprob={
            ...     '': 0, 'ABCD': 1,
            ...     'A': .0895, 'B': .2743,
            ...     'C': .2668, 'D': .1063,
            ...     'AB': .3947, 'AC': .4506,
            ...     'AD': .2959, 'BC': .5837,
            ...     'BD': .4835, 'CD': .4079,
            ...     'ABC': .7248, 'ABD': .6224,
            ...     'ACD': .6072, 'BCD': .7502})
            >>> belfunc = lprob.get_outer_approx('imrm')
            >>> belfunc.is_completely_monotone()
            True
            >>> print(lprob)
                    : 0.0
            A       : 0.0895
              B     : 0.2743
                C   : 0.2668
                  D : 0.1063
            A B     : 0.3947
            A   C   : 0.4506
            A     D : 0.2959
              B C   : 0.5837
              B   D : 0.4835
                C D : 0.4079
            A B C   : 0.7248
            A B   D : 0.6224
            A   C D : 0.6072
              B C D : 0.7502
            A B C D : 1.0
            >>> print(belfunc)
                    : 0.0
            A       : 0.0895
              B     : 0.2743
                C   : 0.2668
                  D : 0.1063
            A B     : 0.381007057096
            A   C   : 0.411644226231
            A     D : 0.26007767078
              B C   : 0.562748716673
              B   D : 0.4404197271
                C D : 0.394394926787
            A B C   : 0.7248
            A B   D : 0.6224
            A   C D : 0.6072
              B C D : 0.7502
            A B C D : 1.0
            >>> print(lprob.mobius)
                    : 0.0
            A       : 0.0895
              B     : 0.2743
                C   : 0.2668
                  D : 0.1063
            A B     : 0.0309
            A   C   : 0.0943
            A     D : 0.1001
              B C   : 0.0426
              B   D : 0.1029
                C D : 0.0348
            A B C   : -0.0736
            A B   D : -0.0816
            A   C D : -0.0846
              B C D : -0.0775
            A B C D : 0.1748
            >>> print(belfunc.mobius)
                    : 0.0
            A       : 0.0895
              B     : 0.2743
                C   : 0.2668
                  D : 0.1063
            A B     : 0.0172070570962
            A   C   : 0.0553442262305
            A     D : 0.0642776707797
              B C   : 0.0216487166733
              B   D : 0.0598197271
                C D : 0.0212949267869
            A B C   : 2.22044604925e-16
            A B   D : 0.0109955450242
            A   C D : 0.00368317620293
              B C D : 3.66294398528e-05
            A B C D : 0.00879232466651
            >>> sum(lprev for (lprev, uprev)
            ...           in (lprob - belfunc).itervalues())/(2 ** len(pspace))
            0.010375479708342836

            .. note::

                This algorithm *is* invariant under permutation of the
                possibility space.

            .. warning::

                The lower probability must be defined for all events. If
                needed, call :meth:`~improb.lowprev.lowpoly.LowPoly.extend`
                first.

        ``'lpbelfunc'``
            replaces :math:`\underline{P}` by a completely monotone lower
            probability :math:`\underline{R}_\mu` that is obtained via the zeta
            transform of the basic belief assignment :math:`\mu`, a solution of
            the following optimization (linear programming) problem:

            .. math::

                \min\{
                \sum_{A\subseteq\Omega}(\underline{P}(A)-\underline{R}_\mu(A)):
                \mu(A)\geq0, \sum_{B\subseteq\Omega}\mu(B)=1,
                \underline{R}_\mu(A)\leq\underline{P}(A), A\subseteq\Omega
                \},

            which, because constants in the objective function do not influence
            the solution, because
            :math:`\underline{R}_\mu(A)=\sum_{B\subseteq A}\mu(B)`, and because
            every event :math:`B` is a subset of exactly
            :math:`2^{|\Omega|-|B|}` events :math:`A\subseteq\Omega`,
            is equivalent to:

            .. math::

                \max\{
                \sum_{B\subseteq\Omega}2^{|\Omega|-|B|}\mu(B):
                \mu(A)\geq0, \sum_{B\subseteq\Omega}\mu(B)=1,
                \sum_{B\subseteq A}\mu(B)
                 \leq\underline{P}(A), A\subseteq\Omega
                \},

            the version that is implemented.
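
            As a cross-check of this formulation only, here is a hypothetical
            sketch that is not part of improb: it sets up the same linear
            program with :func:`scipy.optimize.linprog` instead of the
            :mod:`cdd`-based solver used in the implementation below, and
            takes ``lprob`` to be a plain dict mapping each event, given as a
            :class:`frozenset` over ``omega``, to its lower probability::

                from itertools import combinations
                from scipy.optimize import linprog

                def powerset(omega):
                    return [frozenset(c) for r in range(len(omega) + 1)
                            for c in combinations(omega, r)]

                def lp_outer_approx(lprob, omega):
                    # one mass mu(B) per event B
                    events = powerset(omega)
                    n = len(omega)
                    # maximize sum_B 2^(n - |B|) mu(B): negate, as linprog minimizes
                    c = [-(2 ** (n - len(B))) for B in events]
                    # sum_{B subset of A} mu(B) <= lprob(A) for every event A
                    a_ub = [[1 if B <= A else 0 for B in events] for A in events]
                    b_ub = [lprob[A] for A in events]
                    # masses are nonnegative (linprog's default bounds) and sum to one
                    result = linprog(c, A_ub=a_ub, b_ub=b_ub,
                                     A_eq=[[1] * len(events)], b_eq=[1])
                    mu = dict(zip(events, result.x))
                    # zeta transform: the lower probability of an event is the
                    # total mass of its subsets
                    return dict((A, sum(m for B, m in mu.items() if B <= A))
                                for A in events)

            Because the optimum need not be unique (see the note below), the
            masses found this way may differ from those returned by
            ``'lpbelfunc'``.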

            We apply this to Example 2 from Hall & Lawry's 2004 paper
            [#hall2004]_, which we also used for ``'irm'``:

            >>> pspace = PSpace('ABCD')
            >>> lprob = LowProb(pspace, lprob={'': 0, 'ABCD': 1,
            ...                                'A': .0895, 'B': .2743,
            ...                                'C': .2668, 'D': .1063,
            ...                                'AB': .3947, 'AC': .4506,
            ...                                'AD': .2959, 'BC': .5837,
            ...                                'BD': .4835, 'CD': .4079,
            ...                                'ABC': .7248, 'ABD': .6224,
            ...                                'ACD': .6072, 'BCD': .7502})
            >>> belfunc = lprob.get_outer_approx('lpbelfunc')
            >>> belfunc.is_completely_monotone()
            True
            >>> print(lprob)
                    : 0.0
            A       : 0.0895
              B     : 0.2743
                C   : 0.2668
                  D : 0.1063
            A B     : 0.3947
            A   C   : 0.4506
            A     D : 0.2959
              B C   : 0.5837
              B   D : 0.4835
                C D : 0.4079
            A B C   : 0.7248
            A B   D : 0.6224
            A   C D : 0.6072
              B C D : 0.7502
            A B C D : 1.0
            >>> print(belfunc)
                    : 0.0
            A       : 0.0895
              B     : 0.2743
                C   : 0.2668
                  D : 0.1063
            A B     : 0.3638
            A   C   : 0.4079
            A     D : 0.28835
              B C   : 0.5837
              B   D : 0.44035
                C D : 0.37355
            A B C   : 0.7248
            A B   D : 0.6224
            A   C D : 0.6072
              B C D : 0.7502
            A B C D : 1.0
            >>> print(lprob.mobius)
                    : 0.0
            A       : 0.0895
              B     : 0.2743
                C   : 0.2668
                  D : 0.1063
            A B     : 0.0309
            A   C   : 0.0943
            A     D : 0.1001
              B C   : 0.0426
              B   D : 0.1029
                C D : 0.0348
            A B C   : -0.0736
            A B   D : -0.0816
            A   C D : -0.0846
              B C D : -0.0775
            A B C D : 0.1748
            >>> print(belfunc.mobius)
                    : 0.0
            A       : 0.0895
              B     : 0.2743
                C   : 0.2668
                  D : 0.1063
            A B     : 0.0
            A   C   : 0.0516
            A     D : 0.09255
              B C   : 0.0426
              B   D : 0.05975
                C D : 0.00045
            A B C   : 0.0
            A B   D : 1.11022302463e-16
            A   C D : 0.0
              B C D : 0.0
            A B C D : 0.01615
            >>> sum(lprev for (lprev, uprev)
            ...           in (lprob - belfunc).itervalues())/(2 ** len(pspace)
            ...     ) # doctest: +ELLIPSIS
            0.00991562...

            .. note::

                This algorithm is *not* invariant under permutation of the
                possibility space or changes in the LP-solver:
                there may be a nontrivial convex set of optimal solutions.

            .. warning::

                The lower probability must be defined for all events. If
                needed, call :meth:`~improb.lowprev.lowpoly.LowPoly.extend`
                first.

        """
        if algorithm is None:
            return self
        elif algorithm == 'linvac':
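            # split off the precise part of the lower probability and combine
            # it with the vacuous model in a linear-vacuous mixture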
            prob, coeff = self.get_precise_part()
            return prob.get_linvac(1 - coeff)
        elif algorithm == 'irm':
            # Initialize the algorithm
            pspace = self.pspace
            bba = SetFunction(pspace, number_type=self.number_type)
            bba[False] = 0

            def mass_below(event):
                # total mass already assigned to the proper, nonempty
                # subevents of the given event
                subevents = pspace.subsets(event, full=False, empty=False)
                return sum(bba[subevent] for subevent in subevents)

            def basin_for_negmass(event):
                # accumulate mass from the cardinalities just below the event
                # until it can absorb the event's negative assignment; return
                # the cutoff cardinality and the accumulated mass
                mass = 0
                index = len(event)
                while bba[event] + mass < 0:
                    index -= 1
                    subevents = pspace.subsets(event, size=index)
                    mass += sum(bba[subevent] for subevent in subevents)
                return (index, mass)

            lprob = self.set_function
            # The algorithm itself:
            # we climb the algebra of events, calculating the belief assignment
            # for each event and compensating negative ones by proportionally
            # reducing the assignments in the smallest basin of subevents needed
            for cardinality in range(1, len(pspace) + 1):
                for event in pspace.subsets(size=cardinality):
                    bba[event] = lprob[event] - mass_below(event)
                    if bba[event] < 0:
                        index, mass = basin_for_negmass(event)
                        subevents = chain.from_iterable(
                            pspace.subsets(event, size=k)
                            for k in range(index, cardinality))
                        for subevent in subevents:
                            bba[subevent] = (bba[subevent] *
                                             (1 + (bba[event] / mass)))
                        bba[event] = 0
            return LowProb(pspace,
                           lprob=dict((event, bba.get_zeta(event))
                                      for event in bba.iterkeys()))
        elif algorithm == 'imrm':
            # Initialize the algorithm
            pspace = self.pspace
            number_type = self.number_type
            bba = SetFunction(pspace, number_type=number_type)
            bba[False] = 0

            def mass_below(event, cardinality=None):
                # total mass already assigned to the proper, nonempty subevents
                # of the given event, optionally restricted to one cardinality
                subevents = pspace.subsets(event,
                                           full=False,
                                           empty=False,
                                           size=cardinality)
                return sum(bba[subevent] for subevent in subevents)

            def basin_for_negmass(event):
                mass = 0
                index = len(event)
                while bba[event] + mass < 0:
                    index -= 1
                    subevents = pspace.subsets(event, size=index)
                    mass += sum(bba[subevent] for subevent in subevents)
                return (index, mass)

            lprob = self.set_function
            # The algorithm itself:
            cardinality = 1
            while cardinality <= len(pspace):
                temp_bba = SetFunction(pspace, number_type=number_type)
                for event in pspace.subsets(size=cardinality):
                    bba[event] = lprob[event] - mass_below(event)
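                # events at this cardinality whose belief assignment went
                # negative, mapped to the basin (cutoff cardinality, mass)
                # that can absorb the deficit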
                offenders = dict((event, basin_for_negmass(event))
                                 for event in pspace.subsets(size=cardinality)
                                 if bba[event] < 0)
                if len(offenders) == 0:
                    cardinality += 1
                else:
                    minindex = min(pair[0] for pair in offenders.itervalues())
                    for event in offenders:
                        if offenders[event][0] == minindex:
                            mass = mass_below(event, cardinality=minindex)
                            scalef = (offenders[event][1] + bba[event]) / mass
                            for subevent in pspace.subsets(event,
                                                           size=minindex):
                                if subevent not in temp_bba:
                                    temp_bba[subevent] = 0
                                temp_bba[subevent] = max(
                                    temp_bba[subevent], scalef * bba[subevent])
                    for event, value in temp_bba.iteritems():
                        bba[event] = value
                    cardinality = minindex + 1
            return LowProb(pspace,
                           lprob=dict((event, bba.get_zeta(event))
                                      for event in bba.iterkeys()))
        elif algorithm == 'lpbelfunc':
            # Initialize the algorithm
            lprob = self.set_function
            pspace = lprob.pspace
            number_type = lprob.number_type
            n = 2**len(pspace)
            # Set up the linear program
            mat = cdd.Matrix(list(
                chain(
                    [[-1] + n * [1], [1] + n * [-1]],
                    [[0] + [int(event == other) for other in pspace.subsets()]
                     for event in pspace.subsets()],
                    [[lprob[event]] +
                     [-int(other <= event) for other in pspace.subsets()]
                     for event in pspace.subsets()])),
                             number_type=number_type)
            mat.obj_type = cdd.LPObjType.MAX
            mat.obj_func = (0, ) + tuple(2**(len(pspace) - len(event))
                                         for event in pspace.subsets())
            lp = cdd.LinProg(mat)
            # Solve the linear program and check the solution
            lp.solve()
            if lp.status == cdd.LPStatusType.OPTIMAL:
                bba = SetFunction(pspace,
                                  data=dict(
                                      izip(list(pspace.subsets()),
                                           list(lp.primal_solution))),
                                  number_type=number_type)
                return LowProb(pspace,
                               lprob=dict((event, bba.get_zeta(event))
                                          for event in bba.iterkeys()))
            else:
                raise RuntimeError('No optimal solution found.')
        else:
            raise NotImplementedError