def _make_set_function(self):
    return SetFunction(
        pspace=self.pspace,
        data=dict((self.pspace.make_event(gamble), lprev)
                  for (gamble, cond_event), (lprev, uprev)
                  in self.iteritems()),
        number_type=self.number_type)
def _make_mobius(self):
    """Constructs the basic belief assignment corresponding to the
    assigned unconditional lower probabilities.
    """
    # construct set function corresponding to this lower probability
    return SetFunction(
        pspace=self.pspace,
        data=dict((event, self.set_function.get_mobius(event))
                  for event in self.pspace.subsets()),
        number_type=self.number_type)
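
# Hedged sketch, not part of the original module: the helper below only
# illustrates that the Moebius transform used in _make_mobius and the zeta
# transform are each other's inverse, so summing the basic belief assignment
# over all subevents recovers the original set function. It assumes PSpace
# and SetFunction are available as in the code above; the function name and
# the example values are made up for illustration.
def _mobius_zeta_roundtrip_demo():
    pspace = PSpace('ab')
    setfunc = SetFunction(
        pspace=pspace,
        data=dict((event, len(event)) for event in pspace.subsets()),
        number_type='fraction')
    # basic belief assignment: Moebius transform of the set function
    bba = SetFunction(
        pspace=pspace,
        data=dict((event, setfunc.get_mobius(event))
                  for event in pspace.subsets()),
        number_type='fraction')
    # zeta transform of the bba gives back the original set function
    for event in pspace.subsets():
        assert bba.get_zeta(event) == setfunc[event]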
def timeit(n=10, m=10):
    k = 10000 // n  # number of trials, for timing
    gambles = [dict((i, random.randint(1, m)) for i in xrange(n))
               for j in xrange(k)]
    pspace = PSpace(n)
    s = SetFunction(pspace=pspace, number_type='fraction')
    # hack so we do not need to fill the set function with data
    # (this solves issues when testing for large n)
    s._data = defaultdict(lambda: random.randint(-len(pspace), len(pspace)))
    t = time.clock()
    for gamble in gambles:
        s.get_choquet(gamble)
    return time.clock() - t
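
# Hedged usage sketch, not part of the original module: one possible way to
# call the timeit helper above from a script; the possibility space sizes
# below are arbitrary.
def _run_choquet_timings():
    for size in [2, 4, 8, 16]:
        print("n=%d: %.3f seconds" % (size, timeit(n=size, m=10)))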
def get_outer_approx(self, algorithm=None):
    """Generate an outer approximation.

    :parameter algorithm: a :class:`~string` denoting the algorithm used:
        ``None``, ``'linvac'``, ``'irm'``, ``'imrm'``, or ``'lpbelfunc'``
    :rtype: :class:`~improb.lowprev.lowprob.LowProb`

    This method replaces the lower probability :math:`\underline{P}` by
    a lower probability :math:`\underline{R}` determined by the
    ``algorithm`` argument:

    ``None`` returns the original lower probability.

    >>> pspace = PSpace('abc')
    >>> lprob = LowProb(pspace,
    ...     lprob={'ab': .5, 'ac': .5, 'bc': .5},
    ...     number_type='fraction')
    >>> lprob.extend()
    >>> print(lprob)
          : 0
    a     : 0
    b     : 0
    c     : 0
    a b   : 1/2
    a c   : 1/2
    b c   : 1/2
    a b c : 1
    >>> lprob == lprob.get_outer_approx()
    True

    ``'linvac'`` replaces the imprecise part :math:`\underline{Q}` by the
    vacuous lower probability :math:`\underline{R}=\min` to generate a
    simple outer approximation.

    ``'irm'`` replaces :math:`\underline{P}` by a completely monotone
    lower probability :math:`\underline{R}` obtained with the IRM
    algorithm of Hall & Lawry [#hall2004]_. The Moebius transform of a
    lower probability that is not completely monotone contains negative
    belief assignments. Consider such a lower probability and an event
    with such a negative belief assignment. The approximation consists of
    removing this negative assignment and compensating for it by
    correspondingly reducing the positive masses of events below it; for
    details, see the paper.

    The following example illustrates the procedure:

    >>> pspace = PSpace('abc')
    >>> lprob = LowProb(pspace,
    ...     lprob={'ab': .5, 'ac': .5, 'bc': .5},
    ...     number_type='fraction')
    >>> lprob.extend()
    >>> print(lprob)
          : 0
    a     : 0
    b     : 0
    c     : 0
    a b   : 1/2
    a c   : 1/2
    b c   : 1/2
    a b c : 1
    >>> lprob.is_completely_monotone()
    False
    >>> print(lprob.mobius)
          : 0
    a     : 0
    b     : 0
    c     : 0
    a b   : 1/2
    a c   : 1/2
    b c   : 1/2
    a b c : -1/2
    >>> belfunc = lprob.get_outer_approx('irm')
    >>> print(belfunc.mobius)
          : 0
    a     : 0
    b     : 0
    c     : 0
    a b   : 1/3
    a c   : 1/3
    b c   : 1/3
    a b c : 0
    >>> print(belfunc)
          : 0
    a     : 0
    b     : 0
    c     : 0
    a b   : 1/3
    a c   : 1/3
    b c   : 1/3
    a b c : 1
    >>> belfunc.is_completely_monotone()
    True

    The next example is Example 2 from Hall & Lawry's 2004 paper
    [#hall2004]_:

    >>> pspace = PSpace('ABCD')
    >>> lprob = LowProb(pspace, lprob={'': 0, 'ABCD': 1,
    ...     'A': .0895, 'B': .2743,
    ...     'C': .2668, 'D': .1063,
    ...     'AB': .3947, 'AC': .4506,
    ...     'AD': .2959, 'BC': .5837,
    ...     'BD': .4835, 'CD': .4079,
    ...     'ABC': .7248, 'ABD': .6224,
    ...     'ACD': .6072, 'BCD': .7502})
    >>> lprob.is_avoiding_sure_loss()
    True
    >>> lprob.is_coherent()
    False
    >>> lprob.is_completely_monotone()
    False
    >>> belfunc = lprob.get_outer_approx('irm')
    >>> belfunc.is_completely_monotone()
    True
    >>> print(lprob)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.3947
    A C     : 0.4506
    A D     : 0.2959
    B C     : 0.5837
    B D     : 0.4835
    C D     : 0.4079
    A B C   : 0.7248
    A B D   : 0.6224
    A C D   : 0.6072
    B C D   : 0.7502
    A B C D : 1.0
    >>> print(belfunc)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.375789766751
    A C     : 0.405080300695
    A D     : 0.259553087227
    B C     : 0.560442004097
    B D     : 0.43812301076
    C D     : 0.399034985143
    A B C   : 0.710712071543
    A B D   : 0.603365864737
    A C D   : 0.601068373065
    B C D   : 0.7502
    A B C D : 1.0
    >>> print(lprob.mobius)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.0309
    A C     : 0.0943
    A D     : 0.1001
    B C     : 0.0426
    B D     : 0.1029
    C D     : 0.0348
    A B C   : -0.0736
    A B D   : -0.0816
    A C D   : -0.0846
    B C D   : -0.0775
    A B C D : 0.1748
    >>> print(belfunc.mobius)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.0119897667507
    A C     : 0.0487803006948
    A D     : 0.0637530872268
    B C     : 0.019342004097
    B D     : 0.0575230107598
    C D     : 0.0259349851432
    A B C   : 3.33066907388e-16
    A B D   : -1.11022302463e-16
    A C D   : -1.11022302463e-16
    B C D   : 0.0
    A B C D : 0.0357768453276
    >>> sum(lprev for (lprev, uprev)
    ...     in (lprob - belfunc).itervalues())/(2 ** len(pspace))
    0.013595658498933991

    .. note::

        This algorithm is *not* invariant under permutation of the
        possibility space.

    .. warning::

        The lower probability must be defined for all events. If needed,
        call :meth:`~improb.lowprev.lowpoly.LowPoly.extend` first.

    ``'imrm'`` replaces :math:`\underline{P}` by a completely monotone
    lower probability :math:`\underline{R}` obtained with an as yet
    unpublished algorithm by Quaeghebeur. We apply it to Example 2 from
    Hall & Lawry's 2004 paper [#hall2004]_:

    >>> pspace = PSpace('ABCD')
    >>> lprob = LowProb(pspace, lprob={
    ...     '': 0, 'ABCD': 1,
    ...     'A': .0895, 'B': .2743,
    ...     'C': .2668, 'D': .1063,
    ...     'AB': .3947, 'AC': .4506,
    ...     'AD': .2959, 'BC': .5837,
    ...     'BD': .4835, 'CD': .4079,
    ...     'ABC': .7248, 'ABD': .6224,
    ...     'ACD': .6072, 'BCD': .7502})
    >>> belfunc = lprob.get_outer_approx('imrm')
    >>> belfunc.is_completely_monotone()
    True
    >>> print(lprob)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.3947
    A C     : 0.4506
    A D     : 0.2959
    B C     : 0.5837
    B D     : 0.4835
    C D     : 0.4079
    A B C   : 0.7248
    A B D   : 0.6224
    A C D   : 0.6072
    B C D   : 0.7502
    A B C D : 1.0
    >>> print(belfunc)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.381007057096
    A C     : 0.411644226231
    A D     : 0.26007767078
    B C     : 0.562748716673
    B D     : 0.4404197271
    C D     : 0.394394926787
    A B C   : 0.7248
    A B D   : 0.6224
    A C D   : 0.6072
    B C D   : 0.7502
    A B C D : 1.0
    >>> print(lprob.mobius)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.0309
    A C     : 0.0943
    A D     : 0.1001
    B C     : 0.0426
    B D     : 0.1029
    C D     : 0.0348
    A B C   : -0.0736
    A B D   : -0.0816
    A C D   : -0.0846
    B C D   : -0.0775
    A B C D : 0.1748
    >>> print(belfunc.mobius)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.0172070570962
    A C     : 0.0553442262305
    A D     : 0.0642776707797
    B C     : 0.0216487166733
    B D     : 0.0598197271
    C D     : 0.0212949267869
    A B C   : 2.22044604925e-16
    A B D   : 0.0109955450242
    A C D   : 0.00368317620293
    B C D   : 3.66294398528e-05
    A B C D : 0.00879232466651
    >>> sum(lprev for (lprev, uprev)
    ...     in (lprob - belfunc).itervalues())/(2 ** len(pspace))
    0.010375479708342836

    .. note::

        This algorithm *is* invariant under permutation of the
        possibility space.

    .. warning::

        The lower probability must be defined for all events. If needed,
        call :meth:`~improb.lowprev.lowpoly.LowPoly.extend` first.

    ``'lpbelfunc'`` replaces :math:`\underline{P}` by a completely
    monotone lower probability :math:`\underline{R}_\mu` that is obtained
    via the zeta transform of the basic belief assignment :math:`\mu`, a
    solution of the following optimization (linear programming) problem:

    .. math::

        \min\{
        \sum_{A\subseteq\Omega}(\underline{P}(A)-\underline{R}_\mu(A)):
        \mu(A)\geq0, \sum_{B\subseteq\Omega}\mu(B)=1,
        \underline{R}_\mu(A)\leq\underline{P}(A), A\subseteq\Omega
        \},

    which, because constants in the objective function do not influence
    the solution and because
    :math:`\underline{R}_\mu(A)=\sum_{B\subseteq A}\mu(B)`, is equivalent
    to:

    .. math::

        \max\{
        \sum_{B\subseteq\Omega}2^{|\Omega|-|B|}\mu(B):
        \mu(A)\geq0, \sum_{B\subseteq\Omega}\mu(B)=1,
        \sum_{B\subseteq A}\mu(B)\leq\underline{P}(A), A\subseteq\Omega
        \},

    the version that is implemented. We apply this to Example 2 from
    Hall & Lawry's 2004 paper [#hall2004]_, which we also used for
    ``'irm'``:

    >>> pspace = PSpace('ABCD')
    >>> lprob = LowProb(pspace, lprob={'': 0, 'ABCD': 1,
    ...     'A': .0895, 'B': .2743,
    ...     'C': .2668, 'D': .1063,
    ...     'AB': .3947, 'AC': .4506,
    ...     'AD': .2959, 'BC': .5837,
    ...     'BD': .4835, 'CD': .4079,
    ...     'ABC': .7248, 'ABD': .6224,
    ...     'ACD': .6072, 'BCD': .7502})
    >>> belfunc = lprob.get_outer_approx('lpbelfunc')
    >>> belfunc.is_completely_monotone()
    True
    >>> print(lprob)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.3947
    A C     : 0.4506
    A D     : 0.2959
    B C     : 0.5837
    B D     : 0.4835
    C D     : 0.4079
    A B C   : 0.7248
    A B D   : 0.6224
    A C D   : 0.6072
    B C D   : 0.7502
    A B C D : 1.0
    >>> print(belfunc)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.3638
    A C     : 0.4079
    A D     : 0.28835
    B C     : 0.5837
    B D     : 0.44035
    C D     : 0.37355
    A B C   : 0.7248
    A B D   : 0.6224
    A C D   : 0.6072
    B C D   : 0.7502
    A B C D : 1.0
    >>> print(lprob.mobius)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.0309
    A C     : 0.0943
    A D     : 0.1001
    B C     : 0.0426
    B D     : 0.1029
    C D     : 0.0348
    A B C   : -0.0736
    A B D   : -0.0816
    A C D   : -0.0846
    B C D   : -0.0775
    A B C D : 0.1748
    >>> print(belfunc.mobius)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.0
    A C     : 0.0516
    A D     : 0.09255
    B C     : 0.0426
    B D     : 0.05975
    C D     : 0.00045
    A B C   : 0.0
    A B D   : 1.11022302463e-16
    A C D   : 0.0
    B C D   : 0.0
    A B C D : 0.01615
    >>> sum(lprev for (lprev, uprev)
    ...     in (lprob - belfunc).itervalues())/(2 ** len(pspace)
    ...     ) # doctest: +ELLIPSIS
    0.00991562...

    .. note::

        This algorithm is *not* invariant under permutation of the
        possibility space or changes in the LP-solver: there may be a
        nontrivial convex set of optimal solutions.

    .. warning::

        The lower probability must be defined for all events. If needed,
        call :meth:`~improb.lowprev.lowpoly.LowPoly.extend` first.

    """
    if algorithm is None:
        return self
    elif algorithm == 'linvac':
        prob, coeff = self.get_precise_part()
        return prob.get_linvac(1 - coeff)
    elif algorithm == 'irm':
        # Initialize the algorithm
        pspace = self.pspace
        bba = SetFunction(pspace, number_type=self.number_type)
        bba[False] = 0

        def mass_below(event):
            subevents = pspace.subsets(event, full=False, empty=False)
            return sum(bba[subevent] for subevent in subevents)

        def basin_for_negmass(event):
            mass = 0
            index = len(event)
            while bba[event] + mass < 0:
                index -= 1
                subevents = pspace.subsets(event, size=index)
                mass += sum(bba[subevent] for subevent in subevents)
            return (index, mass)

        lprob = self.set_function
        # The algorithm itself:
        # we climb the algebra of events, calculating the belief assignment
        # for each and compensate negative ones by proportionally reducing
        # the assignments in the smallest basin of subevents needed
        for cardinality in range(1, len(pspace) + 1):
            for event in pspace.subsets(size=cardinality):
                bba[event] = lprob[event] - mass_below(event)
                if bba[event] < 0:
                    index, mass = basin_for_negmass(event)
                    subevents = chain.from_iterable(
                        pspace.subsets(event, size=k)
                        for k in range(index, cardinality))
                    for subevent in subevents:
                        bba[subevent] = (bba[subevent]
                                         * (1 + (bba[event] / mass)))
                    bba[event] = 0
        return LowProb(pspace,
                       lprob=dict((event, bba.get_zeta(event))
                                  for event in bba.iterkeys()))
    elif algorithm == 'imrm':
        # Initialize the algorithm
        pspace = self.pspace
        number_type = self.number_type
        bba = SetFunction(pspace, number_type=number_type)
        bba[False] = 0

        def mass_below(event, cardinality=None):
            subevents = pspace.subsets(event, full=False, empty=False,
                                       size=cardinality)
            return sum(bba[subevent] for subevent in subevents)

        def basin_for_negmass(event):
            mass = 0
            index = len(event)
            while bba[event] + mass < 0:
                index -= 1
                subevents = pspace.subsets(event, size=index)
                mass += sum(bba[subevent] for subevent in subevents)
            return (index, mass)

        lprob = self.set_function
        # The algorithm itself:
        cardinality = 1
        while cardinality <= len(pspace):
            temp_bba = SetFunction(pspace, number_type=number_type)
            for event in pspace.subsets(size=cardinality):
                bba[event] = lprob[event] - mass_below(event)
            offenders = dict((event, basin_for_negmass(event))
                             for event in pspace.subsets(size=cardinality)
                             if bba[event] < 0)
            if len(offenders) == 0:
                cardinality += 1
            else:
                minindex = min(pair[0] for pair in offenders.itervalues())
                for event in offenders:
                    if offenders[event][0] == minindex:
                        mass = mass_below(event, cardinality=minindex)
                        scalef = (offenders[event][1] + bba[event]) / mass
                        for subevent in pspace.subsets(event, size=minindex):
                            if subevent not in temp_bba:
                                temp_bba[subevent] = 0
                            temp_bba[subevent] = max(
                                temp_bba[subevent],
                                scalef * bba[subevent])
                for event, value in temp_bba.iteritems():
                    bba[event] = value
                cardinality = minindex + 1
        return LowProb(pspace,
                       lprob=dict((event, bba.get_zeta(event))
                                  for event in bba.iterkeys()))
    elif algorithm == 'lpbelfunc':
        # Initialize the algorithm
        lprob = self.set_function
        pspace = lprob.pspace
        number_type = lprob.number_type
        n = 2 ** len(pspace)
        # Set up the linear program
        mat = cdd.Matrix(list(chain(
            [[-1] + n * [1], [1] + n * [-1]],
            [[0] + [int(event == other) for other in pspace.subsets()]
             for event in pspace.subsets()],
            [[lprob[event]] + [-int(other <= event)
                               for other in pspace.subsets()]
             for event in pspace.subsets()])),
            number_type=number_type)
        mat.obj_type = cdd.LPObjType.MAX
        mat.obj_func = (0,) + tuple(2 ** (len(pspace) - len(event))
                                    for event in pspace.subsets())
        lp = cdd.LinProg(mat)
        # Solve the linear program and check the solution
        lp.solve()
        if lp.status == cdd.LPStatusType.OPTIMAL:
            bba = SetFunction(pspace,
                              data=dict(izip(list(pspace.subsets()),
                                             list(lp.primal_solution))),
                              number_type=number_type)
            return LowProb(pspace,
                           lprob=dict((event, bba.get_zeta(event))
                                      for event in bba.iterkeys()))
        else:
            raise RuntimeError('No optimal solution found.')
    else:
        raise NotImplementedError
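
# Hedged usage sketch, not part of the original module: it mirrors the
# doctests of get_outer_approx above, computing the 'irm' and 'imrm' outer
# approximations of the same lower probability and checking that both are
# completely monotone; the function name is made up for illustration.
def _outer_approx_demo():
    pspace = PSpace('abc')
    lprob = LowProb(pspace,
                    lprob={'ab': .5, 'ac': .5, 'bc': .5},
                    number_type='fraction')
    lprob.extend()  # the algorithms need lower probabilities for all events
    for algorithm in ['irm', 'imrm']:
        belfunc = lprob.get_outer_approx(algorithm)
        print(algorithm)
        print(belfunc)
        assert belfunc.is_completely_monotone()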
def __init__(self, pspace=None, mapping=None,
             lprev=None, uprev=None, prev=None,
             lprob=None, uprob=None, prob=None,
             bba=None, credalset=None, number_type=None):
    """Construct a polyhedral lower prevision on *pspace*.

    :param pspace: The possibility space.
    :type pspace: |pspacetype|
    :param mapping: Mapping from (gamble, event) to (lower prevision,
        upper prevision).
    :type mapping: :class:`collections.Mapping`
    :param lprev: Mapping from gamble to lower prevision.
    :type lprev: :class:`collections.Mapping`
    :param uprev: Mapping from gamble to upper prevision.
    :type uprev: :class:`collections.Mapping`
    :param prev: Mapping from gamble to precise prevision.
    :type prev: :class:`collections.Mapping`
    :param lprob: Mapping from event to lower probability.
    :type lprob: :class:`collections.Mapping` or
        :class:`collections.Sequence`
    :param uprob: Mapping from event to upper probability.
    :type uprob: :class:`collections.Mapping` or
        :class:`collections.Sequence`
    :param prob: Mapping from event to precise probability.
    :type prob: :class:`collections.Mapping` or
        :class:`collections.Sequence`
    :param bba: Mapping from event to basic belief assignment (useful
        for constructing belief functions).
    :type bba: :class:`collections.Mapping`
    :param credalset: Sequence of probability mass functions.
    :type credalset: :class:`collections.Sequence`
    :param number_type: The number type. If not specified, it is
        determined using :func:`~cdd.get_number_type_from_sequences`
        on all values.
    :type number_type: :class:`str`

    Generally, you can pass a :class:`dict` as a keyword argument in
    order to initialize the lower and upper previsions and/or
    probabilities:

    >>> print(LowPoly(pspace=3, mapping={
    ...     ((3, 1, 2), True): (1.5, None),
    ...     ((1, 0, -1), (1, 2)): (0.25, 0.3)})) # doctest: +NORMALIZE_WHITESPACE
    0 1 2
    3.0 1.0 2.0  | 0 1 2 : [1.5 ,     ]
    1.0 0.0 -1.0 |   1 2 : [0.25, 0.3 ]
    >>> print(LowPoly(pspace=3,
    ...     lprev={(1, 3, 2): 1.5, (2, 0, -1): 1},
    ...     uprev={(2, 0, -1): 1.9},
    ...     prev={(9, 8, 20): 15},
    ...     lprob={(1, 2): 0.2, (1,): 0.1},
    ...     uprob={(1, 2): 0.3, (0,): 0.9},
    ...     prob={(2,): '0.3'})) # doctest: +NORMALIZE_WHITESPACE
    0 1 2
    0.0 0.0 1.0  | 0 1 2 : [0.3 , 0.3 ]
    0.0 1.0 0.0  | 0 1 2 : [0.1 ,     ]
    0.0 1.0 1.0  | 0 1 2 : [0.2 , 0.3 ]
    1.0 0.0 0.0  | 0 1 2 : [    , 0.9 ]
    1.0 3.0 2.0  | 0 1 2 : [1.5 ,     ]
    2.0 0.0 -1.0 | 0 1 2 : [1.0 , 1.9 ]
    9.0 8.0 20.0 | 0 1 2 : [15.0, 15.0]

    A credal set can be specified simply as a list:

    >>> print(LowPoly(pspace=3,
    ...     credalset=[['0.1', '0.45', '0.45'],
    ...                ['0.4', '0.3', '0.3'],
    ...                ['0.3', '0.2', '0.5']]))
    0     1     2
    -10   10    0     | 0 1 2 : [-1, ]
    -1    -2    0     | 0 1 2 : [-1, ]
    1     1     1     | 0 1 2 : [1 , 1 ]
    50/23 40/23 0     | 0 1 2 : [1 , ]

    As a special case, for lower/upper/precise probabilities, if you
    need to set values on singletons, you can use a list instead of a
    dictionary:

    >>> print(LowPoly(pspace='abc', lprob=['0.1', '0.2', '0.3'])) # doctest: +NORMALIZE_WHITESPACE
    a b c
    0 0 1 | a b c : [3/10, ]
    0 1 0 | a b c : [1/5 , ]
    1 0 0 | a b c : [1/10, ]

    If the first argument is a :class:`LowPoly` instance, then it is
    copied. For example:

    >>> from improb.lowprev.lowprob import LowProb
    >>> lpr = LowPoly(pspace='abc', lprob=['0.1', '0.1', '0.1'])
    >>> print(lpr)
    a b c
    0 0 1 | a b c : [1/10, ]
    0 1 0 | a b c : [1/10, ]
    1 0 0 | a b c : [1/10, ]
    >>> lprob = LowProb(lpr)
    >>> print(lprob)
    a : 1/10
    b : 1/10
    c : 1/10
    """
    def iter_items(obj):
        """Return an iterator over all items of the mapping or the
        sequence.
        """
        if isinstance(obj, collections.Mapping):
            return obj.iteritems()
        elif isinstance(obj, collections.Sequence):
            if len(obj) < len(self.pspace):
                raise ValueError('sequence too short')
            return (((omega,), value)
                    for omega, value in itertools.izip(self.pspace, obj))
        else:
            raise TypeError(
                'expected collections.Mapping or collections.Sequence')

    def get_number_type(xprevs, xprobs):
        """Determine number type from arguments."""
        # special case: nothing specified, defaults to float
        if (all(xprev is None for xprev in xprevs)
                and all(xprob is None for xprob in xprobs)):
            return 'float'
        # inspect all values
        for xprev in xprevs:
            if xprev is None:
                continue
            for key, value in xprev.iteritems():
                # inspect gamble
                if isinstance(key, Gamble):
                    if key.number_type == 'float':
                        return 'float'
                elif isinstance(key, collections.Sequence):
                    if cdd.get_number_type_from_sequences(key) == 'float':
                        return 'float'
                elif isinstance(key, collections.Mapping):
                    if cdd.get_number_type_from_sequences(
                            key.itervalues()) == 'float':
                        return 'float'
                # inspect value(s)
                if isinstance(value, collections.Sequence):
                    if cdd.get_number_type_from_sequences(value) == 'float':
                        return 'float'
                else:
                    if cdd.get_number_type_from_value(value) == 'float':
                        return 'float'
        for xprob in xprobs:
            if xprob is None:
                continue
            for key, value in iter_items(xprob):
                if cdd.get_number_type_from_value(value) == 'float':
                    return 'float'
        # everything is fraction
        return 'fraction'

    # if first argument is a LowPoly, then override all other arguments
    if isinstance(pspace, LowPoly):
        mapping = dict(pspace.iteritems())
        number_type = pspace.number_type
        pspace = pspace.pspace
    # initialize everything
    self._pspace = PSpace.make(pspace)
    if number_type is None:
        number_type = get_number_type(
            [mapping, lprev, uprev, prev, bba],
            [lprob, uprob, prob] + (credalset if credalset else []))
    cdd.NumberTypeable.__init__(self, number_type)
    self._mapping = {}
    if mapping:
        for key, value in mapping.iteritems():
            self[key] = value
    if lprev:
        for gamble, value in lprev.iteritems():
            self.set_lower(gamble, value)
    if uprev:
        for gamble, value in uprev.iteritems():
            self.set_upper(gamble, value)
    if prev:
        for gamble, value in prev.iteritems():
            self.set_precise(gamble, value)
    if lprob:
        for event, value in iter_items(lprob):
            event = self.pspace.make_event(event)
            self.set_lower(event, value)
    if uprob:
        for event, value in iter_items(uprob):
            event = self.pspace.make_event(event)
            self.set_upper(event, value)
    if prob:
        for event, value in iter_items(prob):
            event = self.pspace.make_event(event)
            self.set_precise(event, value)
    if bba:
        setfunc = SetFunction(
            pspace=self.pspace, data=bba, number_type=self.number_type)
        for event in self.pspace.subsets():
            self.set_lower(event, setfunc.get_zeta(event))
    if credalset:
        # set up polyhedral representation
        mat = cdd.Matrix([['1'] + credalprob for credalprob in credalset])
        mat.rep_type = cdd.RepType.GENERATOR
        poly = cdd.Polyhedron(mat)
        dualmat = poly.get_inequalities()
        #print(mat)
        #print(dualmat)
        for rownum, row in enumerate(dualmat):
            if rownum in dualmat.lin_set:
                self.set_precise(row[1:], -row[0])
            else:
                self.set_lower(row[1:], -row[0])