def test_pareto_frontier_does_not_depend_on_order():
    """The Pareto frontier must be the same set of points (and the same
    outcome indices, reversed) regardless of the order in which the two
    utility functions are passed to ``pareto_frontier``."""
    util_a = [
        0.5337723805661662,
        0.8532272031479199,
        0.4781281413197942,
        0.7242899747791032,
        0.3461879818432919,
        0.2608677043479706,
        0.9419131964655383,
        0.29368079952747694,
        0.6093201983562316,
        0.7066918086398718,
    ]
    util_b = [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0]

    # Sanity check: total welfare peaks at outcome 3.
    welfare = [a + b for a, b in zip(util_a, util_b)]
    assert welfare.index(max(welfare)) == 3

    fa = MappingUtilityFunction(lambda o: util_a[o[0]])
    fb = MappingUtilityFunction(lambda o: util_b[o[0]])
    assert all(fa((k,)) == util_a[k] for k in range(10))
    assert all(fb((k,)) == util_b[k] for k in range(10))

    outcome_space = [(k,) for k in range(10)]
    front_ab, locs_ab = pareto_frontier([fa, fb], outcomes=outcome_space)
    front_ba, locs_ba = pareto_frontier([fb, fa], outcomes=outcome_space)

    assert front_ab == [(0.9419131964655383, 0.0), (0.7242899747791032, 1.0)]
    assert front_ba == [(1.0, 0.7242899747791032), (0.0, 0.9419131964655383)]
    assert locs_ab == [6, 3]
    assert locs_ba == list(reversed(locs_ab))
    assert len(front_ab) == len(front_ba)

    # Swap the coordinates of the second frontier so both use the same
    # (util_a, util_b) axis order, then check set equality point by point.
    swapped = [(pt[1], pt[0]) for pt in front_ba]
    for point in front_ab:
        assert point in swapped
def pareto_frontier(
    self, n_max=None, sort_by_welfare=True, consider_costs=False
) -> Tuple[List[Tuple[float]], List['Outcome']]:
    """Compute the Pareto frontier over this object's discrete outcomes.

    Args:
        n_max: Maximum number of discrete outcomes to consider (forwarded
            to ``discrete_outcomes``).
        sort_by_welfare: If True, frontier points are sorted by total welfare.
        consider_costs: Forwarded to ``_get_ufuns`` so costs are reflected
            in the utility functions used.

    Returns:
        A tuple of (frontier utility points, corresponding outcomes).
    """
    ufuns = self._get_ufuns(consider_costs=consider_costs)
    # Call discrete_outcomes exactly once and reuse the list for both the
    # frontier computation and the index -> outcome lookup. The original
    # called it twice; if outcome discretization involves any sampling or
    # ordering difference between calls, the returned indices would be
    # applied to a different list than the one the frontier was computed
    # on, yielding wrong outcomes. A single call is also cheaper.
    outcomes = self.discrete_outcomes(n_max=n_max)
    frontier, indices = pareto_frontier(
        ufuns=ufuns,
        n_discretization=None,
        sort_by_welfare=sort_by_welfare,
        outcomes=outcomes,
    )
    return frontier, [outcomes[_] for _ in indices]
def pareto_frontier(
    self, n_max=None, sort_by_welfare=True
) -> Tuple[List[Tuple[float]], List["Outcome"]]:
    """Compute the Pareto frontier over this object's discrete outcomes.

    Args:
        n_max: Maximum number of discrete outcomes to consider (forwarded
            to ``discrete_outcomes``).
        sort_by_welfare: If True, frontier points are sorted by total welfare.

    Returns:
        A tuple of (frontier utility points, corresponding outcomes); both
        lists are empty if any negotiator has no utility function.
    """
    ufuns = self._get_ufuns()
    # The frontier is undefined when any agent lacks a utility function.
    if any(_ is None for _ in ufuns):
        return [], []
    # Call discrete_outcomes exactly once and reuse the list for both the
    # frontier computation and the index -> outcome lookup. The original
    # called it twice; if outcome discretization involves any sampling or
    # ordering difference between calls, the returned indices would be
    # applied to a different list than the one the frontier was computed
    # on, yielding wrong outcomes. A single call is also cheaper.
    outcomes = self.discrete_outcomes(n_max=n_max)
    frontier, indices = pareto_frontier(
        ufuns=ufuns,
        n_discretization=None,
        sort_by_welfare=sort_by_welfare,
        outcomes=outcomes,
    )
    return frontier, [outcomes[_] for _ in indices]
def test_pareto_frontier_2(self):
    """End-to-end check that the frontier computed directly from utility
    functions matches the one reported by a run ``SAOMechanism``."""
    n_outcomes = 10
    strategy = "titration-0.5"
    cost = 0.01
    reserved_value = 0.1
    outcomes = [(_,) for _ in range(n_outcomes)]
    accepted = [(2,), (3,), (4,), (5,)]
    elicitor_utilities = [
        0.5337723805661662,
        0.8532272031479199,
        0.4781281413197942,
        0.7242899747791032,
        0.3461879818432919,
        0.2608677043479706,
        0.9419131964655383,
        0.29368079952747694,
        0.6093201983562316,
        0.7066918086398718,
    ]  # list(np.random.rand(n_outcomes).tolist())
    # Opponent gets utility 1 only on the outcomes it accepts.
    opponent_utilities = [
        1.0 if (_,) in accepted else 0.0 for _ in range(n_outcomes)
    ]
    frontier, frontier_locs = pareto_frontier(
        [
            MappingUtilityFunction(
                lambda o: elicitor_utilities[o[0]],
                reserved_value=reserved_value,
                outcome_type=tuple,
            ),
            MappingUtilityFunction(
                lambda o: opponent_utilities[o[0]],
                reserved_value=reserved_value,
                outcome_type=tuple,
            ),
        ],
        outcomes=outcomes,
        sort_by_welfare=True,
    )
    welfare = (
        np.asarray(elicitor_utilities) + np.asarray(opponent_utilities)
    ).tolist()
    # print(f'frontier: {frontier}\nmax. welfare: {max(welfare)} at outcome: ({welfare.index(max(welfare))},)')
    # print(f'frontier_locs: frontier_locs')
    neg = SAOMechanism(outcomes=n_outcomes, n_steps=10, outcome_type=tuple)
    opponent = LimitedOutcomesNegotiator(
        acceptable_outcomes=accepted,
        acceptance_probabilities=[1.0] * len(accepted),
    )
    eufun = MappingUtilityFunction(
        dict(zip(outcomes, elicitor_utilities)),
        reserved_value=reserved_value,
        outcome_type=tuple,
    )
    user = User(ufun=eufun, cost=cost)
    strategy = EStrategy(strategy=strategy)
    strategy.on_enter(ami=neg.ami)
    elicitor = FullKnowledgeElicitor(strategy=strategy, user=user)
    neg.add(opponent)
    neg.add(elicitor)
    neg.run()
    f2, f2_outcomes = neg.pareto_frontier(sort_by_welfare=True)
    assert len(frontier) == len(f2)
    # BUG FIX: the original asserted ``all([_1 == _2] for ...)`` — each
    # element was the one-item list ``[bool]``, which is always truthy, so
    # the assertion could never fail. Compare the values themselves.
    assert all(_1 == _2 for _1, _2 in zip(frontier, f2))
    assert [_[0] for _ in f2_outcomes] == frontier_locs