def test_from_outcomes():
    """Round-trip issue sets through enumerate()/from_outcomes() and verify recovery.

    First with discrete issues (values must match exactly), then with numeric
    ranges recovered via ``numeric_as_ranges=True`` (recovered bounds must lie
    within the original range).
    """
    issues = Issues(price=[2, 3], cost=[1, 2, 3], delivery=["yes", "no"])
    found = Issue.from_outcomes(Issue.enumerate(issues.issues))
    for original, recovered in zip(issues.issues, found):
        assert original.name == recovered.name
        assert all(
            a == b for a, b in zip(sorted(original.values), recovered._values)
        )
    issues = Issues(price=(1, 7), cost=(0, 5), delivery=["yes", "no"])
    found = Issue.from_outcomes(
        Issue.enumerate(issues.issues, max_n_outcomes=1000), numeric_as_ranges=True
    )
    for original, recovered in zip(issues.issues, found):
        bounds = sorted(original.values)
        assert original.name == recovered.name
        # recovered range must be contained in the original range
        assert bounds[0] <= recovered._values[0]
        assert recovered._values[1] <= bounds[1]
def test_lap_ufun_partial_ranking():
    """A short partial ranking should recover the signs of the true weights."""
    issues = [Issue(5, "Price"), Issue(5, "Distance")]
    # ground-truth utility: 0.2*price - 0.45*distance (kept for reference)
    gt = {
        (o["Price"], o["Distance"]): 0.2 * o["Price"] - 0.45 * o["Distance"]
        for o in Issue.enumerate(issues, astype=dict)
    }
    learner = RankingLAPUfunLearner(issues=issues, degree=1)
    learner.fit(ranking_list=[(4, 0), (3, 0), (3, 1), (2, 4), (0, 4)])
    # learned price weight must be positive, distance weight negative
    assert learner.theta[0] > 0.0
    assert learner.theta[1] < 0.0
def test_ufun_quality_error_sums(kind):
    """The objective is zero at the true parameters and positive at wrong ones."""
    issues = [Issue(5, "Price"), Issue(5, "Distance")]
    gt = {
        (o["Price"], o["Distance"]): 0.2 * o["Price"] - 0.45 * o["Distance"]
        for o in Issue.enumerate(issues, astype=dict)
    }
    # ground-truth ranking: outcomes ordered by decreasing true utility
    full_ranking = [
        outcome for outcome, _ in sorted(gt.items(), key=lambda kv: kv[1], reverse=True)
    ]
    learner = RankingLAPUfunLearner(issues=issues, degree=1, kind=kind)
    shared = dict(
        ranking=full_ranking,
        fs=learner.fs,
        n_params=learner.n_params,
        tolerance=learner.tolerance,
        kind=learner.kind,
    )
    # exact ground-truth parameters incur no objective penalty
    assert _ufun_objective([0.2, -0.45], **shared) == 0.0
    # a wrong-signed distance weight must be penalized
    assert _ufun_objective([0.2, 0.4], **shared) > 0.0
def test_can_run_all_negotiators(asdict):
    """Runs every pairing of the built-in SAO negotiators to completion."""
    from negmas.helpers import instantiate

    issues = [Issue((0.0, 1.0), name="price"), Issue(10, name="quantity")]
    weights = dict(price=1.0, quantity=1.0) if asdict else (1.0, 1.0)
    for outcome_type in (tuple, dict):
        outcomes = Issue.enumerate(issues, max_n_outcomes=100, astype=outcome_type)

        def with_linear_ufun():
            # fresh params dict (and ufun) per negotiator type, as before
            return dict(ufun=LinearUtilityFunction(weights=weights))

        neg_types = [
            ("RandomNegotiator", with_linear_ufun()),
            ("AspirationNegotiator", with_linear_ufun()),
            ("LimitedOutcomesNegotiator", dict(acceptance_probabilities=0.5)),
            ("LimitedOutcomesAcceptor", dict(acceptance_probabilities=0.5)),
            ("ToughNegotiator", with_linear_ufun()),
            ("OnlyBestNegotiator", with_linear_ufun()),
            ("NaiveTitForTatNegotiator", with_linear_ufun()),
            ("SimpleTitForTatNegotiator", with_linear_ufun()),
            ("NiceNegotiator", with_linear_ufun()),
        ]
        for first_type, first_params in neg_types:
            for second_type, second_params in neg_types:
                print(f"{first_type} <> {second_type}")
                first = instantiate("negmas.sao." + first_type, **first_params)
                second = instantiate("negmas.sao." + second_type, **second_params)
                mechanism = SAOMechanism(
                    n_steps=30, issues=issues, outcome_type=dict if asdict else tuple
                )
                mechanism.add(first)
                mechanism.add(second)
                mechanism.run()
                # run() must terminate the session
                assert not mechanism.running
def negotiator(self, is_seller: bool, issues=None, outcomes=None) -> SAONegotiator:
    """Create a negotiator of ``self.negotiator_type`` with a freshly built ufun.

    Args:
        is_seller: whether the negotiator acts as a seller (passed to create_ufun).
        issues: negotiation issues (used when ``outcomes`` is not given).
        outcomes: explicit outcome list; takes precedence over ``issues``.

    Returns:
        The instantiated negotiator, or None when no outcome space exists.
    """
    if outcomes is None and (
        issues is None or not Issue.enumerate(issues, astype=tuple)
    ):
        return None
    # BUGFIX: copy the shared params dict; the original mutated
    # self.negotiator_params in place, permanently injecting "ufun" into it
    # so a stale ufun leaked into every later call.
    params = dict(self.negotiator_params)
    params["ufun"] = self.create_ufun(
        is_seller=is_seller, outcomes=outcomes, issues=issues
    )
    return instantiate(self.negotiator_type, **params)
def test_ranking(ascending):
    """ranking() must order outcomes monotonically in utility."""
    issues = [Issue(5, "Price"), Issue(5, "Distance")]
    outcomes = Issue.enumerate(issues, astype=tuple)
    ufun = util.UtilityFunction.generate_random(1, outcomes=outcomes)[0]
    ranked = ranking(ufun, outcomes, ascending=ascending)
    utilities = [ufun(o) for o in ranked]
    for earlier, later in zip(utilities, utilities[1:]):
        if ascending:
            assert earlier <= later
        else:
            assert earlier >= later
def negotiator(self, is_seller: bool, issues=None) -> Optional[SAONegotiator]:
    """Creates a negotiator"""
    # bail out when there is nothing to negotiate over
    if issues is None:
        return None
    if not Issue.enumerate(issues, astype=tuple):
        return None
    return AspirationNegotiator(
        ufun=self.create_ufun(is_seller=is_seller, issues=issues),
        assume_normalized=True,
    )
def test_partial_ranking(ascending, fraction):
    """partial() keeps the requested fraction of a ranking, preserving its order."""
    issues = [Issue(5, "Price"), Issue(5, "Distance")]
    outcomes = Issue.enumerate(issues, astype=tuple)
    ufun = util.UtilityFunction.generate_random(1, outcomes=outcomes)[0]
    ranked = partial(ranking(ufun, outcomes, ascending=ascending), fraction)
    # size is the fraction of all outcomes, rounded to nearest integer
    assert len(ranked) == int(len(outcomes) * fraction + 0.5)
    for earlier, later in zip(ranked, ranked[1:]):
        if ascending:
            assert ufun(earlier) <= ufun(later)
        else:
            assert ufun(earlier) >= ufun(later)
def test_can_create_all_negotiator_types():
    """Instantiates every built-in SAO negotiator type without error."""
    from negmas.helpers import instantiate

    issues = [Issue((0.0, 1.0), name="price"), Issue(10, name="quantity")]
    for outcome_type in (tuple, dict):
        # enumeration kept from the original test (exercises Issue.enumerate;
        # its result is not otherwise used here)
        outcomes = Issue.enumerate(issues, max_n_outcomes=100, astype=outcome_type)

        def linear_params():
            # fresh ufun per negotiator type, exactly as the original built one
            return dict(
                ufun=LinearUtilityFunction(weights=dict(price=1.0, quantity=1.0))
            )

        neg_types = [
            ("RandomNegotiator", linear_params()),
            ("LimitedOutcomesNegotiator", dict()),
            ("LimitedOutcomesAcceptor", dict()),
            ("AspirationNegotiator", linear_params()),
            ("ToughNegotiator", linear_params()),
            ("OnlyBestNegotiator", linear_params()),
            ("NaiveTitForTatNegotiator", linear_params()),
            ("SimpleTitForTatNegotiator", linear_params()),
            ("NiceNegotiator", linear_params()),
        ]
        for type_name, params in neg_types:
            _ = instantiate("negmas.sao." + type_name, **params)
def before_step(self):
    """Initialize per-step negotiation state: aspiration parameters, utility
    limits, and a utility-sorted list of all outcomes.

    Does nothing (and flags ``__endall``) for agents that are neither in the
    first nor the last production level.
    """
    self.__endall = not self.awi.is_first_level and not self.awi.is_last_level
    if self.__endall:
        return
    # we assume that we are either in the first or the latest layer
    # and calculate our ufun limits and reserved value
    self.ufun.reserved_value = self.ufun.from_contracts([])
    self._reserved_value = self.ufun.reserved_value
    AspirationMixin.aspiration_init(
        self,
        max_aspiration=1.0,
        # 70% of the time use an integer aspiration exponent in [1, 4],
        # otherwise a random fractional one
        aspiration_type=float(random.randint(1, 4))
        if random.random() < 0.7
        else random.random(),
        above_reserved_value=False,
    )
    self._limit = self.ufun.find_limit(
        True, int(self.awi.is_last_level), int(self.awi.is_first_level)
    )
    self._max_utility = self._limit.utility
    # clamp the utility range away from zero to avoid division blow-ups below
    urange = self._max_utility - self._reserved_value
    if urange <= 1e-5:
        urange = 1e-5
    self._urange = urange
    if self.awi.is_last_level:
        self._best = (self._limit.input_quantity, self._limit.input_price)
    else:
        self._best = (self._limit.output_quantity, self._limit.output_price)
    # compile a list of all outcomes with their utilities and sort it
    # descendingly by utility.
    # BUGFIX: the original selected current_output_issues in BOTH branches of
    # the conditional; a last-level agent negotiates over its inputs (see the
    # `_best` assignment above using input quantity/price), so use the input
    # issues in the else branch.
    issues = (
        self.awi.current_output_issues
        if self.awi.is_first_level
        else self.awi.current_input_issues
    )
    outcomes = list(Issue.enumerate(issues, astype=tuple))
    self._outcomes = sorted(
        zip(
            (
                (
                    self.ufun.from_offers([_], [self.awi.is_first_level])
                    - self._reserved_value
                )
                / self._urange
                for _ in outcomes
            ),
            outcomes,
        ),
        key=lambda x: -x[0],
    )
    self._last_index = 0
def __init__(
    self,
    *args,
    issues: Optional[List[Issue]],
    outcomes: List[Outcome] = None,
    **kwargs,
):
    """Store the outcome space, normalizing every outcome to a tuple.

    Args:
        issues: the negotiation issues; used to enumerate outcomes when
            ``outcomes`` is not given.
        outcomes: explicit outcomes (dicts, tuples, or objects with
            ``astuple()``); takes precedence over enumeration.

    Raises:
        ValueError: if both ``issues`` and ``outcomes`` are None.
    """
    super().__init__(*args, **kwargs)
    if issues is None and outcomes is None:
        raise ValueError("Neither issues nor outcomes was given")
    self.issues = issues
    if outcomes is not None:
        # BUGFIX: dict.values() returns a view object, not a tuple; wrap it in
        # tuple() so every stored outcome has the same (tuple) representation.
        self.outcomes = [
            tuple(outcome.values())
            if isinstance(outcome, dict)
            else outcome
            if isinstance(outcome, tuple)
            else outcome.astuple()
            for outcome in outcomes
        ]
    else:
        self.outcomes = Issue.enumerate(issues=issues, astype=tuple)
    # BUGFIX: the original iterated `issues` unconditionally and crashed with a
    # TypeError when only `outcomes` was supplied (issues is None).
    self.issue_names = (
        [issue.name for issue in issues] if issues is not None else None
    )
def test_lap_ufun_full_ranking():
    """Fitting on the full ground-truth ranking recovers it (for error_sums)."""
    issues = [Issue(5, "Price"), Issue(5, "Distance")]
    gt = {
        (o["Price"], o["Distance"]): 0.2 * o["Price"] - 0.45 * o["Distance"]
        for o in Issue.enumerate(issues, astype=dict)
    }
    full_ranking = [
        outcome for outcome, _ in sorted(gt.items(), key=lambda kv: kv[1], reverse=True)
    ]
    for kind in ("error_sums", "errors"):
        learner = RankingLAPUfunLearner(issues=issues, degree=1, kind=kind)
        learner.fit(ranking_list=full_ranking)
        learned_ranking = [
            outcome
            for outcome, _ in sorted(
                learner.uvals.items(), key=lambda kv: kv[1], reverse=True
            )
        ]
        if kind == "error_sums":
            assert full_ranking == learned_ranking, f"Failed on {kind}"
            assert (
                learner.ranking_error(
                    gt=MappingUtilityFunction(lambda o: 0.2 * o[0] - 0.45 * o[1])
                )
                == 0.0
            )
def create_ufun(self, is_seller: bool, issues=None, outcomes=None):
    """Build a random ufun over the given outcomes (or the enumeration of issues)."""
    if outcomes is None:
        outcomes = Issue.enumerate(issues, astype=tuple)
    return RandomUtilityFunction(outcomes, reserved_value=0.0)
def rank(
    outcomes,
    fractions,
    n_rankings_per_fraction,
    n_trials_per_ranking,
    white_noise,
    uniform_noise,
    degrees,
    serial,
    methods,
):
    """Evaluate ranking-based ufun learning over Genius-generated ufuns.

    Sweeps fractions of the outcome space, noise levels, methods and degrees,
    calling ``evaluate_ranking`` for each combination and appending results to
    a CSV file. Runs either serially or fanned out over a process pool.

    Args:
        outcomes: maximum number of outcomes per generated ufun (an int; it is
            later shadowed by the per-ufun enumerated outcome list below).
        fractions: (start, stop, num) arguments for np.linspace over fractions.
        n_rankings_per_fraction: cap on rankings evaluated per fraction.
        n_trials_per_ranking: trials per sampled ranking.
        white_noise / uniform_noise: (start, stop, num) linspace specs;
            ``None`` means a single zero-noise level.
        degrees: polynomial degrees to try.
        serial: if truthy, run in-process; otherwise use a ProcessPoolExecutor.
        methods: learning methods to evaluate.
    """
    # module-level state consumed elsewhere (e.g. by worker processes)
    global ufuns
    global issues
    if white_noise is None:
        white_noise = (0.0, 0.0, 1)  # single zero-noise level
    if uniform_noise is None:
        uniform_noise = (0.0, 0.0, 1)
    ufuns, names, issues = generate_genius_ufuns(max_n_outcomes=outcomes)
    n_ufuns = len(ufuns)
    results_file_name = os.path.expanduser("~/code/projects/uneg/data/accuracy.csv")
    n_all = 0
    if serial:
        for i, (gt, name, issue_list) in enumerate(zip(ufuns, names, issues)):
            # NOTE: shadows the integer `outcomes` parameter from here on
            outcomes = Issue.enumerate(issue_list, astype=tuple)
            for fraction in np.linspace(*fractions):
                # number of outcomes kept in a partial ranking (rounded)
                k = int(fraction * len(outcomes) + 0.5)
                if k < 2:
                    print(
                        f"{i}/{len(ufuns)}: {name}: "
                        f"(fraction:{fraction:0.02} Cancelled (too small fraction)",
                        flush=True,
                    )
                    continue
                # n! / ((n-k)! k!) — this is C(n, k), the number of k-subsets;
                # NOTE(review): named "n_rankings" although orderings would be
                # the permutation count — confirm intended semantics
                n_rankings = int(
                    math.factorial(len(outcomes))
                    / (math.factorial(len(outcomes) - k) * math.factorial(k))
                )
                n_rankings_to_run = min((n_rankings_per_fraction, n_rankings))
                for w in np.linspace(*white_noise):
                    for u in np.linspace(*uniform_noise):
                        for method in methods:
                            for degree in degrees:
                                results = evaluate_ranking(
                                    n_ufuns,
                                    name,
                                    method,
                                    degree,
                                    fraction,
                                    outcomes,
                                    u,
                                    w,
                                    n_rankings_to_run,
                                    n_trials_per_ranking,
                                    results_file_name,
                                    i,
                                )
                                # append this batch of rows to the CSV
                                add_records(
                                    results_file_name, pd.DataFrame(data=results)
                                )
    else:
        # parallel path: submit every combination, collect as they complete.
        # NOTE(review): the executor is never shut down explicitly — consider
        # a `with ProcessPoolExecutor(...) as executor:` block
        executor = ProcessPoolExecutor(max_workers=None)
        future_results = []
        for i, (gt, name, issue_list) in enumerate(zip(ufuns, names, issues)):
            outcomes = Issue.enumerate(issue_list, astype=tuple)
            for fraction in np.linspace(*fractions):
                k = int(fraction * len(outcomes) + 0.5)
                if k < 2:
                    print(
                        f"{i}/{len(ufuns)}: {name}: "
                        f"(fraction:{fraction:0.02} Cancelled (too small fraction)",
                        flush=True,
                    )
                    continue
                n_rankings = int(
                    math.factorial(len(outcomes))
                    / (math.factorial(len(outcomes) - k) * math.factorial(k))
                )
                n_rankings_to_run = min((n_rankings_per_fraction, n_rankings))
                for w in np.linspace(*white_noise):
                    for u in np.linspace(*uniform_noise):
                        for method in methods:
                            for degree in degrees:
                                future_results.append(
                                    executor.submit(
                                        evaluate_ranking,
                                        n_ufuns,
                                        name,
                                        method,
                                        degree,
                                        fraction,
                                        outcomes,
                                        u,
                                        w,
                                        n_rankings_to_run,
                                        n_trials_per_ranking,
                                        results_file_name,
                                        i,
                                    )
                                )
                                n_all += 1
        print(f"Submitted all processes ({n_all})")
        for j, future in enumerate(as_completed(future_results)):
            try:
                results = future.result()
                add_records(results_file_name, pd.DataFrame(data=results))
            except TimeoutError:
                print("Tournament timed-out")
                break
            except Exception as e:
                # best-effort: log the failure and keep collecting other futures
                print(traceback.format_exc())
                print(e)