# --- Example 1 ---
def test_alternating_offers_eliciting_mechanism_voi_optimal():
    """Run an SAO elicitation session with the VOI-optimal elicitor.

    Checks that the negotiation produces history, that elicitation cost is
    non-negative, that the elicitor's utility (gross of elicitation spend)
    is at least its reserved value, and that a non-negative total VOI was
    accumulated.
    """
    mechanism_config = SAOElicitingMechanism.generate_config(
        cost=0.001,
        n_outcomes=10,
        opponent_model_adaptive=False,
        opponent_type="limited_outcomes",
        conflict=1.0,
        n_steps=500,
        time_limit=100000.0,
        own_utility_uncertainty=0.5,
        own_reserved_value=0.1,
    )
    mechanism = SAOElicitingMechanism(**mechanism_config, elicitor_type="voi_optimal")
    mechanism.run()

    assert len(mechanism.history) > 0

    state = mechanism.elicitation_state
    assert state["elicitation_cost"] >= 0.0
    # The elicitor should do no worse (before subtracting elicitation
    # spend) than walking away at its reserved value.
    reserved = mechanism.negotiators[1].reserved_value
    assert state["elicitor_utility"] + state["elicitation_cost"] >= reserved
    assert state["total_voi"] is not None and state["total_voi"] >= 0
# --- Example 2 ---
def test_alternating_offers_eliciting_mechanism_voi():
    """Run the VOI elicitor with both static and dynamic queries.

    For each (strategy, dynamic) combination the mechanism must produce
    history, charge a non-negative elicitation cost, and leave the
    elicitor with at least its reserved value.
    """
    cases = [(None, False), (None, True)]
    for elicitation_strategy, use_dynamic_queries in cases:
        mechanism_config = SAOElicitingMechanism.generate_config(
            cost=0.001,
            n_outcomes=10,
            opponent_model_adaptive=False,
            opponent_type="limited_outcomes",
            conflict=1.0,
            n_steps=500,
            time_limit=100000.0,
            own_utility_uncertainty=0.1,
            own_reserved_value=0.1,
        )
        mechanism = SAOElicitingMechanism(
            **mechanism_config,
            elicitation_strategy=elicitation_strategy,
            elicitor_type="voi",
            dynamic_queries=use_dynamic_queries,
        )
        mechanism.run()

        assert len(mechanism.history) > 0

        state = mechanism.elicitation_state
        assert state["elicitation_cost"] >= 0.0
        reserved = mechanism.negotiators[1].reserved_value
        assert state["elicitor_utility"] >= reserved
# --- Example 3 ---
def test_a_small_elicitation_session():
    """Run a short SAO elicitation session that logs its history to disk.

    Smoke test: passes as long as the mechanism runs to completion
    without raising.
    """
    import os

    config = SAOElicitingMechanism.generate_config(cost=0.2, n_outcomes=5, n_steps=10)
    p = SAOElicitingMechanism(
        **config,
        # Fixed: the original f-string appended a stray trailing quote,
        # producing the file name "elicit.log'" instead of "elicit.log".
        history_file_name=os.path.expanduser("~/logs/negmas/tmp/elicit.log"),
    )
    p.run()
# --- Example 4 ---
def test_a_typical_elicitation_session():
    """Run a typical SAO elicitation session that logs its history to disk.

    Smoke test: passes as long as the mechanism runs to completion
    without raising.
    """
    import os

    # NOTE(review): dropped the unused ``accepted_outcomes`` list (and its
    # ``random`` import) — it was computed but never read.
    n_outcomes = 10
    config = SAOElicitingMechanism.generate_config(
        cost=0.2, n_outcomes=n_outcomes, n_steps=100, own_reserved_value=0.25
    )
    p = SAOElicitingMechanism(
        **config,
        # Fixed: the original f-string appended a stray trailing quote,
        # producing the file name "elicit.log'" instead of "elicit.log".
        history_file_name=os.path.expanduser("~/logs/negmas/tmp/elicit.log"),
    )
    p.run()
# --- Example 5 ---
def test_alternating_offers_eliciting_mechanism_full_knowledge():
    """Run a session with a full-knowledge elicitor using bisection.

    A full-knowledge elicitor never pays for elicitation, so the recorded
    cost must be exactly zero, and its final utility must be at least its
    reserved value.
    """
    mechanism_config = SAOElicitingMechanism.generate_config(
        cost=0.001,
        n_outcomes=10,
        opponent_model_adaptive=False,
        opponent_type="limited_outcomes",
        conflict=1.0,
        n_steps=500,
        time_limit=100000.0,
        own_utility_uncertainty=0.1,
        own_reserved_value=0.1,
    )
    mechanism = SAOElicitingMechanism(
        **mechanism_config,
        elicitation_strategy="bisection",
        elicitor_type="full_knowledge",
    )
    mechanism.run()

    assert len(mechanism.history) > 0

    state = mechanism.elicitation_state
    assert state["elicitation_cost"] == 0.0
    reserved = mechanism.negotiators[1].reserved_value
    assert state["elicitor_utility"] >= reserved
# --- Example 6 ---
def test_elicitation_run_with_no_conflict():
    """Run a zero-conflict, win-win session and check its Pareto frontier.

    With no conflict and no uncertainty the Pareto frontier must be
    non-empty; the run itself is a smoke test.
    """
    outcome_count, step_count = 50, 100
    negotiation_config = SAOElicitingMechanism.generate_config(
        cost=0.05,
        n_outcomes=outcome_count,
        conflict=0.0,
        winwin=1.0,
        n_steps=step_count,
        own_reserved_value=0.1,
        opponent_type="tough",
        opponent_model_uncertainty=0.0,
        own_utility_uncertainty=0.0,
    )
    negotiation = SAOElicitingMechanism(
        **negotiation_config, elicitor_type="full_knowledge"
    )
    frontier, frontier_outcomes = negotiation.pareto_frontier(sort_by_welfare=True)
    assert len(frontier) > 0
    negotiation.run()