Example #1
import time
import traceback
from typing import Any, Dict, List

from scml import SCML2020World


def run_config(world_config: Dict[str, Any], funcs: List[str]) -> Dict[str, Any]:
    """Run a single configuration and return the values of all requested functions for it."""
    world = SCML2020World(**SCML2020World.generate(**world_config))
    results = {}
    results["log_folder"] = world.log_folder
    try:
        _start = time.perf_counter()
        world.run()
        _end = time.perf_counter()
        # dep_vars (defined elsewhere in the source file) maps each function
        # name to a callable that computes that metric from the finished world.
        results.update({func: dep_vars[func](world) for func in funcs})
        results["time"] = _end - _start
        results["time_per_step"] = (_end - _start) / world.n_steps
        results["failed_run"] = False
        results["exception"] = None
    except Exception as e:
        print(f"Exception occurred: {e}\n{traceback.format_exc()}")
        results.update({func: float("nan") for func in funcs})
        results["time"] = float("nan")
        results["time_per_step"] = float("nan")
        results["failed_run"] = True
        results["exception"] = str(e)

    results.update(world_config)
    results.update(world.info)
    return results
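
A hedged usage sketch follows; the dep_vars mapping, the metric name, and the config keys are illustrative assumptions (DecentralizingAgent imported as in Example #9), not part of the original source.

# Illustrative only: dep_vars must live in the same module as run_config,
# and the "n_contracts" metric below is an assumed example.
dep_vars = {"n_contracts": lambda world: len(world.saved_contracts)}

config = {"agent_types": [DecentralizingAgent], "n_steps": 10}
row = run_config(config, funcs=["n_contracts"])
print(row["n_contracts"], row["time_per_step"], row["failed_run"])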
Example #2
    def create_session(self) -> "SCML2020World":
        from scml import SCML2020World

        world = SCML2020World(
            **SCML2020World.generate(agent_types=self.competitors, n_steps=50),
            construct_graphs=True,
        )
        # Refresh the competitor list via the harness's (unshown) get_agents() helper.
        self.competitors = self.get_agents()
        return world
Example #3
import operator

from scml import SCML2020World, DecentralizingAgent


def play_n_games(n, compare0, compare1, compare2, compare3):
    """Play n simulations and count how often each agent type wins."""
    scores = {
        "DrorStepAgent": 0,
        "DecentralizingAgent": 0,
        "DrorDanaStepAgent": 0,
        "DrorOmer": 0,
        "DrorDana2StepAgent": 0,
    }
    for _ in range(n):
        world = SCML2020World(
            **SCML2020World.generate(
                # the four comparison agents plus the DecentralizingAgent baseline,
                # one per score bucket initialized above
                [compare0, DecentralizingAgent, compare1, compare2, compare3],
                n_steps=30,
            ),
            construct_graphs=True,
        )

        world.run()
        returned_scores = return_agent_scores(world)  # helper defined elsewhere
        winner = max(returned_scores.items(), key=operator.itemgetter(1))[0]
        scores[winner] += 1
    return scores
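
A call sketch, assuming the Dror* agent classes from the surrounding project are importable; the argument order below is a guess at how the four candidates map onto the score buckets.

# Hypothetical invocation; all four candidate classes are project-local.
wins = play_n_games(10, DrorStepAgent, DrorDanaStepAgent, DrorOmer, DrorDana2StepAgent)
print(wins)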
Example #4
def play_n_game(n, dror_agent):
    """Play n simulations and return the candidate's win margin over the baseline."""
    scores = {"DrorStepAgent": 0, "DecentralizingAgent": 0}

    for _ in range(n):
        world = SCML2020World(
            **SCML2020World.generate(
                [dror_agent, DecentralizingAgent, IndDecentralizingAgent], n_steps=10
            ),
            construct_graphs=True,
        )

        world.run()
        returned_scores = return_agent_scores(world)  # helper defined elsewhere
        # Ties go to the candidate; absent agents fall back to a heavy penalty.
        winner = (
            "DrorStepAgent"
            if returned_scores.get("DrorStepAgent", -20)
            >= returned_scores.get("DecentralizingAgent", -20)
            else "DecentralizingAgent"
        )

        scores[winner] += 1
    return scores["DrorStepAgent"] - scores["DecentralizingAgent"]
Example #5
            # (tail of get_train_data; the start of the function is not shown)
            train_input_tags.append(input_tag)
            train_output_tags.append(output_tag)
    return train_data, train_input_tags, train_output_tags


train_data = []
train_input_tags = []
train_output_tags = []

print("training...")

for _ in tqdm(range(TRADE_TRAIN_DATA)):
    world = SCML2020World(
        **SCML2020World.generate(
            agent_types=[NegotiatorAgent, UnicornAgent],
            n_steps=40,
            n_processes=1,  # TODO: 2
        ),
        construct_graphs=False,
    )
    # Override the class attribute with the externally defined cancelled_contracts.
    SCML2020World.cancelled_contracts = cancelled_contracts
    world.run()

    data, input_tags, output_tags = get_train_data(world)
    train_data += data
    train_input_tags += input_tags
    train_output_tags += output_tags

import torch
from trade_model import MAX_HORIZON

Example #6
            # (tail of split_features_tags; the start of the function is not shown)
            neg_features.append(np.array(message[:RESPONSE_RELATIVE_TIME]))
            neg_tags.append(np.array(message[RESPONSE_RELATIVE_TIME:]))
        features.append(np.array(neg_features))
        tags.append(np.array(neg_tags))

    return features, tags


print("training...")
seller_train_data = []
buyer_train_data = []
for _ in tqdm(range(NEG_TRAIN_DATA)):
    world = SCML2020World(
        **SCML2020World.generate(
            agent_types=[MyLearnNegotiationAgent, DecentralizingAgent],
            n_steps=40,
            n_processes=2,
        ),
        construct_graphs=True,
    )
    # Override the class attribute with the externally defined cancelled_contracts.
    SCML2020World.cancelled_contracts = cancelled_contracts
    world.run()
    plot(world)

    seller_data, buyer_data = get_train_data(world)
    seller_train_data += seller_data
    buyer_train_data += buyer_data

seller_train_features, seller_train_tags = split_features_tags(seller_train_data)
seller_train_features = [
    torch.from_numpy(feature).float() for feature in seller_train_features
]
Example #7
            # if I am a seller, I will schedule production
            output_product = contract.annotation["product"]
            input_product = output_product - 1
            steps, _ = self.awi.schedule_production(
                process=input_product,
                repeats=contract.agreement["quantity"],
                step=(earliest_production, step - 1),
                line=-1,
                partial_ok=True,
            )
            self.schedule_range[contract.id] = (
                min(steps) if len(steps) > 0 else -1,
                max(steps) if len(steps) > 0 else -1,
                is_seller,
            )


if __name__ == "__main__":
    agent_types = [
        OmerProductionStrategyAgent,
        DecentralizingAgent,
        BuyCheapSellExpensiveAgent,
        IndDecentralizingAgent,
        MovingRangeAgent,
    ]
    world = SCML2020World(
        **SCML2020World.generate(agent_types, n_steps=50),
        construct_graphs=True,
    )
    world.run()
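
Because construct_graphs=True is passed, the finished world can be visualized; a minimal sketch modeled on the draw call from the scml tutorials (treat the exact keyword set as an assumption):

import matplotlib.pyplot as plt

# Plot the negotiation/contract graphs accumulated during the run.
world.draw(steps=(0, world.n_steps), together=False, ncols=2, figsize=(20, 20))
plt.show()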
Example #8
            # (tail of split_features_tags; the start of the function is not shown)
            neg_features.append(np.array(message[:RESPONSE_RELATIVE_TIME]))
            neg_tags.append(np.array(message[RESPONSE_RELATIVE_TIME:]))
        features.append(np.array(neg_features))
        tags.append(np.array(neg_tags))

    return features, tags


print("training...")
seller_train_data = []
buyer_train_data = []
for _ in tqdm(range(UTILITY_TRAIN_DATA)):
    world = SCML2020World(
        **SCML2020World.generate(
            agent_types=[MyLearnUtilityAgent, DecentralizingAgent],
            n_steps=40,
            n_processes=2,
        ),
        construct_graphs=True,
    )
    # Override the class attribute with the externally defined cancelled_contracts.
    SCML2020World.cancelled_contracts = cancelled_contracts
    world.run()
    plot(world)

    seller_data, buyer_data = get_train_data(world)
    seller_train_data += seller_data
    buyer_train_data += buyer_data

seller_train_features, seller_train_tags = split_features_tags(seller_train_data)
seller_train_features = [
    torch.from_numpy(feature).float() for feature in seller_train_features
]
Example #9
from scml import SCML2020World
from scml import DecentralizingAgent, RandomAgent, DoNothingAgent

# create and run the world
world = SCML2020World(
    **SCML2020World.generate(
        agent_types=[DoNothingAgent], n_steps=10, n_processes=2, name="test_world"
    )
)
world.run()
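
Once run() returns, results can be read off the world object; a short follow-up sketch (attribute names taken from the scml tutorials, so treat them as assumptions):

# Inspect what the simulation recorded.
print(world.winners)      # agent(s) with the highest final score
print(list(world.stats))  # names of the per-step statistics collected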