Example #1
        # for now, every node is a neighbor of every other node
        for i in range(len(nodes)):
            for j in range(len(nodes)):
                if i != j:
                    nodes[i].add_neighbor(nodes[j])

    tx_rate = params.get('transaction_rate', TX_RATE)

    # generate a mock transaction dataset (Poisson or deterministic arrivals)
    if params['dataset'] == 'poisson':
        tx_dataset = generate_tx_dataset.poisson(tx_rate, params['duration'],
                                                 0, c.nodes)
    elif params['dataset'] == 'deterministic':
        tx_dataset = generate_tx_dataset.deterministic(tx_rate,
                                                       params['duration'], 0,
                                                       c.nodes)
    else:
        raise ValueError(f"unknown dataset type: {params['dataset']}")

    # generate proposal events
    c.generate_proposals()
    # set transaction dataset
    c.set_transactions(tx_dataset)

    # create arrays to store proposals and transactions
    for node_id in range(num_nodes):
        nodes[node_id].create_arrays(len(tx_dataset))

    # run simulation
    c.run()
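
For context, a Poisson transaction stream just means exponential inter-arrival times. A minimal sketch of a generator matching the signature called above, `poisson(rate, duration, start, nodes)`, might look like this (the implementation is an assumption, not the project's actual generate_tx_dataset code):

import random

def poisson(rate, duration, start, nodes):
    # hypothetical stand-in for generate_tx_dataset.poisson: exponential gaps
    # with mean 1/rate produce a Poisson arrival process of intensity `rate`
    txs = []
    t = start
    while True:
        t += random.expovariate(rate)
        if t >= start + duration:
            break
        txs.append((t, random.choice(nodes)))  # (arrival time, originating node)
    return txs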
Example #2
import asyncio
import importlib
import json
import logging
import sys
from pathlib import Path

from ratelimitedapi import RateLimitedApi
from coordinator import Coordinator

if __name__ == "__main__":
    # The working dir must contain a config.json file at its root. Coordinator will create subfolders for
    # storing the backtest results.
    working_dir = Path(r"path/to/data/dir")
    with (working_dir / "config.json").open() as f:
        config = json.load(f)

    loglevel = config.get("loglevel", logging.INFO)
    logging.basicConfig(  # configures the root logger; more succinct than wiring up handlers directly
        level=loglevel,
        format="%(asctime)s %(name)s [%(threadName)s] [%(levelname)-5.5s]: %(message)s",
        stream=sys.stdout
    )
    logger = logging.getLogger()  # root logger
    logger.info(str(config))

    try:
        mod = importlib.import_module(config["module"])
        testset = getattr(mod, config["testset"])
        api = RateLimitedApi(config["user_id"], config["token"], debug=False)
        coordinator = Coordinator(testset, api, working_dir, config["project_name"], config["concurrency"])
        asyncio.run(coordinator.run(), debug=True)
    except Exception as e:
        logger.error("Unhandled error", exc_info=e)
        sys.exit(1)
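
Judging by the keys the script reads, the expected config.json is shaped roughly like this (every value below is a placeholder):

{
    "loglevel": "INFO",
    "module": "backtests.my_module",
    "testset": "MY_TESTSET",
    "user_id": "your-user-id",
    "token": "your-api-token",
    "project_name": "my-project",
    "concurrency": 4
}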
Example #3
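The fragment below picks up after an argument parser has already been built and assigned to `parser`. A plausible reconstruction of that setup, inferred from the attributes read off `args` (all flag definitions here are hypothetical):

import argparse
import sys


def main():
    parser = argparse.ArgumentParser(description="run a command on remote workers")
    parser.add_argument("command", nargs="*", help="the command to run remotely")
    parser.add_argument("--notify", action="store_true", help="notify when the job finishes")
    parser.add_argument("--runs", type=int, default=1, help="number of times to run the command")
    parser.add_argument("--name", default=None, help="name for the job")
    parser.add_argument("--pool", default=None, help="worker pool to run on")
    # a bare `--kill` parses to None (kill everything); `--kill JOB` names one job
    parser.add_argument("--kill", nargs="?")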
    parser.set_defaults(kill=False)

    args, command_flags = parser.parse_known_args()

    notify = args.notify
    runs = args.runs
    name = args.name
    kill = args.kill
    pool = args.pool

    # kill defaults to False; None means --kill was passed with no job id
    if kill is not False:
        if kill is None:
            kill_all()
        else:
            kill_job(str(kill))
        sys.exit(0)

    command = " ".join(args.command + command_flags)

    print(f"Running remote command: {command}")

    # Starting the coordinator
    coordinator = Coordinator(command=command,
                              times=runs,
                              name=name,
                              notify=notify,
                              pool=Pool(pool))

    coordinator.onDone(lambda: onDone(coordinator))
    coordinator.run()
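
Because parse_known_args is used, any flags the parser itself does not define fall through into command_flags and are re-joined into the remote command string. A hypothetical invocation (the script name is a placeholder):

    python launch.py --runs 3 --name sweep python train.py --lr 0.01

Here `--runs 3 --name sweep` are consumed locally, while `python train.py --lr 0.01` becomes the remote command.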
Example #4
    all_tasks = set(task
                    for tasks in current_coordinator.tasks_per_worker.values()
                    for task in tasks)
    if all_tasks != {"1", "3", "4"}:
        raise Exception(
            f"coordinator {current_coordinator} does not have the right tasks. It has {all_tasks}"
        )

    for coordinator in agents:
        coordinator.cancel()


# delete current prefixes and results file
etcd = etcd3.client(host=etcd_ip, port=etcd_port)
etcd.delete_prefix(group_prefix)
etcd.delete_prefix(global_task_prefix)

agents = [agent_one, agent_two]

# run both agents plus the controlling coroutine on one event loop
loop = asyncio.get_event_loop()
tasks = asyncio.gather(agent_one.run(), agent_two.run(),
                       control_agents(agents))

try:
    loop.run_until_complete(tasks)
except KeyboardInterrupt:
    pass
finally:
    for coordinator in agents:
        coordinator.cancel()
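
For the {"1", "3", "4"} check in control_agents to pass, those task ids presumably need to be published under global_task_prefix before the agents start. A hypothetical seeding step using the same python-etcd3 client (the key layout is an assumption):

# hypothetical: seed the shared task set the agents consume
for task_id in ("1", "3", "4"):
    etcd.put(global_task_prefix + task_id, task_id)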