示例#1
0
def _load_modules(dist, algo):
    """Load the distribution, algorithm and computation-graph modules.

    :param dist: name of a distribution method, or None when no
        distribution was requested.
    :param algo: name of a dcop algorithm.
    :return: a ``(dist_module, algo_module, graph_module)`` tuple; any
        element may still be None when loading failed (``_error`` is
        called to report the failure).
    """
    dist_module = None
    algo_module = None
    graph_module = None

    # The distribution method is optional: only load it when given.
    if dist is not None:
        try:
            dist_module = import_module("pydcop.distribution.{}".format(dist))
            # TODO check the imported module has the right methods ?
        except ImportError:
            _error("Could not find distribution method {}".format(dist))

    try:
        algo_module = load_algorithm_module(algo)
        # The computation-graph type is dictated by the algorithm module.
        graph_module = import_module(
            "pydcop.computations_graph.{}".format(algo_module.GRAPH_TYPE)
        )
    except ImportError as e:
        _error("Could not find module for algorithm: {}".format(algo), e)
    except Exception as e:
        _error(
            "Error loading algorithm module  and associated "
            "computation graph type for : {}".format(algo),
            e,
        )

    return dist_module, algo_module, graph_module
示例#2
0
def load_algo_module(algo):
    """Return the module implementing the dcop algorithm *algo*.

    Returns None (after reporting through ``_error``) when no module
    could be found for this algorithm name.
    """
    try:
        return load_algorithm_module(algo)
    except ImportError as e:
        _error("Could not find dcop algorithm: {}".format(algo), e)
        return None
示例#3
0
def test_load_algorithm_with_default_footprint():
    """dsatuto defines no load/footprint methods: defaults must be used."""
    module = load_algorithm_module("dsatuto")

    assert module.algorithm_name == "dsatuto"
    # Default implementations return a constant unit cost.
    assert module.communication_load(None, None) == 1
    assert module.computation_memory(None) == 1
示例#4
0
def test_load_algorithm():
    """Every available algorithm must load and expose the footprint API."""
    for name in list_available_algorithms():
        module = load_algorithm_module(name)

        assert module.algorithm_name == name
        assert hasattr(module, "communication_load")
        assert hasattr(module, "computation_memory")
示例#5
0
def build_computation(comp_def: ComputationDef) -> MessagePassingComputation:
    """
    Build a concrete computation instance from a computation definition.

    The algorithm module named in the definition is loaded and delegated
    the actual construction.

    :param comp_def: the computation definition
    :return: a concrete MessagePassingComputation
    """
    module = load_algorithm_module(comp_def.algo.algo)
    return module.build_computation(comp_def)
def test_fallback_memory_footprint():
    """dsatuto has no computation_memory: footprint must fall back to 1."""
    module = load_algorithm_module("dsatuto")

    variable = Variable("v1", [1, 2])
    definition = ComputationDef(
        VariableComputationNode(variable, []),
        AlgorithmDef.build_with_default_param("dsatuto"),
    )
    computation = module.DsaTutoComputation(definition)

    assert computation.footprint() == 1
def test_memory_footprint():
    """amaxsum defines computation_memory: footprint must come from it."""
    from pydcop.computations_graph.factor_graph import (
        VariableComputationNode as FGVariableComputationNode, )

    module = load_algorithm_module("amaxsum")

    variable = Variable("v1", [1, 2])
    definition = ComputationDef(
        FGVariableComputationNode(variable, []),
        AlgorithmDef.build_with_default_param("amaxsum"),
    )
    computation = module.MaxSumVariableComputation(comp_def=definition)

    # With no neighbors, the variable's footprint is 0.
    assert computation.footprint() == 0
示例#8
0
def _load_modules(dist, algo):
    """Load the distribution, algorithm and computation-graph modules.

    :param dist: name of a distribution method (may be None or empty, in
        which case no distribution module is loaded).
    :param algo: name of a dcop algorithm.
    :return: a ``(dist_module, algo_module, graph_module)`` tuple; any
        element may be None when the corresponding module could not be
        loaded (``_error`` is called to report each failure).
    """
    dist_module, algo_module, graph_module = None, None, None
    if dist:
        try:
            dist_module = import_module("pydcop.distribution.{}".format(dist))
            # TODO check the imported module has the right methods ?
        except ImportError:
            _error("Could not find distribution method {}".format(dist))

    try:
        algo_module = load_algorithm_module(algo)
        # TODO check the imported module has the right methods ?
    except ImportError:
        # BUG FIX: report the algorithm load failure itself. The previous
        # handler dereferenced algo_module.GRAPH_TYPE here, but algo_module
        # is still None when load_algorithm_module fails, which raised an
        # AttributeError inside the error path.
        _error("Could not find dcop algorithm: {}".format(algo))

    # Only look up the graph type once the algorithm module is available.
    if algo_module is not None:
        try:
            graph_module = import_module("pydcop.computations_graph.{}".format(
                algo_module.GRAPH_TYPE))
        except ImportError:
            _error("Could not find computation graph type: {}".format(
                algo_module.GRAPH_TYPE))

    return dist_module, algo_module, graph_module
示例#9
0
def generate_iot(args):
    """Generate an IoT-like DCOP benchmark and a matching distribution.

    Builds variables and constraints on a power-law constraint graph,
    creates one agent per variable computation (capacity derived from the
    computation's maxsum footprint), distributes the factor computations
    over those agents, and writes the dcop plus its distribution to files
    -- or prints the dcop to stdout when ``args.output`` is not set.

    :param args: parsed CLI arguments; uses ``num``, ``domain``, ``range``
        and ``output``.
    """
    print("generate iot ", args.output)

    # Constraints and variables with a power-law constraint graph:
    variables, constraints, domain = generate_powerlaw_var_constraints(
        args.num, args.domain, args.range
    )

    # Build a dcop and computation graph with no agents, just to be able to
    # compute the footprint of computations:
    dcop = DCOP(
        "graph coloring",
        "min",
        domains={"d": domain},
        variables=variables,
        agents={},
        constraints=constraints,
    )
    graph_module = import_module("pydcop.computations_graph.factor_graph")
    cg = graph_module.build_computation_graph(dcop)
    algo_module = load_algorithm_module("maxsum")

    # Footprint of every computation, used below to size agent capacities
    # and to drive the factor distribution.
    footprints = {c.name: algo_module.computation_memory(c) for c in cg.nodes}

    # Generate an agent for each variable computation and assign the
    # computation to that agent.
    agents = {}  # type: Dict[str, AgentDef]
    mapping = defaultdict(lambda: [])  # type: Dict[str, List[str]]
    for comp in cg.nodes:
        if isinstance(comp, VariableComputationNode):
            a_name = agt_name(comp.name)
            # Capacity is 100x the footprint of the agent's own variable
            # computation, leaving headroom to host factor computations.
            agt = AgentDef(
                a_name,
                capacity=footprints[comp.name] * 100,
                default_hosting_cost=10,
                hosting_costs=agt_hosting_costs(comp, cg),
                default_route=1,
                routes=agt_route_costs(comp, cg),
            )
            logger.debug(
                "Create agent %s for computation %s with capacity %s",
                agt.name,
                comp.name,
                agt.capacity,
            )
            agents[agt.name] = agt
            mapping[agt.name].append(comp.name)

    # Now, we have created all the agents and distributed all the variables
    # let's distribute the factor computations.
    msg_load = msg_load_func(cg, algo_module.communication_load)
    factor_mapping = distribute_factors(agents, cg, footprints, mapping, msg_load)
    for a in mapping:
        mapping[a].extend(factor_mapping[a])

    # Rebuild the dcop, this time with the generated agents attached.
    dcop = DCOP(
        "graph coloring",
        "min",
        domains={"d": domain},
        variables=variables,
        agents=agents,
        constraints=constraints,
    )

    distribution = Distribution(mapping)

    if args.output:
        outputfile = args.output
        write_in_file(outputfile, dcop_yaml(dcop))

        dist = distribution.mapping()
        # Cost of the obtained distribution, evaluated with the ilp_compref
        # cost model (same memory/communication functions as above).
        cost = ilp_compref.distribution_cost(
            distribution,
            cg,
            dcop.agents.values(),
            computation_memory=algo_module.computation_memory,
            communication_load=algo_module.communication_load,
        )

        result = {
            "inputs": {
                "dist_algo": "io_problem",
                "dcop": args.output,
                "graph": "factor_graph",
                "algo": "maxsum",
            },
            "distribution": dist,
            "cost": cost,
        }
        # The distribution is written next to the dcop, in a "dist_" file.
        outputfile = "dist_" + args.output
        write_in_file(outputfile, yaml.dump(result))
    else:
        print(dcop_yaml(dcop))
示例#10
0
def generate_small_world(args):
    """Generate a DCOP on a scale-free graph with heterogeneous agents.

    Variables and binary constraints are built on a Barabási–Albert graph;
    100 agents are created in three capacity classes (75 small, 20 average,
    5 big) with hosting and route costs that favour placing computations on
    the larger agents.  The dcop is written to ``args.output[0]`` when an
    output file is given, otherwise printed to stdout.

    :param args: parsed CLI arguments; uses ``num``, ``domain``, ``range``
        and ``output``.
    """
    logger.debug("generate small world problem %s ", args)

    # Barabási–Albert preferential-attachment graph (scale-free).
    # NOTE(review): a previous comment called this an "Erdős-Rényi graph aka
    # binomial graph", which does not match barabasi_albert_graph below --
    # confirm which random-graph model is actually intended.
    graph = nx.barabasi_albert_graph(args.num, 2)

    # import matplotlib.pyplot as plt
    # plt.subplot(121)
    # nx.draw(graph)  # default spring_layout
    # plt.show()

    # One variable per graph node, all sharing the same domain.
    domain = Domain("d", "d", range(args.domain))
    variables = {}
    agents = {}
    for n in graph.nodes:
        v = Variable(var_name(n), domain)
        variables[v.name] = v
        logger.debug("Create var for node %s : %s", n, v)

    # One binary constraint per edge, with random costs in range(args.range).
    constraints = {}
    for i, (n1, n2) in enumerate(graph.edges):
        v1 = variables[var_name(n1)]
        v2 = variables[var_name(n2)]
        values = random_assignment_matrix([v1, v2], range(args.range))
        c = NAryMatrixRelation([v1, v2], values, name=c_name(n1, n2))
        logger.debug("Create constraints for edge (%s, %s) : %s", v1, v2, c)
        constraints[c.name] = c

    # Build a dcop and computation graph with no agents first, just to be
    # able to compute the footprint of each computation.
    dcop = DCOP(
        "graph coloring",
        "min",
        domains={"d": domain},
        variables=variables,
        agents={},
        constraints=constraints,
    )
    graph_module = import_module("pydcop.computations_graph.factor_graph")
    cg = graph_module.build_computation_graph(dcop)
    algo_module = load_algorithm_module("maxsum")

    footprints = {n.name: algo_module.computation_memory(n) for n in cg.nodes}
    f_vals = footprints.values()
    logger.info(
        "%s computations, footprint: \n  sum: %s, avg: %s max: %s, "
        "min: %s",
        len(footprints),
        sum(f_vals),
        sum(f_vals) / len(footprints),
        max(f_vals),
        min(f_vals),
    )

    # 100 agents in three capacity classes: indices 0-74 are small,
    # 75-94 average, 95-99 big.
    default_hosting_cost = 2000
    small_agents = [agt_name(i) for i in range(75)]
    small_capa, avg_capa, big_capa = 40, 200, 1000
    avg_agents = [agt_name(i) for i in range(75, 95)]
    big_agents = [agt_name(i) for i in range(95, 100)]
    # Only used by the commented-out hosting-cost formulas below.
    hosting_factor = 10

    for a in small_agents:
        # communication costs with all other agents
        # (cheapest within the same class, more expensive across classes)
        comm_costs = {other: 6 for other in small_agents if other != a}
        comm_costs.update({other: 8 for other in avg_agents})
        comm_costs.update({other: 10 for other in big_agents})
        # hosting cost for all computations
        hosting_costs = {}
        for n in cg.nodes:
            # hosting_costs[n.name] = hosting_factor * \
            #                         abs(small_capa -footprints[n.name])
            # Hosting cost is the footprint relative to this class's
            # capacity, so bigger agents are cheaper hosts.
            hosting_costs[n.name] = footprints[n.name] / small_capa

        agt = AgentDef(
            a,
            default_hosting_cost=default_hosting_cost,
            hosting_costs=hosting_costs,
            default_route=10,
            routes=comm_costs,
            capacity=small_capa,
        )
        agents[agt.name] = agt
        logger.debug("Create small agt : %s", agt)

    for a in avg_agents:
        # communication costs with all other agents
        comm_costs = {other: 8 for other in small_agents}
        comm_costs.update({other: 2 for other in avg_agents if other != a})
        comm_costs.update({other: 4 for other in big_agents})
        # hosting cost for all computations
        hosting_costs = {}
        for n in cg.nodes:
            # hosting_costs[n.name] = hosting_factor * \
            #                         abs(avg_capa - footprints[n.name])
            hosting_costs[n.name] = footprints[n.name] / avg_capa

        agt = AgentDef(
            a,
            default_hosting_cost=default_hosting_cost,
            hosting_costs=hosting_costs,
            default_route=10,
            routes=comm_costs,
            capacity=avg_capa,
        )
        agents[agt.name] = agt
        logger.debug("Create avg agt : %s", agt)

    for a in big_agents:
        # communication costs with all other agents
        comm_costs = {other: 10 for other in small_agents}
        comm_costs.update({other: 4 for other in avg_agents})
        comm_costs.update({other: 1 for other in big_agents if other != a})
        # hosting cost for all computations
        hosting_costs = {}
        for n in cg.nodes:
            hosting_costs[n.name] = footprints[n.name] / big_capa

        agt = AgentDef(
            a,
            default_hosting_cost=default_hosting_cost,
            hosting_costs=hosting_costs,
            default_route=10,
            routes=comm_costs,
            capacity=big_capa,
        )
        agents[agt.name] = agt
        logger.debug("Create big agt : %s", agt)

    # Rebuild the dcop with the generated agents attached.
    dcop = DCOP(
        "graph coloring",
        "min",
        domains={"d": domain},
        variables=variables,
        agents=agents,
        constraints=constraints,
    )

    if args.output:
        outputfile = args.output[0]
        write_in_file(outputfile, dcop_yaml(dcop))
    else:
        print(dcop_yaml(dcop))
示例#11
0
def run_cmd(args, timer: Timer = None, timeout=None):
    """Run the replica-distribution command.

    Loads the dcop, the algorithm and its computation graph, runs the
    agents locally (threads or processes) with an existing distribution,
    performs replication with target k = ``args.ktarget``, then writes a
    YAML result (duration, inter-agent message count/size, replica hosts)
    to ``args.output`` or stdout, and exits the process.

    :param args: parsed CLI arguments.
    :param timer: optional timeout Timer, cancelled once replication ends.
    :param timeout: unused in this function -- presumably handled by the
        caller; TODO confirm.
    """
    logger.debug("Distribution replicas : %s", args)
    global orchestrator

    # global dcop
    logger.info("loading dcop from {}".format(args.dcop_files))
    dcop = load_dcop_from_file(args.dcop_files)

    try:
        algo_module = load_algorithm_module(args.algo)
        algo = build_algo_def(algo_module, args.algo, dcop.objective,
                              [])  # FIXME : algo params needed?

        # The computation-graph type to use is dictated by the algorithm.
        graph_module = import_module("pydcop.computations_graph.{}".format(
            algo_module.GRAPH_TYPE))
        logger.info("Building computation graph ")
        cg = graph_module.build_computation_graph(dcop)
        logger.info("Computation graph : %s", cg)

    except ImportError:
        _error("Could not find module for algorithm {} or graph model "
               "for this algorithm".format(args.algo))

    logger.info("loading distribution from {}".format(args.distribution))
    distribution = load_dist_from_file(args.distribution)

    INFINITY = 10000  # FIXME should not be mandatory

    # NOTE(review): duplicate declaration -- `orchestrator` is already
    # declared global at the top of this function.
    global orchestrator
    if args.mode == "thread":
        orchestrator = run_local_thread_dcop(algo,
                                             cg,
                                             distribution,
                                             dcop,
                                             INFINITY,
                                             replication=args.replication)
    elif args.mode == "process":

        # Disable logs from agents, they are in other processes anyway
        agt_logs = logging.getLogger("pydcop.agent")
        agt_logs.disabled = True

        # When using the (default) 'fork' start method, http servers on agent's
        # processes do not work (why ?)
        multiprocessing.set_start_method("spawn")
        orchestrator = run_local_process_dcop(algo,
                                              cg,
                                              distribution,
                                              dcop,
                                              INFINITY,
                                              replication=args.replication)
    # NOTE(review): if args.mode is neither "thread" nor "process",
    # `orchestrator` is never assigned here and the code below fails.

    try:
        orchestrator.deploy_computations()
        start_t = time.time()
        orchestrator.start_replication(args.ktarget)
        orchestrator.wait_ready()
        # print(f" Replication Metrics {orchestrator.replication_metrics()}")
        # Aggregate external (inter-agent) message counts and sizes
        # accumulated during replication.
        metrics = orchestrator.replication_metrics()
        msg_count, msg_size = 0, 0
        for a in metrics:
            msg_count += metrics[a]["count_ext_msg"]
            msg_size += metrics[a]["size_ext_msg"]
        # print(f" Count: {msg_count} - Size {msg_size}")
        duration = time.time() - start_t
        if timer:
            timer.cancel()
        # Map each computation to the list of agents hosting its replicas.
        rep_dist = {
            c: list(hosts)
            for c, hosts in orchestrator.mgt.replica_hosts.items()
        }
        orchestrator.stop_agents(5)
        orchestrator.stop()
        result = {
            "inputs": {
                "dcop": args.dcop_files,
                "algo": args.algo,
                "replication": args.replication,
                "k": args.ktarget,
            },
            "metrics": {
                "duration": duration,
                "msg_size": msg_size,
                "msg_count": msg_count,
            },
            "replica_dist": rep_dist,
        }
        result["inputs"]["distribution"] = args.distribution
        if args.output is not None:
            with open(args.output, encoding="utf-8", mode="w") as fo:
                fo.write(yaml.dump(result))
        else:
            print(yaml.dump(result))
        sys.exit(0)

        # TODO : retrieve and display replica distribution
        # Each agent should send back to the orchestrator the agents hosting
        # the replicas for each of it's computations
    except Exception as e:
        # Best-effort cleanup before reporting the failure.
        # (sys.exit raises SystemExit, which is not caught here.)
        orchestrator.stop_agents(5)
        orchestrator.stop()
        _error("ERROR", e)