Example #1
def agt_route_costs(
    var_comp: ComputationNode, cg: ComputationGraph
) -> Dict[str, float]:
    """
    Generate route cost between the agent hosting var_comp and all other agents.

    Parameters
    ----------
    var_comp: ComputationNode
        the Variable computation hosted by the agent
    cg: ComputationGraph
        the computation graph

    Returns
    -------
    a dict containing the route cost to all other agents
    """
    routes = {}
    degree_v = len(var_comp.neighbors)
    for neighbor in var_comp.neighbors:
        # var_comp is a variable computation; each neighbor is a factor,
        # which has two neighbors: var_comp and another variable
        neigh_var = next(
            c for c in cg.computation(neighbor).neighbors if c != var_comp.name
        )
        degree_n = len(cg.computation(neigh_var).neighbors)
        route = (1 + abs(degree_n - degree_v)) / (degree_n + degree_v)
        routes[agt_name(neigh_var)] = route
    return routes
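The route cost above depends only on the degrees of the two variables: it is lowest when the degrees are equal and grows as they diverge. A standalone sketch of the formula with made-up degrees (illustration only, not using pyDcop):

def route_cost(degree_v: int, degree_n: int) -> float:
    # Same formula as in agt_route_costs: (1 + |deg_n - deg_v|) / (deg_n + deg_v)
    return (1 + abs(degree_n - degree_v)) / (degree_n + degree_v)

print(route_cost(3, 3))  # 0.1666... : equal degrees, cheap route
print(route_cost(2, 6))  # 0.625     : unbalanced degrees, expensive route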
Example #2
def distribute(
        computation_graph: ComputationGraph,
        agentsdef: Iterable[AgentDef],
        hints: DistributionHints = None,
        computation_memory: Callable[[ComputationNode], float] = None,
        communication_load: Callable[[ComputationNode, str], float] = None,
        timeout=None,  # not used
) -> Distribution:
    if computation_memory is None:
        raise ImpossibleDistributionException("adhoc distribution requires "
                                              "computation_memory functions")

    mapping = defaultdict(lambda: [])
    agents_capa = {a.name: a.capacity for a in agentsdef}
    computations = computation_graph.node_names()
    # As we're dealing with an SECP modelled as a constraint graph,
    # we only have actuator and physical model variables.

    # First, put each actuator variable on its agent
    for agent in agentsdef:
        for comp in computation_graph.node_names():
            if agent.hosting_cost(comp) == 0:
                mapping[agent.name].append(comp)
                computations.remove(comp)
                agents_capa[agent.name] -= computation_memory(
                    computation_graph.computation(comp))
                if agents_capa[agent.name] < 0:
                    raise ImpossibleDistributionException(
                        f"Not enough capacity on {agent} to hosts actuator {comp}: {agents_capa[agent.name]}"
                    )
                break
    logger.info(f"Actuator variables on agents: {dict(mapping)}")

    # We must now place each physical model variable on an agent that hosts
    # a variable it depends on.
    # As physical models always depend on actuator variables, there is
    # always a computation it depends on that is already hosted.

    for comp in computations:
        footprint = computation_memory(computation_graph.computation(comp))
        neighbors = computation_graph.neighbors(comp)

        candidates = find_candidates(agents_capa, comp, footprint, mapping,
                                     neighbors)

        # Host the computation on the first agent and decrease its remaining capacity
        selected = candidates[0][2]
        mapping[selected].append(comp)
        agents_capa[selected] -= footprint

    return Distribution({a: list(mapping[a]) for a in mapping})
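The helper find_candidates is not shown in this example. Below is a minimal stand-in consistent with how it is called (it must return triples whose last element is an agent name, best candidate first), assuming the intent is to keep agents with enough remaining capacity that already host a neighbor of comp; this is a sketch, not the actual pyDcop helper:

def find_candidates(agents_capa, comp, footprint, mapping, neighbors):
    # Sketch only: keep agents that still have room for `footprint` and
    # that already host at least one neighbor of `comp`.
    candidates = []
    for agt, capa in agents_capa.items():
        if capa < footprint:
            continue
        hosted_neighbors = len(set(mapping[agt]) & set(neighbors))
        if hosted_neighbors:
            candidates.append((hosted_neighbors, capa, agt))
    if not candidates:
        raise ImpossibleDistributionException(
            f"No agent can host computation {comp}")
    # Sort so that candidates[0][2] is the preferred agent, as expected by
    # the caller above.
    return sorted(candidates, reverse=True)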
Example #3
    def setUp(self):

        c1 = ComputationNode('c1', 'dummy_type', neighbors=['c2'])
        c2 = ComputationNode('c2', 'dummy_type', neighbors=['c1'])

        self.cg = ComputationGraph(graph_type='test', nodes=[c1, c2])
        self.agents = [AgentDef('a1'), AgentDef('a2')]
Example #4
def distribute(computation_graph: ComputationGraph,
               agentsdef: Iterable[AgentDef],
               hints: DistributionHints = None,
               computation_memory=None,
               communication_load=None):
    """
    Generate a distribution for the dcop.

    :param computation_graph: a ComputationGraph
    :param agentsdef: the agents definitions
    :param hints: a DistributionHints
    :param computation_memory: a function that takes a computation node as an
      argument and returns the memory footprint of this computation
    :param communication_load: a function that takes a Link as an argument
      and returns the communication cost of this edge
    """
    if computation_memory is None or communication_load is None:
        raise ImpossibleDistributionException(
            'LinearProg distribution requires '
            'computation_memory and communication_load functions')

    agents = list(agentsdef)

    # In order to remove (later on) distribution hints, we interpret
    # hosting costs of 0 as a "must host" relationship
    must_host = defaultdict(lambda: [])
    for agent in agentsdef:
        for comp in computation_graph.node_names():
            if agent.hosting_cost(comp) == 0:
                must_host[agent.name].append(comp)
    logger.debug(f"Must host: {must_host}")

    return factor_graph_lp_model(computation_graph, agents, must_host,
                                 computation_memory, communication_load)
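A possible usage sketch, assuming cg is a ComputationGraph and agents a list of AgentDef as built in the test fixtures elsewhere in these examples; the constant cost functions are purely illustrative:

# Sketch: calling the LP-based distribute with trivial cost functions.
dist = distribute(
    cg,
    agents,
    computation_memory=lambda node: 10.0,         # constant footprint
    communication_load=lambda node, other: 1.0,   # constant message size
)
for comp in cg.node_names():
    print(comp, "->", dist.agent_for(comp))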
Example #5
def _objective_function(cg: ComputationGraph, communication_load,
                        alphas, agents_names):
    # The objective function is the negated sum of the communication cost on
    # the links in the factor graph.
    return lpSum([-communication_load(cg.computation(link.variable_node),
                                      link.factor_node) *
                  alphas[((link.variable_node, link.factor_node), k)]
                  for link in cg.links for k in agents_names])
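The alphas used in this objective are binary selection variables keyed by ((variable_node, factor_node), agent). A minimal sketch of how such a dictionary could be built with PuLP; the link and agent names below are placeholders, not taken from pyDcop:

from pulp import LpBinary, LpVariable

links = [("v1", "f1"), ("v2", "f1")]          # (variable_node, factor_node)
agents_names = ["a1", "a2"]
alphas = {
    ((v, f), k): LpVariable(f"alpha_{v}_{f}_{k}", cat=LpBinary)
    for v, f in links
    for k in agents_names
}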
Example #6
    def setUp(self):

        # A grid-shaped (3x2) computation graph with 6 computations
        self.l1 = Link(['c1', 'c2'])
        self.l2 = Link(['c2', 'c3'])
        self.l3 = Link(['c1', 'c4'])
        self.l4 = Link(['c2', 'c5'])
        self.l5 = Link(['c3', 'c6'])
        self.l6 = Link(['c4', 'c5'])
        self.l7 = Link(['c5', 'c6'])
        self.links = [
            self.l1, self.l2, self.l3, self.l4, self.l5, self.l6, self.l7
        ]

        nodes = {}
        for i in range(1, 7):
            name = 'c' + str(i)
            nodes[name] = ComputationNode(
                name,
                'test',
                links=[l for l in self.links if l.has_node(name)])

        self.cg = ComputationGraph('test', nodes=nodes.values())
        # setattr(self.cg, 'links', [self.l1, self.l2, self.l3, self.l4,
        #                            self.l5, self.l6, self.l7])
        #
        # 6 agents hosting these computations
        d = Discovery('a1', 'addr1')
        d.register_computation('c1', 'a1', 'addr1', publish=False)
        d.register_computation('c2', 'a2', 'addr2', publish=False)
        d.register_computation('c3', 'a3', 'addr3', publish=False)
        d.register_computation('c4', 'a4', 'addr4', publish=False)
        d.register_computation('c5', 'a5', 'addr5', publish=False)
        d.register_computation('c6', 'a8', 'addr8', publish=False)
        # and the corresponding replica, 2 for each computation
        d.register_replica('c1', 'a2')
        d.register_replica('c1', 'a5')
        d.register_replica('c2', 'a3')
        d.register_replica('c2', 'a6')
        d.register_replica('c3', 'a1')
        d.register_replica('c3', 'a4')
        d.register_replica('c4', 'a2')
        d.register_replica('c4', 'a5')
        d.register_replica('c5', 'a3')
        d.register_replica('c5', 'a6')
        d.register_replica('c6', 'a1')
        d.register_replica('c6', 'a4')
        self.discovery = d
Example #7
def distribution_cost(
        distribution: Distribution, computation_graph: ComputationGraph,
        agentsdef: Iterable[AgentDef],
        computation_memory: Callable[[ComputationNode], float],
        communication_load: Callable[[ComputationNode, str], float]) -> float:
    """
    Compute the cost for a distribution.

    In this model, the cost only includes the communication costs based on message size.

    Parameters
    ----------
    distribution
    computation_graph
    agentsdef
    computation_memory
    communication_load

    Returns
    -------
    A triple (cost, communication_cost, hosting_cost). In this model the
    hosting cost is always 0, so the total cost is the communication cost.
    """
    # No hosting and route cost here, as this distribution only takes message size
    # into account:
    # route = route_fonc(agentsdef)
    # hosting_cost = hosting_cost_func(agentsdef)

    comm = 0
    agt_names = [a.name for a in agentsdef]
    for l in computation_graph.links:
        # As we support hypergraphs, a link may have more than 2 ends
        for c1, c2 in combinations(l.nodes, 2):
            if distribution.agent_for(c1) != distribution.agent_for(c2):
                edge_cost = communication_load(
                    computation_graph.computation(c1), c2)
                logger.debug(f"edge cost between {c1} and {c2} :  {edge_cost}")
                comm += edge_cost
            else:
                logger.debug(
                    f"On same agent, no edge cost between {c1} and {c2}")

    # This distribution model only takes communication cost into account.
    # cost = RATIO_HOST_COMM * comm + (1-RATIO_HOST_COMM) * hosting
    return comm, comm, 0
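Because links may be hyperedges, the inner loop prices every unordered pair of endpoints; a quick standalone illustration of that enumeration:

from itertools import combinations

hyperlink_nodes = ["c1", "c2", "c3"]   # a 3-ary hyperlink
print(list(combinations(hyperlink_nodes, 2)))
# [('c1', 'c2'), ('c1', 'c3'), ('c2', 'c3')]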
Example #8
def _removal_candidate_computation_info(
        orphan: str, departed: List[str], cg: ComputationGraph,
        discovery: Discovery) \
        -> Tuple[List[str], Dict[str, str], Dict[str, List[str]]]:
    """
    All info needed by an agent to participate in negotiations about hosting
    the orphaned computation `orphan`.

    :param orphan: the candidate computation that must be hosted
    :param departed: the agents that left the system
    :param cg: the computation graph
    :param discovery: the distribution of computations on agents

    :return: a triple ( candidate_agents, fixed_neighbors, candidates_neighbors)
    where:

    * candidate_agents is a list of agents that could host this computation
    * fixed_neighbors is a map comp -> agent that indicates, for each
      neighbor computation of `orphan` that is not itself a candidate
      (orphaned), its host agent
    * candidates_neighbors is a map comp -> List[agt] indicating which agents
      could host each neighbor computation that is also a candidate
      computation.

    """
    orphaned_computation = _removal_orphaned_computations(departed, discovery)

    candidate_agents = list(
        discovery.replica_agents(orphan).difference(departed))
    fixed_neighbors = {}
    candidates_neighbors = {}
    for n in cg.neighbors(orphan):
        if n == orphan:
            continue
        if n in orphaned_computation:
            candidates_neighbors[n] = \
                list(discovery.replica_agents(n).difference(departed))
        else:
            fixed_neighbors[n] = discovery.computation_agent(n)

    return candidate_agents, fixed_neighbors, candidates_neighbors
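The candidate agents are simply the replica holders of the orphaned computation minus the agents that just left; a tiny illustration of that set arithmetic with made-up names:

# Illustration only: replica holders of the orphan, minus departed agents.
replicas = {"a2", "a5"}          # e.g. what discovery.replica_agents(orphan) returns
departed = ["a5"]
print(list(replicas.difference(departed)))  # ['a2']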
Example #9
def _distribute_try(computation_graph: ComputationGraph,
                    agents: Iterable[AgentDef],
                    hints: DistributionHints = None,
                    computation_memory=None,
                    communication_load=None,
                    attempt=0):

    agents_capa = {a.name: a.capacity for a in agents}
    # The distribution method depends on the order in which the nodes are
    # processed; we shuffle them to try a new configuration when retrying a
    # distribution after a failure.
    nodes = list(computation_graph.nodes)
    shuffle(nodes)
    mapping = defaultdict(set)
    var_hosted = {}

    # Distribute owned computation variables on the corresponding agents.
    # For a DCOP built from an SECP, this is the same thing as deploying the
    # light variables on the light devices, as we were doing before.
    for a in agents_capa:
        for c in hints.must_host(a):
            mapping[a].add(c)
            var_hosted.update({c: a})
            agents_capa[a] -= computation_memory(
                computation_graph.computation(c))

    # First mimic original secp adhoc behavior
    for n in nodes:
        if n.name in var_hosted:
            continue
        hostwith = hints.host_with(n.name)
        # SECP models have a constraint that should be hosted on the same
        # agent as the variable of the model
        if len(hostwith) == 1 and n.type == 'FactorComputation' and \
            computation_graph.computation(hostwith[0]).type \
                == 'VariableComputation':

            dependent_var = [v.name for v in n.factor.dimensions]
            candidates = [
                a for a in agents_capa
                if len(set(mapping[a]).intersection(dependent_var)) > 0
            ]

            # Prefer the candidate agent hosting the fewest computations
            candidates.sort(key=lambda x: len(mapping[x]))
            if candidates:
                selected = candidates[0]
            else:
                selected = choice(list(agents_capa.keys()))

            mapping[selected].update({n.name, hostwith[0]})
            var_hosted[n.name] = selected
            var_hosted[hostwith[0]] = selected
            agents_capa[selected] -= computation_memory(n)

    for n in nodes:
        if n.name in var_hosted:
            continue
        footprint = computation_memory(n)
        # Candidates: hinted agents only, if they have enough capacity
        candidates = [(agents_capa[a], a) for a in hints.host_with(n.name)
                      if agents_capa[a] > footprint]
        # If no hinted agent has enough capacity, fall back to all agents
        if not candidates:
            candidates = [(c, a) for a, c in agents_capa.items()
                          if c > footprint]

        # Select the candidate that is already hosting the highest
        # number of computations sharing a link with this one.
        scores = []
        for capacity, a in candidates:
            count = 0
            for l in computation_graph.links_for_node(n.name):
                count += len([None for l_n in l.nodes if l_n in mapping[a]])
            # The tuple is in this order so that we sort by score first,
            # and then by available capacity.
            scores.append((count, capacity, a))
        scores.sort(reverse=True)

        if scores:
            selected = scores[0][2]
            agents_capa[selected] -= footprint
        else:
            # Retry 3 times in case of failure; the nodes are shuffled
            # every time, increasing the probability of finding a feasible
            # distribution.
            if attempt > 2:
                raise ImpossibleDistributionException(
                    'Could not find feasible distribution after {} '
                    'attempts'.format(attempt))
            else:
                # Retry with communication_load (not the graph) and return
                # the result of the recursive attempt.
                return _distribute_try(
                    computation_graph, agents, hints,
                    computation_memory, communication_load, attempt + 1)

        mapping[selected].update({n.name})
        var_hosted[n.name] = selected

    return Distribution({a: list(mapping[a]) for a in mapping})
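The agent selection above sorts (count, capacity, agent) tuples in reverse, so the number of linked computations already hosted wins first and remaining capacity breaks ties; a short illustration with made-up scores:

scores = [(2, 50, "a1"), (2, 80, "a2"), (1, 200, "a3")]
scores.sort(reverse=True)
print(scores[0][2])  # 'a2': same link count as a1, but more free capacity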
Example #10
File: gh_cgdp.py  Project: khoihd/pyDcop
def distribute(
    computation_graph: ComputationGraph,
    agentsdef: Iterable[AgentDef],
    hints=None,
    computation_memory: Callable[[ComputationNode], float] = None,
    communication_load: Callable[[ComputationNode, str], float] = None,
) -> Distribution:
    """
    gh-cgdp distribution method.

    Heuristic distribution based on communication and hosting costs, while
    respecting agents' capacities.

    Parameters
    ----------
    computation_graph
    agentsdef
    hints
    computation_memory
    communication_load

    Returns
    -------
    Distribution:
        The distribution for the computation graph.

    """

    # Place computations with hosting cost == 0.
    # For SECP, this assigns actuator variables and factors to the right device.
    fixed_mapping = {}
    for comp in computation_graph.node_names():
        for agent in agentsdef:
            if agent.hosting_cost(comp) == 0:
                fixed_mapping[comp] = (
                    agent.name,
                    computation_memory(computation_graph.computation(comp)),
                )
                break

    # Sort computation by footprint, but add a random element to avoid sorting on names
    computations = [(computation_memory(n), n, None, random.random())
                    for n in computation_graph.nodes
                    if n.name not in fixed_mapping]
    computations = sorted(computations,
                          key=lambda o: (o[0], o[3]),
                          reverse=True)
    computations = [t[:-1] for t in computations]
    logger.info("placing computations %s",
                [(f, c.name) for f, c, _ in computations])

    current_mapping = {}  # Type: Dict[str, str]
    i = 0
    while len(current_mapping) != len(computations):
        footprint, computation, candidates = computations[i]
        logger.debug(
            "Trying to place computation %s with footprint %s",
            computation.name,
            footprint,
        )
        # Look for candidate agents for computation c
        # TODO: keep a list of remaining capacities for agents ?
        if candidates is None:
            candidates = candidate_hosts(
                computation,
                footprint,
                computations,
                agentsdef,
                communication_load,
                current_mapping,
                fixed_mapping,
            )
            computations[i] = footprint, computation, candidates
        logger.debug("Candidates for computation %s : %s", computation.name,
                     candidates)

        if not candidates:
            if i == 0:
                logger.error(
                    f"Cannot find a distribution, no candidate for computation {computation}\n"
                    f" current mapping: {current_mapping}")
                raise ImpossibleDistributionException(
                    f"Impossible Distribution, no candidate for {computation}")

            # no candidate : backtrack !
            i -= 1
            logger.info(
                "No candidate for %s, backtrack placement "
                "of computation %s (was on %s",
                computation.name,
                computations[i][1].name,
                current_mapping[computations[i][1].name],
            )
            current_mapping.pop(computations[i][1].name)

            # FIXME : eliminate selected agent for previous computation
        else:
            _, selected = candidates.pop()
            current_mapping[computation.name] = selected.name
            computations[i] = footprint, computation, candidates
            logger.debug("Place computation %s on agent %s", computation.name,
                         selected.name)
            i += 1

    # Build the distribution for the mapping
    agt_mapping = defaultdict(lambda: [])
    for c, a in current_mapping.items():
        agt_mapping[a].append(c)
    for c, (a, _) in fixed_mapping.items():
        agt_mapping[a].append(c)
    dist = Distribution(agt_mapping)

    return dist
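The sort key (footprint, random tie-breaker) ensures computations with equal footprints are not always processed in name order; a small standalone illustration of that trick:

import random

comps = [(10.0, name, None, random.random()) for name in ("c1", "c2", "c3")]
comps.sort(key=lambda o: (o[0], o[3]), reverse=True)
# Equal footprints: the order of c1/c2/c3 changes from run to run.
print([name for _, name, _, _ in comps])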
Example #11
def ilp_cgdp(
    cg: ComputationGraph,
    agentsdef: Iterable[AgentDef],
    footprint: Callable[[str], float],
    capacity: Callable[[str], float],
    route: Callable[[str, str], float],
    msg_load: Callable[[str, str], float],
    hosting_cost: Callable[[str, str], float],
):
    agt_names = [a.name for a in agentsdef]
    pb = LpProblem("oilp_cgdp", LpMinimize)

    # One binary variable xij for each (variable, agent) couple
    xs = LpVariable.dict("x", (cg.node_names(), agt_names), cat=LpBinary)

    # TODO: Do not create var for computation that are already assigned to an agent with hosting = 0 ?
    # Force computation with hosting cost of 0 to be hosted on that agent.
    # This makes the work much easier for glpk !
    x_fixed_to_0 = []
    x_fixed_to_1 = []
    for agent in agentsdef:
        for comp in cg.node_names():
            assigned_agent = None
            if agent.hosting_cost(comp) == 0:
                pb += xs[(comp, agent.name)] == 1
                x_fixed_to_1.append((comp, agent.name))
                assigned_agent = agent.name
                for other_agent in agentsdef:
                    if other_agent.name == assigned_agent:
                        continue
                    pb += xs[(comp, other_agent.name)] == 0
                    x_fixed_to_0.append((comp, other_agent.name))
                logger.debug(
                    f"Setting binary varaibles to fixed computation {comp}")

    # One binary variable for each pair of computations (c1, c2) and each pair of agents (a1, a2)
    betas = {}
    count = 0
    for a1, a2 in combinations(agt_names, 2):
        # Only create variables for couple c1, c2 if there is an edge in the
        # graph between these two computations.
        for l in cg.links:
            # As we support hypergraphs, a link may have more than 2 ends
            for c1, c2 in combinations(l.nodes, 2):
                if (c1, a1, c2, a2) in betas:
                    continue
                count += 2
                b = LpVariable("b_{}_{}_{}_{}".format(c1, a1, c2, a2),
                               cat=LpBinary)
                betas[(c1, a1, c2, a2)] = b
                # Linearization constraints:
                # b_ijmn <= x_im
                # b_ijmn <= x_jn
                if (c1, a1) in x_fixed_to_0 or (c2, a2) in x_fixed_to_0:
                    pb += b == 0
                elif (c1, a1) in x_fixed_to_1:
                    pb += b == xs[(c2, a2)]
                elif (c2, a2) in x_fixed_to_1:
                    pb += b == xs[(c1, a1)]
                else:
                    pb += b <= xs[(c1, a1)]
                    pb += b <= xs[(c2, a2)]
                    pb += b >= xs[(c2, a2)] + xs[(c1, a1)] - 1

                b = LpVariable("b_{}_{}_{}_{}".format(c1, a2, c2, a1),
                               cat=LpBinary)
                if (c1, a2) in x_fixed_to_0 or (c2, a1) in x_fixed_to_0:
                    pb += b == 0
                elif (c1, a2) in x_fixed_to_1:
                    pb += b == xs[(c2, a1)]
                elif (c2, a1) in x_fixed_to_1:
                    pb += b == xs[(c1, a2)]
                else:
                    betas[(c1, a2, c2, a1)] = b
                    pb += b <= xs[(c2, a1)]
                    pb += b <= xs[(c1, a2)]
                    pb += b >= xs[(c1, a2)] + xs[(c2, a1)] - 1

    # Set objective: communication + hosting_cost
    pb += (
        _objective(xs, betas, route, msg_load, hosting_cost),
        "Communication costs and prefs",
    )

    # Adding constraints:
    # Constraints: Memory capacity for all agents.
    for a in agt_names:
        pb += (
            lpSum([footprint(i) * xs[i, a]
                   for i in cg.node_names()]) <= capacity(a),
            "Agent {} capacity".format(a),
        )

    # Constraints: all computations must be hosted.
    for c in cg.node_names():
        pb += (
            lpSum([xs[c, a] for a in agt_names]) == 1,
            "Computation {} hosted".format(c),
        )

    # solve using GLPK
    status = pb.solve(
        solver=GLPK_CMD(keepFiles=1, msg=False, options=["--pcost"]))

    if status != LpStatusOptimal:
        raise ImpossibleDistributionException("No possible optimal"
                                              " distribution ")
    logger.debug("GLPK cost : %s", pulp.value(pb.objective))

    mapping = {}
    for k in agt_names:
        agt_computations = [
            i for i, ka in xs if ka == k and pulp.value(xs[(i, ka)]) == 1
        ]
        # print(k, ' -> ', agt_computations)
        mapping[k] = agt_computations
    return mapping
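The three constraints b <= x1, b <= x2 and b >= x1 + x2 - 1 used above are the standard linearization of the product of two binary variables; a self-contained PuLP sketch of that pattern in isolation:

from pulp import LpBinary, LpMinimize, LpProblem, LpVariable

prob = LpProblem("linearize_product", LpMinimize)
x1 = LpVariable("x1", cat=LpBinary)
x2 = LpVariable("x2", cat=LpBinary)
b = LpVariable("b", cat=LpBinary)
# b behaves exactly like x1 * x2 under these three linear constraints.
prob += b <= x1
prob += b <= x2
prob += b >= x1 + x2 - 1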
Example #12
def distribute_factors(
    agents: Dict[str, AgentDef],
    cg: ComputationGraph,
    footprints: Dict[str, float],
    mapping: Dict[str, List[str]],
    msg_load: Callable[[str, str], float],
) -> Dict[str, List[str]]:
    """
    Optimal distribution of factors on agents.

    Parameters
    ----------
    agents: dict
        a dict {agent_name: AgentDef} containing all available agents
    cg: ComputationGraph
        the computation graph
    footprints: dict
        a dict {computation_name: memory footprint} for all computations
    mapping: dict
        a dict {agent_name: [variable names]} giving the variable already
        hosted by each agent
    msg_load: callable
        a function returning the communication load between a variable and a
        factor computation

    Returns
    -------
    a dict {agent_name: list of factor names}
    """
    pb = LpProblem("ilp_factors", LpMinimize)

    # build the inverse mapping var -> agt
    inverse_mapping = {}  # type: Dict[str, str]
    for a in mapping:
        inverse_mapping[mapping[a][0]] = a

    # One binary variable xij for each (variable, agent) couple
    factor_names = [n.name for n in cg.nodes if isinstance(n, FactorComputationNode)]
    xs = LpVariable.dict("x", (factor_names, agents), cat=LpBinary)
    logger.debug("Binary variables for factor distribution : %s", xs)

    # Hard constraints: respect agent's capacity
    for a in agents:
        # Footprint of the variable this agent is already hosting:
        v_footprint = footprints[mapping[a][0]]
        pb += (
            lpSum([footprints[fn] * xs[fn, a] for fn in factor_names])
            <= (agents[a].capacity - v_footprint),
            "Agent {} capacity".format(a),
        )

    # Hard constraints: all computations must be hosted.
    for c in factor_names:
        pb += lpSum([xs[c, a] for a in agents]) == 1, "Factor {} hosted".format(c)

    # 1st objective : minimize communication costs:
    comm = LpAffineExpression()
    for (fn, an_f) in xs:
        for vn in cg.neighbors(fn):
            an_v = inverse_mapping[vn]  # agt hosting neighbor var vn
            comm += agents[an_f].route(an_v) * msg_load(vn, fn) * xs[(fn, an_f)]

    # 2nd objective: minimize hosting costs
    hosting = lpSum([agents[a].hosting_cost(c) * xs[(c, a)] for c, a in xs])

    # aggregate the two objectives using RATIO_HOST_COMM
    pb += lpSum([RATIO_HOST_COMM * comm, (1 - RATIO_HOST_COMM) * hosting])

    # solve using GLPK and convert to mapping { agt_name : [factors names]}
    status = pb.solve(solver=GLPK_CMD(keepFiles=1, msg=False, options=["--pcost"]))
    if status != LpStatusOptimal:
        raise ImpossibleDistributionException(
            "No possible optimal distribution for factors"
        )
    logger.debug("GLPK cost : %s", value(pb.objective))
    mapping = {}  # type: Dict[str, List[str]]
    for k in agents:
        agt_computations = [i for i, ka in xs if ka == k and value(xs[(i, ka)]) == 1]
        # print(k, ' -> ', agt_computations)
        mapping[k] = agt_computations
    logger.debug("Factors distribution : %s ", mapping)
    return mapping
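The two objectives are blended with a convex combination controlled by RATIO_HOST_COMM; on plain numbers the aggregation looks like the sketch below (the 0.8 weight is an assumption for illustration, the actual constant is defined elsewhere in pyDcop):

RATIO_HOST_COMM = 0.8  # assumed value, for illustration only

def aggregate(comm_cost: float, hosting_cost: float) -> float:
    # Same convex combination as the LP objective above.
    return RATIO_HOST_COMM * comm_cost + (1 - RATIO_HOST_COMM) * hosting_cost

print(aggregate(12.0, 3.0))  # 10.2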
Example #13
def _computation_memory_in_cg(computation_name: str, cg: ComputationGraph,
                              computation_memory):
    computation = cg.computation(computation_name)
    l = computation_memory(computation)
    return l