Example #1
    def test_host_with_several(self):
        dh = DistributionHints(host_with={'c1': ['v1', 'v2']})

        self.assertIn('c1', dh.host_with('v1'))
        self.assertIn('v1', dh.host_with('c1'))
        self.assertIn('v2', dh.host_with('c1'))
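
The assertions above rely on the host_with hints being symmetric: asking which computations to co-host with 'v1' also returns 'c1', even though the hint was only declared under the 'c1' key. Below is a minimal sketch of such a symmetric lookup, assuming a plain dict of the same shape as the constructor argument (illustration only, not the actual DistributionHints implementation):

def host_with_lookup(host_with_hints: dict, computation: str) -> list:
    """Return every computation hinted to be co-hosted with `computation`."""
    related = set()
    for key, others in host_with_hints.items():
        group = {key, *others}
        if computation in group:
            # Everything declared in the same hint group belongs together.
            related.update(group)
    related.discard(computation)
    return list(related)

# host_with_lookup({'c1': ['v1', 'v2']}, 'v1') -> ['c1', 'v2'] (in any order)
# host_with_lookup({'c1': ['v1', 'v2']}, 'c1') -> ['v1', 'v2'] (in any order)
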
Example #2
from collections import defaultdict
from random import choice, shuffle
from typing import Iterable

# ComputationGraph, AgentDef, DistributionHints, Distribution and
# ImpossibleDistributionException are assumed to be imported from the
# surrounding project; they are not standard-library classes.


def _distribute_try(computation_graph: ComputationGraph,
                    agents: Iterable[AgentDef],
                    hints: DistributionHints = None,
                    computation_memory=None,
                    communication_load=None,
                    attempt=0):

    # Remaining capacity for each agent, decremented as computations are
    # placed on it.
    agents_capa = {a.name: a.capacity for a in agents}
    # The distribution method depends on the order in which nodes are
    # processed, so we shuffle them to try a new configuration when
    # retrying a distribution after a failure.
    nodes = list(computation_graph.nodes)
    shuffle(nodes)
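    # mapping: agent name -> set of computations assigned to that agent.
    # var_hosted: computation name -> name of the agent hosting it.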
    mapping = defaultdict(set)
    var_hosted = {}

    # Distribute the computations each agent must host onto that agent.
    # For a DCOP built from an SECP, this is equivalent to deploying the
    # light variables on the light devices, as was done before.
    for a in agents_capa:
        for c in hints.must_host(a):
            mapping[a].add(c)
            var_hosted.update({c: a})
            agents_capa[a] -= computation_memory(
                computation_graph.computation(c))

    # First, mimic the original SECP ad hoc behavior.
    for n in nodes:
        if n.name in var_hosted:
            continue
        hostwith = hints.host_with(n.name)
        # SECP models have a constraint that should be hosted on the same
        # agent as the variable of the model.
        if len(hostwith) == 1 and n.type == 'FactorComputation' and \
            computation_graph.computation(hostwith[0]).type \
                == 'VariableComputation':

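            # Prefer an agent that already hosts one of the variables this
            # factor depends on, keeping linked computations together.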
            dependent_var = [v.name for v in n.factor.dimensions]
            candidates = [
                a for a in agents_capa
                if len(set(mapping[a]).intersection(dependent_var)) > 0
            ]

            # Prefer the candidate agent currently hosting the fewest
            # computations.
            candidates.sort(key=lambda x: len(mapping[x]))
            if candidates:
                selected = candidates[0]
            else:
                selected = choice(list(agents_capa.keys()))

            mapping[selected].update({n.name, hostwith[0]})
            var_hosted[n.name] = selected
            var_hosted[hostwith[0]] = selected
            agents_capa[selected] -= computation_memory(n)

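    # Place every remaining computation, preferring hinted agents and agents
    # already hosting linked computations.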
    for n in nodes:
        if n.name in var_hosted:
            continue
        footprint = computation_memory(n)
        # Candidates: hinted agents only, keeping those with enough capacity
        candidates = [(agents_capa[a], a) for a in hints.host_with(n.name)
                      if agents_capa[a] > footprint]
        # If no hinted agent has enough capacity, fall back to all agents
        if not candidates:
            candidates = [(c, a) for a, c in agents_capa.items()
                          if c > footprint]

        # Select the candidate that is already hosting the highest
        # number of computations sharing a link with this one.
        scores = []
        for capacity, a in candidates:
            count = 0
            for l in computation_graph.links_for_node(n.name):
                count += len([None for l_n in l.nodes if l_n in mapping[a]])
            # The tuple is in this order so that we sort by score first,
            # and then by available capacity.
            scores.append((count, capacity, a))
        scores.sort(reverse=True)

        if scores:
            selected = scores[0][2]
            agents_capa[selected] -= footprint
        else:
            # Retry 3 times in case of failure, the nodes will be shuffled
            # every time, increasing the probability to find a feasible
            # distribution.
            if attempt > 2:
                raise ImpossibleDistributionException(
                    'Could not find feasible distribution after {} '
                    'attempts'.format(attempt))
            else:
                return _distribute_try(computation_graph, agents, hints,
                                       computation_memory, communication_load,
                                       attempt + 1)

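        # Record the chosen agent for this computation.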
        mapping[selected].update({n.name})
        var_hosted[n.name] = selected

    return Distribution({a: list(mapping[a]) for a in mapping})
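
The final selection relies on Python's tuple ordering: sorting (count, capacity, agent) triples in reverse picks the agent sharing the most links with the computation first, breaking ties by the largest remaining capacity. Here is a standalone sketch of that scoring step, using plain dictionaries and sets instead of the project's graph and agent classes (names are illustrative only):

def pick_agent(candidates, mapping, neighbours):
    """candidates: list of (remaining_capacity, agent_name) pairs.
    mapping: agent_name -> set of computations already hosted there.
    neighbours: names of the computations linked to the one being placed."""
    scores = []
    for capacity, agent in candidates:
        # Count how many linked computations this agent already hosts.
        count = len(neighbours & mapping.get(agent, set()))
        scores.append((count, capacity, agent))
    scores.sort(reverse=True)
    return scores[0][2] if scores else None

# 'a2' already hosts two linked computations while 'a1' hosts only one,
# so 'a2' wins even though 'a1' has more remaining capacity.
mapping = {'a1': {'v1'}, 'a2': {'v2', 'v3'}}
print(pick_agent([(50, 'a1'), (20, 'a2')], mapping, {'v1', 'v2', 'v3'}))  # a2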