Example #1
def test_simulation_manager_cache(graph, platform_odroid,
                                  representation_odroid, mapper):
    proc_names = [proc.name for proc in graph.processes()]
    core_types = [core.type for core in platform_odroid.processors()]
    trace = MockTrace(proc_names, core_types, lambda _: 5, max_length=10)
    mapping = mapper.generate_mapping([0, 4])
    simulation_manager = SimulationManager(representation_odroid,
                                           trace,
                                           jobs=None,
                                           parallel=True)
    assert not simulation_manager._cache
    lookup_result = simulation_manager.lookup(tuple([0, 4]))
    assert not lookup_result
    simulation_result = simulation_manager.simulate([mapping])
    assert len(simulation_manager._cache) == 1
    lookup_result = simulation_manager.lookup(tuple([0, 4]))
    assert isinstance(lookup_result, SimulationResult)
    assert simulation_result[0] == lookup_result
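
The test above relies on the SimulationManager memoizing results keyed by the mapping tuple: the first lookup misses, simulate() fills the cache, and the second lookup hits. A minimal sketch of that caching pattern (illustrative only; CachingSimulator is a hypothetical stand-in, not the SimulationManager used above, which additionally handles parallel execution and statistics):

# Hypothetical, minimal version of the memoization pattern the test
# above exercises. lookup() returns False on a miss, matching the
# `assert not lookup_result` check in the test.
class CachingSimulator:
    def __init__(self, evaluate):
        self._cache = {}  # mapping tuple -> simulation result
        self._evaluate = evaluate

    def lookup(self, mapping_tuple):
        return self._cache.get(mapping_tuple, False)

    def simulate(self, mappings):
        results = []
        for m in mappings:
            key = tuple(m)
            if key not in self._cache:
                self._cache[key] = self._evaluate(key)
            results.append(self._cache[key])
        return results

# usage:
#   sim = CachingSimulator(lambda m: sum(m))
#   sim.simulate([[0, 4]])  # fills the cache
#   sim.lookup((0, 4))      # -> 4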
Example #2
    def __init__(
        self,
        graph,
        platform,
        trace,
        representation,
        num_iterations=100,
        progress=False,
        radius=3.0,
        random_seed=42,
        record_statistics=False,
        parallel=False,
        dump_cache=False,
        chunk_size=10,
        jobs=1,
    ):
        """Generates a random mapping for a given platform and dataflow application.
        Args:
        :param graph: a dataflow graph
        :type graph: DataflowGraph
        :param platform: a platform
        :type platform: Platform
        :param trace: a trace generator
        :type trace: TraceGenerator
        :param representation: a mapping representation object
        :type representation: MappingRepresentation
        :param random_seed: A random seed for the RNG
        :type random_seed: int
        :param record_statistics: Record statistics on mappings evaluated?
        :type record_statistics: bool
        :param num_iterations: Number of iterations (mappings) in random walk
        :type num_iterations: int
        :param radius: Currently unused.
        :type radius: float
        :param dump_cache: Dump the mapping cache?
        :type dump_cache: bool
        :param chunk_size: Size of chunks for parallel simulation
        :type chunk_size: int
        :param progress: Display simulation progress visually?
        :type progress: bool
        :param parallel: Execute simulations in parallel?
        :type parallel: bool
        :param jobs: Number of jobs for parallel simulation
        :type jobs: int
        """
        self.full_mapper = True
        self.graph = graph
        self.platform = platform
        self.random_mapper = RandomMapper(self.graph,
                                          self.platform,
                                          trace,
                                          representation,
                                          random_seed=None)
        self.num_iterations = num_iterations
        self.dump_cache = dump_cache
        self.seed = random_seed
        self.progress = progress
        if self.seed == "None":
            self.seed = None
        if self.seed is not None:
            random.seed(self.seed)
            np.random.seed(self.seed)

        # This is a workaround until Hydra 1.1 (with recursive instantiation!)
        if not issubclass(type(type(representation)), MappingRepresentation):
            representation = instantiate(representation, graph, platform)
        self.representation = representation

        self.simulation_manager = SimulationManager(
            self.representation,
            trace,
            jobs,
            parallel,
            progress,
            chunk_size,
            record_statistics,
        )
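
The seed handling above tolerates a random_seed that arrives as the literal string "None" (e.g. from the config layer) and only seeds the RNGs for a real seed value. A standalone sketch of the same normalization (seed_rngs is a hypothetical helper, assuming seeds may be string-typed):

import random

import numpy as np


def seed_rngs(random_seed):
    """Normalize a possibly string-typed seed and seed both RNGs,
    mirroring the constructor logic above."""
    if random_seed == "None":  # string sentinel from the config layer
        random_seed = None
    if random_seed is not None:
        random.seed(random_seed)
        np.random.seed(random_seed)
    return random_seed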
Example #3
class RandomWalkMapper(object):
    """Generates a full mapping via a random walk

    This class is used to generate a mapping for a given
    platform and dataflow application, via a random walk through
    the mapping space.
    It produces multiple random mappings and simulates each mapping in
    order to find the 'best' mapping. As outlined below, the mapper expects
    multiple configuration parameters to be available.
    **Hydra Parameters**:
        * **jobs:** the number of parallel jobs
        * **num_iterations:** the total number of mappings to be generated
    """
    def __init__(
        self,
        graph,
        platform,
        trace,
        representation,
        num_iterations=100,
        progress=False,
        radius=3.0,
        random_seed=42,
        record_statistics=False,
        parallel=False,
        dump_cache=False,
        chunk_size=10,
        jobs=1,
    ):
        """Generates a random mapping for a given platform and dataflow application.
        Args:
        :param graph: a dataflow graph
        :type graph: DataflowGraph
        :param platform: a platform
        :type platform: Platform
        :param trace: a trace generator
        :type trace: TraceGenerator
        :param representation: a mapping representation object
        :type representation: MappingRepresentation
        :param random_seed: A random seed for the RNG
        :type random_seed: int
        :param record_statistics: Record statistics on mappings evaluated?
        :type record_statistics: bool
        :param num_iterations: Number of iterations (mappings) in random walk
        :type num_iterations: int
        :param radius: Currently unused.
        :type radius: float
        :param dump_cache: Dump the mapping cache?
        :type dump_cache: bool
        :param chunk_size: Size of chunks for parallel simulation
        :type chunk_size: int
        :param progress: Display simulation progress visually?
        :type progress: bool
        :param parallel: Execute simulations in parallel?
        :type parallel: bool
        :param jobs: Number of jobs for parallel simulation
        :type jobs: int
        """
        self.full_mapper = True
        self.graph = graph
        self.platform = platform
        self.random_mapper = RandomMapper(self.graph,
                                          self.platform,
                                          trace,
                                          representation,
                                          random_seed=None)
        self.num_iterations = num_iterations
        self.dump_cache = dump_cache
        self.seed = random_seed
        self.progress = progress
        if self.seed == "None":
            self.seed = None
        if self.seed is not None:
            random.seed(self.seed)
            np.random.seed(self.seed)

        # This is a workaround until Hydra 1.1 (with recursive instantiation!)
        if not issubclass(type(type(representation)), MappingRepresentation):
            representation = instantiate(representation, graph, platform)
        self.representation = representation

        self.simulation_manager = SimulationManager(
            self.representation,
            trace,
            jobs,
            parallel,
            progress,
            chunk_size,
            record_statistics,
        )

    def generate_mapping(self):
        """Generates a mapping via a random walk"""
        start = timeit.default_timer()
        # Create a list of mappings. These are later simulated, possibly
        # by multiple worker processes.
        mappings = []

        if self.progress:
            iterations_range = tqdm.tqdm(range(self.num_iterations))
        else:
            iterations_range = range(self.num_iterations)
        for _ in iterations_range:
            mapping = self.random_mapper.generate_mapping()
            mappings.append(mapping)
        if (hasattr(self.representation, "canonical_operations")
                and not self.representation.canonical_operations):
            tup = list(
                map(self.representation.toRepresentationNoncanonical,
                    mappings))
        else:
            tup = list(map(self.representation.toRepresentation, mappings))
        sim_results = self.simulation_manager.simulate(tup)
        exec_times = [x.exec_time for x in sim_results]
        best_result_idx = exec_times.index(min(exec_times))
        best_result = mappings[best_result_idx]
        stop = timeit.default_timer()
        log.info("Tried %d random mappings in %0.1fs" %
                 (len(exec_times), stop - start))
        self.simulation_manager.statistics.to_file()
        if self.dump_cache:
            self.simulation_manager.dump("mapping_cache.csv")

        return best_result
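
The selection at the end of generate_mapping is a plain argmin over the simulated execution times. A runnable sketch of that step in isolation (SimRes is a dummy stand-in for the simulation result type used above):

from collections import namedtuple

SimRes = namedtuple("SimRes", "exec_time")

mappings = ["m0", "m1", "m2"]
sim_results = [SimRes(9.0), SimRes(4.5), SimRes(7.2)]
exec_times = [r.exec_time for r in sim_results]
best_result = mappings[exec_times.index(min(exec_times))]
assert best_result == "m1"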
Example #4
    def __init__(
        self,
        graph,
        platform,
        trace,
        representation,
        random_seed=42,
        record_statistics=False,
        initial_temperature=1.0,
        final_temperature=0.1,
        temperature_proportionality_constant=0.5,
        radius=3.0,
        dump_cache=False,
        chunk_size=10,
        progress=False,
        parallel=False,
        jobs=1,
    ):
        """Generates a full mapping for a given platform and dataflow application.

        :param graph: a dataflow graph
        :type graph: DataflowGraph
        :param platform: a platform
        :type platform: Platform
        :param trace: a trace generator
        :type trace: TraceGenerator
        :param representation: a mapping representation object
        :type representation: MappingRepresentation
        :param random_seed: A random seed for the RNG
        :type random_seed: int
        :param initial_temperature: Initial temperature for simulated annealing
        :type initial_temperature: float
        :param final_temperature: Final temperature for simulated annealing
        :type final_temperature: float
        :param temperature_proportionality_constant: Temperature prop. constant for simulated annealing
        :type temperature_proportionality_constant: float
        :param radius: Radius for search when moving
        :type radius: float
        :param record_statistics: Record statistics on mappings evaluated?
        :type record_statistics: bool
        :param dump_cache: Dump the mapping cache?
        :type dump_cache: bool
        :param chunk_size: Size of chunks for parallel simulation
        :type chunk_size: int
        :param progress: Display simulation progress visually?
        :type progress: bool
        :param parallel: Execute simulations in parallel?
        :type parallel: bool
        :param jobs: Number of jobs for parallel simulation
        :type jobs: int
        """
        random.seed(random_seed)
        np.random.seed(random_seed)
        self.full_mapper = True  # flag indicating the mapper type
        self.graph = graph
        self.platform = platform
        self.random_mapper = RandomPartialMapper(self.graph,
                                                 self.platform,
                                                 seed=None)
        self.statistics = Statistics(log, len(self.graph.processes()),
                                     record_statistics)
        self.initial_temperature = initial_temperature
        self.final_temperature = final_temperature
        self.max_rejections = len(self.graph.processes()) * (
            len(self.platform.processors()) - 1)  # R_max = L
        self.p = temperature_proportionality_constant
        self.radius = radius
        self.progress = progress
        self.dump_cache = dump_cache

        if not (1 > self.p > 0):
            log.error(
                f"Temperature proportionality constant {self.p} is not "
                f"suitable; it should be strictly between 0 and 1 (ideally "
                f"close to 1), otherwise the algorithm probably won't terminate."
            )

        # This is a workaround until Hydra 1.1 (with recursive instantiation!)
        if not issubclass(type(type(representation)), MappingRepresentation):
            representation = instantiate(representation, graph, platform)
        self.representation = representation

        self.simulation_manager = SimulationManager(
            self.representation,
            trace,
            jobs,
            parallel,
            progress,
            chunk_size,
            record_statistics,
        )
Example #5
class SimulatedAnnealingMapper(object):
    """Generates a full mapping by using a simulated annealing algorithm from:
    Orsila, H., Kangas, T., Salminen, E., Hämäläinen, T. D., & Hännikäinen, M. (2007).
    Automated memory-aware application distribution for multi-processor system-on-chips.
    Journal of Systems Architecture, 53(11), 795-815.
    """
    def __init__(
        self,
        graph,
        platform,
        trace,
        representation,
        random_seed=42,
        record_statistics=False,
        initial_temperature=1.0,
        final_temperature=0.1,
        temperature_proportionality_constant=0.5,
        radius=3.0,
        dump_cache=False,
        chunk_size=10,
        progress=False,
        parallel=False,
        jobs=1,
    ):
        """Generates a full mapping for a given platform and dataflow application.

        :param graph: a dataflow graph
        :type graph: DataflowGraph
        :param platform: a platform
        :type platform: Platform
        :param trace: a trace generator
        :type trace: TraceGenerator
        :param representation: a mapping representation object
        :type representation: MappingRepresentation
        :param random_seed: A random seed for the RNG
        :type random_seed: int
        :param initial_temperature: Initial temperature for simulated annealing
        :type initial_temperature: float
        :param final_temperature: Final temperature for simulated annealing
        :type final_temperature: float
        :param temperature_proportionality_constant: Temperature prop. constant for simulated annealing
        :type temperature_proportionality_constant: float
        :param radius: Radius for search when moving
        :type radius: float
        :param record_statistics: Record statistics on mappings evaluated?
        :type record_statistics: bool
        :param dump_cache: Dump the mapping cache?
        :type dump_cache: bool
        :param chunk_size: Size of chunks for parallel simulation
        :type chunk_size: int
        :param progress: Display simulation progress visually?
        :type progress: bool
        :param parallel: Execute simulations in parallel?
        :type parallel: bool
        :param jobs: Number of jobs for parallel simulation
        :type jobs: int
        """
        random.seed(random_seed)
        np.random.seed(random_seed)
        self.full_mapper = True  # flag indicating the mapper type
        self.graph = graph
        self.platform = platform
        self.random_mapper = RandomPartialMapper(self.graph,
                                                 self.platform,
                                                 seed=None)
        self.statistics = Statistics(log, len(self.graph.processes()),
                                     record_statistics)
        self.initial_temperature = initial_temperature
        self.final_temperature = final_temperature
        self.max_rejections = len(self.graph.processes()) * (
            len(self.platform.processors()) - 1)  # R_max = L
        self.p = temperature_proportionality_constant
        self.radius = radius
        self.progress = progress
        self.dump_cache = dump_cache

        if not (1 > self.p > 0):
            log.error(
                f"Temperature proportionality constant {self.p} is not "
                f"suitable; it should be strictly between 0 and 1 (ideally "
                f"close to 1), otherwise the algorithm probably won't terminate."
            )

        # This is a workaround until Hydra 1.1 (with recursive instantiation!)
        if not issubclass(type(type(representation)), MappingRepresentation):
            representation = instantiate(representation, graph, platform)
        self.representation = representation

        self.simulation_manager = SimulationManager(
            self.representation,
            trace,
            jobs,
            parallel,
            progress,
            chunk_size,
            record_statistics,
        )

    def temperature_cooling(self, temperature, iteration):
        # geometric schedule: the passed temperature is unused; the new
        # temperature depends only on the iteration count
        return self.initial_temperature * self.p**np.floor(
            iteration / self.max_rejections)

    def query_accept(self, time, temperature):
        with np.errstate(over="raise"):
            try:
                normalized_probability = 1 / (np.exp(
                    time / (0.5 * temperature * self.initial_cost)))
            except FloatingPointError:
                normalized_probability = 0

        return normalized_probability

    def move(self, mapping, temperature):
        radius = self.radius
        while True:
            new_mappings = self.representation._uniformFromBall(
                mapping, radius, 20)
            for m in new_mappings:
                if list(m) != list(mapping):
                    return m
            radius *= 1.1
            if radius > 10000 * self.radius:
                log.error("Could not mutate mapping")
                raise RuntimeError("Could not mutate mapping")

    def generate_mapping(self):
        """Generates a full mapping using simulated anealing"""
        mapping_obj = self.random_mapper.generate_mapping()
        if (hasattr(self.representation, "canonical_operations")
                and not self.representation.canonical_operations):
            mapping = self.representation.toRepresentationNoncanonical(
                mapping_obj)
        else:
            mapping = self.representation.toRepresentation(mapping_obj)

        last_mapping = mapping
        last_simres = self.simulation_manager.simulate([mapping])[0]
        last_exec_time = last_simres.exec_time
        self.initial_cost = last_exec_time
        best_mapping = mapping
        best_exec_time = last_exec_time
        rejections = 0

        iteration = 0
        temperature = self.initial_temperature
        if self.progress:
            pbar = tqdm.tqdm(total=self.max_rejections * 20)

        while rejections < self.max_rejections:
            temperature = self.temperature_cooling(temperature, iteration)
            log.info(f"Current temperature {temperature}")
            mapping = self.move(last_mapping, temperature)
            cur_simres = self.simulation_manager.simulate([mapping])[0]
            cur_exec_time = cur_simres.exec_time
            faster = cur_exec_time < last_exec_time
            if not faster and cur_exec_time != last_exec_time:
                prob = self.query_accept(cur_exec_time - last_exec_time,
                                         temperature)
                rand = random.random()
                accept_randomly = prob > rand
            else:
                accept_randomly = False  # don't accept if no movement.
            if faster or accept_randomly:
                # accept
                if cur_exec_time < best_exec_time:
                    best_exec_time = cur_exec_time
                    best_mapping = mapping
                last_mapping = mapping
                last_exec_time = cur_exec_time
                log.info(f"Rejected ({rejections})")
                rejections = 0
            else:
                # reject
                if temperature <= self.final_temperature:
                    rejections += 1
            iteration += 1
            if self.progress:
                pbar.update(1)
        if self.progress:
            pbar.update(self.max_rejections * 20 - iteration)
            pbar.close()

        self.simulation_manager.statistics.log_statistics()
        self.simulation_manager.statistics.to_file()
        if self.dump_cache:
            self.simulation_manager.dump("mapping_cache.csv")

        return self.representation.fromRepresentation(best_mapping)
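
The cooling schedule and acceptance rule above can be read in isolation: the temperature decays geometrically by the factor p every max_rejections iterations, and a slower mapping is accepted with probability exp(-delta_t / (0.5 * T * initial_cost)). A standalone sketch of both pieces (cooled_temperature and accept_worse are hypothetical helpers):

import random

import numpy as np


def cooled_temperature(t0, p, iteration, r_max):
    # geometric cooling, as in temperature_cooling() above
    return t0 * p ** np.floor(iteration / r_max)


def accept_worse(delta_t, temperature, initial_cost):
    # acceptance probability exp(-delta_t / (0.5 * T * c0)), as in
    # query_accept() above; an overflow in exp maps to probability 0
    with np.errstate(over="raise"):
        try:
            prob = 1 / np.exp(delta_t / (0.5 * temperature * initial_cost))
        except FloatingPointError:
            prob = 0.0
    return random.random() < prob


# e.g. a slowdown of 0.2 at T = 1.0 with initial cost 1.0 is accepted
# with probability exp(-0.4), roughly 0.67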
Example #6
    def __init__(
        self,
        graph,
        platform,
        trace,
        representation,
        random_seed=42,
        record_statistics=False,
        max_iterations=10,
        iteration_size=5,
        tabu_tenure=5,
        move_set_size=10,
        radius=2.0,
        dump_cache=False,
        chunk_size=10,
        progress=False,
        parallel=False,
        jobs=1,
    ):
        """Generates a full mapping for a given platform and dataflow application.

        :param graph: a dataflow graph
        :type graph: DataflowGraph
        :param platform: a platform
        :type platform: Platform
        :param trace: a trace generator
        :type trace: TraceGenerator
        :param representation: a mapping representation object
        :type representation: MappingRepresentation
        :param random_seed: A random seed for the RNG
        :type random_seed: int
        :param record_statistics: Record statistics on mappings evaluated?
        :type record_statistics: bool
        :param max_iterations: Maximal number of iterations of tabu search
        :type max_iterations: int
        :param iteration_size: Size (# mappings) of a single iteration
        :type iteration_size: int
        :param tabu_tenure: How long until a tabu move is allowed again?
        :type tabu_tenure: int
        :param move_set_size: Size of the move set considered in an iteration
        :type move_set_size: int
        :param radius: Radius for updating candidate moves
        :type radius: float
        :param dump_cache: Dump the mapping cache?
        :type dump_cache: bool
        :param chunk_size: Size of chunks for parallel simulation
        :type chunk_size: int
        :param progress: Display simulation progress visually?
        :type progress: bool
        :param parallel: Execute simulations in parallel?
        :type parallel: bool
        :param jobs: Number of jobs for parallel simulation
        :type jobs: int
        """
        random.seed(random_seed)
        np.random.seed(random_seed)
        self.full_mapper = True  # flag indicating the mapper type
        self.graph = graph
        self.platform = platform
        self.random_mapper = RandomPartialMapper(
            self.graph, self.platform, seed=None
        )
        self.max_iterations = max_iterations
        self.iteration_size = iteration_size
        self.tabu_tenure = tabu_tenure
        self.move_set_size = move_set_size
        self.dump_cache = dump_cache
        self.radius = radius
        self.progress = progress
        self.tabu_moves = dict()

        # This is a workaround until Hydra 1.1 (with recursive instantiation!)
        if not issubclass(type(type(representation)), MappingRepresentation):
            representation = instantiate(representation, graph, platform)
        self.representation = representation

        self.simulation_manager = SimulationManager(
            self.representation,
            trace,
            jobs,
            parallel,
            progress,
            chunk_size,
            record_statistics,
        )
Example #7
class TabuSearchMapper(object):
    """Generates a full mapping by using a tabu search on the mapping space."""

    def __init__(
        self,
        graph,
        platform,
        trace,
        representation,
        random_seed=42,
        record_statistics=False,
        max_iterations=10,
        iteration_size=5,
        tabu_tenure=5,
        move_set_size=10,
        radius=2.0,
        dump_cache=False,
        chunk_size=10,
        progress=False,
        parallel=False,
        jobs=1,
    ):
        """Generates a full mapping for a given platform and dataflow application.

        :param graph: a dataflow graph
        :type graph: DataflowGraph
        :param platform: a platform
        :type platform: Platform
        :param trace: a trace generator
        :type trace: TraceGenerator
        :param representation: a mapping representation object
        :type representation: MappingRepresentation
        :param random_seed: A random seed for the RNG
        :type random_seed: int
        :param record_statistics: Record statistics on mappings evaluated?
        :type record_statistics: bool
        :param max_iterations: Maximal number of iterations of tabu search
        :type max_iterations: int
        :param iteration_size: Size (# mappings) of a single iteration
        :type iteration_size: int
        :param tabu_tenure: How long until a tabu move is allowed again?
        :type tabu_tenure: int
        :param move_set_size: Size of the move set considered in an iteration
        :type move_set_size: int
        :param radius: Radius for updating candidate moves
        :type radius: float
        :param dump_cache: Dump the mapping cache?
        :type dump_cache: bool
        :param chunk_size: Size of chunks for parallel simulation
        :type chunk_size: int
        :param progress: Display simulation progress visually?
        :type progress: bool
        :param parallel: Execute simulations in parallel?
        :type parallel: bool
        :param jobs: Number of jobs for parallel simulation
        :type jobs: int
        """
        random.seed(random_seed)
        np.random.seed(random_seed)
        self.full_mapper = True  # flag indicating the mapper type
        self.graph = graph
        self.platform = platform
        self.random_mapper = RandomPartialMapper(
            self.graph, self.platform, seed=None
        )
        self.max_iterations = max_iterations
        self.iteration_size = iteration_size
        self.tabu_tenure = tabu_tenure
        self.move_set_size = move_set_size
        self.dump_cache = dump_cache
        self.radius = radius
        self.progress = progress
        self.tabu_moves = dict()

        # This is a workaround until Hydra 1.1 (with recursive instantiation!)
        if not issubclass(type(type(representation)), MappingRepresentation):
            representation = instantiate(representation, graph, platform)
        self.representation = representation

        self.simulation_manager = SimulationManager(
            self.representation,
            trace,
            jobs,
            parallel,
            progress,
            chunk_size,
            record_statistics,
        )

    def update_candidate_moves(self, mapping):
        new_mappings = self.representation._uniformFromBall(
            mapping, self.radius, self.move_set_size
        )
        new_mappings = list(map(np.array, new_mappings))
        sim_results = self.simulation_manager.simulate(new_mappings)
        sim_exec_times = [x.exec_time for x in sim_results]
        moves = set(
            zip(
                [
                    tuple(new_mapping - np.array(mapping))
                    for new_mapping in new_mappings
                ],
                sim_exec_times,
            )
        )
        missing = self.move_set_size - len(moves)
        retries = 0
        while missing > 0 and retries < 10:
            new_mappings = self.representation._uniformFromBall(
                mapping, self.radius, missing
            )
            sim_results = self.simulation_manager.simulate(new_mappings)
            sim_exec_times = [x.exec_time for x in sim_results]
            new_moves = set(
                zip(
                    [
                        tuple(new_mapping - np.array(mapping))
                        for new_mapping in new_mappings
                    ],
                    sim_exec_times,
                )
            )
            moves = moves.union(new_moves)
            missing = self.move_set_size - len(moves)
            retries += 1
        if missing > 0:
            log.warning(
                f"Running with smaller move list  (by {missing} moves). The radius might be set too small?"
            )
        self.moves = moves

    def move(self, best):
        delete = []
        for move in self.tabu_moves:
            self.tabu_moves[move] -= 1
            if self.tabu_moves[move] <= 0:
                delete.append(move)

        tabu = set(self.tabu_moves.keys())
        for move in delete:
            del self.tabu_moves[move]

        moves_sorted = sorted(list(self.moves), key=lambda x: x[1])
        if moves_sorted[0][1] < best:
            self.tabu_moves[moves_sorted[0][0]] = self.tabu_tenure
            return moves_sorted[0]
        else:
            no_move = tuple(np.zeros(len(moves_sorted[0][0])))
            non_tabu = [
                m for m in moves_sorted if m[0] not in tabu.union({no_move})
            ]
            # no need to re-sort:
            # https://stackoverflow.com/questions/1286167/is-the-order-of-results-coming-from-a-list-comprehension-guaranteed
            if len(non_tabu) > 0:
                self.tabu_moves[non_tabu[0][0]] = self.tabu_tenure
                return non_tabu[0]
            else:
                self.tabu_moves[moves_sorted[0][0]] = self.tabu_tenure
                return moves_sorted[0]

    def diversify(self, mapping):
        new_mappings = self.representation._uniformFromBall(
            mapping, 3 * self.radius, self.move_set_size
        )
        new_mappings = list(map(np.array, new_mappings))
        sim_results = self.simulation_manager.simulate(new_mappings)
        sim_exec_times = [x.exec_time for x in sim_results]
        moves = set(
            zip(
                [
                    tuple(new_mapping - np.array(mapping))
                    for new_mapping in new_mappings
                ],
                sim_exec_times,
            )
        )
        return sorted(moves, key=lambda x: x[1])[0]

    def generate_mapping(self):
        """Generates a full mapping using gradient descent"""
        mapping_obj = self.random_mapper.generate_mapping()
        if (
            hasattr(self.representation, "canonical_operations")
            and not self.representation.canonical_operations
        ):
            cur_mapping = self.representation.toRepresentationNoncanonical(
                mapping_obj
            )
        else:
            cur_mapping = self.representation.toRepresentation(mapping_obj)

        best_mapping = cur_mapping
        best_simres = self.simulation_manager.simulate([cur_mapping])[0]
        best_exec_time = best_simres.exec_time
        since_last_improvement = 0

        if self.progress:
            iterations_range = tqdm.tqdm(range(self.max_iterations))
        else:
            iterations_range = range(self.max_iterations)
        for _ in iterations_range:
            while since_last_improvement < self.iteration_size:
                self.update_candidate_moves(cur_mapping)
                move, cur_exec_time = self.move(
                    best_exec_time
                )  # updates tabu set
                cur_mapping = cur_mapping + np.array(move)
                since_last_improvement += 1
                if cur_exec_time < best_exec_time:
                    since_last_improvement = 0
                    best_exec_time = cur_exec_time
                    best_mapping = cur_mapping

            since_last_improvement = 0
            move, cur_exec_time = self.diversify(cur_mapping)
            cur_mapping = cur_mapping + np.array(move)

        self.simulation_manager.statistics.log_statistics()
        self.simulation_manager.statistics.to_file()
        if self.dump_cache:
            self.simulation_manager.dump("mapping_cache.csv")

        return self.representation.fromRepresentation(np.array(best_mapping))
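
The tabu bookkeeping in move() decrements every stored tenure per call, treats all moves present before the deletion as tabu for the current decision, and then drops the expired entries. A minimal runnable sketch of that mechanism (decay_tabu is a hypothetical helper):

def decay_tabu(tabu_moves):
    """Decrement all tenures in place and drop expired moves; return the
    set of moves considered tabu for this call (collected before the
    deletion, as in move() above)."""
    expired = []
    for move in tabu_moves:
        tabu_moves[move] -= 1
        if tabu_moves[move] <= 0:
            expired.append(move)
    tabu = set(tabu_moves.keys())
    for move in expired:
        del tabu_moves[move]
    return tabu


tenures = {(1, 0): 2, (0, -1): 1}
assert decay_tabu(tenures) == {(1, 0), (0, -1)}
assert tenures == {(1, 0): 1}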
Example #8
    def __init__(
        self,
        graph,
        platform,
        trace,
        representation,
        gd_iterations=100,
        stepsize=2,
        random_seed=42,
        record_statistics=False,
        dump_cache=False,
        chunk_size=10,
        progress=False,
        parallel=False,
        jobs=2,
        momentum_decay=0.5,
        parallel_points=5,
    ):
        """Generates a full mapping for a given platform and dataflow application.

        :param graph: a dataflow graph
        :type graph: DataflowGraph
        :param platform: a platform
        :type platform: Platform
        :param trace: a trace generator
        :type trace: TraceGenerator
        :param representation: a mapping representation object
        :type representation: MappingRepresentation
        :param gd_iterations: Number of iterations for gradient descent
        :type gd_iterations: int
        :param stepsize: Factor scaling the (Barzilai–Borwein) gradient step
        :type stepsize: float
        :param momentum_decay: Decay factor for the gradient momentum term
        :type momentum_decay: float
        :param parallel_points: Number of parallel starting points for the descent
        :type parallel_points: int
        :param random_seed: A random seed for the RNG
        :type random_seed: int
        :param record_statistics: Record statistics on mappings evaluated?
        :type record_statistics: bool
        :param dump_cache: Dump the mapping cache?
        :type dump_cache: bool
        :param chunk_size: Size of chunks for parallel simulation
        :type chunk_size: int
        :param progress: Display simulation progress visually?
        :type progress: bool
        :param parallel: Execute simulations in parallel?
        :type parallel: bool
        :param jobs: Number of jobs for parallel simulation
        :type jobs: int
        """
        random.seed(random_seed)
        np.random.seed(random_seed)
        self.full_mapper = True  # flag indicating the mapper type
        self.graph = graph
        self.platform = platform
        self.num_PEs = len(platform.processors())
        self.random_mapper = RandomPartialMapper(self.graph,
                                                 self.platform,
                                                 seed=None)
        self.gd_iterations = gd_iterations
        self.stepsize = stepsize
        self.momentum_decay = momentum_decay
        self.parallel_points = parallel_points
        self.dump_cache = dump_cache
        self.progress = progress
        self.statistics = Statistics(log, len(self.graph.processes()),
                                     record_statistics)

        # This is a workaround until Hydra 1.1 (with recursive instantiation!)
        if not issubclass(type(type(representation)), MappingRepresentation):
            representation = instantiate(representation, graph, platform)
        self.representation = representation
        self.simulation_manager = SimulationManager(
            self.representation,
            trace,
            jobs,
            parallel,
            progress,
            chunk_size,
            record_statistics,
        )
Example #9
class GradientDescentMapper(object):
    """Generates a full mapping by using a gradient descent on the mapping space."""
    def __init__(
        self,
        graph,
        platform,
        trace,
        representation,
        gd_iterations=100,
        stepsize=2,
        random_seed=42,
        record_statistics=False,
        dump_cache=False,
        chunk_size=10,
        progress=False,
        parallel=False,
        jobs=2,
        momentum_decay=0.5,
        parallel_points=5,
    ):
        """Generates a full mapping for a given platform and dataflow application.

        :param graph: a dataflow graph
        :type graph: DataflowGraph
        :param platform: a platform
        :type platform: Platform
        :param trace: a trace generator
        :type trace: TraceGenerator
        :param representation: a mapping representation object
        :type representation: MappingRepresentation
        :param gd_iterations: Number of iterations for gradient descent
        :type gd_iterations: int
        :param stepsize: Factor scaling the (Barzilai–Borwein) gradient step
        :type stepsize: float
        :param momentum_decay: Decay factor for the gradient momentum term
        :type momentum_decay: float
        :param parallel_points: Number of parallel starting points for the descent
        :type parallel_points: int
        :param random_seed: A random seed for the RNG
        :type random_seed: int
        :param record_statistics: Record statistics on mappings evaluated?
        :type record_statistics: bool
        :param dump_cache: Dump the mapping cache?
        :type dump_cache: bool
        :param chunk_size: Size of chunks for parallel simulation
        :type chunk_size: int
        :param progress: Display simulation progress visually?
        :type progress: bool
        :param parallel: Execute simulations in parallel?
        :type parallel: bool
        :param jobs: Number of jobs for parallel simulation
        :type jobs: int
        """
        random.seed(random_seed)
        np.random.seed(random_seed)
        self.full_mapper = True  # flag indicating the mapper type
        self.graph = graph
        self.platform = platform
        self.num_PEs = len(platform.processors())
        self.random_mapper = RandomPartialMapper(self.graph,
                                                 self.platform,
                                                 seed=None)
        self.gd_iterations = gd_iterations
        self.stepsize = stepsize
        self.momentum_decay = momentum_decay
        self.parallel_points = parallel_points
        self.dump_cache = dump_cache
        self.progress = progress
        self.statistics = Statistics(log, len(self.graph.processes()),
                                     record_statistics)

        # This is a workaround until Hydra 1.1 (with recursive instantiation!)
        if not issubclass(type(type(representation)), MappingRepresentation):
            representation = instantiate(representation, graph, platform)
        self.representation = representation
        self.simulation_manager = SimulationManager(
            self.representation,
            trace,
            jobs,
            parallel,
            progress,
            chunk_size,
            record_statistics,
        )

    def generate_mapping(self):
        """Generates a full mapping using gradient descent"""
        mappings = []
        for _ in range(self.parallel_points):
            mapping_obj = self.random_mapper.generate_mapping()
            if (hasattr(self.representation, "canonical_operations")
                    and not self.representation.canonical_operations):
                m = self.representation.toRepresentationNoncanonical(
                    mapping_obj)
            else:
                m = self.representation.toRepresentation(mapping_obj)
            mappings.append(m)

        self.dim = len(mappings[0])
        cur_sim_results = self.simulation_manager.simulate(mappings)
        cur_exec_times = [x.exec_time for x in cur_sim_results]
        idx = np.argmin(cur_exec_times)
        self.best_mapping = mappings[idx]
        self.best_exec_time = cur_exec_times[idx]
        active_points = list(range(self.parallel_points))
        if self.progress:
            iterations_range = tqdm.tqdm(range(self.gd_iterations))
        else:
            iterations_range = range(self.gd_iterations)
        grads = [0] * self.parallel_points

        # don't check for a loop the first time
        last_mappings = [[np.inf] * self.dim] * self.parallel_points
        log.info(f"Starting gradient descent with {self.parallel_points}"
                 f" parallel points for {self.gd_iterations} iterations."
                 f" Best starting mapping ({idx}): {self.best_exec_time}")

        # main loop
        for _ in iterations_range:
            old_grads = copy.copy(grads)
            for i in active_points:
                grads[i] = (self.momentum_decay * old_grads[i] +
                            self.calculate_gradient(mappings[i],
                                                    cur_exec_times[i]))
                log.debug(f"gradient (point {i}): {grads[i]}")

            before_last_mappings = copy.copy(last_mappings)
            last_mappings = copy.copy(mappings)

            # Barzilai–Borwein. Note that before_last_mappings here still holds
            # the last mappings, since we are currently updating the mappings
            gammas = _calculate_gammas(
                [grads[i] for i in active_points],
                [old_grads[i] for i in active_points],
                [np.array(mappings[i]) for i in active_points],
                [np.array(before_last_mappings[i]) for i in active_points],
            )
            for idx, i in enumerate(active_points):
                # note that gamma has lost the ordering, which is why we enumerate
                mappings[i] = (mappings[i] + gammas[idx] *
                               (-grads[i]) * self.stepsize)
                log.debug(f"moving mapping {i} to: {mappings[i]}")
                mappings[i] = self.representation.approximate(
                    np.array(mappings[i]))
                log.debug(f"approximating to: {mappings[i]}")

            cur_sim_results = self.simulation_manager.simulate(mappings)
            cur_exec_times = [x.exec_time for x in cur_sim_results]
            idx = np.argmin(cur_exec_times)
            log.info(f"{idx} best mapping in batch: {cur_exec_times[idx]}")
            if cur_exec_times[idx] < self.best_exec_time:
                log.info(
                    f"better than old best time ({self.best_exec_time}). Replacing"
                )
                self.best_exec_time = cur_exec_times[idx]
                self.best_mapping = mappings[idx]

            # remove points on (local) minima or stuck on a loop
            finished_points = []
            for i in active_points:
                # found local minimum
                if np.allclose(grads[i], np.zeros(self.dim)):
                    log.info(f"Found local minimum in {i}. Removing point.")
                    finished_points.append(i)

                # stuck in a loop.
                if np.allclose(mappings[i], last_mappings[i]) or np.allclose(
                        mappings[i], before_last_mappings[i]):
                    log.info(f"Point {i} stuck in a loop. Removing point.")
                    log.debug(f"mapping: {mappings[i]}\n last:"
                              f" {last_mappings[i]}\n"
                              f" before_last: {before_last_mappings[i]}")
                    finished_points.append(i)

            for i in finished_points:
                if i in active_points:
                    active_points.remove(i)
            if len(active_points) == 0:
                break

        self.best_mapping = np.array(
            self.representation.approximate(np.array(self.best_mapping)))
        self.simulation_manager.statistics.log_statistics()
        self.simulation_manager.statistics.to_file()
        if self.dump_cache:
            self.simulation_manager.dump("mapping_cache.csv")

        return self.representation.fromRepresentation(self.best_mapping)

    def calculate_gradient(self, mapping, cur_exec_time):
        grad = np.zeros(self.dim)
        m_plus = []
        m_minus = []
        for i in range(self.dim):
            evec = np.zeros(self.dim)
            evec[i] = 1
            m_plus.append(mapping + evec)
            m_minus.append(mapping - evec)

        sim_results = self.simulation_manager.simulate(m_plus + m_minus)
        exec_times = [x.exec_time for x in sim_results]

        for i in range(self.dim):
            diff_plus = exec_times[i] - cur_exec_time
            # the sign flips because of the -h in the denominator of the
            # backward difference quotient
            diff_minus = cur_exec_time - exec_times[i + self.dim]
            grad[i] = (diff_plus + diff_minus) / 2
        return grad
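
calculate_gradient() above estimates a finite-difference gradient with unit steps: per dimension it averages the forward difference f(x + e_i) - f(x) and the backward difference f(x) - f(x - e_i), which equals the central difference (f(x + e_i) - f(x - e_i)) / 2. A runnable sketch against a toy cost function (central_gradient is a hypothetical helper; the real method batches all 2 * dim evaluations through the simulation manager):

import numpy as np


def central_gradient(f, x):
    # same scheme as calculate_gradient() above, with step size h = 1
    dim = len(x)
    grad = np.zeros(dim)
    for i in range(dim):
        e = np.zeros(dim)
        e[i] = 1.0
        grad[i] = (f(x + e) - f(x - e)) / 2
    return grad


f = lambda x: float(np.dot(x, x))  # toy cost; exact gradient is 2 * x
print(central_gradient(f, np.array([1.0, -2.0])))  # -> [ 2. -4.]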
Example #10
    def __init__(
        self,
        graph,
        platform,
        trace,
        representation,
        initials="random",
        objectives=["exec_time"],
        pop_size=10,
        num_gens=5,
        mutpb=0.5,
        cxpb=0.35,
        tournsize=4,
        mupluslambda=True,
        crossover_rate=1,
        radius=2.0,
        random_seed=42,
        record_statistics=True,
        dump_cache=False,
        chunk_size=10,
        progress=False,
        parallel=True,
        jobs=4,
    ):
        """Generates a partial mapping for a given platform and dataflow application.

        :param graph: a dataflow graph
        :type graph: DataflowGraph
        :param platform: a platform
        :type platform: Platform
        :param trace: a trace generator
        :type trace: TraceGenerator
        :param representation: a mapping representation object
        :type representation: MappingRepresentation
        :param initials: what initial population to use (e.g. random)
        :type initials: string
        :param objectives: Optimization objectives
        :type objectives: list of strings
        :param pop_size: Population size
        :type pop_size: int
        :param num_gens: Number of generations
        :type num_gens: int
        :param mutpb: Probability of mutation
        :type mutpb: float
        :param cxpb: Crossover probability
        :type cxpb: float
        :param tournsize: Size of tournament for selection
        :type tournsize: int
        :param mupluslambda: Use the mu+lambda algorithm? If False, use mu,lambda
        :type mupluslambda: bool
        :param crossover_rate: The number of crossovers in the crossover operator
        :type crossover_rate: int
        :param radius: The radius for searching mutations
        :type radius: float
        :param random_seed: A random seed for the RNG
        :type random_seed: int
        :param record_statistics: Record statistics on mappings evaluated?
        :type record_statistics: bool
        :param dump_cache: Dump the mapping cache?
        :type dump_cache: bool
        :param chunk_size: Size of chunks for parallel simulation
        :type chunk_size: int
        :param progress: Display simulation progress visually?
        :type progress: bool
        :param parallel: Execute simulations in parallel?
        :type parallel: bool
        :param jobs: Number of jobs for parallel simulation
        :type jobs: int
        """
        random.seed(random_seed)
        np.random.seed(random_seed)
        self.full_mapper = True  # flag indicating the mapper type
        self.graph = graph
        self.platform = platform
        self.crossover_rate = crossover_rate
        self.objectives = Objectives.from_string_list(objectives)
        self.pop_size = pop_size
        self.num_gens = num_gens
        self.mutpb = mutpb
        self.cxpb = cxpb
        self.mupluslambda = mupluslambda
        self.dump_cache = dump_cache
        self.radius = radius
        self.progress = progress

        objective_resources = Objectives.RESOURCES in self.objectives
        self.random_mapper = RandomPartialMapper(
            self.graph, self.platform, resources_first=objective_resources)

        if Objectives.ENERGY in self.objectives:
            if not self.platform.has_power_model():
                log.warning(
                    "The platform does not have a power model, excluding "
                    "energy consumption from the objectives.")
                self.objectives ^= Objectives.ENERGY

        if self.objectives == Objectives.NONE:
            raise RuntimeError(
                "Trying to initalize genetic algorithm without objectives")

        if self.crossover_rate > len(self.graph.processes()):
            log.error(
                "Crossover rate cannot be higher than number of processes "
                "in application")
            raise RuntimeError("Invalid crossover rate")

        # This is a workaround until Hydra 1.1 (with recursive instantiation!)
        if not issubclass(type(type(representation)), MappingRepresentation):
            representation = instantiate(representation, graph, platform)
        self.representation = representation
        self.simulation_manager = SimulationManager(
            self.representation,
            trace,
            jobs,
            parallel,
            progress,
            chunk_size,
            record_statistics,
        )

        if "FitnessMin" not in deap.creator.__dict__:
            num_params = 0
            if Objectives.EXEC_TIME in self.objectives:
                num_params += 1
            if Objectives.ENERGY in self.objectives:
                num_params += 1
            if Objectives.RESOURCES in self.objectives:
                num_params += len(self.platform.get_processor_types())
            # this will weigh a millisecond as equivalent to an additional core
            # todo: add a general parameter for controlling weights
            deap.creator.create("FitnessMin",
                                deap.base.Fitness,
                                weights=num_params * (-1.0, ))

        if "Individual" not in deap.creator.__dict__:
            deap.creator.create("Individual",
                                list,
                                fitness=deap.creator.FitnessMin)

        toolbox = deap.base.Toolbox()
        toolbox.register("attribute", random.random)
        toolbox.register("mapping", self.random_mapping)
        toolbox.register(
            "individual",
            deap.tools.initIterate,
            deap.creator.Individual,
            toolbox.mapping,
        )
        toolbox.register("population", deap.tools.initRepeat, list,
                         toolbox.individual)
        toolbox.register("mate", self.mapping_crossover)
        toolbox.register("mutate", self.mapping_mutation)
        toolbox.register("evaluate", self.evaluate_mapping)
        toolbox.register("select",
                         deap.tools.selTournament,
                         tournsize=tournsize)

        self.evolutionary_toolbox = toolbox
        self.hof = (
            deap.tools.ParetoFront()
        )  # todo: we could add symmetry comparison (or other similarity) here
        stats = deap.tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean)
        stats.register("std", np.std)
        stats.register("min", np.min)
        stats.register("max", np.max)
        self.evolutionary_stats = stats

        if initials == "random":
            self.population = toolbox.population(n=self.pop_size)
        else:
            log.error("Initials not supported yet")
            raise RuntimeError("GeneticMapper: Initials not supported")
Example #11
class GeneticMapper(object):
    """Generates a full mapping by using genetic algorithms."""
    def __init__(
        self,
        graph,
        platform,
        trace,
        representation,
        initials="random",
        objectives=["exec_time"],
        pop_size=10,
        num_gens=5,
        mutpb=0.5,
        cxpb=0.35,
        tournsize=4,
        mupluslambda=True,
        crossover_rate=1,
        radius=2.0,
        random_seed=42,
        record_statistics=True,
        dump_cache=False,
        chunk_size=10,
        progress=False,
        parallel=True,
        jobs=4,
    ):
        """Generates a partial mapping for a given platform and dataflow application.

        :param graph: a dataflow graph
        :type graph: DataflowGraph
        :param platform: a platform
        :type platform: Platform
        :param trace: a trace generator
        :type trace: TraceGenerator
        :param representation: a mapping representation object
        :type representation: MappingRepresentation
        :param initials: what initial population to use (e.g. random)
        :type initials: string
        :param objectives: Optimization objectives
        :type objectives: list of strings
        :param pop_size: Population size
        :type pop_size: int
        :param num_gens: Number of generations
        :type num_gens: int
        :param mutpb: Probability of mutation
        :type mutpb: float
        :param cxpb: Crossover probability
        :type cxpb: float
        :param tournsize: Size of tournament for selection
        :type tournsize: int
        :param mupluslambda: Use the mu+lambda algorithm? If False, use mu,lambda
        :type mupluslambda: bool
        :param crossover_rate: The number of crossovers in the crossover operator
        :type crossover_rate: int
        :param radius: The radius for searching mutations
        :type radius: float
        :param random_seed: A random seed for the RNG
        :type random_seed: int
        :param record_statistics: Record statistics on mappings evaluated?
        :type record_statistics: bool
        :param dump_cache: Dump the mapping cache?
        :type dump_cache: bool
        :param chunk_size: Size of chunks for parallel simulation
        :type chunk_size: int
        :param progress: Display simulation progress visually?
        :type progress: bool
        :param parallel: Execute simulations in parallel?
        :type parallel: bool
        :param jobs: Number of jobs for parallel simulation
        :type jobs: int
        """
        random.seed(random_seed)
        np.random.seed(random_seed)
        self.full_mapper = True  # flag indicating the mapper type
        self.graph = graph
        self.platform = platform
        self.crossover_rate = crossover_rate
        self.objectives = Objectives.from_string_list(objectives)
        self.pop_size = pop_size
        self.num_gens = num_gens
        self.mutpb = mutpb
        self.cxpb = cxpb
        self.mupluslambda = mupluslambda
        self.dump_cache = dump_cache
        self.radius = radius
        self.progress = progress

        objective_resources = Objectives.RESOURCES in self.objectives
        self.random_mapper = RandomPartialMapper(
            self.graph, self.platform, resources_first=objective_resources)

        if Objectives.ENERGY in self.objectives:
            if not self.platform.has_power_model():
                log.warning(
                    "The platform does not have a power model, excluding "
                    "energy consumption from the objectives.")
                self.objectives ^= Objectives.ENERGY

        if self.objectives == Objectives.NONE:
            raise RuntimeError(
                "Trying to initalize genetic algorithm without objectives")

        if self.crossover_rate > len(self.graph.processes()):
            log.error(
                "Crossover rate cannot be higher than number of processes "
                "in application")
            raise RuntimeError("Invalid crossover rate")

        # This is a workaround until Hydra 1.1 (with recursive instantiation!)
        if not issubclass(type(type(representation)), MappingRepresentation):
            representation = instantiate(representation, graph, platform)
        self.representation = representation
        self.simulation_manager = SimulationManager(
            self.representation,
            trace,
            jobs,
            parallel,
            progress,
            chunk_size,
            record_statistics,
        )

        if "FitnessMin" not in deap.creator.__dict__:
            num_params = 0
            if Objectives.EXEC_TIME in self.objectives:
                num_params += 1
            if Objectives.ENERGY in self.objectives:
                num_params += 1
            if Objectives.RESOURCES in self.objectives:
                num_params += len(self.platform.get_processor_types())
            # this will weigh a millisecond as equivalent to an additional core
            # todo: add a general parameter for controlling weights
            deap.creator.create("FitnessMin",
                                deap.base.Fitness,
                                weights=num_params * (-1.0, ))

        if "Individual" not in deap.creator.__dict__:
            deap.creator.create("Individual",
                                list,
                                fitness=deap.creator.FitnessMin)

        toolbox = deap.base.Toolbox()
        toolbox.register("attribute", random.random)
        toolbox.register("mapping", self.random_mapping)
        toolbox.register(
            "individual",
            deap.tools.initIterate,
            deap.creator.Individual,
            toolbox.mapping,
        )
        toolbox.register("population", deap.tools.initRepeat, list,
                         toolbox.individual)
        toolbox.register("mate", self.mapping_crossover)
        toolbox.register("mutate", self.mapping_mutation)
        toolbox.register("evaluate", self.evaluate_mapping)
        toolbox.register("select",
                         deap.tools.selTournament,
                         tournsize=tournsize)

        self.evolutionary_toolbox = toolbox
        self.hof = (
            deap.tools.ParetoFront()
        )  # todo: we could add symmetry comparison (or other similarity) here
        stats = deap.tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean)
        stats.register("std", np.std)
        stats.register("min", np.min)
        stats.register("max", np.max)
        self.evolutionary_stats = stats

        if initials == "random":
            self.population = toolbox.population(n=self.pop_size)
        else:
            log.error("Initials not supported yet")
            raise RuntimeError("GeneticMapper: Initials not supported")
            # toolbox.register("individual_guess", self.initIndividual, creator.Individual)
            # toolbox.register("population_guess", self.initPopulation, list, toolbox.individual_guess, initials,pop_size)
            # population = toolbox.population_guess()

    def evaluate_mapping(self, mapping):
        result = []
        simres = self.simulation_manager.simulate([list(mapping)])[0]
        if Objectives.EXEC_TIME in self.objectives:
            result.append(simres.exec_time)
        if Objectives.ENERGY in self.objectives:
            result.append(simres.dynamic_energy)
        if Objectives.RESOURCES in self.objectives:
            mapping_obj = self.representation.fromRepresentation(list(mapping))
            resource_dict = mapping_obj.to_resourceDict()
            for core_type in resource_dict:
                result.append(resource_dict[core_type])
        return tuple(result)

    def random_mapping(self):
        mapping = self.random_mapper.generate_mapping()
        if (hasattr(self.representation, "canonical_operations")
                and not self.representation.canonical_operations):
            as_rep = self.representation.toRepresentationNoncanonical(mapping)
        else:
            as_rep = self.representation.toRepresentation(mapping)
        return list(as_rep)

    def mapping_crossover(self, m1, m2):
        return self.representation._crossover(m1, m2, self.crossover_rate)

    def mapping_mutation(self, mapping):
        # m_obj = self.representation.fromRepresentation(list((mapping)))
        radius = self.radius
        while True:
            new_mappings = self.representation._uniformFromBall(
                mapping, radius, 20)
            for m in new_mappings:
                if list(m) != list(mapping):
                    for i in range(len(mapping)):
                        # we do this since mapping is a DEAP Individual data structure
                        mapping[i] = m[i]
                    return (mapping, )
            radius *= 1.1
            if radius > 10000 * self.radius:
                log.error("Could not mutate mapping")
                raise RuntimeError("Could not mutate mapping")

    def run_genetic_algorithm(self):
        toolbox = self.evolutionary_toolbox
        stats = self.evolutionary_stats
        hof = self.hof
        pop_size = self.pop_size
        num_gens = self.num_gens
        cxpb = self.cxpb
        mutpb = self.mutpb
        population = self.population

        if self.mupluslambda:
            population, logbook = deap.algorithms.eaMuPlusLambda(
                population,
                toolbox,
                mu=pop_size,
                lambda_=3 * pop_size,
                cxpb=cxpb,
                mutpb=mutpb,
                ngen=num_gens,
                stats=stats,
                halloffame=hof,
                verbose=self.progress,
            )
            log.info(logbook.stream)
        else:
            population, logbook = deap.algorithms.eaMuCommaLambda(
                population,
                toolbox,
                mu=pop_size,
                lambda_=3 * pop_size,
                cxpb=cxpb,
                mutpb=mutpb,
                ngen=num_gens,
                stats=stats,
                halloffame=hof,
                verbose=self.progress,
            )
            log.info(logbook.stream)

        return population, logbook, hof

    def generate_mapping(self):
        """Generates a full mapping using a genetic algorithm"""
        _, logbook, hof = self.run_genetic_algorithm()
        mapping = hof[0]
        self.simulation_manager.statistics.log_statistics()
        with open("evolutionary_logbook.txt", "w") as f:
            f.write(str(logbook))
        result = self.representation.fromRepresentation(np.array(mapping))
        self.simulation_manager.statistics.to_file()
        if self.dump_cache:
            self.simulation_manager.dump("mapping_cache.csv")
        self.cleanup()
        return result

    def generate_pareto_front(self):
        """Generates a pareto front of (full) mappings using a genetic algorithm
        the input parameters determine the criteria with which the pareto
        front is going to be built.
        """
        _, logbook, hof = self.run_genetic_algorithm()
        results = []
        self.simulation_manager.statistics.log_statistics()
        with open("evolutionary_logbook.pickle", "wb") as f:
            pickle.dump(logbook, f)
        for mapping in hof:
            mapping_object = self.representation.fromRepresentation(
                np.array(mapping))
            self.simulation_manager.append_mapping_metadata(mapping_object)
            results.append(mapping_object)
        self.simulation_manager.statistics.to_file()
        if self.dump_cache:
            self.simulation_manager.dump("mapping_cache.csv")
        self.cleanup()
        return results

    def cleanup(self):
        log.info("cleaning up")
        toolbox = self.evolutionary_toolbox
        toolbox.unregister("attribute")
        toolbox.unregister("mapping")
        toolbox.unregister("individual")
        toolbox.unregister("population")
        toolbox.unregister("mate")
        toolbox.unregister("mutate")
        toolbox.unregister("evaluate")
        toolbox.unregister("select")
        stats = self.evolutionary_stats
        self.evolutionary_stats = None
        del stats
        del deap.creator.FitnessMin
        del deap.creator.Individual
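
The DEAP setup in __init__ above weights every enabled objective with -1.0, i.e. pure minimization across however many objectives are active. A self-contained sketch of that pattern (the Sketch* names are hypothetical, chosen to avoid clashing with the FitnessMin/Individual classes created above; assumes the deap package is installed):

import random

import deap.base
import deap.creator
import deap.tools

num_objectives = 2  # e.g. exec_time and energy, as in the mapper above
deap.creator.create("SketchFitnessMin", deap.base.Fitness,
                    weights=num_objectives * (-1.0,))
deap.creator.create("SketchIndividual", list,
                    fitness=deap.creator.SketchFitnessMin)

toolbox = deap.base.Toolbox()
toolbox.register("individual", deap.tools.initIterate,
                 deap.creator.SketchIndividual,
                 lambda: [random.randint(0, 3) for _ in range(5)])
toolbox.register("population", deap.tools.initRepeat, list,
                 toolbox.individual)

population = toolbox.population(n=4)
for ind in population:
    # dummy bi-objective cost; the mapper above uses simulation results
    ind.fitness.values = (sum(ind), max(ind))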