def mapping(graph, platform):
    com_mapper = ComFullMapper(graph, platform)
    mapper = ProcPartialMapper(graph, platform, com_mapper)
    from_list = []
    # Map process "a"
    from_list.append(0)
    # Map process "b"
    from_list.append(1)
    mapping = mapper.generate_mapping(from_list)
    return mapping
def test_allEquivalent(platform, graph):
    com_mapper = ComFullMapper(graph, platform)
    mapper = ProcPartialMapper(graph, platform, com_mapper)
    mapping = mapper.generate_mapping([0, 1])

    representation = SymmetryRepresentation(graph, platform)
    assert len(list(representation.allEquivalent(mapping))) == 12
    assert (
        len(list(representation.allEquivalent(mapping, only_support=True)))
        == 6
    )

    representation = SymmetryRepresentation(graph, platform,
                                            disable_mpsym=True)
    assert len(list(representation.allEquivalent(mapping))) == 12
    assert (
        len(list(representation.allEquivalent(mapping, only_support=True)))
        == 6
    )
def __call__(cls, *args, **kwargs):
    time = timeit.default_timer()
    graph = args[0]
    platform = args[1]
    # make hashable keys out of these two
    graph_names, platform_names = MappingRepresentation.gen_hash(
        graph, platform)

    # Instances are cached under the hashable name keys. The original code
    # checked membership with (cls, graph, platform) but stored under
    # (cls, graph_names, platform_names), so the cache never hit; use the
    # name-based key consistently.
    key = (cls, graph_names, platform_names)
    different = False
    if key in cls._instances:
        different = cls._instances[key].changed_parameters(*args[2:])
    if key not in cls._instances or different:
        cls._instances[key] = super(
            MappingRepresentation, cls).__call__(*args, **kwargs)
        log.info(
            f"Initializing representation {cls} of graph with processes: "
            f"{graph_names} on platform with cores {platform_names}")

    instance = copy(cls._instances[key])
    instance.graph = graph
    instance.platform = platform
    com_mapper = ComFullMapper(graph, platform)
    instance.list_mapper = ProcPartialMapper(graph, platform, com_mapper)
    instance.init_time = timeit.default_timer() - time
    return instance
class MappingCompletionWrapper:
    """A wrapper class for different partial and full mappers in order
    to create a complete mapping out of a process mapping vector.
    """

    def __init__(self, graph, platform):
        self.__graph = graph
        self.__platform = platform
        self.__fullMapper = RandomPartialMapper(graph, platform, None)
        self.__communicationMapper = ComPartialMapper(graph, platform, self)
        self.__processMapper = ProcPartialMapper(graph, platform, self)

    def completeMappingAtRandom(self, processMappingVector):
        # create empty mapping, complete it with generated process mapping
        # and random channel mapping
        mapping = Mapping(self.__graph, self.__platform)
        mapping.from_list(processMappingVector)
        assert mapping.get_unmapped_channels() == []
        assert mapping.get_unmapped_processes() == []
        return mapping

    def completeMappingBestEffort(self, processMappingVector):
        mapping = self.__processMapper.generate_mapping(processMappingVector)
        mapping = self.__fullMapper.generate_mapping(part_mapping=mapping)
        # self.__processMapper.reset()  # not sure what this is supposed to do
        return mapping

    def generate_mapping(self, mapping):
        return mapping
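# A minimal usage sketch (not from the source; assumes `graph` and `platform`
# are valid DataflowGraph/Platform objects and the graph has two processes):
# complete a process-to-PE vector into a full Mapping object.
def example_complete_mapping(graph, platform):
    wrapper = MappingCompletionWrapper(graph, platform)
    mapping = wrapper.completeMappingBestEffort([0, 1])
    assert mapping.get_unmapped_processes() == []
    assert mapping.get_unmapped_channels() == []
    return mapping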
def apply_singlePerturbation(self, mapping, history):
    """Creates a single unique core perturbation of the given mapping.

    To this end, the mapping is interpreted as a vector with the processor
    cores assigned to the vector elements.
    """
    rand_part_mapper = RandomPartialMapper(self.graph, self.platform)
    proc_part_mapper = ProcPartialMapper(
        self.graph, self.platform, rand_part_mapper
    )
    iteration_max = self.iteration_max

    pe = rand.randint(0, len(list(self.platform.processors())) - 1)
    process = rand.randint(0, len(list(self.graph.processes())) - 1)

    vec = []
    # assign cores to vector
    pe_mapping = proc_part_mapper.get_pe_name_mapping()
    log.debug(str(pe_mapping))
    for p in self.graph.processes():
        log.debug(mapping.affinity(p).name)
        vec.append(pe_mapping[mapping.affinity(p).name])

    log.debug("Process: {} PE: {} vec: {}".format(process, pe, vec))
    orig_vec = vec[:]
    vec[process] = pe  # apply initial perturbation to mapping
    log.debug("Perturbed vec: {} Original vec: {}".format(vec, orig_vec))

    # The code above can produce identical perturbations; the following
    # loop prevents this:
    timeout = 0
    while timeout < iteration_max:
        perturbated_mapping = proc_part_mapper.generate_mapping(
            vec, history
        )
        if perturbated_mapping:
            break
        else:
            pe = rand.randint(0, len(list(self.platform.processors())) - 1)
            process = rand.randint(0, len(list(self.graph.processes())) - 1)
            vec[process] = pe  # apply a new perturbation to mapping
            timeout += 1

    if timeout == iteration_max:
        log.error("Could not find a new perturbation")
        sys.exit(1)

    return perturbated_mapping
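# A simplified standalone sketch of the perturbation step above (hypothetical
# helper, not part of the source): move one randomly chosen process to a
# randomly chosen PE and rebuild a complete mapping from the perturbed vector.
# `rand` is the same module-level random alias used above.
import random as rand

def example_perturb_once(graph, platform, mapping):
    rand_part_mapper = RandomPartialMapper(graph, platform)
    proc_part_mapper = ProcPartialMapper(graph, platform, rand_part_mapper)
    vec = mapping.to_list(channels=False)  # process-to-PE vector
    process = rand.randint(0, len(vec) - 1)
    vec[process] = rand.randint(0, len(list(platform.processors())) - 1)
    return proc_part_mapper.generate_mapping(vec)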
def pareto_mappings(platform, graph):
    com_mapper = ComFullMapper(graph, platform)
    mapper = ProcPartialMapper(graph, platform, com_mapper)
    mapping_tuples = [
        ([0, 0], 10.2, 21.45),
        ([0, 5], 5.2, 31.15),
        ([0, 1], 9.7, 23.45),
        ([4, 4], 6.0, 35.45),
        ([4, 5], 4.32, 39.1),
    ]
    mappings = []
    for tup in mapping_tuples:
        mapping = mapper.generate_mapping(tup[0])
        mapping.metadata.exec_time = tup[1]
        mapping.metadata.energy = tup[2]
        mappings.append(mapping)
    return mappings
def test_mapping_table_writer_with(platform, graph, tmpdir, expected_csv):
    output_file = Path(tmpdir).joinpath("output_table.csv")
    com_mapper = ComFullMapper(graph, platform)
    mapper = ProcPartialMapper(graph, platform, com_mapper)

    mapping1 = mapper.generate_mapping([0, 0])
    mapping1.metadata.exec_time = 10.2
    mapping1.metadata.energy = 21.45
    mapping2 = mapper.generate_mapping([0, 5])
    mapping2.metadata.exec_time = 5.2
    mapping2.metadata.energy = 31.15

    attributes = {"num_resources": num_resources}
    with MappingTableWriter(
        platform, graph, output_file, attributes=attributes
    ) as writer:
        writer.write_header()
        writer.write_mapping(mapping1)
        writer.write_mapping(mapping2)

    assert filecmp.cmp(output_file, expected_csv, shallow=False)
class MappingTableReader:
    """A CSV mapping table reader.

    This class reads the content of a CSV file and returns a list of
    mappings along with their attributes.

    Rows of the CSV table describe different mappings of an application to
    a platform. The columns starting with `process_prefix` and ending with
    `process_suffix` describe the process allocation onto the platform.
    The columns `metadata_exec_time` and `metadata_energy` describe the
    metadata values. The columns in the `attributes` list describe the
    additional mapping attributes to extract.

    :param platform: The platform.
    :type platform: Platform
    :param graph: The dataflow graph.
    :type graph: DataflowGraph
    :param path: The path to the CSV file.
    :type path: string
    :param process_prefix: The prefix of processes in the CSV table.
    :type process_prefix: string
    :param process_suffix: The suffix of processes in the CSV table.
    :type process_suffix: string
    :param metadata_exec_time: The name of the execution time column.
    :type metadata_exec_time: string or None
    :param metadata_energy: The name of the energy column.
    :type metadata_energy: string or None
    :param attributes: The list of attributes to extract.
    :type attributes: list of strings or None
    """

    def __init__(
        self,
        platform,
        graph,
        path,
        process_prefix="t_",
        process_suffix="",
        metadata_exec_time="executionTime",
        metadata_energy="dynamicEnergy",
        attributes=None,
    ):
        self.platform = platform
        self.graph = graph
        self.path = path
        self._process_prefix = process_prefix
        self._process_suffix = process_suffix
        self._metadata_exec_time = metadata_exec_time
        self._metadata_energy = metadata_energy
        self._attributes = attributes
        if self._attributes is None:
            self._attributes = []
        # Parsed data
        self._data = []
        # Read and constructed mappings
        self.mappings = None

        self.com_mapper = ComFullMapper(graph, platform)
        self.mapper = ProcPartialMapper(graph, platform, self.com_mapper)

        self._process_names = [p.name for p in self.graph.processes()]
        self._processor_numbers = {}
        for i, pe in enumerate(self.platform.processors()):
            self._processor_numbers[pe.name] = i

        self._read_csv()

    def _read_csv(self):
        prefix = self._process_prefix
        suffix = self._process_suffix
        time_col = self._metadata_exec_time
        energy_col = self._metadata_energy

        with open(self.path) as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                to_update = {}
                for name in self._process_names:
                    to_update.update({name: row[prefix + name + suffix]})
                # Save desired properties to a dict
                for p in self._attributes:
                    to_update.update({p: row[p]})
                # Save energy-utility metadata to a dict
                if time_col is not None:
                    to_update.update({time_col: row[time_col]})
                if energy_col is not None:
                    to_update.update({energy_col: row[energy_col]})
                self._data.append(to_update)

    def form_mappings(self):
        """Form mappings from the parsed data.

        Returns a list of tuples; the first element of each tuple is the
        `Mapping` object, the following elements are the attribute values
        in the order of the `attributes` parameter.
        """
        if self.mappings is not None:
            log.warning("Mappings were already generated, returning them.")
            return self.mappings

        time_col = self._metadata_exec_time
        energy_col = self._metadata_energy

        self.mappings = []
        for entry in self._data:
            from_list = []
            for process in self._process_names:
                pe = self._processor_numbers[entry[process]]
                from_list.append(pe)
            if from_list != []:
                mapping = self.mapper.generate_mapping(from_list)
                # Update energy-utility metadata
                if time_col is not None:
                    mapping.metadata.exec_time = float(entry[time_col])
                if energy_col is not None:
                    mapping.metadata.energy = float(entry[energy_col])
            else:
                mapping = Mapping(self.graph, self.platform)
            self.mappings.append(
                (mapping,) + tuple(entry[p] for p in self._attributes)
            )
        return self.mappings
def mapper(graph, platform_odroid):
    com_mapper = ComFullMapper(graph, platform_odroid)
    return ProcPartialMapper(graph, platform_odroid, com_mapper)
class SimpleVectorRepresentation(metaclass=MappingRepresentation):
    """Simple Vector Representation

    This representation treats mappings as vectors. The first dimensions
    (or components) of this vector represent the processes, and the values
    represent the PE where said processes are mapped. After the mapping of
    processes to PEs, the same method is applied to encode the channel to
    communication primitive mapping. This is only done if the channels
    variable is set when initializing the representation object.

    A visualization of the encoding:

    [ P_1, P_2, ... , P_k, C_1, ..., C_l ]
      ^ PE where             ^ Comm. prim. where
      process 1              chan. 1 is mapped.
      is mapped.

    Methods generally work with objects of the
    `mocasin.common.mapping.Mapping` class. Exceptions are the
    fromRepresentation method, which takes a vector and returns a Mapping
    object, and methods prefixed with an "_".

    Methods prefixed with "_", like _uniformFromBall, generally work
    directly with the representation. Their usage is discouraged in favor
    of the standard interface, but they are provided in case they prove
    useful, when you know what you are doing.
    """

    def __init__(
        self,
        graph,
        platform,
        channels=False,
        periodic_boundary_conditions=False,
        norm_p=2,
    ):
        self.graph = graph
        self.platform = platform
        self.channels = channels
        self.boundary_conditions = periodic_boundary_conditions
        self.p = norm_p
        self.num_procs = len(list(self.graph._processes.keys()))
        com_mapper = ComFullMapper(graph, platform)
        self.list_mapper = ProcPartialMapper(graph, platform, com_mapper)

    def changed_parameters(self, channels, periodic_boundary_conditions,
                           norm_p):
        return (self.channels != channels
                or self.boundary_conditions != periodic_boundary_conditions
                or self.p != norm_p)

    def _uniform(self):
        Procs = sorted(list(self.graph._processes.keys()))
        PEs = sorted(list(self.platform._processors.keys()))
        pe_mapping = list(randint(0, len(PEs), size=len(Procs)))
        if self.channels:
            return SimpleVectorRepresentation.randomPrimitives(
                self, pe_mapping)
        else:
            return pe_mapping

    def randomPrimitives(self, pe_mapping):
        Procs = sorted(list(self.graph._processes.keys()))
        PEs = sorted(list(self.platform._processors.keys()))
        CPs = sorted(list(self.platform._primitives.keys()))
        res = pe_mapping[:len(Procs)]
        for c in self.graph.channels():
            suitable_primitives = []
            for p in self.platform.primitives():
                # assert: the list in the next line has exactly one element
                src_proc_idx = [
                    i for i, x in enumerate(Procs) if x == c.source.name
                ][0]
                src_pe_name = PEs[res[src_proc_idx]]
                src = self.platform.find_processor(src_pe_name)
                sink_procs_idxs = [
                    i for i, x in enumerate(Procs)
                    if x in [snk.name for snk in c.sinks]
                ]
                try:
                    sink_pe_names = [PEs[res[s]] for s in sink_procs_idxs]
                except IndexError:
                    log.error(f"Invalid mapping: {res} \n PEs: "
                              f"{PEs},\n sink_procs_idxs: "
                              f"{sink_procs_idxs}\n")
                    raise
                sinks = [
                    self.platform.find_processor(snk)
                    for snk in sink_pe_names
                ]
                if p.is_suitable(src, sinks):
                    suitable_primitives.append(p)
            primitive = suitable_primitives[randint(
                0, len(suitable_primitives))].name
            primitive_idx = [i for i, x in enumerate(CPs)
                             if x == primitive][0]
            res.append(primitive_idx)
        return res

    def uniform(self):
        return self.fromRepresentation(self._uniform())

    def toRepresentation(self, mapping):
        return mapping.to_list(channels=self.channels)

    def fromRepresentation(self, mapping):
        if type(mapping) == np.ndarray:
            mapping = mapping.astype(int)
        mapping_obj = self.list_mapper.generate_mapping(mapping)
        return mapping_obj

    def _simpleVec2Elem(self, x):
        if not self.channels:
            return x
        else:
            m = self.list_mapper.generate_mapping(x)
            return m.to_list(channels=True)
    def _elem2SimpleVec(self, x):
        if self.channels:
            return x
        else:
            return x[:self.num_procs]

    def _uniformFromBall(self, p, r, npoints=1, simple=False):
        Procs = list(self.graph._processes.keys())
        PEs = list(self.platform._processors.keys())
        P = len(PEs)
        res = []

        def _round(point):
            # periodic boundary conditions
            rounded = int(round(point) % P)
            if self.boundary_conditions:
                return rounded
            else:
                if point > P - 1:
                    return P - 1
                elif point < 0:
                    return 0
                else:
                    return rounded

        center = p[:len(Procs)]
        for _ in range(npoints):
            if simple:
                radius = _round(r / 2)
                offset = []
                for _ in range(len(Procs)):
                    offset.append(randint(-radius, radius))
            else:
                offset = r * lp.uniform_from_p_ball(p=self.p, n=len(Procs))
            real_point = (np.array(center) + np.array(offset)).tolist()
            v = list(map(_round, real_point))
            if self.channels:
                res.append(self.randomPrimitives(v))
            else:
                res.append(v)
        log.debug(f"uniform from ball: {res}")
        return res

    def uniformFromBall(self, p, r, npoints=1):
        return self.fromRepresentation(
            self._uniformFromBall(p, r, npoints=npoints))

    def distance(self, x, y):
        a = np.array(x)
        b = np.array(y)
        return np.linalg.norm(a - b)

    def _distance(self, x, y):
        return self.distance(x, y)

    def approximate(self, x):
        approx = np.rint(x).astype(int)
        P = len(list(self.platform._processors.keys()))
        if self.boundary_conditions:
            res = list(map(lambda t: t % P, approx))
        else:
            res = list(map(lambda t: max(0, min(t, P - 1)), approx))
        return res

    def crossover(self, m1, m2, k):
        return self._crossover(self.toRepresentation(m1),
                               self.toRepresentation(m2), k)

    def _crossover(self, m1, m2, k):
        assert len(m1) == len(m2)
        crossover_points = random.sample(range(len(m1)), k)
        swap = False
        for i in range(len(m1)):
            if i in crossover_points:
                swap = not swap
            if swap:
                # swap the components of both mappings (the original
                # assigned m2[i] to itself, leaving m2 unchanged)
                m1[i], m2[i] = m2[i], m1[i]
        log.debug(f"crossover: {m1},{m2}")
        return m1, m2
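# A minimal roundtrip sketch (not from the source; assumes `graph` and
# `platform` are valid DataflowGraph/Platform objects): encode a mapping as
# a process-to-PE vector and decode it back to a Mapping object.
def example_simple_vector_roundtrip(graph, platform):
    representation = SimpleVectorRepresentation(graph, platform)
    mapping = representation.uniform()  # random complete mapping
    vec = representation.toRepresentation(mapping)  # process-to-PE vector
    decoded = representation.fromRepresentation(vec)  # back to a Mapping
    # the process part of the mapping survives the roundtrip
    assert representation.distance(
        vec, representation.toRepresentation(decoded)) == 0
    return vec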
class SymmetryRepresentation(metaclass=MappingRepresentation):
    """Symmetry Representation

    This representation considers the *architecture* symmetries for
    mappings. Application symmetries are still WIP.

    Mappings in this representation are vectors, too, just like in the
    Simple Vector Representation. The difference is that the vectors don't
    correspond to a single mapping, but to an equivalence class of
    mappings. Thus, two mappings that are equivalent through symmetries
    will yield the same vector in this representation. These unique
    vectors for each equivalence class are called "canonical mappings",
    because they are canonical representatives of their orbit. Canonical
    mappings are the lexicographically lowest elements of the equivalence
    class.

    For example, if the PEs 0-3 are all equivalent, and PEs 4-7 are also
    equivalent independently (as would be the case on an Exynos ARM
    big.LITTLE), these two mappings of 5 processes are equivalent:
    [1,1,3,4,6] and [1,1,0,5,4]

    This representation would yield neither of them, as the following
    mapping is lexicographically smaller than both:
    [0,0,1,4,5]
    This is the canonical mapping of this orbit and what this
    representation would use to represent the class.

    This representation currently just supports global symmetries; partial
    symmetries are WIP.

    Methods generally work with objects of the
    `mocasin.common.mapping.Mapping` class. Exceptions are the
    fromRepresentation method, which takes a vector and returns a Mapping
    object, and methods prefixed with an "_". Methods prefixed with "_",
    like _allEquivalent, generally work directly with the representation.
    In order to work with other mappings in the same class, the methods
    allEquivalent/_allEquivalent return, for a mapping, all mappings in
    that class.

    The channels flag, when set to true, considers channels in the mapping
    vectors too. This is not supported and tested yet.

    The periodic_boundary_conditions flag encodes whether distances
    measured in this space are considered by taking periodic boundary
    conditions or not.

    The norm_p parameter sets the p value for the norm
    (\sum |x_i|^p)^(1/p).

    The symmetries representation might be used to accelerate a
    meta-heuristic without changing it, by enabling a symmetry-aware
    cache. This can be achieved by setting the canonical_operations flag
    to False. This way, the operations of the representation, like
    distances or considering elements in a ball, are not executed on the
    canonical representatives but on the raw elements instead.

    The symmetries representation uses the mpsym library to accelerate
    symmetry calculations. This can be disabled by setting disable_mpsym
    to True, which uses the Python fallback version of the symmetries
    instead.

    The calculation of the symmetries can be a costly computation, yet it
    only depends on the architecture. Thus, the symmetries of an
    architecture can be pre-computed, stored to and read from a file. If
    the platform object has the field symmetries_json, the symmetries are
    read from the file at that path. Testing that these symmetries
    actually correspond to the platform can be disabled with the
    disable_symmetries_test flag. This can be useful, e.g., for
    approximating a NoC architecture as a bus. To pre-compute the
    symmetries and store them in a file, use the
    calculate_platform_symmetries task. This pre-computation only works
    when using mpsym.
""" def __init__( self, graph, platform, channels=False, periodic_boundary_conditions=False, norm_p=2, canonical_operations=True, disable_mpsym=False, disable_symmetries_test=False, ): self._topologyGraph = platform.to_adjacency_dict( include_proc_type_labels=True) self.graph = graph self.platform = platform self._d = len(graph.processes()) init_app_ncs(self, graph) self._arch_nc_inv = {} self.channels = channels self.boundary_conditions = periodic_boundary_conditions self.p = norm_p com_mapper = ComFullMapper(graph, platform) self.list_mapper = ProcPartialMapper(graph, platform, com_mapper) self.canonical_operations = canonical_operations n = len(self.platform.processors()) correct = None if disable_mpsym: self.sym_library = False else: try: mpsym except NameError: self.sym_library = False else: self.sym_library = True if hasattr(platform, "ag"): self._ag = platform.ag log.info( "Symmetries initialized with mpsym: Platform Generator." ) elif hasattr(platform, "ag_json"): if exists(platform.ag_json): self._ag = mpsym.ArchGraphSystem.from_json_file( platform.ag_json) if disable_symmetries_test: log.warning( "Using symmetries JSON without testing.") correct = True else: try: correct = checkSymmetries( platform.to_adjacency_dict(), self._ag.automorphisms(), ) except Exception as e: log.warning( "An unknown error occurred while reading " "the embedding JSON file. Did you provide " "the correct file for the given platform? " f"({e})") correct = False if not correct: log.warning( "Symmetries json does not fit platform.") del self._ag else: log.info( "Symmetries initialized with mpsym: JSON file." ) else: log.warning( "Invalid symmetries JSON path (file does not exist)." ) if not hasattr(self, "_ag"): # only calculate this if not already present log.info("No pre-comupted mpsym symmetry group available." " Initalizing architecture graph...") ( adjacency_dict, num_vertices, coloring, self._arch_nc, ) = to_labeled_edge_graph(self._topologyGraph) nautygraph = pynauty.Graph(num_vertices, True, adjacency_dict, coloring) log.info("Architecture graph initialized. Calculating " "automorphism group using Nauty...") autgrp_edges = pynauty.autgrp(nautygraph) autgrp, _ = edge_to_node_autgrp(autgrp_edges[0], self._arch_nc) self._ag = mpsym.ArchGraphAutomorphisms( [mpsym.Perm(g) for g in autgrp]) for node in self._arch_nc: self._arch_nc_inv[self._arch_nc[node]] = node # TODO: ensure that nodes_correspondence fits simpleVec if not self.sym_library: log.info( "Using python symmetries: Initalizing architecture graph...") ( adjacency_dict, num_vertices, coloring, self._arch_nc, ) = to_labeled_edge_graph(self._topologyGraph) nautygraph = pynauty.Graph(num_vertices, True, adjacency_dict, coloring) log.info("Architecture graph initialized. Calculating " "automorphism group using Nauty...") autgrp_edges = pynauty.autgrp(nautygraph) autgrp, _ = edge_to_node_autgrp(autgrp_edges[0], self._arch_nc) permutations_lists = map(list_to_tuple_permutation, autgrp) permutations = [ Permutation.fromLists(p, n=n) for p in permutations_lists ] self._G = PermutationGroup(permutations) log.info("Initialized automorphism group with internal symmetries") def _simpleVec2Elem(self, x): x_ = x[:self._d] # keep channels if exist (they should be mapped accordingly...) 
        _x = x[self._d:]
        if self.sym_library:
            return list(self._ag.representative(x_)) + _x
        else:
            return self._G.tuple_normalize(x_) + _x

    def changed_parameters(self):
        return False

    def _elem2SimpleVec(self, x):
        return x

    def _uniform(self):
        procs_only = SimpleVectorRepresentation._uniform(self)[:self._d]
        if self.sym_library:
            return self._ag.representative(procs_only)
        else:
            return self._G.tuple_normalize(procs_only)

    def uniform(self):
        return self.fromRepresentation(self._uniform())

    def _allEquivalent(self, x, only_support=False):
        x_ = x[:self._d]
        if self.sym_library:
            # TODO: Orbit exploration with support is not implemented
            # in mpsym
            support_orbit = set()
            for p in self._ag.orbit(x_):
                if only_support:
                    support = frozenset(p)
                    if support in support_orbit:
                        continue
                    support_orbit.add(support)
                yield tuple(p)
        else:
            for x in self._G.tuple_orbit(x_, only_support=only_support):
                yield x

    def allEquivalent(self, x, only_support=False):
        """Generate all mappings equivalent to the given one.

        If `only_support` is true, the generator yields only mappings
        whose occupied cores (the support) differ; otherwise, it yields
        all equivalent mappings.
        """
        x_ = x.to_list(channels=False)
        for elem in self._allEquivalent(x_, only_support=only_support):
            mapping = self.list_mapper.generate_mapping(list(elem))
            if hasattr(x, "metadata"):
                mapping.metadata = copy(x.metadata)
            yield mapping

    def toRepresentation(self, mapping):
        return self._simpleVec2Elem(mapping.to_list(channels=self.channels))

    def toRepresentationNoncanonical(self, mapping):
        return SimpleVectorRepresentation.toRepresentation(self, mapping)

    def fromRepresentation(self, mapping):
        # Does not check if canonical. This is deliberate.
        mapping_obj = self.list_mapper.generate_mapping(mapping)
        return mapping_obj

    def _uniformFromBall(self, p, r, npoints=1):
        return SimpleVectorRepresentation._uniformFromBall(
            self, p, r, npoints=npoints)

    def uniformFromBall(self, p, r, npoints=1):
        return self.fromRepresentation(
            self._uniformFromBall(p, r, npoints=npoints))

    def distance(self, x, y):
        if self.canonical_operations:
            return SimpleVectorRepresentation.distance(
                self, self.toRepresentation(x), self.toRepresentation(y))
        else:
            xsv = SimpleVectorRepresentation.toRepresentation(self, x)
            ysv = SimpleVectorRepresentation.toRepresentation(self, y)
            return SimpleVectorRepresentation.distance(self, xsv, ysv)

    def crossover(self, m1, m2, k):
        if self.canonical_operations:
            return SimpleVectorRepresentation._crossover(
                self, self.toRepresentation(m1), self.toRepresentation(m2),
                k)
        else:
            xsv = SimpleVectorRepresentation.toRepresentation(self, m1)
            ysv = SimpleVectorRepresentation.toRepresentation(self, m2)
            return SimpleVectorRepresentation._crossover(self, xsv, ysv, k)

    def _crossover(self, x, y, k):
        if self.canonical_operations:
            xcan = self._simpleVec2Elem(x)
            ycan = self._simpleVec2Elem(y)
            xcx, ycx = SimpleVectorRepresentation._crossover(
                self, xcan, ycan, k)
            # update manually so that we return DEAP Individuals in DEAP
            for i in range(len(x)):
                x[i] = xcx[i]
                y[i] = ycx[i]
            return x, y
        else:
            return SimpleVectorRepresentation._crossover(self, x, y, k)

    def approximate(self, x):
        approx = SimpleVectorRepresentation.approximate(self, x)
        return self._simpleVec2Elem(approx)
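# A usage sketch mirroring test_allEquivalent above (assumes a graph with
# two processes): enumerate the symmetry class of a mapping and obtain its
# canonical vector.
def example_symmetry_class(graph, platform):
    com_mapper = ComFullMapper(graph, platform)
    mapper = ProcPartialMapper(graph, platform, com_mapper)
    mapping = mapper.generate_mapping([0, 1])
    representation = SymmetryRepresentation(graph, platform)
    equivalent_mappings = list(representation.allEquivalent(mapping))
    canonical_vec = representation.toRepresentation(mapping)
    return equivalent_mappings, canonical_vec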
class Simulation(Oracle):
    """Simulation oracle."""

    def __init__(self, graph, platform, trace, threshold, threads=1):
        self.graph = graph
        self.platform = platform
        self.trace = trace
        self.randMapGen = RandomPartialMapper(self.graph, self.platform)
        self.comMapGen = ComPartialMapper(self.graph, self.platform,
                                          self.randMapGen)
        self.dcMapGen = ProcPartialMapper(self.graph, self.platform,
                                          self.comMapGen)
        self.threads = threads
        self.threshold = threshold
        self.cache = {}
        self.total_cached = 0
        self.oracle_type = "simulation"

    def prepare_sim_contexts_for_samples(self, samples):
        """Prepare a simulation/application context and mapping for each
        element in `samples`."""
        # Create a list of 'simulation contexts'.
        # These can be later executed by multiple worker processes.
        simulation_contexts = []
        for i in range(0, len(samples)):
            log.debug("Using simcontext no.: {} {}".format(i, samples[i]))
            # create a simulation context
            mapping = self.dcMapGen.generate_mapping(
                list(map(int, samples[i].sample2simpleTuple())))
            sim_context = self.prepare_sim_context(mapping)
            samples[i].setSimContext(sim_context)

    def prepare_sim_context(self, mapping):
        sim_mapping = self.dcMapGen.generate_mapping(mapping.to_list())
        sim_context = DataflowSimulation(self.platform, self.graph,
                                         sim_mapping, self.trace)
        log.debug("Mapping toList: {}".format(sim_mapping.to_list()))
        return sim_context

    def is_feasible(self, samples):
        """Check if a set of samples is feasible in the context of a given
        timing threshold.

        Triggers the simulations, in parallel jobs if configured, and
        processes the resulting array of simulation results according to
        the given threshold.
        """
        results = []
        # run simulations and search for the best mapping
        if len(samples) > 1 and self.threads > 1:
            # run parallel simulation for more than one sample in samples
            # list
            from multiprocessing import Pool

            log.debug("Running parallel simulation for {} samples".format(
                len(samples)))
            pool = Pool(processes=self.threads, maxtasksperchild=100)
            results = list(
                pool.map(self.run_simulation, samples,
                         chunksize=self.threads))
        else:
            # results is a list of simulation contexts
            log.debug("Running single simulation")
            results = list(map(self.run_simulation, samples))

        # find runtimes from results
        exec_times = []  # in ps
        for r in results:
            exec_times.append(float(r.sim_context.result.exec_time))

        feasible = []
        for r in results:
            assert r.sim_context.result and r.sim_context.result.exec_time
            ureg = pint.UnitRegistry()
            threshold = ureg(self.threshold).to(ureg.ps).magnitude
            if r.sim_context.result.exec_time > threshold:
                r.setFeasibility(False)
                feasible.append(False)
            else:
                r.setFeasibility(True)
                feasible.append(True)

        log.debug("Exec.-Times: {} Feasible: {}".format(exec_times,
                                                        feasible))
        # return samples with the according sim context
        return results

    def run_simulation(self, sample):
        # running the simulation requires a sim_context
        if sample.sim_context.result is not None:
            self.total_cached += 1
            return sample
        try:
            utils.run_simulation(sample.sim_context)
            # add to cache
            mapping = tuple(sample.getMapping().to_list())
            self.cache[mapping] = sample.sim_context.result
        except Exception as e:
            log.debug("Exception in Simulation: {}".format(str(e)))
            traceback.print_exc()
            # log.exception(str(e))
            if hasattr(e, "details"):
                log.info(e.details())
        return sample
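# A minimal usage sketch (hypothetical; assumes `trace` is a valid trace
# generator for `graph`, and `mapping` is a complete Mapping; the "10 ms"
# threshold string is made up and parsed by pint as above): simulate one
# mapping and read its runtime.
def example_simulate_mapping(graph, platform, trace, mapping):
    oracle = Simulation(graph, platform, trace, threshold="10 ms")
    sim_context = oracle.prepare_sim_context(mapping)
    utils.run_simulation(sim_context)
    return float(sim_context.result.exec_time)  # in ps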
class MetricEmbeddingRepresentation(MetricSpaceEmbedding,
                                    metaclass=MappingRepresentation):
    """Metric Space Representation

    A representation for a metric space that uses an efficient embedding
    into a real space. Upon initialization, this representation calculates
    an embedding into a real space such that the distances in the metric
    space differ from the embedded distances by a factor of at most
    `distortion`.

    Elements in this representation are real vectors; the individual
    components have no concrete interpretation. However, they do have a
    particular structure. For multiple processes, a single embedding for
    the architecture is calculated. The multi-process vector space is the
    orthogonal sum of copies of a vector space embedding for the
    single-process case. This provably preserves the distortion and makes
    calculations much more efficient.

    The additional option, extra_dimensions, adds additional dimensions
    for each PE to count when multiple processes are mapped to the same
    PE. The scaling factor for those extra dimensions is controlled by the
    value of extra_dimensions_factor.

    When the ignore_channels flag is set to true, the embedding ignores
    communication channels, focusing only on the mapping of computation.

    The target_distortion value sets the maximum distortion of the metric
    space that is allowed for the embedding. A value close to 1 might be
    hard or even impossible to achieve; a higher value reduces accuracy
    but allows the JLT-based dimensionality reduction to shrink the
    dimension of the embedding further. The number of tries to achieve the
    target distortion for a given dimension is controlled by the parameter
    jlt_tries.

    The verbose flag controls the verbosity of the module.

    Finally, since the embedding depends only on the architecture, it can
    be pre-computed and stored in a file. This file is read from the
    architecture description. If the architecture object has a field
    embedding_json, the precomputed embedding is read from a JSON file at
    this path. Most architectures set this field from a parameter with the
    same name that can be configured in hydra. Such a JSON file can be
    generated using the calculate_platform_embedding task. If no such file
    exists, or it is invalid, an embedding will be computed from scratch.
    The flag disable_embedding_test can be set to True to accept the
    embedding from the JSON file without checking that it fits the given
    architecture and parameters.
    """

    def __init__(
        self,
        graph,
        platform,
        norm_p,
        extra_dimensions=True,
        extra_dimensions_factor=3,
        ignore_channels=True,
        target_distortion=1.1,
        jlt_tries=10,
        verbose=False,
        disable_embedding_test=False,
    ):
        # todo: make sure the correspondence of cores is correct!
        M_matrix, self._arch_nc, self._arch_nc_inv = arch_to_distance_metric(
            platform, heterogeneity=extra_dimensions)
        self._M = FiniteMetricSpace(M_matrix)
        self.graph = graph
        self.platform = platform
        self.extra_dims = extra_dimensions
        self.jlt_tries = jlt_tries
        self.target_distortion = target_distortion
        self.ignore_channels = ignore_channels
        self.verbose = verbose
        if hasattr(platform, "embedding_json"):
            self.embedding_matrix_path = platform.embedding_json
        else:
            self.embedding_matrix_path = None
        if not self.ignore_channels:
            log.warning("Not ignoring channels might lead"
                        " to invalid mappings when approximating.")
        self.extra_dims_factor = extra_dimensions_factor
        self._d = len(graph.processes())
        if self.extra_dims:
            self._split_d = self._d
            self._split_k = len(platform.processors())
            self._d += len(graph.channels())
        self.p = norm_p
        com_mapper = ComFullMapper(graph, platform)
        self.list_mapper = ProcPartialMapper(graph, platform, com_mapper)
        init_app_ncs(self, graph)
        if self.p != 2:
            log.error(f"Metric space embeddings only support p = 2."
                      f" For p = 1, for example, finding such an embedding"
                      f" is NP-hard (see Matousek, J., Lectures on Discrete"
                      f" Geometry, Chap. 15.5)")
        MetricSpaceEmbedding.__init__(
            self,
            self._M,
            self._d,
            jlt_tries=self.jlt_tries,
            embedding_matrix_path=self.embedding_matrix_path,
            target_distortion=self.target_distortion,
            verbose=verbose,
            disable_embedding_test=disable_embedding_test,
        )
        log.info(f"Found embedding with distortion: {self.distortion}")

    def changed_parameters(self, norm_p):
        return self.p != norm_p

    def _simpleVec2Elem(self, x):
        proc_vec = x[:self._d]
        as_array = np.array(self.i(proc_vec)).flatten()
        # [value for comp in self.i(x) for value in comp]
        return as_array

    def _elem2SimpleVec(self, x):
        return self.inv(self.approx(x[:(self._k * self._d)]).tolist())

    def _uniform(self):
        res = np.array(self.uniformVector()).flatten()
        return res

    def uniform(self):
        return self.fromRepresentation(
            np.array(self.uniformVector()).flatten())

    def _uniformFromBall(self, p, r, npoints=1):
        log.debug(f"Uniform from ball with radius r={r} around point p={p}")
        # print(f"point of type {type(p)} and shape {p.shape}")
        point = np.array(p).flatten()
        results_raw = MetricSpaceEmbedding.uniformFromBall(
            self, point, r, npoints)
        results = list(
            map(lambda x: np.array(list(np.array(x).flat)), results_raw))
        if self.extra_dims:
            results = list(
                map(
                    lambda x: self._simpleVec2Elem(self._elem2SimpleVec(x)),
                    results,
                ))
        # print(f"results uniform from ball: {results}")
        return results

    def uniformFromBall(self, p, r, npoints=1):
        log.debug(f"Uniform from ball with radius r={r} around point p={p}")
        point = self.toRepresentation(p)
        uniformpoints = MetricSpaceEmbedding.uniformFromBall(
            self, point, r, npoints)
        elements = map(self.fromRepresentation, uniformpoints)
        # Returns a list, not a map object. Do we want to change this?
        return list(elements)
    def toRepresentation(self, mapping):
        return self._simpleVec2Elem(
            mapping.to_list(channels=self.extra_dims))

    def fromRepresentation(self, mapping):
        simple_vec = self._elem2SimpleVec(mapping)
        if self.ignore_channels:
            simple_vec = simple_vec[:self._split_d]
        mapping_obj = self.list_mapper.generate_mapping(simple_vec)
        return mapping_obj

    def _distance(self, x, y):
        return lp.p_norm(x - y, self.p)

    def distance(self, x, y):
        return self._distance(self.toRepresentation(x),
                              self.toRepresentation(y))

    def approximate(self, x):
        res = np.array(self.approx(x[:(self._d * self._k)])).flatten()
        return res

    def crossover(self, m1, m2, k):
        return self._crossover(self.toRepresentation(m1),
                               self.toRepresentation(m2), k)

    def _crossover(self, m1, m2, k):
        assert len(m1) == len(m2)
        crossover_points = (np.array(random.sample(range(self._d), k))
                            * self._k)
        swap = False
        for i in range(len(m1)):
            if i in crossover_points:
                swap = not swap
            if swap:
                # swap the components of both embedded mappings (the
                # original assigned m2[i] to itself, leaving m2 unchanged)
                m1[i], m2[i] = m2[i], m1[i]
        return m1, m2
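# A roundtrip sketch (not from the source; norm_p must be 2, see __init__
# above, and the ball radius 1.0 is an arbitrary choice): embed a random
# mapping into the real vector space and sample a nearby mapping from a
# ball around it.
def example_embedding_roundtrip(graph, platform):
    representation = MetricEmbeddingRepresentation(graph, platform,
                                                   norm_p=2)
    mapping = representation.uniform()  # random mapping as Mapping object
    point = representation.toRepresentation(mapping)  # embedded vector
    nearby = representation.uniformFromBall(mapping, 1.0, npoints=1)[0]
    return point, representation.distance(mapping, nearby)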