def to_mocasin_processor(self):
    """Returns a mocasin Processor object, equivalent to the TgffProcessor
    object.

    :returns: equivalent processor object
    :rtype: Processor
    """
    frequency_domain = FrequencyDomain(
        "fd{0}".format(self.name), math.ceil(1 / self.cycle_time)
    )
    return Processor(self.name, self.type, frequency_domain, None)
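# Illustrative sketch (not part of the original source): the conversion above
# derives the processor frequency from the TGFF cycle time as ceil(1 / t).
# For a hypothetical cycle time of 2 ns this yields 500 MHz.
import math

cycle_time = 2e-9  # hypothetical value taken from a TGFF file
assert math.ceil(1 / cycle_time) == 500000000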
def addPeCluster(
    self,
    identifier,
    name,
    amount,
    frequency,
    static_power=None,
    dynamic_power=None,
):
    """Creates a new cluster of processing elements on the platform.

    :param identifier: The identifier by which the cluster can be addressed
        within the currently active scope.
    :type identifier: int
    :param name: The name of the processing elements.
    :type name: string
    :param amount: The number of processing elements in the cluster.
    :type amount: int
    :param frequency: The frequency of the processing elements.
    :type frequency: int
    :param static_power: The static power of the processing elements.
    :type static_power: float
    :param dynamic_power: The dynamic power of the processing elements.
    :type dynamic_power: float
    """
    log.warning(
        "Deprecation warning: use addPeClusterForProcessor instead."
    )
    try:
        fd = FrequencyDomain("fd_" + name, frequency)
        if static_power is not None and dynamic_power is not None:
            ppm = ProcessorPowerModel(
                "ppm_" + name, static_power, dynamic_power
            )
        else:
            ppm = None
        start = self.__peAmount
        end = self.__peAmount + amount
        processors = []
        for i in range(start, end):
            processor = Processor("PE%02d" % i, name, fd, ppm)
            self.__platform.add_processor(processor)
            self.__platform.add_scheduler(
                Scheduler(
                    "sched%02d" % i, [processor], self.__schedulingPolicy
                )
            )
            processors.append((processor, []))
            self.__peAmount += 1
        self.__elementDict[self.__activeScope].update(
            {identifier: processors}
        )
    except:
        log.error("Exception caught: " + str(sys.exc_info()[0]))

    if self.__symLibrary:
        self._ag_addCluster(identifier, name, amount, processors)
def addPeClusterForProcessor(
    self, identifier, processor, amount, processor_names=None
):
    """Creates a new cluster of processing elements on the platform.

    :param identifier: The identifier by which the cluster can be addressed
        within the currently active scope.
    :type identifier: int
    :param processor: The mocasin Processor object which will be used for
        the cluster.
    :type processor: Processor
    :param amount: The number of processing elements in the cluster.
    :type amount: int
    :param processor_names: The names of the processors.
    :type processor_names: list of strings
    """
    try:
        start = self.__peAmount
        end = self.__peAmount + amount
        processors = []
        for i in range(amount):
            pe_counter = start + i
            # copy the input processor since a single processor can only be
            # added once
            if processor_names is not None:
                name = processor_names[i]
            else:
                name = f"processor_{self.__peAmount:04d}"
            new_processor = Processor(
                name,
                processor.type,
                processor.frequency_domain,
                processor.power_model,
                processor.context_load_cycles,
                processor.context_store_cycles,
            )
            self.__platform.add_processor(new_processor)
            self.__platform.add_scheduler(
                Scheduler(
                    f"sched_{name}",
                    [new_processor],
                    self.__schedulingPolicy,
                )
            )
            processors.append((new_processor, []))
            self.__peAmount += 1
        self.__elementDict[self.__activeScope].update(
            {identifier: processors}
        )
    except:
        log.error("Exception caught: " + str(sys.exc_info()[0]))

    if self.__symLibrary:
        self._ag_addCluster(identifier, processor.name, amount, processors)
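# Hypothetical usage sketch: `designer` stands for an instance of the (not
# shown) platform designer class with an open scope; the import path and the
# template processor parameters are assumptions, not taken from the original.
from mocasin.common.platform import FrequencyDomain, Processor  # assumed path

template_fd = FrequencyDomain("fd_template", 1000000000)
template_pe = Processor("template_pe", "RISC", template_fd, None)
# designer.addPeClusterForProcessor("cluster0", template_pe, 4)
# -> assuming no PEs were added before, this adds processor_0000 ..
#    processor_0003, each with its own scheduler named
#    sched_processor_0000 .. sched_processor_0003.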
def platform(num_procs, mocker):
    p = Platform("platform")
    procs = []
    for i in range(num_procs):
        proc = Processor(
            ("processor" + str(i)), "proctype", mocker.Mock(), mocker.Mock()
        )
        procs.append(proc)
        p.add_processor(proc)
    policy = mocker.Mock()
    sched = Scheduler("name", procs, policy)
    p.add_scheduler(sched)
    return p
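# Hypothetical test sketch built on the fixture above. It assumes pytest-mock
# provides `mocker`, that `num_procs` is itself a fixture, and that Platform
# exposes a processors() iterator (all three are assumptions).
def test_platform_processor_count(platform, num_procs):
    assert len(list(platform.processors())) == num_procs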
def __init__(
    self, name, num_processors, symmetries_json=None, embedding_json=None
):
    """Initialize the platform.

    Generate `num_processors` processors and schedulers and connect them to
    a global shared bus. This also creates a global RAM object and connects
    it to the bus. Based on this bus setup, the primitive for communication
    via shared RAM is generated.
    """
    super().__init__(name, symmetries_json, embedding_json)

    fd_pes = FrequencyDomain("fd_pes", 500000000)
    fd_ram = FrequencyDomain("fd_ram", 100000000)

    ram = Storage("RAM", fd_ram, 5, 7, 16, 12)
    self.add_communication_resource(ram)

    bus = Bus("bus", fd_pes, 1, 8)
    self.add_communication_resource(bus)
    bus.connect_storage(ram)

    policy = SchedulingPolicy("FIFO", 100)

    for i in range(0, num_processors):
        name = "PE%02d" % (i)
        processor = Processor(name, "RISC", fd_pes, 100, 100)
        bus.connect_processor(processor)
        self.add_processor(processor)
        self.add_scheduler(Scheduler("sp_" + name, [processor], policy))

    primitives = primitives_from_buses([bus])
    for p in primitives:
        self.add_primitive(p)
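# Hypothetical usage sketch: the name of the class this __init__ belongs to is
# not shown above, so `DesignerPlatformBus` is only a placeholder. With
# num_processors=4 the constructor creates PE00..PE03, one FIFO scheduler per
# PE, and shared-RAM primitives derived from the single bus.
# platform = DesignerPlatformBus("bus_example", 4)
assert ["PE%02d" % i for i in range(4)] == ["PE00", "PE01", "PE02", "PE03"]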
def convert(
    platform,
    xml_platform,
    scheduler_cycles=None,
    fd_frequencies=None,
    ppm_power=None,
):
    # keep a map of scheduler names to processors, this helps when creating
    # scheduler objects
    schedulers_to_processors = {}
    for s in xml_platform.get_Scheduler():
        schedulers_to_processors[s.get_id()] = []

    # Check that fd_frequencies only defines frequencies of known domains
    if fd_frequencies is not None:
        fd_names = [fd.get_id() for fd in xml_platform.get_FrequencyDomain()]
        for fd in fd_frequencies:
            if fd not in fd_names:
                log.warning(
                    f"The fd_frequencies defines the frequency of "
                    f"an unknown domain {fd}"
                )

    # Collect all frequency and voltage domains
    voltage_domains = {}
    for vd in xml_platform.get_VoltageDomain():
        name = vd.get_id()
        voltage_domains[name] = []
        for v in vd.get_Voltage():
            voltage = ur(v.get_value() + v.get_unit()).to("V").magnitude
            voltage_domains[name].append(voltage)

    frequency_domains = {}
    # We do not save voltage domains by their names defined in MAPS XML files,
    # instead we save them under the same names as the frequency domains
    fd_voltage_cond = {}
    for fd in xml_platform.get_FrequencyDomain():
        name = fd.get_id()
        max_frequency = 0
        supported_frequency_voltage_pairs = []
        for f in fd.get_Frequency():
            voltage_domain_conds = f.get_VoltageDomainCondition()
            if len(voltage_domain_conds) > 1:
                raise RuntimeError(
                    f"The xml defines multiple voltages for the frequency "
                    f"domain {name} at {f.get_value()}{f.get_unit()}."
                )
            voltage_cond = None
            for v in voltage_domain_conds:
                voltage_cond = (
                    ur(v.get_value() + v.get_unit()).to("V").magnitude
                )
            frequency = ur(f.get_value() + f.get_unit()).to("Hz").magnitude
            supported_frequency_voltage_pairs.append(
                tuple((frequency, voltage_cond))
            )
        if fd_frequencies is not None and name in fd_frequencies:
            frequency = fd_frequencies[name]
            voltage_cond = None
            found = False
            for f, v in supported_frequency_voltage_pairs:
                if frequency != f:
                    continue
                voltage_cond = v
                found = True
            if not found:
                log.warning(
                    f"The fd_frequencies sets the frequency of the domain "
                    f"{name} to {frequency} Hz, which is not defined in "
                    f"the xml."
                )
        else:
            frequency, voltage_cond = max(supported_frequency_voltage_pairs)
            if len(fd.get_Frequency()) > 1:
                log.warning(
                    "The xml defines multiple frequencies for the domain "
                    "%s. -> Select Maximum",
                    name,
                )
        frequency_domains[name] = FrequencyDomain(name, frequency)
        fd_voltage_cond[name] = voltage_cond
        log.debug(
            "Found frequency domain %s (%d Hz).",
            name,
            frequency,
        )

    # Collect processor power model parameters
    processor_power_params = {}
    for ppm in xml_platform.get_ProcessorPowerModel():
        name = ppm.get_id()
        leakage_current = (
            ur(ppm.get_leakageCurrentValue() + ppm.get_leakageCurrentUnit())
            .to("A")
            .magnitude
        )
        switched_capacitance = (
            ur(
                ppm.get_switchedCapacitanceValue()
                + ppm.get_switchedCapacitanceUnit()
            )
            .to("F")
            .magnitude
        )
        processor_power_params[name] = {
            "leakage_current": leakage_current,
            "switched_capacitance": switched_capacitance,
        }

    # Check the custom power values
    if ppm_power is not None:
        for ppm, powers in ppm_power.items():
            if ppm not in processor_power_params:
                log.warning(
                    f"The ppm_power defines the power of an unknown "
                    f"processor power model {ppm}"
                )
                continue
            for power_mode in ["static", "dynamic"]:
                if power_mode not in powers:
                    log.warning(
                        f"The ppm_power does not define the {power_mode} "
                        f"power of the processor power model {ppm}"
                    )

    processor_power_models = {}
    # Initialize all Processors
    for xp in xml_platform.get_Processor():
        name = xp.get_id()
        type = xp.get_core()
        fd_name = xp.get_frequencyDomain()
        fd = frequency_domains[fd_name]

        # Initialize a processor power model
        vd_name = xp.get_voltageDomain()
        ppm_name = xp.get_processorPowerModel()
        if ppm_name is None:
            ppm = None
        else:
            if ppm_name not in processor_power_models:
                ppm = create_processor_power_model(
                    fd_name,
                    vd_name,
                    ppm_name,
                    frequency_domains,
                    voltage_domains,
                    fd_voltage_cond,
                    processor_power_params,
                    ppm_power,
                )
                processor_power_models[ppm_name] = ppm
            else:
                ppm = processor_power_models[ppm_name]

        context_load = get_value_in_cycles(xp, "contextLoad", 0)
        context_store = get_value_in_cycles(xp, "contextStore", 0)
        p = Processor(name, type, fd, ppm, context_load, context_store)
        schedulers_to_processors[xp.get_scheduler()].append(p)
        platform.add_processor(p)
        log.debug("Found processor %s of type %s", name, type)

    # Initialize all Schedulers
    for xs in xml_platform.get_Scheduler():
        name = xs.get_id()
        policy = create_policy(
            xml_platform, xs.get_schedulingPolicyList(), scheduler_cycles
        )
        s = Scheduler(name, schedulers_to_processors[name], policy)
        log.debug(
            "Found scheduler %s for %s supporting %s",
            name,
            schedulers_to_processors[name],
            policy.name,
        )
        platform.add_scheduler(s)

    # Initialize all Memories, Caches, and Fifos as CommunicationResources
    for xm in xml_platform.get_Memory():
        name = xm.get_id()
        read_latency = get_value_in_cycles(xm, "readLatency", 0)
        write_latency = get_value_in_cycles(xm, "writeLatency", 0)
        read_throughput = get_value_in_byte_per_cycle(
            xm, "readThroughput", float("inf")
        )
        write_throughput = get_value_in_byte_per_cycle(
            xm, "writeThroughput", float("inf")
        )
        fd = frequency_domains[xm.get_frequencyDomain()]
        mem = Storage(
            name,
            fd,
            read_latency,
            write_latency,
            read_throughput,
            write_throughput,
        )
        platform.add_communication_resource(mem)
        log.debug("Found memory %s", name)

    for xc in xml_platform.get_Cache():
        name = xc.get_id()
        # XXX we assume a 100% cache hit rate
        read_latency = get_value_in_cycles(xc, "readHitLatency", 0)
        write_latency = get_value_in_cycles(xc, "writeHitLatency", 0)
        read_throughput = get_value_in_byte_per_cycle(
            xc, "readHitThroughput", float("inf")
        )
        write_throughput = get_value_in_byte_per_cycle(
            xc, "writeHitThroughput", float("inf")
        )
        fd = frequency_domains[xc.get_frequencyDomain()]
        cache = Storage(
            name,
            fd,
            read_latency,
            write_latency,
            read_throughput,
            write_throughput,
        )
        platform.add_communication_resource(cache)
        log.debug("Found cache %s", name)

    for xf in xml_platform.get_Fifo():
        name = xf.get_id()
        read_latency = get_value_in_cycles(xf, "readLatency", 0)
        write_latency = get_value_in_cycles(xf, "writeLatency", 0)
        read_throughput = get_value_in_byte_per_cycle(
            xf, "readThroughput", float("inf")
        )
        write_throughput = get_value_in_byte_per_cycle(
            xf, "writeThroughput", float("inf")
        )
        fd = frequency_domains[xf.get_frequencyDomain()]
        fifo = Storage(
            name,
            fd,
            read_latency,
            write_latency,
            read_throughput,
            write_throughput,
        )
        platform.add_communication_resource(fifo)
        log.debug("Found FIFO %s", name)

    # We also need to collect all the physical links, logical links and DMA
    # controllers
    # modified by Felix Teweleit 10.08.2018
    for ll in xml_platform.get_PhysicalLink():
        name = ll.get_id()
        latency = get_value_in_cycles(ll, "latency", 0)
        throughput = get_value_in_byte_per_cycle(
            ll, "throughput", float("inf")
        )
        fd = frequency_domains[ll.get_frequencyDomain()]
        link = CommunicationResource(
            name,
            fd,
            CommunicationResourceType.PhysicalLink,
            latency,
            latency,
            throughput,
            throughput,
        )
        platform.add_communication_resource(link)
        log.debug("Found link or DMA %s", name)

    for ll in xml_platform.get_LogicalLink():
        name = ll.get_id()
        latency = get_value_in_cycles(ll, "latency", 0)
        throughput = get_value_in_byte_per_cycle(
            ll, "throughput", float("inf")
        )
        fd = frequency_domains[ll.get_frequencyDomain()]
        link = CommunicationResource(
            name,
            fd,
            CommunicationResourceType.LogicalLink,
            latency,
            latency,
            throughput,
            throughput,
        )
        platform.add_communication_resource(link)
        log.debug("Found link or DMA %s", name)

    for ll in xml_platform.get_DMAController():
        name = ll.get_id()
        latency = get_value_in_cycles(ll, "latency", 0)
        throughput = get_value_in_byte_per_cycle(
            ll, "throughput", float("inf")
        )
        fd = frequency_domains[ll.get_frequencyDomain()]
        link = CommunicationResource(
            name,
            fd,
            CommunicationResourceType.DMAController,
            latency,
            latency,
            throughput,
            throughput,
        )
        platform.add_communication_resource(link)
        log.debug("Found link or DMA %s", name)
    # end of modified code

    # Initialize all Communication Primitives
    for xcom in xml_platform.get_Communication():
        name = xcom.get_id()

        producers = {}
        consumers = {}

        # Read the Producers
        for xp in xcom.get_Producer():
            pn = xp.get_processor()

            # TODO implement passive producing costs
            if xp.get_Passive() is not None:
                log.warning(
                    "Passive producing costs are not supported"
                    " -> ignore passive phase of primitive %s",
                    name,
                )

            # We create a single phase for each producer
            active = CommunicationPhase(
                "Produce Active",
                resources_from_access(xp.get_Active(), platform),
                "write",
            )
            producers[pn] = [active]

        # Read the Consumers
        for xc in xcom.get_Consumer():
            cn = xc.get_processor()

            # TODO implement passive consuming costs
            if xc.get_Passive() is not None:
                log.warning(
                    "Passive consuming costs are not supported"
                    " -> ignore passive phase of primitive %s",
                    name,
                )

            # We create a single phase for each consumer
            active = CommunicationPhase(
                "Consume Active",
                resources_from_access(xc.get_Active(), platform),
                "read",
            )
            consumers[cn] = [active]

        # Create a Primitive for each combination of producer and consumer
        primitive = Primitive(name)

        for pn in producers:
            primitive.add_producer(platform.find_processor(pn), producers[pn])
        for cn in consumers:
            primitive.add_consumer(platform.find_processor(cn), consumers[cn])

        log.debug(
            "Found the communication primitive %s: %s -> %s",
            name,
            str(producers.keys()),
            str(consumers.keys()),
        )
        platform.add_primitive(primitive)
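# Illustration (not from the original source) of the frequency selection in
# convert(): when the XML lists several frequencies for a domain, the code
# takes max() over (frequency, voltage) tuples, which compares element-wise
# and therefore picks the highest frequency together with its voltage.
pairs = [(600000000, 0.9), (1200000000, 1.1)]
assert max(pairs) == (1200000000, 1.1)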
def __init__(self):
    super(Exynos2Chips, self).__init__("Exynos2Chips")

    # Frequency domains
    fd_a7 = FrequencyDomain("fd_a7", 1400000000)
    fd_a15 = FrequencyDomain("fd_a15", 2000000000)
    fd_l3 = FrequencyDomain("fd_l3", 1400000000)
    fd_ram = FrequencyDomain("fd_ram", 933000000)

    # Processors
    for i in range(0, 4):
        self.add_processor(Processor("PE%02d" % i, "ARM_CORTEX_A7", fd_a7))
    for i in range(4, 8):
        self.add_processor(Processor("PE%02d" % i, "ARM_CORTEX_A15", fd_a15))
    for i in range(8, 12):
        self.add_processor(Processor("PE%02d" % i, "ARM_CORTEX_A7", fd_a7))
    for i in range(12, 16):
        self.add_processor(Processor("PE%02d" % i, "ARM_CORTEX_A15", fd_a15))

    # Schedulers
    sp = SchedulingPolicy("FIFO", 1000)
    for i in range(0, 16):
        self.add_scheduler(
            Scheduler(
                "sched%02d" % i, [self.find_processor("PE%02d" % i)], sp
            )
        )

    # L1 Caches
    for i in range(0, 4):
        self.add_communication_resource(
            Storage(
                "L1_PE%02d" % i,
                fd_a7,
                read_latency=1,
                write_latency=4,
                read_throughput=8,
                write_throughput=8,
            )
        )
    for i in range(4, 8):
        self.add_communication_resource(
            Storage(
                "L1_PE%02d" % i,
                fd_a15,
                read_latency=1,
                write_latency=4,
                read_throughput=8,
                write_throughput=8,
            )
        )
    for i in range(8, 12):
        self.add_communication_resource(
            Storage(
                "L1_PE%02d" % i,
                fd_a7,
                read_latency=1,
                write_latency=4,
                read_throughput=8,
                write_throughput=8,
            )
        )
    for i in range(12, 16):
        self.add_communication_resource(
            Storage(
                "L1_PE%02d" % i,
                fd_a15,
                read_latency=1,
                write_latency=4,
                read_throughput=8,
                write_throughput=8,
            )
        )

    # L1 Primitives
    for i in range(0, 16):
        pe = self.find_processor("PE%02d" % i)
        l1 = self.find_communication_resource("L1_PE%02d" % i)
        produce = CommunicationPhase("produce", [l1], "write")
        consume = CommunicationPhase("consume", [l1], "read")
        prim = Primitive("prim_L1_PE%02d" % i)
        prim.add_producer(pe, [produce])
        prim.add_consumer(pe, [consume])
        self.add_primitive(prim)

    # L2 Caches
    l2_C0_A7 = Storage(
        "L2_C0_A7",
        fd_a7,
        read_latency=16,
        write_latency=21,
        read_throughput=8,
        write_throughput=8,
    )
    l2_C0_A15 = Storage(
        "L2_C0_A15",
        fd_a15,
        read_latency=16,
        write_latency=21,
        read_throughput=8,
        write_throughput=8,
    )
    l2_C1_A7 = Storage(
        "L2_C1_A7",
        fd_a7,
        read_latency=16,
        write_latency=21,
        read_throughput=8,
        write_throughput=8,
    )
    l2_C1_A15 = Storage(
        "L2_C1_A15",
        fd_a15,
        read_latency=16,
        write_latency=21,
        read_throughput=8,
        write_throughput=8,
    )
    self.add_communication_resource(l2_C0_A7)
    self.add_communication_resource(l2_C0_A15)
    self.add_communication_resource(l2_C1_A7)
    self.add_communication_resource(l2_C1_A15)

    # L2 Primitives
    prim = Primitive("prim_L2_C0_A7")
    for i in range(0, 4):
        pe = self.find_processor("PE%02d" % i)
        l1 = self.find_communication_resource("L1_PE%02d" % i)
        produce = CommunicationPhase("produce", [l1, l2_C0_A7], "write")
        consume = CommunicationPhase("consume", [l1, l2_C0_A7], "read")
        prim.add_producer(pe, [produce])
        prim.add_consumer(pe, [consume])
    self.add_primitive(prim)

    prim = Primitive("prim_L2_C0_A15")
    for i in range(4, 8):
        pe = self.find_processor("PE%02d" % i)
        l1 = self.find_communication_resource("L1_PE%02d" % i)
        produce = CommunicationPhase("produce", [l1, l2_C0_A15], "write")
        consume = CommunicationPhase("consume", [l1, l2_C0_A15], "read")
        prim.add_producer(pe, [produce])
        prim.add_consumer(pe, [consume])
    self.add_primitive(prim)

    prim = Primitive("prim_L2_C1_A7")
    for i in range(8, 12):
        pe = self.find_processor("PE%02d" % i)
        l1 = self.find_communication_resource("L1_PE%02d" % i)
        produce = CommunicationPhase("produce", [l1, l2_C1_A7], "write")
        consume = CommunicationPhase("consume", [l1, l2_C1_A7], "read")
        prim.add_producer(pe, [produce])
        prim.add_consumer(pe, [consume])
    self.add_primitive(prim)

    prim = Primitive("prim_L2_C1_A15")
    for i in range(12, 16):
        pe = self.find_processor("PE%02d" % i)
        l1 = self.find_communication_resource("L1_PE%02d" % i)
        produce = CommunicationPhase("produce", [l1, l2_C1_A15], "write")
        consume = CommunicationPhase("consume", [l1, l2_C1_A15], "read")
        prim.add_producer(pe, [produce])
        prim.add_consumer(pe, [consume])
    self.add_primitive(prim)

    # L3 Caches
    l3_C0 = Storage(
        "L3_C0",
        fd_l3,
        read_latency=30,
        write_latency=21,
        read_throughput=8,
        write_throughput=8,
    )
    l3_C1 = Storage(
        "L3_C1",
        fd_l3,
        read_latency=40,
        write_latency=21,
        read_throughput=8,
        write_throughput=8,
    )
    self.add_communication_resource(l3_C0)
    self.add_communication_resource(l3_C1)

    # L3 Primitives
    prim = Primitive("prim_L3_C0")
    for i in range(0, 4):
        pe = self.find_processor("PE%02d" % i)
        l1 = self.find_communication_resource("L1_PE%02d" % i)
        produce = CommunicationPhase("produce", [l1, l2_C0_A7, l3_C0], "write")
        consume = CommunicationPhase("consume", [l1, l2_C0_A7, l3_C0], "read")
        prim.add_producer(pe, [produce])
        prim.add_consumer(pe, [consume])
    for i in range(4, 8):
        pe = self.find_processor("PE%02d" % i)
        l1 = self.find_communication_resource("L1_PE%02d" % i)
        produce = CommunicationPhase(
            "produce", [l1, l2_C0_A15, l3_C0], "write"
        )
        consume = CommunicationPhase("consume", [l1, l2_C0_A15, l3_C0], "read")
        prim.add_producer(pe, [produce])
        prim.add_consumer(pe, [consume])
    self.add_primitive(prim)

    prim = Primitive("prim_L3_C1")
    for i in range(8, 12):
        pe = self.find_processor("PE%02d" % i)
        l1 = self.find_communication_resource("L1_PE%02d" % i)
        produce = CommunicationPhase("produce", [l1, l2_C1_A7, l3_C1], "write")
        consume = CommunicationPhase("consume", [l1, l2_C1_A7, l3_C1], "read")
        prim.add_producer(pe, [produce])
        prim.add_consumer(pe, [consume])
    for i in range(12, 16):
        pe = self.find_processor("PE%02d" % i)
        l1 = self.find_communication_resource("L1_PE%02d" % i)
        produce = CommunicationPhase(
            "produce", [l1, l2_C1_A15, l3_C1], "write"
        )
        consume = CommunicationPhase("consume", [l1, l2_C1_A15, l3_C1], "read")
        prim.add_producer(pe, [produce])
        prim.add_consumer(pe, [consume])
    self.add_primitive(prim)

    # RAM
    ram = Storage(
        "RAM",
        fd_ram,
        read_latency=120,
        write_latency=120,
        read_throughput=8,
        write_throughput=8,
    )
    self.add_communication_resource(ram)

    prim = Primitive("prim_RAM")
    for i in range(0, 4):
        pe = self.find_processor("PE%02d" % i)
        l1 = self.find_communication_resource("L1_PE%02d" % i)
        produce = CommunicationPhase(
            "produce", [l1, l2_C0_A7, l3_C0, ram], "write"
        )
        consume = CommunicationPhase(
            "consume", [l1, l2_C0_A7, l3_C0, ram], "read"
        )
        prim.add_producer(pe, [produce])
        prim.add_consumer(pe, [consume])
    for i in range(4, 8):
        pe = self.find_processor("PE%02d" % i)
        l1 = self.find_communication_resource("L1_PE%02d" % i)
        produce = CommunicationPhase(
            "produce", [l1, l2_C0_A15, l3_C0, ram], "write"
        )
        consume = CommunicationPhase(
            "consume", [l1, l2_C0_A15, l3_C0, ram], "read"
        )
        prim.add_producer(pe, [produce])
        prim.add_consumer(pe, [consume])
    for i in range(8, 12):
        pe = self.find_processor("PE%02d" % i)
        l1 = self.find_communication_resource("L1_PE%02d" % i)
        produce = CommunicationPhase(
            "produce", [l1, l2_C1_A7, l3_C1, ram], "write"
        )
        consume = CommunicationPhase(
            "consume", [l1, l2_C1_A7, l3_C1, ram], "read"
        )
        prim.add_producer(pe, [produce])
        prim.add_consumer(pe, [consume])
    for i in range(12, 16):
        pe = self.find_processor("PE%02d" % i)
        l1 = self.find_communication_resource("L1_PE%02d" % i)
        produce = CommunicationPhase(
            "produce", [l1, l2_C1_A15, l3_C1, ram], "write"
        )
        consume = CommunicationPhase(
            "consume", [l1, l2_C1_A15, l3_C1, ram], "read"
        )
        prim.add_producer(pe, [produce])
        prim.add_consumer(pe, [consume])
    self.add_primitive(prim)
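# Hypothetical usage sketch (module path assumed, not shown above): the
# constructor takes no arguments, so instantiating the class is enough to get
# the full 16-core platform with its cache and RAM primitives.
# from mocasin.platforms.exynos import Exynos2Chips  # assumed path
# exynos = Exynos2Chips()
# big_core = exynos.find_processor("PE04")  # an ARM_CORTEX_A15 core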
def __init__(self, name, num_clusters, cluster_size, hierarchy_levels):
    """Initialize the platform.

    Generate a hierarchy of clusters connected by buses. On the lowest
    level, this generates a shared memory, `cluster_size` processors, and a
    bus. On the intermediate levels, this produces a shared memory and a
    bus that connects to all buses of the lower level clusters. On the top
    level, this generates a bus as well as the global shared RAM and
    connects the bus to the lower level clusters. All in all, this forms a
    tree with a depth of `hierarchy_levels`. The root node has
    `num_clusters` children and all other nodes have `cluster_size`
    children. The leaf nodes are processors and all other nodes are buses.

    :param str name: platform name
    :param int num_clusters: number of top-level clusters
    :param int cluster_size: number of nodes per cluster
    :param int hierarchy_levels: number of levels in the hierarchy
    """
    super().__init__(name)

    if hierarchy_levels < 2:
        raise ValueError("Number of hierarchy levels needs to be at least 2")

    level = 0  # current level in the hierarchy
    pe_id = 0  # current pe id
    buses = {}  # all buses

    policy = SchedulingPolicy("FIFO", 100)

    # start from the lowest level and generate all PEs
    buses[level] = []
    num_l0_clusters = num_clusters * cluster_size ** (hierarchy_levels - 2)
    for cluster_id in range(0, num_l0_clusters):
        cluster_name = "l%d_c%d" % (level, cluster_id)

        fd_pes = FrequencyDomain("fd_" + cluster_name, 500000000)

        bus = Bus("bus_" + cluster_name, fd_pes, 1, 8)
        buses[level].append(bus)
        self.add_communication_resource(bus)

        for j in range(0, cluster_size):
            name = "PE%02d" % (pe_id)
            pe_id += 1
            processor = Processor(name, "RISC", fd_pes, 100, 100)
            bus.connect_processor(processor)
            self.add_processor(processor)
            self.add_scheduler(
                Scheduler("shared_" + name, [processor], policy)
            )

        mem = Storage("shared_" + cluster_name, fd_pes, 1, 1, 8, 8)
        self.add_communication_resource(mem)
        bus.connect_storage(mem)

    level += 1

    # intermediate levels
    while level < hierarchy_levels - 1:
        buses[level] = []
        bus_id = 0
        num_lx_clusters = num_clusters * cluster_size ** (
            hierarchy_levels - level - 2
        )
        for cluster_id in range(0, num_lx_clusters):
            cluster_name = "l%d_c%d" % (level, cluster_id)

            fd = FrequencyDomain("fd_" + cluster_name, 500000000)

            mem = Storage("shared_" + cluster_name, fd, 3, 4, 8, 8)
            self.add_communication_resource(mem)

            bus = Bus("bus_" + cluster_name, fd, 2, 8)
            self.add_communication_resource(bus)
            bus.connect_storage(mem)
            buses[level].append(bus)

            for i in range(0, cluster_size):
                bus.connect_master_bus(buses[level - 1][bus_id])
                bus_id += 1

        level += 1

    # top level
    fd_top = FrequencyDomain("fd_l%d" % level, 250000000)

    bus_top = Bus("bus_l%d" % level, fd_top, 3, 8)
    buses[level] = [bus_top]
    self.add_communication_resource(bus_top)
    for b in buses[level - 1]:
        bus_top.connect_master_bus(b)

    fd_ram = FrequencyDomain("fd_ram", 100000000)
    ram = Storage("RAM", fd_ram, 5, 7, 16, 12)
    self.add_communication_resource(ram)
    bus_top.connect_storage(ram)

    bus_list = []
    for l in range(0, hierarchy_levels):
        bus_list.extend(buses[l])
    primitives = primitives_from_buses(bus_list)
    for p in primitives:
        self.add_primitive(p)
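# Illustration (not from the original source): processor count implied by the
# constructor above. With num_clusters=2, cluster_size=4, hierarchy_levels=3,
# the lowest level has 2 * 4**(3 - 2) = 8 clusters of 4 PEs, i.e. 32 PEs total.
num_clusters, cluster_size, hierarchy_levels = 2, 4, 3
num_l0_clusters = num_clusters * cluster_size ** (hierarchy_levels - 2)
assert num_l0_clusters * cluster_size == 32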