def get_trace(self, process):
    """Yield the hard-coded trace segments for process "a" or "b".

    Args:
        process (str): Name of the process; must be "a" or "b".

    Yields:
        A ComputeSegment followed by a two-token write (process "a") or
        a two-token read (process "b") on channel "c".

    Raises:
        RuntimeError: If the process name is neither "a" nor "b".
    """
    if process not in ("a", "b"):
        raise RuntimeError(f"Unexpected process name ({process})")
    # both processes start with an identical compute phase
    yield ComputeSegment(
        processor_cycles={"proc_type_0": 1200, "proc_type_1": 1000}
    )
    # "a" produces the tokens on channel "c" that "b" consumes
    if process == "a":
        yield WriteTokenSegment(channel="c", num_tokens=2)
    else:
        yield ReadTokenSegment(channel="c", num_tokens=2)
def get_trace(self, process): """Get the trace for a specific process/actor in the dataflow app Args: process (str): Name of the process to get a trace for Yields: ComputeSegment: if the next segment is a compute segment ReadTokenSegment: if the next segment is a read segment WriteTokenSegment: if the next segment is a write segment """ # use an exit stack to keep track of all files we open with contextlib.ExitStack() as stack: # open all matching trace files for the different processor types processor_types, trace_files = self._open_trace_files( stack, process) # iterate over all the lines in all the files simultaneously for lines in zip(*trace_files): log.debug(f"reading next trace lines for process {process}") # check if we received enough lines if len(lines) != len(trace_files): raise RuntimeError( f"The trace files for process {process} do not match!") marker = self._get_element(lines, 0) if marker == "m": yield ComputeSegment( self._get_processor_cycles(processor_types, lines, 2)) elif marker == "r": yield ComputeSegment( self._get_processor_cycles(processor_types, lines, 4)) yield ReadTokenSegment( channel=self._get_element(lines, 1), num_tokens=int(self._get_element(lines, 3)), ) elif marker == "w": yield ComputeSegment( self._get_processor_cycles(processor_types, lines, 3)) yield WriteTokenSegment( channel=self._get_element(lines, 1), num_tokens=int(self._get_element(lines, 2)), ) elif marker == "e": return else: raise RuntimeError("Encountered an unknown line marker!")
def get_trace(self, process):
    """Get the trace for a specific actor in the SDF3 application

    Args:
        process (str): Name of the actor to get a trace for

    Yields:
        ComputeSegment, ReadTokenSegment, or WriteTokenSegment: The next
        segment in the process trace
    """
    rules = self._firing_rules[process]

    # emit the initial tokens (also called delays) before any firing
    for chan, tokens in rules.initial_writes.items():
        yield WriteTokenSegment(channel=chan, num_tokens=tokens)

    # every firing reads its input tokens, computes, then writes its
    # output tokens
    firing_count = self._repetition_vector[process] * self._repetitions
    for _ in range(firing_count):
        for chan, tokens in rules.reads.items():
            yield ReadTokenSegment(channel=chan, num_tokens=tokens)
        yield ComputeSegment(
            processor_cycles=self._actor_processor_cycles[process])
        for chan, tokens in rules.writes.items():
            yield WriteTokenSegment(channel=chan, num_tokens=tokens)
def get_trace(self, proc_name):
    """Yield ``self.max_length`` compute segments for ``proc_name``.

    Each segment carries one cycle count per core, obtained from
    ``self.lookup((core, proc_name))``.
    """
    for _ in range(self.max_length):
        # build a fresh cycle table for every segment
        cycles = {core: self.lookup((core, proc_name))
                  for core in self.cores}
        yield ComputeSegment(cycles)
def get_trace(self, process):
    """Yield a fixed, hard-coded segment sequence for the given process.

    Processes "foo", "bar", and "baz" each have their own scripted
    sequence; any other name yields nothing.
    """
    # scripted traces: (segment class, constructor args) per step
    script = {
        "foo": [
            (ReadTokenSegment, (None, None)),
            (WriteTokenSegment, (None, None)),
        ],
        "bar": [
            (ReadTokenSegment, (None, None)),
            (ComputeSegment, ({"A": 100, "B": 1000},)),
            (WriteTokenSegment, (None, None)),
            (ComputeSegment, ({"A": 50, "B": 500},)),
            (ComputeSegment, ({"A": 30, "B": 300},)),
            (ReadTokenSegment, (None, None)),
            (ComputeSegment, ({"A": 20, "B": 200},)),
            (WriteTokenSegment, (None, None)),
        ],
        "baz": [
            (ReadTokenSegment, (None, None)),
            (ComputeSegment, ({"A": 100, "B": 1000},)),
            (WriteTokenSegment, (None, None)),
        ],
    }
    for segment_cls, args in script.get(process, []):
        yield segment_cls(*args)
def get_trace(self, process):
    """Get the trace for a specific task in the TGFF graph

    Args:
        process (str): Name of the task to get a trace for

    Yields:
        ComputeSegment, ReadTokenSegment, or WriteTokenSegment: The next
        segment in the process trace

    Raises:
        RuntimeError: If ``process`` is not a task of the TGFF graph.
    """
    task_name = process
    if task_name not in self._tgff_graph.tasks:
        raise RuntimeError(f"Unknown task! ({process})")

    # prepare a dict of computation cycles for all processor types
    processor_cycles = {}
    for processor in self._processor_list.values():
        processor_cycles[processor.type] = processor.get_operation(
            self._tgff_graph.tasks[task_name])

    # The input/output channel sets are loop-invariant, so determine
    # them once instead of rescanning all channels on every repetition.
    # properties[1] is the name of the channel's sink task and
    # properties[0] the name of its source task.
    # FIXME: This mechanism should be simplified or the variable
    # named property
    input_channels = [
        name for name, properties in self._tgff_graph.channels.items()
        if properties[1] == task_name
    ]
    output_channels = [
        name for name, properties in self._tgff_graph.channels.items()
        if properties[0] == task_name
    ]

    # iterate over all repetitions
    for _ in range(self._repetitions):
        # First, the task reads from all input channels
        for channel_name in input_channels:
            yield ReadTokenSegment(channel=channel_name, num_tokens=1)
        # Then, it computes
        yield ComputeSegment(processor_cycles)
        # Finally, it writes to all output channels
        for channel_name in output_channels:
            yield WriteTokenSegment(channel=channel_name, num_tokens=1)
def preemption_trace_generator(self):
    """Yield a single compute segment covering two processor types."""
    cycles = {"Test": 10, "Test2": 20}
    yield ComputeSegment(cycles)
def write_trace_generator(self):
    """Alternate compute segments and single-token writes on "chan".

    Produces five compute/write pairs with cycle counts 1 through 5.
    """
    for cycles in (1, 2, 3, 4, 5):
        yield ComputeSegment(processor_cycles={"Test": cycles})
        yield WriteTokenSegment(channel="chan", num_tokens=1)
def initial_read_trace_generator(self):
    """Alternate single-token reads on "chan" with compute segments.

    Produces five read/compute pairs with cycle counts 1 through 5.
    """
    cycle_count = 0
    while cycle_count < 5:
        cycle_count += 1
        yield ReadTokenSegment(channel="chan", num_tokens=1)
        yield ComputeSegment(processor_cycles={"Test": cycle_count})
def processing_trace_generator(self):
    """Yield five compute segments with cycle counts 1 through 5."""
    yield from (ComputeSegment({"Test": count}) for count in range(1, 6))