def __do_process(self, comm, queue_idx):
    """Process one submitted command on the device side.

    Charges a fixed data-access CPU cost, optionally performs the
    in-storage compute (when ``comm.req_isp``), sends the (possibly
    reduced) result back to the host endpoint, and finally places a
    ``CompletionCommand`` on the completion queue ``queue_idx``.

    SimPy generator — must be driven via ``env.process``.

    :param comm: submission command carrying ``id``, ``length``,
        ``req_isp``, ``selectivity`` and ``timestamp``
        (project type; exact schema per SubmissionCommand).
    :param queue_idx: index of the queue (and completion queue) this
        command was submitted on.
    """
    # Fixed CPU cost for accessing the data; core is unpinned (None).
    event = yield self.__env.process(
        self.__processor.submit(
            Processor.Task(comm.id + "-ACCESS", None,
                           self._CPU_CYCLES_PER_DATA_ACCESS)))
    yield event
    ret_isp = False
    ret_length = comm.length
    if comm.req_isp:
        # In-storage processing: compute cost scales with the input
        # length; high priority so it preempts background work.
        event = yield self.__env.process(
            self.__processor.submit(
                Processor.Task(
                    comm.id + "-COMPUTE",
                    None,
                    comm.length * self._CPU_CYCLES_PER_KILOBYTE_COMPUTE,
                    Processor.Task.PRIORITY_HIGH)))
        # core_id is only bound on this branch; it is read again below
        # only under `if ret_isp`, which is True exactly here.
        core_id = yield event
        ret_isp = True
        # Selectivity reduces the data volume returned to the host
        # (e.g. selectivity 0.5 halves it... NOTE(review): presumably
        # a fraction in [0, 1] — confirm against SubmissionCommand).
        ret_length = comm.length * (1 - comm.selectivity)
    # Ship the (reduced) result back to the host endpoint.
    yield self.__env.process(
        self.__network_send(HostPlatform.ENDPOINT_NAME,
                            DataPacket(comm.id, queue_idx, ret_length)))
    # place the CompletionCommand on the completion queue
    # of the same core
    comp_comm = CompletionCommand(id=comm.id,
                                  length=ret_length,
                                  req_isp=comm.req_isp,
                                  ret_isp=ret_isp,
                                  submit_timestamp=comm.timestamp)
    yield self.__completion_queues[queue_idx].put(comp_comm)
    if ret_isp:
        self.__logger.bind(by_core=core_id).trace(comp_comm)
def __process_command(self, queue_idx):
    """Consume completions from completion queue ``queue_idx`` forever.

    For commands that requested in-storage processing but were NOT
    processed on the device (``req_isp and not ret_isp``), performs the
    compute on the host core matching ``queue_idx``. Triggers the
    shutdown hook once every submitted command has completed.

    SimPy generator — must be driven via ``env.process``.

    NOTE(review): this reads ``self.completion_queues`` (public) while
    the submit path uses ``self.__completion_queues`` (name-mangled) —
    verify both resolve to the same queues.

    :param queue_idx: index of the completion queue (and the host core)
        this worker services.
    """
    while True:
        # Queue slot used as a context manager so the get-request is
        # cancelled cleanly if this process is interrupted.
        with self.completion_queues[queue_idx].get() as slot:
            comm = yield slot
            # Fetch the data that arrived for this command id earlier
            # (stored by the network-receive path).
            data_packet = self.read_cache(comm.id)
            if comm.req_isp and not comm.ret_isp:
                # Device skipped ISP: do the compute host-side, pinned
                # to the core that owns this queue, at high priority.
                event = yield self.__env.process(
                    self.__processor.submit(
                        Processor.Task(
                            comm.id + "-COMPUTE",
                            queue_idx,
                            data_packet.length * self._CPU_CYCLES_PER_KILOBYTE_COMPUTE,
                            Processor.Task.PRIORITY_HIGH)))
                core_id = yield event
                # NOTE(review): runtime invariant checked with `assert`
                # — stripped under `python -O`; consider raising instead.
                assert core_id == queue_idx  # nosec
                self.__logger.bind(by_core=core_id).trace(comm)
            self.summary["num_commands_completed"] += 1
            # All submitted commands done AND the submission phase has
            # finished -> signal simulation shutdown.
            if (self.summary["num_commands_submitted"] ==
                    self.summary["num_commands_completed"] and
                    self.__all_command_submitted.processed):
                self.__shutdown_hook.succeed()
def __app(self, app_idx, comms_per_app, event_submitted):
    """Drive one simulated application.

    Generates a random number of commands (drawn from the
    ``comms_per_app`` [min, max) range), charges the CPU cost of
    producing each payload, and submits each command to a randomly
    chosen submission queue. Fires ``event_submitted`` once all
    commands of this application have been enqueued.

    SimPy generator — must be driven via ``env.process``.

    :param app_idx: numeric id of this application, used in command ids.
    :param comms_per_app: two-element (min, max) bound on the command
        count for one application.
    :param event_submitted: SimPy event succeeded after the last submit.
    """
    # NOTE(review): comment says 4 MB, value is 4e3 — units appear to be
    # KB (cycle constants are PER_KILOBYTE), yet this same value feeds
    # "total_data_bytes" below; confirm the intended unit.
    data_length = 4 * 1e3  # 4 MB data size
    lower = comms_per_app[0]
    upper = comms_per_app[1]
    command_count = self.__cryptogen.randrange(upper - lower) + lower
    for idx in range(command_count):
        # Spread load by picking the target queue/core at random.
        queue_idx = self.__cryptogen.randrange(self.__processor.num_cores)
        comm_idx = "{:d}-{:d}-{:d}".format(app_idx, idx, queue_idx)

        # Model the local CPU cost of generating the workload payload.
        generate_task = Processor.Task(
            comm_idx + "-GENERATE",
            queue_idx,
            data_length * self._CPU_CYCLES_PER_KILOBYTE_GENERATE)
        generated = yield self.__env.process(
            self.__processor.submit(generate_task))
        yield generated

        # receive the workload by remote hosts
        # yield self.__env.timeout(data_length *
        #     self._RECEIVE_SUBMISSION_COMM_DELAY_PER_KILOBYTE)

        # Submit with ISP requested and a fixed selectivity of 0.5.
        submit_comm = SubmissionCommand(comm_idx, data_length, True, 0.5,
                                        self.__env.now)
        yield self.submission_queues[queue_idx].put(submit_comm)
        self.summary["num_commands_submitted"] += 1
        self.summary["total_data_bytes"] += data_length
    event_submitted.succeed()
def network_receive(self, data_packet):
    """Charge the CPU cost of receiving ``data_packet`` from the fabric.

    Submits a high-priority RECEIVE task, pinned to the packet's queue's
    core, whose cycle count scales with the packet length.

    SimPy generator — must be driven via ``env.process``.

    :param data_packet: incoming packet carrying ``id``, ``queue_idx``
        and ``length``.
    :returns: the processor event for the submitted RECEIVE task.
    """
    receive_task = Processor.Task(
        data_packet.id + "-RECEIVE",
        data_packet.queue_idx,
        data_packet.length * self._CPU_CYCLES_PER_KILOBYTE_RECEIVE,
        Processor.Task.PRIORITY_HIGH)
    completion = yield self.__env.process(
        self.__processor.submit(receive_task))
    return completion
def __network_send(self, endpoint_name, data_packet):
    """Send ``data_packet`` to ``endpoint_name`` over the network fabric.

    Reserves fabric bandwidth for the packet length, then overlaps the
    sender CPU cost, the receiver CPU cost, and the wire transmission,
    waiting for all three to finish. Bandwidth is always released, and
    the packet is always memorized at the endpoint, via ``finally``.

    SimPy generator — must be driven via ``env.process``.

    :param endpoint_name: name to resolve against the network fabric.
    :param data_packet: packet carrying ``id`` and ``length``.
    """
    endpoint = self.__network_fabric.get_endpoint(endpoint_name)
    # Reserve fabric capacity before starting any of the three legs.
    yield self.__env.process(
        self.__network_fabric.request(data_packet.length))
    try:
        # Sender-side CPU cost (unpinned core, high priority).
        send_event = yield self.__env.process(
            self.__processor.submit(
                Processor.Task(
                    data_packet.id + "-SEND", None,
                    data_packet.length * self._CPU_CYCLES_PER_KILOBYTE_SEND,
                    Processor.Task.PRIORITY_HIGH)))
        # Receiver-side CPU cost, modeled by the remote endpoint.
        receive_event = yield self.__env.process(
            endpoint.network_receive(data_packet))
        # Wire transfer; note: not yielded here — started as a process
        # so it runs concurrently and is joined below.
        transmit_event = self.__env.process(
            self.__network_fabric.transmit(data_packet.length))
        # Wait for all three legs (SimPy AllOf via `&`).
        yield send_event & receive_event & transmit_event
    finally:
        # NOTE(review): memorize() runs even if a leg above failed or
        # the process was interrupted — confirm that delivering the
        # packet on failure is intentional.
        endpoint.memorize(data_packet.id, data_packet)
        # Always return the reserved bandwidth to the fabric.
        yield self.__env.process(
            self.__network_fabric.release(data_packet.length))