def handle(self, args):
    """
    Handles the multiprocess execution of the simulations.

    Runs every experiment returned by ``get_experiments`` in a worker pool
    of ``args.tasks`` processes, collects results via ``on_result``, then
    notifies and returns a human-readable summary notice.
    """
    # Disable logging during multiprocess execution so workers stay quiet.
    logger = logging.getLogger('cloudscope.simulation')
    logger.disabled = True

    # Load the experiments and their options
    experiments = self.get_experiments(args)

    # Open an output file for results if one isn't specified
    if args.output is None:
        path = "multisim-results-{}.json".format(
            time.strftime("%Y%m%d%H%M%S", time.localtime())
        )
        args.output = open(path, 'w+')

    # Save the output as a class property so it can be accessed by on_result
    self.output = args.output

    # Data structures for holding results (filled in by on_result callbacks)
    self.deltas = []
    self.errors = []

    # Create a pool of processes and begin to execute experiments
    with Timer() as timer:
        pool = mp.Pool(processes=args.tasks)
        tasks = [
            pool.apply_async(runner, (i + 1, x), k, callback=self.on_result)
            for i, (x, k) in enumerate(experiments)
        ]

        # Close the pool and join
        pool.close()
        pool.join()

        # Total per-task compute time; wall-clock time is held by `timer`.
        duration = sum(self.deltas)

    # If traceback, dump the errors out.
    if args.traceback:
        for idx, error in enumerate(self.errors):
            banner = "=" * 36
            # BUG FIX: .format() was previously chained onto print()'s
            # return value (None in Python 3), raising AttributeError.
            # The format call belongs on the string itself.
            print("{}\nError #{}:\n{}\n\n{}\n".format(
                banner, idx + 1, banner, error['traceback']
            ))

    # Construct complete message for notification
    notice = (
        "{} simulations ({} compute time, {} errors) run by {} tasks in {}\n"
        "Results written to {}"
    ).format(
        len(tasks), humanizedelta(seconds=duration) or "0 seconds",
        len(self.errors), args.tasks, timer, args.output.name
    )

    self.notify(args.notify, notice, self.errors)
    return notice
def run(self):
    """
    Reads in the accesses (which must be ordered by timestep) and updates
    the environment with delays and calls as required. Note this process
    is fundamentally different than the super class.
    """
    # Consume the ordered trace; accesses stop once the reader is drained.
    for trace in self.reader:
        # Fold the newly read access into the workload state.
        self.update(trace)

        # Sleep in simulation time for the trace-specified delay.
        delay = self.wait()
        yield self.env.timeout(delay)

        # Perform the access against the device.
        performed = self.access()
        assert performed is not None

        # Debug-log the access, where it happened, and how long we waited.
        message = "{} access by {} on {} (at {}) after {}".format(
            performed, self.name, self.device, self.device.location,
            humanizedelta(milliseconds=delay)
        )
        self.sim.logger.debug(message)
def run(self):
    """
    The workload generating action that is correct for most subclasses,
    so long as they modify the update, wait, and access methods correctly.
    This method routinely triggers accesses, updates the state of the
    workload, and logs the progress of the workload.
    """
    while True:
        # Pause for the next access interval.
        interval = self.wait()
        yield self.env.timeout(interval)

        # Trigger the access itself.
        result = self.access()
        assert result is not None

        # Debug-log the access and how long we waited for it.
        self.sim.logger.debug(
            "{} access by {} on {} (at {}) after {}".format(
                result, self.name, self.device, self.location,
                humanizedelta(milliseconds=interval)
            )
        )

        # Advance the workload state for the next round.
        self.update()
def run(self):
    """
    The workload generating action that is correct for most subclasses,
    so long as they modify the update, wait, and access methods correctly.
    This method routinely triggers accesses, updates the state of the
    workload, and logs the progress of the workload.
    """
    # Loop forever: wait, access, log, then update workload state.
    while True:
        pause = self.wait()
        yield self.env.timeout(pause)

        access = self.access()
        assert access is not None

        details = "{} access by {} on {} (at {}) after {}".format(
            access, self.name, self.device, self.location,
            humanizedelta(milliseconds=pause)
        )
        self.sim.logger.debug(details)

        self.update()
def run(self):
    """
    Reads in the accesses (which must be ordered by timestep) and updates
    the environment with delays and calls as required. Note this process
    is fundamentally different than the super class.
    """
    # Iterate the ordered trace until the reader is exhausted.
    for record in self.reader:
        # Push the new access into the workload state first.
        self.update(record)

        # Advance the simulation clock by the trace-derived wait.
        pause = self.wait()
        yield self.env.timeout(pause)

        # Now execute the access on the device.
        access = self.access()
        assert access is not None

        # Debug-log what was accessed and the wait that preceded it.
        self.sim.logger.debug(
            "{} access by {} on {} (at {}) after {}".format(
                access, self.name, self.device, self.device.location,
                humanizedelta(milliseconds=pause)
            )
        )
def handle(self, args):
    """
    Handles the multiprocess execution of the simulations.

    Dispatches each experiment to a pool of ``args.tasks`` worker
    processes, gathers results through the ``on_result`` callback, and
    returns a summary notice (also sent via ``notify``).
    """
    # Disable logging during multiprocess execution to avoid interleaved
    # log output from the worker processes.
    logger = logging.getLogger('cloudscope.simulation')
    logger.disabled = True

    # Load the experiments and their options
    experiments = self.get_experiments(args)

    # Open an output file for results if one isn't specified
    if args.output is None:
        path = "multisim-results-{}.json".format(
            time.strftime("%Y%m%d%H%M%S", time.localtime()))
        args.output = open(path, 'w+')

    # Save the output as a class property so it can be accessed by on_result
    self.output = args.output

    # Data structures for holding results (populated by on_result)
    self.deltas = []
    self.errors = []

    # Create a pool of processes and begin to execute experiments
    with Timer() as timer:
        pool = mp.Pool(processes=args.tasks)
        tasks = [
            pool.apply_async(runner, (i + 1, x), k, callback=self.on_result)
            for i, (x, k) in enumerate(experiments)
        ]

        # Close the pool and join
        pool.close()
        pool.join()

        # Sum of per-simulation compute times (timer tracks wall clock).
        duration = sum(self.deltas)

    # If traceback, dump the errors out.
    if args.traceback:
        for idx, error in enumerate(self.errors):
            banner = "=" * 36
            # BUG FIX: .format() was chained onto print()'s return value
            # (None), which raises AttributeError in Python 3. Format the
            # string first, then print it.
            print("{}\nError #{}:\n{}\n\n{}\n".format(
                banner, idx + 1, banner, error['traceback']))

    # Construct complete message for notification
    notice = (
        "{} simulations ({} compute time, {} errors) run by {} tasks in {}\n"
        "Results written to {}").format(
            len(tasks), humanizedelta(seconds=duration) or "0 seconds",
            len(self.errors), args.tasks, timer, args.output.name)

    self.notify(args.notify, notice, self.errors)
    return notice
def write(self, fobj):
    """
    Writes out a complete trace to the passed in file-like object.
    Returns a Counter of summary statistics about the rows written
    (rows, reads, writes, objects, devices, timesteps, and means).
    """
    # Counts for the trace being written.
    counts = Counter()
    replicas = defaultdict(Counter)
    max_time_step = 0

    # NOTE: the original used enumerate() but never consumed the index.
    for access in self:
        # Count the number of rows
        counts['rows'] += 1

        # Count the number of access types
        if access.method == READ:
            counts['reads'] += 1
        if access.method == WRITE:
            counts['writes'] += 1

        # Count the number of objects and replicas
        replicas[access.replica][access.object] += 1

        # Determine the maximum timestep (convert once, compare once).
        timestep = int(access.timestep)
        if timestep > max_time_step:
            max_time_step = timestep

        # Write the object to disk
        fobj.write("\t".join(access) + "\n")

    # Build the set of all object names once; it is reused below for both
    # the object count and the per-object device mean.
    all_objects = {
        key for objects in replicas.values() for key in objects
    }

    # Update the counts with globals
    counts["objects"] = len(all_objects)
    counts["devices"] = len(replicas)
    counts["timesteps"] = max_time_step
    counts["realtime"] = humanizedelta(milliseconds=max_time_step)
    counts["mean_objects_per_device"] = int(mean([
        len(objects) for objects in replicas.values()
    ]))
    counts["mean_accesses_per_device"] = int(mean([
        sum(objects.values()) for objects in replicas.values()
    ]))
    counts["mean_accesses_per_object"] = int(mean([
        count
        for objects in replicas.values()
        for count in objects.values()
    ]))
    counts["mean_devices_per_object"] = int(mean([
        sum(1 for objects in replicas.values() if name in objects)
        for name in all_objects
    ]))

    return counts
def write(self, fobj):
    """
    Writes out a complete trace to the passed in file-like object.
    Returns a Counter of summary statistics about the trace: row and
    access-type counts plus object/device/timestep aggregates.
    """
    # Counts for the trace being written.
    counts = Counter()
    replicas = defaultdict(Counter)
    max_time_step = 0

    # NOTE: enumerate() was dropped — the index was never used.
    for access in self:
        # Count the number of rows
        counts['rows'] += 1

        # Count the number of access types
        if access.method == READ:
            counts['reads'] += 1
        if access.method == WRITE:
            counts['writes'] += 1

        # Count the number of objects and replicas
        replicas[access.replica][access.object] += 1

        # Determine the maximum timestep; convert to int only once.
        timestep = int(access.timestep)
        if timestep > max_time_step:
            max_time_step = timestep

        # Write the object to disk
        fobj.write("\t".join(access) + "\n")

    # Collect every object name once — used for both the object count
    # and the devices-per-object mean (the original rebuilt this set).
    object_names = {
        key for objects in replicas.values() for key in objects
    }

    # Update the counts with globals
    counts["objects"] = len(object_names)
    counts["devices"] = len(replicas)
    counts["timesteps"] = max_time_step
    counts["realtime"] = humanizedelta(milliseconds=max_time_step)
    counts["mean_objects_per_device"] = int(
        mean([len(objects) for objects in replicas.values()]))
    counts["mean_accesses_per_device"] = int(
        mean([sum(objects.values()) for objects in replicas.values()]))
    counts["mean_accesses_per_object"] = int(
        mean([
            count
            for objects in replicas.values()
            for count in objects.values()
        ]))
    counts["mean_devices_per_object"] = int(
        mean([
            sum(1 for objects in replicas.values() if name in objects)
            for name in object_names
        ]))

    return counts
def run(self):
    """
    Reads in outage events (must be ordered by timestep) and makes the
    connections specified either up or down according to the event state.
    """
    # Number of consecutive events sharing the current timestep.
    streak = 1

    # Consume the ordered outage trace until the reader is exhausted.
    for event in self.reader:
        # Events must arrive in timestep order; a past event is fatal.
        if event.timestep < self.clock:
            raise OutagesException(
                "Unordered outage event '{}' occured at time {}".format(
                    event, self.clock
                )
            )

        # How far in simulation time this event lies ahead of the clock.
        delay = event.timestep - self.clock
        if delay:
            # A new timestep: report the batch that just ended, reset the
            # streak, and wait out the gap in simulation time.
            self.sim.logger.info(
                "{} connections {} for {}".format(
                    streak, event.state, humanizedelta(milliseconds=delay)
                )
            )
            streak = 1
            yield self.env.timeout(delay)
        else:
            # Same timestep as the previous event; extend the batch.
            streak += 1

        # Sync our internal clock to the simulation and count the event.
        self.clock = self.env.now
        self.count += 1

        # Flip the connection according to the event state.
        # (sic: "get_connnection" is the method's spelling elsewhere.)
        conn = self.get_connnection(event.source, event.target)
        if event.state == ONLINE:
            conn.up()
        if event.state == OUTAGE:
            conn.down()

        self.sim.logger.debug(
            "{} is now {}".format(conn, event.state)
        )
def run(self):
    """
    Reads in outage events (must be ordered by timestep) and makes the
    connections specified either up or down according to the event state.
    """
    # How many consecutive events landed on the same timestep.
    batched = 1

    for event in self.reader:
        # Reject out-of-order events: the trace must be timestep-sorted.
        if event.timestep < self.clock:
            raise OutagesException(
                "Unordered outage event '{}' occured at time {}".format(
                    event, self.clock))

        gap = event.timestep - self.clock
        if gap == 0:
            # Still on the same timestep — grow the batch.
            batched += 1
        else:
            # New timestep: log the finished batch and advance the sim.
            self.sim.logger.info("{} connections {} for {}".format(
                batched, event.state, humanizedelta(milliseconds=gap)))
            batched = 1
            yield self.env.timeout(gap)

        # Keep the internal clock and event counter current.
        self.clock = self.env.now
        self.count += 1

        # Apply the event to the named connection (method name spelled
        # with three n's elsewhere in the class — kept as-is).
        conn = self.get_connnection(event.source, event.target)
        if event.state == ONLINE:
            conn.up()
        if event.state == OUTAGE:
            conn.down()

        self.sim.logger.debug("{} is now {}".format(conn, event.state))
def run(self):
    """
    The action that generates outages on the passed in set of connections.
    """
    # Alternate forever between states, holding each for its duration.
    while True:
        # Length of the current online/outage period.
        span = self.duration()

        # Info-log the state and how long it will be held.
        self.sim.logger.info(
            "{} connections {} for {}".format(
                len(self.connections), self.state,
                humanizedelta(milliseconds=span)
            )
        )

        # Hold the state for the full duration in simulation time.
        yield self.env.timeout(span)

        # Flip to the next state.
        self.update()
def run(self):
    """
    The action that generates outages on the passed in set of connections.
    """
    while True:
        # Determine how long the current state persists.
        period = self.duration()

        # Announce the state and its duration at info level.
        message = "{} connections {} for {}".format(
            len(self.connections), self.state,
            humanizedelta(milliseconds=period))
        self.sim.logger.info(message)

        # Remain in this state for the whole period, then transition.
        yield self.env.timeout(period)
        self.update()
def serialize(self):
    """
    Returns a dictionary of the timer's state suitable for serialization:
    start/finish markers plus a humanized elapsed-time string.
    """
    # Humanize the elapsed seconds before packing the payload.
    elapsed = humanizedelta(seconds=self.elapsed)
    return {
        'started': self.started,
        'finished': self.finished,
        'elapsed': elapsed,
    }
def __str__(self):
    """
    Renders the timer as its humanized elapsed time.
    """
    seconds = self.elapsed
    return humanizedelta(seconds=seconds)