def run_test(self, settings, output_path, print_datafile_loc=False):
    """Run a single test described by *settings* and write its results.

    Copies the settings, runs the configured aggregator, records metadata,
    dumps the result set to *output_path* and formats the output.
    Increments self.tests_run on completion.

    :param settings: test settings object (project type); copied before use
        so the caller's instance is not mutated.
    :param output_path: directory the result data file is dumped into.
    :param print_datafile_loc: when True, log the data file location at
        INFO level instead of DEBUG.
    :raises RuntimeError: if no hosts are configured in the settings.
    """
    # Work on a copy so modifications (e.g. LOG_FILE below) stay local.
    settings = settings.copy()
    settings.load_test()
    res = resultset.new(settings)
    if settings.LOG_FILE is _LOG_DEFER:
        # Log file was requested without a name; derive it from the data
        # file name and attach the handler now that the name is known.
        settings.LOG_FILE = res.dump_filename.replace(res.SUFFIX, ".log")
        loggers.setup_logfile(settings.LOG_FILE)
    record_metadata(res, settings.EXTENDED_METADATA,
                    settings.REMOTE_METADATA)
    if not settings.HOSTS:
        raise RuntimeError("Must specify host (-H option).")
    logger.info("Starting %s test. Expected run time: %d seconds.",
                settings.NAME, settings.TOTAL_LENGTH)
    self.agg = aggregators.new(settings)
    # The aggregator both gathers and post-processes the results; the
    # returned object replaces the bare result set created above.
    res = self.agg.postprocess(self.agg.aggregate(res))
    if self.killed:
        # Aborted mid-run (presumably by a signal handler setting
        # self.killed — confirm against the rest of the class); skip
        # dumping and formatting partial results.
        return
    record_postrun_metadata(res, settings.EXTENDED_METADATA,
                            settings.REMOTE_METADATA)
    res.dump_dir(output_path)
    logger.log(loggers.INFO if print_datafile_loc else loggers.DEBUG,
               "Data file written to %s.", res.dump_filename)
    formatter = formatters.new(settings)
    formatter.format([res])
    self.tests_run += 1
def run_batch(self, batch_name):
    """Run every test expansion of the named batch.

    Resolves the batch definition (applying BATCH_OVERRIDE), expands its
    argument sets and runs each resulting test with pre/monitor/post
    commands, per-test log files and an optional pause between tests.
    Supports resuming a previous run (BATCH_RESUME) by skipping tests
    whose data files already exist, and a dry-run mode (BATCH_DRY).

    :param batch_name: key into self.batches identifying the batch.
    :returns: False if the batch is abstract or disabled; otherwise falls
        off the end (returns None) after running all tests.
    :raises RuntimeError: unknown batch name, missing/invalid resume
        directory, bad output path, or failure to create the output dir.
    """
    if batch_name not in self.batches:
        raise RuntimeError("Can't find batch '%s' to run." % batch_name)
    batch = self.batches[batch_name].copy()
    # Command-line overrides take precedence over the batch file.
    batch.update(self.settings.BATCH_OVERRIDE)

    # A batch declared 'abstract' is not runnable
    if batch.get('abstract', False):
        logger.info(" Batch marked as abstract. Not running.")
        return False
    elif batch.get('disabled', False) or not batch.get('enabled', True):
        logger.info(" Batch disabled.")
        return False

    argsets = self.get_argsets(batch)
    pause = int(batch.get('pause', 0))

    batch_time = None
    if self.settings.BATCH_RESUME is not None and \
            os.path.isdir(self.settings.BATCH_RESUME):
        # We're resuming a batch run. Try to find a data file we can get the
        # original batch run time from.
        for dirpath, dirnames, filenames in os.walk(
                self.settings.BATCH_RESUME):
            datafiles = [f for f in filenames
                         if f.endswith(resultset.SUFFIX)]
            if datafiles:
                f = datafiles[0]
                r = resultset.load(os.path.join(dirpath, f))
                batch_time = r.meta("BATCH_TIME")
                try:
                    # Reuse the original run's UUID so resumed results
                    # group with the earlier ones.
                    self.settings.BATCH_UUID = r.meta("BATCH_UUID")
                    logger.info(" Using previous UUID %s.\n",
                                self.settings.BATCH_UUID)
                except KeyError:
                    pass
                break
        if batch_time is None:
            raise RuntimeError(
                "No data files found in resume directory %s." %
                self.settings.BATCH_RESUME)
    elif self.settings.BATCH_RESUME:
        raise RuntimeError("Batch resume directory %s doesn't exist!\n" %
                           self.settings.BATCH_RESUME)
    else:
        batch_time = self.settings.TIME

    # Used only to warn about duplicate data file names within one run.
    filenames_seen = set()

    for b, settings in self.expand_argsets(batch, argsets, batch_time,
                                           batch_name):
        if 'output_path' in b:
            output_path = clean_path(b['output_path'], allow_dirs=True)
        else:
            output_path = settings.DATA_DIR

        if settings.BATCH_RESUME is not None:
            # When resuming, every output path must live underneath the
            # resume directory; commonprefix of the two absolute paths
            # equals the resume path exactly when that holds.
            if os.path.commonprefix(
                    [os.path.abspath(output_path),
                     os.path.abspath(settings.BATCH_RESUME)]) \
                    != os.path.abspath(settings.BATCH_RESUME):
                raise RuntimeError("Batch-specified output path is not a "
                                   "subdirectory of resume path. Bailing.")
            # Skip tests whose data file already exists from the
            # interrupted run.
            if os.path.exists(
                    os.path.join(
                        output_path,
                        "%s%s" % (settings.DATA_FILENAME,
                                  resultset.SUFFIX))):
                logger.info(" Previous result exists, skipping.")
                continue

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            logger.info(" Would output to: %s.\n", output_path)
        elif not settings.BATCH_DRY and not os.path.exists(output_path):
            try:
                os.makedirs(output_path)
            except OSError as e:
                raise RuntimeError(
                    "Unable to create output path '%s': %s." %
                    (output_path, e))

        commands = self.commands_for(b, settings)

        if not settings.BATCH_DRY:
            # Per-test log file(s); torn down again in the finally block.
            self.logfile = loggers.setup_logfile(os.path.join(
                output_path,
                "%s.log" % settings.DATA_FILENAME),
                level=loggers.INFO, replay=False)
            if b.get('debug_log', False):
                self.logfile_debug = loggers.setup_logfile(
                    os.path.join(output_path,
                                 "%s.debug.log" % settings.DATA_FILENAME),
                    level=loggers.DEBUG, maxlevel=loggers.DEBUG,
                    replay=False)

        if settings.DATA_FILENAME in filenames_seen:
            logger.warning("Filename already seen in this run: %s",
                           settings.DATA_FILENAME)
        filenames_seen.add(settings.DATA_FILENAME)

        self.run_commands(commands, 'pre')
        self.run_commands(commands, 'monitor')
        try:
            if settings.BATCH_VERBOSE:
                if settings.BATCH_DRY:
                    logger.info(" Would run test '%s'.", settings.NAME)
                else:
                    logger.info(" Running test '%s'.", settings.NAME)
                logger.info(" data_filename=%s", settings.DATA_FILENAME)
                for k in sorted(b.keys()):
                    if k.upper() in SETTINGS_PARSER:
                        logger.info(" %s=%s", k, b[k])
            if settings.BATCH_DRY:
                self.tests_run += 1
            else:
                # Load test again with informational=False to enable host
                # lookups and other actions that may fail
                settings.load_test(informational=False)
                self.run_test(settings, output_path)
        except KeyboardInterrupt:
            # Still run the essential post commands before propagating.
            self.run_commands(commands, 'post', essential_only=True)
            raise
        except Exception as e:
            self.run_commands(commands, 'post', essential_only=True)
            logger.exception(" Error running test: %s", str(e))
        else:
            try:
                self.run_commands(commands, 'post')
            except Exception as e:
                # Full post run failed; fall back to the essential ones.
                self.run_commands(commands, 'post',
                                  essential_only=True)
                logger.exception(" Error running post-commands: %s",
                                 str(e))
        finally:
            self.kill_children()
            # Detach and close the per-test log files regardless of
            # how the test ended.
            if self.logfile:
                loggers.remove_log_handler(self.logfile)
                self.logfile.close()
                self.logfile = None
            if self.logfile_debug:
                loggers.remove_log_handler(self.logfile_debug)
                self.logfile_debug.close()
                self.logfile_debug = None

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            logger.info(" Would sleep for %d seconds.", pause)
        elif not settings.BATCH_DRY:
            time.sleep(pause)
def __call__(self, parser, namespace, values, option_string=None):
    """argparse action: record the log file path on the namespace.

    With no value given, store the _LOG_DEFER sentinel so the log file
    name can be derived later; with a value, attach the log handler
    immediately and store the path.
    """
    if values is None:
        # No filename supplied on the command line — defer setup.
        setattr(namespace, self.dest, _LOG_DEFER)
        return
    # Filename supplied: open the log file right away, then record it.
    loggers.setup_logfile(values)
    setattr(namespace, self.dest, values)
def __call__(self, parser, namespace, values, option_string=None):
    """argparse action: attach a log file handler and store its path."""
    logfile_path = values
    # Set up the handler first, then record the path on the namespace.
    loggers.setup_logfile(logfile_path)
    setattr(namespace, self.dest, logfile_path)
def run_batch(self, batch_name):
    """Run every test expansion of the named batch.

    NOTE(review): this appears to be a second copy of run_batch with the
    same logic as the one earlier in the file — confirm whether both are
    intended to exist.

    Resolves the batch definition (applying BATCH_OVERRIDE), expands its
    argument sets and runs each resulting test with pre/monitor/post
    commands, per-test log files and an optional pause between tests.
    Supports resuming a previous run (BATCH_RESUME) and a dry-run mode
    (BATCH_DRY).

    :param batch_name: key into self.batches identifying the batch.
    :returns: False if the batch is abstract or disabled; otherwise falls
        off the end (returns None) after running all tests.
    :raises RuntimeError: unknown batch name, missing/invalid resume
        directory, bad output path, or failure to create the output dir.
    """
    if batch_name not in self.batches:
        raise RuntimeError("Can't find batch '%s' to run." % batch_name)
    batch = self.batches[batch_name].copy()
    # Command-line overrides take precedence over the batch file.
    batch.update(self.settings.BATCH_OVERRIDE)

    # A batch declared 'abstract' is not runnable
    if batch.get('abstract', False):
        logger.info(" Batch marked as abstract. Not running.")
        return False
    elif batch.get('disabled', False) or not batch.get('enabled', True):
        logger.info(" Batch disabled.")
        return False

    argsets = self.get_argsets(batch)
    pause = int(batch.get('pause', 0))

    batch_time = None
    if self.settings.BATCH_RESUME is not None and \
            os.path.isdir(self.settings.BATCH_RESUME):
        # We're resuming a batch run. Try to find a data file we can get the
        # original batch run time from.
        for dirpath, dirnames, filenames in os.walk(
                self.settings.BATCH_RESUME):
            datafiles = [f for f in filenames
                         if f.endswith(resultset.SUFFIX)]
            if datafiles:
                f = datafiles[0]
                r = resultset.load(os.path.join(dirpath, f))
                batch_time = r.meta("BATCH_TIME")
                try:
                    # Reuse the original run's UUID so resumed results
                    # group with the earlier ones.
                    self.settings.BATCH_UUID = r.meta("BATCH_UUID")
                    logger.info(" Using previous UUID %s.\n",
                                self.settings.BATCH_UUID)
                except KeyError:
                    pass
                break
        if batch_time is None:
            raise RuntimeError("No data files found in resume directory %s."
                               % self.settings.BATCH_RESUME)
    elif self.settings.BATCH_RESUME:
        raise RuntimeError("Batch resume directory %s doesn't exist!\n"
                           % self.settings.BATCH_RESUME)
    else:
        batch_time = self.settings.TIME

    # Used only to warn about duplicate data file names within one run.
    filenames_seen = set()

    for b, settings in self.expand_argsets(batch, argsets, batch_time,
                                           batch_name):
        if 'output_path' in b:
            output_path = clean_path(b['output_path'], allow_dirs=True)
        else:
            output_path = settings.DATA_DIR

        if settings.BATCH_RESUME is not None:
            # When resuming, every output path must live underneath the
            # resume directory; commonprefix of the two absolute paths
            # equals the resume path exactly when that holds.
            if os.path.commonprefix(
                    [os.path.abspath(output_path),
                     os.path.abspath(settings.BATCH_RESUME)]) \
                    != os.path.abspath(settings.BATCH_RESUME):
                raise RuntimeError("Batch-specified output path is not a "
                                   "subdirectory of resume path. Bailing.")
            # Skip tests whose data file already exists from the
            # interrupted run.
            if os.path.exists(os.path.join(output_path, "%s%s" % (
                    settings.DATA_FILENAME, resultset.SUFFIX))):
                logger.info(" Previous result exists, skipping.")
                continue

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            logger.info(" Would output to: %s.\n", output_path)
        elif not settings.BATCH_DRY and not os.path.exists(output_path):
            try:
                os.makedirs(output_path)
            except OSError as e:
                raise RuntimeError("Unable to create output path '%s': %s."
                                   % (output_path, e))

        commands = self.commands_for(b, settings)

        if not settings.BATCH_DRY:
            # Per-test log file(s); torn down again in the finally block.
            self.logfile = loggers.setup_logfile(
                os.path.join(output_path,
                             "%s.log" % settings.DATA_FILENAME),
                level=loggers.INFO, replay=False)
            if b.get('debug_log', False):
                self.logfile_debug = loggers.setup_logfile(
                    os.path.join(output_path,
                                 "%s.debug.log" % settings.DATA_FILENAME),
                    level=loggers.DEBUG, maxlevel=loggers.DEBUG,
                    replay=False)

        if settings.DATA_FILENAME in filenames_seen:
            logger.warning("Filename already seen in this run: %s",
                           settings.DATA_FILENAME)
        filenames_seen.add(settings.DATA_FILENAME)

        self.run_commands(commands, 'pre')
        self.run_commands(commands, 'monitor')
        try:
            if settings.BATCH_VERBOSE:
                if settings.BATCH_DRY:
                    logger.info(" Would run test '%s'.", settings.NAME)
                else:
                    logger.info(" Running test '%s'.", settings.NAME)
                logger.info(" data_filename=%s", settings.DATA_FILENAME)
                for k in sorted(b.keys()):
                    if k.upper() in SETTINGS_PARSER:
                        logger.info(" %s=%s", k, b[k])
            if settings.BATCH_DRY:
                self.tests_run += 1
            else:
                # Load test again with informational=False to enable host
                # lookups and other actions that may fail
                settings.load_test(informational=False)
                self.run_test(settings, output_path)
        except KeyboardInterrupt:
            # Still run the essential post commands before propagating.
            self.run_commands(commands, 'post', essential_only=True)
            raise
        except Exception as e:
            self.run_commands(commands, 'post', essential_only=True)
            logger.exception(" Error running test: %s", str(e))
        else:
            try:
                self.run_commands(commands, 'post')
            except Exception as e:
                # Full post run failed; fall back to the essential ones.
                self.run_commands(commands, 'post', essential_only=True)
                logger.exception(" Error running post-commands: %s",
                                 str(e))
        finally:
            self.kill_children()
            # Detach and close the per-test log files regardless of
            # how the test ended.
            if self.logfile:
                loggers.remove_log_handler(self.logfile)
                self.logfile.close()
                self.logfile = None
            if self.logfile_debug:
                loggers.remove_log_handler(self.logfile_debug)
                self.logfile_debug.close()
                self.logfile_debug = None

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            logger.info(" Would sleep for %d seconds.", pause)
        elif not settings.BATCH_DRY:
            time.sleep(pause)