def load_input(self, settings):
    """Load the result sets named in settings.INPUT and run a formatter.

    All inputs must come from the same test (enforced unless the GUI is
    active); with CONCATENATE set, later sets are merged into the first.
    """
    settings = settings.copy()
    loaded = []
    seen_name = None

    for fname in settings.INPUT:
        rset = resultset.load(fname, settings.ABSOLUTE_TIME)
        current = rset.meta("NAME")
        if seen_name is not None and seen_name != current \
                and not settings.GUI:
            raise RuntimeError(
                "Result sets must be from same test (found %s/%s)"
                % (seen_name, current))
        seen_name = current
        if settings.CONCATENATE and loaded:
            loaded[0].concatenate(rset)
        else:
            loaded.append(rset)

    if settings.GUI:
        load_gui(settings)

    settings.update(loaded[0].meta())
    settings.load_test(informational=True)

    # Fill in any data series that can be computed from the ones present.
    for rset in loaded:
        settings.compute_missing_results(rset)

    formatters.new(settings).format(loaded)
def load_input(self, settings):
    """Load result sets from settings.INPUT and format them.

    Differing test names only produce a warning here (not an error), and
    OVERRIDE_LABELS entries are applied positionally to non-concatenated
    result sets.
    """
    # Work on a copy so the caller's settings object is not mutated.
    settings = settings.copy()
    results = []
    test_name = None
    for i, filename in enumerate(settings.INPUT):
        r = resultset.load(filename, settings.ABSOLUTE_TIME)
        if test_name is not None and test_name != r.meta("NAME") and \
                not settings.GUI:
            logger.warning("Result sets are not from the same "
                           "test (found %s/%s).",
                           test_name, r.meta("NAME"))
        test_name = r.meta("NAME")
        if results and settings.CONCATENATE:
            # Concatenation folds every subsequent set into the first one.
            results[0].concatenate(r)
        else:
            # Labels are only overridden when sets are kept separate.
            if len(settings.OVERRIDE_LABELS) > i:
                r.set_label(settings.OVERRIDE_LABELS[i])
            results.append(r)
    settings.update(results[0].meta())
    settings.load_test(informational=True)

    # Look for missing data series, and if they are computed from other
    # values, try to compute them.
    for res in results:
        settings.compute_missing_results(res)

    formatter = formatters.new(settings)
    formatter.format(results)
def runTest(self):
    """Exercise every formatter in TEST_FORMATTERS on the stored result
    file and verify each one's output."""
    rset = resultset.load(self.filename)
    self.settings.update(rset.meta())
    self.settings.load_test(informational=True)
    self.settings.compute_missing_results(rset)
    self.settings.FORMAT = 'metadata'

    for fmt_name in TEST_FORMATTERS:
        try:
            self.settings.FORMAT = fmt_name
            self.settings.OUTPUT = os.path.join(self.output_dir,
                                                "%s.txt" % fmt_name)
            fmt = formatters.new(self.settings)
            fmt.format([rset])
            ok, _ = fmt.verify()
            if not ok:
                raise self.failureException(
                    "Verification of formatter '%s' failed" % fmt_name)
        except self.failureException:
            raise
        except Exception:
            # Stash the original traceback on the wrapper so the harness
            # can report where the formatter actually blew up.
            wrapper = Exception("Error creating formatter '%s'" % fmt_name)
            wrapper.orig_tb = traceback.format_exc()
            raise wrapper
def runTest(self):
    """Render every configured plot to a file and verify the output.

    Plots listed in PLOTS_MAY_FAIL are allowed to fail verification.
    Unexpected errors are re-raised wrapped in an Exception carrying the
    original traceback in its 'orig_tb' attribute.
    """
    r = resultset.load(self.filename)
    self.settings.update(r.meta())
    self.settings.load_test(informational=True)
    self.settings.compute_missing_results(r)
    self.settings.FORMAT = 'plot'
    for p in self.settings.PLOTS.keys():
        try:
            self.settings.PLOT = p
            self.settings.OUTPUT = os.path.join(
                self.output_dir, "%s.%s" % (p, self.fmt))
            formatter = formatters.new(self.settings)
            formatter.format([r])
            res, plen = formatter.verify()
            if not res and p not in PLOTS_MAY_FAIL:
                raise self.failureException(
                    "Verification of plot '%s' failed: %s" % (p, plen))
        except self.failureException:
            raise
        # Fix: the 'as e' binding was never used; bare 'except Exception'
        # matches the companion formatter test.
        except Exception:
            tb = traceback.format_exc()
            new_exc = Exception("Error creating plot '%s'" % p)
            new_exc.orig_tb = tb
            raise new_exc
def load(argv):
    """Parse command-line arguments and return a populated Settings object.

    Arguments are parsed twice: the first pass only determines the test
    name and rcfile so rcfile values can be injected as defaults before
    the second, authoritative pass.
    """
    settings = parser.parse_args(argv, namespace=Settings(DEFAULT_SETTINGS))
    rc_defaults = {k: v for k, v in settings.load_rcfile().items()
                   if getattr(settings, k) == parser.get_default(k)}
    parser.set_defaults(**rc_defaults)

    settings = parser.parse_args(argv, namespace=Settings(DEFAULT_SETTINGS))
    settings.process_args()
    settings.update_implications()

    if settings.SCALE_DATA:
        # Skip files already given as input, so one set of scaling files
        # can be reused while each file is cycled through -i in turn.
        settings.SCALE_DATA = [resultset.load(fname)
                               for fname in settings.SCALE_DATA
                               if fname not in settings.INPUT]

    settings.load_test(informational=True)

    if settings.LIST_PLOTS:
        list_plots(settings)

    logger.info("Started Flent %s using Python %s.",
                VERSION, sys.version.split()[0])

    return settings
def load_input(self, settings):
    """Load each file in settings.INPUT as a result set and format them.

    Result sets from different tests trigger a warning only; positional
    OVERRIDE_LABELS are applied when sets are not being concatenated.
    """
    settings = settings.copy()  # don't mutate the caller's settings
    results = []
    test_name = None
    for i, filename in enumerate(settings.INPUT):
        r = resultset.load(filename, settings.ABSOLUTE_TIME)
        # Mixed test names are tolerated (warning only) — and silently
        # allowed when running under the GUI.
        if test_name is not None and test_name != r.meta("NAME") and \
                not settings.GUI:
            logger.warning("Result sets are not from the same "
                           "test (found %s/%s).",
                           test_name, r.meta("NAME"))
        test_name = r.meta("NAME")
        if results and settings.CONCATENATE:
            results[0].concatenate(r)
        else:
            if len(settings.OVERRIDE_LABELS) > i:
                r.set_label(settings.OVERRIDE_LABELS[i])
            results.append(r)
    settings.update(results[0].meta())
    settings.load_test(informational=True)

    # Look for missing data series, and if they are computed from other
    # values, try to compute them.
    for res in results:
        settings.compute_missing_results(res)

    formatter = formatters.new(settings)
    formatter.format(results)
def load(argv):
    """Parse argv into the module-level settings object and return it.

    Positional arguments that exist on disk are treated as input (or
    scale-data) files; anything else is resolved as a test name or host.
    """
    (dummy, args) = parser.parse_args(argv, values=settings)

    if hasattr(settings, 'LIST_TESTS') and settings.LIST_TESTS:
        list_tests()

    for a in args:
        if os.path.exists(a):
            # In scale mode, extra files beyond the first input become
            # scaling data rather than additional inputs.
            if settings.SCALE_MODE and settings.INPUT:
                settings.SCALE_DATA.append(a)
            else:
                settings.INPUT.append(a)
        else:
            settings.load_test_or_host(a)

    settings.load_rcfile()

    if settings.SCALE_DATA:
        # Replace the list of filenames with loaded result sets.
        scale_data = []
        for filename in settings.SCALE_DATA:
            if filename in settings.INPUT:
                # Do not load input file twice - makes it easier to select a set
                # of files for plot scaling and supply each one to -i without
                # having to change the other command line options each time.
                continue
            r = resultset.load(filename)
            scale_data.append(r)
        settings.SCALE_DATA = scale_data

    settings.load_test(informational=True)

    if hasattr(settings, 'LIST_PLOTS') and settings.LIST_PLOTS:
        list_plots()

    return settings
def load_input(self, settings):
    """Load the result sets listed in settings.INPUT and hand them to a
    formatter, warning on stderr when they come from different tests."""
    settings = settings.copy()
    loaded = []
    prev_name = None

    for fname in settings.INPUT:
        rset = resultset.load(fname, settings.ABSOLUTE_TIME)
        name = rset.meta("NAME")
        if not settings.GUI and prev_name is not None and prev_name != name:
            sys.stderr.write("Warning: result sets are not from the same test (found %s/%s).\n" % (prev_name, name))
        prev_name = name
        if settings.CONCATENATE and loaded:
            loaded[0].concatenate(rset)
        else:
            loaded.append(rset)

    if settings.GUI:
        load_gui(settings)

    settings.update(loaded[0].meta())
    settings.load_test(informational=True)

    # Fill in any data series that can be derived from the ones present.
    for rset in loaded:
        settings.compute_missing_results(rset)

    formatters.new(settings).format(loaded)
def load(argv):
    """Populate the global settings object from argv and return it.

    Existing paths among the positional args become input/scale-data
    files; other args are looked up as test names or hostnames.
    """
    (dummy, args) = parser.parse_args(argv, values=settings)

    if hasattr(settings, 'LIST_TESTS') and settings.LIST_TESTS:
        list_tests()

    for a in args:
        if os.path.exists(a):
            if settings.SCALE_MODE and settings.INPUT:
                settings.SCALE_DATA.append(a)
            else:
                settings.INPUT.append(a)
        else:
            settings.load_test_or_host(a)

    settings.load_rcfile()

    if settings.SCALE_DATA:
        # Turn the filename list into loaded result sets.
        scale_data = []
        for filename in settings.SCALE_DATA:
            if filename in settings.INPUT:
                # Do not load input file twice - makes it easier to select a set
                # of files for plot scaling and supply each one to -i without
                # having to change the other command line options each time.
                continue
            r = resultset.load(filename)
            scale_data.append(r)
        settings.SCALE_DATA = scale_data

    settings.load_test(informational=True)

    if hasattr(settings, 'LIST_PLOTS') and settings.LIST_PLOTS:
        list_plots()

    return settings
def runTest(self):
    """Render every configured plot in a worker pool and verify each one.

    Plots listed in PLOTS_MAY_FAIL are allowed to fail verification.
    """
    results = resultset.load(self.filename)
    self.settings.update(results.meta())
    self.settings.load_test(informational=True)
    plotters.init_matplotlib("-", False, True)
    # Fix: the pool was never closed/joined, leaking worker processes.
    # The context manager terminates them on exit (consistent with the
    # sibling test that already uses 'with Pool()').
    with Pool(initializer=initfunc) as pool:
        for p in self.settings.PLOTS.keys():
            plot = pool.apply(plot_one, (self.settings, p, results))
            if not plot.verify() and p not in PLOTS_MAY_FAIL:
                raise self.failureException(
                    "Verification of plot '%s' failed" % p)
def runTest(self):
    """Render each configured plot for the stored result file."""
    rset = resultset.load(self.filename)
    self.settings.update(rset.meta())
    self.settings.load_test(informational=True)
    self.settings.compute_missing_results(rset)
    self.settings.FORMAT = 'plot'
    for plot_name in self.settings.PLOTS.keys():
        self.settings.PLOT = plot_name
        out_name = "%s.%s" % (plot_name, self.fmt)
        self.settings.OUTPUT = os.path.join(self.output_dir, out_name)
        formatters.new(self.settings).format([rset])
def runTest(self):
    """Render every plot in a worker pool and verify each result."""
    rset = resultset.load(self.filename)
    self.settings.update(rset.meta())
    self.settings.load_test(informational=True)
    with Pool() as workers:
        for plot_name in self.settings.PLOTS.keys():
            rendered = workers.apply(plot_one,
                                     (self.settings, plot_name, rset))
            ok, reason = rendered.verify()
            if not ok and plot_name not in PLOTS_MAY_FAIL:
                raise self.failureException(
                    "Verification of plot '%s' failed: %s"
                    % (plot_name, reason))
def runTest(self):
    """Render each configured plot to an output file.

    This variant performs no verification of the rendered plots.
    """
    r = resultset.load(self.filename)
    self.settings.update(r.meta())
    self.settings.load_test(informational=True)
    self.settings.compute_missing_results(r)
    self.settings.FORMAT = 'plot'
    for p in self.settings.PLOTS.keys():
        self.settings.PLOT = p
        # One output file per plot, named <plot>.<format extension>.
        self.settings.OUTPUT = os.path.join(self.output_dir,
                                            "%s.%s" % (p, self.fmt))
        formatter = formatters.new(self.settings)
        formatter.format([r])
def runTest(self):
    """Render every configured plot and verify each output file.

    Plots in PLOTS_MAY_FAIL may fail verification; unexpected errors are
    wrapped in an Exception carrying the traceback in 'orig_tb'.
    """
    r = resultset.load(self.filename)
    self.settings.update(r.meta())
    self.settings.load_test(informational=True)
    self.settings.compute_missing_results(r)
    self.settings.FORMAT = 'plot'
    for p in self.settings.PLOTS.keys():
        try:
            self.settings.PLOT = p
            self.settings.OUTPUT = os.path.join(
                self.output_dir, "%s.%s" % (p, self.fmt))
            formatter = formatters.new(self.settings)
            formatter.format([r])
            if not formatter.verify() and p not in PLOTS_MAY_FAIL:
                raise self.failureException(
                    "Verification of plot '%s' failed" % p)
        except self.failureException:
            raise
        # Fix: the 'as e' binding was never used.
        except Exception:
            tb = traceback.format_exc()
            new_exc = Exception("Error creating plot '%s'" % p)
            new_exc.orig_tb = tb
            raise new_exc
def run_batch(self, batch_name):
    """Run every test expansion of the named batch.

    Returns False (without running) for abstract or disabled batches.
    Handles resuming an interrupted batch run (BATCH_RESUME), dry runs
    (BATCH_DRY), per-test log files, and pre/monitor/post commands.
    """
    if batch_name not in self.batches:
        raise RuntimeError("Can't find batch '%s' to run." % batch_name)
    # Copy so BATCH_OVERRIDE does not mutate the stored batch definition.
    batch = self.batches[batch_name].copy()
    batch.update(self.settings.BATCH_OVERRIDE)

    # A batch declared 'abstract' is not runnable
    if batch.get('abstract', False):
        logger.info(" Batch marked as abstract. Not running.")
        return False
    elif batch.get('disabled', False) or not batch.get('enabled', True):
        logger.info(" Batch disabled.")
        return False

    argsets = self.get_argsets(batch)

    pause = int(batch.get('pause', 0))

    batch_time = None
    if self.settings.BATCH_RESUME is not None and \
       os.path.isdir(self.settings.BATCH_RESUME):
        # We're resuming a batch run. Try to find a data file we can get the
        # original batch run time from.
        for dirpath, dirnames, filenames in os.walk(
                self.settings.BATCH_RESUME):
            datafiles = [f for f in filenames
                         if f.endswith(resultset.SUFFIX)]
            if datafiles:
                f = datafiles[0]
                r = resultset.load(os.path.join(dirpath, f))
                batch_time = r.meta("BATCH_TIME")
                try:
                    # Reuse the original run's UUID if it recorded one.
                    self.settings.BATCH_UUID = r.meta("BATCH_UUID")
                    logger.info(" Using previous UUID %s.\n",
                                self.settings.BATCH_UUID)
                except KeyError:
                    pass
                break
        if batch_time is None:
            raise RuntimeError(
                "No data files found in resume directory %s."
                % self.settings.BATCH_RESUME)
    elif self.settings.BATCH_RESUME:
        raise RuntimeError("Batch resume directory %s doesn't exist!\n"
                           % self.settings.BATCH_RESUME)
    else:
        batch_time = self.settings.TIME

    # Used only to warn about duplicate data filenames within this run.
    filenames_seen = set()

    for b, settings in self.expand_argsets(batch, argsets, batch_time,
                                           batch_name):
        if 'output_path' in b:
            output_path = clean_path(b['output_path'], allow_dirs=True)
        else:
            output_path = settings.DATA_DIR

        if settings.BATCH_RESUME is not None:
            # Refuse to write outside the directory being resumed.
            if os.path.commonprefix(
                [os.path.abspath(output_path),
                 os.path.abspath(settings.BATCH_RESUME)]) \
                    != os.path.abspath(settings.BATCH_RESUME):
                raise RuntimeError("Batch-specified output path is not a "
                                   "subdirectory of resume path. Bailing.")
            if os.path.exists(
                    os.path.join(
                        output_path,
                        "%s%s" % (settings.DATA_FILENAME,
                                  resultset.SUFFIX))):
                logger.info(" Previous result exists, skipping.")
                continue

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            logger.info(" Would output to: %s.\n", output_path)
        elif not settings.BATCH_DRY and not os.path.exists(output_path):
            try:
                os.makedirs(output_path)
            except OSError as e:
                raise RuntimeError(
                    "Unable to create output path '%s': %s." %
                    (output_path, e))

        commands = self.commands_for(b, settings)

        if not settings.BATCH_DRY:
            # Per-test log file (and optional debug log) for this run.
            self.logfile = loggers.setup_logfile(os.path.join(
                output_path,
                "%s.log" % settings.DATA_FILENAME),
                level=loggers.INFO, replay=False)
            if b.get('debug_log', False):
                self.logfile_debug = loggers.setup_logfile(
                    os.path.join(output_path,
                                 "%s.debug.log" % settings.DATA_FILENAME),
                    level=loggers.DEBUG, maxlevel=loggers.DEBUG,
                    replay=False)

        if settings.DATA_FILENAME in filenames_seen:
            logger.warning("Filename already seen in this run: %s",
                           settings.DATA_FILENAME)
        filenames_seen.add(settings.DATA_FILENAME)

        self.run_commands(commands, 'pre')
        self.run_commands(commands, 'monitor')
        try:
            if settings.BATCH_VERBOSE:
                if settings.BATCH_DRY:
                    logger.info(" Would run test '%s'.", settings.NAME)
                else:
                    logger.info(" Running test '%s'.", settings.NAME)
                logger.info(" data_filename=%s", settings.DATA_FILENAME)
                for k in sorted(b.keys()):
                    if k.upper() in SETTINGS_PARSER:
                        logger.info(" %s=%s", k, b[k])
            if settings.BATCH_DRY:
                self.tests_run += 1
            else:
                # Load test again with informational=False to enable host
                # lookups and other actions that may fail
                settings.load_test(informational=False)
                self.run_test(settings, output_path)
        except KeyboardInterrupt:
            # Still run the essential post-commands before propagating.
            self.run_commands(commands, 'post', essential_only=True)
            raise
        except Exception as e:
            self.run_commands(commands, 'post', essential_only=True)
            logger.exception(" Error running test: %s", str(e))
        else:
            try:
                self.run_commands(commands, 'post')
            except Exception as e:
                self.run_commands(commands, 'post', essential_only=True)
                logger.exception(" Error running post-commands: %s",
                                 str(e))
        finally:
            # Always reap child processes and detach/close the log files.
            self.kill_children()

            if self.logfile:
                loggers.remove_log_handler(self.logfile)
                self.logfile.close()
                self.logfile = None

            if self.logfile_debug:
                loggers.remove_log_handler(self.logfile_debug)
                self.logfile_debug.close()
                self.logfile_debug = None

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            logger.info(" Would sleep for %d seconds.", pause)
        elif not settings.BATCH_DRY:
            time.sleep(pause)
def run_batch(self, batch_name):
    """Run every test expansion of the named batch (stderr-logging variant).

    Returns False for abstract or disabled batches. Supports resuming a
    prior run via BATCH_RESUME and dry runs via BATCH_DRY.
    """
    if not batch_name in self.batches:
        raise RuntimeError("Can't find batch '%s' to run." % batch_name)
    # Copy so BATCH_OVERRIDE does not mutate the stored batch definition.
    batch = self.batches[batch_name].copy()
    batch.update(self.settings.BATCH_OVERRIDE)

    # A batch declared 'abstract' is not runnable
    if batch.get('abstract', False):
        sys.stderr.write(" Batch marked as abstract. Not running.\n")
        return False
    elif batch.get('disabled', False) or not batch.get('enabled', True):
        sys.stderr.write(" Batch disabled.\n")
        return False

    argsets = self.get_argsets(batch)

    pause = int(batch.get('pause', 0))

    batch_time = None
    if self.settings.BATCH_RESUME is not None and os.path.isdir(
            self.settings.BATCH_RESUME):
        # We're resuming a batch run. Try to find a data file we can get the
        # original batch run time from.
        for dirpath, dirnames, filenames in os.walk(
                self.settings.BATCH_RESUME):
            datafiles = [f for f in filenames
                         if f.endswith(resultset.SUFFIX)]
            if datafiles:
                f = datafiles[0]
                r = resultset.load(os.path.join(dirpath, f))
                batch_time = r.meta("BATCH_TIME")
                break
        if batch_time is None:
            raise RuntimeError(
                "No data files found in resume directory %s." %
                self.settings.BATCH_RESUME)
    elif self.settings.BATCH_RESUME:
        raise RuntimeError("Batch resume directory %s doesn't exist!\n" %
                           self.settings.BATCH_RESUME)
    else:
        batch_time = self.settings.TIME

    for b, settings in self.expand_argsets(batch, argsets, batch_time,
                                           batch_name):
        if 'output_path' in b:
            output_path = clean_path(b['output_path'], allow_dirs=True)
        else:
            output_path = settings.DATA_DIR

        if settings.BATCH_RESUME is not None:
            # Refuse to write outside the directory being resumed.
            if os.path.commonprefix([
                    os.path.abspath(output_path),
                    os.path.abspath(settings.BATCH_RESUME)
            ]) != os.path.abspath(settings.BATCH_RESUME):
                raise RuntimeError(
                    "Batch-specified output path is not a subdirectory of resume path. Bailing."
                )
            if os.path.exists(
                    os.path.join(
                        output_path,
                        "%s%s" % (settings.DATA_FILENAME,
                                  resultset.SUFFIX))):
                sys.stderr.write(" Previous result exists, skipping.\n")
                continue

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            sys.stderr.write(" Would output to: %s.\n" % output_path)
        elif not settings.BATCH_DRY and not os.path.exists(output_path):
            try:
                os.makedirs(output_path)
            except OSError as e:
                raise RuntimeError(
                    "Unable to create output path '%s': %s." %
                    (output_path, e))

        commands = self.commands_for(b, settings)
        if not settings.BATCH_DRY:
            # Append-mode per-test log file; debug log is opt-in via the
            # batch's 'debug_log' key.
            self.log_fd = io.open(
                os.path.join(output_path,
                             "%s.log" % settings.DATA_FILENAME), "at")
            if b.get('debug_log', False):
                settings.LOG_FILE = os.path.join(
                    output_path, "%s.debug.log" % settings.DATA_FILENAME)

        self.run_commands(commands, 'pre')
        self.run_commands(commands, 'monitor')
        try:
            if settings.BATCH_VERBOSE:
                if settings.BATCH_DRY:
                    sys.stderr.write(" Would run test '%s'.\n" %
                                     settings.NAME)
                else:
                    sys.stderr.write(" Running test '%s'.\n" %
                                     settings.NAME)
                sys.stderr.write(" data_filename=%s\n" %
                                 settings.DATA_FILENAME)
                for k in sorted([i.lower() for i in CONFIG_TYPES.keys()]):
                    if k in b:
                        sys.stderr.write(" %s=%s\n" % (k, b[k]))
            if settings.BATCH_DRY:
                self.tests_run += 1
            else:
                self.run_test(settings, output_path)
        except KeyboardInterrupt:
            # Still run the essential post-commands before propagating.
            self.run_commands(commands, 'post', essential_only=True)
            raise
        except:
            self.run_commands(commands, 'post', essential_only=True)
            sys.stderr.write(" Error running test: %s\n" % " ".join(
                traceback.format_exception_only(*sys.exc_info()[:2])))
        else:
            try:
                self.run_commands(commands, 'post')
            except:
                self.run_commands(commands, 'post', essential_only=True)
                sys.stderr.write(" Error running post-commands: %s\n" %
                                 " ".join(
                                     traceback.format_exception_only(
                                         *sys.exc_info()[:2])))
        finally:
            # Always reap child processes and close the per-test log.
            self.kill_children()

            if self.log_fd:
                self.log_fd.close()
                self.log_fd = None

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            sys.stderr.write(" Would sleep for %d seconds.\n" % pause)
        elif not settings.BATCH_DRY:
            time.sleep(pause)
def run_batch(self, batch_name):
    """Run every test expansion of the named batch.

    Abstract or disabled batches return False without running. Supports
    resuming interrupted runs (BATCH_RESUME, reusing the recorded batch
    time and UUID), dry runs, per-test log files, and pre/monitor/post
    command hooks around each test.
    """
    if batch_name not in self.batches:
        raise RuntimeError("Can't find batch '%s' to run." % batch_name)
    # Copy so BATCH_OVERRIDE does not mutate the stored batch definition.
    batch = self.batches[batch_name].copy()
    batch.update(self.settings.BATCH_OVERRIDE)

    # A batch declared 'abstract' is not runnable
    if batch.get('abstract', False):
        logger.info(" Batch marked as abstract. Not running.")
        return False
    elif batch.get('disabled', False) or not batch.get('enabled', True):
        logger.info(" Batch disabled.")
        return False

    argsets = self.get_argsets(batch)

    pause = int(batch.get('pause', 0))

    batch_time = None
    if self.settings.BATCH_RESUME is not None and \
       os.path.isdir(self.settings.BATCH_RESUME):
        # We're resuming a batch run. Try to find a data file we can get the
        # original batch run time from.
        for dirpath, dirnames, filenames in os.walk(
                self.settings.BATCH_RESUME):
            datafiles = [f for f in filenames
                         if f.endswith(resultset.SUFFIX)]
            if datafiles:
                f = datafiles[0]
                r = resultset.load(os.path.join(dirpath, f))
                batch_time = r.meta("BATCH_TIME")
                try:
                    # Carry the original run's UUID forward if present.
                    self.settings.BATCH_UUID = r.meta("BATCH_UUID")
                    logger.info(" Using previous UUID %s.\n",
                                self.settings.BATCH_UUID)
                except KeyError:
                    pass
                break
        if batch_time is None:
            raise RuntimeError("No data files found in resume directory %s."
                               % self.settings.BATCH_RESUME)
    elif self.settings.BATCH_RESUME:
        raise RuntimeError("Batch resume directory %s doesn't exist!\n"
                           % self.settings.BATCH_RESUME)
    else:
        batch_time = self.settings.TIME

    # Tracks data filenames to warn on duplicates within this run.
    filenames_seen = set()

    for b, settings in self.expand_argsets(batch, argsets, batch_time,
                                           batch_name):
        if 'output_path' in b:
            output_path = clean_path(b['output_path'], allow_dirs=True)
        else:
            output_path = settings.DATA_DIR

        if settings.BATCH_RESUME is not None:
            # Refuse to write outside the directory being resumed.
            if os.path.commonprefix(
                [os.path.abspath(output_path),
                 os.path.abspath(settings.BATCH_RESUME)]) \
                    != os.path.abspath(settings.BATCH_RESUME):
                raise RuntimeError("Batch-specified output path is not a "
                                   "subdirectory of resume path. Bailing.")
            if os.path.exists(os.path.join(output_path, "%s%s" % (
                    settings.DATA_FILENAME, resultset.SUFFIX))):
                logger.info(" Previous result exists, skipping.")
                continue

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            logger.info(" Would output to: %s.\n", output_path)
        elif not settings.BATCH_DRY and not os.path.exists(output_path):
            try:
                os.makedirs(output_path)
            except OSError as e:
                raise RuntimeError("Unable to create output path "
                                   "'%s': %s." % (output_path, e))

        commands = self.commands_for(b, settings)

        if not settings.BATCH_DRY:
            # Per-test log file; optional debug log via 'debug_log' key.
            self.logfile = loggers.setup_logfile(
                os.path.join(output_path,
                             "%s.log" % settings.DATA_FILENAME),
                level=loggers.INFO, replay=False)
            if b.get('debug_log', False):
                self.logfile_debug = loggers.setup_logfile(
                    os.path.join(output_path,
                                 "%s.debug.log" % settings.DATA_FILENAME),
                    level=loggers.DEBUG, maxlevel=loggers.DEBUG,
                    replay=False)

        if settings.DATA_FILENAME in filenames_seen:
            logger.warning("Filename already seen in this run: %s",
                           settings.DATA_FILENAME)
        filenames_seen.add(settings.DATA_FILENAME)

        self.run_commands(commands, 'pre')
        self.run_commands(commands, 'monitor')
        try:
            if settings.BATCH_VERBOSE:
                if settings.BATCH_DRY:
                    logger.info(" Would run test '%s'.", settings.NAME)
                else:
                    logger.info(" Running test '%s'.", settings.NAME)
                logger.info(" data_filename=%s", settings.DATA_FILENAME)
                for k in sorted(b.keys()):
                    if k.upper() in SETTINGS_PARSER:
                        logger.info(" %s=%s", k, b[k])
            if settings.BATCH_DRY:
                self.tests_run += 1
            else:
                # Load test again with informational=False to enable host
                # lookups and other actions that may fail
                settings.load_test(informational=False)
                self.run_test(settings, output_path)
        except KeyboardInterrupt:
            # Still run essential post-commands before propagating.
            self.run_commands(commands, 'post', essential_only=True)
            raise
        except Exception as e:
            self.run_commands(commands, 'post', essential_only=True)
            logger.exception(" Error running test: %s", str(e))
        else:
            try:
                self.run_commands(commands, 'post')
            except Exception as e:
                self.run_commands(commands, 'post', essential_only=True)
                logger.exception(" Error running post-commands: %s",
                                 str(e))
        finally:
            # Always reap children and detach/close per-test log files.
            self.kill_children()

            if self.logfile:
                loggers.remove_log_handler(self.logfile)
                self.logfile.close()
                self.logfile = None

            if self.logfile_debug:
                loggers.remove_log_handler(self.logfile_debug)
                self.logfile_debug.close()
                self.logfile_debug = None

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            logger.info(" Would sleep for %d seconds.", pause)
        elif not settings.BATCH_DRY:
            time.sleep(pause)
def run_batch(self, batchname):
    """Run every argset/repetition expansion of the named batch.

    Abstract or disabled batches return False without running. Supports
    resuming an interrupted run via BATCH_RESUME and dry runs via
    BATCH_DRY; pre/monitor/post commands wrap every test execution.
    """
    if batchname not in self.batches:
        raise RuntimeError("Can't find batch '%s' to run." % batchname)
    # Fix: take a copy before applying BATCH_OVERRIDE; the original
    # updated the stored batch dict in place, so overrides leaked into
    # any later run of the same batch in this process.
    batch = self.batches[batchname].copy()
    batch.update(self.settings.BATCH_OVERRIDE)

    # A batch declared 'abstract' is not runnable
    if batch.get('abstract', False):
        sys.stderr.write(" Batch marked as abstract. Not running.\n")
        return False
    elif batch.get('disabled', False) or not batch.get('enabled', True):
        sys.stderr.write(" Batch disabled.\n")
        return False

    # Each 'for_*' key expands (via globbing against known args) into one
    # axis of the cartesian product of test runs.
    argsets = []
    for k in batch.keys():
        if k.startswith("for_"):
            argset = []
            for a in batch[k].split(','):
                a = a.strip().lower()
                matches = [arg for arg in self.args if fnmatch(arg, a)]
                if not matches:
                    raise RuntimeError("No matches for arg: '%s'." % a)
                argset.extend(matches)
            argsets.append(argset)
    # Repetition count is the last axis of the product.
    reps = range(1, int(batch.get('repetitions', 1)) + 1)
    argsets.append(reps)
    pause = int(batch.get('pause', 0))

    batch_time = None
    if self.settings.BATCH_RESUME is not None and os.path.isdir(
            self.settings.BATCH_RESUME):
        # We're resuming a batch run. Try to find a data file we can get the
        # original batch run time from.
        for dirpath, dirnames, filenames in os.walk(
                self.settings.BATCH_RESUME):
            datafiles = [f for f in filenames
                         if f.endswith(resultset.SUFFIX)]
            if datafiles:
                f = datafiles[0]
                r = resultset.load(os.path.join(dirpath, f))
                batch_time = r.meta("BATCH_TIME")
                break
        if batch_time is None:
            raise RuntimeError(
                "No data files found in resume directory %s."
                % self.settings.BATCH_RESUME)
    elif self.settings.BATCH_RESUME:
        raise RuntimeError("Batch resume directory %s doesn't exist!\n"
                           % self.settings.BATCH_RESUME)
    else:
        batch_time = self.settings.TIME

    for argset in itertools.product(*argsets):
        # Split the repetition number off the end of the product tuple.
        rep = argset[-1]
        argset = argset[:-1]
        settings = self.settings.copy()
        sys.stderr.write(" args:%s rep:%02d" % (",".join(argset), rep))
        if settings.BATCH_DRY:
            sys.stderr.write(" (dry run)")
        sys.stderr.write(".\n")
        settings.FORMAT = 'null'
        settings.BATCH_NAME = batchname
        settings.BATCH_TIME = batch_time
        settings.TIME = datetime.now()
        expand_vars = {
            'repetition': "%02d" % rep,
            'batch_time': settings.BATCH_TIME.strftime("%Y-%m-%dT%H%M%S")
        }
        for arg in argset:
            if arg not in self.args:
                raise RuntimeError("Invalid arg: '%s'." % arg)
            expand_vars.update(self.args[arg])
        b = self.apply_args(batch, expand_vars, settings)

        if 'test_name' not in b:
            raise RuntimeError("Missing test name.")

        settings.load_rcvalues(b.items(), override=True)
        settings.NAME = b['test_name']
        settings.load_test(informational=settings.BATCH_DRY)
        settings.DATA_FILENAME = self.gen_filename(settings, b, argset,
                                                   rep)

        if 'output_path' in b:
            output_path = clean_path(b['output_path'], allow_dirs=True)
        else:
            output_path = settings.DATA_DIR

        if settings.BATCH_RESUME is not None:
            # Refuse to write outside the directory being resumed.
            if os.path.commonprefix([
                    os.path.abspath(output_path),
                    os.path.abspath(settings.BATCH_RESUME)
            ]) != os.path.abspath(settings.BATCH_RESUME):
                raise RuntimeError(
                    "Batch-specified output path is not a subdirectory of resume path. Bailing."
                )
            if os.path.exists(
                    os.path.join(
                        output_path,
                        "%s%s" % (settings.DATA_FILENAME,
                                  resultset.SUFFIX))):
                sys.stderr.write(" Previous result exists, skipping.\n")
                continue

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            sys.stderr.write(" Would output to: %s.\n" % output_path)
        elif not settings.BATCH_DRY and not os.path.exists(output_path):
            try:
                os.makedirs(output_path)
            except OSError as e:
                raise RuntimeError(
                    "Unable to create output path '%s': %s."
                    % (output_path, e))

        commands = self.commands_for(b, settings)
        if not settings.BATCH_DRY:
            self.log_fd = io.open(
                os.path.join(output_path,
                             "%s.log" % settings.DATA_FILENAME), "at")
            if b.get('debug_log', False):
                settings.LOG_FILE = os.path.join(
                    output_path, "%s.debug.log" % settings.DATA_FILENAME)

        self.run_commands(commands, 'pre')
        self.run_commands(commands, 'monitor')
        try:
            if settings.BATCH_VERBOSE:
                if settings.BATCH_DRY:
                    sys.stderr.write(" Would run test '%s'.\n" %
                                     settings.NAME)
                else:
                    sys.stderr.write(" Running test '%s'.\n" %
                                     settings.NAME)
                sys.stderr.write(" data_filename=%s\n" %
                                 settings.DATA_FILENAME)
                for k in sorted([i.lower() for i in CONFIG_TYPES.keys()]):
                    if k in b:
                        sys.stderr.write(" %s=%s\n" % (k, b[k]))
            if settings.BATCH_DRY:
                self.tests_run += 1
            else:
                self.run_test(settings, output_path)
        except KeyboardInterrupt:
            # Still run essential post-commands before propagating.
            self.run_commands(commands, 'post', essential_only=True)
            raise
        except Exception:
            self.run_commands(commands, 'post', essential_only=True)
            sys.stderr.write(" Error running test: %s\n" % " ".join(
                traceback.format_exception_only(*sys.exc_info()[:2])))
        else:
            try:
                self.run_commands(commands, 'post')
            except Exception:
                self.run_commands(commands, 'post', essential_only=True)
                sys.stderr.write(" Error running post-commands: %s\n" %
                                 " ".join(
                                     traceback.format_exception_only(
                                         *sys.exc_info()[:2])))
        finally:
            # Always reap children and close the per-test log.
            self.kill_children()

            if self.log_fd:
                self.log_fd.close()
                self.log_fd = None

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            sys.stderr.write(" Would sleep for %d seconds.\n" % pause)
        elif not settings.BATCH_DRY:
            time.sleep(pause)
def run_batch(self, batchname):
    """Run every argset/repetition expansion of the named batch.

    Abstract or disabled batches return False without running. Supports
    resuming interrupted runs (BATCH_RESUME) and dry runs (BATCH_DRY).
    """
    if not batchname in self.batches:
        raise RuntimeError("Can't find batch '%s' to run." % batchname)
    # NOTE(review): no .copy() here, so batch.update() below mutates the
    # stored batch definition for subsequent runs — confirm intended.
    batch = self.batches[batchname]
    batch.update(self.settings.BATCH_OVERRIDE)

    # A batch declared 'abstract' is not runnable
    if batch.get('abstract', False):
        sys.stderr.write(" Batch marked as abstract. Not running.\n")
        return False
    elif batch.get('disabled', False) or not batch.get('enabled', True):
        sys.stderr.write(" Batch disabled.\n")
        return False

    # Each 'for_*' key is glob-expanded against known args into one axis
    # of the cartesian product of test runs; repetitions form the last
    # axis.
    argsets = []
    for k in batch.keys():
        if k.startswith("for_"):
            argset = []
            for a in batch[k].split(','):
                a = a.strip().lower()
                matches = [arg for arg in self.args if fnmatch(arg, a)]
                if not matches:
                    raise RuntimeError("No matches for arg: '%s'." % a)
                argset.extend(matches)
            argsets.append(argset)
    reps = range(1, int(batch.get('repetitions', 1)) + 1)
    argsets.append(reps)
    pause = int(batch.get('pause', 0))

    batch_time = None
    if self.settings.BATCH_RESUME is not None and os.path.isdir(
            self.settings.BATCH_RESUME):
        # We're resuming a batch run. Try to find a data file we can get the
        # original batch run time from.
        for dirpath, dirnames, filenames in os.walk(
                self.settings.BATCH_RESUME):
            datafiles = [f for f in filenames
                         if f.endswith(resultset.SUFFIX)]
            if datafiles:
                f = datafiles[0]
                r = resultset.load(os.path.join(dirpath, f))
                batch_time = r.meta("BATCH_TIME")
                break
        if batch_time is None:
            raise RuntimeError("No data files found in resume directory %s."
                               % self.settings.BATCH_RESUME)
    elif self.settings.BATCH_RESUME:
        raise RuntimeError("Batch resume directory %s doesn't exist!\n"
                           % self.settings.BATCH_RESUME)
    else:
        batch_time = self.settings.TIME

    for argset in itertools.product(*argsets):
        # The repetition number rides as the last element of the tuple.
        rep = argset[-1]
        argset = argset[:-1]
        settings = self.settings.copy()
        sys.stderr.write(" args:%s rep:%02d" % (",".join(argset), rep))
        if settings.BATCH_DRY:
            sys.stderr.write(" (dry run)")
        sys.stderr.write(".\n")
        settings.FORMAT = 'null'
        settings.BATCH_NAME = batchname
        settings.BATCH_TIME = batch_time
        settings.TIME = datetime.now()
        expand_vars = {'repetition': "%02d" % rep,
                       'batch_time':
                       settings.BATCH_TIME.strftime("%Y-%m-%dT%H%M%S")}
        for arg in argset:
            if not arg in self.args:
                raise RuntimeError("Invalid arg: '%s'." % arg)
            expand_vars.update(self.args[arg])
        b = self.apply_args(batch, expand_vars, settings)

        if not 'test_name' in b:
            raise RuntimeError("Missing test name.")

        settings.load_rcvalues(b.items(), override=True)
        settings.NAME = b['test_name']
        settings.load_test(informational=settings.BATCH_DRY)
        settings.DATA_FILENAME = self.gen_filename(settings, b, argset,
                                                   rep)

        if 'output_path' in b:
            output_path = clean_path(b['output_path'], allow_dirs=True)
        else:
            output_path = settings.DATA_DIR

        if settings.BATCH_RESUME is not None:
            # Refuse to write outside the directory being resumed.
            if os.path.commonprefix(
                    [os.path.abspath(output_path),
                     os.path.abspath(settings.BATCH_RESUME)]) != \
                    os.path.abspath(settings.BATCH_RESUME):
                raise RuntimeError("Batch-specified output path is not a subdirectory of resume path. Bailing.")
            if os.path.exists(os.path.join(output_path, "%s%s" % (
                    settings.DATA_FILENAME, resultset.SUFFIX))):
                sys.stderr.write(" Previous result exists, skipping.\n")
                continue

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            sys.stderr.write(" Would output to: %s.\n" % output_path)
        elif not settings.BATCH_DRY and not os.path.exists(output_path):
            try:
                os.makedirs(output_path)
            except OSError as e:
                raise RuntimeError("Unable to create output path '%s': %s."
                                   % (output_path, e))

        commands = self.commands_for(b, settings)
        if not settings.BATCH_DRY:
            self.log_fd = io.open(os.path.join(
                output_path,
                "%s.log" % settings.DATA_FILENAME), "at")
            if b.get('debug_log', False):
                settings.LOG_FILE = os.path.join(
                    output_path,
                    "%s.debug.log" % settings.DATA_FILENAME)

        self.run_commands(commands, 'pre')
        self.run_commands(commands, 'monitor')
        try:
            if settings.BATCH_VERBOSE:
                if settings.BATCH_DRY:
                    sys.stderr.write(" Would run test '%s'.\n" %
                                     settings.NAME)
                else:
                    sys.stderr.write(" Running test '%s'.\n" %
                                     settings.NAME)
                sys.stderr.write(" data_filename=%s\n" %
                                 settings.DATA_FILENAME)
                for k in sorted([i.lower() for i in CONFIG_TYPES.keys()]):
                    if k in b:
                        sys.stderr.write(" %s=%s\n" % (k, b[k]))
            if settings.BATCH_DRY:
                self.tests_run += 1
            else:
                self.run_test(settings, output_path)
        except KeyboardInterrupt:
            # Still run the essential post-commands before propagating.
            self.run_commands(commands, 'post', essential_only=True)
            raise
        except:
            self.run_commands(commands, 'post', essential_only=True)
            sys.stderr.write(" Error running test: %s\n" % " ".join(
                traceback.format_exception_only(*sys.exc_info()[:2])))
        else:
            try:
                self.run_commands(commands, 'post')
            except:
                self.run_commands(commands, 'post', essential_only=True)
                sys.stderr.write(
                    " Error running post-commands: %s\n" % " ".join(
                        traceback.format_exception_only(
                            *sys.exc_info()[:2])))
        finally:
            # Always reap children and close the per-test log.
            self.kill_children()

            if self.log_fd:
                self.log_fd.close()
                self.log_fd = None

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            sys.stderr.write(" Would sleep for %d seconds.\n" % pause)
        elif not settings.BATCH_DRY:
            time.sleep(pause)
def run_batch(self, batch_name):
    """Run all tests in the named batch.

    Looks up *batch_name* in ``self.batches``, applies command-line
    overrides, expands its argument sets into concrete per-test settings
    and runs each resulting test, honouring dry-run, verbose and resume
    modes. Pre/monitor/post commands are run around every test.

    Returns False (without running anything) when the batch is marked
    abstract or disabled; otherwise returns None after all tests ran.
    Raises RuntimeError for an unknown batch name, a missing/invalid
    resume directory, an output path outside the resume directory, or an
    output directory that cannot be created.
    """
    if not batch_name in self.batches:
        raise RuntimeError("Can't find batch '%s' to run." % batch_name)
    # Work on a copy so BATCH_OVERRIDE does not mutate the stored batch.
    batch = self.batches[batch_name].copy()
    batch.update(self.settings.BATCH_OVERRIDE)

    # A batch declared 'abstract' is not runnable
    if batch.get('abstract', False):
        sys.stderr.write(" Batch marked as abstract. Not running.\n")
        return False
    elif batch.get('disabled', False) or not batch.get('enabled', True):
        sys.stderr.write(" Batch disabled.\n")
        return False

    argsets = self.get_argsets(batch)

    # Seconds to sleep between tests (skipped on dry runs below).
    pause = int(batch.get('pause', 0))

    batch_time = None
    if self.settings.BATCH_RESUME is not None and \
            os.path.isdir(self.settings.BATCH_RESUME):
        # We're resuming a batch run. Try to find a data file we can get
        # the original batch run time from.
        for dirpath, dirnames, filenames in os.walk(
                self.settings.BATCH_RESUME):
            datafiles = [f for f in filenames
                         if f.endswith(resultset.SUFFIX)]
            if datafiles:
                # Any one data file carries the batch-wide metadata; use
                # the first found and stop walking.
                f = datafiles[0]
                r = resultset.load(os.path.join(dirpath, f))
                batch_time = r.meta("BATCH_TIME")
                try:
                    # Reuse the original run's UUID so resumed results
                    # group with the earlier ones. Older data files may
                    # lack it, hence the KeyError guard.
                    self.settings.BATCH_UUID = r.meta("BATCH_UUID")
                    sys.stderr.write(" Using previous UUID %s.\n"
                                     % self.settings.BATCH_UUID)
                except KeyError:
                    pass
                break
        if batch_time is None:
            raise RuntimeError("No data files found in resume directory %s."
                               % self.settings.BATCH_RESUME)
    elif self.settings.BATCH_RESUME:
        raise RuntimeError("Batch resume directory %s doesn't exist!\n"
                           % self.settings.BATCH_RESUME)
    else:
        batch_time = self.settings.TIME

    # Track generated data filenames so collisions (which would make one
    # test overwrite another's results/logs) are at least warned about.
    filenames_seen = set()

    for b, settings in self.expand_argsets(batch, argsets, batch_time,
                                           batch_name):
        if 'output_path' in b:
            output_path = clean_path(b['output_path'], allow_dirs=True)
        else:
            output_path = settings.DATA_DIR

        if settings.BATCH_RESUME is not None:
            # Refuse output paths that escape the resume directory;
            # commonprefix() is a string-wise check, so both sides are
            # normalised with abspath() first.
            if os.path.commonprefix(
                    [os.path.abspath(output_path),
                     os.path.abspath(settings.BATCH_RESUME)]) != \
                    os.path.abspath(settings.BATCH_RESUME):
                raise RuntimeError("Batch-specified output path is not a "
                                   "subdirectory of resume path. Bailing.")
            # When resuming, a pre-existing data file means this exact
            # test already completed in the earlier run — skip it.
            if os.path.exists(os.path.join(
                    output_path,
                    "%s%s" % (settings.DATA_FILENAME, resultset.SUFFIX))):
                sys.stderr.write(" Previous result exists, skipping.\n")
                continue

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            sys.stderr.write(" Would output to: %s.\n" % output_path)
        elif not settings.BATCH_DRY and not os.path.exists(output_path):
            try:
                os.makedirs(output_path)
            except OSError as e:
                raise RuntimeError("Unable to create output path '%s': %s."
                                   % (output_path, e))

        commands = self.commands_for(b, settings)

        if not settings.BATCH_DRY:
            # Per-test log file; closed in the finally block below.
            self.log_fd = io.open(os.path.join(
                output_path, "%s.log" % settings.DATA_FILENAME), "at")

        if b.get('debug_log', False):
            settings.LOG_FILE = os.path.join(
                output_path, "%s.debug.log" % settings.DATA_FILENAME)

        if settings.DATA_FILENAME in filenames_seen:
            sys.stderr.write(" Warning: Filename already seen in this "
                             "run: %s\n" % settings.DATA_FILENAME)
        filenames_seen.add(settings.DATA_FILENAME)

        self.run_commands(commands, 'pre')
        self.run_commands(commands, 'monitor')
        try:
            if settings.BATCH_VERBOSE:
                if settings.BATCH_DRY:
                    sys.stderr.write(" Would run test '%s'.\n"
                                     % settings.NAME)
                else:
                    sys.stderr.write(" Running test '%s'.\n"
                                     % settings.NAME)
                sys.stderr.write(" data_filename=%s\n"
                                 % settings.DATA_FILENAME)
                for k in sorted([i.lower() for i in CONFIG_TYPES.keys()]):
                    if k in b:
                        sys.stderr.write(" %s=%s\n" % (k, b[k]))

            if settings.BATCH_DRY:
                self.tests_run += 1
            else:
                self.run_test(settings, output_path)
        except KeyboardInterrupt:
            # User abort: still run the essential post commands, then
            # propagate so the whole batch stops.
            self.run_commands(commands, 'post', essential_only=True)
            raise
        except:
            # Deliberately broad: any other failure in one test must not
            # abort the remaining tests in the batch. Report and go on.
            self.run_commands(commands, 'post', essential_only=True)
            sys.stderr.write(" Error running test: %s\n" % " ".join(
                traceback.format_exception_only(*sys.exc_info()[:2])))
            if settings.DEBUG_ERROR:
                traceback.print_exc()
        else:
            try:
                self.run_commands(commands, 'post')
            except:
                # Post commands failed; fall back to the essential-only
                # subset so cleanup still happens, then report.
                self.run_commands(commands, 'post', essential_only=True)
                sys.stderr.write(
                    " Error running post-commands: %s\n" % " ".join(
                        traceback.format_exception_only(
                            *sys.exc_info()[:2])))
                if settings.DEBUG_ERROR:
                    traceback.print_exc()
        finally:
            # Always reap child processes and release the per-test log
            # fd, whatever happened above.
            self.kill_children()
            if self.log_fd:
                self.log_fd.close()
                self.log_fd = None

        if settings.BATCH_DRY and settings.BATCH_VERBOSE:
            sys.stderr.write(" Would sleep for %d seconds.\n" % pause)
        elif not settings.BATCH_DRY:
            time.sleep(pause)