def main(args=sys.argv[1:]):
    """Command-line entry point: build the Talos option parser,
    configure logging, and hand the parser to run_tests.

    Exits the process with run_tests' return code.
    """
    usage = "%prog [options] manifest.yml [manifest.yml ...]"
    parser = PerfConfigurator.PerfConfigurator(usage=usage)
    # disable automatic dumping
    parser._dump = False
    parser.add_option('-d', '--debug', dest='debug', action='store_true',
                      default=False,
                      help="enable debug")
    parser.add_option('-n', '--noisy', dest='noisy', action='store_true',
                      default=False,
                      help="DEPRECATED: this is now the default")
    options, args = parser.parse_args(args)

    # pick log verbosity from the --debug flag
    log_level = 'debug' if options.debug else 'info'
    utils.startLogger(log_level)

    sys.exit(run_tests(parser))
httpd.stop() logging.info("Completed test suite (%s)", timer.elapsed()) # output results if results_urls: talos_results.output(results_urls) if browser_config['develop']: print print( "Thanks for running Talos locally. Results are in" " %s and %s" % (results_urls['results_urls'], results_urls['datazilla_urls'])) # we will stop running tests on a failed test, or we will return 0 for # green return 0 def main(args=sys.argv[1:]): try: config, browser_config = get_configs() except ConfigurationError, exc: sys.exit("ERROR: %s" % exc) utils.startLogger('debug' if config['debug'] else 'info') sys.exit(run_tests(config, browser_config)) if __name__ == '__main__': main()
finally: httpd.stop() logging.info("Completed test suite (%s)", timer.elapsed()) # output results if results_urls: talos_results.output(results_urls) if browser_config['develop']: print print ("Thanks for running Talos locally. Results are in" " %s and %s" % (results_urls['results_urls'], results_urls['datazilla_urls'])) # we will stop running tests on a failed test, or we will return 0 for # green return 0 def main(args=sys.argv[1:]): try: config, browser_config = get_configs() except ConfigurationError, exc: sys.exit("ERROR: %s" % exc) utils.startLogger('debug' if config['debug'] else 'info') sys.exit(run_tests(config, browser_config)) if __name__ == '__main__': main()
def main():
    """Drive the ruffus pipeline.

    Parses command line arguments, imports the user's pipeline module so
    its stages are registered, then — depending on the requested style —
    runs the pipeline, draws it as a flowchart, or prints a dry-run
    description of what it would do.
    """
    args = get_cmdline_args()

    # We want to look for modules in the directory local to the pipeline,
    # just as if the pipeline script had been called directly.
    # This includes the script itself and the config files imported by
    # getOptions.
    sys.path.insert(0, os.path.dirname(args.pipeline))

    # options must be set before the pipeline is imported
    options = getOptions(args)
    setOptions(options)

    # import the pipeline so its stages are defined;
    # the name of the pipeline is given on the command line
    __import__(drop_py_suffix(args.pipeline))

    startLogger()

    pipelineOptions = options.pipeline
    endTasks = pipelineOptions['end']
    forcedTasks = pipelineOptions['force']
    style = pipelineOptions['style']
    # Only an explicit 'fromend' disables ruffus' maximal rebuild mode;
    # 'fromstart' and any other value leave it on (the original
    # if/elif/else mapped every non-'fromend' value to True).
    rebuildMode = pipelineOptions['rebuild'] != 'fromend'

    if style in ['run', 'touchfiles']:
        # If the style was touchfiles, set a flag to bring files up to
        # date without running anything.
        touchfiles_flag = (style == 'touchfiles')
        # Perform the pipeline steps (run the pipeline).
        pipeline_run(
            # End points of the pipeline.
            endTasks,
            # How many ruffus tasks to run.
            multiprocess=pipelineOptions['procs'],
            logger=black_hole_logger,
            # Force the pipeline to start from here, regardless of whether
            # the stage is up-to-date or not.
            forcedtorun_tasks=forcedTasks,
            touch_files_only=touchfiles_flag,
            # Choose the mode in which ruffus decides how much work needs
            # to be done.
            gnu_make_maximal_rebuild_mode=rebuildMode)
    elif style == 'flowchart':
        # Draw the pipeline as a diagram.
        pipeline_printout_graph('flowchart.svg', 'svg', endTasks,
                                no_key_legend=False)
    elif style == 'print':
        # Print a textual description of what the pipeline would do,
        # but don't actually run it.
        pipeline_printout(sys.stdout, endTasks, verbose=5,
                          wrap_width=100000,
                          forcedtorun_tasks=forcedTasks,
                          gnu_make_maximal_rebuild_mode=rebuildMode)
def main():
    """Command-line driver for the ruffus pipeline.

    Parses arguments, imports the user's pipeline module so its stages
    register themselves, then either runs the pipeline, renders it as a
    flowchart, or prints a textual dry-run description.
    """
    args = get_cmdline_args()

    # Look for modules in the directory local to the pipeline, just as if
    # the pipeline script had been called directly. This covers the script
    # itself and the config files imported by getOptions.
    sys.path.insert(0, os.path.dirname(args.pipeline))

    # Options must be set before the pipeline is imported.
    options = getOptions(args)
    setOptions(options)

    # Import the pipeline (named on the command line) so its stages are
    # defined.
    __import__(drop_py_suffix(args.pipeline))

    # NOTE(review): logDir looks unused here — presumably startLogger()
    # reads it through module state; confirm before removing.
    logDir = options.pipeline['logDir']
    startLogger()

    pipeline_opts = options.pipeline
    end_tasks = pipeline_opts['end']
    forced_tasks = pipeline_opts['force']
    style = pipeline_opts['style']

    # Only an explicit 'fromend' turns maximal rebuild mode off;
    # 'fromstart' and every other value leave it on.
    rebuild_mode = pipeline_opts['rebuild'] != 'fromend'

    if style == 'run' or style == 'touchfiles':
        # 'touchfiles' brings outputs up to date without running anything.
        pipeline_run(
            end_tasks,  # end points of the pipeline
            # How many ruffus tasks to run.
            multiprocess=pipeline_opts['procs'],
            logger=black_hole_logger,
            # Force these stages to run whether or not they are
            # up to date.
            forcedtorun_tasks=forced_tasks,
            touch_files_only=(style == 'touchfiles'),
            # How ruffus decides how much work needs to be done.
            gnu_make_maximal_rebuild_mode=rebuild_mode)
    elif style == 'flowchart':
        # Draw the pipeline as a diagram.
        pipeline_printout_graph(
            'flowchart.svg', 'svg', end_tasks, no_key_legend=False)
    elif style == 'print':
        # Describe what the pipeline would do without actually running it.
        pipeline_printout(
            sys.stdout, end_tasks, verbose=5, wrap_width=100000,
            forcedtorun_tasks=forced_tasks,
            gnu_make_maximal_rebuild_mode=rebuild_mode)