def _setup_arguments(self, parser):
    """Register this command's arguments: verbosity, the standard test
    filter options, and the --all catch-all flag."""

    parser.add_argument(
        '-v', '--verbose',
        default=False, action='store_true',
        help='Verbose output.')

    # Standard test-run filtering options (shared across commands).
    filters.add_test_filter_args(parser)

    parser.add_argument(
        '-a', '--all',
        action='store_true',
        help='Attempts to remove everything in the working directory, '
             'regardless of age.')
def _setup_arguments(self, parser):
    """Set up the results command's arguments: output format options,
    result re-running flags, the tests to report on, and the standard
    test filter options.

    Fix: the '--show-log' help text was built from adjacent string
    literals missing a joining space ("particularly" + "useful"),
    producing "particularlyuseful" in --help output.
    """
    parser.add_argument(
        "-j", "--json",
        action="store_true", default=False,
        help="Give the results in json.")
    # --key and --full are mutually exclusive ways to pick result keys.
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "-k", "--key",
        action='append', nargs="*", default=[],
        help="Additional result keys to display.")
    group.add_argument(
        "-f", "--full",
        action="store_true", default=False,
        help="Show all result keys.")
    parser.add_argument(
        '-r', '--re-run', dest="re_run",
        action='store_true', default=False,
        help="Re-run the results based on the latest version of the test "
             "configs, though only changes to the 'result' section are "
             "applied. This will not alter anything in the test's run "
             "directory; the new results will be displayed but not "
             "otherwise saved or logged.")
    parser.add_argument(
        '-s', '--save',
        action='store_true', default=False,
        help="Save the re-run to the test's results json and log. Will "
             "not update the general pavilion result log.")
    parser.add_argument(
        '-L', '--show-log',
        action='store_true', default=False,
        # Fixed missing space between the concatenated literals.
        help="Also show the result processing log. This is particularly "
             "useful when re-parsing results, as the log is not saved.")
    parser.add_argument(
        "tests", nargs="*",
        help="The tests to show the results for. Use 'last' to get the "
             "results of the last test series you ran on this machine.")
    filters.add_test_filter_args(parser)
def _setup_arguments(self, parser):
    """Register the graph command's arguments: which tests to plot,
    the output file, axis values/labels, averaging, and image size."""

    # Standard test-run filtering options come first.
    filters.add_test_filter_args(parser)

    parser.add_argument(
        'tests',
        nargs='*', default=[], action='store',
        help='Specific Test Ids to graph. ')
    parser.add_argument(
        '--outfile', '-o',
        action='store', required=True,
        help='Desired name of graph when saved to PNG.')
    parser.add_argument(
        '--exclude',
        default=[], action='append',
        help='Exclude specific Test Ids from the graph.')
    parser.add_argument(
        '--y', '-y',
        action='append', required=True,
        help='Specify the value(s) graphed from the results '
             'for each test.')
    parser.add_argument(
        '--x', '-x',
        action='store', required=True,
        help='Specify the value to be used on the X axis.')
    parser.add_argument(
        '--xlabel',
        action='store', default="",
        help='Specify the x axis label.')
    parser.add_argument(
        '--ylabel',
        action='store', default="",
        help='Specify the y axis label.')
    parser.add_argument(
        '--average',
        action='append', default=[],
        help='Generate an average plot for the specified y value(s).')
    parser.add_argument(
        '--dimensions',
        action='store', default='',
        help="Specify the image size. Expects a 'width x height' format.")
def _setup_arguments(self, parser):
    """Register the status command's arguments: output format, the tests
    to check, display-mode options, and the standard test filters."""

    parser.add_argument(
        '-j', '--json',
        default=False, action='store_true',
        help='Give output as json, rather than as standard human readable.')
    parser.add_argument(
        'tests',
        nargs='*', action='store',
        help="The name(s) of the tests to check. These may be any mix of "
             "test IDs and series IDs. Use 'last' to get just the last "
             "series you ran.")

    # --summary and --history are mutually exclusive display modes.
    mode_group = parser.add_mutually_exclusive_group()
    mode_group.add_argument(
        '-s', '--summary',
        action='store_true', default=False,
        help='Display a single line summary of test statuses.')
    mode_group.add_argument(
        '--history',
        action='store_true', default=False,
        help='Display status history for a single test_run.')

    filters.add_test_filter_args(parser)
def test_run_parser_args(self):
    """Test adding standardized test run filter args."""

    class ExitError(RuntimeError):
        pass

    class NoExitParser(argparse.ArgumentParser):
        """An argument parser that raises instead of exiting, so a bad
        parse can be asserted on rather than killing the test run."""

        def error(self, message):
            """Raise rather than exit on parse errors."""
            raise ExitError()

        def exit(self, status=0, message=None):
            """Raise rather than exit outright."""
            raise ExitError()

    # Overriding a default that doesn't exist must be rejected.
    with self.assertRaises(RuntimeError):
        filters.add_test_filter_args(
            arg_parser=NoExitParser(),
            default_overrides={'doesnt_exist': True})

    # With no args given, every filter option should land on its default,
    # and every default should correspond to exactly one option.
    bare_parser = NoExitParser()
    filters.add_test_filter_args(bare_parser)
    args = bare_parser.parse_args(args=[])
    remaining = set(filters.TEST_FILTER_DEFAULTS.keys())
    for name, val in vars(args).items():
        self.assertIn(
            name, remaining,
            msg="Missing default for '{}' argument.".format(name))
        self.assertEqual(
            val, filters.TEST_FILTER_DEFAULTS[name],
            msg="Misapplied default for '{}' argument.".format(name))
        remaining.remove(name)
    self.assertEqual(
        set(), remaining,
        msg="TEST_FILTER_DEFAULTS has unused keys '{}'".format(remaining))

    ref_time = dt.datetime.now()

    # A parser with overridden defaults and custom sort functions.
    custom_parser = NoExitParser()
    filters.add_test_filter_args(
        custom_parser,
        default_overrides={
            'newer_than': ref_time,
            'name': 'foo.*',
        },
        sort_functions={
            'sort_foo': lambda d: 'foo',
            'sort_bar': lambda d: 'bar',
        })

    # The built-in 'name' sort should have been displaced by the
    # custom sort functions.
    with self.assertRaises(ExitError):
        custom_parser.parse_args(['--sort-by=name'])

    args = custom_parser.parse_args([
        '--passed',
        '--complete',
        '--newer-than=',   # An empty value clears the default.
        '--older-than=1 week',
        '--sort-by=-sort_foo',
    ])

    self.assertTrue(args.passed)
    self.assertTrue(args.complete)
    self.assertIsNone(args.newer_than)
    # We only care that this parsed into a datetime roughly a week back.
    self.assertLess(args.older_than, dt.datetime.now() - dt.timedelta(days=6))
    self.assertEqual(args.sort_by, '-sort_foo')

    # The series-specific arg helper should be a thin wrapper over the
    # common one, so both parsers should produce identical namespaces.
    common_parser = NoExitParser()
    series_parser = NoExitParser()
    sort_opts = list(filters.SERIES_SORT_FUNCS.keys())
    filters.add_common_filter_args(
        "", common_parser, filters.SERIES_FILTER_DEFAULTS,
        sort_options=sort_opts)
    filters.add_series_filter_args(series_parser)
    self.assertEqual(
        vars(common_parser.parse_args([])),
        vars(series_parser.parse_args([])),
        msg="The series and common args should be the same. If "
            "they've diverged, add tests to check the untested "
            "values (the common ones are tested via the test_run args).")
def _setup_arguments(self, parser):
    """Set up the list command's arguments: output mode flags, field and
    formatting options, and 'test_runs'/'series' subcommands.

    Fix: the '--out-fields' help text was built from adjacent string
    literals missing a joining space ("More" + "than"), producing
    "Morethan" in --help output.
    """
    # Kept so sub-command handling can re-inspect the parser later.
    self._parser = parser

    # Exactly one output mode may be chosen; default is space-separated.
    output_mode = parser.add_mutually_exclusive_group()
    output_mode.add_argument(
        '--multi-line', action='store_const',
        default=self.OUTMODE_SPACE, dest='output_mode',
        const=self.OUTMODE_NEWLINE,
        help="List the results one per line.")
    output_mode.add_argument(
        '--long', '-l', action='store_const',
        dest='output_mode', const=self.OUTMODE_LONG,
        help="Show additional fields, one per line.\n"
             "Default fields: {}".format(self.RUN_LONG_FIELDS))
    output_mode.add_argument(
        '--csv', action='store_const', const=self.OUTMODE_CSV,
        dest='output_mode', help="Write output as CSV.")
    output_mode.add_argument(
        '--show-fields', action='store_true', default=False,
        help='Print the available output fields and exit.')

    parser.add_argument(
        '--out-fields', '-O',
        # Fixed missing space between the concatenated literals.
        help="A comma separated list of fields to put in the output. More "
             "than one field requires '--long' or '--csv' mode. "
             "See --show-fields for available fields.")
    parser.add_argument(
        '--header', action='store_true', default=False,
        help="Print a header when printing in --long or --csv mode.")
    parser.add_argument(
        '--vsep', default='|', type=lambda s: s[0],
        help="Vertical separator for --long mode. Single character only.")
    parser.add_argument(
        '--wrap', action='store_true', default=False,
        help="Auto-wrap the table columns in long output mode.")

    subparsers = parser.add_subparsers(
        dest="sub_cmd",
        help="What to list.")

    runs_p = subparsers.add_parser(
        'test_runs', aliases=['runs', 'tests'],
        help="List test runs.",
        description="Print a list of test run id's.")
    filters.add_test_filter_args(runs_p)
    runs_p.add_argument(
        'series', nargs="*",
        help="Print only test runs from these series.")

    series_p = subparsers.add_parser(
        'series',
        help="List test series.",
        description="Give a list of test series id's.")
    filters.add_series_filter_args(series_p)