def cmd(self, args): if args['verbose']: print "Command args -> %s" % args #print "input test suite file -> %s\n" % args['testSuite'] if (os.path.isfile(args['testSuite'])): with open(args['testSuite']) as file: # Build the test configuration tc = YamlTestConfig(args['testSuite']) if args['dict']: print "\nUser test suite configuration (dict style):" print tc.user_config_doc print "\nDefault test suite configuration (dict style):" print tc.default_config_doc print "\nEffective test configuration (dict style, combined User and Default):" print tc.get_effective_config_file() else: print "\nUser test suite configuration (yaml style):" tc.show_user_test_config() print "\nDefault test suite configuration (yaml style):" tc.show_default_config() print "\nEffective test suite configuration (yaml style, combined User and Default):" tc.show_effective_config_file() else: print " Error: could not find test suite %s" % args['testSuite'] sys.exit()
def cmd(self, args): if args['verbose']: print "Command args -> %s" % args #print "input test suite file -> %s\n" % args['testSuite'] tc = YamlTestConfig(args['testSuite'], testname=args['testname'], hostname=args['name'], modelist=args['mode']) if args['dict']: print "\nUser test suite configuration (dict style):" print tc.user_config_doc print "\nDefault test suite configuration (dict style):" print tc.default_config_doc print "\nEffective test configuration (dict style, combined User and Default):" # Check if custom arguments are specified to change individual parameters my_test_suite = tc.get_effective_config_file() if args['custom'] != []: custom_list = [] for custom in args['custom']: if custom[0] == '*': for key, val in my_test_suite.iteritems(): self.logger.info( 'Expanding custom parameter to %s' % (key + custom[1:])) custom_list.append(key + custom[1:]) for custom in custom_list: if '.' in custom: custom_dict = decompose_str(custom) modify_dict(my_test_suite, custom_dict.keys()[0], custom_dict.values()[0]) else: my_test_suite[custom.split('=')[0]] = custom.split( '=')[1] print my_test_suite else: print "\nUser test suite configuration (yaml style):" tc.show_user_test_config() print "\nDefault test suite configuration (yaml style):" tc.show_default_config() print "\nEffective test suite configuration (yaml style, combined User and Default):" # Check if custom arguments are specified to change individual parameters my_test_suite = tc.get_effective_config_file() if args['custom'] != []: custom_list = [] for custom in args['custom']: if custom[0] == '*': for key, val in my_test_suite.iteritems(): self.logger.info( 'Expanding custom parameter to %s' % (key + custom[1:])) custom_list.append(key + custom[1:]) for custom in custom_list: if '.' 
in custom: custom_dict = decompose_str(custom) modify_dict(my_test_suite, custom_dict.keys()[0], custom_dict.values()[0]) else: my_test_suite[custom.split('=')[0]] = custom.split( '=')[1] print json.dumps(my_test_suite, sort_keys=True, indent=4)
def cmd(self, args):
    """ Every class used as a plugin (sub-command) MUST have a method by the
        name of cmd. This is executed when the given sub-command is executed.

        Builds the effective test suite configuration, applies any --custom
        overrides, then instantiates a scheduler-specific TestEntry for each
        stanza and dispatches its jobs (repeatedly, while submit_delay says
        to keep going).
    """
    # Build the test configuration
    tc = YamlTestConfig(args['testSuite'],
                        testname=args['testname'],
                        hostname=args['name'],
                        modelist=args['mode'])

    if args['verbose']:
        print "Command args -> %s" % args
        #print "TestSuite search path -> " + os.path.dirname(
        #    os.path.realpath(args['testSuite']))
        # print "User test suite:"
        # print " %s" % utc

    # get the "merged" test stanza for each test in the test suite
    my_test_suite = tc.get_effective_config_file()

    # Check if custom arguments are specified to change individual parameters.
    # NOTE(review): only entries beginning with '*' are expanded into
    # custom_list and applied; any other --custom entry is silently
    # ignored — confirm this is intended.
    if args['custom'] != []:
        custom_list = []
        for custom in args['custom']:
            if custom[0] == '*':
                # Wildcard: apply this override under every stanza key.
                for key, val in my_test_suite.iteritems():
                    self.logger.info(
                        'Expanding custom parameter to %s' % (key + custom[1:]))
                    custom_list.append(key + custom[1:])
        for custom in custom_list:
            if '.' in custom:
                # Dotted form addresses a nested dictionary entry.
                custom_dict = decompose_str(custom)
                modify_dict(my_test_suite, custom_dict.keys()[0],
                            custom_dict.values()[0])
            else:
                # Flat "key=value" form.
                my_test_suite[custom.split('=')[0]] = custom.split('=')[1]

    # if we are running in debug mode we are then done because we do not need
    # to submit anything
    if args['debug']:
        return

    # Process and launch each test entry (stanza) from the test suite.
    submit_again = True
    while submit_again:
        for entry_id, test_suite_entry in my_test_suite.iteritems():
            # Don't process the DTS definition
            if "DefaultTestSuite" in entry_id:
                continue
            # Don't process include directive
            if "IncludeTestSuite" in entry_id:
                continue
            # Don't process it it is not in the test list (if a test list
            # is specified)
            if args['test']:
                if entry_id not in args['test']:
                    if args['verbose']:
                        print "Skipping %s" % entry_id
                    continue
            # instantiate a new object for each test Entry type
            # ( Raw, Moab, etc. ) i.e. , te = MoabTestEntry(...)
            try:
                st = test_suite_entry['run']['scheduler']
                scheduler_type = st.capitalize()
            except AttributeError:
                # No usable scheduler value (e.g. None) -> plain "Raw" run.
                scheduler_type = "Raw"
            # There needs to be this type of scheduler object implemented to
            # support this. See the testEntry.py file for examples
            object_name = scheduler_type + "TestEntry"
            test_args_restrictions = []
            if 'test_args_restrictions' in test_suite_entry[
                    'run'] and test_suite_entry['run'][
                    'test_args_restrictions']:
                test_args_restrictions = test_suite_entry['run'][
                    'test_args_restrictions']
            # Expand any test_args patterns, honoring the restrictions.
            test_suite_entry['run']['test_args'] = expansion(
                test_suite_entry['run']['test_args'], test_args_restrictions)
            try:
                te = globals()[object_name](entry_id, test_suite_entry, args)
            except KeyError:
                raise ValueError(
                    scheduler_type +
                    " scheduler type not supported (check the test entry), exiting!"
                )
            # If user specifies a max level of jobs to queue and run
            # (watermark) then don't launch a new set if this level is
            # reached.
            if (args['w'] and te.room_to_run(args)) or not args['w']:
                # print "plenty of room to run"
                # launch a new process for each test variation and/or count
                for test_entry in te.get_test_variations():
                    # initialize a unique LDMS for each job
                    os.environ['LDMS_START_CMD'] = ''
                    if args['ldms'] or ('ldms' in test_suite_entry
                                        and test_suite_entry['ldms']['state']):
                        #print test_suite_entry['ldms']['state']
                        te.prep_ldms()
                    for _ in range(te.get_run_count()):
                        #print "dispatch with:"
                        #print test_entry.get_id()
                        self.job_dispatcher(test_entry, args)
        # Ask whether to loop and submit the whole suite again.
        submit_again = RunTestSuite.submit_delay(args)
def cmd(self, args): """ Every class used as a plugin (sub-command) MUST have a method by the name of cmd. This is executed when the given sub-command is executed. """ # Build the test configuration tc = YamlTestConfig(args['testSuite']) utc = tc.user_config_doc if args['verbose']: print "Command args -> %s" % args print "TestSuite search path -> " + os.path.dirname( os.path.realpath(args['testSuite'])) print "User test suite:" print " %s" % utc # get the "merged" test stanza for each test in the test suite my_test_suite = tc.get_effective_config_file() # Process and launch each test entry (stanza) from the test suite. submit_again = True while submit_again: for entry_id, test_suite_entry in my_test_suite.iteritems(): # Don't process the DTS definition if "DefaultTestSuite" in entry_id: continue # Don't process include directive if "IncludeTestSuite" in entry_id: continue # instantiate a new object for each test Entry type ( Raw, Moab, etc. ) # i.e. , te = MoabTestEntry(...) try: st = test_suite_entry['run']['scheduler'] scheduler_type = st.capitalize() except AttributeError: scheduler_type = "Raw" # There needs to be this type of scheduler object implemented to support this # See the testEntry.py file for examples object_name = scheduler_type + "TestEntry" try: te = globals()[object_name](entry_id, test_suite_entry, args) except KeyError: raise ValueError(scheduler_type + " scheduler type not supported (check the test entry), exiting!") # If user specifies a max level of jobs to queue and run (watermark) then # don't launch a new set if this level is reached. 
if (args['w'] and te.room_to_run(args)) or not args['w']: # print "plenty of room to run" # launch a new process for each test variation and/or count for test_entry in te.get_test_variations(): # initialize a unique LDMS for each job os.environ['LDMS_START_CMD'] = '' if args['ldms'] or ('ldms' in test_suite_entry and test_suite_entry['ldms']['state']): #print test_suite_entry['ldms']['state'] te.prep_ldms() for _ in range(te.get_run_count()): #print "dispatch with:" #print test_entry.get_id() self.job_dispatcher(test_entry, args) submit_again = RunTestSuite.submit_delay(args)
def test_invalid_yaml_file(self):
    """A syntactically invalid YAML suite file must abort with SystemExit."""
    self.assertRaises(SystemExit, YamlTestConfig, 'invalid_yaml.yaml')
def test_malformed_default_file(self):
    """A malformed default-config file must abort with SystemExit."""
    self.assertRaises(SystemExit, YamlTestConfig, 'malformed_default.yaml')
def cmd(self, args): if args['verbose']: print "Command args -> %s" % args # is test_suite specified? if args['ts']: dts = str(args['ts'][0]) else: print "Will look for default_test_config.yaml in current working directory ..." dts = os.getcwd() + "/default_test_config.yaml" tc = YamlTestConfig(dts) if args['verbose']: print "effective test suite configuration:" tsc = tc.get_effective_config_file() print tsc # *** need to handle ALL result locations here! res_loc_list = tc.get_result_locations() # print res_loc_list for results_dir in res_loc_list: # print "\nFor results location: %s " % results_dir os.environ['PV_RESULT_ROOT'] = results_dir try: if os.access(results_dir, os.R_OK) is False: print " Warning: results directory (%s) not readable, skipping" % results_dir continue except Exception as ex: template = "An exception of type {0} occurred." print "No results 'root' directory defined in the test suite config file(s), exiting!" message = template.format(type(ex).__name__, ex.args) #print message sys.exit() # call something here that gets the results self.logger.debug('get_results from %s' % results_dir) # add in all the possible args # implement different shared Nix groups later, using gzshared for now bc = "/scripts/get_results -g gzshared" if args['pass']: bc += " -p " if args['fail']: bc += " -f " if args['verbose']: bc += " -v " if args['inc']: bc += " -i " if args['s']: bc += " -s " + args['s'][0] if args['S']: bc += " -S " + args['S'][0] if args['e']: bc += " -e " + args['e'][0] if args['E']: bc += " -E " + args['E'][0] if args['t']: bc += " -t " + args['t'][0] if args['u']: bc += " -u " + args['u'][0] if args['xtime']: bc += " -x " if args['td']: bc += " -T " if args['make_box_plots']: plot_cmd = os.environ['PVINSTALL'] + "/PAV/modules/makeboxplots.py" gr_cmd = os.environ['PVINSTALL'] + "/PAV" + bc + " -T -l " + results_dir + " | " + plot_cmd elif args['show_linecharts']: gr_cmd = os.environ['PVINSTALL'] + "/PAV/scripts/showtd " + results_dir + 
"/test_results.csv" if args['t']: gr_cmd += " -t " + args['t'][0] if args['s']: gr_cmd += " -s " + args['s'][0] if args['e']: gr_cmd += " -e " + args['e'][0] elif args['make_baselines']: bl1_cmd = os.environ['PVINSTALL'] + "/PAV/modules/makebaselines.py" bl2_cmd = os.environ['PVINSTALL'] + "/PAV/scripts/mkBaselines" gr_cmd = os.environ['PVINSTALL'] + "/PAV" + bc + " -T -l " + results_dir + " | " +\ bl1_cmd + " | " + bl2_cmd else: gr_cmd = os.environ['PVINSTALL'] + "/PAV" + bc + " -l " + results_dir if args['verbose']: print "Using command:" print gr_cmd gr_output = subprocess.check_output(gr_cmd, shell=True) print "\n" + gr_output
def cmd(self, args): if args['verbose']: print "Command args -> %s" % args if args['delimiter']: delim = str(args['delimiter'][0]) if args['verbose']: print 'delimiter is "' + delim + '"' else: delim = " " # NOTE if maxfilesize becomes 0 it will be logically equivalent to None # below and not checked. So zero is equivalent to infinity here. if args['maxsize']: maxfilesize = int(args['maxsize'][0]) else: # default to 100 mb maxfilesize = None # is test_suite specified? if args['ts']: dts = str(args['ts'][0]) tc = YamlTestConfig(dts) tsc = tc.get_effective_config_file() if args['verbose']: print "effective test suite configuration:" print tsc res_loc_list = tc.get_result_locations() else: if args['verbose']: print "will look in the working directory ..." res_loc_list = [os.getcwd()] if args['verbose']: print res_loc_list # establish time window # we'll need this for start time constraints # getting time in epoch form so it's easy to subtract and compare now = time.mktime(time.localtime()) mdytimeformat = "%m-%d-%YT%H:%M:%S" ymdtimeformat = "%Y-%m-%dT%H:%M:%S" # default is 15 days ago to now starttimeinterval = (time.localtime(now - 1296000), time.localtime(now)) endtimeinterval = (time.localtime(now - 1296000), time.localtime(now)) if args['s']: starttimeinterval = (time.strptime(args['s'][0], ymdtimeformat), starttimeinterval[1]) if args['S']: starttimeinterval = (starttimeinterval[0], time.strptime(args['S'][0], ymdtimeformat)) if args['e']: endtimeinterval = (time.strptime(args['e'][0], ymdtimeformat), endtimeinterval[1]) if args['E']: endtimeinterval = (endtimeinterval[0], time.strptime(args['E'][0], ymdtimeformat)) if args['verbose']: print "showing tests that start in the interval: " + str( starttimeinterval) print "showing tests that end in the interval: " + str( endtimeinterval) # output format if args['o']: outputform = args['o'][0].split() elif ("PTH_TABLEFORMAT" in os.environ): outputform = os.environ["PTH_TABLEFORMAT"].split() if args['verbose']: print 
"using PTH_TABLEFORMAT: " + os.environ["PTH_TABLEFORMAT"] else: outputform = ['n', 'r', 's', 'e'] outputheader = [] for field in outputform: if field == "f": outputheader.append("filename") if field == "r": outputheader.append("result") if field == "s": outputheader.append("start-time") if field == "e": outputheader.append("end-time") if field == "n": outputheader.append("test-name") if field == "o": outputheader.append("#nodes") if field == "c": outputheader.append("#cores") if field[0] == "d": outputheader.append(field[1:]) # get the results log files outputlist = [] logfilecount = 0 for results_dir in res_loc_list: if not os.path.isdir(results_dir): print "pav: error: looking in an invalid directory: " + str( results_dir) else: for base, dirs, files in os.walk(results_dir): for alogfile in fnmatch.filter(files, "*.log"): logfilecount += 1 if args['verbose']: print "considering log file: " + base + "/" + alogfile # check file name match before opening the file # NOTE, assuming that the file name will contain the test name # otherwise we need to parse test name out of contents if args['t'] and not args['t'][0] in alogfile: if args['verbose']: print " doesnt match test name pattern" continue # enqueue time is in the directory path, don't need to open the file yet starttime = re.search( "[0-9]+-[0-9]+-[0-9]+T[0-9]+:[0-9]+:[0-9]+", base) if starttime: starttime = time.strptime(str(starttime.group(0)), ymdtimeformat) # if we didnt get a start time, assume it is okay if starttime and not ( starttime >= starttimeinterval[0] and starttime <= starttimeinterval[1]): if args['verbose']: print " start outside allowed interval" continue # is file "too big"? 
# better warn here anyway so we know when files are skipped if (maxfilesize and os.stat(base + "/" + alogfile).st_size > maxfilesize): if args['verbose']: print " file size is greater than maximum allowed : " + \ str(os.stat(base + "/" + alogfile).st_size) + " > " + str(maxfilesize) continue contents = open(base + "/" + alogfile, 'r').read() # check pass/fail failed = False passed = False state = "I" if "<result> failed" in contents: failed = True state = "F" elif "<result> passed" in contents: passed = True state = "P" # get out if we weren't interested in that kind of result if failed and (args['pass'] or args['inc'] or args['exclude_fail']): if args['verbose']: print " doesn't match pass/fail conditions" continue if passed and (args['fail'] or args['inc'] or args['exclude_pass']): if args['verbose']: print " doesn't match pass/fail conditions" continue if not passed and not failed and (args['fail'] or args['pass'] or args['exclude_inc']): if args['verbose']: print " doesn't match pass/fail conditions" continue # we checked start time above endtime = re.search("<end>.*", contents) # endtime is either after "<end>" or after "remove WS:" if endtime: # convert it to epoch seconds endtime = time.strptime( str(endtime.group(0).split()[1]), mdytimeformat) else: endtime = re.search( "remove WS:.*\.([0-9]+-[0-9]+-[0-9]+T[0-9]+:[0-9]+:[0-9]+)", contents) if endtime: endtime = time.strptime( str(endtime.group(1)), mdytimeformat) if endtime and not (endtime >= endtimeinterval[0] and endtime <= endtimeinterval[1]): if args['verbose']: print " end outside allowed interval" continue # need a specific target segment? 
# in this case no segment doesnt match segment = re.search("<target_seg> (.*)", contents) if segment: segment = str(segment.group(1)) if args["u"] and (not segment or not args['u'][0] in segment): if args['verbose']: print " target segment doesn't match " + args[ 'u'][0] + "<>" + str(segment) continue # store the required fields for pretty printing later output = [] for field in outputform: if field == 'f': output.append(base + "/" + alogfile) if field == 'r': output.append(state) if field == 's': if starttime: output.append( time.strftime(ymdtimeformat, starttime)) else: output.append("") if field == 'e': if endtime: output.append( time.strftime(ymdtimeformat, endtime)) else: output.append("") if field == 'n': # still assuming logfile name == test name output.append(alogfile[:-4]) if field == 'o': numnodes = re.search("<nnodes> (.*)", contents) if numnodes: output.append(str(numnodes.group(1))) else: output.append("") if field == 'c': npes = re.search("<npes> (.*)", contents) if numnodes: output.append(str(npes.group(1))) else: output.append("") if field[0] == 'd': found = re.search("<" + field[1:] + "> (.*)", contents) if found: output.append(found.group(1)) else: output.append("") outputlist.append(output) # space buffered table # we could use a package to make this easier, but I dont want to add a dependency bufferlist = [] for f in outputheader: bufferlist.append(len(f)) for l in outputlist: for fn in range(len(l)): if len(l[fn]) > bufferlist[fn]: bufferlist[fn] = len(l[fn]) for fn in range(len(outputheader)): print outputheader[fn] + " " * (bufferlist[fn] - len(outputheader[fn])) + delim, print "" for l in outputlist: for fn in range(len(l)): print l[fn] + " " * (bufferlist[fn] - len(l[fn])) + delim, print "" print "\n" + str(len(outputlist)) + " results found out of " + str( logfilecount) + " .log files"