def doExec( category, package, version, action, opts ):
    """Run the build script for category/package-version as a subprocess.

    The package's script file (resolved via portage.getFilename) is executed
    with ``python <file> <action> <opts...>`` through utils.system.

    Parameters:
        category, package, version -- identify the package script to run
        action  -- the emerge action to pass to the script (e.g. "all")
        opts    -- iterable of extra command-line options for the script

    Returns True on success, False if spawning the subprocess raised OSError.
    Calls utils.die (which presumably aborts) when the script exits non-zero.
    """
    timerName = "%s for %s" % ( action, package )
    utils.startTimer( timerName, 1 )
    utils.debug( "emerge doExec called. action: %s opts: %s" % ( action, opts ), 2 )
    fileName = portage.getFilename( category, package, version )
    # " ".join is the idiomatic replacement for the ("%s " * n) % tuple(opts)
    # trick; str() keeps the old %s stringification of non-string options.
    opts_string = " ".join( str( o ) for o in opts )
    commandstring = "python %s %s %s" % ( fileName, action, opts_string )
    utils.debug( "file: " + fileName, 1 )
    #Switched to import the packages only, because otherwise debugging is very hard, if it troubles switch back
    #makes trouble for xcompile -> changed back
    try:
        if not utils.system( commandstring ):
            utils.die( "running %s" % commandstring )
        #mod = portage.__import__( fileName )
        #mod.Package().execute(action)
    except OSError:
        return False
    finally:
        # finally guarantees the timer is stopped even if utils.die raises,
        # which the original duplicated stopTimer calls did not.
        utils.stopTimer( timerName )
    return True
# NOTE(review): fragment of a larger talos test-driver function (its `def` is
# not visible here and the last `try` has no `except` in view); indentation was
# reconstructed from the collapsed source -- confirm nesting against the full file.
# Runs one test via TTest, stores its output, optionally writes CSV results,
# then reports cycle timing and (guardedly) uploads results to a graph server.
try:
    mytest = TTest(browser_config['remote'])
    # runTest yields the raw browser output, counter data and a format tag
    browser_dump, counter_dump, print_format = mytest.runTest(browser_config, test)
    utils.debug("Received test results: " + " ".join(browser_dump))
    results[testname] = [browser_dump, counter_dump, print_format]
    # If we're doing CSV, write this test immediately (bug 419367)
    if csv_dir != '':
        send_to_csv(csv_dir, {testname : results[testname]})
    if to_screen or amo:
        # None as csv_dir presumably means "print to screen" -- verify in send_to_csv
        send_to_csv(None, {testname : results[testname]})
except talosError, e:
    # Python 2 except syntax; a failed test is logged but does not abort the run
    utils.stamped_msg("Failed " + testname, "Stopped")
    print 'FAIL: Busted: ' + testname
    print 'FAIL: ' + e.msg.replace('\n','\nRETURN:')
utils.stamped_msg("Completed test " + testname, "Stopped")
elapsed = utils.stopTimer()
print "RETURN: cycle time: " + elapsed + "<br>"
utils.stamped_msg(title, "Stopped")
#process the results
if (results_server != '') and (results_link != ''):
    #send results to the graph server
    try:
        # NOTE(review): this re-checks server/link inside the outer guard, and
        # `is not ''` tests identity rather than equality -- likely meant `!= ''`
        if (results_server is not None and results_server is not '' and
                results_link is not None and results_link is not ''):
            utils.stamped_msg("Sending results", "Started")
            links = send_to_graph(results_server, results_link, title, date, browser_config, results, amo)
            results_from_graph(links, results_server, amo)
            utils.stamped_msg("Completed sending results", "Stopped")
def doExec( package, action, continueFlag = False ):
    """Execute *action* on *package* and time the run.

    Returns the value of package.execute(action), falling back to
    *continueFlag* when that value is falsy.
    """
    timerName = "%s for %s" % ( action, package )
    utils.startTimer( timerName, 1 )
    EmergeDebug.info("Action: %s for %s" % (action, package))
    result = package.execute( action )
    utils.stopTimer( timerName )
    return result or continueFlag
# NOTE(review): fragment of the emerge main driver; the first statement sits
# inside conditionals whose headers are outside this chunk, so indentation was
# reconstructed from the two dangling `else:` clauses -- confirm against the
# full file. Either pretends (prints what would be done) or actually runs the
# requested build action, then chains any remaining command-line arguments.
        msg += portage.getHostAndTarget( hostEnabled and not hostInstalled, targetEnabled and not targetInstalled )
    else:
        msg = ""
    utils.warning( "pretending %s/%s-%s%s" % ( mainCategory, mainPackage, mainVersion, msg ) )
else:
    mainAction = mainBuildAction
    # "install-deps" is carried out via the full "all" action
    if mainBuildAction == "install-deps":
        mainAction = "all"
    if not handlePackage( mainCategory, mainPackage, mainVersion, mainAction, mainOpts ):
        utils.error( "fatal error: package %s/%s-%s %s failed" % \
            ( mainCategory, mainPackage, mainVersion, mainBuildAction ) )
        exit( 1 )
utils.new_line()
if len( nextArguments ) > 0:
    # re-invoke this script (same interpreter, unbuffered) with the remaining arguments
    command = "\"" + sys.executable + "\" -u " + executableName + " " + " ".join( nextArguments )
    #for element in environ.keys():
    #    if environ[ element ]:
    #        os.environ[ element ] = environ[ element ]
    #    elif element == "EMERGE_VERBOSE":
    #        os.environ[ element ] = "1"
    #    else:
    #        os.environ[ element ] = ""
    if not utils.system(command):
        utils.die( "cannot execute next commands cmd: %s" % command )
utils.stopTimer("Emerge")
# NOTE(review): tail fragment of a talos runner function; the matching `try`
# and the enclosing `def` are outside this chunk, so indentation was
# reconstructed -- confirm nesting against the full file. Handles fatal talos
# errors, then performs per-run cleanup (logcat, webserver, results output)
# and returns the function's exit status.
except (talosCrash, talosError):
    # NOTE: if we get into this condition, talos has an internal problem and cannot continue
    # this will prevent future tests from running
    utils.stamped_msg("Failed %s" % testname, "Stopped")
    talosError_tb = sys.exc_info()
    traceback.print_exception(*talosError_tb)
    print_logcat()
    if httpd:
        httpd.stop()
    # indicate a failure to buildbot, turn the job red
    return 2
utils.stamped_msg("Completed test " + testname, "Stopped")
print_logcat()
elapsed = utils.stopTimer()
print "cycle time: " + elapsed
utils.stamped_msg(title, "Stopped")
# stop the webserver if running
if httpd:
    httpd.stop()
# output results
if results_urls:
    talos_results.output(results_urls, **results_options)
# we will stop running tests on a failed test, or we will return 0 for green
return 0