def doExec( category, package, version, action, opts ):
    """Run *action* for a package by invoking its portage script in a
    child python process.

    Args:
        category, package, version: identify the package script
            (resolved to a file via portage.getFilename).
        action: the build action passed to the script.
        opts: iterable of extra command-line options appended to the
            invocation.

    Returns:
        False if launching the command raised OSError, True otherwise.
        A non-zero exit status from the script aborts via utils.die.
    """
    timerName = "%s for %s" % ( action, package )
    utils.startTimer( timerName, 1 )
    utils.debug( "emerge doExec called. action: %s opts: %s" % (action, opts), 2 )
    fileName = portage.getFilename( category, package, version )
    opts_string = ( "%s " * len( opts ) ) % tuple( opts )
    commandstring = "python %s %s %s" % ( fileName, action, opts_string )
    utils.debug( "file: " + fileName, 1 )
    try:
        # Run the script as a subprocess. Importing the package module
        # directly made debugging easier but broke cross-compiling, so
        # the subprocess route is kept.
        if not utils.system( commandstring ):
            utils.die( "running %s" % commandstring )
    except OSError:
        return False
    finally:
        # try/finally guarantees the timer is stopped on every exit path;
        # the original duplicated the stopTimer call before each return.
        utils.stopTimer( timerName )
    return True
def test_file(filename, to_screen, amo):
    """Runs the talos tests on the given config file and generates a report.

    Args:
      filename: the name of the file to run the tests on
      to_screen: boolean, determine if all results should be outputed
          directly to stdout
    """
    browser_config = []
    tests = []
    title = ''
    testdate = ''
    csv_dir = ''
    results_server = ''
    results_link = ''
    results = {}

    # Read in the profile info from the YAML config file
    # NOTE(review): yaml.load on arbitrary input is unsafe for untrusted
    # files; yaml.safe_load would be preferable - confirm the config
    # source is trusted.
    config_file = open(filename, 'r')
    yaml_config = yaml.load(config_file)
    config_file.close()
    for item in yaml_config:
        if item == 'title':
            title = yaml_config[item]
        elif item == 'testdate':
            testdate = yaml_config[item]
        elif item == 'csv_dir':
            csv_dir = os.path.normpath(yaml_config[item])
            if not os.path.exists(csv_dir):
                print "FAIL: path \"" + csv_dir + "\" does not exist"
                sys.exit(0)
        elif item == 'results_server':
            results_server = yaml_config[item]
        elif item == 'results_link':
            results_link = yaml_config[item]
    # Chained comparison: only probe the graph server when results_link
    # and results_server are both non-empty and differ from each other.
    if (results_link != results_server != ''):
        if not post_file.link_exists(results_server, results_link):
            print 'WARNING: graph server link does not exist'

    # Collect every browser-related setting into one dict; the .get()
    # entries are optional config keys with their defaults.
    browser_config = {'preferences': yaml_config['preferences'],
                      'extensions': yaml_config['extensions'],
                      'browser_path': yaml_config['browser_path'],
                      'browser_log': yaml_config['browser_log'],
                      'symbols_path': yaml_config.get('symbols_path', None),
                      'browser_wait': yaml_config['browser_wait'],
                      'process': yaml_config['process'],
                      'extra_args': yaml_config['extra_args'],
                      'branch': yaml_config['branch'],
                      'title': yaml_config.get('title', ''),
                      'buildid': yaml_config['buildid'],
                      'env': yaml_config['env'],
                      'dirs': yaml_config.get('dirs', {}),
                      'bundles': yaml_config.get('bundles', {}),
                      'init_url': yaml_config['init_url'],
                      'child_process': yaml_config.get('child_process', 'plugin-container'),
                      'branch_name': yaml_config.get('branch_name', ''),
                      'test_name_extension': yaml_config.get('test_name_extension', ''),
                      'sourcestamp': yaml_config.get('sourcestamp', 'NULL'),
                      'repository': yaml_config.get('repository', 'NULL'),
                      'host': yaml_config.get('deviceip', ''),
                      'port': yaml_config.get('deviceport', ''),
                      'webserver': yaml_config.get('webserver', ''),
                      'deviceroot': yaml_config.get('deviceroot', ''),
                      'remote': yaml_config.get('remote', False),
                      'test_timeout': yaml_config.get('test_timeout', 1200),
                      'addon_id': yaml_config.get('addon_id', 'NULL'),
                      'bcontroller_config': yaml_config.get('bcontroller_config', 'bcontroller.yml'),
                      'xperf_path': yaml_config.get('xperf_path', None)}
    #normalize paths to work accross platforms
    dm = None
    if (browser_config['remote'] == True):
        import devicemanager
        # port == -1 selects the ADB transport; anything else uses the
        # SUT agent transport.
        if (browser_config['port'] == -1):
            import devicemanagerADB
            dm = devicemanagerADB.DeviceManagerADB(browser_config['host'], browser_config['port'])
        else:
            import devicemanagerSUT
            dm = devicemanagerSUT.DeviceManagerSUT(browser_config['host'], browser_config['port'])
    browser_config['browser_path'] = os.path.normpath(browser_config['browser_path'])
    for dir in browser_config['dirs']:
        browser_config['dirs'][dir] = os.path.normpath(browser_config['dirs'][dir])
    for bname in browser_config['bundles']:
        browser_config['bundles'][bname] = os.path.normpath(browser_config['bundles'][bname])
    tests = yaml_config['tests']
    # NOTE(review): the file was already closed above - this second
    # close() is redundant but harmless.
    config_file.close()
    if (testdate != ''):
        date = int(time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time()) #TODO get this into own file
    utils.debug("using testdate: %d" % date)
    utils.debug("actual date: %d" % int(time.time()))

    print 'RETURN:s: %s' % title

    #pull buildid & sourcestamp from browser
    browser_config = browserInfo(browser_config, devicemanager = dm)

    # On remote devices, make sure no stale browser process is running
    # before the tests start.
    if (browser_config['remote'] == True):
        procName = browser_config['browser_path'].split('/')[-1]
        if (dm.processExist(procName)):
            dm.killProcess(procName)

    utils.startTimer()
    utils.stamped_msg(title, "Started")
    for test in tests:
        testname = test['name']
        utils.stamped_msg("Running test " + testname, "Started")
        try:
            mytest = TTest(browser_config['remote'])
            browser_dump, counter_dump, print_format = mytest.runTest(browser_config, test)
            utils.debug("Received test results: " + " ".join(browser_dump))
            results[testname] = [browser_dump, counter_dump, print_format]
            # If we're doing CSV, write this test immediately (bug 419367)
            if csv_dir != '':
                send_to_csv(csv_dir, {testname : results[testname]})
            # to_screen/amo route the same data to stdout (csv_dir=None).
            if to_screen or amo:
                send_to_csv(None, {testname : results[testname]})
        except talosError, e:
            # A failed test is reported but does not abort the run.
            utils.stamped_msg("Failed " + testname, "Stopped")
            print 'FAIL: Busted: ' + testname
            print 'FAIL: ' + e.msg.replace('\n','\nRETURN:')
        utils.stamped_msg("Completed test " + testname, "Stopped")
remote=browser_config['remote'], test_name_extension=browser_config['test_name_extension']) # results links results_urls, results_options = configurator.output_options() talos_results.check_output_formats(results_urls, **results_options) # setup a webserver, if --develop is specified to PerfConfigurator.py httpd = None if browser_config['develop'] == True: httpd = setup_webserver(browser_config['webserver']) if httpd: httpd.start() # run the tests utils.startTimer() utils.stamped_msg(title, "Started") for test in tests: testname = test['name'] utils.stamped_msg("Running test " + testname, "Started") if os.path.exists('logcat.log'): os.unlink('logcat.log') try: mytest = TTest(browser_config['remote']) if mytest: talos_results.add(mytest.runTest(browser_config, test)) else: utils.stamped_msg("Error found while running %s" % testname, "Error") except talosRegression, tr:
    # NOTE(review): tail of the enclosing utility function - its "def"
    # line is outside this chunk.
    return utilOfActionType


def printStateUtilities(state: State, actionUtils: dict):
    """Print one report line of per-action utilities for *state*.

    Shows the bull/bear/none utilities, the pattern occurrence count,
    and the best (argmax) action.

    NOTE(review): reads the module globals ``book`` and ``startPattern``
    (bound in the ``__main__`` loop below) instead of deriving the
    occurrence from ``state`` - presumably intentional, but verify.
    """
    bestActionType = argmaxDict(actionUtils)
    occurrence = book.getPatternOccurrence(startPattern)
    print(f'{state.pattern} : ', end='')
    print(f'bull {actionUtils[ActionType.BULL]: 4.2f} ', end='')
    print(f'bear {actionUtils[ActionType.BEAR]: 4.2f} ', end='')
    print(f'none {actionUtils[ActionType.NONE]: 4.2f} ', end='')
    print(f'({occurrence}) choose {bestActionType}')


if __name__ == '__main__':
    # Usage: script <folder> <file>; the folder name doubles as the
    # renko snap-mode configuration value.
    FOLDER, FILE_NAME = argv[1], argv[2]
    conf.renkoSnapMode = FOLDER
    print('Dataset file:', FOLDER, FILE_NAME)
    print(conf.getStringInfo())
    startTimer()
    dataset = loadSequence(FOLDER, FILE_NAME)
    book = craftBook(dataset, conf.window, True)
    # Evaluate and report the utility of each action for every pattern
    # seen in the book.
    for startPattern in book.counterOf.keys():
        startState = State.create(book, startPattern, PositionType.NONE)
        utilOfActionType = getStateUtilityDict(startState)
        printStateUtilities(startState, utilOfActionType)
        print()
    print(f'execution time: {timeSinceStart():.2f} s')
    # NOTE(review): tail of the preceding handler function - its "def"
    # line is outside this chunk; utils.error() reports the problem and
    # yields the status that is returned to the caller.
    success = utils.error( "could not understand this buildAction: %s" % buildAction )
    return success

#
# "main" action starts here
#
# TODO: all the rest should go into main(). But here I am not
# sure - maybe some of those variables are actually MEANT to
# be used in other modules. Put this back for now
# but as a temporary solution rename variables to mainXXX
# where it is safe so there are less redefinitions in inner scopes

# Overall wall-clock timer for the entire emerge run.
utils.startTimer("Emerge")

# Default command-line state before option parsing.
mainBuildAction = "all"
packageName = None
doPretend = False
outDateVCS = False
outDatePackage = False
stayQuiet = False
disableHostBuild = False
disableTargetBuild = False
ignoreInstalled = False
updateAll = False
continueFlag = False

# At least one argument (an action or a package name) is required.
if len( sys.argv ) < 2:
    usage()
    utils.die("")
def doExec( package, action, continueFlag = False ):
    """Execute *action* on *package*, timing the run.

    Returns the truthy result of package.execute(action), or
    continueFlag when the action did not succeed.
    """
    label = "%s for %s" % ( action, package )
    utils.startTimer( label, 1 )
    EmergeDebug.info("Action: %s for %s" % (action, package))
    outcome = package.execute( action )
    utils.stopTimer( label )
    # Equivalent to "outcome or continueFlag": a failure may still be
    # tolerated when the caller asked to continue on error.
    if not outcome:
        return continueFlag
    return outcome
# results links results_urls, results_options = configurator.output_options() talos_results.check_output_formats(results_urls, **results_options) results_log = browser_config['results_log'] # setup a webserver, if --develop is specified to PerfConfigurator.py httpd = None if browser_config['develop'] == True: httpd = setup_webserver(browser_config['webserver']) if httpd: httpd.start() # run the tests utils.startTimer() utils.stamped_msg(title, "Started") for test in tests: testname = test['name'] test['browser_log'] = browser_config['browser_log'] utils.stamped_msg("Running test " + testname, "Started") if os.path.exists('logcat.log'): os.unlink('logcat.log') try: mytest = TTest(browser_config['remote']) if mytest: talos_results.add(mytest.runTest(browser_config, test)) else: utils.stamped_msg("Error found while running %s" % testname,
def test_file(filename):
    """Runs the talos tests on the given config file and generates a report.

    Args:
      filename: the name of the file to run the tests on
    """
    browser_config = []
    tests = []
    title = ''
    testdate = ''
    csv_dir = ''
    results_server = ''
    results_link = ''
    results = {}

    # Read in the profile info from the YAML config file
    # NOTE(review): yaml.load on arbitrary input is unsafe for untrusted
    # files; yaml.safe_load would be preferable - confirm the config
    # source is trusted.
    config_file = open(filename, 'r')
    yaml_config = yaml.load(config_file)
    config_file.close()
    for item in yaml_config:
        if item == 'title':
            title = yaml_config[item]
        elif item == 'testdate':
            testdate = yaml_config[item]
        elif item == 'csv_dir':
            csv_dir = os.path.normpath(yaml_config[item])
            if not os.path.exists(csv_dir):
                print "FAIL: path \"" + csv_dir + "\" does not exist"
                sys.exit(0)
        elif item == 'results_server':
            results_server = yaml_config[item]
        elif item == 'results_link':
            results_link = yaml_config[item]
    # Chained comparison: only probe the graph server when results_link
    # and results_server are both non-empty and differ from each other.
    if (results_link != results_server != ''):
        if not post_file.link_exists(results_server, results_link):
            sys.exit(0)

    # Collect every browser-related setting into one dict.
    browser_config = {'preferences': yaml_config['preferences'],
                      'extensions': yaml_config['extensions'],
                      'browser_path': yaml_config['browser_path'],
                      'browser_log': yaml_config['browser_log'],
                      'symbols_path': yaml_config.get('symbols_path', None),
                      'browser_wait': yaml_config['browser_wait'],
                      'process': yaml_config['process'],
                      'extra_args': yaml_config['extra_args'],
                      'branch': yaml_config['branch'],
                      'buildid': yaml_config['buildid'],
                      'env': yaml_config['env'],
                      'dirs': yaml_config['dirs'],
                      'init_url': yaml_config['init_url']}
    # Optional keys copied over individually.
    if 'branch_name' in yaml_config:
        browser_config['branch_name'] = yaml_config['branch_name']
    if 'test_name_extension' in yaml_config:
        browser_config['test_name_extension'] = yaml_config['test_name_extension']
    else:
        browser_config['test_name_extension'] = ''
    #normalize paths to work accross platforms
    browser_config['browser_path'] = os.path.normpath(browser_config['browser_path'])
    for dir in browser_config['dirs']:
        browser_config['dirs'][dir] = os.path.normpath(browser_config['dirs'][dir])
    tests = yaml_config['tests']
    # NOTE(review): the file was already closed above - this second
    # close() is redundant but harmless.
    config_file.close()
    if (testdate != ''):
        date = int(time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time()) #TODO get this into own file
    utils.debug("using testdate: %d" % date)
    utils.debug("actual date: %d" % int(time.time()))

    print 'RETURN:s: %s' % title

    #pull buildid & sourcestamp from browser
    browser_config = browserInfo(browser_config)

    # Refuse to run while any browser process is still alive.
    if ffprocess.checkAllProcesses(browser_config['process']):
        print "FAIL: all firefox processes must be closed before tests can be run"
        sys.exit(0)

    utils.startTimer()
    utils.stamped_msg(title, "Started")
    for test in tests:
        testname = test['name']
        utils.stamped_msg("Running test " + testname, "Started")
        try:
            browser_dump, counter_dump, print_format = ttest.runTest(browser_config, test)
            utils.debug("Received test results: " + " ".join(browser_dump))
            results[testname] = [browser_dump, counter_dump, print_format]
            # If we're doing CSV, write this test immediately (bug 419367)
            if csv_dir != '':
                send_to_csv(csv_dir, {testname : results[testname]})
        except talosError, e:
            # A failed test is reported but does not abort the run.
            utils.stamped_msg("Failed " + testname, "Stopped")
            print 'FAIL: Busted: ' + testname
            print 'FAIL: ' + e.msg.replace('\n','\nRETURN:')
        utils.stamped_msg("Completed test " + testname, "Stopped")
def test_file(filename, to_screen):
    """Runs the talos tests on the given config file and generates a report.

    Args:
      filename: the name of the file to run the tests on
      to_screen: boolean, determine if all results should be outputed
          directly to stdout
    """
    browser_config = []
    tests = []
    title = ''
    testdate = ''
    csv_dir = ''
    results_server = ''
    results_link = ''
    results = {}

    # Read in the profile info from the YAML config file
    # NOTE(review): yaml.load on arbitrary input is unsafe for untrusted
    # files; yaml.safe_load would be preferable - confirm the config
    # source is trusted.
    config_file = open(filename, 'r')
    yaml_config = yaml.load(config_file)
    config_file.close()
    for item in yaml_config:
        if item == 'title':
            title = yaml_config[item]
        elif item == 'testdate':
            testdate = yaml_config[item]
        elif item == 'csv_dir':
            csv_dir = os.path.normpath(yaml_config[item])
            if not os.path.exists(csv_dir):
                print "FAIL: path \"" + csv_dir + "\" does not exist"
                sys.exit(0)
        elif item == 'results_server':
            results_server = yaml_config[item]
        elif item == 'results_link':
            results_link = yaml_config[item]
    # Chained comparison: only probe the graph server when results_link
    # and results_server are both non-empty and differ from each other.
    if (results_link != results_server != ''):
        if not post_file.link_exists(results_server, results_link):
            sys.exit(0)

    # Collect every browser-related setting into one dict; .get()
    # entries are optional config keys with their defaults.
    browser_config = {
        'preferences': yaml_config['preferences'],
        'extensions': yaml_config['extensions'],
        'browser_path': yaml_config['browser_path'],
        'browser_log': yaml_config['browser_log'],
        'symbols_path': yaml_config.get('symbols_path', None),
        'browser_wait': yaml_config['browser_wait'],
        'process': yaml_config['process'],
        'extra_args': yaml_config['extra_args'],
        'branch': yaml_config['branch'],
        'buildid': yaml_config['buildid'],
        'env': yaml_config['env'],
        'dirs': yaml_config.get('dirs', {}),
        'bundles': yaml_config.get('bundles', {}),
        'init_url': yaml_config['init_url']
    }
    # Optional keys copied over individually with explicit defaults.
    if 'child_process' in yaml_config:
        browser_config['child_process'] = yaml_config['child_process']
    else:
        browser_config['child_process'] = 'plugin-container'
    if 'branch_name' in yaml_config:
        browser_config['branch_name'] = yaml_config['branch_name']
    if 'test_name_extension' in yaml_config:
        browser_config['test_name_extension'] = yaml_config[
            'test_name_extension']
    else:
        browser_config['test_name_extension'] = ''
    if 'sourcestamp' in yaml_config:
        browser_config['sourcestamp'] = yaml_config['sourcestamp']
    if 'repository' in yaml_config:
        browser_config['repository'] = yaml_config['repository']
    if 'deviceip' in yaml_config:
        browser_config['host'] = yaml_config['deviceip']
    else:
        browser_config['host'] = ''
    if 'deviceport' in yaml_config:
        browser_config['port'] = yaml_config['deviceport']
    else:
        browser_config['port'] = ''
    if 'webserver' in yaml_config:
        browser_config['webserver'] = yaml_config['webserver']
    else:
        browser_config['webserver'] = ''
    if 'deviceroot' in yaml_config:
        browser_config['deviceroot'] = yaml_config['deviceroot']
    else:
        browser_config['deviceroot'] = ''
    if 'remote' in yaml_config:
        browser_config['remote'] = yaml_config['remote']
    else:
        browser_config['remote'] = False
    #normalize paths to work accross platforms
    dm = None
    if (browser_config['remote'] == True):
        import devicemanager
        dm = devicemanager.DeviceManager(browser_config['host'],
                                         browser_config['port'])
    browser_config['browser_path'] = os.path.normpath(
        browser_config['browser_path'])
    for dir in browser_config['dirs']:
        browser_config['dirs'][dir] = os.path.normpath(
            browser_config['dirs'][dir])
    for bname in browser_config['bundles']:
        browser_config['bundles'][bname] = os.path.normpath(
            browser_config['bundles'][bname])
    tests = yaml_config['tests']
    # NOTE(review): the file was already closed above - this second
    # close() is redundant but harmless.
    config_file.close()
    if (testdate != ''):
        date = int(
            time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time()) #TODO get this into own file
    utils.debug("using testdate: %d" % date)
    utils.debug("actual date: %d" % int(time.time()))

    print 'RETURN:s: %s' % title

    #pull buildid & sourcestamp from browser
    browser_config = browserInfo(browser_config, devicemanager=dm)

    utils.startTimer()
    utils.stamped_msg(title, "Started")
    for test in tests:
        testname = test['name']
        utils.stamped_msg("Running test " + testname, "Started")
        try:
            mytest = TTest(browser_config['remote'])
            browser_dump, counter_dump, print_format = mytest.runTest(
                browser_config, test)
            utils.debug("Received test results: " + " ".join(browser_dump))
            results[testname] = [browser_dump, counter_dump, print_format]
            # If we're doing CSV, write this test immediately (bug 419367)
            if csv_dir != '':
                send_to_csv(csv_dir, {testname: results[testname]})
            # to_screen routes the same data to stdout (csv_dir=None).
            if to_screen:
                send_to_csv(None, {testname: results[testname]})
        except talosError, e:
            # A failed test is reported but does not abort the run.
            utils.stamped_msg("Failed " + testname, "Stopped")
            print 'FAIL: Busted: ' + testname
            print 'FAIL: ' + e.msg.replace('\n', '\nRETURN:')
        utils.stamped_msg("Completed test " + testname, "Stopped")
def test_file(filename):
    """Runs the talos tests on the given config file and generates a report.

    Args:
      filename: the name of the file to run the tests on
    """
    browser_config = []
    tests = []
    title = ''
    testdate = ''
    csv_dir = ''
    results_server = ''
    results_link = ''
    old_results_server = ''
    old_results_link = ''
    results = {}

    # Read in the profile info from the YAML config file
    # NOTE(review): yaml.load on arbitrary input is unsafe for untrusted
    # files; yaml.safe_load would be preferable - confirm the config
    # source is trusted.
    config_file = open(filename, 'r')
    yaml_config = yaml.load(config_file)
    config_file.close()
    for item in yaml_config:
        if item == 'title':
            title = yaml_config[item]
        elif item == 'testdate':
            testdate = yaml_config[item]
        elif item == 'csv_dir':
            csv_dir = os.path.normpath(yaml_config[item])
            if not os.path.exists(csv_dir):
                print "FAIL: path \"" + csv_dir + "\" does not exist"
                sys.exit(0)
        elif item == 'results_server':
            results_server = yaml_config[item]
        elif item == 'results_link':
            results_link = yaml_config[item]
        elif item == 'old_results_server':
            old_results_server = yaml_config[item]
        elif item == 'old_results_link':
            old_results_link = yaml_config[item]
    # Chained comparisons: only probe a graph server when its link and
    # server values are both non-empty and differ from each other.
    if (results_link != results_server != ''):
        if not post_file.link_exists(results_server, results_link):
            sys.exit(0)
    if (old_results_link != old_results_server != ''):
        if not post_file.link_exists(old_results_server, old_results_link):
            sys.exit(0)

    # Collect every browser-related setting into one dict.
    browser_config = {
        'preferences': yaml_config['preferences'],
        'extensions': yaml_config['extensions'],
        'browser_path': yaml_config['browser_path'],
        'browser_log': yaml_config['browser_log'],
        'symbols_path': yaml_config.get('symbols_path', None),
        'browser_wait': yaml_config['browser_wait'],
        'process': yaml_config['process'],
        'extra_args': yaml_config['extra_args'],
        'branch': yaml_config['branch'],
        'buildid': yaml_config['buildid'],
        'profile_path': yaml_config['profile_path'],
        'env': yaml_config['env'],
        'dirs': yaml_config['dirs'],
        'init_url': yaml_config['init_url']
    }
    # Optional keys copied over individually.
    if 'branch_name' in yaml_config:
        browser_config['branch_name'] = yaml_config['branch_name']
    if 'test_name_extension' in yaml_config:
        browser_config['test_name_extension'] = yaml_config[
            'test_name_extension']
    else:
        browser_config['test_name_extension'] = ''
    #normalize paths to work accross platforms
    browser_config['browser_path'] = os.path.normpath(
        browser_config['browser_path'])
    # profile_path may be an empty dict placeholder; only normalize a
    # real path value.
    if browser_config['profile_path'] != {}:
        browser_config['profile_path'] = os.path.normpath(
            browser_config['profile_path'])
    for dir in browser_config['dirs']:
        browser_config['dirs'][dir] = os.path.normpath(
            browser_config['dirs'][dir])
    tests = yaml_config['tests']
    # NOTE(review): the file was already closed above - this second
    # close() is redundant but harmless.
    config_file.close()
    if (testdate != ''):
        date = int(
            time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time()) #TODO get this into own file
    utils.debug("using testdate: %d" % date)
    utils.debug("actual date: %d" % int(time.time()))

    print 'RETURN:s: %s' % title

    #pull buildid & sourcestamp from browser
    browser_config = browserInfo(browser_config)

    utils.startTimer()
    utils.stamped_msg(title, "Started")
    for test in tests:
        testname = test['name']
        utils.stamped_msg("Running test " + testname, "Started")
        try:
            browser_dump, counter_dump = ttest.runTest(browser_config, test)
            utils.debug("Received test results: " + " ".join(browser_dump))
            results[testname] = [browser_dump, counter_dump]
            # If we're doing CSV, write this test immediately (bug 419367)
            if csv_dir != '':
                send_to_csv(csv_dir, {testname: results[testname]})
        except talosError, e:
            # A failed test is reported but does not abort the run.
            utils.stamped_msg("Failed " + testname, "Stopped")
            print 'FAIL: Busted: ' + testname
            print 'FAIL: ' + e.msg
        utils.stamped_msg("Completed test " + testname, "Stopped")