def setting(self):
    conf = configparser.ConfigParser()
    conf.read('./settings.ini')
    self.account = conf['oanda']['account_id']
    self.key = conf['oanda']['api_key']
    self.db_name = conf['db']['name']
    self.db_driver = conf['db']['driver']
    self.web_port = int(conf['web']['port'])
    self.trade_duration = conf['trading']['trade_duration'].lower()
    self.back_test = bool_from_str(conf['trading']['back_test'])
    self.use_percent = float(conf['trading']['use_percent'])
    self.past_period = int(conf['trading']['past_period'])
    self.stop_limit_percent = float(conf['trading']['stop_limit_percent'])
    self.num_ranking = int(conf['trading']['num_ranking'])
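# A minimal sketch of the settings.ini this method expects. The section and
# key names are taken from the reads above; every value is a placeholder
# assumption, not a real credential or a recommended parameter.
import configparser

SAMPLE_SETTINGS_INI = """
[oanda]
account_id = 101-000-0000000-000
api_key = replace-with-your-api-key

[db]
name = stockdata.sqlite3
driver = sqlite3

[web]
port = 8080

[trading]
trade_duration = 1m
back_test = true
use_percent = 0.5
past_period = 30
stop_limit_percent = 0.95
num_ranking = 2
"""

sample_conf = configparser.ConfigParser()
sample_conf.read_string(SAMPLE_SETTINGS_INI)
# configparser returns raw strings; setting() does the int/float/bool casts itself
assert sample_conf["web"]["port"] == "8080"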
def get_raptor_test_list(args, oskey):
    '''
    A test ini (i.e. raptor-firefox-tp6.ini) will have one or more subtests
    inside, each with its own name ([the-ini-file-test-section]).

    We want the ability to either:

    - run *all* of the subtests listed inside the test ini; - or -
    - just run a single one of those subtests that are inside the ini

    A test name is received on the command line. This will either match the
    name of a single subtest (within an ini) - or - if there's no matching
    single subtest with that name, then the test name provided might be the
    name of a test ini itself (i.e. raptor-firefox-tp6) that contains multiple
    subtests.

    First look for a single matching subtest name in the list of all available
    tests, and if it's found we will just run that single subtest.

    Then look at the list of all available tests - each available test has a
    manifest name associated to it - and pull out all subtests whose manifest
    name matches the test name provided on the command line, i.e. run all
    subtests in a specified ini.

    If no tests are found at all then the test name is invalid.
    '''
    tests_to_run = []
    # get list of all available tests for the browser we are testing against
    available_tests = get_browser_test_list(args.app, args.run_local)

    # look for single subtest that matches test name provided on cmd line
    for next_test in available_tests:
        if next_test['name'] == args.test:
            tests_to_run.append(next_test)
            break

    # no matches, so now look for all subtests that come from a test ini
    # manifest that matches the test name provided on the command line
    if len(tests_to_run) == 0:
        _ini = args.test + ".ini"
        for next_test in available_tests:
            head, tail = os.path.split(next_test['manifest'])
            if tail == _ini:
                # subtest comes from matching test ini file name, so add it
                tests_to_run.append(next_test)

    # go through each test and set the page-cycles and page-timeout, and some config flags;
    # the page-cycles value in the INI can be overridden when debug-mode is enabled, when
    # gecko-profiling is enabled, or when the --page-cycles cmd line arg was used (that
    # overrides all)
    for next_test in tests_to_run:
        LOG.info("configuring settings for test %s" % next_test['name'])
        max_page_cycles = int(next_test.get('page_cycles', 1))
        max_browser_cycles = int(next_test.get('browser_cycles', 1))

        # if using playback, the playback recording info may need to be transformed
        if next_test.get('playback') is not None:
            next_test['playback_pageset_manifest'] = \
                transform_subtest(next_test['playback_pageset_manifest'],
                                  next_test['name'])
            next_test['playback_recordings'] = \
                transform_subtest(next_test['playback_recordings'],
                                  next_test['name'])

        if args.gecko_profile is True:
            next_test['gecko_profile'] = True
            LOG.info('gecko-profiling enabled')
            max_page_cycles = 3
            max_browser_cycles = 3

            if 'gecko_profile_entries' in args and args.gecko_profile_entries is not None:
                next_test['gecko_profile_entries'] = str(args.gecko_profile_entries)
                LOG.info('gecko-profiling entries set to %s' % args.gecko_profile_entries)

            if 'gecko_profile_interval' in args and args.gecko_profile_interval is not None:
                next_test['gecko_profile_interval'] = str(args.gecko_profile_interval)
                LOG.info('gecko-profiling interval set to %s' % args.gecko_profile_interval)

            if 'gecko_profile_threads' in args and args.gecko_profile_threads is not None:
                # wrap filter() in list() so extend/join work on Python 3
                threads = list(filter(
                    None, next_test.get('gecko_profile_threads', '').split(',')))
                threads.extend(args.gecko_profile_threads)
                next_test['gecko_profile_threads'] = ','.join(threads)
                LOG.info('gecko-profiling extra threads %s' % args.gecko_profile_threads)
        else:
            # if the gecko profiler is not enabled, ignore all of its settings
            next_test.pop('gecko_profile_entries', None)
            next_test.pop('gecko_profile_interval', None)
            next_test.pop('gecko_profile_threads', None)

        if args.debug_mode is True:
            next_test['debug_mode'] = True
            LOG.info("debug-mode enabled")
            max_page_cycles = 2

        # if --page-cycles was provided on the command line, use that instead of the INI;
        # if just provided in the INI use that, but cap at 3 if gecko-profiling is enabled
        if args.page_cycles is not None:
            next_test['page_cycles'] = args.page_cycles
            LOG.info("setting page-cycles to %d as specified on cmd line"
                     % args.page_cycles)
        else:
            if int(next_test.get('page_cycles', 1)) > max_page_cycles:
                next_test['page_cycles'] = max_page_cycles
                LOG.info("setting page-cycles to %d because gecko-profiling is enabled"
                         % next_test['page_cycles'])

        # if --browser-cycles was provided on the command line, use that instead of the INI;
        # if just provided in the INI use that, but cap at 3 if gecko-profiling is enabled
        if args.browser_cycles is not None:
            next_test['browser_cycles'] = args.browser_cycles
            LOG.info("setting browser-cycles to %d as specified on cmd line"
                     % args.browser_cycles)
        else:
            if int(next_test.get('browser_cycles', 1)) > max_browser_cycles:
                next_test['browser_cycles'] = max_browser_cycles
                LOG.info("setting browser-cycles to %d because gecko-profiling is enabled"
                         % next_test['browser_cycles'])

        # if --page-timeout was provided on the command line, use that instead of the INI
        if args.page_timeout is not None:
            LOG.info("setting page-timeout to %d as specified on cmd line"
                     % args.page_timeout)
            next_test['page_timeout'] = args.page_timeout

        # for browsertime jobs, cold page-load mode is determined by command line argument;
        # for raptor-webext jobs cold page-load is determined by the 'cold' key in the
        # test manifest INI
        _running_cold = False
        if args.browsertime is True:
            if args.cold is True:
                _running_cold = True
            else:
                # running warm page-load, so ignore browser-cycles if it was provided (set to 1)
                next_test['browser_cycles'] = 1
        else:
            if next_test.get("cold", "false") == "true":
                _running_cold = True

        if _running_cold:
            # when running in cold mode, set browser-cycles to the page-cycles value, as we
            # want the browser to restart between page-cycles; and set page-cycles to 1 as
            # we only want 1 single page-load for every browser-cycle
            next_test['cold'] = True
            next_test['expected_browser_cycles'] = int(next_test['browser_cycles'])
            next_test['page_cycles'] = 1
            # also ensure '-cold' is in the test name so perfherder results indicate cold page-load
            if "-cold" not in next_test['name']:
                next_test['name'] += "-cold"
        else:
            # when running in warm mode, just set test-cycles to 1 and leave page-cycles as-is
            next_test['cold'] = False
            next_test['expected_browser_cycles'] = 1

        # either warm or cold-mode, initialize the starting current 'browser-cycle'
        next_test['browser_cycle'] = 1

        # if --test-url-params was provided on the command line, add the params to the
        # test_url provided in the INI
        if args.test_url_params is not None:
            initial_test_url = next_test['test_url']
            next_test['test_url'] = add_test_url_params(initial_test_url,
                                                        args.test_url_params)
            LOG.info("adding extra test_url params (%s) as specified on cmd line "
                     "to the current test_url (%s), resulting: %s" %
                     (args.test_url_params, initial_test_url, next_test['test_url']))

        if next_test.get('use_live_sites', "false") == "true":
            # when using live sites we want to turn off playback
            LOG.info("using live sites so turning playback off!")
            next_test['playback'] = None
            LOG.info("using live sites so appending '-live' to the test name")
            next_test['name'] = next_test['name'] + "-live"
            # allow a slightly higher page timeout due to remote page loads
            next_test['page_timeout'] = \
                int(next_test['page_timeout']) * LIVE_SITE_TIMEOUT_MULTIPLIER
            LOG.info("using live sites so using page timeout of %dms"
                     % next_test['page_timeout'])

        # browsertime doesn't use the 'measure' test ini setting; however, just for the sake
        # of supporting both webext and browsertime, provide a dummy 'measure' setting here
        # to prevent having to check in multiple places; it has no effect on what
        # browsertime actually measures; remove this when we eventually remove webext support
        if args.browsertime and next_test.get('measure') is None:
            next_test['measure'] = "fnbpaint, fcp, dcf, loadtime"

        # convert the 'measure =' test INI line to a list
        if next_test.get('measure') is not None:
            _measures = []
            for m in [m.strip() for m in next_test['measure'].split(',')]:
                # build the 'measure =' list
                _measures.append(m)
            next_test['measure'] = _measures

            # if using live sites, don't measure the hero element as it only exists in recordings
            if 'hero' in next_test['measure'] and \
               next_test.get('use_live_sites', "false") == "true":
                # remove 'hero' from the 'measure =' list
                next_test['measure'].remove('hero')
                # remove the 'hero =' line since we are no longer measuring hero
                del next_test['hero']

        if next_test.get('lower_is_better') is not None:
            next_test['lower_is_better'] = bool_from_str(
                next_test.get('lower_is_better'))
        if next_test.get('subtest_lower_is_better') is not None:
            next_test['subtest_lower_is_better'] = bool_from_str(
                next_test.get('subtest_lower_is_better'))

    # write out .json test setting files for the control server to read and send to the web ext
    if len(tests_to_run) != 0:
        for test in tests_to_run:
            if validate_test_ini(test):
                write_test_settings_json(args, test, oskey)
            else:
                # test doesn't have valid settings, remove it from the available list
                LOG.info("test %s is not valid due to missing settings" % test['name'])
                tests_to_run.remove(test)

    return tests_to_run
def get_raptor_test_list(args, oskey): """ A test ini (i.e. raptor-firefox-tp6.ini) will have one or more subtests inside, each with it's own name ([the-ini-file-test-section]). We want the ability to eiter: - run * all * of the subtests listed inside the test ini; - or - - just run a single one of those subtests that are inside the ini A test name is received on the command line. This will either match the name of a single subtest (within an ini) - or - if there's no matching single subtest with that name, then the test name provided might be the name of a test ini itself (i.e. raptor-firefox-tp6) that contains multiple subtests. First look for a single matching subtest name in the list of all availble tests, and if it's found we will just run that single subtest. Then look at the list of all available tests - each available test has a manifest name associated to it - and pull out all subtests whose manifest name matches the test name provided on the command line i.e. run all subtests in a specified ini. If no tests are found at all then the test name is invalid. """ tests_to_run = [] # get list of all available tests for the browser we are testing against available_tests = get_browser_test_list(args.app, args.run_local) # look for single subtest that matches test name provided on cmd line for next_test in available_tests: if next_test["name"] == args.test: tests_to_run.append(next_test) break # no matches, so now look for all subtests that come from a test ini # manifest that matches the test name provided on the commmand line if len(tests_to_run) == 0: _ini = args.test + ".ini" for next_test in available_tests: head, tail = os.path.split(next_test["manifest"]) if tail == _ini: # subtest comes from matching test ini file name, so add it tests_to_run.append(next_test) # enable live sites if requested with --live-sites if args.live_sites: for next_test in tests_to_run: # set use_live_sites to `true` and disable mitmproxy playback # immediately so we don't follow playback paths below next_test["use_live_sites"] = "true" next_test["playback"] = None # go through each test and set the page-cycles and page-timeout, and some config flags # the page-cycles value in the INI can be overriden when debug-mode enabled, when # gecko-profiling enabled, or when --page-cycles cmd line arg was used (that overrides all) for next_test in tests_to_run: LOG.info("configuring settings for test %s" % next_test["name"]) max_page_cycles = int(next_test.get("page_cycles", 1)) max_browser_cycles = int(next_test.get("browser_cycles", 1)) # If using playback, the playback recording info may need to be transformed. 
# This transformation needs to happen before the test name is changed # below (for cold tests for instance) if next_test.get("playback") is not None: next_test["playback_pageset_manifest"] = transform_subtest( next_test["playback_pageset_manifest"], next_test["name"] ) next_test["playback_recordings"] = transform_subtest( next_test["playback_recordings"], next_test["name"] ) if args.gecko_profile is True: next_test["gecko_profile"] = True LOG.info("gecko-profiling enabled") max_page_cycles = 3 max_browser_cycles = 3 if ( "gecko_profile_entries" in args and args.gecko_profile_entries is not None ): next_test["gecko_profile_entries"] = str(args.gecko_profile_entries) LOG.info( "gecko-profiling entries set to %s" % args.gecko_profile_entries ) if ( "gecko_profile_interval" in args and args.gecko_profile_interval is not None ): next_test["gecko_profile_interval"] = str(args.gecko_profile_interval) LOG.info( "gecko-profiling interval set to %s" % args.gecko_profile_interval ) if ( "gecko_profile_threads" in args and args.gecko_profile_threads is not None ): # pylint --py3k: W1639 threads = list( filter(None, next_test.get("gecko_profile_threads", "").split(",")) ) threads.extend(args.gecko_profile_threads) next_test["gecko_profile_threads"] = ",".join(threads) LOG.info( "gecko-profiling extra threads %s" % args.gecko_profile_threads ) else: # if the gecko profiler is not enabled, ignore all of it's settings next_test.pop("gecko_profile_entries", None) next_test.pop("gecko_profile_interval", None) next_test.pop("gecko_profile_threads", None) if args.debug_mode is True: next_test["debug_mode"] = True LOG.info("debug-mode enabled") max_page_cycles = 2 # if --page-cycles was provided on the command line, use that instead of INI # if just provided in the INI use that but cap at 3 if gecko-profiling is enabled if args.page_cycles is not None: next_test["page_cycles"] = args.page_cycles LOG.info( "setting page-cycles to %d as specified on cmd line" % args.page_cycles ) else: if int(next_test.get("page_cycles", 1)) > max_page_cycles: next_test["page_cycles"] = max_page_cycles LOG.info( "setting page-cycles to %d because gecko-profling is enabled" % next_test["page_cycles"] ) # if --browser-cycles was provided on the command line, use that instead of INI # if just provided in the INI use that but cap at 3 if gecko-profiling is enabled if args.browser_cycles is not None: next_test["browser_cycles"] = args.browser_cycles LOG.info( "setting browser-cycles to %d as specified on cmd line" % args.browser_cycles ) else: if int(next_test.get("browser_cycles", 1)) > max_browser_cycles: next_test["browser_cycles"] = max_browser_cycles LOG.info( "setting browser-cycles to %d because gecko-profilng is enabled" % next_test["browser_cycles"] ) # if --page-timeout was provided on the command line, use that instead of INI if args.page_timeout is not None: LOG.info( "setting page-timeout to %d as specified on cmd line" % args.page_timeout ) next_test["page_timeout"] = args.page_timeout _running_cold = False # check command line to see if we set cold page load from command line if args.cold or next_test.get("cold") == "true": # for raptor-webext jobs cold page-load is determined by the 'cold' key # in test manifest INI _running_cold = True else: # if it's a warm load test ignore browser_cycles if set next_test["browser_cycles"] = 1 if _running_cold: # when running in cold mode, set browser-cycles to the page-cycles value; as we want # the browser to restart between page-cycles; and set page-cycles to 1 as we only # want 
1 single page-load for every browser-cycle next_test["cold"] = True next_test["expected_browser_cycles"] = int(next_test["browser_cycles"]) if args.chimera: next_test["page_cycles"] = 2 else: next_test["page_cycles"] = 1 # also ensure '-cold' is in test name so perfherder results indicate warm cold-load # Bug 1644344 we can remove this condition once we're migrated away from WebExtension if "-cold" not in next_test["name"] and not args.browsertime: next_test["name"] += "-cold" else: # when running in warm mode, just set test-cycles to 1 and leave page-cycles as/is next_test["cold"] = False next_test["expected_browser_cycles"] = 1 # either warm or cold-mode, initialize the starting current 'browser-cycle' next_test["browser_cycle"] = 1 # if --test-url-params was provided on the command line, add the params to the test_url # provided in the INI if args.test_url_params is not None: initial_test_url = next_test["test_url"] next_test["test_url"] = add_test_url_params( initial_test_url, args.test_url_params ) LOG.info( "adding extra test_url params (%s) as specified on cmd line " "to the current test_url (%s), resulting: %s" % (args.test_url_params, initial_test_url, next_test["test_url"]) ) if next_test.get("use_live_sites", "false") == "true": # when using live sites we want to turn off playback LOG.info("using live sites so turning playback off!") next_test["playback"] = None # Only for raptor-youtube-playback tests until they are removed # in favor of the browsertime variant if "raptor-youtube-playback" in next_test["name"]: next_test["name"] = next_test["name"] + "-live" # allow a slightly higher page timeout due to remote page loads next_test["page_timeout"] = ( int(next_test["page_timeout"]) * LIVE_SITE_TIMEOUT_MULTIPLIER ) LOG.info( "using live sites so using page timeout of %dms" % next_test["page_timeout"] ) if not args.browsertime and "browsertime" in next_test.get("manifest", ""): raise Exception( "%s test can only be run with --browsertime" % next_test.get("name", "Unknown") ) # browsertime doesn't use the 'measure' test ini setting; however just for the sake # of supporting both webext and browsertime, just provide a dummy 'measure' setting # here to prevent having to check in multiple places; it has no effect on what # browsertime actually measures; remove this when eventually we remove webext support if ( args.browsertime and next_test.get("measure") is None and next_test.get("type") == "pageload" ): next_test["measure"] = "fnbpaint, fcp, dcf, loadtime" # convert 'measure =' test INI line to list if next_test.get("measure") is not None: _measures = [] for measure in [m.strip() for m in next_test["measure"].split(",")]: # build the 'measures =' list _measures.append(measure) next_test["measure"] = _measures # if using live sites, don't measure hero element as it only exists in recordings if ( "hero" in next_test["measure"] and next_test.get("use_live_sites", "false") == "true" ): # remove 'hero' from the 'measures =' list next_test["measure"].remove("hero") # remove the 'hero =' line since no longer measuring hero del next_test["hero"] if next_test.get("lower_is_better") is not None: next_test["lower_is_better"] = bool_from_str( next_test.get("lower_is_better") ) if next_test.get("subtest_lower_is_better") is not None: next_test["subtest_lower_is_better"] = bool_from_str( next_test.get("subtest_lower_is_better") ) # write out .json test setting files for the control server to read and send to web ext if len(tests_to_run) != 0: for test in tests_to_run: if validate_test_ini(test): 
write_test_settings_json(args, test, oskey) else: # test doesn't have valid settings, remove it from available list LOG.info("test %s is not valid due to missing settings" % test["name"]) tests_to_run.remove(test) return tests_to_run
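# add_test_url_params() is defined elsewhere in raptor; below is a hedged
# sketch of the idea, assuming it merges a "key=value&key2=value2" string into
# the URL's existing query string (an illustration, not raptor's implementation).
from urllib.parse import parse_qsl, urlencode, urlsplit, urlunsplit

def add_test_url_params_sketch(url, extra_params):
    """Merge extra query params (an 'a=1&b=2' string) into url's query string."""
    parts = urlsplit(url)
    query = dict(parse_qsl(parts.query))
    query.update(dict(parse_qsl(extra_params)))  # extra params win on conflict
    return urlunsplit(
        (parts.scheme, parts.netloc, parts.path, urlencode(query), parts.fragment)
    )

# add_test_url_params_sketch("https://example.com/t?x=1", "y=2")
# -> "https://example.com/t?x=1&y=2"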
"""settings.py""" import configparser from utils import bool_from_str conf = configparser.ConfigParser() conf.read("settings.ini") photo_work_dir = conf["dir"]["photo"] text_work_dir = conf["dir"]["text"] s_size = conf["size"]["size_1"] m_size = conf["size"]["size_2"] l_size = conf["size"]["size_3"] image_sizes = [s_size, m_size, l_size] ftps_host = conf["ftps"]["server"] ftps_user = conf["ftps"]["user"] ftps_passwd = conf["ftps"]["passwd"] ftps_product_path = conf["ftps"]["product_path"] ftps_test_path = conf["ftps"]["test_path"] ftps_test_flag = bool_from_str(conf["ftps"]["test_env_flag"]) ftps_home_url = conf["ftps"]["home"] bitly_api_url = conf["bitly"]["api"] bitly_access_token = conf["bitly"]["token"]
def test_bool_from_str_with_invalid_values(invalid_value):
    with pytest.raises(ValueError):
        bool_from_str(invalid_value)

def test_bool_from_str(value, expected_result):
    assert expected_result == bool_from_str(value)
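# Both tests take parametrized arguments, so the original module presumably
# decorates them with @pytest.mark.parametrize; the decorated versions below
# are a sketch with hypothetical sample values (only the ValueError behaviour
# for invalid input is established by the tests themselves).
import pytest

from utils import bool_from_str

@pytest.mark.parametrize("invalid_value", ["", "maybe", "2"])  # hypothetical cases
def test_bool_from_str_with_invalid_values_sketch(invalid_value):
    with pytest.raises(ValueError):
        bool_from_str(invalid_value)

@pytest.mark.parametrize(
    "value, expected_result",
    [("true", True), ("false", False)],  # hypothetical mapping
)
def test_bool_from_str_sketch(value, expected_result):
    assert expected_result == bool_from_str(value)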