def run(self):
    """Run this thread's single test via Pytaf.do_test and record the
    outcome with the manager, sleeping briefly to throttle the load test.

    Reads self.config, self.test, self.modules, self.settings,
    self.throttle_rate; calls self.manager.passed()/failed().

    Returns the do_test result, or None when no params are found for the
    test or an exception escapes do_test.
    """
    params = pytaf_utils.get_params(self.config, self.test)
    if params is None:
        return None
    try:
        from pytaf import Pytaf
        # Call do_test directly: the original getattr()-based "reflection"
        # added nothing over a plain method call.
        result = Pytaf().do_test(self.modules, self.settings, self.test, params)
        if result:
            self.manager.passed()
        else:
            self.manager.failed()
        # Random sleep in [0, throttle_rate) seconds to spread out thread
        # activity across the load test.
        time.sleep(random.random() * self.throttle_rate)
        return result
    except Exception as inst:
        # Best-effort: report and back off, but don't kill the load test
        # thread; caller sees None in this case (as in the original).
        print('LoadRunner.run exception: %s' % str(inst))
        time.sleep(float(self.throttle_rate))
def setup(self, args):
    """Parse command-line options, load the JSON config, run the requested
    tests (load test, named tests, or all tests), then print and optionally
    post the results.

    args: argv-style list handed to optparse.
    Exits the process on a missing -c option, an unreadable config file,
    malformed load-test settings, or a named test with no params.
    """
    # Parse command line options
    parser = optparse.OptionParser()
    parser.add_option("-b", "--browser", default=None, type="string")
    parser.add_option("-c", "--config_file", default=None, type="string")
    parser.add_option("-d", "--db", default="false", type="string")
    parser.add_option("-e", "--excluded", default=None, type="string")
    parser.add_option("-l", "--logfile", default=None, type="string")
    parser.add_option("-m", "--modules", default=None, type="string")
    parser.add_option("-s", "--selenium_server", default=None, type="string")
    parser.add_option("-t", "--test", default=None, type="string")
    parser.add_option("-u", "--url", default=None, type="string")
    parser.add_option("-y", "--test_type", default=None, type="string")
    parser.add_option("-z", "--loadtest_settings", default=None, type="string")
    options, args_out = parser.parse_args(args)

    if options.config_file is None:
        print("-c (--config_file) is required")
        sys.exit(-1)

    if options.logfile is not None:
        # Redirect stdout to the logfile.  Line-buffered (buffering=1):
        # the original buffering=0 is invalid for text-mode files in
        # Python 3 and raises ValueError.
        sys.stdout = open(options.logfile, "a", 1)

    # Tests explicitly excluded via the command-line option.
    if options.excluded is not None:
        excluded_list = options.excluded.split(",")
    else:
        excluded_list = None

    # Load the config file from $PYTAF_HOME/config/.
    config_path = ""  # pre-bind so the except clause below cannot NameError
    try:
        config_path = os.getenv("PYTAF_HOME") + os.sep + "config" + os.sep
        the_file = open("%s%s" % (config_path, options.config_file), "r").read()
        config = json.loads(the_file)
    except Exception:
        print(pytaf_utils.formatExceptionInfo())
        print("problem with config file %s%s" % (config_path, options.config_file))
        sys.exit()

    # Optional database config; absence is not an error.
    try:
        the_file = open("%s%s" % (config_path, "db_config.json"), "r").read()
        db_config = json.loads(the_file)
    except Exception:
        db_config = {}

    # command-line -u overrides config file for url (for browser tests)
    if options.url is not None:
        config["settings"]["url"] = options.url

    # command-line -b overrides config file for browser
    # can be in the form of 'Firefox' or 'Firefox,10,WINDOWS'
    if options.browser is not None:
        config["settings"]["browser"] = options.browser

    # command-line -s overrides the selenium_server host (and port) settings
    if options.selenium_server is not None:
        if ":" in options.selenium_server:
            sel_list = options.selenium_server.split(":")
            config["settings"]["selenium_host"] = sel_list[0]
            config["settings"]["selenium_port"] = int(sel_list[1])
        else:
            config["settings"]["selenium_host"] = options.selenium_server

    # reset the settings object for passing on to test methods
    settings = config["settings"]

    # Dynamically import modules: all from the config, or only the -m ones.
    if options.modules is None:
        modules_array = pytaf_utils.get_all_modules(config)
    else:
        modules_array = options.modules.split(",")
    if DEBUG:
        print("modules: %s" % modules_array)
    # list() is required: in Python 3 map() returns a one-shot iterator,
    # and mapped_modules is reused (get_all_tests plus every do_test call).
    mapped_modules = list(map(__import__, modules_array))

    passed = 0
    failed = 0
    if options.test_type == "load":
        # -z/--loadtest_settings may override load_test_settings, given as
        # duration,max_threads,ramp_steps,ramp_interval,throttle_rate
        # e.g. 3600,500,10,30,1 runs the load test for 1 hour (3600 s),
        # ramping to 500 threads in 10 steps (50 threads per step), each
        # batch added in ~30 second installments; throttle_rate=1 brakes
        # the whole test by sleeping that long between allocation chunks.
        if options.loadtest_settings is not None:
            load_list = options.loadtest_settings.split(",")
            if len(load_list) == 5:
                config["settings"]["load_test_settings"] = {
                    "duration": int(load_list[0]),
                    "max_threads": int(load_list[1]),
                    "ramp_steps": int(load_list[2]),
                    "ramp_interval": int(load_list[3]),
                    "throttle_rate": int(load_list[4]),
                }
            else:
                print("load test settings are not complete.")
                # trailing space added: the original adjacent literals
                # concatenated into "...form ofduration:..."
                print("they must be in the form of "
                      "duration:max_threads:ramp_steps:ramp_interval:throttle_rate")
                sys.exit(-1)
        # now start the load test
        passed, failed = self.do_load_test(mapped_modules, config)
    elif options.test is not None:
        # --test specified: try to get the params and run each named test
        for test in options.test.split(","):
            if self.test_excluded(test, excluded_list):
                print("%s is on the excluded list" % test)
                continue
            params = pytaf_utils.get_params(config, test)
            if params is None:
                print("could not find params for test %s" % test)
                sys.exit()
            if self.do_test(mapped_modules, settings, test, params):
                passed += 1
            else:
                failed += 1
    else:
        # no --test: collect and run all the tests in the config file
        for test in pytaf_utils.get_all_tests(config, mapped_modules):
            if self.test_excluded(test, excluded_list):
                print("%s is on the excluded list" % test)
                continue
            params = pytaf_utils.get_params(config, test)
            if params is not None:
                if self.do_test(mapped_modules, settings, test, params):
                    passed += 1
                else:
                    failed += 1

    # Summary to stdout (possibly the redirected logfile).
    print("---------------")
    print("Tests Run: %s" % (passed + failed))
    print("Passed: %s" % passed)
    print("Failed: %s" % failed)
    print_results = []
    for result in self.results:
        print_results.append(result["status"] + " " + result["test_method"])
    for result in sorted(print_results):
        print(result)
    # post results to the database
    if pytaf_utils.str2bool(options.db):
        pytaf_utils.post_results(self.results, settings, db_config, passed, failed)
def setup(self, args):
    """Parse command-line options, assemble a (possibly merged) config from
    global-settings/sub-config files, run the selected tests, and report
    and optionally post the results.

    Python 2 code path (print statements, dict.has_key).
    args: argv-style list handed to the pass-through option parser.
    Exits the process on missing required options or a JSON problem in any
    config file.
    """
    # Parse command line options
    parser = PassThroughOptionParser()
    parser.add_option('-a', '--virtual_config_file', default=None, type='string')
    parser.add_option('-b', '--browser', default=None, type='string')
    parser.add_option('-c', '--config_file', default=None, type='string')
    parser.add_option('-d', '--db', default="false", type='string')
    parser.add_option('-e', '--excluded', default=None, type='string')
    parser.add_option('-g', '--grid_address', default=None, type='string')
    # -i: override global settings with this comma-separated, key:value string
    parser.add_option('-i', '--override_settings', default=None, type='string')
    parser.add_option('-l', '--logfile', default=None, type='string')
    parser.add_option('-m', '--modules', default=None, type='string')
    # -o: override specific test params with this comma-separated, key:value string
    parser.add_option('-o', '--override_params', default=None, type='string')
    parser.add_option('-s', '--settings', default=None, type='string')
    parser.add_option('-t', '--test', default=None, type='string')
    parser.add_option('-u', '--url', default=None, type='string')
    parser.add_option('-y', '--test_type', default=None, type='string')
    parser.add_option('-z', '--loadtest_settings', default=None, type='string')
    options, args_out = parser.parse_args(args)

    self.override_params = options.override_params
    self.override_settings = options.override_settings
    global_settings = options.settings   # -s: comma-separated settings-config file list
    global_settings_config = None        # parsed top-most (global) settings config
    config = {}                          # the working, merged config
    sub_configs = []                     # additional config files to merge in
    test_overrides = {}                  # per-test param override dicts

    if options.config_file == None and options.virtual_config_file == None and options.settings == None:
        print '-c (--config_file) or -a (--virtual_config_file) or -s (--settings) is required'
        sys.exit(2)

    if options.logfile != None:
        ''' redirect stdout to the logfile (unbuffered append) '''
        f = open(options.logfile, "a", 0)
        sys.stdout = f

    if options.excluded != None:
        ''' build a list of tests explicitly excluded from the command-line option '''
        excluded_list = options.excluded.split(',')
    else:
        # NOTE(review): left as None when -e is absent, but later code calls
        # excluded_list.append(...) and len(excluded_list) -- those paths
        # would raise TypeError; confirm they cannot be reached without -e.
        excluded_list = None

    ''' load the config file '''
    # optional global settings config file
    # can be a comma-separated list, in which case the global_settings config is the first item
    # and any number of config files can be added to it, merging fields as needed
    # global settings override all sub_config settings, but are overridden by command-line -i overrides
    # tests['includes'] and tests['excludes'] in sub_configs are simply added together
    # an exclude in any sub_config will override any include of the same test elsewhere in the merged configs
    if global_settings != None:
        gsettings = global_settings.split(",")
        # NOTE(review): env var is 'PYTAFHOME' here but 'PYTAF_HOME' in the
        # config-file branch below -- confirm which name is intended.
        config_path = os.getenv('PYTAFHOME') + os.sep + "config" + os.sep
        #print gsettings
        # first import the "global" or top-most, settings file
        f = open("%s%s" % (config_path, gsettings[0]), 'r').read()
        global_settings_config = json.loads(f)
        test_overrides = global_settings_config.get('test_overrides', {})
        #print test_overrides
        #print global_settings_config
        for i in range(1, len(gsettings)):
            sub_configs.append(gsettings[i])

    # handle the virtual config file
    try:
        if options.virtual_config_file != None:
            #print options.virtual_config_file
            # -a supplies the entire config as a JSON string on the command line
            config = json.loads(options.virtual_config_file)
            db_config = {}
        elif options.config_file != None:
            config_path = os.getenv('PYTAF_HOME') + os.sep + "config" + os.sep
            f = open("%s%s" % (config_path, options.config_file), 'r').read()
            config = json.loads(f)

        # allow for the possibility of nested config files
        new_settings = {}
        # allow for an import from override config files
        config_to_import = config.get('import', None)
        if config_to_import != None:
            sub_configs.append(config_to_import)
        if global_settings_config != None and len(sub_configs) > 0:
            top_config = global_settings_config
        else:
            top_config = config  # {} at this point

        # import each sub_config and merge
        if len(sub_configs) > 0:
            for sub_config in sub_configs:
                #print "sub_config file = %s" % sub_config
                f = open("%s%s" % (config_path, sub_config), 'r').read()
                imported_config = json.loads(f)
                # let any original config 'settings' override the imported
                # config's settings by merging the dictionaries (later items
                # win in dict(a.items() + b.items()))
                if imported_config.has_key('settings'):
                    new_settings = dict(imported_config['settings'].items() + top_config['settings'].items())
                    # if there's a global settings config, it will trump all settings
                    if global_settings_config != None:
                        new_settings = dict(new_settings.items() + global_settings_config['settings'].items())
                    # the new, merged config's settings
                    config['settings'] = new_settings
                    # append this sub_config's name to the config_file trail
                    try:
                        config['settings']['config_file'] += "," + sub_config
                    except:
                        # if not already there, initialize it
                        # NOTE(review): gsettings is only bound when -s was
                        # given; this line can NameError when sub_configs came
                        # solely from config['import'] -- confirm.
                        config['settings']['config_file'] = gsettings[0] + "," + sub_config
                    top_config['settings'] = config['settings']  # update this as well
                if imported_config.has_key('tests'):
                    #print "merge the tests includes sections"
                    if config.has_key('tests') == False:
                        config['tests'] = {}
                    # EAFP: extend the includes list, or create it on first merge
                    try:
                        config['tests']['includes'] += imported_config['tests']['includes']
                    except:
                        config['tests']['includes'] = imported_config['tests']['includes']
                    if imported_config['tests'].has_key('excludes'):
                        for exclude_element in imported_config['tests']['excludes']:
                            for method in exclude_element['methods']:
                                #print "adding to exclude list: %s" % method['name']
                                excluded_list.append(method['name'])
                # the params dictionary of individual tests can be overridden, e.g.
                # "test_overrides":
                # {
                #     "test_owner_add_private_channel_invalid_code": { "bad_code" : "AAAAACHECKITOUT" }
                # }
                test_overrides = dict(list(test_overrides.items()) + list(imported_config.get("test_overrides", {}).items()))
                #print test_overrides

            # additional excludes can be added to the base config file's
            # excluded tests list, e.g:
            # "additional_excludes":
            # [
            #     "test_owner_manage_subscriptions"
            # ]
            #print "look for additional excludes"
            additional_excludes = top_config.get('additional_excludes', [])
            #print "additional excludes = %s" % additional_excludes
            if len(excluded_list) == 0 and len(additional_excludes) > 0:
                #print "set excluded_list to %s" % excluded_list
                excluded_list = additional_excludes
            else:
                #print "else?"
                for j in range (len(additional_excludes)):
                    excluded_list.append(additional_excludes[j])
            #print "excluded_list now = %s" % excluded_list
            #print "config = %s" % config
        else:
            # if there's no overrides config but there is a global_settings config
            # if there's a global settings config, it will trump all settings
            if global_settings_config != None:
                #print 'global settings override'
                #print config['settings']
                if config.has_key('settings'):
                    new_settings = dict(config['settings'].items() + global_settings_config['settings'].items())
                    config['settings'] = new_settings
                else:
                    config['settings'] = global_settings_config['settings']
                #print "config = %s" % config
    except:
        print pytaf_utils.formatExceptionInfo()
        if len(sub_configs) > 0:
            cf = gsettings
        else:
            cf = options.config_file
        print "JSON problem in a config file: %s" % cf
        sys.exit(2)

    # NOTE(review): this overwrites the merged "base,sub" config_file trail
    # built above with just the -c value -- confirm that is intended.
    config['settings']['config_file'] = options.config_file

    try:
        # try to open the default db_config file; absence is not an error
        f2 = open("%s%s" % (config_path, "db_config.json"), 'r').read()
        db_config = json.loads(f2)
    except:
        db_config = {}

    # command-line -u overrides config file for url (for browser tests)
    if options.url != None:
        config['settings']['url'] = options.url

    # command-line -b overrides config file for browser
    # can be in the form of 'Firefox' or 'Firefox,10,WINDOWS'
    if options.browser != None:
        config['settings']['browser'] = options.browser

    # command-line -g overrides config file setting for test_host (and
    # optionally test_port as well); used for selenium_grid or a local
    # selenium server
    if options.grid_address != None:
        if options.grid_address.find(":") >= 0:
            g = options.grid_address.split(":")
            config['settings']['test_host'] = g[0]
            config['settings']['test_port'] = int(g[1])
        else:
            config['settings']['test_host'] = options.grid_address

    # reset the settings object for passing on to test methods
    settings = config['settings']

    # initialize the root logger
    self.setup_logger(settings)

    # dynamically import all modules found in the config file
    if options.modules == None:
        modules_array = pytaf_utils.get_all_modules(config)
    else:
        # only load the specified module(s) from the config file
        modules_array = options.modules.split(",")
    logging.debug('modules: %s' % modules_array)
    mapped_modules = map(__import__, modules_array)

    passed = 0
    failed = 0
    if options.test_type == 'load':
        ''' the command-line may override load_test_settings with
        -z --loadtest_settings in the form of
        duration:max_threads:ramp_steps:ramp_interval:throttle_rate
        e.g. 3600,500,10,30,1 which would run the load test for 1 hour
        (3600 seconds) ramping up to a total of 500 threads in 10 steps
        (each step would add 50 threads (500/10)) and these batches of
        threads would be added in 30 second installments (approximately)
        the final value (throttle_rate=1) is used to brake the entire
        load test operation by sleeping for that amount (in seconds)
        between chunks of test case allocations '''
        if options.loadtest_settings != None:
            p = options.loadtest_settings.split(",")
            if len(p) == 5:
                config['settings']['load_test_settings'] = \
                    {"duration": int(p[0]),
                     "max_threads": int(p[1]),
                     "ramp_steps": int(p[2]),
                     "ramp_interval": int(p[3]),
                     "throttle_rate": int(p[4])}
            else:
                logging.fatal('load test settings are not complete.')
                logging.fatal('they must be in the form of' \
                    'duration:max_threads:ramp_steps:ramp_interval:throttle_rate')
                sys.exit(-1)
        # now start the load test
        passed, failed = self.do_load_test(mapped_modules, config)
    # if --test is specified, try and get the params and run each one
    elif options.test != None:
        ts = options.test.split(",")
        for i in range(0, len(ts)):
            test = ts[i]
            if self.test_excluded(test, excluded_list) == False:
                params = pytaf_utils.get_params(config, test)
                if params == None:
                    logging.fatal("could not find params for test %s" % test)
                    sys.exit()
                else:
                    #if test_overrides.get(test, None):
                    #    params = dict(params.items() + test_overrides[test].items())
                    # apply per-test and command-line (-i/-o) overrides
                    params, settings = self.do_overrides(params, settings, test, test_overrides, self.override_settings, self.override_params)
                    status = self.do_test(mapped_modules, settings, test, params)
                    if status == True:
                        passed = passed + 1
                    else:
                        failed = failed + 1
            else:
                logging.info("%s is on the excluded list" % test)
    # if --test is not specified, collect and run all the
    # tests in the config file
    else:
        tests = pytaf_utils.get_all_tests(config, mapped_modules)
        for test in tests:
            if self.test_excluded(test, excluded_list) == False:
                params = pytaf_utils.get_params(config, test)
                if params != None:
                    #if test_overrides.get(test, None):
                    #    params = dict(params.items() + test_overrides[test].items())
                    # apply per-test and command-line (-i/-o) overrides
                    params, settings = self.do_overrides(params, settings, test, test_overrides, self.override_settings, self.override_params)
                    status = self.do_test(mapped_modules, settings, test, params)
                    if status == True:
                        passed = passed + 1
                    else:
                        failed = failed + 1
            else:
                logging.info("%s is on the excluded list" % test)

    # summary via the logger configured above
    logging.info("---------------")
    logging.info("Tests Run: %s" % (passed + failed))
    logging.info("Passed: %s" % passed)
    logging.info("Failed: %s" % failed)
    print_results = []
    for r in self.results:
        print_results.append(r['status'] + " " + r['test_method'])
    for r in sorted(print_results):
        logging.info(r)
    # post results to the database
    if pytaf_utils.str2bool(options.db) == True:
        pytaf_utils.post_results(self.results, settings, db_config, passed, failed)