def test_simple_post(args={}):
    ''' Example of JSON parsing from an HTTP POST.

    Expects args = {"params": {...}, "settings": {...}}; POSTs to
    <url>/post (httpbin.org by default) and verifies the echoed JSON
    contains an 'origin' field.
    Returns the framework (bool, message) tuple from pytaf_utils.verify,
    or (False, traceback-string) on any exception.
    '''
    params = args['params']
    settings = args['settings']
    url = params.get('url', "httpbin.org")
    try:
        errors = []
        headers = {}
        request = ''          # empty POST body
        locator = "/post"     # httpbin echoes POST data at /post
        lib = ApiLib()
        response = lib.do_post(url, locator, request, settings, headers, False)
        data = json.loads(response['data'])
        print(data)
        origin = data.get('origin', None)
        # was `origin == None`; identity comparison is the correct idiom
        if origin is None:
            errors.append("did not get expected 'origin' field")
        else:
            print("origin = %s" % origin)
        return pytaf_utils.verify(len(errors) == 0,
                                  'there were errors: %s' % errors)
    # was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt
    except Exception:
        return (False, pytaf_utils.formatExceptionInfo())
def test_web(args={}):
    ''' Selenium example: open a URL and look for the Google search box.

    Expects args = {"params": {...}, "settings": {...}}.
    Returns the framework (bool, message) tuple from pytaf_utils.verify,
    or (False, traceback-string) on any exception.
    '''
    # Bug fix: if WebLib(settings) raised, `lib` was unbound and the
    # finally clause raised NameError, masking the original exception.
    lib = None
    try:
        # initialize the error strings array
        errors = []
        # parse the global settings and test method
        # params from the args provided
        settings = args['settings']
        params = args['params']
        # do some selenium specific test stuff ...
        goto_url = params.get('url', 'http://www.google.com')
        lib = WebLib(settings)
        lib.driver.get(goto_url)
        element = lib.driver.find_element_by_id('gbqfq')
        if element is None:  # was `element == None`
            errors.append('did not find the google input element')
        else:
            logging.info('found the google input element')
        # call the utility method to verify the absence or errors or
        # pack up the error messages if any
        return pytaf_utils.verify(len(errors) == 0,
                                  'there were errors: %s' % errors)
    except Exception as inst:
        logging.error(inst)
        # fail on any exception and include a stack trace
        return (False, pytaf_utils.formatExceptionInfo())
    finally:
        # cleanup — only if the driver was actually created
        if lib is not None:
            lib.driver.quit()
def test_api(args={}):
    ''' Generic test showing params and settings extraction from args.

    Returns (True, '') on success, or (False, traceback-string)
    on any exception inside the test body.
    '''
    # Bug fix: was args['settingss'] (typo), which raised KeyError
    # before the try block and crashed the runner instead of failing
    # the test gracefully.
    settings = args['settings']
    params = args['params']
    try:
        apilib = ApiLib()
        print(settings, params)
        apilib.some_function()
        return (True, '')
    # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt escape
    except Exception:
        return (False, pytaf_utils.formatExceptionInfo())
def test_api(args={}):
    """Demonstrate reading the required test_config fields from args."""
    # required test_config fields
    settings, params = args['settings'], args['params']
    var = params.get('var', 'default')
    logging.info("var = %s" % var)
    logging.info("settings = %s" % settings)
    # 'url' is a required setting; a missing key raises here on purpose
    url = settings['url']
    try:
        api = ApiLib()
        api.some_function()
        return (True, '')
    except:
        return (False, pytaf_utils.formatExceptionInfo())
def test_simple_get(args={}):
    ''' example of json parsing from http get
        http://httpbin.org/
        It echoes the data used in your request for any of these types:
        http://httpbin.org/ip Returns Origin IP.
        http://httpbin.org/user-agent Returns user-agent.
        http://httpbin.org/headers Returns header dict.
        http://httpbin.org/get Returns GET data.
        http://httpbin.org/post Returns POST data.
        http://httpbin.org/put Returns PUT data.
        http://httpbin.org/delete Returns DELETE data
        http://httpbin.org/gzip Returns gzip-encoded data.
        http://httpbin.org/status/:code Returns given HTTP Status code.
        http://httpbin.org/redirect/:n 302 Redirects n times.
        http://httpbin.org/cookies Returns cookie data.
        http://httpbin.org/cookies/set/:name/:value Sets a simple cookie.
        http://httpbin.org/basic-auth/:user/:passwd Challenges HTTPBasic Auth.
        http://httpbin.org/hidden-basic-auth/:user/:passwd 404'd BasicAuth.
        http://httpbin.org/stream/:n Streams n-100 lines.
        http://httpbin.org/delay/:n Delays responding for n-10 seconds.
    '''
    params = args['params']
    settings = args['settings']
    url = params.get('url', "httpbin.org")
    try:
        errors = []
        headers = {}
        locator = "/get"
        lib = ApiLib()
        response = lib.do_get(url, locator, settings, headers, False)
        data = json.loads(response['data'])
        print(data)
        origin = data.get('origin', None)
        # was `origin == None`; identity comparison is the correct idiom
        if origin is None:
            errors.append("did not get expected 'origin' field")
        else:
            print("origin = %s" % origin)
        return pytaf_utils.verify(len(errors) == 0,
                                  'there were errors: %s' % errors)
    # was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt
    except Exception:
        return (False, pytaf_utils.formatExceptionInfo())
def __init__(self, settings):
    """Build a Remote WebDriver from the 'browser' spec and selenium host/port.

    The 'browser' setting is 'name' or 'name,version,platform'
    (e.g. '*firefox,10,Windows'). On any failure self.driver stays None
    and the exception info is printed.
    """
    spec = settings.get('browser', '*firefox,10,Windows')
    parts = spec.split(',')
    if len(parts) == 3:
        name, version, platform = parts
    else:
        name, version, platform = parts[0], '', ''
    host = settings.get('selenium_host', 'localhost')
    port = settings.get('selenium_port', '4444')
    self.driver = None
    hub_url = "http://%s:%s/wd/hub" % (host, port)
    logging.info("webdriver_url = %s" % hub_url)
    try:
        # NOTE: order matters — '*chrome' (Selenium-RC name, presumably
        # Firefox chrome mode) is checked before '*googlechrome'
        if "firefox" in name or "*chrome" in name:
            caps = DesiredCapabilities.FIREFOX
        elif "*googlechrome" in name:
            caps = DesiredCapabilities.CHROME
            caps["chrome.switches"] = ["--ignore-certificate-errors"]
        elif "*mock" in name:
            caps = DesiredCapabilities.HTMLUNIT
        elif "android" in name:
            caps = DesiredCapabilities.ANDROID
        elif "*ie" in name:
            caps = DesiredCapabilities.INTERNETEXPLORER
        else:
            # default to Firefox
            caps = DesiredCapabilities.FIREFOX
        if version != '':
            caps['version'] = version
        if platform != '':
            caps['platform'] = platform
        self.driver = webdriver.Remote(hub_url, caps)
    except:
        print(pytaf_utils.formatExceptionInfo())
def test_html_get(args={}):
    ''' Example using BeautifulSoup to parse HTML.

    Fetches <url>/index.html and prints the style attribute of every
    <div> that has one. Returns the framework (bool, message) tuple,
    or (False, traceback-string) on any exception.
    '''
    params = args['params']
    settings = args['settings']
    url = params.get('url', "www.google.com")
    try:
        errors = []
        headers = {}
        locator = "/index.html"
        lib = ApiLib()
        response = lib.do_get(url, locator, settings, headers, False)
        data = response['data']
        soup = BeautifulSoup(data)
        # was try/except-pass around div['style']; Tag.get avoids the
        # KeyError and doesn't hide unrelated errors
        for div in soup.findAll('div'):
            style = div.get('style')
            if style is not None:
                print("div style = %s" % style)
        return pytaf_utils.verify(len(errors) == 0,
                                  'there were errors: %s' % errors)
    # was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt
    except Exception:
        return (False, pytaf_utils.formatExceptionInfo())
def test_html_get(args={}):
    ''' Example using BeautifulSoup to parse HTML.

    Fetches <url>/index.html and logs the style attribute of every
    <div> that has one. Returns the framework (bool, message) tuple,
    or (False, traceback-string) on any exception.
    '''
    settings = args['settings']
    # Bug fix: `url = settings['url']` was immediately clobbered by a
    # hard-coded "www.google.com" (debug leftover), so the configured
    # url was never used. Now the setting wins, with the old hard-coded
    # value as the fallback.
    url = settings.get('url', "www.google.com")
    try:
        errors = []
        headers = {}
        locator = "/index.html"
        lib = ApiLib()
        response = lib.do_get(url, locator, settings, headers, False)
        data = response['data']
        soup = BeautifulSoup(data)
        # was try/except-pass around d['style']; Tag.get avoids the
        # KeyError and doesn't hide unrelated errors
        for d in soup.findAll('div'):
            style = d.get('style')
            if style is not None:
                logging.info("div style = %s" % style)
        return pytaf_utils.verify(len(errors) == 0,
                                  'there were errors: %s' % errors)
    # was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt
    except Exception:
        return (False, pytaf_utils.formatExceptionInfo())
def setup(self, args):
    """Parse command-line options, load the JSON config, import the
    configured test modules, then run either a load test (-y load),
    the named test(s) (-t), or every test in the config.

    args: argv-style list handed to optparse.
    Side effects: may redirect sys.stdout to a logfile, mutates
    config["settings"] with command-line overrides, appends to
    self.results (via do_test), and exits the process on fatal
    configuration problems.
    """
    # Parse command line options
    parser = optparse.OptionParser()
    parser.add_option("-b", "--browser", default=None, type="string")
    parser.add_option("-c", "--config_file", default=None, type="string")
    parser.add_option("-d", "--db", default="false", type="string")
    parser.add_option("-e", "--excluded", default=None, type="string")
    parser.add_option("-l", "--logfile", default=None, type="string")
    parser.add_option("-m", "--modules", default=None, type="string")
    parser.add_option("-s", "--selenium_server", default=None, type="string")
    parser.add_option("-t", "--test", default=None, type="string")
    parser.add_option("-u", "--url", default=None, type="string")
    parser.add_option("-y", "--test_type", default=None, type="string")
    parser.add_option("-z", "--loadtest_settings", default=None, type="string")
    options, args_out = parser.parse_args(args)
    if options.config_file == None:
        print("-c (--config_file) is required")
        sys.exit(-1)
    if options.logfile != None:
        """ redirect stdout to the logfile """
        the_file = open(options.logfile, "a", 0)
        sys.stdout = the_file
    if options.excluded != None:
        """ build a list of tests explicitly excluded from the command-line option """
        excluded_list = options.excluded.split(",")
    else:
        excluded_list = None
    """ load the config file """
    try:
        # config files live under $PYTAF_HOME/config/
        config_path = os.getenv("PYTAF_HOME") + os.sep + "config" + os.sep
        the_file = open("%s%s" % (config_path, options.config_file), "r").read()
        config = json.loads(the_file)
    except:
        print(pytaf_utils.formatExceptionInfo())
        print("problem with config file %s%s" % (config_path, options.config_file))
        sys.exit()
    try:
        # try to open the default db_config file
        the_file = open("%s%s" % (config_path, "db_config.json"), "r").read()
        db_config = json.loads(the_file)
    except:
        # missing/bad db_config is non-fatal; results just won't be posted
        db_config = {}
    # command-line -u overrides config file for url
    if options.url != None:
        config["settings"]["url"] = options.url  # for browser tests
    # command-line -b overrides config file for browser
    # can be in the form of 'Firefox' or 'Firefox,10,WINDOWS'
    if options.browser != None:
        config["settings"]["browser"] = options.browser
    # command-line -s option overrides the selenium_server
    # host and port settings
    if options.selenium_server != None:
        if options.selenium_server.find(":") >= 0:
            sel_list = options.selenium_server.split(":")
            config["settings"]["selenium_host"] = sel_list[0]
            config["settings"]["selenium_port"] = int(sel_list[1])
        else:
            config["settings"]["selenium_host"] = options.selenium_server
    # reset the settings object for passing on to test methods
    settings = config["settings"]
    # dynamically import all modules found in the config file
    if options.modules == None:
        modules_array = pytaf_utils.get_all_modules(config)
    else:
        # only load the specified module(s) from the config file
        modules_array = options.modules.split(",")
    if DEBUG:
        print("modules: %s" % modules_array)
    mapped_modules = map(__import__, modules_array)
    passed = 0
    failed = 0
    if options.test_type == "load":
        """ the command-line may override load_test_settings
        with -z --loadtest_settings in the form of
        duration:max_threads:ramp_steps:ramp_interval:throttle_rate
        e.g. 3600,500,10,30,1 which would run the load test for
        1 hour (3600 seconds) ramping up to a total of 500 threads
        in 10 steps (each step would add 50 threads (500/10)) and
        these batches of threads would be added in 30 second
        installments (approximately) the final value (throttle_rate=1)
        is used to brake the entire load test operation by sleeping
        for that amount (in seconds) between chunks of test case
        allocations """
        if options.loadtest_settings != None:
            load_list = options.loadtest_settings.split(",")
            if len(load_list) == 5:
                config["settings"]["load_test_settings"] = {
                    "duration": int(load_list[0]),
                    "max_threads": int(load_list[1]),
                    "ramp_steps": int(load_list[2]),
                    "ramp_interval": int(load_list[3]),
                    "throttle_rate": int(load_list[4]),
                }
            else:
                print("load test settings are not complete.")
                print("they must be in the form of"
                      "duration:max_threads:ramp_steps:ramp_interval:throttle_rate")
                sys.exit(-1)
        # now start the load test
        passed, failed = self.do_load_test(mapped_modules, config)
    # if --test is specified, try and get the params and run each one
    elif options.test != None:
        test_list = options.test.split(",")
        for i in range(0, len(test_list)):
            test = test_list[i]
            if self.test_excluded(test, excluded_list) == False:
                params = pytaf_utils.get_params(config, test)
                if params == None:
                    print("could not find params for test %s" % test)
                    sys.exit()
                else:
                    status = self.do_test(mapped_modules, settings, test, params)
                    if status == True:
                        passed = passed + 1
                    else:
                        failed = failed + 1
            else:
                print("%s is on the excluded list" % test)
    # if --test is not specified, collect and run all the
    # tests in the config file
    else:
        tests = pytaf_utils.get_all_tests(config, mapped_modules)
        for test in tests:
            if self.test_excluded(test, excluded_list) == False:
                params = pytaf_utils.get_params(config, test)
                if params != None:
                    status = self.do_test(mapped_modules, settings, test, params)
                    if status == True:
                        passed = passed + 1
                    else:
                        failed = failed + 1
            else:
                print("%s is on the excluded list" % test)
    # summary report
    print("---------------")
    print("Tests Run: %s" % (passed + failed))
    print("Passed: %s" % passed)
    print("Failed: %s" % failed)
    print_results = []
    for result in self.results:
        print_results.append(result["status"] + " " + result["test_method"])
    for result in sorted(print_results):
        print(result)
    # post results to the database
    if pytaf_utils.str2bool(options.db) == True:
        pytaf_utils.post_results(self.results, settings, db_config, passed, failed)
def do_test(self, modules, settings, test, params):
    """Locate the test method named `test` in the imported `modules`,
    run it with {"settings": ..., "params": ...}, record the outcome
    in self.results, and return the test's status.

    Returns the first element of the test's (status, message) tuple,
    or None if the test was not found in any module.
    NOTE: exceptions raised while probing/running a module are
    deliberately swallowed so the search continues in the next module.
    """
    result = (False, "error")
    start_time = end_time = elapsed_time = 0
    found_module = None
    test_was_found = False
    for module in modules:
        try:
            if DEBUG:
                print("do test %s from module %s" % (test, module))
            # raises AttributeError (caught below) if this module
            # doesn't define the test — that's how the search works
            method_to_call = getattr(module, test)
            found_module = str(module)
            test_was_found = True
            start_time = int(time.time())
            print("------------\n starting test: %s" % test)
            print(" start time: %s" % datetime.datetime.now())
            print("------------")
            args = {"settings": settings, "params": params}
            result = method_to_call(args)
            end_time = int(time.time())
            elapsed_time = end_time - start_time
        except Exception as inst:
            if DEBUG:
                print("exception from methodToCall: %s" % sys.exc_info()[0])
            continue
    if test_was_found == False:
        print(
            "error: pytaf did not find the test case (%s) \
in the modules defined in the config file (%s)"
            % (test, str(modules))
        )
        return
    # tests return (True|False, String)
    error_message = str(result[1])  # could be Exception, hence the cast
    status = result[0]
    try:
        if status == True:  # any return value except False is PASSED
            status_string = "PASSED"
        else:
            status_string = "FAILED"
        module_string = ""
        if found_module != None:
            # extract a bare module name, e.g. ".../foo.py" -> "foo"
            idx1 = found_module.rfind(os.sep) + 1
            idx2 = found_module.find(".py")
            module_string = found_module[idx1:idx2]
        # message is truncated to 1024 chars to keep results compact
        self.results.append(
            {"test_method": test, "status": status_string, "message": error_message[:1024], "module": module_string}
        )
        if status != False:
            result_str = "RESULT ===> PASSED: %s" % test
        else:
            result_str = "RESULT ===> FAILED: %s, %s" % (test, pytaf_utils.anystring_as_utf8(error_message))
        if elapsed_time > 0:
            result_str = "%s, elapsed time: %s seconds" % (result_str, str(elapsed_time))
        print("%s\n---------------" % result_str)
    except:
        print(pytaf_utils.formatExceptionInfo())
    return status
def setup(self, args):
    """Parse command-line options, load and merge (possibly nested)
    JSON config files, import the configured test modules, then run
    either a load test (-y load), the named test(s) (-t), or every
    test in the merged config.

    args: argv-style list handed to the option parser.
    Config precedence (low -> high): imported sub_configs, the main
    config (-c / -a), the global settings config (-s), then individual
    command-line overrides (-u/-b/-g/-i/-o).
    Side effects: may redirect sys.stdout, mutates config['settings'],
    appends to self.results (via do_test), and exits the process on
    fatal configuration problems.
    NOTE: Python 2 code (print statements, dict.has_key,
    dict(a.items() + b.items())).
    """
    # Parse command line options
    parser = PassThroughOptionParser()
    parser.add_option('-a', '--virtual_config_file', default=None, type='string')
    parser.add_option('-b', '--browser', default=None, type='string')
    parser.add_option('-c', '--config_file', default=None, type='string')
    parser.add_option('-d', '--db', default="false", type='string')
    parser.add_option('-e', '--excluded', default=None, type='string')
    parser.add_option('-g', '--grid_address', default=None, type='string')
    # override global settings with this comma-separated, key:value string
    parser.add_option('-i', '--override_settings', default=None, type='string')
    parser.add_option('-l', '--logfile', default=None, type='string')
    parser.add_option('-m', '--modules', default=None, type='string')
    # override specific test params with this comma-separated, key:value string
    parser.add_option('-o', '--override_params', default=None, type='string')
    parser.add_option('-s', '--settings', default=None, type='string')
    parser.add_option('-t', '--test', default=None, type='string')
    parser.add_option('-u', '--url', default=None, type='string')
    parser.add_option('-y', '--test_type', default=None, type='string')
    parser.add_option('-z', '--loadtest_settings', default=None, type='string')
    options, args_out = parser.parse_args(args)
    self.override_params = options.override_params
    self.override_settings = options.override_settings
    global_settings = options.settings
    global_settings_config = None
    config = {}
    sub_configs = []
    test_overrides = {}
    if options.config_file == None and options.virtual_config_file == None and options.settings == None:
        print '-c (--config_file) or -a (--virtual_config_file) or -s (--settings) is required'
        sys.exit(2)
    if options.logfile != None:
        ''' redirect stdout to the logfile '''
        f = open(options.logfile, "a", 0)
        sys.stdout = f
    if options.excluded != None:
        ''' build a list of tests explicitly excluded from the command-line option '''
        excluded_list = options.excluded.split(',')
    else:
        excluded_list = None
    ''' load the config file '''
    # optional global settings config file
    # can be a comma-separated list, in which case the global_settings config is the first item
    # and any number of config files can be added to it, merging fields as needed
    # global settings override all sub_config settings, but are overridden by command-line -i overrides
    # tests['includes'] and tests['excludes'] in sub_configs are simply added together
    # an exclude in any sub_config will override any include of the same test elsewhere in the merged configs
    if global_settings != None:
        gsettings = global_settings.split(",")
        # NOTE(review): 'PYTAFHOME' here vs 'PYTAF_HOME' below — looks
        # like an inconsistency; confirm which env var is intended
        config_path = os.getenv('PYTAFHOME') + os.sep + "config" + os.sep
        #print gsettings
        # first import the "global" or top-most, settings file
        f = open("%s%s" % (config_path, gsettings[0]), 'r').read()
        global_settings_config = json.loads(f)
        test_overrides = global_settings_config.get('test_overrides', {})
        #print test_overrides
        #print global_settings_config
        for i in range(1, len(gsettings)):
            sub_configs.append(gsettings[i])
    # handle the virtual config file
    try:
        if options.virtual_config_file != None:
            #print options.virtual_config_file
            # -a passes the whole config as a JSON string on the command line
            config = json.loads(options.virtual_config_file)
            db_config = {}
        elif options.config_file != None:
            config_path = os.getenv('PYTAF_HOME') + os.sep + "config" + os.sep
            f = open("%s%s" % (config_path, options.config_file), 'r').read()
            config = json.loads(f)
        # allow for the possibility of nested config files
        new_settings = {}
        # allow for an import from override config files
        config_to_import = config.get('import', None)
        if config_to_import != None:
            sub_configs.append(config_to_import)
        if global_settings_config != None and len(sub_configs) > 0:
            top_config = global_settings_config
        else:
            top_config = config  # {} at this point
        # import each sub_config and merge
        if len(sub_configs) > 0:
            for sub_config in sub_configs:
                #print "sub_config file = %s" % sub_config
                f = open("%s%s" % (config_path, sub_config), 'r').read()
                imported_config = json.loads(f)
                # let any original config 'settings' override the imported
                # config's settings by merging the dictionaries
                # (later items() win in the dict() merge)
                if imported_config.has_key('settings'):
                    new_settings = dict(imported_config['settings'].items() + top_config['settings'].items())
                    # if there's a global settings config, it will trump all settings
                    if global_settings_config != None:
                        new_settings = dict(new_settings.items() + global_settings_config['settings'].items())
                    # the new, merged config's settings
                    config['settings'] = new_settings
                    try:
                        config['settings']['config_file'] += "," + sub_config
                    except:
                        # if not already there, initialize it
                        config['settings']['config_file'] = gsettings[0] + "," + sub_config
                    top_config['settings'] = config['settings']  # update this as well
                if imported_config.has_key('tests'):
                    #print "merge the tests includes sections"
                    if config.has_key('tests') == False:
                        config['tests'] = {}
                    try:
                        config['tests']['includes'] += imported_config['tests']['includes']
                    except:
                        config['tests']['includes'] = imported_config['tests']['includes']
                    if imported_config['tests'].has_key('excludes'):
                        for exclude_element in imported_config['tests']['excludes']:
                            for method in exclude_element['methods']:
                                #print "adding to exclude list: %s" % method['name']
                                excluded_list.append(method['name'])
                # the params dictionary of individual tests can be overridden, e.g.
                # "test_overrides":
                # {
                #  "test_owner_add_private_channel_invalid_code": { "bad_code" : "AAAAACHECKITOUT" }
                # }
                test_overrides = dict(list(test_overrides.items()) + list(imported_config.get("test_overrides", {}).items()))
                #print test_overrides
                # additional excludes can be added to the base config file's excluded tests list, e.g:
                #"additional_excludes":
                # [
                #  "test_owner_manage_subscriptions"
                # ]
                #print "look for additional excludes"
                additional_excludes = top_config.get('additional_excludes', [])
                #print "additional excludes = %s" % additional_excludes
                if len(excluded_list) == 0 and len(additional_excludes) > 0:
                    #print "set excluded_list to %s" % excluded_list
                    excluded_list = additional_excludes
                else:
                    #print "else?"
                    for j in range(len(additional_excludes)):
                        excluded_list.append(additional_excludes[j])
                #print "excluded_list now = %s" % excluded_list
            #print "config = %s" % config
        else:
            # if there's no overrides config but there is a global_settings config
            # if there's a global settings config, it will trump all settings
            if global_settings_config != None:
                #print 'global settings override'
                #print config['settings']
                if config.has_key('settings'):
                    new_settings = dict(config['settings'].items() + global_settings_config['settings'].items())
                    config['settings'] = new_settings
                else:
                    config['settings'] = global_settings_config['settings']
            #print "config = %s" % config
    except:
        print pytaf_utils.formatExceptionInfo()
        if len(sub_configs) > 0:
            cf = gsettings
        else:
            cf = options.config_file
        print "JSON problem in a config file: %s" % cf
        sys.exit(2)
    config['settings']['config_file'] = options.config_file
    try:
        # try to open the default db_config file
        f2 = open("%s%s" % (config_path, "db_config.json"), 'r').read()
        db_config = json.loads(f2)
    except:
        # missing/bad db_config is non-fatal; results just won't be posted
        db_config = {}
    # command-line -u overrides config file for url
    if options.url != None:
        config['settings']['url'] = options.url  # for browser tests
    # command-line -b overrides config file for browser
    # can be in the form of 'Firefox' or 'Firefox,10,WINDOWS'
    if options.browser != None:
        config['settings']['browser'] = options.browser
    # command-line -g overrides config file setting for test_host (and optionally test_port as well)
    # used for selenium_grid or local selenium server
    if options.grid_address != None:
        if options.grid_address.find(":") >= 0:
            g = options.grid_address.split(":")
            config['settings']['test_host'] = g[0]
            config['settings']['test_port'] = int(g[1])
        else:
            config['settings']['test_host'] = options.grid_address
    # reset the settings object for passing on to test methods
    settings = config['settings']
    # initialize the root logger
    self.setup_logger(settings)
    # dynamically import all modules found in the config file
    if options.modules == None:
        modules_array = pytaf_utils.get_all_modules(config)
    else:
        # only load the specified module(s) from the config file
        modules_array = options.modules.split(",")
    logging.debug('modules: %s' % modules_array)
    mapped_modules = map(__import__, modules_array)
    passed = 0
    failed = 0
    if options.test_type == 'load':
        ''' the command-line may override load_test_settings
        with -z --loadtest_settings in the form of
        duration:max_threads:ramp_steps:ramp_interval:throttle_rate
        e.g. 3600,500,10,30,1 which would run the load test for
        1 hour (3600 seconds) ramping up to a total of 500 threads
        in 10 steps (each step would add 50 threads (500/10)) and
        these batches of threads would be added in 30 second
        installments (approximately) the final value (throttle_rate=1)
        is used to brake the entire load test operation by sleeping
        for that amount (in seconds) between chunks of test case
        allocations '''
        if options.loadtest_settings != None:
            p = options.loadtest_settings.split(",")
            if len(p) == 5:
                config['settings']['load_test_settings'] = \
                    {"duration": int(p[0]),
                     "max_threads": int(p[1]),
                     "ramp_steps": int(p[2]),
                     "ramp_interval": int(p[3]),
                     "throttle_rate": int(p[4])}
            else:
                logging.fatal('load test settings are not complete.')
                logging.fatal('they must be in the form of' \
                    'duration:max_threads:ramp_steps:ramp_interval:throttle_rate')
                sys.exit(-1)
        # now start the load test
        passed, failed = self.do_load_test(mapped_modules, config)
    # if --test is specified, try and get the params and run each one
    elif options.test != None:
        ts = options.test.split(",")
        for i in range(0, len(ts)):
            test = ts[i]
            if self.test_excluded(test, excluded_list) == False:
                params = pytaf_utils.get_params(config, test)
                if params == None:
                    logging.fatal("could not find params for test %s" % test)
                    sys.exit()
                else:
                    #if test_overrides.get(test, None):
                    #    params = dict(params.items() + test_overrides[test].items())
                    params, settings = self.do_overrides(params, settings, test, test_overrides, self.override_settings, self.override_params)
                    status = self.do_test(mapped_modules, settings, test, params)
                    if status == True:
                        passed = passed + 1
                    else:
                        failed = failed + 1
            else:
                logging.info("%s is on the excluded list" % test)
    # if --test is not specified, collect and run all the
    # tests in the config file
    else:
        tests = pytaf_utils.get_all_tests(config, mapped_modules)
        for test in tests:
            if self.test_excluded(test, excluded_list) == False:
                params = pytaf_utils.get_params(config, test)
                if params != None:
                    #if test_overrides.get(test, None):
                    #    params = dict(params.items() + test_overrides[test].items())
                    params, settings = self.do_overrides(params, settings, test, test_overrides, self.override_settings, self.override_params)
                    status = self.do_test(mapped_modules, settings, test, params)
                    if status == True:
                        passed = passed + 1
                    else:
                        failed = failed + 1
            else:
                logging.info("%s is on the excluded list" % test)
    # summary report
    logging.info("---------------")
    logging.info("Tests Run: %s" % (passed + failed))
    logging.info("Passed: %s" % passed)
    logging.info("Failed: %s" % failed)
    print_results = []
    for r in self.results:
        print_results.append(r['status'] + " " + r['test_method'])
    for r in sorted(print_results):
        logging.info(r)
    # post results to the database
    if pytaf_utils.str2bool(options.db) == True:
        pytaf_utils.post_results(self.results, settings, db_config, passed, failed)