def _runTest(self, method, printTestName=False, numberOfAssertionFailed=0):
    """Run a single test method and report its outcome.

    :param method: zero-argument callable implementing the test.
    :param printTestName: when True, print the test name before running it.
    :param numberOfAssertionFailed: assertion-failure count already
        accumulated on the current env by earlier methods of the same test
        class; the test is considered failed only if the env's failure
        count grew beyond this baseline.
    :return: the env's updated assertion-failure count, or the unchanged
        baseline when no env is up.
    """
    exceptionRaised = False
    if printTestName:
        print('\t' + Colors.Cyan(method.__name__))
    try:
        if self.args.debug:
            raw_input('\tenv is up, attach to any process with gdb and press any button to continue.')
        method()
    except unittest.SkipTest:
        print('\t' + Colors.Green('Skipping test'))
    except Exception as err:
        msg = 'Unhandled exception: %s' % err
        print('\t' + Colors.Bred(msg))
        traceback.print_exc(file=sys.stdout)
        exceptionRaised = True
    # failed when: no env is up, new assertion failures were recorded on
    # the env, or the test raised an unhandled exception
    testFailed = (self.currEnv is None
                  or self.currEnv.getNumberOfFailedAssertion() > numberOfAssertionFailed
                  or exceptionRaised)
    if testFailed:
        print('\t' + Colors.Bred('Test Failed'))
        # BUGFIX: guard against currEnv being None; adding None to
        # testsFailed crashed the failure summary at the end of the run
        if self.currEnv is not None:
            self.testsFailed.add(self.currEnv)
    else:
        print('\t' + Colors.Green('Test Passed'))
    if self.args.stop_on_failure and testFailed:
        if self.args.interactive_debugger:
            while self.currEnv is not None and self.currEnv.isUp():
                time.sleep(1)
        raw_input('press any button to move to the next test')
    # BUGFIX: the unconditional attribute access raised AttributeError when
    # currEnv was None (exactly the failure case detected above)
    if self.currEnv is None:
        return numberOfAssertionFailed
    return self.currEnv.getNumberOfFailedAssertion()
def checkExitCode(self):
    """Validate that the server process(es) terminated cleanly.

    Prints a message for every server whose recorded exit code is non-zero
    (the slave is examined only when slaves are in use) and returns True
    when every checked exit code was zero.
    """
    # build the (exitCode, serverId) pairs to examine, master first
    toCheck = [(self.masterExitCode, self.masterServerId)]
    if self.useSlaves:
        toCheck.append((self.slaveExitCode, self.slaveServerId))
    allClean = True
    for exitCode, serverId in toCheck:
        if exitCode != 0:
            print('\t' + Colors.Bred('bad exit code for serverId %s' % str(serverId)))
            allClean = False
    return allClean
def _assertion(self, checkStr, trueValue, depth=0):
    """Report the outcome of a single test assertion.

    :param checkStr: human readable description of the checked condition.
    :param trueValue: truthy when the assertion passed.
    :param depth: extra stack frames to skip when resolving the caller
        position shown in the report.
    """
    if trueValue and self.verbose:
        # successful assertions are reported only in verbose mode
        print('\t' + Colors.Green('assertion success:\t') + Colors.Yellow(checkStr)
              + '\t' + Colors.Gray(self._getCallerPosition(3 + depth)))
    elif not trueValue:
        # BUGFIX: the user-facing message used to read 'assertion faild'
        failureSummary = (Colors.Bred('assertion failed:\t') + Colors.Yellow(checkStr)
                          + '\t' + Colors.Gray(self._getCallerPosition(3 + depth)))
        print('\t' + failureSummary)
        # NOTE: the attribute keeps its historical (misspelled) name on
        # purpose -- other code in the project reads assertionFailedSummery
        self.assertionFailedSummery.append(failureSummary)
def takeEnvDown(self, fullShutDown=False):
    # Tear down (or recycle) the env of the test that just finished.
    # With --env-reuse and no forced shutdown the env is only flushed so the
    # next test can reuse it; otherwise it is stopped for real.
    if self.currEnv:
        if self.args.env_reuse and not fullShutDown:
            self.currEnv.flush()
        else:
            self.currEnv.stop()
            # under valgrind a failing exit-code check means valgrind
            # reported errors, so the env counts as a failed test
            if self.args.use_valgrind and self.currEnv and not self.currEnv.checkExitCode(
            ):
                print Colors.Bred('\tvalgrind check failure')
                self.testsFailed.add(self.currEnv)
            self.currEnv = None
def _assertion(self, checkStr, trueValue, depth=0):
    """Report the outcome of a single test assertion.

    :param checkStr: human readable description of the checked condition.
    :param trueValue: truthy when the assertion passed.
    :param depth: extra stack frames to skip when resolving the caller
        position shown in the report.
    :raises TestAssertionFailure: on failure, when exit-on-failure is set.
    """
    basemsg = Colors.Yellow(checkStr) + '\t' + Colors.Gray(
        self._getCallerPosition(3 + depth))
    if trueValue and self.verbose:
        # successful assertions are reported only in verbose mode
        print('\t' + Colors.Green('✅ (OK):\t') + basemsg)
    elif not trueValue:
        failureSummary = Colors.Bred('❌ (FAIL):\t') + basemsg
        print('\t' + failureSummary)
        # BUGFIX: record the failure BEFORE optionally aborting -- the
        # original raised first, so with defaultExitOnFailure set the
        # failure never reached assertionFailedSummary (and any count
        # derived from it)
        self.assertionFailedSummary.append(failureSummary)
        if self.defaultExitOnFailure:
            raise TestAssertionFailure('Assertion Failed!')
def _stopProcess(self, role):
    """Terminate the redis process of the given role and record its exit code.

    :param role: MASTER to act on the master process, anything else for
        the slave process.

    If the process is already dead a warning is printed (unless an
    interactive debugger is attached, in which case a dead process is
    expected) and nothing else is done.  Exit codes are stored in
    masterExitCode / slaveExitCode for a later checkExitCode() call.
    """
    process = self.masterProcess if role == MASTER else self.slaveProcess
    serverId = self.masterServerId if role == MASTER else self.slaveServerId
    if not self._isAlive(process):
        if not self.has_interactive_debugger:
            # with an interactive debugger it is expected that the process
            # is no longer alive, so only warn otherwise
            # BUGFIX: message typos fixed ('might have crash durring')
            print('\t' + Colors.Bred('process is not alive, might have crashed during test execution, check this out. server id : %s' % str(serverId)))
        return
    try:
        process.terminate()
        process.wait()
        if role == MASTER:
            self.masterExitCode = process.poll()
        else:
            self.slaveExitCode = process.poll()
    except OSError:
        # the process vanished between the aliveness check and terminate()
        pass
def __init__(self):
    """Build the command line parser, parse the arguments (optionally seeded
    from a configuration file found in the working directory) and propagate
    the resulting options onto the Env class-level defaults.
    """
    parser = CustomArgumentParser(
        fromfile_prefix_chars=RLTest_CONFIG_FILE_PREFIX,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Test Framework for redis and redis module')

    parser.add_argument('--module', default=None,
                        help='path to the module file')
    parser.add_argument('--module-args', default=None,
                        help='arguments to give to the module on loading')
    parser.add_argument('--env', '-e', default='oss',
                        choices=['oss', 'oss-cluster', 'enterprise', 'enterprise-cluster'],
                        help='env on which to run the test')
    parser.add_argument('--oss-redis-path', default='redis-server',
                        help='path to the oss redis binary')
    parser.add_argument('--enterprise-redis-path',
                        default=os.path.join(RLTest_WORKING_DIR, 'opt/redislabs/bin/redis-server'),
                        help='path to the enterprise redis binary')
    parser.add_argument('--stop-on-failure', action='store_const', const=True, default=False,
                        help='stop running on failure')
    parser.add_argument('--verbose', '-v', action='count', default=0,
                        help='print more information about the test')
    parser.add_argument('--debug', action='store_const', const=True, default=False,
                        help='stop before each test allow gdb attachment')
    parser.add_argument('-t', '--test',
                        help='Specify test to run, in the form of "file:test"')
    parser.add_argument('--tests-dir', default='.',
                        help='directory on which to run the tests')
    parser.add_argument('--test-name', default=None,
                        help='test name to run')
    parser.add_argument('--tests-file', default=None,
                        help='tests file to run (without the .py extension)')
    parser.add_argument('--env-only', action='store_const', const=True, default=False,
                        help='start the env but do not run any tests')
    parser.add_argument('--clear-logs', action='store_const', const=True, default=False,
                        help='delete the log directory before the execution')
    parser.add_argument('--log-dir', default='./logs',
                        help='directory to write logs to')
    parser.add_argument('--use-slaves', action='store_const', const=True, default=False,
                        help='run env with slaves enabled')
    parser.add_argument('--shards-count', default=1, type=int,
                        help='Number shards in bdb')
    # BUGFIX: help text was a copy-paste of --use-slaves
    # ('run env with slaves enabled')
    parser.add_argument('--download-enterprise-binaries', action='store_const', const=True, default=False,
                        help='download the enterprise binaries before running the tests')
    parser.add_argument('--proxy-binary-path',
                        default=os.path.join(RLTest_WORKING_DIR, 'opt/redislabs/bin/dmcproxy'),
                        help='dmc proxy binary path')
    parser.add_argument('--enterprise-lib-path',
                        default=os.path.join(RLTest_WORKING_DIR, 'opt/redislabs/lib/'),
                        help='path of needed libraries to run enterprise binaries')
    parser.add_argument('--env-reuse', action='store_const', const=True, default=False,
                        help='reuse existing env, this feature is based on best efforts, '
                             'if the env can not be reused then it will be taken down.')
    parser.add_argument('--use-aof', action='store_const', const=True, default=False,
                        help='use aof instead of rdb')
    parser.add_argument('--debug-print', action='store_const', const=True, default=False,
                        help='print debug messages')
    parser.add_argument('--use-valgrind', action='store_const', const=True, default=False,
                        help='running redis under valgrind (assuming valgrind is installed on the machine)')
    parser.add_argument('--valgrind-suppressions-file', default=None,
                        help='path to valgrind suppressions file')
    parser.add_argument('--config-file', default=None,
                        help='path to configuration file, parameter values will be taken from the configuration file, '
                             'values which were not specified in the configuration file will get their value from the command line args, '
                             'values which were specified neither in the configuration file nor in the command line args will get their default value')
    parser.add_argument('--interactive-debugger', action='store_const', const=True, default=False,
                        help='runs the redis on a debugger (gdb/lldb) interactively. '
                             'debugger interactive mode is only possible on a single process and so unsupported on cluster or with slaves. '
                             'it is also not possible to use valgrind on interactive mode. '
                             'interactive mode directly applies: --no-output-catch and --stop-on-failure. '
                             'it also implies that only one test will be run (if --env-only was not specified), an error will be raised otherwise.')
    parser.add_argument('--debugger-args', default=None,
                        help='arguments to the interactive debugger')
    parser.add_argument('--no-output-catch', action='store_const', const=True, default=False,
                        help='all output will be written to the stdout, no log files.')

    # when a config file exists in the working directory, prepend it (via
    # argparse's fromfile prefix) so explicit command line args still win
    configFilePath = './%s' % RLTest_CONFIG_FILE_NAME
    if os.path.exists(configFilePath):
        args = ['%s%s' % (RLTest_CONFIG_FILE_PREFIX, RLTest_CONFIG_FILE_NAME)] + sys.argv[1:]
    else:
        args = sys.argv[1:]
    self.args = parser.parse_args(args=args)

    if self.args.interactive_debugger:
        # the interactive debugger can drive exactly one local process, so
        # reject every multi-process / incompatible configuration up front
        if self.args.env != 'oss' and self.args.env != 'enterprise':
            print(Colors.Bred('interactive debugger can only be used on non cluster env'))
            sys.exit(1)
        if self.args.use_valgrind:
            print(Colors.Bred('can not use valgrind with interactive debugger'))
            sys.exit(1)
        if self.args.use_slaves:
            print(Colors.Bred('can not use slaves with interactive debugger'))
            sys.exit(1)
        # interactive mode forces these two flags
        self.args.no_output_catch = True
        self.args.stop_on_failure = True

    if self.args.download_enterprise_binaries:
        self._downloadEnterpriseBinaries()

    if self.args.clear_logs:
        try:
            shutil.rmtree(self.args.log_dir)
        except Exception:
            pass  # best effort: the directory may simply not exist

    # propagate parsed options onto the Env class-level defaults
    Env.defaultModule = self.args.module
    Env.defaultModuleArgs = self.args.module_args
    Env.defaultEnv = self.args.env
    Env.defaultOssRedisBinary = self.args.oss_redis_path
    Env.defaultVerbose = self.args.verbose
    Env.defaultLogDir = self.args.log_dir
    Env.defaultUseSlaves = self.args.use_slaves
    Env.defaultShardsCount = self.args.shards_count
    Env.defaultProxyBinaryPath = self.args.proxy_binary_path
    Env.defaultEnterpriseRedisBinaryPath = self.args.enterprise_redis_path
    Env.defaultEnterpriseLibsPath = self.args.enterprise_lib_path
    Env.defaultUseAof = self.args.use_aof
    Env.defaultDebugPrints = self.args.debug_print
    Env.defaultUseValgrind = self.args.use_valgrind
    Env.defaultValgrindSuppressionsFile = self.args.valgrind_suppressions_file
    Env.defaultInteractiveDebugger = self.args.interactive_debugger
    Env.defaultInteractiveDebuggerArgs = self.args.debugger_args
    Env.defaultNoCatch = self.args.no_output_catch

    sys.path.append(self.args.tests_dir)
    self.tests = []
    self.currEnv = None
def execute(self):
    # Entry point of a test run: load the tests, run them one by one (each
    # inside an env scope guard), then tear everything down and print a
    # summary.  Exits the process with code 1 when any test failed.
    self.testsFailed = set()
    Env.RTestInstance = self
    if self.args.env_only:
        # --env-only: just bring an env up and wait for the user, no tests
        Env.defaultVerbose = 2
        env = Env(testName='manual test env')
        if self.args.interactive_debugger:
            # wait for the debugged process to terminate before prompting
            while env.isUp():
                time.sleep(1)
        raw_input('press any button to stop')
        env.stop()
        return
    if self.args.tests_file:
        self._loadFileTests(self.args.tests_file)
    else:
        self._loadTests()
    done = 0
    startTime = time.time()
    if self.args.interactive_debugger and len(self.tests) != 1:
        print Colors.Bred(
            'only one test can be run on interactive-debugger use --test-name'
        )
        sys.exit(1)
    while self.tests:
        with self.envScopeGuard():
            test = self.tests.pop(0)
            if inspect.isclass(test):

                # checking if there are tests to run
                methodsToTest = []
                for m in dir(test):
                    if self.args.test_name is not None:
                        # --test-name given: run only the matching method
                        if self.args.test_name == m:
                            methodsToTest.append(m)
                    elif m.startswith('test') or m.startswith('Test'):
                        methodsToTest.append(m)
                if len(methodsToTest) == 0:
                    continue
                try:
                    # instantiation is where the env is usually created, so
                    # an exception here counts as a failed test
                    testObj = test()
                except unittest.SkipTest:
                    print '\t' + Colors.Green('Skipping test')
                    continue
                except Exception as err:
                    msg = 'Unhandled exception: %s' % err
                    print '\t' + Colors.Bred(msg)
                    traceback.print_exc(file=sys.stdout)
                    print '\t' + Colors.Bred('Test Failed')
                    if self.currEnv:
                        self.testsFailed.add(self.currEnv)
                    continue
                methods = [
                    getattr(testObj, m) for m in dir(testObj)
                    if callable(getattr(testObj, m)) and (
                        m.startswith('test') or m.startswith('Test'))
                ]
                # all methods of one class share an env, so the
                # assertion-failure baseline accumulates across them
                numberOfAssertionFailed = 0
                for m in methods:
                    if self.args.test_name is None or self.args.test_name == m.__name__:
                        numberOfAssertionFailed = self._runTest(
                            m,
                            printTestName=True,
                            numberOfAssertionFailed=numberOfAssertionFailed
                        )
                        done += 1
            elif not inspect.isfunction(test):
                # not a class and not a function: nothing runnable
                continue
            elif len(inspect.getargspec(test).args) > 0:
                # plain test function that expects an env argument
                # NOTE(review): test.func_name and inspect.getargspec are
                # Python 2 era APIs
                env = Env(testName='%s.%s' % (str(test.__module__),
                                              test.func_name))
                self._runTest(lambda: test(env))
                done += 1
            else:
                # plain test function with no arguments
                self._runTest(test)
                done += 1
    self.takeEnvDown(fullShutDown=True)
    endTime = time.time()
    print Colors.Bold('Test Took: %d sec' % (endTime - startTime))
    print Colors.Bold(
        'Total Tests Run: %d, Total Tests Failed: %d, Total Tests Passed: %d'
        % (done, len(self.testsFailed), done - len(self.testsFailed)))
    if len(self.testsFailed) > 0:
        print Colors.Bold('Faild Tests Summery:')
        for testFaild in self.testsFailed:
            print '\t' + Colors.Bold(testFaild.testNamePrintable)
            testFaild.printFailuresSummery('\t\t')
        sys.exit(1)