def run_tests(self):
    """Run the gaia unit test suite against the built b2g-bin binary."""
    abs_dirs = self.query_abs_dirs()

    # Build a debug gaia profile for the run.
    self.make_gaia(abs_dirs['abs_gaia_dir'],
                   self.config.get('xre_path'),
                   debug=True)

    # Assemble the python test-runner invocation.
    python = self.query_python_path('python')
    runner_script = os.path.join(abs_dirs['abs_runner_dir'],
                                 'gaia_unit_test', 'main.py')
    runner_cmd = [python, '-u', runner_script]
    b2g_binary = os.path.join(os.path.dirname(self.binary_path), 'b2g-bin')
    runner_cmd.extend(self._build_arg('--binary', b2g_binary))
    runner_cmd.extend(self._build_arg(
        '--profile', os.path.join(abs_dirs['abs_gaia_dir'], 'profile-debug')))
    runner_cmd.extend(self._build_arg('--symbols-path', self.symbols_path))

    summary_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)
    # I don't like this output_timeout hardcode, but bug 920153
    exit_code = self.run_command(runner_cmd,
                                 output_parser=summary_parser,
                                 output_timeout=1760)
    summary_parser.print_summary('gaia-unit-tests')
    self.publish(exit_code)
def run_tests(self):
    """Run the gaia integration suite via `make test-integration`."""
    abs_dirs = self.query_abs_dirs()
    self.node_setup()

    summary_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)

    # Bug 1046694 - the harness reads PART/NBPARTS to pick which chunk
    # of the test manifest to execute.
    partial_env = {}
    if self.config.get('this_chunk') and self.config.get('total_chunks'):
        partial_env["PART"] = self.config.get('this_chunk')
        partial_env["NBPARTS"] = self.config.get('total_chunks')
    env = self.query_env(partial_env=partial_env)

    # `make test-integration \
    #   MOCHA_REPORTER=mocha-tbpl-reporter \
    #   NPM_REGISTRY=http://npm-mirror.pub.build.mozilla.org`
    make_cmd = [
        'make',
        'test-integration',
        'NPM_REGISTRY=' + self.config.get('npm_registry'),
        'REPORTER=mocha-tbpl-reporter',
        'TEST_MANIFEST=./shared/test/integration/tbpl-manifest.json',
    ]
    status = self.run_command(make_cmd,
                              cwd=abs_dirs['abs_gaia_dir'],
                              env=env,
                              output_parser=summary_parser,
                              output_timeout=330)
    summary_parser.print_summary('gaia-integration-tests')
    self.publish(status)
def run_tests(self):
    """Run the gaia integration suite against the locally built b2g desktop."""
    abs_dirs = self.query_abs_dirs()

    summary_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)

    # Copy the b2g desktop we built to the gaia directory so that it
    # gets used by the marionette-js-runner.
    src_b2g = os.path.join(abs_dirs['abs_work_dir'], 'b2g')
    dst_b2g = os.path.join(abs_dirs['abs_gaia_dir'], 'b2g')
    self.copytree(src_b2g, dst_b2g, overwrite='clobber')

    # `make test-integration \
    #   MOCHA_REPORTER=mocha-tbpl-reporter \
    #   NPM_REGISTRY=http://npm-mirror.pub.build.mozilla.org`
    make_cmd = self.query_exe('make', return_type='list') + ['test-integration']
    make_env = {
        'MOCHA_REPORTER': 'mocha-tbpl-reporter',
        'NPM_REGISTRY': self.config.get('npm_registry'),
    }
    status = self.run_command(make_cmd,
                              cwd=abs_dirs['abs_gaia_dir'],
                              env=make_env,
                              output_parser=summary_parser)
    summary_parser.print_summary('gaia-integration-tests')
    self.publish(status)
def run_tests(self):
    """Run the gaia integration suite via `make test-integration`.

    Copies the built b2g desktop next to gaia so the
    marionette-js-runner picks it up, installs node modules, then runs
    make with the TBPL reporter and publishes the exit code.
    """
    dirs = self.query_abs_dirs()

    # Copy the b2g desktop we built to the gaia directory so that it
    # gets used by the marionette-js-runner.
    # (Fixed: os.path.join() with a single argument was a no-op wrapper
    # around os.path.dirname; call dirname directly.)
    self.copytree(
        os.path.dirname(self.binary_path),
        os.path.join(dirs['abs_gaia_dir'], 'b2g'),
        overwrite='clobber'
    )

    self.make_node_modules()

    output_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)

    # `make test-integration \
    #   MOCHA_REPORTER=mocha-tbpl-reporter \
    #   NPM_REGISTRY=http://npm-mirror.pub.build.mozilla.org`
    code = self.run_command([
        'make',
        'test-integration',
        'NPM_REGISTRY=' + self.config.get('npm_registry'),
        'REPORTER=mocha-tbpl-reporter',
        'TEST_MANIFEST=./shared/test/integration/tbpl-manifest.json'
    ], cwd=dirs['abs_gaia_dir'],
       output_parser=output_parser,
       output_timeout=330)

    output_parser.print_summary('gaia-integration-tests')
    self.publish(code)
def run_tests(self):
    """Run the gaia unit test suite using the b2g bundle under the work dir."""
    abs_dirs = self.query_abs_dirs()

    # Produce a debug gaia profile first.
    self.make_gaia(abs_dirs['abs_gaia_dir'],
                   self.config.get('xre_path'),
                   debug=True)

    # Put together the python test-runner command line.
    python = self.query_python_path('python')
    runner_cmd = [
        python,
        '-u',
        os.path.join(abs_dirs['abs_runner_dir'], 'gaia_unit_test', 'main.py'),
    ]
    b2g_binary = os.path.join(abs_dirs['abs_work_dir'], 'b2g', 'b2g-bin')
    debug_profile = os.path.join(abs_dirs['abs_gaia_dir'], 'profile-debug')
    runner_cmd.extend(self._build_arg('--binary', b2g_binary))
    runner_cmd.extend(self._build_arg('--profile', debug_profile))

    summary_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)
    status = self.run_command(runner_cmd, output_parser=summary_parser)
    summary_parser.print_summary('gaia-unit-tests')
    self.publish(status)
def run_tests(self):
    """Run the gaia unit test suite."""
    abs_dirs = self.query_abs_dirs()

    # Debug gaia profile is required by the unit-test runner.
    self.make_gaia(abs_dirs['abs_gaia_dir'],
                   self.config.get('xre_path'),
                   debug=True)

    # Compose the runner invocation piece by piece.
    python = self.query_python_path('python')
    main_py = os.path.join(abs_dirs['abs_runner_dir'],
                           'gaia_unit_test', 'main.py')
    cmd = [python, '-u', main_py]
    cmd += self._build_arg(
        '--binary',
        os.path.join(abs_dirs['abs_work_dir'], 'b2g', 'b2g-bin'))
    cmd += self._build_arg(
        '--profile',
        os.path.join(abs_dirs['abs_gaia_dir'], 'profile-debug'))

    parser = TestSummaryOutputParserHelper(config=self.config,
                                           log_obj=self.log_obj,
                                           error_list=self.error_list)
    rc = self.run_command(cmd, output_parser=parser)
    parser.print_summary('gaia-unit-tests')
    self.publish(rc)
def run_tests(self):
    """Run the gaia integration suite via `make test-integration`."""
    abs_dirs = self.query_abs_dirs()

    summary_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)

    # `make test-integration \
    #   MOCHA_REPORTER=mocha-tbpl-reporter \
    #   NPM_REGISTRY=http://npm-mirror.pub.build.mozilla.org`
    make_exe = self.query_exe('make', return_type='string')
    make_env = {
        'MOCHA_REPORTER': 'mocha-tbpl-reporter',
        'NPM_REGISTRY': self.config.get('npm_registry'),
    }
    status = self.run_command([make_exe, 'test-integration'],
                              cwd=abs_dirs['abs_gaia_dir'],
                              env=make_env,
                              output_parser=summary_parser)
    summary_parser.print_summary('gaia-integration-tests')
    self.publish(status)
def run_tests(self):
    """Run the gaia build integration suite (`make build-test-integration`)."""
    abs_dirs = self.query_abs_dirs()
    self.node_setup()

    summary_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)

    make_cmd = [
        'make',
        'build-test-integration',
        'REPORTER=mocha-tbpl-reporter',
        'NODE_MODULES_SRC=npm-cache',
        'VIRTUALENV_EXISTS=1',
        'TRY_ENV=1',
    ]
    # for Mulet
    if 'firefox' in self.binary_path:
        make_cmd.append('RUNTIME=%s' % self.binary_path)

    status = self.run_command(make_cmd,
                              cwd=abs_dirs['abs_gaia_dir'],
                              output_parser=summary_parser,
                              output_timeout=600)
    summary_parser.print_summary('gaia-build-integration-tests')
    self.publish(status)
def run_tests(self):
    """Run the gaia integration suite and publish pass/fail counts."""
    abs_dirs = self.query_abs_dirs()
    self.node_setup()

    summary_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)

    # Bug 1046694 - add environment variables which govern test chunking
    partial_env = {}
    if self.config.get('this_chunk') and self.config.get('total_chunks'):
        partial_env["PART"] = self.config.get('this_chunk')
        partial_env["NBPARTS"] = self.config.get('total_chunks')
    env = self.query_env(partial_env=partial_env)

    # Bug 1137884 - marionette-js-runner needs to know about virtualenv
    gaia_runner_service = (
        abs_dirs['abs_gaia_dir'] +
        '/node_modules/marionette-js-runner/host/python/runner-service')
    # Check whether python package is around since there exist versions
    # of gaia that depend on versions of marionette-js-runner without
    # the python stuff.
    if os.path.exists(gaia_runner_service):
        self.install_module('gaia-runner-service', gaia_runner_service)
    env['VIRTUALENV_PATH'] = self.query_virtualenv_path()
    env['HOST_LOG'] = os.path.join(abs_dirs['abs_log_dir'],
                                   'goanna_output.log')

    make_cmd = [
        'make',
        'test-integration',
        'REPORTER=mocha-tbpl-reporter',
        'TEST_MANIFEST=./shared/test/integration/tbpl-manifest.json',
        'NODE_MODULE_SRC=npm-cache',
        'VIRTUALENV_EXISTS=1',
    ]
    # for Mulet
    if 'firefox' in self.binary_path:
        make_cmd.append('RUNTIME=%s' % self.binary_path)

    status = self.run_command(make_cmd,
                              cwd=abs_dirs['abs_gaia_dir'],
                              env=env,
                              output_parser=summary_parser,
                              output_timeout=330)
    summary_parser.print_summary('gaia-integration-tests')
    self.publish(status,
                 passed=summary_parser.passed,
                 failed=summary_parser.failed)
def run_tests(self):
    """Run the gaia unit test suite with blobber upload support."""
    abs_dirs = self.query_abs_dirs()
    self.make_node_modules()

    # Build a debug gaia profile (XRE fetched from xre_url if needed).
    self.make_gaia(abs_dirs['abs_gaia_dir'],
                   self.config.get('xre_path'),
                   xre_url=self.config.get('xre_url'),
                   debug=True)

    # Assemble the python test-runner command line.
    python = self.query_python_path('python')
    runner_cmd = [
        python,
        '-u',
        os.path.join(abs_dirs['abs_runner_dir'], 'gaia_unit_test', 'main.py'),
    ]
    b2g_binary = os.path.join(os.path.dirname(self.binary_path), 'b2g-bin')
    runner_cmd.extend(self._build_arg('--binary', b2g_binary))
    runner_cmd.extend(self._build_arg(
        '--profile', os.path.join(abs_dirs['abs_gaia_dir'], 'profile-debug')))
    runner_cmd.extend(self._build_arg('--symbols-path', self.symbols_path))
    runner_cmd.extend(self._build_arg('--browser-arg',
                                      self.config.get('browser_arg')))

    summary_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)

    # Make sure the blobber upload directory exists before pointing
    # MOZ_UPLOAD_DIR at it.
    upload_dir = self.query_abs_dirs()['abs_blob_upload_dir']
    if not os.path.isdir(upload_dir):
        self.mkdir_p(upload_dir)
    env = self.query_env()
    env['MOZ_UPLOAD_DIR'] = upload_dir

    # I don't like this output_timeout hardcode, but bug 920153
    status = self.run_command(runner_cmd,
                              env=env,
                              output_parser=summary_parser,
                              output_timeout=1760)
    summary_parser.print_summary('gaia-unit-tests')
    self.publish(status)
def run_tests(self):
    """Run the gaia integration suite; reports pass/fail counts upstream."""
    dirs = self.query_abs_dirs()
    self.node_setup()

    parser = TestSummaryOutputParserHelper(config=self.config,
                                           log_obj=self.log_obj,
                                           error_list=self.error_list)

    # Bug 1046694 - PART/NBPARTS drive test chunking inside the harness.
    chunk_env = {}
    if self.config.get('this_chunk') and self.config.get('total_chunks'):
        chunk_env["PART"] = self.config.get('this_chunk')
        chunk_env["NBPARTS"] = self.config.get('total_chunks')
    env = self.query_env(partial_env=chunk_env)

    # Bug 1137884 - marionette-js-runner needs to know about virtualenv
    gaia_runner_service = (
        dirs['abs_gaia_dir'] +
        '/node_modules/marionette-js-runner/host/python/runner-service')
    # Check whether python package is around since there exist versions
    # of gaia that depend on versions of marionette-js-runner without
    # the python stuff.
    if os.path.exists(gaia_runner_service):
        self.install_module('gaia-runner-service', gaia_runner_service)
    env['VIRTUALENV_PATH'] = self.query_virtualenv_path()
    env['HOST_LOG'] = os.path.join(dirs['abs_log_dir'], 'gecko_output.log')

    cmd = ['make',
           'test-integration',
           'REPORTER=mocha-tbpl-reporter',
           'TEST_MANIFEST=./shared/test/integration/tbpl-manifest.json',
           'NODE_MODULE_SRC=npm-cache',
           'VIRTUALENV_EXISTS=1']
    # for Mulet
    if 'firefox' in self.binary_path:
        cmd += ['RUNTIME=%s' % self.binary_path]

    rc = self.run_command(cmd,
                          cwd=dirs['abs_gaia_dir'],
                          env=env,
                          output_parser=parser,
                          output_timeout=330)
    parser.print_summary('gaia-integration-tests')
    self.publish(rc, passed=parser.passed, failed=parser.failed)
def run_tests(self):
    """Run the Gaia unit tests and report a TBPL status to buildbot."""
    abs_dirs = self.query_abs_dirs()

    # Build a debug gaia profile for the run.
    self.make_gaia(abs_dirs['abs_gaia_dir'],
                   self.config.get('xre_path'),
                   debug=True)

    # Assemble the python test-runner invocation.
    python = self.query_python_path('python')
    runner_cmd = [python, '-u',
                  os.path.join(abs_dirs['abs_runner_dir'],
                               'gaia_unit_test', 'main.py')]
    runner_cmd.extend(self._build_arg(
        '--binary',
        os.path.join(abs_dirs['abs_work_dir'], 'b2g', 'b2g-bin')))
    runner_cmd.extend(self._build_arg(
        '--profile',
        os.path.join(abs_dirs['abs_gaia_dir'], 'profile-debug')))

    summary_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)
    code = self.run_command(runner_cmd, output_parser=summary_parser)

    # Translate the runner exit code into a TBPL result:
    # 0 -> success, 10 -> test failures, anything else -> harness failure.
    level = INFO
    if code == 0:
        status = "success"
        tbpl_status = TBPL_SUCCESS
    elif code == 10:
        status = "test failures"
        tbpl_status = TBPL_WARNING
    else:
        status = "harness failures"
        level = ERROR
        tbpl_status = TBPL_FAILURE

    summary_parser.print_summary('gaia-unit-tests')
    self.log("Gaia-unit-tests exited with return code %s: %s" % (code, status),
             level=level)
    self.buildbot_status(tbpl_status)
def run_tests(self):
    """Run the gaia unit test suite."""
    dirs = self.query_abs_dirs()
    self.make_node_modules()

    # make the gaia profile (debug build; XRE may come from xre_url)
    self.make_gaia(dirs['abs_gaia_dir'],
                   self.config.get('xre_path'),
                   xre_url=self.config.get('xre_url'),
                   debug=True)

    # build the testrunner command arguments
    python = self.query_python_path('python')
    cmd = [python, '-u',
           os.path.join(dirs['abs_runner_dir'], 'gaia_unit_test', 'main.py')]
    cmd += self._build_arg(
        '--binary',
        os.path.join(os.path.dirname(self.binary_path), 'b2g-bin'))
    cmd += self._build_arg(
        '--profile',
        os.path.join(dirs['abs_gaia_dir'], 'profile-debug'))
    cmd += self._build_arg('--symbols-path', self.symbols_path)
    cmd += self._build_arg('--browser-arg', self.config.get('browser_arg'))

    parser = TestSummaryOutputParserHelper(config=self.config,
                                           log_obj=self.log_obj,
                                           error_list=self.error_list)

    # Ensure the blobber upload dir exists; the runner drops artifacts there.
    upload_dir = self.query_abs_dirs()['abs_blob_upload_dir']
    if not os.path.isdir(upload_dir):
        self.mkdir_p(upload_dir)
    env = self.query_env()
    env['MOZ_UPLOAD_DIR'] = upload_dir

    # I don't like this output_timeout hardcode, but bug 920153
    rc = self.run_command(cmd,
                          env=env,
                          output_parser=parser,
                          output_timeout=1760)
    parser.print_summary('gaia-unit-tests')
    self.publish(rc)
def run_tests(self):
    """Run the gaia integration suite via `make test-integration`.

    Copies the built b2g desktop into the gaia tree so the
    marionette-js-runner uses it, clears the npm cache, runs make with
    the TBPL reporter, dumps npm-debug.log (if any) for diagnosis, and
    publishes the exit code.
    """
    dirs = self.query_abs_dirs()

    output_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)

    # Copy the b2g desktop we built to the gaia directory so that it
    # gets used by the marionette-js-runner.
    self.copytree(
        os.path.join(dirs['abs_work_dir'], 'b2g'),
        os.path.join(dirs['abs_gaia_dir'], 'b2g'),
        overwrite='clobber'
    )

    self.run_command(['npm', 'cache', 'clean'])

    # `make test-integration \
    #   MOCHA_REPORTER=mocha-tbpl-reporter \
    #   NPM_REGISTRY=http://npm-mirror.pub.build.mozilla.org`
    code = self.run_command([
        'make',
        'test-integration',
        'NPM_REGISTRY=' + self.config.get('npm_registry'),
        'REPORTER=mocha-tbpl-reporter',
        'TEST_MANIFEST=./shared/test/integration/tbpl-manifest.json'
    ], cwd=dirs['abs_gaia_dir'],
       output_parser=output_parser,
       output_timeout=330)

    # Dump npm-debug.log, if it exists.
    # (Fixed: use the idiomatic os.path.exists instead of
    # os.access(..., os.F_OK), which merely re-spells existence.)
    npm_debug = os.path.join(dirs['abs_gaia_dir'], 'npm-debug.log')
    if os.path.exists(npm_debug):
        self.info('dumping npm-debug.log')
        self.run_command(['cat', npm_debug])

    output_parser.print_summary('gaia-integration-tests')
    self.publish(code)
def run_tests(self):
    """Run the gaia build integration suite."""
    abs_dirs = self.query_abs_dirs()
    self.node_setup()

    summary_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)

    make_cmd = [
        'make',
        'build-test-integration',
        'NPM_REGISTRY=' + self.config.get('npm_registry'),
        'REPORTER=mocha-tbpl-reporter',
    ]
    status = self.run_command(make_cmd,
                              cwd=abs_dirs['abs_gaia_dir'],
                              output_parser=summary_parser,
                              output_timeout=330)
    summary_parser.print_summary('gaia-build-integration-tests')
    self.publish(status)
def run_tests(self):
    """Run the gaia integration suite; publishes pass/fail counts."""
    abs_dirs = self.query_abs_dirs()
    self.node_setup()

    summary_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)

    # Bug 1046694 - PART/NBPARTS tell the harness which chunk to run.
    partial_env = {}
    if self.config.get('this_chunk') and self.config.get('total_chunks'):
        partial_env["PART"] = self.config.get('this_chunk')
        partial_env["NBPARTS"] = self.config.get('total_chunks')
    env = self.query_env(partial_env=partial_env)

    # `make test-integration \
    #   MOCHA_REPORTER=mocha-tbpl-reporter \
    #   NPM_REGISTRY=http://npm-mirror.pub.build.mozilla.org`
    make_cmd = [
        'make',
        'test-integration',
        'NPM_REGISTRY=' + self.config.get('npm_registry'),
        'REPORTER=mocha-tbpl-reporter',
        'TEST_MANIFEST=./shared/test/integration/tbpl-manifest.json',
    ]
    # for Mulet
    if 'firefox' in self.binary_path:
        make_cmd.append('RUNTIME=%s' % self.binary_path)

    status = self.run_command(make_cmd,
                              cwd=abs_dirs['abs_gaia_dir'],
                              env=env,
                              output_parser=summary_parser,
                              output_timeout=330)
    summary_parser.print_summary('gaia-integration-tests')
    self.publish(status,
                 passed=summary_parser.passed,
                 failed=summary_parser.failed)
def run_tests(self):
    """Run the gaia build unit test suite (`make build-test-unit`)."""
    abs_dirs = self.query_abs_dirs()
    self.node_setup()

    summary_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)

    make_cmd = [
        'make',
        'build-test-unit',
        'REPORTER=mocha-tbpl-reporter',
        'NODE_MODULES_SRC=npm-cache',
        'VIRTUALENV_EXISTS=1',
        'TRY_ENV=1',
    ]
    status = self.run_command(make_cmd,
                              cwd=abs_dirs['abs_gaia_dir'],
                              output_parser=summary_parser,
                              output_timeout=330)
    summary_parser.print_summary('gaia-build-unit-tests')
    self.publish(status)
def run_tests(self):
    """Run the gaia build integration suite on try."""
    abs_dirs = self.query_abs_dirs()
    self.node_setup()

    summary_parser = TestSummaryOutputParserHelper(
        config=self.config,
        log_obj=self.log_obj,
        error_list=self.error_list)

    make_cmd = [
        'make',
        'build-test-integration',
        'NPM_REGISTRY=' + self.config.get('npm_registry'),
        'REPORTER=mocha-tbpl-reporter',
        'TRY_ENV=1',
    ]
    status = self.run_command(make_cmd,
                              cwd=abs_dirs['abs_gaia_dir'],
                              output_parser=summary_parser,
                              output_timeout=330)
    summary_parser.print_summary('gaia-build-integration-tests')
    self.publish(status)
def run_tests(self):
    """ Run the tests

    Dispatches the configured emulator test suite: builds the suite
    command, sets up the environment (minidump stackwalk, blobber
    upload dir, chunking suffix), runs it with a suite-appropriate
    output parser, then uploads emulator logs and reports TBPL status
    to buildbot.
    """
    dirs = self.query_abs_dirs()
    error_list = self.error_list
    error_list.extend(BaseErrorList)
    # The suite must be one this script knows how to launch.
    suite = self.config["test_suite"]
    if suite not in self.test_suites:
        self.fatal("Don't know how to run --test-suite '%s'!" % suite)
    cmd = self._query_abs_base_cmd(suite)
    cwd = dirs["abs_%s_dir" % suite]
    # TODO we probably have to move some of the code in
    # scripts/desktop_unittest.py and scripts/marionette.py to
    # mozharness.mozilla.testing.unittest so we can share it.
    # In the short term, I'm ok with some duplication of code if it
    # expedites things; please file bugs to merge if that happens.
    # suite_name is the known suite that matches the configured name;
    # suite gets a "-<chunk>" suffix for display when chunked.
    suite_name = [x for x in self.test_suites
                  if x in self.config["test_suite"]][0]
    if self.config.get("this_chunk"):
        suite = "%s-%s" % (suite_name, self.config["this_chunk"])
    else:
        suite = suite_name
    env = {}
    if self.query_minidump_stackwalk():
        env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
    # Artifacts written to MOZ_UPLOAD_DIR get picked up by blobber.
    env["MOZ_UPLOAD_DIR"] = dirs["abs_blob_upload_dir"]
    if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
        self.mkdir_p(env["MOZ_UPLOAD_DIR"])
    env = self.query_env(partial_env=env)
    success_codes = self._get_success_codes(suite_name)
    # marionette uses the summary parser; other suites get their own.
    if suite_name == "marionette":
        parser = TestSummaryOutputParserHelper(config=self.config,
                                               log_obj=self.log_obj,
                                               error_list=self.error_list)
    else:
        parser = self.get_test_output_parser(
            suite_name,
            config=self.config,
            log_obj=self.log_obj,
            error_list=error_list
        )
    return_code = self.run_command(
        cmd,
        cwd=cwd,
        env=env,
        output_timeout=1000,
        output_parser=parser,
        success_codes=success_codes
    )
    # Emulator logs: always upload qemu.log if present.
    logcat = os.path.join(dirs["abs_work_dir"], "emulator-5554.log")
    qemu = os.path.join(dirs["abs_work_dir"], "qemu.log")
    if os.path.isfile(qemu):
        self.copyfile(qemu, os.path.join(env["MOZ_UPLOAD_DIR"],
                                         os.path.basename(qemu)))
    tbpl_status, log_level = parser.evaluate_parser(
        return_code, success_codes=success_codes)
    if os.path.isfile(logcat):
        if tbpl_status != TBPL_SUCCESS:
            # On failure, dump logcat, check if the emulator is still
            # running, and if it is still accessible via adb.
            self.info("dumping logcat")
            self.run_command(["cat", logcat], error_list=LogcatErrorList)
            self.run_command(["ps", "-C", "emulator"])
            self.run_command([self.adb_path, "devices"])
        # upload logcat to blobber
        self.copyfile(logcat, os.path.join(env["MOZ_UPLOAD_DIR"],
                                           os.path.basename(logcat)))
    else:
        self.info("no logcat file found")
    parser.append_tinderboxprint_line(suite_name)
    self.buildbot_status(tbpl_status, level=log_level)
    self.log("The %s suite: %s ran with return status: %s" %
             (suite_name, suite, tbpl_status), level=log_level)
def run_tests(self):
    """ Run the tests

    Runs the configured emulator test suite: builds the suite command,
    prepares the environment (minidump stackwalk, blobber upload dir,
    chunk suffix), executes with a suite-specific output parser, then
    uploads emulator logs and reports TBPL status to buildbot.
    """
    dirs = self.query_abs_dirs()
    error_list = self.error_list
    error_list.extend(BaseErrorList)
    # Reject suite names this script does not know how to launch.
    suite = self.config['test_suite']
    if suite not in self.test_suites:
        self.fatal("Don't know how to run --test-suite '%s'!" % suite)
    cmd = self._query_abs_base_cmd(suite)
    cwd = dirs['abs_%s_dir' % suite]
    # TODO we probably have to move some of the code in
    # scripts/desktop_unittest.py and scripts/marionette.py to
    # mozharness.mozilla.testing.unittest so we can share it.
    # In the short term, I'm ok with some duplication of code if it
    # expedites things; please file bugs to merge if that happens.
    # suite_name is the canonical suite; suite carries an optional
    # "-<chunk>" suffix used only for log/display purposes.
    suite_name = [x for x in self.test_suites
                  if x in self.config['test_suite']][0]
    if self.config.get('this_chunk'):
        suite = '%s-%s' % (suite_name, self.config['this_chunk'])
    else:
        suite = suite_name
    env = {}
    if self.query_minidump_stackwalk():
        env['MINIDUMP_STACKWALK'] = self.minidump_stackwalk_path
    # Files written to MOZ_UPLOAD_DIR are collected by blobber.
    env['MOZ_UPLOAD_DIR'] = dirs['abs_blob_upload_dir']
    if not os.path.isdir(env['MOZ_UPLOAD_DIR']):
        self.mkdir_p(env['MOZ_UPLOAD_DIR'])
    env = self.query_env(partial_env=env)
    success_codes = self._get_success_codes(suite_name)
    # marionette output goes through the summary parser; other suites
    # use their registered parser.
    if suite_name == "marionette":
        parser = TestSummaryOutputParserHelper(config=self.config,
                                               log_obj=self.log_obj,
                                               error_list=self.error_list)
    else:
        parser = self.get_test_output_parser(suite_name,
                                             config=self.config,
                                             log_obj=self.log_obj,
                                             error_list=error_list)
    return_code = self.run_command(cmd,
                                   cwd=cwd,
                                   env=env,
                                   output_timeout=1000,
                                   output_parser=parser,
                                   success_codes=success_codes)
    # Emulator logs: qemu.log is uploaded whenever it exists.
    logcat = os.path.join(dirs['abs_work_dir'], 'emulator-5554.log')
    qemu = os.path.join(dirs['abs_work_dir'], 'qemu.log')
    if os.path.isfile(qemu):
        self.copyfile(qemu, os.path.join(env['MOZ_UPLOAD_DIR'],
                                         os.path.basename(qemu)))
    tbpl_status, log_level = parser.evaluate_parser(
        return_code, success_codes=success_codes)
    if os.path.isfile(logcat):
        if tbpl_status != TBPL_SUCCESS:
            # On failure, dump logcat, check if the emulator is still
            # running, and if it is still accessible via adb.
            self.info('dumping logcat')
            self.run_command(['cat', logcat], error_list=LogcatErrorList)
            self.run_command(['ps', '-C', 'emulator'])
            self.run_command([self.adb_path, 'devices'])
        # upload logcat to blobber
        self.copyfile(logcat, os.path.join(env['MOZ_UPLOAD_DIR'],
                                           os.path.basename(logcat)))
    else:
        self.info('no logcat file found')
    parser.append_tinderboxprint_line(suite_name)
    self.buildbot_status(tbpl_status, level=log_level)
    self.log("The %s suite: %s ran with return status: %s" %
             (suite_name, suite, tbpl_status), level=log_level)
def run_tests(self): """ Run the unit test suite. """ dirs = self.query_abs_dirs() self.make_node_modules() # make the gaia profile self.make_gaia(dirs['abs_gaia_dir'], self.config.get('xre_path'), xre_url=self.config.get('xre_url'), debug=True) # build the testrunner command arguments python = self.query_python_path('python') cmd = [python, '-u', os.path.join(dirs['abs_runner_dir'], 'gaia_unit_test', 'main.py')] executable = 'firefox' if 'b2g' in self.binary_path: executable = 'b2g-bin' profile = os.path.join(dirs['abs_gaia_dir'], 'profile-debug') binary = os.path.join(os.path.dirname(self.binary_path), executable) cmd.extend(self._build_arg('--binary', binary)) cmd.extend(self._build_arg('--profile', profile)) cmd.extend(self._build_arg('--symbols-path', self.symbols_path)) cmd.extend(self._build_arg('--browser-arg', self.config.get('browser_arg'))) # Add support for chunking if self.config.get('total_chunks') and self.config.get('this_chunk'): chunker = [ os.path.join(dirs['abs_gaia_dir'], 'bin', 'chunk'), self.config.get('total_chunks'), self.config.get('this_chunk') ] disabled_tests = [] disabled_manifest = os.path.join(dirs['abs_runner_dir'], 'gaia_unit_test', 'disabled.json') with open(disabled_manifest, 'r') as m: try: disabled_tests = json.loads(m.read()) except: print "Error while decoding disabled.json; please make sure this file has valid JSON syntax." 
sys.exit(1) # Construct a list of all tests unit_tests = [] for path in ('apps', 'tv_apps'): test_root = os.path.join(dirs['abs_gaia_dir'], path) full_paths = glob.glob(os.path.join(test_root, '*/test/unit/*_test.js')) unit_tests += map(lambda x: os.path.relpath(x, test_root), full_paths) # Remove the tests that are disabled active_unit_tests = filter(lambda x: x not in disabled_tests, unit_tests) # Chunk the list as requested tests_to_run = subprocess.check_output(chunker + active_unit_tests).strip().split(' ') cmd.extend(tests_to_run) output_parser = TestSummaryOutputParserHelper(config=self.config, log_obj=self.log_obj, error_list=self.error_list) upload_dir = self.query_abs_dirs()['abs_blob_upload_dir'] if not os.path.isdir(upload_dir): self.mkdir_p(upload_dir) env = self.query_env() env['MOZ_UPLOAD_DIR'] = upload_dir # I don't like this output_timeout hardcode, but bug 920153 code = self.run_command(cmd, env=env, output_parser=output_parser, output_timeout=1760) output_parser.print_summary('gaia-unit-tests') self.publish(code)
def run_tests(self): """ Run the unit test suite. """ dirs = self.query_abs_dirs() self.make_node_modules() # make the gaia profile self.make_gaia(dirs['abs_gaia_dir'], self.config.get('xre_path'), xre_url=self.config.get('xre_url'), debug=True) # build the testrunner command arguments python = self.query_python_path('python') cmd = [ python, '-u', os.path.join(dirs['abs_runner_dir'], 'gaia_unit_test', 'main.py') ] executable = 'firefox' if 'b2g' in self.binary_path: executable = 'b2g-bin' profile = os.path.join(dirs['abs_gaia_dir'], 'profile-debug') binary = os.path.join(os.path.dirname(self.binary_path), executable) cmd.extend(self._build_arg('--binary', binary)) cmd.extend(self._build_arg('--profile', profile)) cmd.extend(self._build_arg('--symbols-path', self.symbols_path)) cmd.extend( self._build_arg('--browser-arg', self.config.get('browser_arg'))) # Add support for chunking if self.config.get('total_chunks') and self.config.get('this_chunk'): chunker = [ os.path.join(dirs['abs_gaia_dir'], 'bin', 'chunk'), self.config.get('total_chunks'), self.config.get('this_chunk') ] disabled_tests = [] disabled_manifest = os.path.join(dirs['abs_runner_dir'], 'gaia_unit_test', 'disabled.json') with open(disabled_manifest, 'r') as m: try: disabled_tests = json.loads(m.read()) except: print "Error while decoding disabled.json; please make sure this file has valid JSON syntax." 
sys.exit(1) # Construct a list of all tests unit_tests = [] for path in ('apps', 'tv_apps'): test_root = os.path.join(dirs['abs_gaia_dir'], path) full_paths = glob.glob( os.path.join(test_root, '*/test/unit/*_test.js')) unit_tests += map(lambda x: os.path.relpath(x, test_root), full_paths) # Remove the tests that are disabled active_unit_tests = filter(lambda x: x not in disabled_tests, unit_tests) # Chunk the list as requested tests_to_run = subprocess.check_output( chunker + active_unit_tests).strip().split(' ') cmd.extend(tests_to_run) output_parser = TestSummaryOutputParserHelper( config=self.config, log_obj=self.log_obj, error_list=self.error_list) upload_dir = self.query_abs_dirs()['abs_blob_upload_dir'] if not os.path.isdir(upload_dir): self.mkdir_p(upload_dir) env = self.query_env() env['MOZ_UPLOAD_DIR'] = upload_dir # I don't like this output_timeout hardcode, but bug 920153 code = self.run_command(cmd, env=env, output_parser=output_parser, output_timeout=1760) output_parser.print_summary('gaia-unit-tests') self.publish(code)