Example #1
def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report.
    """
    # get the test data
    tests = config['tests']
    tests = useBaseTestDefaults(config.get('basetest', {}), tests)

    paths = ['profile_path', 'tpmanifest', 'extensions', 'setup', 'cleanup']
    for test in tests:

        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                test[path] = utils.interpolate(test[path])
        if test.get('tpmanifest'):
            test['tpmanifest'] = \
                os.path.normpath('file:/%s' % (urllib.quote(test['tpmanifest'],
                                               '/\\t:\\')))
        if not test.get('url'):
            # build 'url' for tptest
            test['url'] = buildCommandLine(test)
        test['url'] = utils.interpolate(test['url'])
        test['setup'] = utils.interpolate(test['setup'])
        test['cleanup'] = utils.interpolate(test['cleanup'])

    # pass --no-remote to the firefox launch if --develop is specified;
    # we do that to let the user keep another firefox instance running locally
    if browser_config['develop']:
        browser_config['extra_args'] = '--no-remote'

    # production talos requires addon signing; when developing (or on Try) we disable it
    if browser_config['develop'] or browser_config['branch_name'] == 'Try':
        browser_config['preferences']['xpinstall.signatures.required'] = False
        browser_config['extensions'] = [
            os.path.dirname(i) for i in browser_config['extensions']
        ]

    # set defaults
    title = config.get('title', '')
    testdate = config.get('testdate', '')

    if browser_config['e10s'] and not title.endswith(".e"):
        # we are running in e10s mode
        title = "%s.e" % (title, )

    # get the process name from the path to the browser
    if not browser_config['process']:
        browser_config['process'] = \
            os.path.basename(browser_config['browser_path'])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config['extensions'] = [
        utils.interpolate(i) for i in browser_config['extensions']
    ]
    browser_config['bcontroller_config'] = \
        utils.interpolate(browser_config['bcontroller_config'])

    # normalize browser path to work across platforms
    browser_config['browser_path'] = \
        os.path.normpath(browser_config['browser_path'])

    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config['browser_name'] = version_info['application_name']
    browser_config['browser_version'] = version_info['application_version']
    browser_config['buildid'] = version_info['application_buildid']
    try:
        browser_config['repository'] = version_info['application_repository']
        browser_config['sourcestamp'] = version_info['application_changeset']
    except KeyError:
        if not browser_config['develop']:
            print "unable to find changeset or repository: %s" % version_info
            sys.exit()
        else:
            browser_config['repository'] = 'develop'
            browser_config['sourcestamp'] = 'develop'

    # get test date in seconds since epoch
    if testdate:
        date = int(
            time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())
    LOG.debug("using testdate: %d" % date)
    LOG.debug("actual date: %d" % int(time.time()))

    # results container
    talos_results = TalosResults()

    # results links
    if not browser_config['develop']:
        results_urls = dict(
            # another hack; datazilla stands for Perfherder
            # and does not require a url, but a non-empty dict is required...
            datazilla_urls=['local.json'], )
    else:
        # local mode, output to files
        results_urls = dict(datazilla_urls=[os.path.abspath('local.json')])
    talos_results.check_output_formats(results_urls)

    httpd = setup_webserver(browser_config['webserver'])
    httpd.start()

    testname = None
    # run the tests
    timer = utils.Timer()
    LOG.suite_start(tests=[test['name'] for test in tests])
    try:
        for test in tests:
            testname = test['name']
            LOG.test_start(testname)

            mytest = TTest()
            talos_results.add(mytest.runTest(browser_config, test))

            LOG.test_end(testname, status='OK')

    except TalosRegression as exc:
        LOG.error("Detected a regression for %s" % testname)
        # by returning 1, we report an orange to buildbot
        # http://docs.buildbot.net/latest/developer/results.html
        LOG.test_end(testname,
                     status='FAIL',
                     message=unicode(exc),
                     stack=traceback.format_exc())
        return 1
    except Exception as exc:
        # NOTE: if we get into this condition, talos has an internal problem
        # and cannot continue; this will prevent future tests from running
        LOG.test_end(testname,
                     status='ERROR',
                     message=unicode(exc),
                     stack=traceback.format_exc())
        # indicate a failure to buildbot, turn the job red
        return 2
    finally:
        LOG.suite_end()
        httpd.stop()

    LOG.info("Completed test suite (%s)" % timer.elapsed())

    # output results
    if results_urls:
        talos_results.output(results_urls)
        if browser_config['develop']:
            print("Thanks for running Talos locally. Results are in %s" %
                  (results_urls['datazilla_urls']))

    # we will stop running tests on a failed test, or we will return 0 for
    # green
    return 0
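All of the examples on this page share the same entry-point signature, run_tests(config, browser_config). The following is a minimal, hypothetical usage sketch based only on the keys that Example #1 reads from its two dictionaries; the import path, browser path, and webserver address are placeholder assumptions rather than documented Talos API, and an actual run requires a full Talos checkout.

# Hypothetical sketch only: the key names mirror what run_tests() above reads,
# but the import path and values are assumptions, not Talos documentation.
from talos.run_tests import run_tests  # assumed module path

config = {
    'tests': [
        # each test dict is interpolated by run_tests; providing 'url' skips buildCommandLine
        {'name': 'ts_paint', 'setup': '', 'cleanup': '', 'url': 'about:blank'},
    ],
    'basetest': {},      # defaults merged into every test by useBaseTestDefaults
    'title': 'local-run',
    'testdate': '',      # empty -> run_tests falls back to time.time()
}

browser_config = {
    'develop': True,     # local mode: adds --no-remote and writes local.json
    'branch_name': 'Try',
    'e10s': True,
    'process': '',       # derived from browser_path when empty
    'browser_path': '/path/to/firefox',     # placeholder binary path
    'extensions': [],
    'bcontroller_config': 'bcontroller.json',
    'preferences': {},
    'webserver': 'localhost:15707',         # assumed host:port for setup_webserver
}

exit_code = run_tests(config, browser_config)  # 0 = green, 1 = regression, 2 = internal error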
Example #2
def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report."""
    # get the test data
    tests = config["tests"]
    tests = useBaseTestDefaults(config.get("basetest", {}), tests)
    paths = ["profile_path", "tpmanifest", "extensions", "setup", "cleanup"]

    for test in tests:
        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                if path == "extensions":
                    for _index, _ext in enumerate(test["extensions"]):
                        test["extensions"][_index] = utils.interpolate(_ext)
                else:
                    test[path] = utils.interpolate(test[path])
        if test.get("tpmanifest"):
            test["tpmanifest"] = os.path.normpath(
                "file:/%s" %
                (six.moves.urllib.parse.quote(test["tpmanifest"], "/\\t:\\")))
            test["preferences"]["talos.tpmanifest"] = test["tpmanifest"]

        # if using firstNonBlankPaint, set test preference for it
        # so that the browser pref will be turned on (in ffsetup)
        if test.get("fnbpaint", False):
            LOG.info(
                "Test is using firstNonBlankPaint, browser pref will be turned on"
            )
            test["preferences"][
                "dom.performance.time_to_non_blank_paint.enabled"] = True

        test["setup"] = utils.interpolate(test["setup"])
        test["cleanup"] = utils.interpolate(test["cleanup"])

        if not test.get("profile", False):
            test["profile"] = config.get("profile")

    if mozinfo.os == "win":
        browser_config["extra_args"] = ["-wait-for-browser", "-no-deelevate"]
    else:
        browser_config["extra_args"] = []

    # pass --no-remote to the firefox launch if --develop is specified;
    # we do that to let the user keep another firefox instance running locally
    if browser_config["develop"]:
        browser_config["extra_args"].append("--no-remote")

    # Pass subtests filter argument via a preference
    if browser_config["subtests"]:
        browser_config["preferences"]["talos.subtests"] = browser_config[
            "subtests"]

    if browser_config.get("enable_fission", False):
        browser_config["preferences"]["fission.autostart"] = True
        browser_config["preferences"][
            "dom.serviceWorkers.parent_intercept"] = True

    browser_config["preferences"]["network.proxy.type"] = 2
    browser_config["preferences"]["network.proxy.autoconfig_url"] = (
        """data:text/plain,
function FindProxyForURL(url, host) {
  if (url.startsWith('http')) {
   return 'PROXY %s';
  }
  return 'DIRECT';
}""" % browser_config["webserver"])

    # If --code-coverage files are expected, set a flag in the browser config so ffsetup
    # knows that it needs to delete any ccov files resulting from browser initialization.
    # NOTE: this is only supported in production; local setup of ccov folders and data
    # collection is not supported yet, so the --code-coverage flag cannot be used locally
    if config.get("code_coverage", False):
        if browser_config["develop"]:
            raise TalosError("Aborting: talos --code-coverage flag is only "
                             "supported in production")
        else:
            browser_config["code_coverage"] = True

    # set defaults
    testdate = config.get("testdate", "")

    # get the process name from the path to the browser
    if not browser_config["process"]:
        browser_config["process"] = os.path.basename(
            browser_config["browser_path"])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config["extensions"] = [
        utils.interpolate(i) for i in browser_config["extensions"]
    ]
    browser_config["bcontroller_config"] = utils.interpolate(
        browser_config["bcontroller_config"])

    # normalize browser path to work across platforms
    browser_config["browser_path"] = os.path.normpath(
        browser_config["browser_path"])

    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config["browser_name"] = version_info["application_name"]
    browser_config["browser_version"] = version_info["application_version"]
    browser_config["buildid"] = version_info["application_buildid"]
    try:
        browser_config["repository"] = version_info["application_repository"]
        browser_config["sourcestamp"] = version_info["application_changeset"]
    except KeyError:
        if not browser_config["develop"]:
            print("Abort: unable to find changeset or repository: %s" %
                  version_info)
            sys.exit(1)
        else:
            browser_config["repository"] = "develop"
            browser_config["sourcestamp"] = "develop"

    # get test date in seconds since epoch
    if testdate:
        date = int(
            time.mktime(time.strptime(testdate, "%a, %d %b %Y %H:%M:%S GMT")))
    else:
        date = int(time.time())
    LOG.debug("using testdate: %d" % date)
    LOG.debug("actual date: %d" % int(time.time()))

    # results container
    talos_results = TalosResults()

    # results links
    if not browser_config["develop"] and not config["gecko_profile"]:
        results_urls = dict(
            # another hack; datazilla stands for Perfherder
            # and does not require a url, but a non-empty dict is required...
            output_urls=["local.json"], )
    else:
        # local mode, output to files
        results_urls = dict(output_urls=[os.path.abspath("local.json")])

    httpd = setup_webserver(browser_config["webserver"])
    httpd.start()

    # legacy still required for perfherder data
    talos_results.add_extra_option("e10s")
    talos_results.add_extra_option("stylo")

    # measuring the difference of a certain thread level
    if config.get("stylothreads", 0) > 0:
        talos_results.add_extra_option("%s_thread" % config["stylothreads"])

    if config["gecko_profile"]:
        talos_results.add_extra_option("gecko-profile")

    # differentiate fission vs non-fission results in perfherder
    if browser_config.get("enable_fission", False):
        talos_results.add_extra_option("fission")

    # differentiate webrender from non-webrender results
    if browser_config["preferences"].get("gfx.webrender.software", False):
        talos_results.add_extra_option("webrender-sw")
    elif browser_config.get("enable_webrender", False):
        talos_results.add_extra_option("webrender")

    # differentiate webgl from webgl-ipc results
    if browser_config["preferences"].get("webgl.out-of-process", False):
        talos_results.add_extra_option("webgl-ipc")

    testname = None

    # run the tests
    timer = utils.Timer()
    LOG.suite_start(tests=[test["name"] for test in tests])
    try:
        for test in tests:
            testname = test["name"]
            LOG.test_start(testname)

            if not test.get("url"):
                # set browser prefs for pageloader test settings (doesn't use cmd line args / url)
                test["url"] = None
                set_tp_preferences(test, browser_config)

            mytest = TTest()

            # some tests like ts_paint return multiple results in a single iteration
            if test.get("firstpaint", False) or test.get("userready", None):
                # we need a 'testeventmap' to tell us which tests each event should map to
                multi_value_result = None
                separate_results_list = []

                test_event_map = test.get("testeventmap", None)
                if test_event_map is None:
                    raise TalosError("Need 'testeventmap' in test.py for %s" %
                                     test.get("name"))

                # run the test
                multi_value_result = mytest.runTest(browser_config, test)
                if multi_value_result is None:
                    raise TalosError("Abort: no results returned for %s" %
                                     test.get("name"))

                # parse out the multi-value results, and 'fake it' to appear like separate tests
                separate_results_list = convert_to_separate_test_results(
                    multi_value_result, test_event_map)

                # now we have three separate test results, store them
                for test_result in separate_results_list:
                    talos_results.add(test_result)

            # some tests like bloom_basic run two separate tests and then compare those values
            # we want the results in perfherder to only be the actual difference between those
            # and store the base and reference test replicates in results.json for upload
            elif test.get("base_vs_ref", False):
                # run the test, results will be reported for each page like two tests in the suite
                base_and_reference_results = mytest.runTest(
                    browser_config, test)
                # now compare each test, and create a new test object for the comparison
                talos_results.add(
                    make_comparison_result(base_and_reference_results))
            else:
                # just expecting regular test - one result value per iteration
                talos_results.add(mytest.runTest(browser_config, test))
            LOG.test_end(testname, status="OK")

    except TalosRegression as exc:
        LOG.error("Detected a regression for %s" % testname)
        # by returning 1, we report an orange to buildbot
        # http://docs.buildbot.net/latest/developer/results.html
        LOG.test_end(testname,
                     status="FAIL",
                     message=str(exc),
                     stack=traceback.format_exc())
        return 1
    except Exception as exc:
        # NOTE: if we get into this condition, talos has an internal problem
        # and cannot continue; this will prevent future tests from running
        LOG.test_end(testname,
                     status="ERROR",
                     message=str(exc),
                     stack=traceback.format_exc())
        # indicate a failure to buildbot, turn the job red
        return 2
    finally:
        LOG.suite_end()
        httpd.stop()

    LOG.info("Completed test suite (%s)" % timer.elapsed())

    # output results
    if results_urls and not browser_config["no_upload_results"]:
        talos_results.output(results_urls)
        if browser_config["develop"] or config["gecko_profile"]:
            print("Thanks for running Talos locally. Results are in %s" %
                  (results_urls["output_urls"]))

    # when running talos locally with gecko profiling on, use the view-gecko-profile
    # tool to automatically load the latest gecko profile in profiler.firefox.com
    if config["gecko_profile"] and browser_config["develop"]:
        if os.environ.get("DISABLE_PROFILE_LAUNCH", "0") == "1":
            LOG.info(
                "Not launching profiler.firefox.com because DISABLE_PROFILE_LAUNCH=1"
            )
        else:
            view_gecko_profile_from_talos()

    # we will stop running tests on a failed test, or we will return 0 for
    # green
    return 0
Example #3
def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report.
    """
    # get the test data
    tests = config['tests']
    tests = useBaseTestDefaults(config.get('basetest', {}), tests)
    paths = ['profile_path', 'tpmanifest', 'extensions', 'setup', 'cleanup']

    for test in tests:
        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                if path == 'extensions':
                    for _index, _ext in enumerate(test['extensions']):
                        test['extensions'][_index] = utils.interpolate(_ext)
                else:
                    test[path] = utils.interpolate(test[path])
        if test.get('tpmanifest'):
            test['tpmanifest'] = \
                os.path.normpath('file:/%s' % (urllib.quote(test['tpmanifest'],
                                               '/\\t:\\')))
            test['preferences']['talos.tpmanifest'] = test['tpmanifest']

        # if using firstNonBlankPaint, set test preference for it
        # so that the browser pref will be turned on (in ffsetup)
        if test.get('fnbpaint', False):
            LOG.info(
                "Test is using firstNonBlankPaint, browser pref will be turned on"
            )
            test['preferences'][
                'dom.performance.time_to_non_blank_paint.enabled'] = True

        test['setup'] = utils.interpolate(test['setup'])
        test['cleanup'] = utils.interpolate(test['cleanup'])

        if not test.get('profile', False):
            test['profile'] = config.get('profile')

    if mozinfo.os == 'win':
        browser_config['extra_args'] = ['-wait-for-browser', '-no-deelevate']
    else:
        browser_config['extra_args'] = []

    # pass --no-remote to the firefox launch if --develop is specified;
    # we do that to let the user keep another firefox instance running locally
    if browser_config['develop']:
        browser_config['extra_args'].append('--no-remote')

    # Pass subtests filter argument via a preference
    if browser_config['subtests']:
        browser_config['preferences']['talos.subtests'] = browser_config[
            'subtests']

    # If --code-coverage files are expected, set a flag in the browser config so ffsetup
    # knows that it needs to delete any ccov files resulting from browser initialization.
    # NOTE: this is only supported in production; local setup of ccov folders and data
    # collection is not supported yet, so the --code-coverage flag cannot be used locally
    if config.get('code_coverage', False):
        if browser_config['develop']:
            raise TalosError('Aborting: talos --code-coverage flag is only '
                             'supported in production')
        else:
            browser_config['code_coverage'] = True

    # set defaults
    testdate = config.get('testdate', '')

    # get the process name from the path to the browser
    if not browser_config['process']:
        browser_config['process'] = \
            os.path.basename(browser_config['browser_path'])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config['extensions'] = [
        utils.interpolate(i) for i in browser_config['extensions']
    ]
    browser_config['bcontroller_config'] = \
        utils.interpolate(browser_config['bcontroller_config'])

    # normalize browser path to work across platforms
    browser_config['browser_path'] = \
        os.path.normpath(browser_config['browser_path'])

    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config['browser_name'] = version_info['application_name']
    browser_config['browser_version'] = version_info['application_version']
    browser_config['buildid'] = version_info['application_buildid']
    try:
        browser_config['repository'] = version_info['application_repository']
        browser_config['sourcestamp'] = version_info['application_changeset']
    except KeyError:
        if not browser_config['develop']:
            print("Abort: unable to find changeset or repository: %s" %
                  version_info)
            sys.exit(1)
        else:
            browser_config['repository'] = 'develop'
            browser_config['sourcestamp'] = 'develop'

    # get test date in seconds since epoch
    if testdate:
        date = int(
            time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())
    LOG.debug("using testdate: %d" % date)
    LOG.debug("actual date: %d" % int(time.time()))

    # results container
    talos_results = TalosResults()

    # results links
    if not browser_config['develop'] and not config['gecko_profile']:
        results_urls = dict(
            # another hack; datazilla stands for Perfherder
            # and does not require a url, but a non-empty dict is required...
            output_urls=['local.json'], )
    else:
        # local mode, output to files
        results_urls = dict(output_urls=[os.path.abspath('local.json')])

    httpd = setup_webserver(browser_config['webserver'])
    httpd.start()

    # legacy still required for perfherder data
    talos_results.add_extra_option('e10s')
    talos_results.add_extra_option('stylo')

    # measuring the difference of a certain thread level
    if config.get('stylothreads', 0) > 0:
        talos_results.add_extra_option('%s_thread' % config['stylothreads'])

    if config['gecko_profile']:
        talos_results.add_extra_option('geckoProfile')

    # some tests use mitmproxy to playback pages
    mitmproxy_recordings_list = config.get('mitmproxy', False)
    if mitmproxy_recordings_list is not False:
        # needed so we can tell talos ttest to allow external connections
        browser_config['mitmproxy'] = True

        # start mitmproxy playback; this also generates the CA certificate
        mitmdump_path = config.get('mitmdumpPath', False)
        if mitmdump_path is False:
            # cannot continue, need path for mitmdump playback tool
            raise TalosError(
                'Aborting: mitmdumpPath not provided on cmd line but is required'
            )

        mitmproxy_recording_path = os.path.join(here, 'mitmproxy')
        mitmproxy_proc = mitmproxy.start_mitmproxy_playback(
            mitmdump_path, mitmproxy_recording_path,
            mitmproxy_recordings_list.split(), browser_config['browser_path'])

        # install the generated CA certificate into Firefox
        # mitmproxy cert setup needs path to mozharness install; mozharness has set this
        mitmproxy.install_mitmproxy_cert(mitmproxy_proc,
                                         browser_config['browser_path'])

    testname = None

    # run the tests
    timer = utils.Timer()
    LOG.suite_start(tests=[test['name'] for test in tests])
    try:
        for test in tests:
            testname = test['name']
            LOG.test_start(testname)

            if not test.get('url'):
                # set browser prefs for pageloader test settings (doesn't use cmd line args / url)
                test['url'] = None
                set_tp_preferences(test, browser_config)

            mytest = TTest()

            # some tests like ts_paint return multiple results in a single iteration
            if test.get('firstpaint', False) or test.get('userready', None):
                # we need a 'testeventmap' to tell us which tests each event should map to
                multi_value_result = None
                separate_results_list = []

                test_event_map = test.get('testeventmap', None)
                if test_event_map is None:
                    raise TalosError("Need 'testeventmap' in test.py for %s" %
                                     test.get('name'))

                # run the test
                multi_value_result = mytest.runTest(browser_config, test)
                if multi_value_result is None:
                    raise TalosError("Abort: no results returned for %s" %
                                     test.get('name'))

                # parse out the multi-value results, and 'fake it' to appear like separate tests
                separate_results_list = convert_to_separate_test_results(
                    multi_value_result, test_event_map)

                # now we have three separate test results, store them
                for test_result in separate_results_list:
                    talos_results.add(test_result)

            # some tests like bloom_basic run two separate tests and then compare those values
            # we want the results in perfherder to only be the actual difference between those
            # and store the base and reference test replicates in results.json for upload
            elif test.get('base_vs_ref', False):
                # run the test, results will be reported for each page like two tests in the suite
                base_and_reference_results = mytest.runTest(
                    browser_config, test)
                # now compare each test, and create a new test object for the comparison
                talos_results.add(
                    make_comparison_result(base_and_reference_results))
            else:
                # just expecting regular test - one result value per iteration
                talos_results.add(mytest.runTest(browser_config, test))
            LOG.test_end(testname, status='OK')

    except TalosRegression as exc:
        LOG.error("Detected a regression for %s" % testname)
        # by returning 1, we report an orange to buildbot
        # http://docs.buildbot.net/latest/developer/results.html
        LOG.test_end(testname,
                     status='FAIL',
                     message=str(exc),
                     stack=traceback.format_exc())
        return 1
    except Exception as exc:
        # NOTE: if we get into this condition, talos has an internal problem
        # and cannot continue; this will prevent future tests from running
        LOG.test_end(testname,
                     status='ERROR',
                     message=str(exc),
                     stack=traceback.format_exc())
        # indicate a failure to buildbot, turn the job red
        return 2
    finally:
        LOG.suite_end()
        httpd.stop()

    LOG.info("Completed test suite (%s)" % timer.elapsed())

    # if mitmproxy was used for page playback, stop it
    if mitmproxy_recordings_list is not False:
        mitmproxy.stop_mitmproxy_playback(mitmproxy_proc)

    # output results
    if results_urls and not browser_config['no_upload_results']:
        talos_results.output(results_urls)
        if browser_config['develop'] or config['gecko_profile']:
            print("Thanks for running Talos locally. Results are in %s" %
                  (results_urls['output_urls']))

    # when running talos locally with gecko profiling on, use the view-gecko-profile
    # tool to automatically load the latest gecko profile in perf-html.io
    if config['gecko_profile'] and browser_config['develop']:
        if os.environ.get('DISABLE_PROFILE_LAUNCH', '0') == '1':
            LOG.info(
                "Not launching perf-html.io because DISABLE_PROFILE_LAUNCH=1")
        else:
            view_gecko_profile(config['browser_path'])

    # we will stop running tests on a failed test, or we will return 0 for
    # green
    return 0
Example #4
def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report.
    """
    # get the test data
    tests = config['tests']
    tests = useBaseTestDefaults(config.get('basetest', {}), tests)

    paths = ['profile_path', 'tpmanifest', 'extensions', 'setup', 'cleanup']
    for test in tests:

        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                test[path] = utils.interpolate(test[path])
        if test.get('tpmanifest'):
            test['tpmanifest'] = \
                os.path.normpath('file:/%s' % (urllib.quote(test['tpmanifest'],
                                               '/\\t:\\')))
        if not test.get('url'):
            # build 'url' for tptest
            test['url'] = buildCommandLine(test)
        test['url'] = utils.interpolate(test['url'])
        test['setup'] = utils.interpolate(test['setup'])
        test['cleanup'] = utils.interpolate(test['cleanup'])

    # pass --no-remote to the firefox launch if --develop is specified;
    # we do that to let the user keep another firefox instance running locally
    if browser_config['develop']:
        browser_config['extra_args'] = '--no-remote'

    # production talos requires addon signing; when developing (or on Try) we disable it
    if browser_config['develop'] or browser_config['branch_name'] == 'Try':
        browser_config['preferences']['xpinstall.signatures.required'] = False
        browser_config['extensions'] = [os.path.dirname(i)
                                        for i in browser_config['extensions']]

    # set defaults
    title = config.get('title', '')
    testdate = config.get('testdate', '')

    if browser_config['e10s'] and not title.endswith(".e"):
        # we are running in e10s mode
        title = "%s.e" % (title,)

    # get the process name from the path to the browser
    if not browser_config['process']:
        browser_config['process'] = \
            os.path.basename(browser_config['browser_path'])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config['extensions'] = [utils.interpolate(i)
                                    for i in browser_config['extensions']]
    browser_config['bcontroller_config'] = \
        utils.interpolate(browser_config['bcontroller_config'])

    # normalize browser path to work across platforms
    browser_config['browser_path'] = \
        os.path.normpath(browser_config['browser_path'])

    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config['browser_name'] = version_info['application_name']
    browser_config['browser_version'] = version_info['application_version']
    browser_config['buildid'] = version_info['application_buildid']
    try:
        browser_config['repository'] = version_info['application_repository']
        browser_config['sourcestamp'] = version_info['application_changeset']
    except KeyError:
        if not browser_config['develop']:
            print "unable to find changeset or repository: %s" % version_info
            sys.exit()
        else:
            browser_config['repository'] = 'develop'
            browser_config['sourcestamp'] = 'develop'

    # get test date in seconds since epoch
    if testdate:
        date = int(time.mktime(time.strptime(testdate,
                                             '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())
    LOG.debug("using testdate: %d" % date)
    LOG.debug("actual date: %d" % int(time.time()))

    # results container
    talos_results = TalosResults()

    # results links
    if not browser_config['develop']:
        results_urls = dict(
            # another hack; datazilla stands for Perfherder
            # and does not require a url, but a non-empty dict is required...
            datazilla_urls=['local.json'],
        )
    else:
        # local mode, output to files
        results_urls = dict(datazilla_urls=[os.path.abspath('local.json')])
    talos_results.check_output_formats(results_urls)

    httpd = setup_webserver(browser_config['webserver'])
    httpd.start()

    testname = None
    # run the tests
    timer = utils.Timer()
    LOG.suite_start(tests=[test['name'] for test in tests])
    try:
        for test in tests:
            testname = test['name']
            LOG.test_start(testname)

            mytest = TTest()
            talos_results.add(mytest.runTest(browser_config, test))

            LOG.test_end(testname, status='OK')

    except TalosRegression as exc:
        LOG.error("Detected a regression for %s" % testname)
        # by returning 1, we report an orange to buildbot
        # http://docs.buildbot.net/latest/developer/results.html
        LOG.test_end(testname, status='FAIL', message=unicode(exc),
                     stack=traceback.format_exc())
        return 1
    except Exception as exc:
        # NOTE: if we get into this condition, talos has an internal problem
        # and cannot continue; this will prevent future tests from running
        LOG.test_end(testname, status='ERROR', message=unicode(exc),
                     stack=traceback.format_exc())
        # indicate a failure to buildbot, turn the job red
        return 2
    finally:
        LOG.suite_end()
        httpd.stop()

    LOG.info("Completed test suite (%s)" % timer.elapsed())

    # output results
    if results_urls:
        talos_results.output(results_urls)
        if browser_config['develop']:
            print ("Thanks for running Talos locally. Results are in %s"
                   % (results_urls['datazilla_urls']))

    # we will stop running tests on a failed test, or we will return 0 for
    # green
    return 0
Example #5
def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report.
    """
    # get the test data
    tests = config['tests']
    tests = useBaseTestDefaults(config.get('basetest', {}), tests)
    paths = ['profile_path', 'tpmanifest', 'extensions', 'setup', 'cleanup']
    for test in tests:

        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                test[path] = utils.interpolate(test[path])
        if test.get('tpmanifest'):
            test['tpmanifest'] = \
                os.path.normpath('file:/%s' % (urllib.quote(test['tpmanifest'],
                                               '/\\t:\\')))
        if not test.get('url'):
            # build 'url' for tptest
            test['url'] = buildCommandLine(test)
        test['url'] = utils.interpolate(test['url'])
        test['setup'] = utils.interpolate(test['setup'])
        test['cleanup'] = utils.interpolate(test['cleanup'])

    # pass --no-remote to the firefox launch if --develop is specified;
    # we do that to let the user keep another firefox instance running locally
    if browser_config['develop']:
        browser_config['extra_args'] = '--no-remote'

    # production talos requires addon signing; when developing (or on Try) we disable it
    if browser_config['develop'] or browser_config['branch_name'] == 'Try':
        browser_config['preferences']['xpinstall.signatures.required'] = False

    browser_config['preferences']['extensions.allow-non-mpc-extensions'] = True

    # set defaults
    testdate = config.get('testdate', '')

    # get the process name from the path to the browser
    if not browser_config['process']:
        browser_config['process'] = \
            os.path.basename(browser_config['browser_path'])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config['extensions'] = [utils.interpolate(i)
                                    for i in browser_config['extensions']]
    browser_config['bcontroller_config'] = \
        utils.interpolate(browser_config['bcontroller_config'])

    # normalize browser path to work across platforms
    browser_config['browser_path'] = \
        os.path.normpath(browser_config['browser_path'])

    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config['browser_name'] = version_info['application_name']
    browser_config['browser_version'] = version_info['application_version']
    browser_config['buildid'] = version_info['application_buildid']
    try:
        browser_config['repository'] = version_info['application_repository']
        browser_config['sourcestamp'] = version_info['application_changeset']
    except KeyError:
        if not browser_config['develop']:
            print("unable to find changeset or repository: %s" % version_info)
            sys.exit()
        else:
            browser_config['repository'] = 'develop'
            browser_config['sourcestamp'] = 'develop'

    # get test date in seconds since epoch
    if testdate:
        date = int(time.mktime(time.strptime(testdate,
                                             '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())
    LOG.debug("using testdate: %d" % date)
    LOG.debug("actual date: %d" % int(time.time()))

    # results container
    talos_results = TalosResults()

    # results links
    if not browser_config['develop'] and not config['gecko_profile']:
        results_urls = dict(
            # another hack; datazilla stands for Perfherder
            # and does not require a url, but a non-empty dict is required...
            output_urls=['local.json'],
        )
    else:
        # local mode, output to files
        results_urls = dict(output_urls=[os.path.abspath('local.json')])

    httpd = setup_webserver(browser_config['webserver'])
    httpd.start()

    # if e10s add as extra results option
    if config['e10s']:
        talos_results.add_extra_option('e10s')

    if config['gecko_profile']:
        talos_results.add_extra_option('geckoProfile')

    # some tests use mitmproxy to playback pages
    mitmproxy_recordings_list = config.get('mitmproxy', False)
    if mitmproxy_recordings_list is not False:
        # needed so we can tell talos ttest to allow external connections
        browser_config['mitmproxy'] = True

        # start mitmproxy playback; this also generates the CA certificate
        mitmdump_path = config.get('mitmdumpPath', False)
        if mitmdump_path is False:
            # cannot continue, need path for mitmdump playback tool
            LOG.error('Aborting: mitmdumpPath was not provided on cmd line but is required')
            sys.exit()

        mitmproxy_recording_path = os.path.join(here, 'mitmproxy')
        mitmproxy_proc = mitmproxy.start_mitmproxy_playback(mitmdump_path,
                                                            mitmproxy_recording_path,
                                                            mitmproxy_recordings_list.split(),
                                                            browser_config['browser_path'])

        # install the generated CA certificate into Firefox
        # mitmproxy cert setup needs path to mozharness install; mozharness has set this
        # value in the SCRIPTSPATH env var for us in mozharness/mozilla/testing/talos.py
        scripts_path = os.environ.get('SCRIPTSPATH')
        LOG.info('scripts_path: %s' % str(scripts_path))
        mitmproxy.install_mitmproxy_cert(mitmproxy_proc,
                                         browser_config['browser_path'],
                                         str(scripts_path))

    if config.get('first_non_blank_paint', False):
        browser_config['firstNonBlankPaint'] = True

    testname = None
    # run the tests
    timer = utils.Timer()
    LOG.suite_start(tests=[test['name'] for test in tests])
    try:
        for test in tests:
            testname = test['name']
            LOG.test_start(testname)

            mytest = TTest()
            talos_results.add(mytest.runTest(browser_config, test))

            LOG.test_end(testname, status='OK')

    except TalosRegression as exc:
        LOG.error("Detected a regression for %s" % testname)
        # by returning 1, we report an orange to buildbot
        # http://docs.buildbot.net/latest/developer/results.html
        LOG.test_end(testname, status='FAIL', message=str(exc),
                     stack=traceback.format_exc())
        return 1
    except Exception as exc:
        # NOTE: if we get into this condition, talos has an internal problem
        # and cannot continue; this will prevent future tests from running
        LOG.test_end(testname, status='ERROR', message=str(exc),
                     stack=traceback.format_exc())
        # indicate a failure to buildbot, turn the job red
        return 2
    finally:
        LOG.suite_end()
        httpd.stop()

    LOG.info("Completed test suite (%s)" % timer.elapsed())

    # if mitmproxy was used for page playback, stop it
    if mitmproxy_recordings_list is not False:
        mitmproxy.stop_mitmproxy_playback(mitmproxy_proc)

    # output results
    if results_urls:
        talos_results.output(results_urls)
        if browser_config['develop'] or config['gecko_profile']:
            print("Thanks for running Talos locally. Results are in %s"
                  % (results_urls['output_urls']))

    # we will stop running tests on a failed test, or we will return 0 for
    # green
    return 0
Example #6
def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report.
    """
    # get the test data
    tests = config['tests']
    tests = useBaseTestDefaults(config.get('basetest', {}), tests)

    paths = ['profile_path', 'tpmanifest', 'extensions', 'setup', 'cleanup']
    for test in tests:

        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                test[path] = utils.interpolate(test[path])
        if test.get('tpmanifest'):
            test['tpmanifest'] = \
                os.path.normpath('file:/%s' % (urllib.quote(test['tpmanifest'],
                                               '/\\t:\\')))
        if not test.get('url'):
            # build 'url' for tptest
            test['url'] = buildCommandLine(test)
        test['url'] = utils.interpolate(test['url'])
        test['setup'] = utils.interpolate(test['setup'])
        test['cleanup'] = utils.interpolate(test['cleanup'])

    # pass --no-remote to the firefox launch if --develop is specified
    if browser_config['develop']:
        browser_config['extra_args'] = '--no-remote'

    # set defaults
    title = config.get('title', '')
    testdate = config.get('testdate', '')

    if browser_config['e10s'] and not title.endswith(".e"):
        # we are running in e10s mode
        title = "%s.e" % (title, )

    # get the process name from the path to the browser
    if not browser_config['process']:
        browser_config['process'] = \
            os.path.basename(browser_config['browser_path'])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config['extensions'] = [
        utils.interpolate(i) for i in browser_config['extensions']
    ]
    browser_config['bcontroller_config'] = \
        utils.interpolate(browser_config['bcontroller_config'])

    # normalize browser path to work across platforms
    browser_config['browser_path'] = \
        os.path.normpath(browser_config['browser_path'])

    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config['browser_name'] = version_info['application_name']
    browser_config['browser_version'] = version_info['application_version']
    browser_config['buildid'] = version_info['application_buildid']
    try:
        browser_config['repository'] = version_info['application_repository']
        browser_config['sourcestamp'] = version_info['application_changeset']
    except KeyError:
        if not browser_config['develop']:
            print "unable to find changeset or repository: %s" % version_info
            sys.exit()
        else:
            browser_config['repository'] = 'develop'
            browser_config['sourcestamp'] = 'develop'

    # get test date in seconds since epoch
    if testdate:
        date = int(
            time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())
    logging.debug("using testdate: %d", date)
    logging.debug("actual date: %d", int(time.time()))

    # results container
    talos_results = TalosResults(title=title,
                                 date=date,
                                 browser_config=browser_config)

    # results links
    if not browser_config['develop']:
        results_urls = dict(
            # hardcoded, this will be removed soon anyway.
            results_urls=['http://graphs.mozilla.org/server/collect.cgi'],
            # another hack; datazilla stands for Perfherder
            # and does not require a url, but a non-empty dict is required...
            datazilla_urls=['local.json'],
        )
    else:
        # local mode, output to files
        results_urls = dict(results_urls=[os.path.abspath('local.out')],
                            datazilla_urls=[os.path.abspath('local.json')])
    talos_results.check_output_formats(results_urls)

    # setup a webserver, if --develop is specified
    httpd = None
    if browser_config['develop']:
        httpd = setup_webserver(browser_config['webserver'])
        if httpd:
            httpd.start()

    # run the tests
    timer = utils.Timer()
    logging.info("Starting test suite %s", title)
    for test in tests:
        testname = test['name']
        testtimer = utils.Timer()
        logging.info("Starting test %s", testname)

        try:
            mytest = TTest()
            if mytest:
                talos_results.add(mytest.runTest(browser_config, test))
            else:
                logging.error("Error found while running %s", testname)
        except TalosRegression:
            logging.error("Detected a regression for %s", testname)
            if httpd:
                httpd.stop()
            # by returning 1, we report an orange to buildbot
            # http://docs.buildbot.net/latest/developer/results.html
            return 1
        except (TalosCrash, TalosError):
            # NOTE: if we get into this condition, talos has an internal problem
            # and cannot continue; this will prevent future tests from running
            traceback.print_exception(*sys.exc_info())
            if httpd:
                httpd.stop()
            # indicate a failure to buildbot, turn the job red
            return 2

        logging.info("Completed test %s (%s)", testname, testtimer.elapsed())

    logging.info("Completed test suite (%s)", timer.elapsed())

    # stop the webserver if running
    if httpd:
        httpd.stop()

    # output results
    if results_urls:
        talos_results.output(results_urls)
        if browser_config['develop']:
            print
            print(
                "Thanks for running Talos locally. Results are in"
                " %s and %s" %
                (results_urls['results_urls'], results_urls['datazilla_urls']))

    # we will stop running tests on a failed test, or we will return 0 for
    # green
    return 0
Example #7
def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report.
    """
    # get the test data
    tests = config['tests']
    tests = useBaseTestDefaults(config.get('basetest', {}), tests)
    paths = ['profile_path', 'tpmanifest', 'extensions', 'setup', 'cleanup']

    for test in tests:
        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                test[path] = utils.interpolate(test[path])
        if test.get('tpmanifest'):
            test['tpmanifest'] = \
                os.path.normpath('file:/%s' % (urllib.quote(test['tpmanifest'],
                                               '/\\t:\\')))
            test['preferences']['talos.tpmanifest'] = test['tpmanifest']

        # if using firstNonBlankPaint, set test preference for it
        # so that the browser pref will be turned on (in ffsetup)
        if test.get('fnbpaint', False):
            LOG.info("Test is using firstNonBlankPaint, browser pref will be turned on")
            test['preferences']['dom.performance.time_to_non_blank_paint.enabled'] = True

        test['setup'] = utils.interpolate(test['setup'])
        test['cleanup'] = utils.interpolate(test['cleanup'])

        if not test.get('profile', False):
            test['profile'] = config.get('profile')

    # pass --no-remote to the firefox launch if --develop is specified;
    # we do that to let the user keep another firefox instance running locally
    if browser_config['develop']:
        browser_config['extra_args'] = '--no-remote'

    # Pass subtests filter argument via a preference
    if browser_config['subtests']:
        browser_config['preferences']['talos.subtests'] = browser_config['subtests']

    # If --code-coverage files are expected, set a flag in the browser config so ffsetup
    # knows that it needs to delete any ccov files resulting from browser initialization.
    # NOTE: this is only supported in production; local setup of ccov folders and data
    # collection is not supported yet, so the --code-coverage flag cannot be used locally
    if config.get('code_coverage', False):
        if browser_config['develop']:
            raise TalosError('Aborting: talos --code-coverage flag is only '
                             'supported in production')
        else:
            browser_config['code_coverage'] = True

    # set defaults
    testdate = config.get('testdate', '')

    # get the process name from the path to the browser
    if not browser_config['process']:
        browser_config['process'] = \
            os.path.basename(browser_config['browser_path'])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config['extensions'] = [utils.interpolate(i)
                                    for i in browser_config['extensions']]
    browser_config['bcontroller_config'] = \
        utils.interpolate(browser_config['bcontroller_config'])

    # normalize browser path to work across platforms
    browser_config['browser_path'] = \
        os.path.normpath(browser_config['browser_path'])

    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config['browser_name'] = version_info['application_name']
    browser_config['browser_version'] = version_info['application_version']
    browser_config['buildid'] = version_info['application_buildid']
    try:
        browser_config['repository'] = version_info['application_repository']
        browser_config['sourcestamp'] = version_info['application_changeset']
    except KeyError:
        if not browser_config['develop']:
            print("Abort: unable to find changeset or repository: %s" % version_info)
            sys.exit(1)
        else:
            browser_config['repository'] = 'develop'
            browser_config['sourcestamp'] = 'develop'

    # get test date in seconds since epoch
    if testdate:
        date = int(time.mktime(time.strptime(testdate,
                                             '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())
    LOG.debug("using testdate: %d" % date)
    LOG.debug("actual date: %d" % int(time.time()))

    # results container
    talos_results = TalosResults()

    # results links
    if not browser_config['develop'] and not config['gecko_profile']:
        results_urls = dict(
            # another hack; datazilla stands for Perfherder
            # and does not require a url, but a non-empty dict is required...
            output_urls=['local.json'],
        )
    else:
        # local mode, output to files
        results_urls = dict(output_urls=[os.path.abspath('local.json')])

    httpd = setup_webserver(browser_config['webserver'])
    httpd.start()

    # legacy still required for perfherder data
    talos_results.add_extra_option('e10s')

    # stylo is another option for testing
    if config['enable_stylo']:
        talos_results.add_extra_option('stylo')
    if config['disable_stylo']:
        talos_results.add_extra_option('stylo_disabled')

    # measuring the difference of a certain thread level
    if config.get('stylothreads', 0) > 0:
        talos_results.add_extra_option('%s_thread' % config['stylothreads'])

    if config['gecko_profile']:
        talos_results.add_extra_option('geckoProfile')

    # some tests use mitmproxy to playback pages
    mitmproxy_recordings_list = config.get('mitmproxy', False)
    if mitmproxy_recordings_list is not False:
        # needed so we can tell talos ttest to allow external connections
        browser_config['mitmproxy'] = True

        # start mitmproxy playback; this also generates the CA certificate
        mitmdump_path = config.get('mitmdumpPath', False)
        if mitmdump_path is False:
            # cannot continue, need path for mitmdump playback tool
            raise TalosError('Aborting: mitmdumpPath not provided on cmd line but is required')

        mitmproxy_recording_path = os.path.join(here, 'mitmproxy')
        mitmproxy_proc = mitmproxy.start_mitmproxy_playback(mitmdump_path,
                                                            mitmproxy_recording_path,
                                                            mitmproxy_recordings_list.split(),
                                                            browser_config['browser_path'])

        # install the generated CA certificate into Firefox
        # mitmproxy cert setup needs path to mozharness install; mozharness has set this
        # value in the SCRIPTSPATH env var for us in mozharness/mozilla/testing/talos.py
        scripts_path = os.environ.get('SCRIPTSPATH')
        LOG.info('scripts_path: %s' % str(scripts_path))
        mitmproxy.install_mitmproxy_cert(mitmproxy_proc,
                                         browser_config['browser_path'],
                                         str(scripts_path))

    testname = None

    # run the tests
    timer = utils.Timer()
    LOG.suite_start(tests=[test['name'] for test in tests])
    try:
        for test in tests:
            testname = test['name']
            LOG.test_start(testname)

            if not test.get('url'):
                # set browser prefs for pageloader test settings (doesn't use cmd line args / url)
                test['url'] = None
                set_tp_preferences(test, browser_config)

            mytest = TTest()

            # some tests like ts_paint return multiple results in a single iteration
            if test.get('firstpaint', False) or test.get('userready', None):
                # we need a 'testeventmap' to tell us which tests each event should map to
                multi_value_result = None
                separate_results_list = []

                test_event_map = test.get('testeventmap', None)
                if test_event_map is None:
                    raise TalosError("Need 'testeventmap' in test.py for %s" % test.get('name'))

                # run the test
                multi_value_result = mytest.runTest(browser_config, test)
                if multi_value_result is None:
                    raise TalosError("Abort: no results returned for %s" % test.get('name'))

                # parse the multi-value results and 'fake' them so they appear as separate tests
                separate_results_list = convert_to_separate_test_results(multi_value_result,
                                                                         test_event_map)

                # now we have three separate test results, store them
                for test_result in separate_results_list:
                    talos_results.add(test_result)

            # some tests like bloom_basic run two separate tests and then compare those values
            # we want the results in perfherder to only be the actual difference between those
            # and store the base and reference test replicates in results.json for upload
            elif test.get('base_vs_ref', False):
                # run the test; results will be reported for each page, as if two tests were in the suite
                base_and_reference_results = mytest.runTest(browser_config, test)
                # now compare each test, and create a new test object for the comparison
                talos_results.add(make_comparison_result(base_and_reference_results))
            else:
                # just expecting regular test - one result value per iteration
                talos_results.add(mytest.runTest(browser_config, test))
            LOG.test_end(testname, status='OK')

    except TalosRegression as exc:
        LOG.error("Detected a regression for %s" % testname)
        # by returning 1, we report an orange to buildbot
        # http://docs.buildbot.net/latest/developer/results.html
        LOG.test_end(testname, status='FAIL', message=str(exc),
                     stack=traceback.format_exc())
        return 1
    except Exception as exc:
        # NOTE: if we get into this condition, talos has an internal
        # problem and cannot continue; this will prevent future tests
        # from running
        LOG.test_end(testname, status='ERROR', message=str(exc),
                     stack=traceback.format_exc())
        # indicate a failure to buildbot, turn the job red
        return 2
    finally:
        LOG.suite_end()
        httpd.stop()

    LOG.info("Completed test suite (%s)" % timer.elapsed())

    # if mitmproxy was used for page playback, stop it
    if mitmproxy_recordings_list is not False:
        mitmproxy.stop_mitmproxy_playback(mitmproxy_proc)

    # output results
    if results_urls and not browser_config['no_upload_results']:
        talos_results.output(results_urls)
        if browser_config['develop'] or config['gecko_profile']:
            print("Thanks for running Talos locally. Results are in %s"
                  % (results_urls['output_urls']))

    # we stop running tests after a failed test; otherwise return 0 for green
    return 0
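
For orientation, here is a minimal, hypothetical driver for the function above. Everything in it is an assumption: the two dictionaries only contain the keys this version of run_tests reads, the values are placeholders, and a real Talos run builds these structures from its own command-line and config machinery.

# illustrative only -- all keys, paths and values below are assumptions,
# not the real Talos configuration format
import sys

if __name__ == '__main__':
    config = {
        'tests': [],                 # list of test dicts (empty here for brevity)
        'title': 'local-run',
        'testdate': '',
        'gecko_profile': False,
        'enable_stylo': False,
        'disable_stylo': False,
    }
    browser_config = {
        'develop': True,
        'browser_path': '/path/to/firefox',   # placeholder path
        'process': '',
        'extensions': [],
        'bcontroller_config': 'bcontroller.json',
        'webserver': 'localhost:15707',
        'e10s': True,
        'no_upload_results': True,
        'preferences': {},
        'branch_name': '',
    }
    # exit status mirrors the return codes above:
    # 0 = green, 1 = regression (orange), 2 = internal error (red)
    sys.exit(run_tests(config, browser_config))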
Example #8
def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report.
    """
    # get the test data
    tests = config['tests']
    tests = useBaseTestDefaults(config.get('basetest', {}), tests)

    paths = ['profile_path', 'tpmanifest', 'extensions', 'setup', 'cleanup']
    for test in tests:

        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                test[path] = utils.interpolate(test[path])
        if test.get('tpmanifest'):
            test['tpmanifest'] = \
                os.path.normpath('file:/%s' % (urllib.quote(test['tpmanifest'],
                                               '/\\t:\\')))
        if not test.get('url'):
            # build 'url' for tptest
            test['url'] = buildCommandLine(test)
        test['url'] = utils.interpolate(test['url'])
        test['setup'] = utils.interpolate(test['setup'])
        test['cleanup'] = utils.interpolate(test['cleanup'])

    # pass --no-remote to firefox launch, if --develop is specified
    if browser_config['develop']:
        browser_config['extra_args'] = '--no-remote'

    # set defaults
    title = config.get('title', '')
    testdate = config.get('testdate', '')

    if browser_config['e10s'] and not title.endswith(".e"):
        # we are running in e10s mode
        title = "%s.e" % (title,)

    # get the process name from the path to the browser
    if not browser_config['process']:
        browser_config['process'] = \
            os.path.basename(browser_config['browser_path'])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config['extensions'] = [utils.interpolate(i)
                                    for i in browser_config['extensions']]
    browser_config['bcontroller_config'] = \
        utils.interpolate(browser_config['bcontroller_config'])

    # normalize browser path to work across platforms
    browser_config['browser_path'] = \
        os.path.normpath(browser_config['browser_path'])

    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config['browser_name'] = version_info['application_name']
    browser_config['browser_version'] = version_info['application_version']
    browser_config['buildid'] = version_info['application_buildid']
    try:
        browser_config['repository'] = version_info['application_repository']
        browser_config['sourcestamp'] = version_info['application_changeset']
    except KeyError:
        if not browser_config['develop']:
            print "unable to find changeset or repository: %s" % version_info
            sys.exit()
        else:
            browser_config['repository'] = 'develop'
            browser_config['sourcestamp'] = 'develop'

    # get test date in seconds since epoch
    if testdate:
        date = int(time.mktime(time.strptime(testdate,
                                             '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())
    logging.debug("using testdate: %d", date)
    logging.debug("actual date: %d", int(time.time()))

    # results container
    talos_results = TalosResults(title=title,
                                 date=date,
                                 browser_config=browser_config)

    # results links
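    # two output channels are configured here: the (hardcoded) legacy graph
    # server collector and a Perfherder-style json file -- 'datazilla' below is
    # just the historical name for the latter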
    if not browser_config['develop']:
        results_urls = dict(
            # hardcoded, this will be removed soon anyway.
            results_urls=['http://graphs.mozilla.org/server/collect.cgi'],
            # another hack; 'datazilla' here stands for Perfherder, which does
            # not require a url, but a non-empty dict is required...
            datazilla_urls=['local.json'],
        )
    else:
        # local mode, output to files
        results_urls = dict(
            results_urls=[os.path.abspath('local.out')],
            datazilla_urls=[os.path.abspath('local.json')]
        )
    talos_results.check_output_formats(results_urls)

    # setup a webserver, if --develop is specified
    httpd = None
    if browser_config['develop']:
        httpd = setup_webserver(browser_config['webserver'])
        if httpd:
            httpd.start()

    # run the tests
    timer = utils.Timer()
    logging.info("Starting test suite %s", title)
    for test in tests:
        testname = test['name']
        testtimer = utils.Timer()
        logging.info("Starting test %s", testname)

        try:
            mytest = TTest()
            if mytest:
                talos_results.add(mytest.runTest(browser_config, test))
            else:
                logging.error("Error found while running %s", testname)
        except TalosRegression:
            logging.error("Detected a regression for %s", testname)
            if httpd:
                httpd.stop()
            # by returning 1, we report an orange to buildbot
            # http://docs.buildbot.net/latest/developer/results.html
            return 1
        except (TalosCrash, TalosError):
            # NOTE: if we get into this condition, talos has an internal
            # problem and cannot continue; this will prevent future tests
            # from running
            traceback.print_exception(*sys.exc_info())
            if httpd:
                httpd.stop()
            # indicate a failure to buildbot, turn the job red
            return 2

        logging.info("Completed test %s (%s)", testname, testtimer.elapsed())

    logging.info("Completed test suite (%s)", timer.elapsed())

    # stop the webserver if running
    if httpd:
        httpd.stop()

    # output results
    if results_urls:
        talos_results.output(results_urls)
        if browser_config['develop']:
            print("\nThanks for running Talos locally. Results are in"
                  " %s and %s" % (results_urls['results_urls'],
                                  results_urls['datazilla_urls']))

    # we stop running tests after a failed test; otherwise return 0 for green
    return 0