Code Example #1
    def download(self):
        """Download and unpack mitmproxy binary and pageset using tooltool"""
        if not os.path.exists(self.raptor_dir):
            os.makedirs(self.raptor_dir)

        if 'win' in self.config['platform']:
            # on windows we need a python3 environment and use our own package from tooltool
            self.py3_path = self.fetch_python3()
            LOG.info("python3 path is: %s" % self.py3_path)
        else:
            # on osx and linux we use pre-built binaries
            LOG.info("downloading mitmproxy binary")
            _manifest = os.path.join(here,
                                     self.config['playback_binary_manifest'])
            transformed_manifest = transform_platform(_manifest,
                                                      self.config['platform'])
            tooltool_download(transformed_manifest, self.config['run_local'],
                              self.raptor_dir)

        # we use one pageset for all platforms
        LOG.info("downloading mitmproxy pageset")
        _manifest = os.path.join(here,
                                 self.config['playback_pageset_manifest'])
        transformed_manifest = transform_platform(_manifest,
                                                  self.config['platform'])
        tooltool_download(transformed_manifest, self.config['run_local'],
                          self.raptor_dir)
        return
Code Example #2
File: test_utils.py Project: shashank100/gecko-dev
def test_transform_platform(platform):
    transformed = transform_platform("mitmproxy-rel-bin-{platform}.manifest", platform)
    assert "{platform}" not in transformed
    if platform == 'mac':
        assert "osx" in transformed
    else:
        assert platform in transformed
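The test above only pins down the observable behaviour of transform_platform: the {platform} placeholder is replaced, and 'mac' maps to 'osx'. A minimal sketch consistent with that behaviour (an assumption for illustration, not the actual gecko-dev implementation; the processor placeholder exercised in Code Example #10 below is sketched separately there) might look like this:

def transform_platform(str_to_transform, config_platform, config_processor=None):
    """Hypothetical sketch: substitute a {platform} placeholder in a string.

    'mac' is mapped to 'osx' to match how the manifests are named; any other
    platform name is substituted as-is.  Strings without the placeholder are
    returned unchanged (see Code Example #11).
    """
    if "{platform}" in str_to_transform:
        platform_id = "osx" if config_platform == "mac" else config_platform
        str_to_transform = str_to_transform.replace("{platform}", platform_id)
    return str_to_transform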
Code Example #3
File: mitmproxy.py Project: alifunterz/gecko-dev
    def download(self):
        """Download and unpack mitmproxy binary and pageset using tooltool"""
        if not os.path.exists(self.raptor_dir):
            os.makedirs(self.raptor_dir)

        LOG.info("downloading mitmproxy binary")
        _manifest = os.path.join(here, self.config['playback_binary_manifest'])
        transformed_manifest = transform_platform(_manifest,
                                                  self.config['platform'])
        tooltool_download(transformed_manifest, self.config['run_local'],
                          self.raptor_dir)

        # we use one pageset for all platforms
        LOG.info("downloading mitmproxy pageset")
        _manifest = os.path.join(here,
                                 self.config['playback_pageset_manifest'])
        transformed_manifest = transform_platform(_manifest,
                                                  self.config['platform'])
        tooltool_download(transformed_manifest, self.config['run_local'],
                          self.raptor_dir)
        return
Code Example #4
    def fetch_python3(self):
        """Mitmproxy on windows needs Python 3.x"""
        python3_path = os.path.join(self.raptor_dir, 'python3.6', 'python')
        if not os.path.exists(os.path.dirname(python3_path)):
            _manifest = os.path.join(here, self.config['python3_win_manifest'])
            transformed_manifest = transform_platform(_manifest, self.config['platform'],
                                                      self.config['processor'])
            LOG.info("downloading py3 package for mitmproxy windows: %s" % transformed_manifest)
            self._tooltool_fetch(transformed_manifest)
        cmd = [python3_path, '--version']
        # just want python3 ver printed in production log
        subprocess.Popen(cmd, env=os.environ.copy())
        return python3_path
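The Popen call above just fires python3 --version so the version string ends up in the production log; it neither waits for the child process nor captures its output. If the version were needed in the calling code, a hedged alternative (using python3_path and LOG from the method above; not part of the original function) would be:

import subprocess

# Hypothetical variant: wait for the interpreter and capture the version
# string instead of only letting it print to the log.  check_output raises
# CalledProcessError on a non-zero exit status.
version = subprocess.check_output([python3_path, '--version'],
                                  stderr=subprocess.STDOUT).decode().strip()
LOG.info("python3 version is: %s" % version)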
Code Example #5
def write_test_settings_json(test_details, oskey):
    # write test settings json file with test details that the control
    # server will provide for the web ext
    test_url = transform_platform(test_details['test_url'], oskey)
    test_settings = {
        "raptor-options": {
            "type": test_details['type'],
            "test_url": test_url,
            "page_cycles": int(test_details['page_cycles'])
        }
    }

    if test_details['type'] == "pageload":
        test_settings['raptor-options']['measure'] = {}
        if "dcf" in test_details['measure']:
            test_settings['raptor-options']['measure']['dcf'] = True
        if "fnbpaint" in test_details['measure']:
            test_settings['raptor-options']['measure']['fnbpaint'] = True
        if "fcp" in test_details['measure']:
            test_settings['raptor-options']['measure']['fcp'] = True
        if "hero" in test_details['measure']:
            test_settings['raptor-options']['measure']['hero'] = test_details[
                'hero'].split()
    if test_details.get("page_timeout", None) is not None:
        test_settings['raptor-options']['page_timeout'] = int(
            test_details['page_timeout'])
    test_settings['raptor-options']['unit'] = test_details.get("unit", "ms")
    if test_details.get("lower_is_better", "true") == "false":
        test_settings['raptor-options']['lower_is_better'] = False
    else:
        test_settings['raptor-options']['lower_is_better'] = True

    # support optional subtest unit/lower_is_better fields, default to main test values if not set
    val = test_details.get('subtest_unit',
                           test_settings['raptor-options']['unit'])
    test_settings['raptor-options']['subtest_unit'] = val
    val = test_details.get('subtest_lower',
                           test_settings['raptor-options']['lower_is_better'])
    test_settings['raptor-options']['subtest_lower_is_better'] = val

    if test_details.get("alert_threshold", None) is not None:
        test_settings['raptor-options']['alert_threshold'] = float(
            test_details['alert_threshold'])

    settings_file = os.path.join(tests_dir, test_details['name'] + '.json')
    try:
        with open(settings_file, 'w') as out_file:
            json.dump(test_settings, out_file, indent=4, ensure_ascii=False)
            out_file.close()
    except IOError:
        LOG.info("abort: exception writing test settings json!")
Code Example #6
    def download(self):
        # download mitmproxy binary and pageset using tooltool
        # note: tooltool automatically unpacks the files as well
        if not os.path.exists(self.raptor_dir):
            os.makedirs(self.raptor_dir)

        if 'win' in self.config['platform']:
            # on windows we need a python3 environment and use our own package from tooltool
            self.py3_path = self.fetch_python3()
            LOG.info("python3 path is: %s" % self.py3_path)
        else:
            # on osx and linux we use pre-built binaries
            LOG.info("downloading mitmproxy binary")
            _manifest = os.path.join(here, self.config['playback_binary_manifest'])
            transformed_manifest = transform_platform(_manifest, self.config['platform'])
            self._tooltool_fetch(transformed_manifest)

        # we use one pageset for all platforms (pageset was recorded on win10)
        LOG.info("downloading mitmproxy pageset")
        _manifest = os.path.join(here, self.config['playback_pageset_manifest'])
        transformed_manifest = transform_platform(_manifest, self.config['platform'])
        self._tooltool_fetch(transformed_manifest)
        return
Code Example #7
def write_test_settings_json(args, test_details, oskey):
    # write test settings json file with test details that the control
    # server will provide for the web ext
    test_url = transform_platform(test_details['test_url'], oskey)

    test_settings = {
        "raptor-options": {
            "type": test_details['type'],
            "cold": test_details['cold'],
            "test_url": test_url,
            "expected_browser_cycles": test_details['expected_browser_cycles'],
            "page_cycles": int(test_details['page_cycles']),
            "host": args.host,
        }
    }

    if test_details['type'] == "pageload":
        test_settings['raptor-options']['measure'] = {}

        # test_details['measure'] was already converted to a list in get_raptor_test_list below
        # the 'hero=' line is still a raw string from the test INI
        for m in test_details['measure']:
            test_settings['raptor-options']['measure'][m] = True
            if m == 'hero':
                test_settings['raptor-options']['measure'][m] = [
                    h.strip() for h in test_details['hero'].split(',')
                ]

        if test_details.get("alert_on", None) is not None:
            # alert_on was already converted to list above
            test_settings['raptor-options']['alert_on'] = test_details[
                'alert_on']

    if test_details.get("page_timeout", None) is not None:
        test_settings['raptor-options']['page_timeout'] = int(
            test_details['page_timeout'])

    test_settings['raptor-options']['unit'] = test_details.get("unit", "ms")

    test_settings['raptor-options']['lower_is_better'] = test_details.get(
        "lower_is_better", True)

    # support optional subtest unit/lower_is_better fields
    val = test_details.get('subtest_unit',
                           test_settings['raptor-options']['unit'])
    test_settings['raptor-options']['subtest_unit'] = val
    subtest_lower_is_better = test_details.get('subtest_lower_is_better')

    if subtest_lower_is_better is None:
        # default to main test values if not set
        test_settings['raptor-options']['subtest_lower_is_better'] = (
            test_settings['raptor-options']['lower_is_better'])
    else:
        test_settings['raptor-options'][
            'subtest_lower_is_better'] = subtest_lower_is_better

    if test_details.get("alert_change_type", None) is not None:
        test_settings['raptor-options']['alert_change_type'] = test_details[
            'alert_change_type']

    if test_details.get("alert_threshold", None) is not None:
        test_settings['raptor-options']['alert_threshold'] = float(
            test_details['alert_threshold'])

    if test_details.get("screen_capture", None) is not None:
        test_settings['raptor-options']['screen_capture'] = test_details.get(
            "screen_capture")

    # if Gecko profiling is enabled, write profiling settings for webext
    if test_details.get("gecko_profile", False):
        threads = ['GeckoMain', 'Compositor']

        # With WebRender enabled profile some extra threads
        if os.getenv('MOZ_WEBRENDER') == '1':
            threads.extend(['Renderer', 'WR'])

        if test_details.get('gecko_profile_threads'):
            test_threads = filter(
                None, test_details['gecko_profile_threads'].split(','))
            threads.extend(test_threads)

        test_settings['raptor-options'].update({
            'gecko_profile':
            True,
            'gecko_profile_entries':
            int(test_details.get('gecko_profile_entries', 1000000)),
            'gecko_profile_interval':
            int(test_details.get('gecko_profile_interval', 1)),
            'gecko_profile_threads':
            ','.join(set(threads)),
        })

    if test_details.get("newtab_per_cycle", None) is not None:
        test_settings['raptor-options']['newtab_per_cycle'] = \
            bool(test_details['newtab_per_cycle'])

    if test_details['type'] == "scenario":
        test_settings['raptor-options']['scenario_time'] = test_details[
            'scenario_time']
        if 'background_test' in test_details:
            test_settings['raptor-options']['background_test'] = \
                bool(test_details['background_test'])
        else:
            test_settings['raptor-options']['background_test'] = False

    jsons_dir = os.path.join(tests_dir, 'json')

    if not os.path.exists(jsons_dir):
        os.mkdir(os.path.join(tests_dir, 'json'))

    settings_file = os.path.join(jsons_dir, test_details['name'] + '.json')
    try:
        with open(settings_file, 'w') as out_file:
            json.dump(test_settings, out_file, indent=4, ensure_ascii=False)
            out_file.close()
    except IOError:
        LOG.info("abort: exception writing test settings json!")
Code Example #8
def write_test_settings_json(args, test_details, oskey):
    # write test settings json file with test details that the control
    # server will provide for the web ext
    test_url = transform_platform(test_details["test_url"], oskey)
    # this is needed for raptor browsertime to pick up the replaced
    # {platform} argument for motionmark tests
    test_details["test_url"] = test_url

    test_settings = {
        "raptor-options": {
            "type": test_details["type"],
            "cold": test_details["cold"],
            "test_url": test_url,
            "expected_browser_cycles": test_details["expected_browser_cycles"],
            "page_cycles": int(test_details["page_cycles"]),
            "host": args.host,
        }
    }

    if test_details["type"] == "pageload":
        test_settings["raptor-options"]["measure"] = {}

        # test_details['measure'] was already converted to a list in get_raptor_test_list below
        # the 'hero=' line is still a raw string from the test INI
        for m in test_details["measure"]:
            test_settings["raptor-options"]["measure"][m] = True
            if m == "hero":
                test_settings["raptor-options"]["measure"][m] = [
                    h.strip() for h in test_details["hero"].split(",")
                ]

        if test_details.get("alert_on", None) is not None:
            # alert_on was already converted to list above
            test_settings["raptor-options"]["alert_on"] = test_details["alert_on"]

    if test_details.get("page_timeout", None) is not None:
        test_settings["raptor-options"]["page_timeout"] = int(
            test_details["page_timeout"]
        )

    test_settings["raptor-options"]["unit"] = test_details.get("unit", "ms")

    test_settings["raptor-options"]["lower_is_better"] = test_details.get(
        "lower_is_better", True
    )

    # support optional subtest unit/lower_is_better fields
    val = test_details.get("subtest_unit", test_settings["raptor-options"]["unit"])
    test_settings["raptor-options"]["subtest_unit"] = val
    subtest_lower_is_better = test_details.get("subtest_lower_is_better")

    if subtest_lower_is_better is None:
        # default to main test values if not set
        test_settings["raptor-options"]["subtest_lower_is_better"] = test_settings[
            "raptor-options"
        ]["lower_is_better"]
    else:
        test_settings["raptor-options"][
            "subtest_lower_is_better"
        ] = subtest_lower_is_better

    if test_details.get("alert_change_type", None) is not None:
        test_settings["raptor-options"]["alert_change_type"] = test_details[
            "alert_change_type"
        ]

    if test_details.get("alert_threshold", None) is not None:
        test_settings["raptor-options"]["alert_threshold"] = float(
            test_details["alert_threshold"]
        )

    if test_details.get("screen_capture", None) is not None:
        test_settings["raptor-options"]["screen_capture"] = test_details.get(
            "screen_capture"
        )

    # if Gecko profiling is enabled, write profiling settings for webext
    if test_details.get("gecko_profile", False):
        threads = ["GeckoMain", "Compositor"]

        # With WebRender enabled profile some extra threads
        if os.getenv("MOZ_WEBRENDER") == "1":
            threads.extend(["Renderer", "WR"])

        if test_details.get("gecko_profile_threads"):
            # pylint --py3k: W1639
            test_threads = list(
                filter(None, test_details["gecko_profile_threads"].split(","))
            )
            threads.extend(test_threads)

        test_settings["raptor-options"].update(
            {
                "gecko_profile": True,
                "gecko_profile_entries": int(
                    test_details.get("gecko_profile_entries", 1000000)
                ),
                "gecko_profile_interval": int(
                    test_details.get("gecko_profile_interval", 1)
                ),
                "gecko_profile_threads": ",".join(set(threads)),
            }
        )

    if test_details.get("newtab_per_cycle", None) is not None:
        test_settings["raptor-options"]["newtab_per_cycle"] = bool(
            test_details["newtab_per_cycle"]
        )

    if test_details["type"] == "scenario":
        test_settings["raptor-options"]["scenario_time"] = test_details["scenario_time"]
        if "background_test" in test_details:
            test_settings["raptor-options"]["background_test"] = bool(
                test_details["background_test"]
            )
        else:
            test_settings["raptor-options"]["background_test"] = False

    jsons_dir = os.path.join(tests_dir, "json")

    if not os.path.exists(jsons_dir):
        os.mkdir(os.path.join(tests_dir, "json"))

    settings_file = os.path.join(jsons_dir, test_details["name"] + ".json")
    try:
        with open(settings_file, "w") as out_file:
            json.dump(test_settings, out_file, indent=4, ensure_ascii=False)
            out_file.close()
    except IOError:
        LOG.info("abort: exception writing test settings json!")
Code Example #9
def write_test_settings_json(test_details, oskey):
    # write test settings json file with test details that the control
    # server will provide for the web ext
    test_url = transform_platform(test_details['test_url'], oskey)

    test_settings = {
        "raptor-options": {
            "type": test_details['type'],
            "test_url": test_url,
            "page_cycles": int(test_details['page_cycles'])
        }
    }

    if test_details['type'] == "pageload":
        test_settings['raptor-options']['measure'] = {}
        if "dcf" in test_details['measure']:
            test_settings['raptor-options']['measure']['dcf'] = True
        if "fnbpaint" in test_details['measure']:
            test_settings['raptor-options']['measure']['fnbpaint'] = True
        if "fcp" in test_details['measure']:
            test_settings['raptor-options']['measure']['fcp'] = True
        if "hero" in test_details['measure']:
            test_settings['raptor-options']['measure']['hero'] = test_details[
                'hero'].split()
        if "ttfi" in test_details['measure']:
            test_settings['raptor-options']['measure']['ttfi'] = True
    if test_details.get("page_timeout", None) is not None:
        test_settings['raptor-options']['page_timeout'] = int(
            test_details['page_timeout'])
    test_settings['raptor-options']['unit'] = test_details.get("unit", "ms")
    if test_details.get("lower_is_better", "true") == "false":
        test_settings['raptor-options']['lower_is_better'] = False
    else:
        test_settings['raptor-options']['lower_is_better'] = True

    # support optional subtest unit/lower_is_better fields, default to main test values if not set
    val = test_details.get('subtest_unit',
                           test_settings['raptor-options']['unit'])
    test_settings['raptor-options']['subtest_unit'] = val
    val = test_details.get('subtest_lower',
                           test_settings['raptor-options']['lower_is_better'])
    test_settings['raptor-options']['subtest_lower_is_better'] = val

    if test_details.get("alert_threshold", None) is not None:
        test_settings['raptor-options']['alert_threshold'] = float(
            test_details['alert_threshold'])

    # if gecko profiling is enabled, write profiling settings for webext
    if test_details.get("gecko_profile", False):
        test_settings['raptor-options']['gecko_profile'] = True
        # when profiling, if webRender is enabled we need to set that, so
        # the runner can add the web render threads to gecko profiling
        test_settings['raptor-options']['gecko_profile_interval'] = \
            float(test_details.get("gecko_profile_interval", 0))
        test_settings['raptor-options']['gecko_profile_entries'] = \
            float(test_details.get("gecko_profile_entries", 0))
        if str(os.getenv('MOZ_WEBRENDER')) == '1':
            test_settings['raptor-options']['webrender_enabled'] = True

    if test_details.get("newtab_per_cycle", None) is not None:
        test_settings['raptor-options']['newtab_per_cycle'] = \
            bool(test_details['newtab_per_cycle'])

    settings_file = os.path.join(tests_dir, test_details['name'] + '.json')
    try:
        with open(settings_file, 'w') as out_file:
            json.dump(test_settings, out_file, indent=4, ensure_ascii=False)
            out_file.close()
    except IOError:
        LOG.info("abort: exception writing test settings json!")
Code Example #10
def test_transform_platform_processor(processor):
    transformed = transform_platform("string-with-processor-{x64}.manifest",
                                     'win', processor)
    assert "{x64}" not in transformed
    if processor == 'x86_64':
        assert "_x64" in transformed
Code Example #11
def test_transform_platform_no_change():
    starting_string = "nothing-in-here-to-transform"
    assert transform_platform(starting_string, 'mac') == starting_string
Code Example #12
File: manifest.py Project: alifunterz/gecko-dev
def write_test_settings_json(args, test_details, oskey):
    # write test settings json file with test details that the control
    # server will provide for the web ext
    test_url = transform_platform(test_details['test_url'], oskey)

    test_settings = {
        "raptor-options": {
            "type": test_details['type'],
            "test_url": test_url,
            "page_cycles": int(test_details['page_cycles']),
            "host": args.host,
        }
    }

    if test_details['type'] == "pageload":
        test_settings['raptor-options']['measure'] = {}

        # test_details['measure'] was already converted to a list in get_raptor_test_list below
        # the 'hero=' line is still a raw string from the test INI
        for m in test_details['measure']:
            test_settings['raptor-options']['measure'][m] = True
            if m == 'hero':
                test_settings['raptor-options']['measure'][m] = [
                    h.strip() for h in test_details['hero'].split(',')
                ]

        if test_details.get("alert_on", None) is not None:
            # alert_on was already converted to list above
            test_settings['raptor-options']['alert_on'] = test_details[
                'alert_on']

    if test_details.get("page_timeout", None) is not None:
        test_settings['raptor-options']['page_timeout'] = int(
            test_details['page_timeout'])

    test_settings['raptor-options']['unit'] = test_details.get("unit", "ms")

    if test_details.get("lower_is_better", "true") == "false":
        test_settings['raptor-options']['lower_is_better'] = False
    else:
        test_settings['raptor-options']['lower_is_better'] = True

    # support optional subtest unit/lower_is_better fields, default to main test values if not set
    val = test_details.get('subtest_unit',
                           test_settings['raptor-options']['unit'])
    test_settings['raptor-options']['subtest_unit'] = val
    val = test_details.get('subtest_lower_is_better',
                           test_settings['raptor-options']['lower_is_better'])
    if val == "false":
        test_settings['raptor-options']['subtest_lower_is_better'] = False
    else:
        test_settings['raptor-options']['subtest_lower_is_better'] = True

    if test_details.get("alert_threshold", None) is not None:
        test_settings['raptor-options']['alert_threshold'] = float(
            test_details['alert_threshold'])

    if test_details.get("screen_capture", None) is not None:
        test_settings['raptor-options']['screen_capture'] = test_details.get(
            "screen_capture")

    # if gecko profiling is enabled, write profiling settings for webext
    if test_details.get("gecko_profile", False):
        test_settings['raptor-options']['gecko_profile'] = True
        # when profiling, if webRender is enabled we need to set that, so
        # the runner can add the web render threads to gecko profiling
        test_settings['raptor-options']['gecko_profile_interval'] = \
            float(test_details.get("gecko_profile_interval", 0))
        test_settings['raptor-options']['gecko_profile_entries'] = \
            float(test_details.get("gecko_profile_entries", 0))
        if str(os.getenv('MOZ_WEBRENDER')) == '1':
            test_settings['raptor-options']['webrender_enabled'] = True

    if test_details.get("newtab_per_cycle", None) is not None:
        test_settings['raptor-options']['newtab_per_cycle'] = \
            bool(test_details['newtab_per_cycle'])

    settings_file = os.path.join(tests_dir, test_details['name'] + '.json')
    try:
        with open(settings_file, 'w') as out_file:
            json.dump(test_settings, out_file, indent=4, ensure_ascii=False)
            out_file.close()
    except IOError:
        LOG.info("abort: exception writing test settings json!")