def test_parsing_testvars(mach_parsed_kwargs):
    """Testvars loaded from several files are deep-merged into one dict."""
    mach_parsed_kwargs.pop('tests')
    loaded_files = [
        {
            "wifi": {
                "ssid": "blah",
                "keyManagement": "WPA-PSK",
                "psk": "foo"
            }
        },
        {
            "wifi": {"PEAP": "bar"},
            "device": {"stuff": "buzz"}
        },
    ]
    merged = {
        "wifi": {
            "ssid": "blah",
            "keyManagement": "WPA-PSK",
            "psk": "foo",
            "PEAP": "bar"
        },
        "device": {"stuff": "buzz"},
    }
    # Stub out the file loading so the merge logic is tested in isolation.
    with patch(
            'marionette_harness.runtests.MarionetteTestRunner._load_testvars',
            return_value=loaded_files) as load:
        runner = MarionetteTestRunner(**mach_parsed_kwargs)
        assert runner.testvars == merged
        assert load.call_count == 1
def kwarg_builder(new_items, return_socket=False):
    """Merge *new_items* into the parsed args and build runner kwargs.

    Returns the built kwargs, plus the mocked socket module when
    *return_socket* is true.
    """
    mach_parsed_kwargs.update(new_items)
    test_runner = MarionetteTestRunner(**mach_parsed_kwargs)
    # Mock out the socket module so _build_kwargs can't touch the network.
    with patch('marionette_harness.runner.base.socket') as mock_socket:
        kwargs = test_runner._build_kwargs()
    return (kwargs, mock_socket) if return_socket else kwargs
def test_option_repeat(mach_parsed_kwargs, repeat):
    """The repeat option defaults to 0 and otherwise passes straight through."""
    if repeat is not None:
        mach_parsed_kwargs['repeat'] = repeat
    runner = MarionetteTestRunner(**mach_parsed_kwargs)
    expected = 0 if repeat is None else repeat
    assert runner.repeat == expected
def test_option_e10s_sets_prefs(mach_parsed_kwargs, e10s):
    """The e10s option toggles the remote-tabs prefs on the runner.

    When e10s is enabled the prefs must be set to True; when disabled they
    must be absent/falsy (runner.prefs.get defaults to False).
    """
    mach_parsed_kwargs['e10s'] = e10s
    runner = MarionetteTestRunner(**mach_parsed_kwargs)
    e10s_prefs = {
        'browser.tabs.remote.autostart': True,
        'browser.tabs.remote.force-enable': True,
    }
    # .items() instead of the Python-2-only .iteritems() so the test also
    # runs under Python 3 (semantics are identical for this iteration).
    for k, v in e10s_prefs.items():
        assert runner.prefs.get(k, False) == (v and e10s)
def test_parsing_optional_arguments(mach_parsed_kwargs, arg_name, arg_dest,
                                    arg_value, expected_value):
    """An optional CLI argument survives parsing and kwarg-building intact."""
    parsed = vars(
        MarionetteArguments().parse_args(['--' + arg_name, arg_value]))
    assert parsed.get(arg_dest) == expected_value
    mach_parsed_kwargs[arg_dest] = parsed[arg_dest]
    built = MarionetteTestRunner(**mach_parsed_kwargs)._build_kwargs()
    assert built[arg_dest] == expected_value
def test_parse_opt_args_emulator(mach_parsed_kwargs, arg_name, arg_dest,
                                 arg_value, expected_value):
    """Optional arguments also round-trip correctly when running an emulator."""
    parsed = vars(
        MarionetteArguments().parse_args(["--" + arg_name, arg_value]))
    assert parsed.get(arg_dest) == expected_value
    mach_parsed_kwargs[arg_dest] = parsed[arg_dest]
    # Emulator mode exercises a different _build_kwargs code path.
    mach_parsed_kwargs["emulator"] = True
    built = MarionetteTestRunner(**mach_parsed_kwargs)._build_kwargs()
    assert built[arg_dest] == expected_value
def test_load_testvars_throws_expected_errors(mach_parsed_kwargs):
    """_load_testvars raises IOError for missing files and an Exception
    with a helpful message for malformed JSON."""
    mach_parsed_kwargs['testvars'] = ['some_bad_path.json']
    runner = MarionetteTestRunner(**mach_parsed_kwargs)
    with pytest.raises(IOError) as io_exc:
        runner._load_testvars()
    # str(exc.value) instead of the deprecated .message attribute
    # (warned since Python 2.6, removed in Python 3); matches the sibling
    # version of this test elsewhere in the file.
    assert 'does not exist' in str(io_exc.value)
    with patch('os.path.exists', return_value=True):
        with patch('__builtin__.open', mock_open(read_data='[not {valid JSON]')):
            with pytest.raises(Exception) as json_exc:
                runner._load_testvars()
            assert 'not properly formatted' in str(json_exc.value)
def test_load_testvars_throws_expected_errors(mach_parsed_kwargs):
    """Missing testvars files raise IOError; malformed JSON raises an
    Exception whose message mentions the formatting problem."""
    mach_parsed_kwargs["testvars"] = ["some_bad_path.json"]
    runner = MarionetteTestRunner(**mach_parsed_kwargs)
    with pytest.raises(IOError) as missing_file:
        runner._load_testvars()
    assert "does not exist" in str(missing_file.value)
    bad_json = mock_open(read_data="[not {valid JSON]")
    with patch("os.path.exists", return_value=True), \
            patch("__builtin__.open", bad_json):
        with pytest.raises(Exception) as bad_format:
            runner._load_testvars()
        assert "not properly formatted" in str(bad_format.value)
def test_option_e10s_sets_prefs(mach_parsed_kwargs, e10s):
    """The e10s option toggles the remote-tabs prefs on the runner.

    The blocking pref (extensions.e10sBlocksEnabling) is only written when
    e10s is enabled, so it is skipped in the disabled case.
    """
    mach_parsed_kwargs['e10s'] = e10s
    runner = MarionetteTestRunner(**mach_parsed_kwargs)
    e10s_prefs = {
        'browser.tabs.remote.autostart': True,
        'browser.tabs.remote.force-enable': True,
        'extensions.e10sBlocksEnabling': False,
    }
    # .items() instead of the Python-2-only .iteritems() so the test also
    # runs under Python 3 (semantics are identical for this iteration).
    for k, v in e10s_prefs.items():
        # This pref is only set when e10s is on; don't assert its absence.
        if k == 'extensions.e10sBlocksEnabling' and not e10s:
            continue
        assert runner.prefs.get(k, False) == (v and e10s)
def test_option_run_until_failure(mach_parsed_kwargs, repeat, run_until_failure):
    """run_until_failure defaults to False; when enabled it bumps the default
    repeat count to 30 unless an explicit repeat value was supplied."""
    if run_until_failure is not None:
        mach_parsed_kwargs["run_until_failure"] = run_until_failure
    if repeat is not None:
        mach_parsed_kwargs["repeat"] = repeat
    runner = MarionetteTestRunner(**mach_parsed_kwargs)
    if run_until_failure is None:
        assert runner.run_until_failure is False
        default_repeat = 0
    else:
        assert runner.run_until_failure == run_until_failure
        default_repeat = 30
    assert runner.repeat == (default_repeat if repeat is None else repeat)
def runner(mach_parsed_kwargs):
    """Fixture: a MarionetteTestRunner built from the default parsed options."""
    default_runner = MarionetteTestRunner(**mach_parsed_kwargs)
    return default_runner
def open_urls(self, urls, marionette_port=24242):
    """Run the memory-usage marionette test over *urls*.

    Builds a Firefox profile with e10s/proxy/marionette prefs, creates a
    MarionetteTestRunner bound to *marionette_port*, injects the testvars,
    and runs test_memory_usage.py. Failures in the run are logged
    best-effort rather than propagated.
    """
    testvars = {
        'perTabPause': self.per_tab_pause,
        'settleWaitTime': self.settle_wait_time,
        'entities': len(urls),
        'urls': urls,
        'stats': self.stats,
    }
    e10s = self.process_count > 0
    prefs = {
        # Don't open the first-run dialog, it loads a video
        'startup.homepage_welcome_url': '',
        'startup.homepage_override_url': '',
        'browser.newtab.url': 'about:blank',
        # make sure e10s is enabled
        "browser.tabs.remote.autostart": e10s,
        "browser.tabs.remote.autostart.1": e10s,
        "browser.tabs.remote.autostart.2": e10s,
        "browser.tabs.remote.autostart.3": e10s,
        "browser.tabs.remote.autostart.4": e10s,
        "browser.tabs.remote.autostart.5": e10s,
        "browser.tabs.remote.autostart.6": e10s,
        "dom.ipc.processCount": self.process_count,
        # prevent "You're using e10s!" dialog from showing up
        "browser.displayedE10SNotice": 1000,
        # override image expiration in hopes of getting less volatile
        # numbers
        "image.mem.surfacecache.min_expiration_ms": 10000,
        # Specify a communications port
        "marionette.defaultPrefs.port": marionette_port,
    }
    if self.proxy:
        # disable network access
        prefs.update({
            "network.proxy.socks": self.proxy,
            "network.proxy.socks_port": self.proxy_port,
            "network.proxy.socks_remote_dns": True,
            "network.proxy.type": 1,  # Socks
        })
    profile = mozprofile.FirefoxProfile(preferences=prefs)
    # TODO(ER): Figure out how to turn on debug level info again
    # commandline.formatter_option_defaults['level'] = 'debug'
    logger = commandline.setup_logging("MarionetteTest", {})
    runner = MarionetteTestRunner(binary=self.binary,
                                  profile=profile,
                                  logger=logger,
                                  startup_timeout=60,
                                  address="localhost:%d" % marionette_port,
                                  gecko_log="gecko_%d.log" % self.process_count)
    # Add our testvars
    runner.testvars.update(testvars)
    test_path = os.path.join(MODULE_DIR, "test_memory_usage.py")
    try:
        # print(...) and `except ... as e` are valid in both Python 2.6+
        # and Python 3, unlike the old `print x` / `except X, e` forms.
        print("Marionette - running test")
        runner.run_tests([test_path])
        # NOTE(review): `failures` is not used in this visible scope —
        # confirm whether a caller/later code consumes it.
        failures = runner.failed
    except Exception as e:
        # Best-effort: log the error and continue rather than abort the run.
        print(e)