Esempio n. 1
0
 def test_23(self):
     "test hitting log size limit"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     with open(self.tmpfn, 'w') as prefs:
         prefs.write('//fftest_big_log\n')
     size_cap = 0x100000  # 1MB
     ffp.launch(TESTFF_BIN, prefs_js=self.tmpfn, log_limit=size_cap)
     # the log size worker should stop the browser
     self.assertIsNotNone(ffp.wait(10))
     self.assertIsNotNone(ffp.returncode)
     ffp.close()
     self.assertEqual(ffp.reason, ffp.RC_WORKER)
     self.assertIsNotNone(ffp.returncode)
     ffp.save_logs(self.logs)
     expected = ("log_ffp_worker_log_size_limiter.txt", "log_stderr.txt",
                 "log_stdout.txt")
     saved = os.listdir(self.logs)
     for log_name in saved:
         self.assertIn(log_name, expected)
     # combined size of the saved logs must exceed the configured cap
     combined = sum(
         os.stat(os.path.join(self.logs, log_name)).st_size
         for log_name in saved)
     self.assertLess(size_cap, combined)
     limiter_log = os.path.join(self.logs,
                                "log_ffp_worker_log_size_limiter.txt")
     with open(limiter_log, "r") as log_fp:
         self.assertIn("LOG_SIZE_LIMIT_EXCEEDED", log_fp.read())
Esempio n. 2
0
 def test_17(self):
     "test launching with gdb"
     if not sys.platform.startswith("linux"):
         # GDB support is Linux only
         with self.assertRaisesRegex(EnvironmentError,
                                     "GDB is only supported on Linux"):
             FFPuppet(use_gdb=True)
     else:
         ffp = FFPuppet(use_gdb=True)
         self.addCleanup(ffp.clean_up)
         bin_path = str(
             subprocess.check_output(["which",
                                      "echo"]).strip().decode("ascii"))
         # launch will fail b/c 'echo' will exit right away but that's fine
         with self.assertRaisesRegex(LaunchError,
                                     "Failure during browser startup"):
             self.assertEqual(ffp.launch(bin_path), 0)
         ffp.close()
         ffp.save_logs(self.logs)
         with open(os.path.join(self.logs, "log_stdout.txt"),
                   "rb") as log_fp:
             log_data = log_fp.read()
         # verify GDB ran and executed the script
         # NOTE: '[', ']', '(' and ')' must be escaped - unescaped, the
         # previous pattern was a single character class that matched
         # almost any log content, making the assertion meaningless
         self.assertRegex(
             log_data,
             br"\[Inferior \d+ \(process \d+\) exited with code \d+\]")
         self.assertRegex(log_data, br"\+quit_with_code")
Esempio n. 3
0
 def test_08(self):
     "test clone_log()"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     # nothing to clone before launch
     self.assertIsNone(ffp.clone_log("stdout", target_file=self.tmpfn))
     ffp.launch(TESTFF_BIN, location=self.tsrv.get_addr())
     ffp.wait(0.25)  # wait for log prints
     # make sure logs are available
     self.assertEqual(ffp.clone_log("stdout", target_file=self.tmpfn),
                      self.tmpfn)
     with open(self.tmpfn, "rb") as tmpfp:
         orig = tmpfp.read()
     self.assertGreater(len(orig), 5)
     # cloning with an offset must skip the first 'offset' bytes
     self.assertEqual(
         ffp.clone_log("stdout", target_file=self.tmpfn, offset=5),
         self.tmpfn)
     with open(self.tmpfn, "rb") as tmpfp:
         self.assertEqual(tmpfp.read(), orig[5:])
     # grab log without giving a target file name
     rnd_log = ffp.clone_log("stdout")
     self.assertIsNotNone(rnd_log)
     try:
         ffp.close()
         # make sure logs are available
         self.assertEqual(ffp.clone_log("stdout", target_file=self.tmpfn),
                          self.tmpfn)
         with open(self.tmpfn, "rb") as tmpfp:
             self.assertTrue(tmpfp.read().startswith(orig))
     finally:
         # always remove the auto-named clone created above
         if os.path.isfile(rnd_log):
             os.remove(rnd_log)
     # verify clean_up() removed the logs
     ffp.clean_up()
     self.assertIsNone(ffp.clone_log("stdout", target_file=self.tmpfn))
 def test_04(self):
     "test logging"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     ffp.close()
     # NOTE(review): save_logs() before any launch - presumably a no-op; confirm
     ffp.save_logs(os.path.join(self.logs, "no_logs"))
     ffp.launch(TESTFF_BIN, location=self.tsrv.get_addr())
     ffp.wait(0.25) # wait for log prints
     ffp.close()
     self.assertTrue(ffp._logs.closed) # pylint: disable=protected-access
     log_ids = ffp.available_logs()
     self.assertEqual(len(log_ids), 2)
     self.assertIn("stderr", log_ids)
     self.assertIn("stdout", log_ids)
     log_dir = os.path.join(self.logs, "some_dir") # nonexistent directory
     # save_logs() should create the missing directory
     ffp.save_logs(log_dir)
     self.assertTrue(os.path.isdir(log_dir))
     log_list = os.listdir(log_dir)
     self.assertIn("log_stderr.txt", log_list)
     self.assertIn("log_stdout.txt", log_list)
     with open(os.path.join(log_dir, "log_stdout.txt"), "r") as log_fp:
         self.assertIn("url: http://", log_fp.read().strip())
     # verify the expected content of each saved log
     for fname in log_list:
         with open(os.path.join(log_dir, fname)) as log_fp:
             log_data = log_fp.read().splitlines()
         if fname.startswith("log_stderr"):
             self.assertEqual(len(log_data), 3)
             self.assertTrue(log_data[0].startswith('[ffpuppet] Launch command:'))
             self.assertTrue(log_data[-1].startswith('[ffpuppet] Exit code:')) # exit code differs between platforms
         elif fname.startswith("log_stdout"):
             self.assertEqual(log_data[0], "hello world")
         else:
             raise AssertionError("Unknown log file %r" % fname)
Esempio n. 5
0
 def test_25(self):
     "test multiple minidumps"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     ffp.launch(TESTFF_BIN)
     md_dir = os.path.join(ffp.profile, "minidumps")
     if not os.path.isdir(md_dir):
         os.mkdir(md_dir)
     ffp._last_bin_path = ffp.profile  # pylint: disable=protected-access
     # needs to exist to satisfy a check
     sym_dir = os.path.join(ffp.profile, "symbols")
     if not os.path.isdir(sym_dir):
         os.mkdir(sym_dir)
     # create "test.dmp" files
     for num in (1, 2, 3):
         dmp_file = os.path.join(md_dir, "test%d.dmp" % num)
         with open(dmp_file, "w") as out_fp:
             out_fp.write("%da\n%db" % (num, num))
     self.assertFalse(ffp.is_healthy())
     # process .dmp file
     ffp.close()
     ffp.save_logs(self.logs)
     logs = os.listdir(self.logs)
     self.assertIn("log_minidump_01.txt", logs)
     self.assertIn("log_minidump_02.txt", logs)
     self.assertIn("log_minidump_03.txt", logs)
Esempio n. 6
0
    def test_29(self):
        "test rmtree error handler"
        # normal profile creation
        # - just create a puppet, write a readonly file in its profile, then call close()
        ffp = FFPuppet()
        self.addCleanup(ffp.clean_up)
        ffp.launch(TESTFF_BIN)
        ro_file = os.path.join(ffp.profile, "read-only-test.txt")
        with open(ro_file, "w"):
            pass
        os.chmod(ro_file, stat.S_IREAD)
        ffp.close()
        # the profile must be removed even though it held a read-only file
        self.assertFalse(os.path.isfile(ro_file))
        ffp.clean_up()

        # use template profile that contains a readonly file
        prf_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, prf_dir, onerror=onerror)
        ro_file = os.path.join(prf_dir, "read-only.txt")
        with open(ro_file, "w"):
            pass
        os.chmod(ro_file, stat.S_IREAD)
        ffp = FFPuppet(use_profile=prf_dir)
        self.addCleanup(ffp.clean_up)
        ffp.launch(TESTFF_BIN)
        working_prf = ffp.profile
        self.assertTrue(os.path.isdir(working_prf))
        ffp.close()
        # the working copy of the template profile is removed on close()
        self.assertFalse(os.path.isdir(working_prf))
Esempio n. 7
0
 def test_11(self):
     "test abort tokens"
     with open(self.tmpfn, 'w') as prefs_fp:
         prefs_fp.write('//fftest_soft_assert\n')
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     # tokens may be compiled patterns or plain strings, but never None
     ffp.add_abort_token(re.compile(r"TEST\dREGEX\.+"))
     with self.assertRaisesRegex(
             TypeError,
             "Expecting 'str' or 're._pattern_type' got: 'NoneType'"):
         ffp.add_abort_token(None)
     ffp.add_abort_token("###!!! ASSERTION:")
     ffp.launch(TESTFF_BIN,
                location=self.tsrv.get_addr(),
                prefs_js=self.tmpfn)
     # the log scanner worker should stop the browser once a token is hit
     self.assertIsNotNone(ffp.wait(10))
     ffp.close()
     self.assertEqual(ffp.reason, ffp.RC_WORKER)
     self.assertIsNotNone(ffp.returncode)
     self.assertEqual(len(ffp.available_logs()), 3)
     ffp.save_logs(self.logs)
     worker_log = os.path.join(self.logs,
                               "log_ffp_worker_log_scanner.txt")
     self.assertTrue(os.path.isfile(worker_log))
     with open(worker_log, "rb") as log_fp:
         self.assertIn(b"TOKEN_LOCATED", log_fp.read())
Esempio n. 8
0
 def test_05(self):
     "test get_pid()"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     # no pid before the browser is launched
     self.assertIsNone(ffp.get_pid())
     ffp.launch(TESTFF_BIN, location=self.tsrv.get_addr())
     pid = ffp.get_pid()
     self.assertGreater(pid, 0)
     ffp.close()
     # pid is cleared once the browser is closed
     self.assertIsNone(ffp.get_pid())
Esempio n. 9
0
 def test_06(self):
     "test is_running()"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     # not running before launch
     self.assertFalse(ffp.is_running())
     ffp.launch(TESTFF_BIN, location=self.tsrv.get_addr())
     self.assertTrue(ffp.is_running())
     ffp.close()
     # repeated calls after close() keep reporting not-running
     for _ in range(2):
         self.assertFalse(ffp.is_running())
Esempio n. 10
0
 def test_10(self):
     "test calling launch() multiple times"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     # repeated launch/close cycles must all succeed
     for _ in range(10):
         ffp.launch(TESTFF_BIN, location=self.tsrv.get_addr())
         ffp.close()
     # launching again without close() must fail
     ffp.launch(TESTFF_BIN, location=self.tsrv.get_addr())
     with self.assertRaisesRegex(LaunchError, "Process is already running"):
         ffp.launch(TESTFF_BIN)
     ffp.close()
Esempio n. 11
0
 def test_24(self):
     "test collecting and cleaning up ASan logs"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     ffp.launch(TESTFF_BIN)
     test_logs = list()
     asan_prefix = os.path.join(ffp._logs.working_path,
                                ffp._logs.LOG_ASAN_PREFIX)  # pylint: disable=protected-access
     for i in range(3):
         test_logs.append(".".join([asan_prefix, str(i)]))
     # small log with nothing interesting
     with open(test_logs[0], "w") as log_fp:
         log_fp.write("SHORT LOG\n")
         log_fp.write("filler line")
     # crash on another thread
     with open(test_logs[1], "w") as log_fp:
         log_fp.write("GOOD LOG\n")
         log_fp.write(
             "==70811==ERROR: AddressSanitizer: SEGV on unknown address 0x00000BADF00D"
         )
         log_fp.write(
             " (pc 0x7f4c0bb54c67 bp 0x7f4c07bea380 sp 0x7f4c07bea360 T0)\n"
         )  # must be 2nd line
         for _ in range(4):  # pad out to 6 lines
             log_fp.write("filler line\n")
     # child log that should be ignored (created when parent crashes)
     with open(test_logs[2], "w") as log_fp:
         log_fp.write("BAD LOG\n")
         log_fp.write(
             "==70811==ERROR: AddressSanitizer: SEGV on unknown address 0x000000000000"
         )
         log_fp.write(
             " (pc 0x7f4c0bb54c67 bp 0x7f4c07bea380 sp 0x7f4c07bea360 T2)\n"
         )  # must be 2nd line
         for _ in range(4):  # pad out to 6 lines
             log_fp.write("filler line\n")
     # the presence of ASan logs makes the puppet unhealthy while still running
     self.assertFalse(ffp.is_healthy())
     self.assertTrue(ffp.is_running())
     # close() triggers collection of the ASan logs
     ffp.close()
     ffp.save_logs(self.logs)
     dir_list = os.listdir(self.logs)
     # expect log_stderr, log_stdout and the three ASan logs
     self.assertEqual(len(dir_list), 5)
     for fname in dir_list:
         if not fname.startswith("log_ffp_asan_"):
             self.assertIn(fname, ["log_stderr.txt", "log_stdout.txt"])
             continue
         with open(os.path.join(self.logs, fname), "r") as log_fp:
             self.assertIn(log_fp.readline(),
                           ["BAD LOG\n", "GOOD LOG\n", "SHORT LOG\n"])
     # clean_up() must remove the raw ASan log files
     ffp.clean_up()
     for t_log in test_logs:
         self.assertFalse(os.path.isfile(t_log))
Esempio n. 12
0
 def test_21(self):
     "test log_length()"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     # unknown ids and not-yet-created logs report None
     for log_id in ("INVALID", "stderr"):
         self.assertIsNone(ffp.log_length(log_id))
     ffp.launch(TESTFF_BIN)
     self.assertGreater(ffp.log_length("stderr"), 0)
     ffp.close()
     # logs remain measurable after close()
     self.assertGreater(ffp.log_length("stderr"), 0)
     ffp.clean_up()
     # verify clean_up() removed the logs
     self.assertIsNone(ffp.log_length("stderr"))
Esempio n. 13
0
 def test_02(self):
     "test crash on start"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     with open(self.tmpfn, 'w') as prefs_fp:
         prefs_fp.write('//fftest_startup_crash\n')
     # a crash during startup surfaces as BrowserTerminatedError
     with self.assertRaisesRegex(BrowserTerminatedError,
                                 "Failure during browser startup"):
         ffp.launch(TESTFF_BIN, prefs_js=self.tmpfn)
     ffp.close()
     self.assertFalse(ffp.is_running())  # process should be gone
     # a failed launch does not count as a launch
     self.assertEqual(ffp.launches, 0)
     self.assertEqual(ffp.reason, ffp.RC_ALERT)
Esempio n. 14
0
 def test_27(self):
     "test multiprocess target"
     with open(self.tmpfn, "w") as prefs_fp:
         prefs_fp.write("//fftest_multi_proc\n")
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     ffp.launch(TESTFF_BIN,
                prefs_js=self.tmpfn,
                location=self.tsrv.get_addr())
     self.assertTrue(ffp.is_running())
     self.assertIsNone(ffp.wait(0))
     # the parent must have spawned child processes
     children = Process(ffp.get_pid()).children()
     self.assertGreater(len(children), 0)
     # terminate one of the child processes
     children[-1].terminate()
     ffp.close()
     self.assertFalse(ffp.is_running())
     self.assertIsNone(ffp.wait(0))
Esempio n. 15
0
 def test_30(self):
     "test using a readonly prefs.js and extension"
     prefs = os.path.join(self.logs, "prefs.js")
     ext = os.path.join(self.logs, "ext.xpi")
     # create both input files empty and read-only
     for ro_file in (prefs, ext):
         with open(ro_file, "w"):
             pass
         os.chmod(ro_file, stat.S_IREAD)
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     ffp.launch(TESTFF_BIN, extension=ext, prefs_js=prefs)
     working_prf = ffp.profile
     ffp.close()
     # the working profile must be removed despite the read-only inputs
     self.assertFalse(os.path.isdir(working_prf))
Esempio n. 16
0
 def test_09(self):
     "test hitting memory limit"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     with open(self.tmpfn, 'w') as prefs_fp:
         prefs_fp.write('//fftest_memory\n')
     ffp.launch(TESTFF_BIN,
                location=self.tsrv.get_addr(),
                prefs_js=self.tmpfn,
                memory_limit=0x100000)  # 1MB
     # the memory limit worker should stop the browser
     self.assertIsNotNone(ffp.wait(30))
     ffp.close()
     self.assertIsNotNone(ffp.returncode)
     self.assertEqual(ffp.reason, ffp.RC_WORKER)
     self.assertEqual(len(ffp.available_logs()), 3)
     ffp.save_logs(self.logs)
     worker_log = os.path.join(self.logs,
                               "log_ffp_worker_memory_limiter.txt")
     self.assertTrue(os.path.isfile(worker_log))
     with open(worker_log, "rb") as log_fp:
         self.assertIn(b"MEMORY_LIMIT_EXCEEDED", log_fp.read())
Esempio n. 17
0
 def test_03(self):
     "test hang on start"
     ffp = FFPuppet()
     # temporarily lower the minimum launch timeout so the test is quick
     default_timeout = ffp.LAUNCH_TIMEOUT_MIN
     try:
         ffp.LAUNCH_TIMEOUT_MIN = 1
         self.addCleanup(ffp.clean_up)
         with open(self.tmpfn, 'w') as prefs:
             prefs.write('//fftest_startup_hang\n')
         start = time.time()
         with self.assertRaisesRegex(BrowserTimeoutError, "Launching browser timed out"):
             ffp.launch(TESTFF_BIN, prefs_js=self.tmpfn, launch_timeout=1)
         duration = time.time() - start
         ffp.close()
         self.assertEqual(ffp.reason, ffp.RC_CLOSED)
         # the attempt must take at least the (lowered) minimum timeout
         # but nowhere near the default
         self.assertGreater(duration, ffp.LAUNCH_TIMEOUT_MIN)
         self.assertLess(duration, 30)
     finally:
         # restore the original value
         ffp.LAUNCH_TIMEOUT_MIN = default_timeout
Esempio n. 18
0
 def test_01(self):
     "test basic launch and close"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     # fresh puppet: nothing launched, reason is 'closed'
     self.assertEqual(ffp.launches, 0)
     self.assertEqual(ffp.reason, ffp.RC_CLOSED)
     ffp.launch(TESTFF_BIN, location=self.tsrv.get_addr())
     self.assertEqual(len(ffp._workers), 0)  # pylint: disable=protected-access
     self.assertEqual(ffp.launches, 1)
     self.assertIsNone(ffp.wait(0))
     # while running: healthy, no reason set yet
     self.assertTrue(ffp.is_running())
     self.assertTrue(ffp.is_healthy())
     self.assertIsNone(ffp.reason)
     ffp.close()
     # after close(): stopped, reason set, process handle cleared
     self.assertEqual(ffp.reason, ffp.RC_CLOSED)
     self.assertIsNone(ffp._proc)  # pylint: disable=protected-access
     self.assertFalse(ffp.is_running())
     self.assertFalse(ffp.is_healthy())
     self.assertIsNone(ffp.wait(10))
Esempio n. 19
0
 def test_19(self):
     "test launching with Valgrind"
     if not sys.platform.startswith("linux"):
         # Valgrind support is Linux only
         with self.assertRaisesRegex(EnvironmentError, "Valgrind is only supported on Linux"):
             FFPuppet(use_valgrind=True)
     else:
         ffp = FFPuppet(use_valgrind=True)
         self.addCleanup(ffp.clean_up)
         bin_path = str(
             subprocess.check_output(
                 ["which", "echo"]).strip().decode("ascii"))
         # launch will fail b/c 'echo' will exit right away but that's fine
         with self.assertRaisesRegex(LaunchError, "Failure during browser startup"):
             self.assertEqual(ffp.launch(bin_path), 0)
         ffp.close()
         ffp.save_logs(self.logs)
         stderr_log = os.path.join(self.logs, "log_stderr.txt")
         with open(stderr_log, "rb") as log_fp:
             log_data = log_fp.read()
         # verify Valgrind ran and executed the script
         self.assertRegex(log_data, br"valgrind -q")
         self.assertRegex(log_data, br"\[ffpuppet\] Exit code: 0")
Esempio n. 20
0
 def test_07(self):
     "test wait()"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     # call when ffp._proc is None
     self.assertIsNone(ffp.wait())
     ffp.launch(TESTFF_BIN, location=self.tsrv.get_addr())
     # call when ffp._proc is running
     self.assertTrue(ffp.is_running())
     self.assertIsNone(ffp.wait(0))
     ffp._terminate()  # pylint: disable=protected-access
     # call when ffp._proc is not running
     self.assertFalse(ffp.is_running())
     self.assertIsNotNone(ffp.wait(0))  # with a timeout of zero
     self.assertIsNotNone(ffp.wait())  # without a timeout
     ffp.close()
     self.assertEqual(ffp.reason, ffp.RC_EXITED)
     # calling _terminate() after close() is expected to assert
     with self.assertRaisesRegex(AssertionError, ""):
         ffp._terminate()  # pylint: disable=protected-access
     self.assertIsNone(ffp.wait(None))
Esempio n. 21
0
 def test_28(self):
     "test returncode"
     with open(self.tmpfn, "w") as prefs_fp:
         prefs_fp.write("//fftest_exit_code_3\n")
     # NOTE(review): a local HTTPTestServer is created here but self.tsrv is
     # used for the launch below - confirm which server is intended
     tsrv = HTTPTestServer()
     self.addCleanup(tsrv.shutdown)
     ffp = FFPuppet()
     # before launch the returncode defaults to 0
     self.assertEqual(ffp.returncode, 0)
     self.addCleanup(ffp.clean_up)
     ffp.launch(TESTFF_BIN, prefs_js=self.tmpfn, location=self.tsrv.get_addr())
     ffp.wait(10)
     self.assertFalse(ffp.is_running())
     # verify private member is set when using returncode property
     self.assertIsNone(ffp._returncode)  # pylint: disable=protected-access
     self.assertEqual(ffp.returncode, 3)
     self.assertEqual(ffp._returncode, 3)  # pylint: disable=protected-access
     # verify private member is set when calling close()
     ffp._returncode = None  # pylint: disable=protected-access
     ffp.close()
     self.assertEqual(ffp.reason, ffp.RC_EXITED)
     self.assertEqual(ffp.returncode, 3)
Esempio n. 22
0
 def test_28(self):
     "test launching with RR"
     if not sys.platform.startswith("linux"):
         # RR support is Linux only
         with self.assertRaisesRegex(EnvironmentError,
                                     "RR is only supported on Linux"):
             FFPuppet(use_rr=True)
     else:
         # skip unless a working rr is available on this machine
         try:
             # TODO: this can hang if ptrace is blocked by seccomp
             proc = subprocess.Popen(["rr", "check"],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
         except OSError:
             self.skipTest("RR not installed")
         _, stderr = proc.communicate()
         proc.wait()
         if b"Unable to open performance counter" in stderr:
             self.skipTest(
                 "This machine doesn't support performance counters needed by RR"
             )
         ffp = FFPuppet(use_rr=True)
         self.addCleanup(ffp.clean_up)
         rr_dir = tempfile.mkdtemp(prefix="test_ffp_rr")
         self.addCleanup(shutil.rmtree, rr_dir, onerror=onerror)
         bin_path = str(
             subprocess.check_output(["which",
                                      "echo"]).strip().decode("ascii"))
         # launch will fail b/c 'echo' will exit right away but that's fine
         with self.assertRaisesRegex(LaunchError,
                                     "Failure during browser startup"):
             ffp.launch(bin_path, env_mod={"_RR_TRACE_DIR": rr_dir})
         ffp.close()
         self.assertEqual(ffp.reason, ffp.RC_EXITED)
         ffp.save_logs(self.logs)
         with open(os.path.join(self.logs, "log_stderr.txt"),
                   "rb") as log_fp:
             log_data = log_fp.read()
         # verify RR ran and executed the script
         self.assertIn(b"rr record", log_data)
         self.assertIn(b"[ffpuppet] Reason code:", log_data)
Esempio n. 23
0
 def test_15(self):
     "test passing a file and a non existing file to launch() via location"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     # a nonexistent location file must be rejected
     with self.assertRaisesRegex(IOError, "Cannot find"):
         ffp.launch(TESTFF_BIN, location="fake_file.none")
     ffp.close()
     with tempfile.NamedTemporaryFile() as test_fp:
         test_fp.write(b"test")
         test_fp.seek(0)
         # needs realpath() for OSX & normcase() for Windows
         fname = os.path.realpath(os.path.normcase(test_fp.name))
         ffp.launch(TESTFF_BIN, location=fname)
         ffp.wait(0.25)  # wait for log prints
         ffp.close()
         ffp.save_logs(self.logs)
     with open(os.path.join(self.logs, "log_stdout.txt"), "r") as log_fp:
         location = log_fp.read().strip()
     # the location must have been turned into a file:// URL
     self.assertIn("url: file:///", location)
     # verify the URL path maps back to the original temp file
     location = os.path.normcase(location.split("file:///")[-1])
     self.assertFalse(location.startswith("/"))
     self.assertEqual(os.path.normpath(os.path.join("/", location)), fname)
Esempio n. 24
0
 def test_13(self):
     "test calling close() and clean_up() in multiple states"
     ffp = FFPuppet()
     self.addCleanup(ffp.clean_up)
     # close() before launch() must not raise
     ffp.close()
     ffp.launch(TESTFF_BIN, location=self.tsrv.get_addr())
     self.assertIsNone(ffp.reason)
     ffp.close()
     ffp.clean_up()
     # close() after clean_up() must also be safe
     ffp.close()
Esempio n. 25
0
class PuppetTarget(Target):
    """Target implementation that drives a browser process via FFPuppet."""

    def __init__(self, binary, extension, launch_timeout, log_limit,
                 memory_limit, prefs, relaunch, **kwds):
        # optional debugger/display flags arrive via **kwds; anything left
        # over after the pops below is unsupported and only warned about
        super(PuppetTarget,
              self).__init__(binary, extension, launch_timeout, log_limit,
                             memory_limit, prefs, relaunch)
        self.use_rr = kwds.pop("rr", False)
        self.use_valgrind = kwds.pop("valgrind", False)
        use_xvfb = kwds.pop("xvfb", False)
        if kwds:
            log.warning("PuppetTarget ignoring unsupported arguments: %s",
                        ", ".join(kwds))

        # create Puppet object
        self._puppet = FFPuppet(use_rr=self.use_rr,
                                use_valgrind=self.use_valgrind,
                                use_xvfb=use_xvfb)

    def _abort_hung_proc(self):
        # send SIGABRT to the busiest process
        with self._lock:
            proc_usage = self._puppet.cpu_usage()
        for pid, cpu in sorted(proc_usage, reverse=True, key=lambda x: x[1]):
            log.debug("sending SIGABRT to pid: %r, cpu: %0.2f%%", pid, cpu)
            os.kill(pid, signal.SIGABRT)
            # only the process with the highest CPU usage is signaled
            break

    def add_abort_token(self, token):
        # forward the token to FFPuppet's log scanner
        self._puppet.add_abort_token(token)

    def cleanup(self):
        # prevent parallel calls to FFPuppet.clean_up()
        with self._lock:
            self._puppet.clean_up()

    def close(self):
        # prevent parallel calls to FFPuppet.close()
        with self._lock:
            self._puppet.close()

    @property
    def closed(self):
        # FFPuppet sets a reason code once the browser has been closed
        return self._puppet.reason is not None

    @property
    def monitor(self):
        # lazily build a TargetMonitor that proxies FFPuppet state
        if self._monitor is None:

            class _PuppetMonitor(TargetMonitor):
                # pylint: disable=no-self-argument,protected-access
                def clone_log(_, log_id, offset=0):
                    return self._puppet.clone_log(log_id, offset=offset)

                def is_running(_):
                    return self._puppet.is_running()

                def is_healthy(_):
                    return self._puppet.is_healthy()

                @property
                def launches(_):
                    return self._puppet.launches

                def log_length(_, log_id):
                    return self._puppet.log_length(log_id)

            self._monitor = _PuppetMonitor()
        return self._monitor

    def poll_for_idle(self, threshold, interval):
        # return POLL_IDLE if cpu usage of target is below threshold for interval seconds
        # NOTE(review): there is no sleep between cpu_usage() samples, so this
        # busy-polls for up to `interval` seconds - confirm that is intended
        start_time = time.time()
        while time.time() - start_time < interval:
            for _, cpu in self._puppet.cpu_usage():
                if cpu >= threshold:
                    return self.POLL_BUSY
            if not self._puppet.is_running():
                break
        else:
            # the while loop completed without a break -> stayed below threshold
            log.info("Process utilized <= %d%% CPU for %ds", threshold,
                     interval)
        return self.POLL_IDLE

    def detect_failure(self, ignored, was_timeout):
        # inspect the browser state and map it to a RESULT_* status
        status = self.RESULT_NONE
        if self.expect_close and not was_timeout:
            # give the browser a moment to close if needed
            self._puppet.wait(timeout=30)
        is_healthy = self._puppet.is_healthy()
        # check if there has been a crash, hang, etc...
        if not is_healthy or was_timeout:
            if self._puppet.is_running():
                log.debug("terminating browser...")
                if was_timeout and "timeout" not in ignored:
                    self._abort_hung_proc()
                    # give the process a moment to start dump
                    self._puppet.wait(timeout=1)
            self.close()
        # if something has happened figure out what
        if not is_healthy:
            if self._puppet.reason == FFPuppet.RC_CLOSED:
                log.info("target.close() was called")
            elif self._puppet.reason == FFPuppet.RC_EXITED:
                log.info("Target closed itself")
            elif (self._puppet.reason == FFPuppet.RC_WORKER
                  and "memory" in ignored and "ffp_worker_memory_usage"
                  in self._puppet.available_logs()):
                # memory limit hit and the caller asked to ignore it
                status = self.RESULT_IGNORED
                log.info("Memory limit exceeded")
            elif (self._puppet.reason == FFPuppet.RC_WORKER
                  and "log-limit" in ignored
                  and "ffp_worker_log_size" in self._puppet.available_logs()):
                # log size limit hit and the caller asked to ignore it
                status = self.RESULT_IGNORED
                log.info("Log size limit exceeded")
            else:
                log.debug("failure detected, ffpuppet return code: %r",
                          self._puppet.reason)
                status = self.RESULT_FAILURE
        elif was_timeout:
            log.info("Timeout detected")
            status = self.RESULT_IGNORED if "timeout" in ignored else self.RESULT_FAILURE
        return status

    def dump_coverage(self):
        # If at this point, the browser is running, i.e. we did neither
        # relaunch nor crash/timeout, then we need to signal the browser
        # to dump coverage before attempting a new test that potentially
        # crashes.
        # Note: This is not required if we closed or are going to close
        # the browser (relaunch or done with all iterations) because the
        # SIGTERM will also trigger coverage to be synced out.
        pid = self._puppet.get_pid()
        if pid is None or not self._puppet.is_running():
            log.debug("Could not dump coverage because process is not running")
            return
        try:
            # signal every child process first
            for child in psutil.Process(pid).children(recursive=True):
                log.debug("Sending SIGUSR1 to %d", child.pid)
                os.kill(child.pid, signal.SIGUSR1)
        except (psutil.AccessDenied, psutil.NoSuchProcess):
            # children may have exited or be inaccessible - best effort only
            pass
        log.debug("Sending SIGUSR1 to %d", pid)
        os.kill(pid, signal.SIGUSR1)

    def launch(self, location, env_mod=None):
        # launch the browser at `location`, translating FFPuppet launch
        # failures into Target* exceptions
        if not self.prefs:
            raise TargetError("A prefs.js file is required")
        self.rl_countdown = self.rl_reset
        env_mod = dict(env_mod or [])
        # do not allow network connections to non local endpoints
        env_mod["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
        env_mod["MOZ_CRASHREPORTER_SHUTDOWN"] = "1"
        try:
            self._puppet.launch(self.binary,
                                launch_timeout=self.launch_timeout,
                                location=location,
                                log_limit=self.log_limit,
                                memory_limit=self.memory_limit,
                                prefs_js=self.prefs,
                                extension=self.extension,
                                env_mod=env_mod)
        except LaunchError as exc:
            log.error("FFPuppet Error: %s", str(exc))
            self.close()
            if isinstance(exc, BrowserTimeoutError):
                raise TargetLaunchTimeout(str(exc))
            raise TargetLaunchError(str(exc))

    def log_size(self):
        # combined length (bytes) of the stderr and stdout logs
        return self._puppet.log_length("stderr") + self._puppet.log_length(
            "stdout")

    def save_logs(self, *args, **kwargs):
        # pass-through to FFPuppet.save_logs()
        self._puppet.save_logs(*args, **kwargs)
Esempio n. 26
0
class PuppetTarget(Target):
    """Target implementation that drives a browser via FFPuppet.

    Adapts an FFPuppet instance to the Target interface: launching the
    browser, classifying results (crash/hang/worker triggered), collecting
    logs and dumping code coverage data.
    """

    SUPPORTED_ASSETS = (
        # file containing line separated list of tokens to scan stderr/out for
        "abort-tokens",
        # xpi or directory containing the unpacked extension
        "extension",
        # LSan suppression list file
        "lsan-suppressions",
        # prefs.js file to use
        "prefs",
        # TSan suppression list file
        "tsan-suppressions",
        # UBSan suppression list file
        "ubsan-suppressions",
    )

    # environment variables of interest
    # NOTE(review): how TRACKED_ENVVARS is consumed is not visible in this
    # chunk — presumably recorded from the host environment; verify in callers
    TRACKED_ENVVARS = (
        "ASAN_OPTIONS",
        "LSAN_OPTIONS",
        "TSAN_OPTIONS",
        "UBSAN_OPTIONS",
        "GNOME_ACCESSIBILITY",
        "MOZ_CHAOSMODE",
        "XPCOM_DEBUG_BREAK",
    )

    __slots__ = ("use_valgrind", "_extension", "_prefs", "_puppet")

    def __init__(self, binary, launch_timeout, log_limit, memory_limit, **kwds):
        """Create a PuppetTarget wrapping an FFPuppet instance.

        Args:
            binary: Browser binary to launch.
            launch_timeout: Seconds to wait for the browser to launch.
            log_limit: Log size limit passed to FFPuppet.launch().
            memory_limit: Memory limit passed to FFPuppet.launch().
            **kwds: Optional flags: assets, pernosco, rr, valgrind, xvfb.
                Unrecognized entries are ignored with a warning.
        """
        super().__init__(
            binary,
            launch_timeout,
            log_limit,
            memory_limit,
            assets=kwds.pop("assets", None),
        )
        # TODO: clean up handling debuggers
        # only one debugger can be active; the last flag checked wins
        debugger = Debugger.NONE
        if kwds.pop("pernosco", False):
            debugger = Debugger.PERNOSCO
        if kwds.pop("rr", False):
            debugger = Debugger.RR
        if kwds.pop("valgrind", False):
            # NOTE(review): use_valgrind is only assigned on this branch; with
            # __slots__ defined, reading it otherwise raises AttributeError
            # unless the base class sets it — confirm in Target.__init__
            self.use_valgrind = True
            debugger = Debugger.VALGRIND
        self._extension = None
        self._prefs = None

        # create Puppet object
        self._puppet = FFPuppet(
            debugger=debugger,
            use_xvfb=kwds.pop("xvfb", False),
            working_path=grz_tmp("target"),
        )
        if kwds:
            LOG.warning(
                "PuppetTarget ignoring unsupported arguments: %s", ", ".join(kwds)
            )

    def _cleanup(self):
        """Release all FFPuppet resources."""
        # prevent parallel calls to FFPuppet.close() and/or FFPuppet.clean_up()
        with self._lock:
            self._puppet.clean_up()

    def close(self, force_close=False):
        """Close the browser instance via FFPuppet."""
        # prevent parallel calls to FFPuppet.close() and/or FFPuppet.clean_up()
        with self._lock:
            self._puppet.close(force_close=force_close)

    @property
    def closed(self):
        """bool: True once FFPuppet has recorded a close reason."""
        return self._puppet.reason is not None

    def create_report(self, is_hang=False):
        """Save the current browser logs and build a Report from them."""
        logs = mkdtemp(prefix="logs_", dir=grz_tmp("logs"))
        self.save_logs(logs)
        return Report(logs, self.binary, is_hang=is_hang)

    def filtered_environ(self):
        """Return a copy of the environment without run specific entries.

        *SAN_OPTIONS variables are scrubbed of machine/run specific options
        (paths, suppression files) and empty entries are dropped.
        """
        # remove context specific entries from environment
        filtered = dict(self.environ)
        opts = SanitizerOptions()
        # iterate over *SAN_OPTIONS entries
        for san in (x for x in filtered if x.endswith("SAN_OPTIONS")):
            opts.load_options(filtered[san])
            # remove entries specific to the current environment
            opts.pop("external_symbolizer_path")
            opts.pop("log_path")
            opts.pop("strip_path_prefix")
            opts.pop("suppressions")
            filtered[san] = opts.options
        # remove empty entries
        return {k: v for k, v in filtered.items() if v}

    def is_idle(self, threshold):
        """Return True if every browser process is below the CPU threshold."""
        for _, cpu in self._puppet.cpu_usage():
            if cpu >= threshold:
                return False
        return True

    @property
    def monitor(self):
        """TargetMonitor: lazily created proxy exposing FFPuppet state."""
        if self._monitor is None:

            # closes over self; '_' is the inner instance (unused)
            class _PuppetMonitor(TargetMonitor):
                # pylint: disable=no-self-argument,protected-access
                def clone_log(_, log_id, offset=0):
                    return self._puppet.clone_log(log_id, offset=offset)

                def is_running(_):
                    return self._puppet.is_running()

                def is_healthy(_):
                    return self._puppet.is_healthy()

                @property
                def launches(_):
                    return self._puppet.launches

                def log_length(_, log_id):
                    return self._puppet.log_length(log_id)

            self._monitor = _PuppetMonitor()
        return self._monitor

    def check_result(self, ignored):
        """Inspect browser state and report whether a result was found.

        Args:
            ignored: Collection of failure types to ignore
                (e.g. "memory", "log-limit").

        Returns:
            Result.NONE, Result.IGNORED or Result.FOUND.
        """
        result = Result.NONE
        # check if there has been a crash, hangs will appear as SIGABRT
        if not self._puppet.is_healthy():
            self.close()
            # something has happened figure out what
            if self._puppet.reason == Reason.CLOSED:
                LOG.debug("target.close() was called")
            elif self._puppet.reason == Reason.EXITED:
                LOG.debug("target closed itself")
            elif (
                self._puppet.reason == Reason.WORKER
                and "memory" in ignored
                and "ffp_worker_memory_usage" in self._puppet.available_logs()
            ):
                result = Result.IGNORED
                LOG.debug("memory limit exceeded")
            elif (
                self._puppet.reason == Reason.WORKER
                and "log-limit" in ignored
                and "ffp_worker_log_size" in self._puppet.available_logs()
            ):
                result = Result.IGNORED
                LOG.debug("log size limit exceeded")
            else:
                # crash or hang (forced SIGABRT) has been detected
                LOG.debug("result detected (%s)", self._puppet.reason.name)
                result = Result.FOUND
        return result

    def handle_hang(self, ignore_idle=True):
        """Respond to a timeout: abort the busiest process and close.

        Args:
            ignore_idle: Skip sending SIGABRT when the busiest process is
                below 15% CPU and treat the hang as idle.

        Returns:
            bool: True if the hang was ignored because the target was idle.
        """
        was_idle = False
        if self._puppet.is_healthy():
            # sort ascending by CPU so pop() yields the busiest process
            proc_usage = sorted(self._puppet.cpu_usage(), key=lambda x: x[1])
            if proc_usage:
                pid, cpu = proc_usage.pop()
                if ignore_idle and cpu < 15:
                    # don't send SIGABRT if process is idle
                    LOG.debug("ignoring idle hang (%0.1f%%)", cpu)
                    was_idle = True
                elif system() == "Linux":
                    # sending SIGABRT is only supported on Linux for now
                    # TODO: add/test on other OSs
                    LOG.debug("sending SIGABRT to %r (%0.1f%%)", pid, cpu)
                    try:
                        kill(pid, SIGABRT)
                    except OSError:
                        LOG.warning("Failed to send SIGABRT to pid %d", pid)
                    self._puppet.wait(timeout=10)
        # always call close() since this function should only/always
        # be called when there has been a timeout
        self.close()
        return was_idle

    def dump_coverage(self, timeout=15):
        """Signal the browser to dump coverage data and wait for completion.

        SIGUSR1 is sent to the parent and all child processes, then open
        .gcda files are polled until the dump completes, the timeout is hit
        or the browser fails.

        Args:
            timeout: Seconds to wait for an open .gcda file to be released.
        """
        assert SIGUSR1 is not None
        pid = self._puppet.get_pid()
        if pid is None or not self._puppet.is_healthy():
            LOG.debug("Skipping coverage dump (target is not in a good state)")
            return
        # If at this point, the browser is in a good state, i.e. no crashes
        # or hangs, so signal the browser to dump coverage.
        try:
            for child in Process(pid).children(recursive=True):
                LOG.debug("Sending SIGUSR1 to %d (child)", child.pid)
                try:
                    kill(child.pid, SIGUSR1)
                except OSError:
                    LOG.warning("Failed to send SIGUSR1 to pid %d", child.pid)
        except (AccessDenied, NoSuchProcess):  # pragma: no cover
            pass
        LOG.debug("Sending SIGUSR1 to %d (parent)", pid)
        try:
            kill(pid, SIGUSR1)
        except OSError:
            LOG.warning("Failed to send SIGUSR1 to pid %d", pid)
        start_time = time()
        gcda_found = False
        delay = 0.1
        # wait for processes to write .gcda files
        # this should typically take less than 1 second
        while True:
            for proc in process_iter(attrs=["pid", "ppid", "open_files"]):
                # check if proc is the target or child process
                if pid in (proc.info["pid"], proc.info["ppid"]):
                    if proc.info["open_files"] is None:
                        continue
                    if any(x.path.endswith(".gcda") for x in proc.info["open_files"]):
                        gcda_found = True
                        # get the pid of the process that has the file open
                        gcda_open = proc.info["pid"]
                        break
            else:
                gcda_open = None
            elapsed = time() - start_time
            if gcda_found:
                if gcda_open is None:
                    # success
                    LOG.debug("gcda dump took %0.2fs", elapsed)
                    break
                if elapsed >= timeout:
                    # timeout failure
                    LOG.warning(
                        "gcda file open by pid %d after %0.2fs", gcda_open, elapsed
                    )
                    try:
                        kill(gcda_open, SIGABRT)
                    except OSError:
                        pass
                    sleep(1)
                    self.close()
                    break
                if delay < 1.0:
                    # increase delay to a maximum of 1 second
                    delay = min(1.0, delay + 0.1)
            elif elapsed >= 3:
                # assume we missed the process writing .gcda files
                LOG.warning("No gcda files seen after %0.2fs", elapsed)
                break
            if not self._puppet.is_healthy():
                LOG.warning("Browser failure during dump_coverage()")
                break
            sleep(delay)

    def launch(self, location):
        """Launch the browser pointed at the given location.

        Args:
            location: URL to open after launch.

        Raises:
            TargetLaunchTimeout: Browser did not launch within launch_timeout.
            TargetLaunchError: Browser failed to launch for any other reason.
        """
        # setup environment
        env_mod = dict(self.environ)
        # do not allow network connections to non local endpoints
        env_mod["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
        env_mod["MOZ_CRASHREPORTER_SHUTDOWN"] = "1"
        try:
            self._puppet.launch(
                self.binary,
                launch_timeout=self.launch_timeout,
                location=location,
                log_limit=self.log_limit,
                memory_limit=self.memory_limit,
                prefs_js=self._prefs,
                extension=self._extension,
                env_mod=env_mod,
            )
        except LaunchError as exc:
            LOG.error("FFPuppet LaunchError: %s", str(exc))
            self.close()
            if isinstance(exc, BrowserTimeoutError):
                raise TargetLaunchTimeout(str(exc)) from None
            raise TargetLaunchError(str(exc), self.create_report()) from None

    def log_size(self):
        """Return the combined length of the stderr and stdout logs."""
        return self._puppet.log_length("stderr") + self._puppet.log_length("stdout")

    def process_assets(self):
        """Load and apply assets: extension, prefs.js, abort tokens and
        sanitizer suppression files.

        Generates a prefs.js via prefpicker when none is provided and
        updates *SAN_OPTIONS entries in the environment to point at
        suppression files registered as assets.
        """
        self._extension = self.assets.get("extension")
        self._prefs = self.assets.get("prefs")
        # generate temporary prefs.js with prefpicker
        if self._prefs is None:
            LOG.debug("using prefpicker to generate prefs.js")
            with TemporaryDirectory(dir=grz_tmp("target")) as tmp_path:
                prefs = Path(tmp_path) / "prefs.js"
                template = PrefPicker.lookup_template("browser-fuzzing.yml")
                PrefPicker.load_template(template).create_prefsjs(prefs)
                self._prefs = self.assets.add("prefs", str(prefs), copy=False)
        abort_tokens = self.assets.get("abort-tokens")
        if abort_tokens:
            LOG.debug("loading 'abort tokens' from %r", abort_tokens)
            with open(abort_tokens, "r") as in_fp:
                for line in in_fp:
                    line = line.strip()
                    if line:
                        self._puppet.add_abort_token(line)

        # configure sanitizer suppressions
        opts = SanitizerOptions()
        for sanitizer in ("lsan", "tsan", "ubsan"):
            asset = "%s-suppressions" % (sanitizer,)
            # load existing sanitizer options from environment
            var_name = "%s_OPTIONS" % (sanitizer.upper(),)
            opts.load_options(self.environ.get(var_name, ""))
            if self.assets.get(asset):
                # use suppression file if provided as asset
                opts.add("suppressions", repr(self.assets.get(asset)), overwrite=True)
            elif opts.get("suppressions"):
                supp_file = opts.pop("suppressions")
                if SanitizerOptions.is_quoted(supp_file):
                    supp_file = supp_file[1:-1]
                if isfile(supp_file):
                    # use environment specified suppression file
                    LOG.debug("using %r from environment", asset)
                    opts.add(
                        "suppressions",
                        repr(self.assets.add(asset, supp_file)),
                        overwrite=True,
                    )
                else:
                    LOG.warning("Missing %s suppressions file %r", sanitizer, supp_file)
            else:
                # nothing to do
                continue
            # update sanitized *SAN_OPTIONS
            LOG.debug("updating suppressions in %r", var_name)
            self.environ[var_name] = opts.options

    def save_logs(self, *args, **kwargs):
        """Forward to FFPuppet.save_logs() to write browser logs to disk."""
        self._puppet.save_logs(*args, **kwargs)
# Example 27
class PuppetTarget(Target):
    __slots__ = ("use_rr", "use_valgrind", "_puppet", "_remove_prefs")

    def __init__(self, binary, extension, launch_timeout, log_limit,
                 memory_limit, **kwds):
        """Create a PuppetTarget wrapping an FFPuppet instance.

        Args:
            binary: Browser binary to launch.
            extension: Browser extension passed to FFPuppet.launch().
            launch_timeout: Seconds to wait for the browser to launch.
            log_limit: Log size limit passed to FFPuppet.launch().
            memory_limit: Memory limit passed to FFPuppet.launch().
            **kwds: Optional debugger/display flags: rr, valgrind, xvfb.
                Unrecognized entries are ignored with a warning.
        """
        super().__init__(binary, extension, launch_timeout, log_limit,
                         memory_limit)
        self.use_rr = kwds.pop("rr", False)
        self.use_valgrind = kwds.pop("valgrind", False)
        # True when the prefs.js in use was generated by this object and
        # should be removed during cleanup()
        self._remove_prefs = False
        # create Puppet object
        self._puppet = FFPuppet(use_rr=self.use_rr,
                                use_valgrind=self.use_valgrind,
                                use_xvfb=kwds.pop("xvfb", False))
        if kwds:
            LOG.warning("PuppetTarget ignoring unsupported arguments: %s",
                        ", ".join(kwds))

    def _abort_hung_proc(self):
        # send SIGABRT to the busiest process
        with self._lock:
            proc_usage = self._puppet.cpu_usage()
        for pid, cpu in sorted(proc_usage, reverse=True, key=lambda x: x[1]):
            LOG.debug("sending SIGABRT to pid: %r, cpu: %0.2f%%", pid, cpu)
            try:
                kill(pid, SIGABRT)
            except OSError:
                LOG.warning("Failed to send SIGABRT to pid %d", pid)
            break

    def add_abort_token(self, token):
        """Register a token for FFPuppet to scan the browser logs for."""
        self._puppet.add_abort_token(token)

    def cleanup(self):
        """Release FFPuppet resources and remove any generated prefs.js."""
        # prevent parallel calls to FFPuppet.close() and/or FFPuppet.clean_up()
        with self._lock:
            self._puppet.clean_up()
        # only remove the prefs.js file if it was generated by this object
        if self._remove_prefs and self._prefs and isfile(self._prefs):
            unlink(self._prefs)

    def close(self):
        """Close the browser instance via FFPuppet."""
        # prevent parallel calls to FFPuppet.close() and/or FFPuppet.clean_up()
        with self._lock:
            self._puppet.close()

    @property
    def closed(self):
        """bool: True once FFPuppet has recorded a close reason."""
        return self._puppet.reason is not None

    def is_idle(self, threshold):
        """Return True if no browser process is at or above the CPU threshold."""
        return all(
            usage < threshold for _, usage in self._puppet.cpu_usage())

    def create_report(self):
        """Save the current browser logs and build a Report from them."""
        logs = mkdtemp(prefix="logs_", dir=grz_tmp("logs"))
        self.save_logs(logs)
        return Report(logs, self.binary)

    @property
    def monitor(self):
        """TargetMonitor: lazily created proxy exposing FFPuppet state."""
        if self._monitor is None:

            # closes over self; '_' is the inner instance (unused)
            class _PuppetMonitor(TargetMonitor):
                # pylint: disable=no-self-argument,protected-access
                def clone_log(_, log_id, offset=0):
                    return self._puppet.clone_log(log_id, offset=offset)

                def is_running(_):
                    return self._puppet.is_running()

                def is_healthy(_):
                    return self._puppet.is_healthy()

                @property
                def launches(_):
                    return self._puppet.launches

                def log_length(_, log_id):
                    return self._puppet.log_length(log_id)

            self._monitor = _PuppetMonitor()
        return self._monitor

    def detect_failure(self, ignored, was_timeout):
        """Examine browser state and classify the outcome of an iteration.

        Args:
            ignored: Collection of failure types to ignore
                (e.g. "timeout", "memory", "log-limit").
            was_timeout: True if the iteration hit a timeout.

        Returns:
            RESULT_NONE, RESULT_IGNORED or RESULT_FAILURE.
        """
        status = self.RESULT_NONE
        is_healthy = self._puppet.is_healthy()
        # check if there has been a crash, hang, etc...
        if not is_healthy or was_timeout:
            if self._puppet.is_running():
                LOG.debug("terminating browser...")
                if was_timeout and "timeout" not in ignored and system(
                ) == "Linux":
                    self._abort_hung_proc()
                    # give the process a moment to start dump
                    self._puppet.wait(timeout=1)
            self.close()
        # if something has happened figure out what
        if not is_healthy:
            if self._puppet.reason == FFPuppet.RC_CLOSED:
                LOG.debug("target.close() was called")
            elif self._puppet.reason == FFPuppet.RC_EXITED:
                LOG.debug("target closed itself")
            elif (self._puppet.reason == FFPuppet.RC_WORKER
                  and "memory" in ignored and "ffp_worker_memory_usage"
                  in self._puppet.available_logs()):
                status = self.RESULT_IGNORED
                LOG.debug("memory limit exceeded")
            elif (self._puppet.reason == FFPuppet.RC_WORKER
                  and "log-limit" in ignored
                  and "ffp_worker_log_size" in self._puppet.available_logs()):
                status = self.RESULT_IGNORED
                LOG.debug("log size limit exceeded")
            else:
                LOG.debug("failure detected, ffpuppet reason %r",
                          self._puppet.reason)
                status = self.RESULT_FAILURE
        elif was_timeout:
            LOG.debug("timeout detected")
            status = self.RESULT_IGNORED if "timeout" in ignored else self.RESULT_FAILURE
        return status

    def dump_coverage(self, timeout=15):
        """Signal the browser to dump coverage data and wait for completion.

        SIGUSR1 is sent to the parent and all child processes, then open
        .gcda files are polled until the dump completes, the timeout is hit
        or the browser fails.

        Args:
            timeout: Seconds to wait for an open .gcda file to be released.
        """
        assert SIGUSR1 is not None
        pid = self._puppet.get_pid()
        if pid is None or not self._puppet.is_healthy():
            LOG.debug("Skipping coverage dump (target is not in a good state)")
            return
        # If at this point, the browser is in a good state, i.e. no crashes
        # or hangs, so signal the browser to dump coverage.
        try:
            for child in Process(pid).children(recursive=True):
                LOG.debug("Sending SIGUSR1 to %d (child)", child.pid)
                try:
                    kill(child.pid, SIGUSR1)
                except OSError:
                    LOG.warning("Failed to send SIGUSR1 to pid %d", child.pid)
        except (AccessDenied, NoSuchProcess):  # pragma: no cover
            pass
        LOG.debug("Sending SIGUSR1 to %d (parent)", pid)
        try:
            kill(pid, SIGUSR1)
        except OSError:
            LOG.warning("Failed to send SIGUSR1 to pid %d", pid)
        start_time = time()
        gcda_found = False
        delay = 0.1
        # wait for processes to write .gcda files
        # this should typically take less than 1 second
        while True:
            for proc in process_iter(attrs=["pid", "ppid", "open_files"]):
                # check if proc is the target or child process
                if pid in (proc.info["pid"], proc.info["ppid"]):
                    if proc.info["open_files"] is None:
                        continue
                    if any(
                            x.path.endswith(".gcda")
                            for x in proc.info["open_files"]):
                        gcda_found = True
                        # get the pid of the process that has the file open
                        gcda_open = proc.info["pid"]
                        break
            else:
                gcda_open = None
            elapsed = time() - start_time
            if gcda_found:
                if gcda_open is None:
                    # success
                    LOG.debug("gcda dump took %0.2fs", elapsed)
                    break
                if elapsed >= timeout:
                    # timeout failure
                    LOG.warning("gcda file open by pid %d after %0.2fs",
                                gcda_open, elapsed)
                    try:
                        kill(gcda_open, SIGABRT)
                    except OSError:
                        pass
                    sleep(1)
                    self.close()
                    break
                if delay < 1.0:
                    # increase delay to a maximum of 1 second
                    delay = min(1.0, delay + 0.1)
            elif elapsed >= 3:
                # assume we missed the process writing .gcda files
                LOG.warning("No gcda files seen after %0.2fs", elapsed)
                break
            if not self._puppet.is_healthy():
                LOG.warning("Browser failure during dump_coverage()")
                break
            sleep(delay)

    def launch(self, location, env_mod=None):
        """Launch the browser pointed at the given location.

        Args:
            location: URL to open after launch.
            env_mod (dict): Optional extra environment variables for the
                browser process.

        Raises:
            TargetLaunchTimeout: Browser did not launch within launch_timeout.
            TargetLaunchError: Browser failed to launch for any other reason.
        """
        # setup environment
        env_mod = dict(env_mod or [])
        # do not allow network connections to non local endpoints
        env_mod["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
        env_mod["MOZ_CRASHREPORTER_SHUTDOWN"] = "1"
        try:
            self._puppet.launch(self.binary,
                                launch_timeout=self.launch_timeout,
                                location=location,
                                log_limit=self.log_limit,
                                memory_limit=self.memory_limit,
                                prefs_js=self.prefs,
                                extension=self.extension,
                                env_mod=env_mod)
        except LaunchError as exc:
            LOG.error("FFPuppet LaunchError: %s", str(exc))
            self.close()
            if isinstance(exc, BrowserTimeoutError):
                raise TargetLaunchTimeout(str(exc)) from None
            raise TargetLaunchError(str(exc), self.create_report()) from None

    def log_size(self):
        """Return the combined length of the stderr and stdout logs."""
        return sum(
            self._puppet.log_length(name) for name in ("stderr", "stdout"))

    @property
    def prefs(self):
        """str: Path to the prefs.js file to use.

        If no file has been set one is generated from the prefpicker
        "browser-fuzzing.yml" template and flagged for removal by cleanup().
        """
        if self._prefs is None:
            # generate temporary prefs.js
            for prefs_template in PrefPicker.templates():
                if prefs_template.endswith("browser-fuzzing.yml"):
                    LOG.debug("using prefpicker template %r", prefs_template)
                    tmp_fd, self._prefs = mkstemp(prefix="prefs_",
                                                  suffix=".js",
                                                  dir=grz_tmp())
                    # close the open fd from mkstemp (module-level close(),
                    # not self.close); create_prefsjs() writes the file below
                    close(tmp_fd)
                    PrefPicker.load_template(prefs_template).create_prefsjs(
                        self._prefs)
                    LOG.debug("generated prefs.js %r", self._prefs)
                    self._remove_prefs = True
                    break
            else:  # pragma: no cover
                raise TargetError("Failed to generate prefs.js")
        return self._prefs

    @prefs.setter
    def prefs(self, prefs_file):
        """Set the prefs.js file to use (None falls back to a generated one).

        Raises:
            TargetError: prefs_file does not point to an existing file.
        """
        # remove an existing generated prefs.js before replacing it
        if self._remove_prefs and self._prefs and isfile(self._prefs):
            unlink(self._prefs)
        if prefs_file is None:
            self._prefs = None
            self._remove_prefs = True
        elif isfile(prefs_file):
            self._prefs = abspath(prefs_file)
            self._remove_prefs = False
        else:
            raise TargetError("Missing prefs.js file %r" % (prefs_file, ))

    def save_logs(self, *args, **kwargs):
        """Forward to FFPuppet.save_logs() to write browser logs to disk."""
        self._puppet.save_logs(*args, **kwargs)