def test(self):
    """Test that ASan instrumentation can be successfully set up on device."""
    adb.revert_asan_device_setup_if_needed()
    environment.reset_current_memory_tool_options()
    sanitizer.setup_asan_if_needed()
    self.assertEqual(0, self.mock.log_error.call_count)
    self.assertTrue(adb.file_exists('/system/bin/asanwrapper'))
    def test_doesnt_mutate_options(self):
        """Test that calling get_memory_tool_options followed by
        set_memory_tool_options does not mutate sanitizer options unless we
        do so explicitly."""
        # Make the environment module use the Windows symbolizer, since its
        # path is hard to get right.
        test_helpers.patch_environ(self)
        os.environ['JOB_NAME'] = 'windows_libfuzzer_chrome_asan'
        test_helpers.patch(self, [
            'clusterfuzz._internal.system.environment.platform',
            'clusterfuzz._internal.system.environment.get_llvm_symbolizer_path'
        ])
        self.mock.platform.return_value = 'WINDOWS'
        windows_symbolizer_path = (
            r'c:\clusterfuzz\resources\platform\windows\llvm-symbolizer.exe')

        self.mock.get_llvm_symbolizer_path.return_value = windows_symbolizer_path
        environment.reset_current_memory_tool_options()
        memory_tool_var = 'ASAN_OPTIONS'
        first_asan_options_dict = environment.get_memory_tool_options(
            memory_tool_var)
        environment.set_memory_tool_options(memory_tool_var,
                                            first_asan_options_dict)
        second_asan_options_dict = environment.get_memory_tool_options(
            memory_tool_var)
        self.assertDictEqual(first_asan_options_dict, second_asan_options_dict)
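
The invariant under test is a clean round trip: parsing an options string and
writing it back must not change it. A minimal sketch of that property,
assuming the usual colon-separated key=value sanitizer format (this is not
ClusterFuzz's actual parser):

# Minimal sketch, not ClusterFuzz's implementation: sanitizer option strings
# such as ASAN_OPTIONS are colon-separated key=value pairs, and a
# parse/serialize round trip should be lossless.
def parse_options(value):
    return dict(pair.split('=', 1) for pair in value.split(':') if pair)

def serialize_options(options):
    return ':'.join('%s=%s' % item for item in sorted(options.items()))

raw = 'print_summary=1:redzone=64'
assert parse_options(serialize_options(parse_options(raw))) == parse_options(raw)
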
    def test_ubsan_disabled(self):
        """Test reset_current_memory_tool_options when ubsan is disabled."""
        os.environ['JOB_NAME'] = 'libfuzzer_chrome_asan'
        os.environ['UBSAN'] = 'True'
        environment.reset_current_memory_tool_options(disable_ubsan=True)
        self.assertDictEqual(
            {
                'halt_on_error': 0,
                'print_stacktrace': 0,
                'print_suppressions': 0
            }, environment.get_memory_tool_options('UBSAN_OPTIONS'))
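
For reference, the dict asserted above is the parsed form of a raw
environment value along these lines (key order is illustrative):

# Illustrative only: a raw UBSAN_OPTIONS value that parses to the dict
# asserted above, in the colon-separated key=value sanitizer format.
os.environ['UBSAN_OPTIONS'] = (
    'halt_on_error=0:print_stacktrace=0:print_suppressions=0')
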
Example #4
def _setup_memory_tools_environment(testcase):
    """Set up environment for various memory tools used."""
    env = testcase.get_metadata('env')
    if not env:
        environment.reset_current_memory_tool_options(
            redzone_size=testcase.redzone,
            disable_ubsan=testcase.disable_ubsan)
        return

    for options_name, options_value in six.iteritems(env):
        if not options_value:
            environment.remove_key(options_name)
            continue
        environment.set_memory_tool_options(options_name, options_value)
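
A hypothetical 'env' metadata value for the loop above, to make both branches
concrete (keys and values are illustrative, not taken from a real testcase):

# Hypothetical metadata: truthy values are applied via
# set_memory_tool_options; falsy values trigger environment.remove_key.
env = {
    'ASAN_OPTIONS': {'redzone': 256, 'detect_leaks': 1},
    'UBSAN_OPTIONS': {},  # falsy -> environment.remove_key('UBSAN_OPTIONS')
}
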
    def test_windows_symbolizer(self):
        """Test that reset_current_memory_tool_options sets the expected llvm
        symbolizer path on Windows."""
        os.environ['JOB_NAME'] = 'windows_libfuzzer_chrome_asan'
        test_helpers.patch(self, [
            'clusterfuzz._internal.system.environment.platform',
            'clusterfuzz._internal.system.environment.get_llvm_symbolizer_path'
        ])
        self.mock.platform.return_value = 'WINDOWS'
        windows_symbolizer_path = (
            r'c:\clusterfuzz\resources\platform\windows\llvm-symbolizer.exe')
        self.mock.get_llvm_symbolizer_path.return_value = windows_symbolizer_path
        environment.reset_current_memory_tool_options()
        self.assertIn(
            'external_symbolizer_path="%s"' % windows_symbolizer_path,
            os.environ['ASAN_OPTIONS'])
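
The double quotes in the assertion matter: sanitizer option strings use ':'
as the pair separator, so a Windows path containing a drive colon must be
quoted to survive parsing. The fragment the test expects looks like this:

# Illustrative only: the ASAN_OPTIONS fragment asserted above. Quoting keeps
# the drive colon in c:\... from being read as an option separator.
fragment = 'external_symbolizer_path="%s"' % windows_symbolizer_path
# external_symbolizer_path="c:\clusterfuzz\resources\platform\windows\llvm-symbolizer.exe"
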
Example #6
def cleanup_task_state():
  """Cleans state before and after a task is executed."""
  # Cleanup stale processes.
  process_handler.cleanup_stale_processes()

  # Clear build urls, temp and testcase directories.
  shell.clear_build_urls_directory()
  shell.clear_crash_stacktraces_directory()
  shell.clear_testcase_directories()
  shell.clear_temp_directory()
  shell.clear_system_temp_directory()
  shell.clear_device_temp_directories()

  # Reset memory tool environment variables.
  environment.reset_current_memory_tool_options()

  # Call python's garbage collector.
  utils.python_gc()
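
A sketch of a caller honoring the docstring's "before and after" contract
(the task-runner names here are hypothetical, not ClusterFuzz APIs):

# Hypothetical task runner: clean up before the task so it starts from a
# known state, and after it so leftover state cannot leak into the next task.
def run_task(task):
    cleanup_task_state()
    try:
        task.execute()
    finally:
        cleanup_task_state()
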
    def run(self, initial_corpus_path, minimized_corpus_path, bad_units_path):
        """Run corpus pruning. Output result to directory."""
        if not shell.get_directory_file_count(initial_corpus_path):
            # Empty corpus, nothing to do.
            return None

        # Set memory tool options and fuzzer arguments.
        engine_common.unpack_seed_corpus_if_needed(self.runner.target_path,
                                                   initial_corpus_path,
                                                   force_unpack=True)

        environment.reset_current_memory_tool_options(redzone_size=MIN_REDZONE,
                                                      leaks=True)
        self.runner.process_sanitizer_options()
        additional_args = self.runner.get_libfuzzer_flags()

        # Execute fuzzer with arguments for corpus pruning.
        logs.log('Running merge...')
        try:
            result = self.runner.minimize_corpus(additional_args,
                                                 [initial_corpus_path],
                                                 minimized_corpus_path,
                                                 bad_units_path,
                                                 CORPUS_PRUNING_TIMEOUT)
        except TimeoutError as e:
            raise CorpusPruningException(
                'Corpus pruning timed out while minimizing corpus\n' + repr(e))
        except engine.Error as e:
            raise CorpusPruningException(
                'Corpus pruning failed to minimize corpus\n' + repr(e))

        symbolized_output = stack_symbolizer.symbolize_stacktrace(result.logs)

        # Sanity check that there are files in minimized corpus after merging.
        if not shell.get_directory_file_count(minimized_corpus_path):
            raise CorpusPruningException(
                'Corpus pruning failed to minimize corpus\n' +
                symbolized_output)

        logs.log('Corpus merge finished successfully.',
                 output=symbolized_output)

        return result.stats
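
A hedged sketch of a call site for run(), assuming the surrounding task
prepares the three corpus directories and treats CorpusPruningException as
fatal for this run:

# Hypothetical call site; the directory variables are illustrative.
try:
    stats = pruner.run(initial_corpus_path, minimized_corpus_path,
                       bad_units_path)
except CorpusPruningException as e:
    logs.log_error('Corpus pruning failed: %s' % e)
    stats = None
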
    def test_ubsan_enabled(self):
        """Test reset_current_memory_tool_options when ubsan is enabled."""
        os.environ['JOB_NAME'] = 'libfuzzer_chrome_asan'
        os.environ['UBSAN'] = 'True'
        environment.reset_current_memory_tool_options(disable_ubsan=False)
        self.assertDictEqual(
            {
                'halt_on_error': 1,
                'handle_abort': 1,
                'handle_segv': 1,
                'handle_sigbus': 1,
                'handle_sigfpe': 1,
                'handle_sigill': 1,
                'print_stacktrace': 1,
                'print_summary': 1,
                'print_suppressions': 0,
                'silence_unsigned_overflow': 1,
                'use_sigaltstack': 1
            }, environment.get_memory_tool_options('UBSAN_OPTIONS'))
    def run(self, timeout):
        """Merge testcases from corpus from other fuzz targets."""
        if not shell.get_directory_file_count(self.context.shared_corpus_path):
            logs.log('No files found in shared corpus, skip merge.')
            return None

        # Run pruning on the shared corpus and log the result in case of error.
        logs.log('Merging shared corpus...')
        environment.reset_current_memory_tool_options(
            redzone_size=DEFAULT_REDZONE)
        self.runner.process_sanitizer_options()

        additional_args = self.runner.get_libfuzzer_flags()

        try:
            result = self.runner.minimize_corpus(
                additional_args, [self.context.shared_corpus_path],
                self.context.minimized_corpus_path,
                self.context.bad_units_path, timeout)
            symbolized_output = stack_symbolizer.symbolize_stacktrace(
                result.logs)
            logs.log('Shared corpus merge finished successfully.',
                     output=symbolized_output)
        except TimeoutError as e:
            # Other cross-pollinated fuzzer corpora can contain unexpected
            # testcases that cause us to time out. This is expected, so bail out.
            logs.log_warn(
                'Corpus pruning timed out while merging shared corpus\n' +
                repr(e))
            return None
        except engine.Error as e:
            # Other cross-pollinated fuzzer corpora can be large, so we can run
            # out of disk space and raise. This is expected, so bail out.
            logs.log_warn('Corpus pruning failed to merge shared corpus\n' +
                          repr(e))
            return None

        return result.stats
Example #10
def execute_task(testcase_id, job_type):
    """Run analyze task."""
    # Reset redzones.
    environment.reset_current_memory_tool_options(redzone_size=128)

    # Unset window location size and position properties so the defaults are used.
    environment.set_value('WINDOW_ARG', '')

    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    metadata = data_types.TestcaseUploadMetadata.query(
        data_types.TestcaseUploadMetadata.testcase_id == int(
            testcase_id)).get()
    if not metadata:
        logs.log_error('Testcase %s has no associated upload metadata.' %
                       testcase_id)
        testcase.key.delete()
        return

    is_lsan_enabled = environment.get_value('LSAN')
    if is_lsan_enabled:
        # Create an empty local blacklist so all leaks are visible to the uploader.
        leak_blacklist.create_empty_local_blacklist()

    # Store the bot name and timestamp in upload metadata.
    bot_name = environment.get_value('BOT_NAME')
    metadata.bot_name = bot_name
    metadata.timestamp = datetime.datetime.utcnow()
    metadata.put()

    # Adjust the test timeout if the user has provided one.
    if metadata.timeout:
        environment.set_value('TEST_TIMEOUT', metadata.timeout)

    # Adjust the number of retries if the user has provided one.
    if metadata.retries is not None:
        environment.set_value('CRASH_RETRIES', metadata.retries)

    # Set up testcase and get absolute testcase path.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Set up build.
    setup_build(testcase)

    # Check if we have an application path. If not, our build failed
    # to set up correctly.
    if not build_manager.check_app_path():
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Build setup failed')

        if data_handler.is_first_retry_for_task(testcase):
            build_fail_wait = environment.get_value('FAIL_WAIT')
            tasks.add_task('analyze',
                           testcase_id,
                           job_type,
                           wait_time=build_fail_wait)
        else:
            data_handler.close_invalid_uploaded_testcase(
                testcase, metadata, 'Build setup failed')
        return

    # Update initial testcase information.
    testcase.absolute_path = testcase_file_path
    testcase.job_type = job_type
    testcase.binary_flag = utils.is_binary_file(testcase_file_path)
    testcase.queue = tasks.default_queue()
    testcase.crash_state = ''

    # Set initial testcase metadata fields (e.g. build url, etc).
    data_handler.set_initial_testcase_metadata(testcase)

    # Update minimized arguments and use ones provided during user upload.
    if not testcase.minimized_arguments:
        minimized_arguments = environment.get_value('APP_ARGS') or ''
        additional_command_line_flags = testcase.get_metadata(
            'uploaded_additional_args')
        if additional_command_line_flags:
            minimized_arguments += ' %s' % additional_command_line_flags
        environment.set_value('APP_ARGS', minimized_arguments)
        testcase.minimized_arguments = minimized_arguments

    # Update other fields not set at upload time.
    testcase.crash_revision = environment.get_value('APP_REVISION')
    data_handler.set_initial_testcase_metadata(testcase)
    testcase.put()

    # Initialize some variables.
    gestures = testcase.gestures
    http_flag = testcase.http_flag
    test_timeout = environment.get_value('TEST_TIMEOUT')

    # Get the crash output.
    result = testcase_manager.test_for_crash_with_retries(testcase,
                                                          testcase_file_path,
                                                          test_timeout,
                                                          http_flag=http_flag,
                                                          compare_crash=False)

    # If we don't get a crash, try enabling http to see if we can get a crash.
    # Skip engine fuzzer jobs (e.g. libFuzzer, AFL) for which http testcase paths
    # are not applicable.
    if (not result.is_crash() and not http_flag
            and not environment.is_engine_fuzzer_job()):
        result_with_http = testcase_manager.test_for_crash_with_retries(
            testcase,
            testcase_file_path,
            test_timeout,
            http_flag=True,
            compare_crash=False)
        if result_with_http.is_crash():
            logs.log('Testcase needs http flag for crash.')
            http_flag = True
            result = result_with_http

    # Refresh our object.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    # Set application command line with the correct http flag.
    application_command_line = (
        testcase_manager.get_command_line_for_application(
            testcase_file_path, needs_http=http_flag))

    # Get the crash data.
    crashed = result.is_crash()
    crash_time = result.get_crash_time()
    state = result.get_symbolized_data()
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)

    # Get crash info object with minidump info. Also, re-generate unsymbolized
    # stacktrace if needed.
    crash_info, _ = (crash_uploader.get_crash_info_and_stacktrace(
        application_command_line, state.crash_stacktrace, gestures))
    if crash_info:
        testcase.minidump_keys = crash_info.store_minidump()

    if not crashed:
        # Could not reproduce the crash.
        log_message = ('Testcase didn\'t crash in %d seconds (with retries)' %
                       test_timeout)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.FINISHED,
                                             log_message)

        # In the general case, we will not attempt to symbolize if we do not detect
        # a crash. For user uploads, we should symbolize anyway to provide more
        # information about what might be happening.
        crash_stacktrace_output = utils.get_crash_stacktrace_output(
            application_command_line, state.crash_stacktrace,
            unsymbolized_crash_stacktrace)
        testcase.crash_stacktrace = data_handler.filter_stacktrace(
            crash_stacktrace_output)

        # For an unreproducible testcase, retry once on another bot to confirm
        # our results and in case this bot is in a bad state which we didn't catch
        # through our usual means.
        if data_handler.is_first_retry_for_task(testcase):
            testcase.status = 'Unreproducible, retrying'
            testcase.put()

            tasks.add_task('analyze', testcase_id, job_type)
            return

        data_handler.close_invalid_uploaded_testcase(testcase, metadata,
                                                     'Unreproducible')

        # A non-reproducing testcase might still impact production branches.
        # Add the impact task to get that information.
        task_creation.create_impact_task_if_needed(testcase)
        return

    # Update testcase crash parameters.
    testcase.http_flag = http_flag
    testcase.crash_type = state.crash_type
    testcase.crash_address = state.crash_address
    testcase.crash_state = state.crash_state
    crash_stacktrace_output = utils.get_crash_stacktrace_output(
        application_command_line, state.crash_stacktrace,
        unsymbolized_crash_stacktrace)
    testcase.crash_stacktrace = data_handler.filter_stacktrace(
        crash_stacktrace_output)

    # Try to guess if the bug is security or not.
    security_flag = crash_analyzer.is_security_issue(state.crash_stacktrace,
                                                     state.crash_type,
                                                     state.crash_address)
    testcase.security_flag = security_flag

    # If it is, guess the severity.
    if security_flag:
        testcase.security_severity = severity_analyzer.get_security_severity(
            state.crash_type, state.crash_stacktrace, job_type, bool(gestures))

    log_message = ('Testcase crashed in %d seconds (r%d)' %
                   (crash_time, testcase.crash_revision))
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED,
                                         log_message)

    # See if we have to ignore this crash.
    if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
        data_handler.close_invalid_uploaded_testcase(testcase, metadata,
                                                 'Irrelevant')
        return

    # Test for reproducibility.
    one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
        testcase.fuzzer_name, testcase.actual_fuzzer_name(),
        testcase_file_path, state.crash_state, security_flag, test_timeout,
        http_flag, gestures)
    testcase.one_time_crasher_flag = one_time_crasher_flag

    # Check to see if this is a duplicate.
    data_handler.check_uploaded_testcase_duplicate(testcase, metadata)

    # Set testcase and metadata status if not set already.
    if testcase.status == 'Duplicate':
        # For testcases uploaded by bots (with the quiet flag), don't create
        # additional tasks.
        if metadata.quiet_flag:
            data_handler.close_invalid_uploaded_testcase(
                testcase, metadata, 'Duplicate')
            return
    else:
        # New testcase.
        testcase.status = 'Processed'
        metadata.status = 'Confirmed'

        # Reset the timestamp as well, to respect
        # data_types.MIN_ELAPSED_TIME_SINCE_REPORT. Otherwise it may get filed by
        # triage task prematurely without the grouper having a chance to run on this
        # testcase.
        testcase.timestamp = utils.utcnow()

        # Add new leaks to global blacklist to avoid detecting duplicates.
        # Only add if testcase has a direct leak crash and if it's reproducible.
        if is_lsan_enabled:
            leak_blacklist.add_crash_to_global_blacklist_if_needed(testcase)

    # Update the testcase values.
    testcase.put()

    # Update the upload metadata.
    metadata.security_flag = security_flag
    metadata.put()

    _add_default_issue_metadata(testcase)

    # Create tasks to
    # 1. Minimize testcase (minimize).
    # 2. Find regression range (regression).
    # 3. Find testcase impact on production branches (impact).
    # 4. Check whether testcase is fixed (progression).
    # 5. Get second stacktrace from another job in case of
    #    one-time crashes (stack).
    task_creation.create_tasks(testcase)
    def process_bad_units(self, bad_units_path, quarantine_corpus_path,
                          crashes):
        """Process bad units found during merge."""
        # TODO(ochang): A lot of this function is similar to parts of fuzz_task.
        # Ideally fuzz_task can be refactored in a way that lets us share the common
        # code.

        environment.reset_current_memory_tool_options(
            redzone_size=DEFAULT_REDZONE)
        self.runner.process_sanitizer_options()

        logs.log('Processing bad units.')
        corpus_file_paths = _get_corpus_file_paths(bad_units_path)
        num_bad_units = 0

        # Run each corpus item individually.
        for i, unit_path in enumerate(corpus_file_paths, 1):
            if i % 100 == 0:
                logs.log('Up to %d' % i)

            unit_name = os.path.basename(unit_path)
            if unit_name.startswith('timeout-') or unit_name.startswith(
                    'oom-'):
                # Don't waste time re-running timeout or oom testcases.
                self._quarantine_unit(unit_path, quarantine_corpus_path)
                num_bad_units += 1
                continue

            try:
                result = self._run_single_unit(unit_path)
            except TimeoutError:
                # Slow unit. Quarantine it.
                self._quarantine_unit(unit_path, quarantine_corpus_path)
                num_bad_units += 1
                continue

            if not crash_analyzer.is_memory_tool_crash(result.output):
                # Didn't crash.
                continue

            # Get memory tool crash information.
            state = stack_analyzer.get_crash_data(result.output,
                                                  symbolize_flag=True)

            # Crashed or caused a leak. Quarantine it.
            unit_path = self._quarantine_unit(unit_path,
                                              quarantine_corpus_path)
            num_bad_units += 1

            if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
                continue

            # Local de-duplication.
            if state.crash_state not in crashes:
                security_flag = crash_analyzer.is_security_issue(
                    state.crash_stacktrace, state.crash_type,
                    state.crash_address)
                crashes[state.crash_state] = CorpusCrash(
                    state.crash_state, state.crash_type, state.crash_address,
                    state.crash_stacktrace, unit_path, security_flag)

        logs.log('Found %d bad units, %d unique crashes.' %
                 (num_bad_units, len(crashes)))
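
The _quarantine_unit helper is not shown in this example. A plausible minimal
shape, inferred only from its call sites above (it must move the unit into
the quarantine corpus and return the new path):

import os
import shutil

# Hypothetical sketch of _quarantine_unit, inferred from how it is used
# above; the real helper may differ.
def _quarantine_unit(unit_path, quarantine_corpus_path):
    quarantined_unit_path = os.path.join(
        quarantine_corpus_path, os.path.basename(unit_path))
    shutil.move(unit_path, quarantined_unit_path)
    return quarantined_unit_path
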
Example #12
def execute_task(testcase_id, job_type):
    """Execute a symbolize command."""
    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)

    # We should at least have a symbolized debug or release build.
    if not build_manager.has_symbolized_builds():
        return

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # Set up the testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Initialize variables.
    build_fail_wait = environment.get_value('FAIL_WAIT')

    old_crash_stacktrace = data_handler.get_stacktrace(testcase)
    sym_crash_type = testcase.crash_type
    sym_crash_address = testcase.crash_address
    sym_crash_state = testcase.crash_state
    sym_redzone = DEFAULT_REDZONE
    warmup_timeout = environment.get_value('WARMUP_TIMEOUT')

    # Decide which build revision to use.
    if testcase.crash_stacktrace == 'Pending':
        # This usually happens when someone clicks the 'Update stacktrace from
        # trunk' button on the testcase details page. In this case, we are forced
        # to use trunk. No revision -> trunk build.
        build_revision = None
    else:
        build_revision = testcase.crash_revision

    # Set up a custom or regular build based on revision.
    build_manager.setup_build(build_revision)

    # Get crash revision used in setting up build.
    crash_revision = environment.get_value('APP_REVISION')

    if not build_manager.check_app_path():
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Build setup failed')
        tasks.add_task('symbolize',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
        return

    # ASAN tool settings (if the tool is used).
    # See if we can get better stacks with higher redzone sizes.
    # A UAF might actually turn out to be OOB read/write with a bigger redzone.
    if environment.tool_matches('ASAN', job_type) and testcase.security_flag:
        redzone = MAX_REDZONE
        while redzone >= MIN_REDZONE:
            environment.reset_current_memory_tool_options(
                redzone_size=redzone,
                disable_ubsan=testcase.disable_ubsan)

            process_handler.terminate_stale_application_instances()
            command = testcase_manager.get_command_line_for_application(
                testcase_file_path, needs_http=testcase.http_flag)
            return_code, crash_time, output = (process_handler.run_process(
                command, timeout=warmup_timeout, gestures=testcase.gestures))
            crash_result = CrashResult(return_code, crash_time, output)

            if crash_result.is_crash() and 'AddressSanitizer' in output:
                state = crash_result.get_symbolized_data()
                security_flag = crash_result.is_security_issue()

                if (not crash_analyzer.ignore_stacktrace(
                        state.crash_stacktrace)
                        and security_flag == testcase.security_flag
                        and state.crash_type == testcase.crash_type
                        and (state.crash_type != sym_crash_type
                             or state.crash_state != sym_crash_state)):
                    logs.log(
                        'Changing crash parameters.\nOld : %s, %s, %s' %
                        (sym_crash_type, sym_crash_address, sym_crash_state))

                    sym_crash_type = state.crash_type
                    sym_crash_address = state.crash_address
                    sym_crash_state = state.crash_state
                    sym_redzone = redzone
                    old_crash_stacktrace = state.crash_stacktrace

                    logs.log(
                        '\nNew : %s, %s, %s' %
                        (sym_crash_type, sym_crash_address, sym_crash_state))
                    break

            redzone //= 2  # Floor division keeps the redzone size an integer.

    # We should have at least a symbolized debug or a release build.
    symbolized_builds = build_manager.setup_symbolized_builds(crash_revision)
    if (not symbolized_builds
            or (not build_manager.check_app_path()
                and not build_manager.check_app_path('APP_PATH_DEBUG'))):
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Build setup failed')
        tasks.add_task('symbolize',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
        return

    # Increase malloc_context_size to get all stack frames. Default is 30.
    environment.reset_current_memory_tool_options(
        redzone_size=sym_redzone,
        malloc_context_size=STACK_FRAME_COUNT,
        symbolize_inline_frames=True,
        disable_ubsan=testcase.disable_ubsan)

    # TSAN tool settings (if the tool is used).
    if environment.tool_matches('TSAN', job_type):
        environment.set_tsan_max_history_size()

    # Do the symbolization if supported by this application.
    result, sym_crash_stacktrace = (get_symbolized_stacktraces(
        testcase_file_path, testcase, old_crash_stacktrace, sym_crash_state))

    # Update crash parameters.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.crash_type = sym_crash_type
    testcase.crash_address = sym_crash_address
    testcase.crash_state = sym_crash_state
    testcase.crash_stacktrace = (
        data_handler.filter_stacktrace(sym_crash_stacktrace))

    if not result:
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Unable to reproduce crash, skipping '
            'stacktrace update')
    else:
        # Switch build url to use the less-optimized symbolized build with better
        # stacktrace.
        build_url = environment.get_value('BUILD_URL')
        if build_url:
            testcase.set_metadata('build_url',
                                  build_url,
                                  update_testcase=False)

        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.FINISHED)

    testcase.symbolized = True
    testcase.crash_revision = crash_revision
    testcase.put()

    # We might have updated the crash state. See if we need to mark this as a
    # duplicate based on other testcases.
    data_handler.handle_duplicate_entry(testcase)

    task_creation.create_blame_task_if_needed(testcase)

    # Switch current directory before builds cleanup.
    root_directory = environment.get_value('ROOT_DIR')
    os.chdir(root_directory)

    # Cleanup symbolized builds which are space-heavy.
    symbolized_builds.delete()
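
To make the ASAN redzone search in this example concrete: the loop halves the
redzone from MAX_REDZONE down to MIN_REDZONE, re-running the testcase at each
size until the crash parameters improve. With hypothetical bounds of 512 and
16, the schedule is:

# Illustrative only: the redzone sizes the loop would try, assuming
# MAX_REDZONE = 512 and MIN_REDZONE = 16 (hypothetical values).
MAX_REDZONE, MIN_REDZONE = 512, 16
redzone = MAX_REDZONE
while redzone >= MIN_REDZONE:
    print(redzone)  # 512, 256, 128, 64, 32, 16
    redzone //= 2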