Code Example #1
def clear_system_temp_directory():
    """Clear system specific temp directory."""
    def _delete_object(path, delete_func):
        """Delete a object with its delete function, ignoring any error."""
        try:
            delete_func(path)
        except Exception:
            pass

    if environment.get_value('SKIP_SYSTEM_TEMP_CLEANUP'):
        # This provides a way to avoid clearing the system temporary directory
        # when it can interfere with other processes on the system.
        return

    # Cache system temp directory to avoid iterating through the system dir list
    # on every gettempdir call. Also, it helps to avoid a case where temp dir
    # fills up the disk and gets ignored by gettempdir.
    global _system_temp_dir
    if not _system_temp_dir:
        _system_temp_dir = tempfile.gettempdir()

    # Use a custom cleanup rather than using |remove_directory| since it
    # recreates the directory and can mess up permissions and symlinks.
    for root, dirs, files in walk(_system_temp_dir, topdown=False):
        for name in files:
            _delete_object(os.path.join(root, name), os.remove)

        for name in dirs:
            _delete_object(os.path.join(root, name), os.rmdir)
    logs.log('Cleared system temp directory: %s' % _system_temp_dir)
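
For context, clear_system_temp_directory() above relies on two module-level names that are not shown. A minimal sketch of that state, assuming plain os.walk (the original module may bind a different, remote-capable walk):

import os
import tempfile

walk = os.walk  # Assumption; the original module may substitute its own walk.
_system_temp_dir = None  # Cached on the first call to avoid repeated lookups.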
Code Example #2
def run_and_wait(request, _):
  """Implementation of RunAndWait."""
  process_runner = new_process.ProcessRunner(request.executable_path,
                                             request.default_args)
  args = {}
  protobuf_utils.get_protobuf_field(args, request.popen_args, 'bufsize')
  protobuf_utils.get_protobuf_field(args, request.popen_args, 'executable')
  protobuf_utils.get_protobuf_field(args, request.popen_args, 'shell')
  protobuf_utils.get_protobuf_field(args, request.popen_args, 'cwd')

  if request.popen_args.env_is_set:
    args['env'] = request.popen_args.env
  else:
    args['env'] = None

  args['additional_args'] = request.additional_args
  protobuf_utils.get_protobuf_field(args, request, 'timeout')
  protobuf_utils.get_protobuf_field(args, request, 'terminate_before_kill')
  protobuf_utils.get_protobuf_field(args, request, 'terminate_wait_time')
  protobuf_utils.get_protobuf_field(args, request, 'input_data')
  protobuf_utils.get_protobuf_field(args, request, 'max_stdout_len')

  logs.log('Running command: %s' % process_runner.get_command())

  return untrusted_runner_pb2.RunAndWaitResponse(
      result=process_result_to_proto(process_runner.run_and_wait(**args)))
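
protobuf_utils.get_protobuf_field is presumably a small helper that copies a field into the dict only when that field is set on the message. A sketch consistent with how it is called above (an assumption, not the project's verbatim code):

def get_protobuf_field(result, buf, field_name):
  """Copy |field_name| from protobuf |buf| into |result| if it is set."""
  if buf.HasField(field_name):
    result[field_name] = getattr(buf, field_name)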
Code Example #3
def send(to_email, subject, html_content):
    """Send email."""
    sendgrid_api_key = db_config.get_value('sendgrid_api_key')
    if not sendgrid_api_key:
        logs.log_warn(
            'Skipping email as SendGrid API key is not set in config.')
        return

    from_email = db_config.get_value('sendgrid_sender')
    if not from_email:
        logs.log_warn(
            'Skipping email as SendGrid sender is not set in config.')
        return

    message = Mail(from_email=From(str(from_email)),
                   to_emails=To(str(to_email)),
                   subject=Subject(subject),
                   html_content=HtmlContent(str(html_content)))
    try:
        sg = SendGridAPIClient(sendgrid_api_key)
        response = sg.send(message)
        logs.log('Sent email to %s.' % to_email,
                 status_code=response.status_code,
                 body=response.body,
                 headers=response.headers)
    except Exception:
        logs.log_error('Failed to send email to %s.' % to_email)
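
The snippet above assumes the SendGrid v6 Python client. These are the imports it relies on, followed by a hypothetical call (address, subject, and content invented for illustration):

from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import From, HtmlContent, Mail, Subject, To

send('user@example.com', 'Fuzzing report', '<p>A new crash was found.</p>')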
Code Example #4
    def post(self, message):
        """Handle a post request."""
        testcase_id = message.attributes.get('testcaseId')
        if not testcase_id:
            raise helpers.EarlyExitException('Missing testcaseId.', 400)

        revision = message.attributes.get('revision')
        if not revision or not revision.isdigit():
            raise helpers.EarlyExitException('Missing revision.', 400)

        revision = int(revision)
        testcase = data_handler.get_testcase_by_id(testcase_id)
        job = data_types.Job.query(
            data_types.Job.name == testcase.job_type).get()
        if not job or not job.is_external():
            raise helpers.EarlyExitException('Invalid job.', 400)

        if message.data:
            stacktrace = message.data.decode()
        else:
            logs.log(f'No stacktrace provided (testcase_id={testcase_id}).')
            stacktrace = ''

        error = message.attributes.get('error')
        handle_update(testcase, revision, stacktrace, error)
        return 'OK'
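
The handler above expects a Pub/Sub-style message whose attributes carry the testcase id and revision, with an optional stacktrace payload in data. A hypothetical message shape (all values invented for illustration):

class FakeMessage:  # Hypothetical stand-in for a Pub/Sub message.
    attributes = {
        'testcaseId': '12345',  # Required; the handler returns 400 if absent.
        'revision': '678',      # Required; must be numeric.
        'error': '',            # Optional error string.
    }
    data = b'==1==ERROR: AddressSanitizer: heap-use-after-free ...'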
Code Example #5
def save_crash_info_if_needed(testcase_id, crash_revision, job_type,
                              crash_type, crash_address, crash_frames):
    """Saves crash report for chromium project, skip otherwise."""
    if data_handler.get_project_name(job_type) != 'chromium':
        return None

    serialized_crash_stack_frames = get_symbolized_stack_bytes(
        crash_type, crash_address, crash_frames)
    if not serialized_crash_stack_frames:
        return None

    crash_info = CrashReportInfo(
        serialized_crash_stack_frames=serialized_crash_stack_frames)

    # Get product and version (required).
    platform = environment.platform()
    crash_info.product = PRODUCT_MAP[platform]
    crash_info.version = revisions.get_real_revision(crash_revision,
                                                     job_type,
                                                     display=True)

    # Update crash_info object with bot information and testcase id.
    crash_info.bot_id = environment.get_value('BOT_NAME')
    crash_info.testcase_id = int(testcase_id)

    # Store CrashInfo metadata.
    crash_report_metadata = crash_info.to_report_metadata()
    crash_report_metadata.job_type = job_type
    crash_report_metadata.crash_revision = crash_revision
    crash_report_metadata.put()

    logs.log('Created crash report entry for testcase %s.' % testcase_id)
    return crash_info
Code Example #6
def main():
    """Run a cycle of heartbeat checks to ensure Android device is running."""
    logs.configure('android_heartbeat')
    dates.initialize_timezone_from_environment()
    environment.set_bot_environment()
    monitor.initialize()

    if environment.is_android_cuttlefish():
        android.adb.set_cuttlefish_device_serial()
    device_serial = environment.get_value('ANDROID_SERIAL')

    while True:
        state = android.adb.get_device_state()
        if state == android.adb.DEVICE_NOT_FOUND_STRING.format(
                serial=device_serial):
            android.adb.connect_to_cuttlefish_device()
            state = android.adb.get_device_state()
        logs.log('Android device %s state: %s' % (device_serial, state))

        monitoring_metrics.ANDROID_UPTIME.increment_by(
            int(state == 'device'), {
                'serial': device_serial or '',
                'platform': environment.get_platform_group() or '',
            })
        time.sleep(data_types.ANDROID_HEARTBEAT_WAIT_INTERVAL)

        if data_handler.bot_run_timed_out():
            break
Code Example #7
def _process_project(project, bucket):
    """Collects coverage information for all fuzz targets in the given project and
  the total stats for the project."""
    project_name = _basename(project)
    logs.log('Processing coverage for %s project.' % project_name)
    report_path = storage.get_cloud_storage_file_path(bucket, project)
    report_info = _read_json(report_path)
    if not report_info:
        logs.log_warn('Skipping code coverage for %s project.' % project_name)
        return

    # Iterate through report_info['fuzzer_stats_dir'] and prepare
    # CoverageInformation entities for individual fuzz targets.
    entities = []
    for fuzzer in storage.list_blobs(report_info['fuzzer_stats_dir'],
                                     recursive=False):
        entities.append(
            _process_fuzzer_stats(fuzzer, report_info, project_name, bucket))

    logs.log('Processed coverage for %d targets in %s project.' %
             (len(entities), project_name))

    # Prepare CoverageInformation entity for the total project stats.
    entities.append(_process_project_stats(report_info, project_name))

    ndb_utils.put_multi(entities)
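
_basename here presumably strips any trailing slash before taking the last component of a GCS path. A sketch under that assumption:

import os


def _basename(gcs_path):
    """Return the last component of a GCS path, ignoring a trailing '/'."""
    return os.path.basename(gcs_path.rstrip('/'))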
Code Example #8
File: project_setup.py Project: google/clusterfuzz
def update_fuzzer_jobs(fuzzer_entities, job_names):
    """Update fuzzer job mappings."""
    to_delete = {}

    for fuzzer_entity_key in fuzzer_entities:
        fuzzer_entity = fuzzer_entity_key.get()

        for job in data_types.Job.query():
            if not job.environment_string:
                continue

            job_environment = job.get_environment()
            if not utils.string_is_true(job_environment.get(
                    'MANAGED', 'False')):
                continue

            if job.name in job_names:
                continue

            logs.log('Deleting job %s' % job.name)
            to_delete[job.name] = job.key

            try:
                fuzzer_entity.jobs.remove(job.name)
            except ValueError:
                pass

        fuzzer_entity.put()
        fuzzer_selection.update_mappings_for_fuzzer(fuzzer_entity)

    if to_delete:
        ndb_utils.delete_multi(to_delete.values())
Code Example #9
def add_crash_to_global_blacklist_if_needed(testcase):
    """Adds relevant function from testcase crash state to global blacklist."""
    testcase_id = testcase.key.id()
    if not should_be_blacklisted(testcase):
        logs.log(
            'Testcase %s is not a reproducible leak, skipping leak blacklist.'
            % testcase_id)
        return False

    function_name = get_leak_function_for_blacklist(testcase)
    if not function_name:
        logs.log_error(
            'Testcase %s has invalid crash state, skipping leak blacklist.' %
            testcase_id)
        return False

    existing_query = data_types.Blacklist.query(
        data_types.Blacklist.function_name == function_name)
    existing_query = existing_query.filter(
        data_types.Blacklist.testcase_id == testcase_id)
    existing_query = existing_query.filter(
        data_types.Blacklist.tool_name == LSAN_TOOL_NAME)

    if existing_query.get():
        logs.log_error('Item already in leak blacklist.')
        return False

    blacklist_item = data_types.Blacklist(function_name=function_name,
                                          testcase_id=testcase_id,
                                          tool_name=LSAN_TOOL_NAME)
    blacklist_item.put()
    logs.log('Added %s to leak blacklist.' % function_name)

    return blacklist_item
Code Example #10
def notify_issue_update(testcase, status):
  """Notify that an issue update occurred (i.e. issue was filed or closed)."""
  topic = local_config.ProjectConfig().get('issue_updates.pubsub_topic')
  if not topic:
    return

  pubsub_client = pubsub.PubSubClient()
  pubsub_client.publish(
      topic, [
          pubsub.Message(
              attributes={
                  'crash_address': testcase.crash_address,
                  'crash_state': testcase.crash_state,
                  'crash_type': testcase.crash_type,
                  'issue_id': testcase.bug_information or '',
                  'security': str(testcase.security_flag).lower(),
                  'status': status,
                  'testcase_id': str(testcase.key.id()),
              })
      ])

  if status in ('verified', 'wontfix'):
    logs.log(f'Closing issue {testcase.github_issue_num} '
             f'in GitHub repo {testcase.github_repo_id}: '
             f'Testcase {testcase.key.id()} is marked as {status}.')
    oss_fuzz_github.close_issue(testcase)
Code Example #11
  def rsync_to_disk(self,
                    directory,
                    timeout=CORPUS_FILES_SYNC_TIMEOUT,
                    delete=True):
    """Run gsutil to download corpus files from GCS.

    Overridden to have additional logging.

    Args:
      directory: Path to directory to sync to.
      timeout: Timeout for gsutil.
      delete: Whether or not to delete files on disk that don't exist on GCS.

    Returns:
      A bool indicating whether or not the command succeeded.
    """
    result = GcsCorpus.rsync_to_disk(
        self, directory, timeout=timeout, delete=delete)
    if not result:
      return False

    # Check out the additional regressions corpus if set, ignoring the result.
    if self._regressions_corpus:
      regressions_dir = os.path.join(directory, 'regressions')
      self._regressions_corpus.rsync_to_disk(
          regressions_dir, timeout=timeout, delete=False)

    num_files = _count_corpus_files(directory)
    if self._log_results:
      logs.log('%d corpus files downloaded for %s.' %
               (num_files, self._project_qualified_target_name))

    return result
Code Example #12
  def rsync_from_disk(self,
                      directory,
                      timeout=CORPUS_FILES_SYNC_TIMEOUT,
                      delete=True):
    """Upload local files to GCS and remove files which do not exist locally.

    Overridden to have additional logging.

    Args:
      directory: Path to directory to sync from.
      timeout: Timeout for gsutil.
      delete: Whether or not to delete files on GCS that don't exist locally.

    Returns:
      A bool indicating whether or not the command succeeded.
    """
    result = GcsCorpus.rsync_from_disk(
        self, directory, timeout=timeout, delete=delete)

    num_files = _count_corpus_files(directory)
    if self._log_results:
      logs.log('%d corpus files uploaded for %s.' %
               (num_files, self._project_qualified_target_name))

    return result
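
Both sync wrappers report file counts via _count_corpus_files. A minimal sketch of such a helper, assuming a flat corpus directory:

import os


def _count_corpus_files(directory):
  """Count the corpus files in |directory|, assuming a flat layout."""
  return len(os.listdir(directory))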
Code Example #13
File: grouper.py Project: vanhauser-thc/clusterfuzz
def combine_testcases_into_group(testcase_1, testcase_2, testcase_map):
    """Combine two testcases into a group."""
    logs.log('Grouping testcase 1 '
             '(crash_type=%s, crash_state=%s, security_flag=%s, group=%s) '
             'and testcase 2 '
             '(crash_type=%s, crash_state=%s, security_flag=%s, group=%s).' %
             (testcase_1.crash_type, testcase_1.crash_state,
              testcase_1.security_flag, testcase_1.group_id,
              testcase_2.crash_type, testcase_2.crash_state,
              testcase_2.security_flag, testcase_2.group_id))

    # If neither testcase has a group id, just assign a new group id to both.
    if not testcase_1.group_id and not testcase_2.group_id:
        new_group_id = _get_new_group_id()
        testcase_1.group_id = new_group_id
        testcase_2.group_id = new_group_id
        return

    # If only one of the testcases has a group id, have the other reuse it.
    if testcase_1.group_id and not testcase_2.group_id:
        testcase_2.group_id = testcase_1.group_id
        return
    if testcase_2.group_id and not testcase_1.group_id:
        testcase_1.group_id = testcase_2.group_id
        return

    # If both testcases have their own groups, merge the two groups together
    # and reuse one of the group ids.
    group_id_to_reuse = testcase_1.group_id
    group_id_to_move = testcase_2.group_id
    for testcase in six.itervalues(testcase_map):
        if testcase.group_id == group_id_to_move:
            testcase.group_id = group_id_to_reuse
Code Example #14
    def is_still_crashing(st_index, stacktrace):
        """Check if the the given stackstrace indicates
      the testcase is still crashing"""
        state = stack_analyzer.get_crash_data(stacktrace,
                                              fuzz_target=fuzz_target_name,
                                              symbolize_flag=False,
                                              already_symbolized=True,
                                              detect_ooms_and_hangs=True)

        crash_comparer = CrashComparer(state.crash_state, testcase.crash_state)
        if not crash_comparer.is_similar():
            return False

        logs.log(f'State for trial {st_index} of {testcase_id} '
                 f'remains similar '
                 f'(old_state={testcase.crash_state}, '
                 f'new_state={state.crash_state}).')

        is_security = crash_analyzer.is_security_issue(state.crash_stacktrace,
                                                       state.crash_type,
                                                       state.crash_address)
        if is_security != testcase.security_flag:
            return False

        logs.log(f'Security flag for trial {st_index} of {testcase_id} '
                 f'still matches '
                 f'({testcase.security_flag}).')
        return True
Code Example #15
def file_issue(testcase):
    """File an issue to the GitHub repo of the project"""
    if not _filing_enabled(testcase):
        return

    if testcase.github_repo_id and testcase.github_issue_num:
        logs.log('Issue already filed under '
                 f'issue number {testcase.github_issue_num} in '
                 f'Repo {testcase.github_repo_id}.')
        return

    access_token = _get_access_token()

    repo = _get_repo(testcase, access_token)
    if not repo:
        logs.log('Unable to file issues to the main repo of the project')
        return

    if not repo.has_issues:
        logs.log_warn('Unable to file issues to the main repo: '
                      'Repo has disabled issues.')
        return

    issue = _post_issue(repo, testcase)
    _update_testcase_properties(testcase, repo, issue)
Code Example #16
def _make_corpus_backup_public(target, corpus_fuzzer_name_override,
                               corpus_backup_bucket_name):
  """Identifies old corpus backups and makes them public."""
  corpus_backup_date = utils.utcnow().date() - datetime.timedelta(
      days=data_types.CORPUS_BACKUP_PUBLIC_LOOKBACK_DAYS)

  corpus_backup_url = corpus_manager.gcs_url_for_backup_file(
      corpus_backup_bucket_name, corpus_fuzzer_name_override or target.engine,
      target.project_qualified_name(), corpus_backup_date)

  if not storage.get(corpus_backup_url):
    logs.log_warn('Failed to find corpus backup %s.' % corpus_backup_url)
    return

  if not _set_public_acl_if_needed(corpus_backup_url):
    return

  filename = (
      corpus_manager.PUBLIC_BACKUP_TIMESTAMP + os.extsep +
      corpus_manager.BACKUP_ARCHIVE_FORMAT)
  public_url = os.path.join(os.path.dirname(corpus_backup_url), filename)

  if not storage.copy_blob(corpus_backup_url, public_url):
    logs.log_error(
        'Failed to overwrite %s with the latest public corpus backup.' %
        public_url)
    return

  if not _set_public_acl_if_needed(public_url):
    return

  logs.log('Corpus backup %s is now marked public.' % corpus_backup_url)
Code Example #17
def _add_default_issue_metadata(testcase):
    """Adds the default issue metadata (e.g. components, labels) to testcase."""
    default_metadata = engine_common.get_all_issue_metadata_for_testcase(
        testcase)
    if not default_metadata:
        return

    testcase_metadata = testcase.get_metadata()
    for key, default_value in six.iteritems(default_metadata):
        # Add the default issue metadata first. This gives preference to uploader
        # specified issue metadata.
        new_value_list = utils.parse_delimited(default_value,
                                               delimiter=',',
                                               strip=True,
                                               remove_empty=True)

        # Append uploader specified testcase metadata value to end (for preference).
        uploader_value = testcase_metadata.get(key, '')
        uploader_value_list = utils.parse_delimited(uploader_value,
                                                    delimiter=',',
                                                    strip=True,
                                                    remove_empty=True)
        for value in uploader_value_list:
            if value not in new_value_list:
                new_value_list.append(value)

        new_value = ','.join(new_value_list)
        if new_value == uploader_value:
            continue

        logs.log('Updating issue metadata for {} from {} to {}.'.format(
            key, uploader_value, new_value))
        testcase.set_metadata(key, new_value)
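
A worked illustration of the merge above, with invented values for one metadata key: default values come first, and uploader-specified values are appended only if not already present.

default_value = 'Stability-Memory,Stability-Sanitizer'  # hypothetical defaults
uploader_value = 'Stability-Sanitizer,MyLabel'  # hypothetical uploader value

# new_value_list starts as the parsed defaults; only 'MyLabel' is appended, so
# new_value == 'Stability-Memory,Stability-Sanitizer,MyLabel'.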
Code Example #18
File: data_types.py Project: google/clusterfuzz
    def _post_put_hook(self, _):
        if not self.key:
            # Failed put. An exception will be thrown automatically afterwards.
            return

        logs.log('Updated testcase %d (bug %s).' %
                 (self.key.id(), self.bug_information or '-'))
Code Example #19
def update_target_weights_for_engine(client, engine, specifications):
    """Update all fuzz target weights for the specified engine."""
    matches = {}
    run_set = set()

    # All fuzzers with non-default weights must be tracked with a special
    # specification. This ensures that they will be restored to normal weight
    # once conditions causing adjustments are no longer met.
    target_jobs = data_types.FuzzTargetJob.query(
        data_types.FuzzTargetJob.engine == engine).filter(
            data_types.FuzzTargetJob.weight != 1.0)

    for target_job in target_jobs:
        matches[(target_job.fuzz_target_name,
                 target_job.job)] = RESTORE_DEFAULT_MATCH

    for match in specifications:
        update_matches_for_specification(match, client, engine, matches,
                                         run_set)

    for (fuzzer, job), match in six.iteritems(matches):
        if (fuzzer, job) not in run_set:
            # This ensures that we don't reset weights for fuzzers with problems if
            # they didn't run in the time covered by our queries.
            continue

        update_weight_for_target(fuzzer, job, match)

    logs.log('Weight adjustments complete for engine %s.' % engine)
Code Example #20
def _store_testcase_for_regression_testing(testcase, testcase_file_path):
  """Stores reproduction testcase for future regression testing in corpus
  pruning task."""
  if testcase.open:
    # Store testcase only after the crash is fixed.
    return

  if not testcase.bug_information:
    # Only store crashes with bugs associated with them.
    return

  fuzz_target = data_handler.get_fuzz_target(testcase.overridden_fuzzer_name)
  if not fuzz_target:
    # No work to do, only applicable for engine fuzzers.
    return

  corpus = corpus_manager.FuzzTargetCorpus(fuzz_target.engine,
                                           fuzz_target.project_qualified_name())
  regression_testcase_url = os.path.join(
      corpus.get_regressions_corpus_gcs_url(),
      utils.file_hash(testcase_file_path))

  if storage.copy_file_to(testcase_file_path, regression_testcase_url):
    logs.log('Successfully stored testcase for regression testing: ' +
             regression_testcase_url)
  else:
    logs.log_error('Failed to store testcase for regression testing: ' +
                   regression_testcase_url)
Code Example #21
    def _process_failures(self, projects, build_type):
        """Process failures."""
        issue_tracker = issue_tracker_utils.get_issue_tracker()
        if not issue_tracker:
            raise OssFuzzBuildStatusException('Failed to get issue tracker.')

        for project in projects:
            project_name = project['name']
            builds = project['history']
            if not builds:
                continue

            build = builds[0]
            if build['success']:
                continue

            # Do not file an issue for non-main build types if there is a main build
            # failure for the same project, as the root cause might be the same.
            if build_type != MAIN_BUILD_TYPE:
                build_failure = get_build_failure(project_name,
                                                  MAIN_BUILD_TYPE)
                if build_failure:
                    continue

            build_failure = get_build_failure(project_name, build_type)

            build_time = get_build_time(build)
            if build_failure:
                if build_time <= build_failure.last_checked_timestamp:
                    # No updates.
                    continue
            else:
                build_failure = create_build_failure(project_name, build,
                                                     build_type)

            build_failure.last_checked_timestamp = build_time
            build_failure.consecutive_failures += 1
            if build_failure.consecutive_failures >= MIN_CONSECUTIVE_BUILD_FAILURES:
                if build_failure.issue_id is None:
                    oss_fuzz_project = _get_oss_fuzz_project(project_name)
                    if not oss_fuzz_project:
                        logs.log(
                            'Project %s is disabled, skipping bug filing.' %
                            project_name)
                        continue

                    build_failure.issue_id = file_bug(issue_tracker,
                                                      project_name,
                                                      build['build_id'],
                                                      oss_fuzz_project.ccs,
                                                      build_type)
                elif (build_failure.consecutive_failures -
                      MIN_CONSECUTIVE_BUILD_FAILURES) % REMINDER_INTERVAL == 0:
                    send_reminder(issue_tracker, build_failure.issue_id,
                                  build['build_id'])

            build_failure.put()
Code Example #22
    def _load_data(self, bigquery, fuzzer):
        """Load yesterday's stats into BigQuery."""
        project_id = utils.get_application_id()

        yesterday = (self._utc_now().date() - datetime.timedelta(days=1))
        date_string = yesterday.strftime('%Y%m%d')
        timestamp = utils.utc_date_to_timestamp(yesterday)

        dataset_id = fuzzer_stats.dataset_name(fuzzer)
        if not self._create_dataset_if_needed(bigquery, dataset_id):
            return

        for kind in STATS_KINDS:
            kind_name = kind.__name__
            table_id = kind_name

            if kind == fuzzer_stats.TestcaseRun:
                schema = fuzzer_stats_schema.get(fuzzer)
            else:
                schema = kind.SCHEMA

            if not self._create_table_if_needed(bigquery, dataset_id, table_id,
                                                schema):
                continue

            gcs_path = fuzzer_stats.get_gcs_stats_path(kind_name, fuzzer,
                                                       timestamp)
            load = {
                'destinationTable': {
                    'projectId': project_id,
                    'tableId': table_id + '$' + date_string,
                    'datasetId': dataset_id,
                },
                'schemaUpdateOptions': [
                    'ALLOW_FIELD_ADDITION',
                ],
                'sourceFormat': 'NEWLINE_DELIMITED_JSON',
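                # Assumption: fuzzer_stats.get_gcs_stats_path() returns a path
                # with a leading '/', so 'gs:/' + path forms a full gs:// URL.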
                'sourceUris': ['gs:/' + gcs_path + '*.json'],
                'writeDisposition': 'WRITE_TRUNCATE',
            }
            if schema is not None:
                load['schema'] = schema

            job_body = {
                'configuration': {
                    'load': load,
                },
            }

            logs.log("Uploading job to BigQuery.", job_body=job_body)
            request = bigquery.jobs().insert(projectId=project_id,
                                             body=job_body)
            response = request.execute()

            # We cannot really check the response here, as the query might be still
            # running, but having a BigQuery jobId in the log would make our life
            # simpler if we ever have to manually check the status of the query.
            # See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query.
            logs.log('Response from BigQuery: %s' % response)
Code Example #23
File: battery.py Project: vanhauser-thc/clusterfuzz
def wait_until_good_state():
  """Check battery and make sure it is charged beyond minimum level and
  temperature thresholds."""
  # Battery levels are not applicable on GCE.
  if environment.is_android_cuttlefish() or settings.is_automotive():
    return

  # Make sure device is online.
  adb.wait_for_device()

  # Skip battery check if done recently.
  last_battery_check_time = persistent_cache.get_value(
      LAST_BATTERY_CHECK_TIME_KEY,
      constructor=datetime.datetime.utcfromtimestamp)
  if last_battery_check_time and not dates.time_has_expired(
      last_battery_check_time, seconds=BATTERY_CHECK_INTERVAL):
    return

  # Initialize variables.
  battery_level_threshold = environment.get_value('LOW_BATTERY_LEVEL_THRESHOLD',
                                                  LOW_BATTERY_LEVEL_THRESHOLD)
  battery_temperature_threshold = environment.get_value(
      'MAX_BATTERY_TEMPERATURE_THRESHOLD', MAX_BATTERY_TEMPERATURE_THRESHOLD)
  device_restarted = False

  while True:
    battery_information = get_battery_level_and_temperature()
    if battery_information is None:
      logs.log_error('Failed to get battery information, skipping check.')
      return

    battery_level = battery_information['level']
    battery_temperature = battery_information['temperature']
    logs.log('Battery information: level (%d%%), temperature (%.1f celsius).' %
             (battery_level, battery_temperature))
    if (battery_level >= battery_level_threshold and
        battery_temperature <= battery_temperature_threshold):
      persistent_cache.set_value(LAST_BATTERY_CHECK_TIME_KEY, time.time())
      return

    logs.log('Battery in bad state, putting device in sleep mode.')

    if not device_restarted:
      adb.reboot()
      device_restarted = True

    # Change thresholds to expected levels (only if they were below minimum
    # thresholds).
    if battery_level < battery_level_threshold:
      battery_level_threshold = EXPECTED_BATTERY_LEVEL
    if battery_temperature > battery_temperature_threshold:
      battery_temperature_threshold = EXPECTED_BATTERY_TEMPERATURE

    # Stopping shell should help with shutting off a lot of services that would
    # otherwise use up the battery. However, we need to turn it back on to get
    # battery status information.
    adb.stop_shell()
    time.sleep(BATTERY_CHARGE_INTERVAL)
    adb.start_shell()
Code Example #24
def _find_existing_issue(repo, issue_title):
    """Checking if there is an existing open issue under the same name"""
    for issue in repo.get_issues():
        if issue.title == issue_title:
            logs.log(
                f'Issue ({issue_title}) already exists in Repo ({repo.id}).')
            return issue
    return None
Code Example #25
def _process_fuzzer_stats(fuzzer, project_info, project_name, bucket):
    """Processes coverage stats for a single fuzz target."""
    fuzzer_name = data_types.fuzz_target_project_qualified_name(
        project_name, _basename(fuzzer))
    fuzzer_info_path = storage.get_cloud_storage_file_path(bucket, fuzzer)
    logs.log('Processing fuzzer stats for %s (%s).' %
             (fuzzer_name, fuzzer_info_path))
    return _coverage_information(fuzzer_info_path, fuzzer_name, project_info)
Code Example #26
    def _post_put_hook(self, _):
        if not self.key:
            # Failed put. An exception will be thrown automatically afterwards.
            return

        logs.log(
            f'Updated testcase {self.key.id()} (bug {self.bug_information or "-"}).'
        )
Code Example #27
def get_introspector_index():
    """Return introspector projects status"""
    if storage.exists(INTROSPECTOR_INDEX_JSON_URL):
        introspector_index = json.loads(
            storage.read_data(INTROSPECTOR_INDEX_JSON_URL))
    else:
        introspector_index = {}
    logs.log('Loaded introspector status: %d' % len(introspector_index))
    return introspector_index
Code Example #28
def generate_weighted_strategy_pool(strategy_list, use_generator, engine_name):
    """Generate a strategy pool based on probability distribution from multi armed
  bandit experimentation."""

    # If weighted strategy selection is enabled, there will be a distribution
    # stored in the environment.
    distribution = environment.get_value('STRATEGY_SELECTION_DISTRIBUTION')
    selection_method = environment.get_value('STRATEGY_SELECTION_METHOD',
                                             default_value='default')

    # Otherwise if weighted strategy selection is not enabled (strategy selection
    # method is default) or if we cannot query properly, generate strategy
    # pool according to default parameters. We pass the combined list of
    # multi-armed bandit strategies and manual strategies for consideration in
    # the default strategy selection process.
    if not distribution or selection_method == 'default':
        return generate_default_strategy_pool(strategy_list, use_generator)

    # Change the distribution to a list of named tuples rather than a list of
    # dictionaries so that we can use the random_weighted_choice function. Filter
    # out probability entries from other engines.
    distribution_tuples = [
        StrategyCombination(strategy_name=elem['strategy_name'],
                            probability=elem['probability'])
        for elem in distribution if elem['engine'] == engine_name
    ]

    if not distribution_tuples:
        logs.log_warn(
            'Tried to generate a weighted strategy pool, but do not have '
            'strategy probabilities for %s fuzzing engine.' % engine_name)
        return generate_default_strategy_pool(strategy_list, use_generator)

    strategy_selection = utils.random_weighted_choice(distribution_tuples,
                                                      'probability')
    strategy_name = strategy_selection.strategy_name

    chosen_strategies = strategy_name.split(',')
    pool = StrategyPool()

    for strategy_tuple in strategy_list:
        if strategy_tuple.name in chosen_strategies:
            pool.add_strategy(strategy_tuple)

    # We consider certain strategies separately as those are only supported by a
    # small number of fuzz targets and should be used heavily when available.
    for value in [
            strategy_entry for strategy_entry in strategy_list
            if strategy_entry.manually_enable
    ]:
        if do_strategy(value):
            pool.add_strategy(value)

    logs.log('Strategy pool was generated according to weighted distribution. '
             'Chosen strategies: ' + ', '.join(pool.strategy_names))
    return pool
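
StrategyCombination is presumably a lightweight record pairing a strategy-name string with its probability. A sketch consistent with its use above:

import collections

StrategyCombination = collections.namedtuple(
    'StrategyCombination', ['strategy_name', 'probability'])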
Code Example #29
File: undercoat.py Project: google/clusterfuzz
def start_instance():
  """Start an instance via undercoat."""
  handle = undercoat_api_command('start_instance').output.strip()
  logs.log('Started undercoat instance with handle %s' % handle)

  # Immediately save the handle in case we crash before stop_instance()
  # is called
  add_running_handle(handle)

  return handle
Code Example #30
def close_bug(issue_tracker, issue_id, project_name):
    """Close a build failure bug."""
    logs.log('Closing build failure bug (project=%s, issue_id=%s).' %
             (project_name, issue_id))

    issue = issue_tracker.get_original_issue(issue_id)
    issue.status = 'Verified'
    issue.save(
        new_comment='The latest build has succeeded, closing this issue.',
        notify=True)