def setUp(self):
  test_helpers.patch(
      self, ["redis.Redis", "system.environment.is_running_on_app_engine"])
  self.mock.Redis.return_value = _MockRedis()
  self.mock.is_running_on_app_engine.return_value = True
  self.cache = memoize.Memcache(100)

  def fn():
    pass

  self.key = self.cache.get_key(fn, ("a", "b"), {"c": "d"})
  self.value = "b"
def setUp(self):
  test_helpers.patch(self, [
      'redis.Redis',
      'system.environment.is_running_on_app_engine',
  ])
  self.mock.Redis.return_value = _MockRedis()
  self.mock.is_running_on_app_engine.return_value = True
  self.cache = memoize.Memcache(100)

  def fn():
    pass

  self.key = self.cache.get_key(fn, ('a', 'b'), {'c': 'd'})
  self.value = 'b'
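# Both redis-based fixtures above depend on a _MockRedis test double that is
# not part of this excerpt. A minimal sketch of the surface it would need,
# assuming memoize.Memcache only issues plain get/set calls against the
# client (the real helper may stub more of redis.Redis):
class _MockRedis(object):
  """In-memory stand-in for redis.Redis (illustrative sketch only)."""

  def __init__(self):
    self._store = {}

  def get(self, key):
    """Return the value stored under |key|, or None on a cache miss."""
    return self._store.get(key)

  def set(self, key, value, ex=None):
    """Store |value| under |key|. The TTL argument |ex| is accepted but
    ignored, since entries never expire in this sketch."""
    self._store[key] = value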
def setUp(self):
  test_helpers.patch(self, [
      'system.environment.is_running_on_app_engine',
  ])
  self.mock.is_running_on_app_engine.return_value = True
  self.testbed = testbed.Testbed()
  self.testbed.activate()
  self.testbed.init_memcache_stub()
  self.cache = memoize.Memcache(100)

  def fn():
    pass

  self.key = self.cache.get_key(fn, ('a', 'b'), {'c': 'd'})
  self.value = 'b'
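# A round-trip test built on these fixtures could look like the following
# sketch. It assumes memoize.Memcache exposes get(key) and put(key, value);
# adjust to the actual cache interface if it differs.
def test_put_and_get(self):
  """Ensure a value can be read back under the precomputed key."""
  self.assertIsNone(self.cache.get(self.key))
  self.cache.put(self.key, self.value)
  self.assertEqual(self.value, self.cache.get(self.key))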
class FuzzerRunLogsContext(BuiltinFieldContext):
  """Fuzzer logs context."""

  MEMCACHE_TTL = 15 * 60

  def __init__(self, fuzzer=None, jobs=None):
    super(FuzzerRunLogsContext, self).__init__(fuzzer=fuzzer, jobs=jobs)

  @memoize.wrap(memoize.FifoInMemory(256))
  def _get_logs_bucket_from_job(self, job_type):
    """Get logs bucket from job."""
    return data_handler.get_value_from_job_definition_or_environment(
        job_type, 'FUZZ_LOGS_BUCKET')

  @memoize.wrap(memoize.Memcache(MEMCACHE_TTL, key_fn=_logs_bucket_key_fn))
  def _get_logs_bucket_from_fuzzer(self, fuzzer_name):
    """Get logs bucket from fuzzer (child fuzzers only)."""
    jobs = [
        mapping.job for mapping in fuzz_target_utils.get_fuzz_target_jobs(
            fuzz_target_name=fuzzer_name)
    ]
    if not jobs:
      return None

    # Check that the logs bucket is the same for all of them.
    bucket = self._get_logs_bucket_from_job(jobs[0])
    if all(bucket == self._get_logs_bucket_from_job(job) for job in jobs[1:]):
      return bucket

    return None

  def get_logs_bucket(self, fuzzer_name=None, job_type=None):
    """Return logs bucket for the job."""
    if job_type:
      return self._get_logs_bucket_from_job(job_type)

    if fuzzer_name:
      return self._get_logs_bucket_from_fuzzer(fuzzer_name)

    return None
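# _logs_bucket_key_fn is referenced above but not shown in this excerpt; it
# would have to be defined before the class body runs. A hypothetical sketch,
# assuming the memoize key_fn receives (func, args, kwargs) and that args[1]
# is fuzzer_name (args[0] is the bound instance, which must stay out of the
# cache key so results are shared across context objects):
def _logs_bucket_key_fn(func, args, kwargs):  # pylint: disable=unused-argument
  """Build a memcache key from the fuzzer name alone."""
  return 'fuzzer_logs_bucket:%s' % args[1]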
FILE_UNREPRODUCIBLE_TESTCASE_TEXT = (
    '<b>Note: This crash might not be reproducible with the provided testcase. '
    'That said, for the past %d days we\'ve been seeing this crash frequently. '
    'If you are unable to reproduce this, please try a speculative fix based '
    'on the crash stacktrace in the report. The fix can be verified by looking '
    'at the crash statistics in the report, a day after the fix is deployed. '
    'We will auto-close the bug if the crash is not seen for %d days.'
    '</b>' % (data_types.FILE_CONSISTENT_UNREPRODUCIBLE_TESTCASE_DEADLINE,
              data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE))

# ------------------------------------------------------------------------------
# Testcase, TestcaseUploadMetadata database related functions
# ------------------------------------------------------------------------------


@memoize.wrap(memoize.Memcache(MEMCACHE_TTL_IN_SECONDS))
def get_all_project_names():
  """Return all project names."""
  query = data_types.Testcase.query(
      projection=[data_types.Testcase.project_name],
      distinct=True).order(data_types.Testcase.project_name)
  return [
      testcase.project_name
      for testcase in query
      if testcase.project_name and testcase.project_name.strip()
  ]


def get_domain():
  """Get current domain."""
  default_domain = '{app_id}.appspot.com'.format(
      app_id=utils.get_application_id())
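# For reference, a minimal sketch of the control flow that memoize.wrap
# presumably implements around a function like get_all_project_names (the
# real decorator may additionally handle forced updates, serialization and
# per-engine cache selection):
import functools


def _wrap_sketch(cache):
  """Decorator: consult |cache| before invoking the wrapped function."""

  def decorator(func):

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
      key = cache.get_key(func, args, kwargs)
      result = cache.get(key)
      if result is not None:
        return result

      result = func(*args, **kwargs)
      cache.put(key, result)
      return result

    return wrapper

  return decorator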
    if (utils.string_is_true(job_environment.get('CUSTOM_BINARY')) or
        job_environment.get('SYSTEM_BINARY_DIR')):
      continue

    # Skip if explicitly excluded using a flag.
    if utils.string_is_true(
        job_environment.get('EXCLUDE_FROM_TOP_CRASHES')):
      continue

    jobs.add(job.name)
    platforms.add(job_platform_to_real_platform(job.platform))

  return jobs, platforms


@memoize.wrap(memoize.Memcache(12 * 60 * 60))
def _get_crash_occurrence_platforms_from_crash_parameters(
    crash_type, crash_state, security_flag, project_name, lookbehind_days):
  """Get platforms from crash stats based on crash parameters."""
  last_hour = crash_stats.get_last_successful_hour()
  if not last_hour:
    # No crash stats available, skip.
    return []

  where_clause = ('crash_type = {crash_type} AND '
                  'crash_state = {crash_state} AND '
                  'security_flag = {security_flag} AND '
                  'project = {project}').format(
                      crash_type=json.dumps(crash_type),
                      crash_state=json.dumps(crash_state),
                      security_flag=json.dumps(security_flag),
                      project=json.dumps(project_name))
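# json.dumps above doubles as a quick string quoter/escaper for the stats
# query: it wraps each value in double quotes and escapes embedded quotes,
# backslashes and newlines, which crash states routinely contain. Examples:
#   json.dumps('Heap-buffer-overflow')  ->  '"Heap-buffer-overflow"'
#   json.dumps('frame_one\nframe_two')  ->  '"frame_one\\nframe_two"'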
  revisions_dict = {}
  for key in src_map:
    # Only add keys that have both url and rev attributes.
    if 'url' in src_map[key] and 'rev' in src_map[key]:
      revisions_dict[key] = {
          'name': _get_component_display_name(key, project_name),
          'rev': src_map[key]['rev'],
          'url': src_map[key]['url']
      }

  return revisions_dict


@memoize.wrap(memoize.FifoOnDisk(DISK_CACHE_SIZE))
@memoize.wrap(memoize.Memcache(60 * 60 * 24 * 30))  # 30 day TTL
def _git_commit_position_to_git_hash_for_chromium(revision, repository):
  """Return git hash for a git commit position using cr-rev.appspot.com."""
  request_variables = {
      'number': revision,
      'numbering_identifier': 'refs/heads/master',
      'numbering_type': 'COMMIT_POSITION',
      'project': 'chromium',
      'repo': repository,
      'fields': 'git_sha',
  }

  query_string = urllib.parse.urlencode(request_variables)
  query_url = '%s?%s' % (CRREV_NUMBERING_URL, query_string)
  url_content = _get_url_content(query_url)
  if url_content is None:
    return None
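# Note the cache stacking order above: decorators apply bottom-up, so the
# FifoOnDisk wrapper is outermost. A lookup therefore checks the local
# on-disk FIFO cache first, falls back to shared memcache on a miss, and
# only then pays for a network round trip to cr-rev.appspot.com. The 30-day
# TTL is reasonable because a commit position, once assigned, maps to a
# fixed git hash.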