class Trial(Model):
  """Trials for specific binaries."""
  # App name that this trial is applied to. E.g. "d8" or "chrome".
  app_name = ndb.StringProperty()

  # Chance to select this set of arguments. Zero to one.
  probability = ndb.FloatProperty()

  # Additional arguments to apply if selected.
  app_args = ndb.StringProperty(indexed=False)


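# Illustrative sketch (not part of the original model definitions): how a bot
# could roll Trial.probability to decide which extra app_args to apply for a
# given app. The helper name and the local `random` import are assumptions for
# demonstration only; the real selection logic lives elsewhere in ClusterFuzz.
def _example_select_trial_args(app_name):
  """Return app_args from trials that pass a probability roll (sketch)."""
  import random  # Local import to keep this sketch self-contained.
  selected_args = []
  for trial in Trial.query(Trial.app_name == app_name):
    # Each trial is applied independently with its configured probability.
    if trial.probability and random.random() < trial.probability:
      selected_args.append(trial.app_args)

  return selected_args

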
class OssFuzzProject(Model):
  """Represents a project that has been set up for OSS-Fuzz."""
  # Name of the project.
  name = ndb.StringProperty()

  # Whether or not the project should run on high end hosts.
  high_end = ndb.BooleanProperty(default=False)

  # Weight for CPU distribution. This is set by admins.
  cpu_weight = ndb.FloatProperty(default=1.0)

  # The disk size to use (overrides the default).
  disk_size_gb = ndb.IntegerProperty()

  # Service account for this project.
  service_account = ndb.StringProperty()

  # CCs for the project.
  ccs = ndb.StringProperty(repeated=True)


class FuzzTargetJob(Model):
  """Mapping between fuzz target and jobs with additional metadata for
  selection."""
  # Fully qualified fuzz target name.
  fuzz_target_name = ndb.StringProperty()

  # Job this target ran as.
  job = ndb.StringProperty()

  # Engine this ran as.
  engine = ndb.StringProperty()

  # Relative frequency with which to select this fuzzer.
  weight = ndb.FloatProperty(default=1.0)

  # Approximate last time this target was run.
  last_run = ndb.DateTimeProperty()

  def _pre_put_hook(self):
    """Pre-put hook."""
    self.key = ndb.Key(
        FuzzTargetJob,
        fuzz_target_job_key(self.fuzz_target_name, self.job))


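# Illustrative sketch (an assumption, not part of the original module): looking
# up the FuzzTargetJob mapping for a target/job pair via the composite key set
# in _pre_put_hook. `fuzz_target_job_key` is defined elsewhere in this module;
# the helper name below is hypothetical.
def _example_get_fuzz_target_job(fuzz_target_name, job):
  """Fetch the FuzzTargetJob entity keyed by target name and job (sketch)."""
  key_id = fuzz_target_job_key(fuzz_target_name, job)
  return ndb.Key(FuzzTargetJob, key_id).get()

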
class Testcase(Model):
  """Represents a single testcase."""
  # Crash on an invalid read/write.
  crash_type = ndb.StringProperty()

  # Crashing address.
  crash_address = ndb.StringProperty(indexed=False)

  # First x stack frames.
  crash_state = ndb.StringProperty()

  # Complete stacktrace.
  crash_stacktrace = ndb.TextProperty(indexed=False)

  # Last tested crash stacktrace using the latest revision.
  last_tested_crash_stacktrace = ndb.TextProperty(indexed=False)

  # Blobstore keys for various things like original testcase, minimized
  # testcase, etc.
  fuzzed_keys = ndb.StringProperty(indexed=False)
  minimized_keys = ndb.StringProperty(indexed=False)
  minidump_keys = ndb.StringProperty(indexed=False)

  # Tracking issue tracker bug. One bug number per line (future extension).
  bug_information = ndb.StringProperty()

  # Regression range.
  regression = ndb.StringProperty(default='')

  # Revisions where this issue has been fixed.
  fixed = ndb.StringProperty(default='')

  # Is it a security bug?
  security_flag = ndb.BooleanProperty(default=False)

  # Security severity of the bug.
  security_severity = ndb.IntegerProperty(indexed=False)

  # Did the bug only reproduce once?
  one_time_crasher_flag = ndb.BooleanProperty(default=False)

  # Any additional comments.
  comments = ndb.TextProperty(default='', indexed=False)

  # Revision that we discovered the crash in.
  crash_revision = ndb.IntegerProperty()

  # The file on the bot that generated the testcase.
  original_absolute_path = ndb.StringProperty(indexed=False, default='')
  absolute_path = ndb.StringProperty(indexed=False)

  # Minimized argument list.
  minimized_arguments = ndb.TextProperty(default='', indexed=False)

  # Window argument (usually width, height, top, left, etc).
  window_argument = ndb.TextProperty(default='', indexed=False)

  # Type of job associated with this testcase.
  job_type = ndb.StringProperty()

  # Original job queue used for tasks created for this testcase.
  queue = ndb.StringProperty(indexed=False)

  # State representing whether the fuzzed or minimized testcases are archived.
  archive_state = ndb.IntegerProperty(default=0, indexed=False)

  # File name of the original uploaded archive.
  archive_filename = ndb.StringProperty(indexed=False)

  # Is this a binary file?
  binary_flag = ndb.BooleanProperty(default=False, indexed=False)

  # Timestamp.
  timestamp = ndb.DateTimeProperty()

  # Does the testcase crash stack vary between crashes?
  flaky_stack = ndb.BooleanProperty(default=False, indexed=False)

  # Do we need to test this testcase using an HTTP/HTTPS server?
  http_flag = ndb.BooleanProperty(default=False, indexed=False)

  # Name of the fuzzer used to generate this testcase.
  fuzzer_name = ndb.StringProperty()

  # Status of this testcase (pending, processed, unreproducible, etc).
  status = ndb.StringProperty(default='Processed')

  # Id of the testcase that this is marked as a duplicate of.
  duplicate_of = ndb.IntegerProperty(indexed=False)

  # Flag indicating whether or not the testcase has been symbolized.
  symbolized = ndb.BooleanProperty(default=False, indexed=False)

  # Id for this testcase's associated group.
  group_id = ndb.IntegerProperty(default=0)

  # Tracking issue tracker bug for this testcase group.
  group_bug_information = ndb.IntegerProperty(default=0)

  # Fake user interaction sequences like key clicks, mouse movements, etc.
  gestures = ndb.StringProperty(repeated=True, indexed=False)

  # ASAN redzone size in bytes.
  redzone = ndb.IntegerProperty(default=128, indexed=False)

  # Whether testcase is open.
  open = ndb.BooleanProperty(default=True)

  # Adjusts timeout based on multiplier value.
  timeout_multiplier = ndb.FloatProperty(default=1.0, indexed=False)

  # Additional metadata stored as a JSON object. This should be used for
  # properties that are not commonly accessed and do not need to be indexed.
  additional_metadata = ndb.TextProperty(indexed=False)

  # Boolean attribute indicating if cleanup triage needs to be done.
  triaged = ndb.BooleanProperty(default=False)

  # Project name associated with this test case.
  project_name = ndb.StringProperty()

  # Keywords used for searching.
  keywords = ndb.StringProperty(repeated=True)

  # Whether testcase has a bug (either bug_information or
  # group_bug_information).
  has_bug_flag = ndb.BooleanProperty()

  # Indices for bug_information and group_bug_information.
  bug_indices = ndb.StringProperty(repeated=True)

  # Overridden fuzzer name because actual fuzzer name can be different in many
  # scenarios (libfuzzer, afl, etc).
  overridden_fuzzer_name = ndb.StringProperty()

  # Platform (e.g. windows, linux, android).
  platform = ndb.StringProperty()

  # Platform id (e.g. windows, linux, android:hammerhead:l).
  # For Android, includes device type and underlying OS version.
  platform_id = ndb.StringProperty()

  # Impact indices for searching.
  impact_indices = ndb.StringProperty(repeated=True)

  # Whether or not a testcase is a duplicate of another testcase.
  is_a_duplicate_flag = ndb.BooleanProperty()

  # Whether or not a testcase is the leader of its group.
  # If the testcase is not in a group, it's the leader of a group of 1.
  # The default is false because we prefer not to show crashes until we are
  # sure. And group_task will correctly set the value within 30 minutes.
  is_leader = ndb.BooleanProperty(default=False)

  # Fuzzer name indices.
  fuzzer_name_indices = ndb.StringProperty(repeated=True)

  # The impacted version indices (including both beta and stable).
  impact_version_indices = ndb.StringProperty(repeated=True)

  # The impacted stable version.
  impact_stable_version = ndb.StringProperty()

  # The impacted stable version indices.
  impact_stable_version_indices = ndb.StringProperty(repeated=True)

  # The impacted stable version is merely probable (not definite). Because
  # for a non-asan build, we don't have a stable/beta build. Therefore, we
  # make an intelligent guess on the version.
  impact_stable_version_likely = ndb.BooleanProperty()

  # The impacted beta version.
  impact_beta_version = ndb.StringProperty()

  # The impacted beta version indices.
  impact_beta_version_indices = ndb.StringProperty(repeated=True)

  # The impacted beta version is merely probable (not definite). See the
  # comment on impact_stable_version_likely.
  impact_beta_version_likely = ndb.BooleanProperty()

  # Whether or not impact task has been run on this testcase.
  is_impact_set_flag = ndb.BooleanProperty()

  # Code coverage data for the testcase.
  coverage = ndb.StringProperty()

  # Uploader email address.
  uploader_email = ndb.StringProperty()

  def has_blame(self):
    return self.project_name == 'chromium'

  def has_impacts(self):
    return self.project_name == 'chromium' and not self.one_time_crasher_flag

  def impacts_production(self):
    return bool(self.impact_stable_version) or bool(self.impact_beta_version)

  def is_status_unreproducible(self):
    return self.status and self.status.startswith('Unreproducible')

  def is_crash(self):
    return bool(self.crash_state)

  def populate_indices(self):
    """Populate keywords for fast test case list searching."""
    self.keywords = list(
        search_tokenizer.tokenize(self.crash_state)
        | search_tokenizer.tokenize(self.crash_type)
        | search_tokenizer.tokenize(self.fuzzer_name)
        | search_tokenizer.tokenize(self.overridden_fuzzer_name)
        | search_tokenizer.tokenize(self.job_type)
        | search_tokenizer.tokenize(self.platform_id))

    self.bug_indices = search_tokenizer.tokenize_bug_information(self)
    self.has_bug_flag = bool(self.bug_indices)
    self.is_a_duplicate_flag = bool(self.duplicate_of)
    fuzzer_name_indices = list(
        set([self.fuzzer_name, self.overridden_fuzzer_name]))
    self.fuzzer_name_indices = [f for f in fuzzer_name_indices if f]

    # If the impact task hasn't been run (aka is_impact_set_flag=False) OR
    # if impact isn't applicable (aka has_impacts() is False), we wipe all
    # the impact fields' indices.
    if self.has_impacts() and self.is_impact_set_flag:
      self.impact_stable_version_indices = (
          search_tokenizer.tokenize_impact_version(self.impact_stable_version))
      self.impact_beta_version_indices = (
          search_tokenizer.tokenize_impact_version(self.impact_beta_version))
      self.impact_version_indices = list(
          set(self.impact_stable_version_indices +
              self.impact_beta_version_indices))

      if self.impact_beta_version:
        self.impact_version_indices.append('beta')
      if self.impact_stable_version:
        self.impact_version_indices.append('stable')
      if not self.impacts_production():
        self.impact_version_indices.append('head')
    else:
      self.impact_version_indices = []
      self.impact_stable_version_indices = []
      self.impact_beta_version_indices = []

  def _pre_put_hook(self):
    self.populate_indices()

  def _post_put_hook(self, _):
    logs.log('Updated testcase %d (bug %s).' %
             (self.key.id(), self.bug_information or '-'))

  def set_impacts_as_na(self):
    self.impact_stable_version = self.impact_beta_version = None
    self.impact_stable_version_likely = self.impact_beta_version_likely = False
    self.is_impact_set_flag = False

  def _ensure_metadata_is_cached(self):
    """Ensure that the metadata for this has been cached."""
    if hasattr(self, 'metadata_cache'):
      return

    try:
      cache = json_utils.loads(self.additional_metadata)
    except (TypeError, ValueError):
      cache = {}

    setattr(self, 'metadata_cache', cache)

  def get_metadata(self, key=None, default=None):
    """Get metadata for a test case. Slow on first access."""
    self._ensure_metadata_is_cached()

    # If no key is specified, return all metadata.
    if not key:
      return self.metadata_cache

    try:
      return self.metadata_cache[key]
    except KeyError:
      return default

  def set_metadata(self, key, value, update_testcase=True):
    """Set metadata for a test case."""
    self._ensure_metadata_is_cached()
    self.metadata_cache[key] = value

    self.additional_metadata = json_utils.dumps(self.metadata_cache)
    if update_testcase:
      self.put()

  def delete_metadata(self, key, update_testcase=True):
    """Remove metadata key for a test case."""
    self._ensure_metadata_is_cached()

    # Make sure that the key exists in cache. If not, no work to do here.
    if key not in self.metadata_cache:
      return

    del self.metadata_cache[key]

    self.additional_metadata = json_utils.dumps(self.metadata_cache)
    if update_testcase:
      self.put()

  def actual_fuzzer_name(self):
    """Actual fuzzer name, uses one from overridden attribute if available."""
    return self.overridden_fuzzer_name or self.fuzzer_name

  def get_fuzz_target(self):
    """Get the associated FuzzTarget entity for this test case."""
    name = self.actual_fuzzer_name()
    if not name:
      return None

    return ndb.Key(FuzzTarget, name).get()


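# Illustrative usage sketch (an assumption, not part of the original module):
# reading and writing per-testcase metadata through the JSON-backed
# additional_metadata field. The helper name and metadata key are hypothetical.
def _example_metadata_usage(testcase_id):
  """Demonstrate get_metadata/set_metadata on a Testcase (sketch)."""
  testcase = Testcase.get_by_id(testcase_id)
  if not testcase:
    return None

  # set_metadata serializes the cache back into additional_metadata;
  # update_testcase=False defers the datastore write to the explicit put().
  testcase.set_metadata(
      'analysis_note', 'needs manual triage', update_testcase=False)
  testcase.put()

  # Reads go through the in-memory cache after the first (slow) access.
  return testcase.get_metadata('analysis_note', default='')

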
class FuzzerJob(Model):
  """Mapping between a fuzzer and job with additional metadata for
  selection."""
  fuzzer = ndb.StringProperty()
  job = ndb.StringProperty()
  platform = ndb.StringProperty()
  weight = ndb.FloatProperty(default=1.0)


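# Illustrative sketch (an assumption, not part of the original module): a
# weighted random choice over FuzzerJob entities for a platform, proportional
# to `weight`. The real selection logic is more involved; this only
# demonstrates the intent of the weight field.
def _example_pick_fuzzer_job(platform):
  """Pick a FuzzerJob for a platform, weighted by FuzzerJob.weight (sketch)."""
  import random  # Local import to keep this sketch self-contained.
  jobs = list(FuzzerJob.query(FuzzerJob.platform == platform))
  if not jobs:
    return None

  # Walk the cumulative weights until the random pick is exhausted.
  pick = random.uniform(0, sum(job.weight for job in jobs))
  for job in jobs:
    pick -= job.weight
    if pick <= 0:
      return job

  return jobs[-1]

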
class FuzzStrategyProbability(Model):
  """Mapping between fuzz strategies and probabilities with which they
  should be selected."""
  strategy_name = ndb.StringProperty()
  probability = ndb.FloatProperty()