class BundledArchiveMetadata(Model): """Metadata needed for multiple test cases uploaded in an archive.""" # Blobstore key of the archive. blobstore_key = ndb.StringProperty() # Timeout in seconds for each testcase in the bundle. timeout = ndb.IntegerProperty() # Job queue for the analyze tasks created for this bundle. job_queue = ndb.StringProperty() # Job type that should be used for all testcases in this bundle. job_type = ndb.StringProperty() # Flag indicating whether or not these testcases need http. http_flag = ndb.BooleanProperty() # File name of the uploaded archive. archive_filename = ndb.StringProperty() # Email address of the uploader of the archive. uploader_email = ndb.StringProperty() # Fake user interaction sequences like key clicks, mouse movements, etc. gestures = ndb.StringProperty(repeated=True) # Optional. Revision that we discovered the crash in. crash_revision = ndb.IntegerProperty() # Optional. Additional arguments. additional_arguments = ndb.StringProperty() # Optional. Bug information. bug_information = ndb.StringProperty() # Optional. Platform id, e.g. android:shamu. platform_id = ndb.StringProperty() # Optional. App launch command. e.g. shell am start ... app_launch_command = ndb.StringProperty() # Fuzzer name. fuzzer_name = ndb.StringProperty() # Overridden fuzzer name because actual fuzzer name can be different in many # scenarios (libfuzzer, afl, etc). overridden_fuzzer_name = ndb.StringProperty() # Binary name for fuzz target (only applicable to libFuzzer, AFL). fuzzer_binary_name = ndb.StringProperty()
class CoverageInformation(Model):
  """Coverage info for a single fuzzer on a given date.

  The entity key is derived from (fuzzer, date) in the pre-put hook, so at
  most one entity exists per fuzzer per day.
  """
  date = ndb.DateProperty(auto_now_add=True)
  # Fuzzer name this coverage entry belongs to.
  # Consistency fix: use ndb.StringProperty like every other model in this
  # file (previously referenced a bare StringProperty name).
  fuzzer = ndb.StringProperty()

  # Function coverage information.
  functions_covered = ndb.IntegerProperty()
  functions_total = ndb.IntegerProperty()

  # Edge coverage information.
  edges_covered = ndb.IntegerProperty()
  edges_total = ndb.IntegerProperty()

  # Corpus size information.
  corpus_size_units = ndb.IntegerProperty()
  corpus_size_bytes = ndb.IntegerProperty()
  corpus_location = ndb.StringProperty()

  # Corpus backup information.
  corpus_backup_location = ndb.StringProperty()

  # Quarantine size information.
  quarantine_size_units = ndb.IntegerProperty()
  quarantine_size_bytes = ndb.IntegerProperty()
  quarantine_location = ndb.StringProperty()

  # Link to the HTML report.
  html_report_url = ndb.StringProperty()

  def _pre_put_hook(self):
    """Pre-put hook. Derives the entity key from (fuzzer, date)."""
    self.key = ndb.Key(CoverageInformation,
                       coverage_information_key(self.fuzzer, self.date))
class TestcaseUploadMetadata(Model): """Metadata associated with a user uploaded test case.""" # Timestamp. timestamp = ndb.DateTimeProperty() # Testcase filename. filename = ndb.StringProperty() # Current status of the testcase. status = ndb.StringProperty() # Uploader email address. uploader_email = ndb.StringProperty() # Name of the bot that ran analyze on this testcase. bot_name = ndb.StringProperty() # Id of the associated testcase. testcase_id = ndb.IntegerProperty() # Id of the testcase that this is marked as a duplicate of. duplicate_of = ndb.IntegerProperty() # Blobstore key for the testcase associated with this object. blobstore_key = ndb.StringProperty() # Testcase timeout. timeout = ndb.IntegerProperty() # Is this a single testcase bundled in an archive? bundled = ndb.BooleanProperty() # Path to the file in the archive. path_in_archive = ndb.TextProperty() # Original blobstore key for this object (used for archives). original_blobstore_key = ndb.StringProperty() # Security flag. security_flag = ndb.BooleanProperty(default=False) # Number of retries for this testcase. retries = ndb.IntegerProperty() # Flag to indicate where bug title should be updated or not. bug_summary_update_flag = ndb.BooleanProperty() # Flag to indicate if we are running in quiet mode (e.g. bug updates). quiet_flag = ndb.BooleanProperty()
class ExternalUserPermission(Model): """Permissions for external users.""" # Email user is authenticated as. email = ndb.StringProperty() # Type of |entity_name|. Can be one of the values of PermissionEntityKind. entity_kind = ndb.IntegerProperty() # Name of the entity that user is allowed to view. entity_name = ndb.StringProperty() # Whether or not |allowed_name| is a prefix. is_prefix = ndb.BooleanProperty(default=False) # Auto CC type. auto_cc = ndb.IntegerProperty()
class ReportMetadata(Model): """Metadata associated with a crash report.""" # Job type from testcase. job_type = ndb.StringProperty() # Revision of build from report. crash_revision = ndb.IntegerProperty(default=-1) # Has this report been successfully uploaded? is_uploaded = ndb.BooleanProperty(default=False) # Product. product = ndb.StringProperty(default='') # Version. version = ndb.StringProperty(default='', indexed=False) # Key to minidump previously written to blobstore. minidump_key = ndb.StringProperty(default='', indexed=False) # Processed crash bytes. serialized_crash_stack_frames = ndb.BlobProperty(default='', indexed=False) # Id of the associated testcase. testcase_id = ndb.StringProperty(default='') # Id of the associated bot. bot_id = ndb.StringProperty(default='', indexed=False) # Optional upload params, stored as a JSON object. optional_params = ndb.TextProperty(indexed=False) # Report id from crash/. crash_report_id = ndb.StringProperty()
class Notification(Model): """Tracks whether or not an email has been sent to a user for a test case.""" # Testcase id associated with this notification. testcase_id = ndb.IntegerProperty() # User that this notification was sent to. user_email = ndb.StringProperty()
class LockStatShard(Model): """Lock statistics shard.""" # The number of successful acquires. acquires = ndb.IntegerProperty(default=0) # The number of lock acquire bails. bails = ndb.IntegerProperty(default=0) # The number of acquire failures. failed_acquires = ndb.IntegerProperty(default=0) # The number of times the lock was detected to be lost because the holder ran # out of time. lost = ndb.IntegerProperty(default=0) # Total wait time over all successful acquires. wait_time = ndb.IntegerProperty(default=0)
class Blacklist(Model): """Represents global blacklist to track entries for suppressions files.""" # Function name. function_name = ndb.StringProperty() # Tool name. tool_name = ndb.StringProperty() # Testcase ID. testcase_id = ndb.IntegerProperty()
class ClusterInfo(Model): """Cpu allocation information for a project in a zone.""" # The cluster for the CPU allocation. cluster = ndb.StringProperty() # The number of allocated CPUs in this cluster. cpu_count = ndb.IntegerProperty(default=0) # The GCE zone for this cluster. gce_zone = ndb.StringProperty()
class HostWorkerAssignment(Model): """Host worker assignment information.""" # The host instance name. host_name = ndb.StringProperty() # The instance number (0 to WORKERS_PER_HOST - 1). instance_num = ndb.IntegerProperty() # The worker instance name. worker_name = ndb.StringProperty() # The project name. project_name = ndb.StringProperty()
class OssFuzzBuildFailure(Model): """Represents build failure.""" # Project name. project_name = ndb.StringProperty() # The monorail issue ID for the failure. issue_id = ndb.StringProperty() # The last timestamp of the build. last_checked_timestamp = ndb.DateTimeProperty() # Number of consecutive failures. consecutive_failures = ndb.IntegerProperty(default=0) # Build type (fuzzing, coverage, etc). build_type = ndb.StringProperty()
class OssFuzzProject(Model): """Represents a project that has been set up for OSS-Fuzz.""" # Name of the project. name = ndb.StringProperty() # Whether or not the project should run on high end hosts. high_end = ndb.BooleanProperty(default=False) # Weight for CPU distribution. This is set by admins. cpu_weight = ndb.FloatProperty(default=1.0) # The disk size to use (overrides the default). disk_size_gb = ndb.IntegerProperty() # Service account for this project. service_account = ndb.StringProperty() # CCs for the project. ccs = ndb.StringProperty(repeated=True)
class BlobInfo(data_types.Model):
  """Legacy BlobInfo entity mirroring App Engine's blobstore metadata."""
  content_type = ndb.StringProperty()
  creation = ndb.DateTimeProperty()
  filename = ndb.StringProperty()
  gs_object_name = ndb.StringProperty()
  md5_hash = ndb.StringProperty()
  size = ndb.IntegerProperty()
  upload_id = ndb.StringProperty()

  @classmethod
  def _get_kind(cls):
    """Return the datastore kind, working around emulator restrictions."""
    if not environment.get_value('DATASTORE_EMULATOR_HOST'):
      return '__BlobInfo__'

    # Datastore emulator does not allow writing entities with names of the
    # format "__*__", so register and use a single-underscore kind instead.
    emulator_kind = '_BlobInfo_'
    cls._kind_map[emulator_kind] = cls
    return emulator_kind
class BuildMetadata(Model): """Metadata associated with a particular archived build.""" # Job type that this build belongs to. job_type = ndb.StringProperty() # Revision of the build. revision = ndb.IntegerProperty() # Good build or bad build. bad_build = ndb.BooleanProperty(default=False) # Stdout and stderr. console_output = ndb.TextProperty() # Bot name. bot_name = ndb.StringProperty() # Symbol data. symbols = ndb.StringProperty() # Creation timestamp. timestamp = ndb.DateTimeProperty()
class DummyEntity(ndb.Model):
  """Dummy entity with one property of each common type (testing helper)."""
  string_property = ndb.StringProperty()
  datetime_property = ndb.DateTimeProperty()
  integer_property = ndb.IntegerProperty()
class Job(Model):
  """Definition of a job type used by the bots."""

  VALID_NAME_REGEX = NAME_CHECK_REGEX

  # Job type name.
  name = ndb.StringProperty()
  # Job environment string.
  environment_string = ndb.TextProperty()
  # The platform that this job can run on.
  platform = ndb.StringProperty()
  # Blobstore key of the custom binary for this job.
  custom_binary_key = ndb.StringProperty()
  # Filename for the custom binary.
  custom_binary_filename = ndb.StringProperty()
  # Revision of the custom binary.
  custom_binary_revision = ndb.IntegerProperty()
  # Description of the job.
  description = ndb.TextProperty()
  # Template to use, if any.
  templates = ndb.StringProperty(repeated=True)

  def get_environment(self):
    """Get the environment as a dict for this job, including any environment
    variables in its template.

    Returns:
      A dict mapping environment variable names to values; the job's own
      environment string overrides template values.
    """
    if not self.templates:
      return environment.parse_environment_definition(self.environment_string)

    job_environment = {}
    for template_name in self.templates:
      template = JobTemplate.query(JobTemplate.name == template_name).get()
      if not template:
        # Silently skip references to templates that no longer exist.
        continue

      template_environment = environment.parse_environment_definition(
          template.environment_string)
      job_environment.update(template_environment)

    # The job's own environment string takes precedence over templates.
    environment_overrides = environment.parse_environment_definition(
        self.environment_string)
    job_environment.update(environment_overrides)
    return job_environment

  def get_environment_string(self):
    """Get the environment string for this job, including any environment
    variables in its template. Avoid using this if possible."""
    job_environment = self.get_environment()
    # Build with join instead of repeated += concatenation (linear, not
    # quadratic, and idiomatic).
    return ''.join('%s = %s\n' % (key, value)
                   for key, value in six.iteritems(job_environment))
class Testcase(Model):
  """Represents a single testcase."""
  # Crash on an invalid read/write.
  crash_type = ndb.StringProperty()
  # Crashing address.
  crash_address = ndb.StringProperty(indexed=False)
  # First x stack frames.
  crash_state = ndb.StringProperty()
  # Complete stacktrace.
  crash_stacktrace = ndb.TextProperty(indexed=False)
  # Last tested crash stacktrace using the latest revision.
  last_tested_crash_stacktrace = ndb.TextProperty(indexed=False)
  # Blobstore keys for various things like original testcase, minimized
  # testcase, etc.
  fuzzed_keys = ndb.StringProperty(indexed=False)
  minimized_keys = ndb.StringProperty(indexed=False)
  minidump_keys = ndb.StringProperty(indexed=False)
  # Tracking issue tracker bug. One bug number per line (future extension).
  bug_information = ndb.StringProperty()
  # Regression range.
  regression = ndb.StringProperty(default='')
  # Revisions where this issue has been fixed.
  fixed = ndb.StringProperty(default='')
  # Is it a security bug ?
  security_flag = ndb.BooleanProperty(default=False)
  # Security severity of the bug.
  security_severity = ndb.IntegerProperty(indexed=False)
  # Did the bug only reproduced once ?
  one_time_crasher_flag = ndb.BooleanProperty(default=False)
  # Any additional comments.
  comments = ndb.TextProperty(default='', indexed=False)
  # Revision that we discovered the crash in.
  crash_revision = ndb.IntegerProperty()
  # The file on the bot that generated the testcase.
  original_absolute_path = ndb.StringProperty(indexed=False, default='')
  absolute_path = ndb.StringProperty(indexed=False)
  # Minimized argument list.
  minimized_arguments = ndb.TextProperty(default='', indexed=False)
  # Window argument (usually width, height, top, left, etc).
  window_argument = ndb.TextProperty(default='', indexed=False)
  # Type of job associated with this testcase.
  job_type = ndb.StringProperty()
  # Original job queue used for tasks created for this testcase.
  queue = ndb.StringProperty(indexed=False)
  # State representing whether the fuzzed or minimized testcases are archived.
  archive_state = ndb.IntegerProperty(default=0, indexed=False)
  # File name of the original uploaded archive.
  archive_filename = ndb.StringProperty(indexed=False)
  # Is this a binary file?
  binary_flag = ndb.BooleanProperty(default=False, indexed=False)
  # Timestamp.
  timestamp = ndb.DateTimeProperty()
  # Does the testcase crash stack vary b/w crashes ?
  flaky_stack = ndb.BooleanProperty(default=False, indexed=False)
  # Do we need to test this testcase using an HTTP/HTTPS server?
  http_flag = ndb.BooleanProperty(default=False, indexed=False)
  # Name of the fuzzer used to generate this testcase.
  fuzzer_name = ndb.StringProperty()
  # Status of this testcase (pending, processed, unreproducible, etc).
  status = ndb.StringProperty(default='Processed')
  # Id of the testcase that this is marked as a duplicate of.
  duplicate_of = ndb.IntegerProperty(indexed=False)
  # Flag indicating whether or not the testcase has been symbolized.
  symbolized = ndb.BooleanProperty(default=False, indexed=False)
  # Id for this testcase's associated group.
  group_id = ndb.IntegerProperty(default=0)
  # Tracking issue tracker bug for this testcase group.
  group_bug_information = ndb.IntegerProperty(default=0)
  # Fake user interaction sequences like key clicks, mouse movements, etc.
  gestures = ndb.StringProperty(repeated=True, indexed=False)
  # ASAN redzone size in bytes.
  redzone = ndb.IntegerProperty(default=128, indexed=False)
  # Whether testcase is open.
  open = ndb.BooleanProperty(default=True)
  # Adjusts timeout based on multiplier value.
  timeout_multiplier = ndb.FloatProperty(default=1.0, indexed=False)
  # Additional metadata stored as a JSON object. This should be used for
  # properties that are not commonly accessed and do not need to be indexed.
  additional_metadata = ndb.TextProperty(indexed=False)
  # Boolean attribute indicating if cleanup triage needs to be done.
  triaged = ndb.BooleanProperty(default=False)
  # Project name associated with this test case.
  project_name = ndb.StringProperty()
  # keywords is used for searching.
  keywords = ndb.StringProperty(repeated=True)
  # Whether testcase has a bug (either bug_information or
  # group_bug_information).
  has_bug_flag = ndb.BooleanProperty()
  # Indices for bug_information and group_bug_information.
  bug_indices = ndb.StringProperty(repeated=True)
  # Overridden fuzzer name because actual fuzzer name can be different in many
  # scenarios (libfuzzer, afl, etc).
  overridden_fuzzer_name = ndb.StringProperty()
  # Platform (e.g. windows, linux, android).
  platform = ndb.StringProperty()
  # Platform id (e.g. windows, linux, android:hammerhead:l).
  # For Android, includes device type and underlying OS version.
  platform_id = ndb.StringProperty()
  # Impact indices for searching.
  impact_indices = ndb.StringProperty(repeated=True)
  # Whether or not a testcase is a duplicate of other testcase.
  is_a_duplicate_flag = ndb.BooleanProperty()
  # Whether or not a testcase is the leader of its group.
  # If the testcase is not in a group, it's the leader of a group of 1.
  # The default is false because we prefer not to show crashes until we are
  # sure. And group_task will correctly set the value within 30 minutes.
  is_leader = ndb.BooleanProperty(default=False)
  # Fuzzer name indices
  fuzzer_name_indices = ndb.StringProperty(repeated=True)
  # The impacted version indices (including both beta and stable).
  impact_version_indices = ndb.StringProperty(repeated=True)
  # The impacted stable version.
  impact_stable_version = ndb.StringProperty()
  # The impacted stable version indices.
  impact_stable_version_indices = ndb.StringProperty(repeated=True)
  # The impacted stable version is merely probable (not definite). Because
  # for a non-asan build, we don't have a stable/beta build. Therefore, we
  # make an intelligent guess on the version.
  impact_stable_version_likely = ndb.BooleanProperty()
  # The impacted beta version.
  impact_beta_version = ndb.StringProperty()
  # The impacted beta version indices.
  impact_beta_version_indices = ndb.StringProperty(repeated=True)
  # The impacted beta version is merely probable (not definite). See the
  # comment on impact_stable_version_likely.
  impact_beta_version_likely = ndb.BooleanProperty()
  # Whether or not impact task has been run on this testcase.
  is_impact_set_flag = ndb.BooleanProperty()
  # Code coverage data for the testcase.
  coverage = ndb.StringProperty()
  # Uploader email address.
  uploader_email = ndb.StringProperty()

  def has_blame(self):
    """Return whether blame information is applicable (chromium only)."""
    return self.project_name == 'chromium'

  def has_impacts(self):
    """Return whether impact analysis applies (chromium, reproducible only)."""
    return self.project_name == 'chromium' and not self.one_time_crasher_flag

  def impacts_production(self):
    """Return True if this crash impacts a stable or beta release."""
    return bool(self.impact_stable_version) or bool(self.impact_beta_version)

  def is_status_unreproducible(self):
    """Return True if the status marks this testcase as unreproducible."""
    return self.status and self.status.startswith('Unreproducible')

  def is_crash(self):
    """Return True if this testcase has a crash state."""
    return bool(self.crash_state)

  def populate_indices(self):
    """Populate keywords for fast test case list searching."""
    self.keywords = list(
        search_tokenizer.tokenize(self.crash_state)
        | search_tokenizer.tokenize(self.crash_type)
        | search_tokenizer.tokenize(self.fuzzer_name)
        | search_tokenizer.tokenize(self.overridden_fuzzer_name)
        | search_tokenizer.tokenize(self.job_type)
        | search_tokenizer.tokenize(self.platform_id))

    self.bug_indices = search_tokenizer.tokenize_bug_information(self)
    self.has_bug_flag = bool(self.bug_indices)
    self.is_a_duplicate_flag = bool(self.duplicate_of)
    # Index both the actual and overridden fuzzer names, deduplicated and
    # with empty values dropped.
    fuzzer_name_indices = list(
        set([self.fuzzer_name, self.overridden_fuzzer_name]))
    self.fuzzer_name_indices = [f for f in fuzzer_name_indices if f]

    # If the impact task hasn't been run (aka is_impact_set_flag=False) OR
    # if impact isn't applicable (aka has_impacts() is False), we wipe all
    # the impact fields' indices.
    if self.has_impacts() and self.is_impact_set_flag:
      self.impact_stable_version_indices = (
          search_tokenizer.tokenize_impact_version(self.impact_stable_version))
      self.impact_beta_version_indices = (
          search_tokenizer.tokenize_impact_version(self.impact_beta_version))
      self.impact_version_indices = list(
          set(self.impact_stable_version_indices +
              self.impact_beta_version_indices))

      if self.impact_beta_version:
        self.impact_version_indices.append('beta')
      if self.impact_stable_version:
        self.impact_version_indices.append('stable')
      if not self.impacts_production():
        self.impact_version_indices.append('head')
    else:
      self.impact_version_indices = []
      self.impact_stable_version_indices = []
      self.impact_beta_version_indices = []

  def _pre_put_hook(self):
    """Pre-put hook. Keeps search indices up to date on every write."""
    self.populate_indices()

  def _post_put_hook(self, _):
    """Post-put hook. Logs the update for audit/debugging purposes."""
    logs.log('Updated testcase %d (bug %s).' % (self.key.id(),
                                                self.bug_information or '-'))

  def set_impacts_as_na(self):
    """Reset all impact fields to their not-applicable state."""
    self.impact_stable_version = self.impact_beta_version = None
    self.impact_stable_version_likely = self.impact_beta_version_likely = False
    self.is_impact_set_flag = False

  def _ensure_metadata_is_cached(self):
    """Ensure that the metadata for this has been cached."""
    if hasattr(self, 'metadata_cache'):
      return

    try:
      cache = json_utils.loads(self.additional_metadata)
    except (TypeError, ValueError):
      # additional_metadata is None or not valid JSON; start empty.
      cache = {}

    setattr(self, 'metadata_cache', cache)

  def get_metadata(self, key=None, default=None):
    """Get metadata for a test case. Slow on first access."""
    self._ensure_metadata_is_cached()

    # If no key is specified, return all metadata.
    if not key:
      return self.metadata_cache

    try:
      return self.metadata_cache[key]
    except KeyError:
      return default

  def set_metadata(self, key, value, update_testcase=True):
    """Set metadata for a test case."""
    self._ensure_metadata_is_cached()
    self.metadata_cache[key] = value

    self.additional_metadata = json_utils.dumps(self.metadata_cache)
    if update_testcase:
      self.put()

  def delete_metadata(self, key, update_testcase=True):
    """Remove metadata key for a test case."""
    self._ensure_metadata_is_cached()

    # Make sure that the key exists in cache. If not, no work to do here.
    if key not in self.metadata_cache:
      return

    del self.metadata_cache[key]
    self.additional_metadata = json_utils.dumps(self.metadata_cache)
    if update_testcase:
      self.put()

  def actual_fuzzer_name(self):
    """Actual fuzzer name, uses one from overridden attribute if available."""
    return self.overridden_fuzzer_name or self.fuzzer_name

  def get_fuzz_target(self):
    """Get the associated FuzzTarget entity for this test case."""
    name = self.actual_fuzzer_name()
    if not name:
      return None

    return ndb.Key(FuzzTarget, name).get()
class BuildCrashStatsJobHistory(Model): """Represents the record of build_crash_stats run.""" # End time in hours from epoch, inclusively. end_time_in_hours = ndb.IntegerProperty()
class Fuzzer(Model):
  """Represents a fuzzer."""

  VALID_NAME_REGEX = NAME_CHECK_REGEX

  # Last update time.
  timestamp = ndb.DateTimeProperty()
  # Fuzzer Name.
  name = ndb.StringProperty()
  # The name of the archive that the user uploaded.
  filename = ndb.StringProperty()
  # Blobstore key for this fuzzer.
  blobstore_key = ndb.StringProperty()
  # String representation of the file size.
  file_size = ndb.StringProperty()
  # Fuzzer's main executable path, relative to root.
  executable_path = ndb.StringProperty()
  # Revision number of the fuzzer.
  revision = ndb.IntegerProperty()
  # Fuzzer's source (for accountability).
  source = ndb.StringProperty()
  # Testcase timeout.
  timeout = ndb.IntegerProperty()
  # Supported platforms.
  supported_platforms = ndb.StringProperty()
  # Custom script that should be used to launch chrome for this fuzzer.
  launcher_script = ndb.StringProperty()
  # Result from the last fuzzer run showing the number of testcases generated.
  result = ndb.StringProperty()
  # Last result update timestamp.
  result_timestamp = ndb.DateTimeProperty()
  # Console output from last fuzzer run.
  console_output = ndb.TextProperty()
  # Return code from last fuzzer run.
  return_code = ndb.IntegerProperty()
  # Blobstore key for the sample testcase generated by the fuzzer.
  sample_testcase = ndb.StringProperty()
  # Job types for this fuzzer.
  jobs = ndb.StringProperty(repeated=True)
  # Is the fuzzer coming from an external contributor ? Useful for adding
  # reward flags.
  external_contribution = ndb.BooleanProperty(default=False)
  # Max testcases to generate for this fuzzer.
  max_testcases = ndb.IntegerProperty()
  # Does it run un-trusted content ? Examples including running live sites.
  untrusted_content = ndb.BooleanProperty(default=False)
  # Data bundle name.
  data_bundle_name = ndb.StringProperty(default='')
  # Additional environment variables that need to be set for this fuzzer.
  additional_environment_string = ndb.TextProperty()
  # Column specification for stats.
  stats_columns = ndb.StringProperty(indexed=False)
  # Helpful descriptions for the stats_columns. In a yaml format.
  stats_column_descriptions = ndb.TextProperty(indexed=False)
  # Whether this is a builtin fuzzer.
  builtin = ndb.BooleanProperty(indexed=False, default=False)
  # Whether this is a differential fuzzer.
  differential = ndb.BooleanProperty(default=False)
class FuzzTargetsCount(Model):
  """Fuzz targets count for every job. Key IDs are the job name."""
  # Number of fuzz targets for the job named by this entity's key ID.
  count = ndb.IntegerProperty(indexed=False)