class FuzzerJob(Model):
  """Mapping between a fuzzer and job with additional metadata for selection."""

  # Fuzzer and job this mapping applies to, plus the job's platform.
  fuzzer = StringProperty()
  job = StringProperty()
  platform = StringProperty()
  # Base selection weight and an adjustable multiplier applied on top of it.
  weight = ndb.FloatProperty(default=1.0)
  multiplier = ndb.FloatProperty(default=1.0)

  @property
  def actual_weight(self):
    """Get the actual weight for this job."""
    # Effective weight is the base weight scaled by the multiplier.
    return self.multiplier * self.weight
class FuzzStrategyProbability(Model):
  """Mapping between fuzz strategies and probabilities with which they should
  be selected."""

  # Name of the fuzzing strategy this probability applies to.
  strategy_name = StringProperty()
  # Selection probability for the strategy.
  probability = ndb.FloatProperty()
  # Fuzzing engine the strategy belongs to.
  engine = StringProperty()
class Trial(Model): """Trials for specific binaries.""" # App name that this trial is applied to. E.g. "d8" or "chrome". app_name = StringProperty() # Chance to select this set of arguments. Zero to one. probability = ndb.FloatProperty() # Additional arguments to apply if selected. app_args = TextProperty()
class ItemNdb(ndb.Model):
    """Datastore model for an item with a uuid4 string id.

    NOTE(review): bare `ItemNdb` in annotations below requires postponed
    evaluation (`from __future__ import annotations` at file top) — confirm.
    """

    # id: uuid4
    # NOTE(review): class-level alias to the uuid.UUID type; presumably used
    # by callers as a type marker for ids — confirm it is referenced anywhere.
    TID = uuid.UUID

    name: str = ndb.StringProperty(required=True)
    price: float = ndb.FloatProperty(required=True)
    is_offer: typing.Optional[bool] = ndb.BooleanProperty(required=False)
    # Set automatically on first put (auto_now_add).
    created_at: typing.Optional[datetime.datetime] = ndb.DateTimeProperty(
        auto_now_add=True)

    @classmethod
    def generate_id(cls) -> str:
        """Return a fresh uuid4 as a string, suitable for use as an entity id."""
        return str(uuid.uuid4())

    @classmethod
    def get_by_id(cls, id_, *args, **kwargs) -> typing.Optional[ItemNdb]:
        """Fetch by id, coercing the id to str first (ids are stored as strings).

        Returns None when no entity with that id exists (NDB behavior).
        """
        return super().get_by_id(str(id_), *args, **kwargs)
class OssFuzzProject(Model): """Represents a project that has been set up for OSS-Fuzz.""" # Name of the project. name = StringProperty() # Whether or not the project should run on high end hosts. high_end = ndb.BooleanProperty(default=False) # Weight for CPU distribution. This is set by admins. cpu_weight = ndb.FloatProperty(default=1.0) # The disk size to use (overrides the default). disk_size_gb = ndb.IntegerProperty() # Service account for this project. service_account = StringProperty() # CCs for the project. ccs = StringProperty(repeated=True)
class FuzzTargetJob(Model): """Mapping between fuzz target and jobs with additional metadata for selection.""" # Fully qualified fuzz target name. fuzz_target_name = StringProperty() # Job this target ran as. job = StringProperty() # Engine this ran as. engine = StringProperty() # Relative frequency with which to select this fuzzer. weight = ndb.FloatProperty(default=1.0) # Approximate last time this target was run. last_run = ndb.DateTimeProperty() def _pre_put_hook(self): """Pre-put hook.""" self.key = ndb.Key(FuzzTargetJob, fuzz_target_job_key(self.fuzz_target_name, self.job))
class PaymentCoupons(ndb.Model):
    """Discount coupon entity with helpers to create, look up and consume coupons.

    All entities are created under the module-level `ancestor_key_coupons`
    ancestor, and all queries use that ancestor for strongly consistent reads.
    """

    # Random coupon code handed to the user.
    coupon = ndb.StringProperty()
    # When the coupon stops being valid.
    expiry_date = ndb.DateTimeProperty()
    # Whether the coupon has already been redeemed.
    consumed = ndb.BooleanProperty(default=False)
    # Discount kind: 0 = percentage, 1 = fixed amount.
    type = ndb.IntegerProperty(default=0)
    # Discount value (percent or fixed amount depending on `type`).
    amount = ndb.FloatProperty(default=0.0)
    added_date_time = ndb.DateTimeProperty(auto_now_add=True)

    @classmethod
    def generate_coupons(cls, request):
        """Create a batch of coupons from the request payload.

        Expects request.data to contain 'number_of_coupons', 'type', 'amount'
        and 'expiry_date' in '%Y-%m-%d %H:%M:%S.%f' format
        (e.g. 2018-06-29 08:15:27.243860). Returns True.
        """
        # secrets gives cryptographically strong random coupon codes.
        import secrets

        number_of_coupons = int(request.data['number_of_coupons'])
        # Avoid shadowing the builtin `type` with the local name.
        coupon_type = int(request.data['type'])
        # BUG FIX: `amount` is an ndb.FloatProperty; the previous int() cast
        # silently truncated fractional values (e.g. a 12.5% discount -> 12).
        amount = float(request.data['amount'])
        expiry_date = datetime.datetime.strptime(request.data['expiry_date'],
                                                 '%Y-%m-%d %H:%M:%S.%f')
        for _ in range(number_of_coupons):
            coupon_entity = cls(parent=ancestor_key_coupons)
            coupon_entity.coupon = secrets.token_hex(5)
            coupon_entity.amount = amount
            coupon_entity.type = coupon_type
            coupon_entity.expiry_date = expiry_date
            coupon_entity.put()
        return True

    @classmethod
    def get_details(cls, request):
        """Return the coupon entity matching request.data['coupon'], or None."""
        return cls.query(cls.coupon == request.data['coupon'],
                         ancestor=ancestor_key_coupons).get()

    @classmethod
    def consume_coupon(cls, request):
        """Mark the coupon named in request.data['coupon'] as consumed.

        Returns the updated entity on success, or False when no coupon with
        that code exists.
        """
        coupon = cls.query(cls.coupon == request.data['coupon'],
                           ancestor=ancestor_key_coupons).get()
        if coupon:
            coupon.consumed = True
            coupon.put()
            return coupon
        return False
class Report(ndb.Model):
    """Defines a report for a given site."""

    # Site this report was generated for.
    site = ndb.KeyProperty(kind=Site)
    # Set automatically when the report is first stored.
    created_on = ndb.DateTimeProperty(auto_now_add=True)
    # Individual audit scores.
    accessibility_score = ndb.FloatProperty()
    best_practices_score = ndb.FloatProperty()
    desktop_performance_score = ndb.FloatProperty()
    mobile_performance_score = ndb.FloatProperty()
    pwa_score = ndb.FloatProperty()
    seo_score = ndb.FloatProperty()

    @classmethod
    def to_dict(cls, report):
        """Serialize the score fields of *report* into a plain dict.

        Note: `site` and `created_on` are intentionally not included.
        """
        score_fields = (
            'accessibility_score',
            'best_practices_score',
            'desktop_performance_score',
            'mobile_performance_score',
            'seo_score',
            'pwa_score',
        )
        return {field: getattr(report, field) for field in score_fields}
class Testcase(Model):
  """Represents a single testcase."""

  # Crash on an invalid read/write.
  crash_type = StringProperty()
  # Crashing address.
  crash_address = TextProperty()
  # First x stack frames.
  crash_state = StringProperty()
  # Complete stacktrace.
  crash_stacktrace = TextProperty(indexed=False)
  # Last tested crash stacktrace using the latest revision.
  last_tested_crash_stacktrace = TextProperty(indexed=False)

  # Blobstore keys for various things like original testcase, minimized
  # testcase, etc.
  fuzzed_keys = TextProperty()
  minimized_keys = TextProperty()
  minidump_keys = TextProperty()

  # Tracking issue tracker bug. One bug number per line (future extension).
  bug_information = StringProperty()

  # Regression range.
  regression = StringProperty(default='')

  # Revisions where this issue has been fixed.
  fixed = StringProperty(default='')

  # Is it a security bug?
  security_flag = ndb.BooleanProperty(default=False)

  # Security severity of the bug.
  security_severity = ndb.IntegerProperty(indexed=False)

  # Did the bug reproduce only once?
  one_time_crasher_flag = ndb.BooleanProperty(default=False)

  # Any additional comments.
  comments = TextProperty(default='', indexed=False)

  # Revision that we discovered the crash in.
  crash_revision = ndb.IntegerProperty()

  # The file on the bot that generated the testcase.
  original_absolute_path = TextProperty(default='')
  absolute_path = TextProperty()

  # Minimized argument list.
  minimized_arguments = TextProperty(default='', indexed=False)

  # Window argument (usually width, height, top, left, etc).
  window_argument = TextProperty(default='', indexed=False)

  # Type of job associated with this testcase.
  job_type = StringProperty()

  # Original job queue used for tasks created for this testcase.
  queue = TextProperty()

  # State representing whether the fuzzed or minimized testcases are archived.
  archive_state = ndb.IntegerProperty(default=0, indexed=False)

  # File name of the original uploaded archive.
  archive_filename = TextProperty()

  # Is this a binary file?
  binary_flag = ndb.BooleanProperty(default=False, indexed=False)

  # Timestamp.
  timestamp = ndb.DateTimeProperty()

  # Does the testcase crash stack vary between crashes?
  flaky_stack = ndb.BooleanProperty(default=False, indexed=False)

  # Do we need to test this testcase using an HTTP/HTTPS server?
  http_flag = ndb.BooleanProperty(default=False, indexed=False)

  # Name of the fuzzer used to generate this testcase.
  fuzzer_name = StringProperty()

  # Status of this testcase (pending, processed, unreproducible, etc).
  status = StringProperty(default='Processed')

  # Id of the testcase that this is marked as a duplicate of.
  duplicate_of = ndb.IntegerProperty(indexed=False)

  # Flag indicating whether or not the testcase has been symbolized.
  symbolized = ndb.BooleanProperty(default=False, indexed=False)

  # Id for this testcase's associated group.
  group_id = ndb.IntegerProperty(default=0)

  # Tracking issue tracker bug for this testcase group.
  group_bug_information = ndb.IntegerProperty(default=0)

  # Fake user interaction sequences like key clicks, mouse movements, etc.
  gestures = TextProperty(repeated=True)

  # ASAN redzone size in bytes.
  redzone = ndb.IntegerProperty(default=128, indexed=False)

  # Flag indicating if UBSan detection should be disabled. This is needed for
  # cases when ASan and UBSan are bundled in the same build configuration
  # and we need to disable UBSan in some runs to find the potentially more
  # interesting ASan bugs.
  disable_ubsan = ndb.BooleanProperty(default=False)

  # Whether testcase is open.
  open = ndb.BooleanProperty(default=True)

  # Adjusts timeout based on multiplier value.
  timeout_multiplier = ndb.FloatProperty(default=1.0, indexed=False)

  # Additional metadata stored as a JSON object. This should be used for
  # properties that are not commonly accessed and do not need to be indexed.
  additional_metadata = TextProperty(indexed=False)

  # Boolean attribute indicating if cleanup triage needs to be done.
  triaged = ndb.BooleanProperty(default=False)

  # Project name associated with this test case.
  project_name = StringProperty()

  # keywords is used for searching.
  keywords = StringProperty(repeated=True)

  # Whether testcase has a bug (either bug_information or
  # group_bug_information).
  has_bug_flag = ndb.BooleanProperty()

  # Indices for bug_information and group_bug_information.
  bug_indices = StringProperty(repeated=True)

  # Overridden fuzzer name because actual fuzzer name can be different in many
  # scenarios (libfuzzer, afl, etc).
  overridden_fuzzer_name = StringProperty()

  # Platform (e.g. windows, linux, android).
  platform = StringProperty()

  # Platform id (e.g. windows, linux, android:hammerhead:l).
  # For Android, includes device type and underlying OS version.
  platform_id = StringProperty()

  # Impact indices for searching.
  impact_indices = StringProperty(repeated=True)

  # Whether or not a testcase is a duplicate of other testcase.
  is_a_duplicate_flag = ndb.BooleanProperty()

  # Whether or not a testcase is the leader of its group.
  # If the testcase is not in a group, it's the leader of a group of 1.
  # The default is false because we prefer not to show crashes until we are
  # sure. And group_task will correctly set the value within 30 minutes.
  is_leader = ndb.BooleanProperty(default=False)

  # Fuzzer name indices.
  fuzzer_name_indices = StringProperty(repeated=True)

  # The impacted version indices (including both beta and stable).
  impact_version_indices = StringProperty(repeated=True)

  # The impacted stable version.
  impact_stable_version = StringProperty()

  # The impacted stable version indices.
  impact_stable_version_indices = StringProperty(repeated=True)

  # The impacted stable version is merely probable (not definite). Because
  # for a non-asan build, we don't have a stable/beta build. Therefore, we
  # make an intelligent guess on the version.
  impact_stable_version_likely = ndb.BooleanProperty()

  # The impacted beta version.
  impact_beta_version = StringProperty()

  # The impacted beta version indices.
  impact_beta_version_indices = StringProperty(repeated=True)

  # The impacted beta version is merely probable (not definite). See the
  # comment on impact_stable_version_likely.
  impact_beta_version_likely = ndb.BooleanProperty()

  # Whether or not impact task has been run on this testcase.
  is_impact_set_flag = ndb.BooleanProperty()

  # Uploader email address.
  uploader_email = StringProperty()

  def has_blame(self):
    """Return True if blame information is applicable (chromium only)."""
    return self.project_name == 'chromium'

  def has_impacts(self):
    """Return True if impact analysis applies (chromium, reproducible only)."""
    return self.project_name == 'chromium' and not self.one_time_crasher_flag

  def impacts_production(self):
    """Return True if either a stable or a beta version is impacted."""
    return bool(self.impact_stable_version) or bool(self.impact_beta_version)

  def is_status_unreproducible(self):
    """Return True if the status marks this testcase as unreproducible."""
    return self.status and self.status.startswith('Unreproducible')

  def is_crash(self):
    """Return True if this testcase has a crash state."""
    return bool(self.crash_state)

  def populate_indices(self):
    """Populate keywords for fast test case list searching."""
    # tokenize() returns a set; union the tokens from every searchable field.
    self.keywords = list(
        search_tokenizer.tokenize(self.crash_state)
        | search_tokenizer.tokenize(self.crash_type)
        | search_tokenizer.tokenize(self.fuzzer_name)
        | search_tokenizer.tokenize(self.overridden_fuzzer_name)
        | search_tokenizer.tokenize(self.job_type)
        | search_tokenizer.tokenize(self.platform_id))

    self.bug_indices = search_tokenizer.tokenize_bug_information(self)
    self.has_bug_flag = bool(self.bug_indices)
    self.is_a_duplicate_flag = bool(self.duplicate_of)
    # Deduplicate and drop falsy (empty/None) fuzzer names.
    fuzzer_name_indices = list(
        set([self.fuzzer_name, self.overridden_fuzzer_name]))
    self.fuzzer_name_indices = [f for f in fuzzer_name_indices if f]

    # If the impact task hasn't been run (aka is_impact_set_flag=False) OR
    # if impact isn't applicable (aka has_impacts() is False), we wipe all
    # the impact fields' indices.
    if self.has_impacts() and self.is_impact_set_flag:
      self.impact_stable_version_indices = (
          search_tokenizer.tokenize_impact_version(self.impact_stable_version))
      self.impact_beta_version_indices = (
          search_tokenizer.tokenize_impact_version(self.impact_beta_version))
      self.impact_version_indices = list(
          set(self.impact_stable_version_indices +
              self.impact_beta_version_indices))

      if self.impact_beta_version:
        self.impact_version_indices.append('beta')
      if self.impact_stable_version:
        self.impact_version_indices.append('stable')
      if not self.impacts_production():
        self.impact_version_indices.append('head')
    else:
      self.impact_version_indices = []
      self.impact_stable_version_indices = []
      self.impact_beta_version_indices = []

  def _pre_put_hook(self):
    """Refresh search indices before every datastore write."""
    self.populate_indices()

  def _post_put_hook(self, _):
    """Log the put; runs after every datastore write."""
    if not self.key:
      # Failed put. An exception will be thrown automatically afterwards.
      return

    logs.log('Updated testcase %d (bug %s).' % (self.key.id(),
                                                self.bug_information or '-'))

  def set_impacts_as_na(self):
    """Reset all impact fields to their not-applicable state."""
    self.impact_stable_version = self.impact_beta_version = None
    self.impact_stable_version_likely = self.impact_beta_version_likely = False
    self.is_impact_set_flag = False

  def _ensure_metadata_is_cached(self):
    """Ensure that the metadata for this has been cached."""
    # metadata_cache is a transient per-instance attribute, not a
    # datastore property; it holds the parsed additional_metadata JSON.
    if hasattr(self, 'metadata_cache'):
      return

    try:
      cache = json_utils.loads(self.additional_metadata)
    except (TypeError, ValueError):
      # additional_metadata is None (TypeError) or invalid JSON (ValueError);
      # fall back to an empty cache rather than failing.
      cache = {}

    setattr(self, 'metadata_cache', cache)

  def get_metadata(self, key=None, default=None):
    """Get metadata for a test case. Slow on first access."""
    self._ensure_metadata_is_cached()

    # If no key is specified, return all metadata.
    if not key:
      return self.metadata_cache

    try:
      return self.metadata_cache[key]
    except KeyError:
      return default

  def set_metadata(self, key, value, update_testcase=True):
    """Set metadata for a test case."""
    self._ensure_metadata_is_cached()
    self.metadata_cache[key] = value

    # Keep the persisted JSON in sync with the in-memory cache.
    self.additional_metadata = json_utils.dumps(self.metadata_cache)
    if update_testcase:
      self.put()

  def delete_metadata(self, key, update_testcase=True):
    """Remove metadata key for a test case."""
    self._ensure_metadata_is_cached()

    # Make sure that the key exists in cache. If not, no work to do here.
    if key not in self.metadata_cache:
      return

    del self.metadata_cache[key]
    self.additional_metadata = json_utils.dumps(self.metadata_cache)
    if update_testcase:
      self.put()

  def actual_fuzzer_name(self):
    """Actual fuzzer name, uses one from overridden attribute if available."""
    return self.overridden_fuzzer_name or self.fuzzer_name

  def get_fuzz_target(self):
    """Get the associated FuzzTarget entity for this test case.

    Returns None for testcases without a fuzz target (e.g. blackbox fuzzers).
    """
    name = self.actual_fuzzer_name()
    if not name:
      return None

    return ndb.Key(FuzzTarget, name).get()
class Movie(ndb.Model):
    """Datastore model for a movie record (MovieLens-style fields)."""

    # Numeric movie identifier.
    movieId = ndb.IntegerProperty()
    # Movie title.
    title = ndb.StringProperty()
    # Genre list; stored as a single string — presumably delimiter-separated,
    # TODO confirm against the ingestion code.
    genres = ndb.StringProperty()
    # Release year.
    year = ndb.IntegerProperty()
    # Rating value.
    rating = ndb.FloatProperty()
class CatNDB(ndb.Model):
    """Defines what a Cat is at database level."""

    # The cat's name.
    name = ndb.StringProperty()
    # Date of birth (date only, no time component).
    date_of_birth = ndb.DateProperty()
    # Weight; units not specified here — confirm with callers.
    weight = ndb.FloatProperty()
    # Species/breed descriptor.
    species = ndb.StringProperty()