class User(BaseDocument):
    """A registered user account.

    Holds login credentials (pbkdf2_sha256-hashed password), e-mail
    change/validation state, password-reset state, terms acceptance and
    superuser / soft-delete flags.
    """

    name = CharField(indexed=True, unique=True, length=50)
    email = CharField(indexed=True, unique=True, length=255)
    # Pending address while an e-mail change is awaiting confirmation.
    new_email = CharField(indexed=True, unique=False, length=255)
    email_change_requested_at = DateTimeField()
    email_validated = BooleanField(indexed=True, default=False)
    email_validation_code = CharField(indexed=True, length=64)
    # Stores the pbkdf2_sha256 hash, never the plaintext password.
    password = CharField(indexed=False, length=128)
    password_reset_code = CharField(indexed=True, length=64)
    password_reset_requested_at = DateTimeField()
    terms_accepted = BooleanField(default=False)
    terms_accepted_at = DateTimeField()
    superuser = BooleanField(default=False)
    # Soft-delete marker.
    delete = BooleanField(default=False, indexed=True)

    def set_password(self, password):
        """Hash ``password`` with pbkdf2_sha256 and store the hash.

        Also sets the ``password_set`` flag on the document.
        """
        self.password = pbkdf2_sha256.hash(password)
        self.password_set = True

    def check_password(self, password):
        """Return True if ``password`` matches the stored hash."""
        return pbkdf2_sha256.verify(password, self.password)

    def get_access_token(self):
        """Create and return a new AccessToken with a random hex token."""
        access_token = AccessToken({'user': self, 'token': uuid.uuid4().hex})
        return access_token

    def is_superuser(self):
        """Return True if the superuser flag is present and truthy."""
        # Fix: `True if cond else False` is a redundant ternary — the
        # condition itself is the answer; bool() pins the return type.
        return bool('superuser' in self and self.superuser)
class Issue(BaseDocument):
    """
    An `Issue` object represents an issue or problem with the code. It can be
    associated with one or multiple file revisions, code objects etc. An issue
    fingerprint should be a unique identifier for a given issue, hence if two
    issues have the same fingerprint they should be judged "identical".
    """

    class IgnoreReason:
        # Enumeration of values stored in `ignore_reason` below.
        not_specified = 0
        not_relevant = 1
        false_positive = 2

    # calculated as hash(analyzer, code, fingerprint)
    hash = CharField(indexed=True, length=64)
    configuration = CharField(indexed=True, length=64)
    project = ForeignKeyField('Project', backref='issues', nullable=False)
    # Analyzer that produced the issue and its issue code.
    analyzer = CharField(indexed=True, length=100, nullable=False)
    code = CharField(indexed=True, length=100, nullable=False)
    # Unique identifier for the issue (see class docstring).
    fingerprint = CharField(indexed=True, length=255, nullable=False)
    # determines if this issue should be ignored
    ignore = BooleanField(indexed=True, default=False, nullable=False,
                          server_default=False)
    # gives a reason for the issue to be ignored (an IgnoreReason value,
    # e.g. false_positive)
    ignore_reason = IntegerField(indexed=True, nullable=True)
    # an optional comment for the ignore reason
    ignore_comment = CharField(indexed=False, length=255, nullable=True)

    class Meta(Document.Meta):
        # NOTE(review): sibling classes inherit from BaseDocument.Meta here —
        # confirm Document.Meta is intentional for Issue.
        unique_together = [('project', 'fingerprint', 'analyzer', 'code')]
        dbref_includes = ['code', 'analyzer']
class ProjectIssueClass(BaseDocument):
    """Link document that enables/disables an issue class for a project."""

    project = ForeignKeyField('Project', backref='project_issue_classes')
    issue_class = ForeignKeyField('IssueClass', backref='project_issue_classes')
    # Whether this issue class is active for the given project.
    enabled = BooleanField(default=True)

    class Meta(BaseDocument.Meta):
        # Each (project, issue_class) pair may exist only once.
        unique_together = (('project', 'issue_class'),)
class Actor(Document):
    """Document describing an actor and their movie/income attributes.

    NOTE(review): an identical ``Actor`` definition (differing only in quote
    style) appears twice in this chunk — the later definition wins; confirm
    the duplication is intentional (e.g. separate example/test modules).
    """

    name = CharField(indexed=True)
    gross_income_m = FloatField(indexed=True)
    # `key=` maps these flat fields onto the nested "salary" sub-document.
    salary_amount = FloatField(indexed=True, key="salary.amount")
    salary_currency = CharField(indexed=True, key="salary.currency")
    appearances = IntegerField(indexed=True)
    birth_year = IntegerField(indexed=True)
    favorite_food = ManyToManyField("Food")
    is_funny = BooleanField(indexed=True)
    movies = ManyToManyField("Movie", backref="actors")
class Actor(Document):
    """Document describing an actor and their movie/income attributes.

    NOTE(review): this is a duplicate of the ``Actor`` class defined just
    above (only the quote style differs) — this later definition shadows the
    earlier one; confirm the duplication is intentional.
    """

    name = CharField(indexed=True)
    gross_income_m = FloatField(indexed=True)
    # `key=` maps these flat fields onto the nested 'salary' sub-document.
    salary_amount = FloatField(indexed=True, key='salary.amount')
    salary_currency = CharField(indexed=True, key='salary.currency')
    appearances = IntegerField(indexed=True)
    birth_year = IntegerField(indexed=True)
    favorite_food = ManyToManyField('Food')
    is_funny = BooleanField(indexed=True)
    movies = ManyToManyField('Movie', backref='actors')
class Snapshot(BaseDocument):
    """A snapshot tying a set of file revisions to a project, with helpers to
    aggregate the issues found in those revisions."""

    # calculated as by the creating object
    hash = CharField(indexed=True, length=64)
    configuration = CharField(indexed=True, length=64)
    project = ForeignKeyField('Project')
    file_revisions = ManyToManyField('FileRevision', backref='snapshots')
    # Whether analysis has been performed for this snapshot.
    analyzed = BooleanField(indexed=True)

    class Meta(Document.Meta):
        pass

    def load(self, data):
        """
        Imports a snapshot from a data structure
        """
        # NOTE(review): stub — not implemented yet.
        pass

    def export(self):
        """
        Exports a snapshot to a data structure
        """
        # NOTE(review): stub — no body, implicitly returns None.

    def summarize_issues(self, include_filename=False, ignore=False):
        """Aggregate issue counts for this snapshot.

        :param include_filename: include the filename in the path aggregation
        :param ignore: summarize ignored (True) or non-ignored (False) issues
        :raises NotImplementedError: for non-SQL backends
        """
        if isinstance(self.backend, SqlBackend):
            return self._summarize_issues_sql(
                include_filename=include_filename, ignore=ignore)
        raise NotImplementedError

    def _summarize_issues_sql(self, include_filename=False, ignore=False):
        """SQL-backend implementation of :meth:`summarize_issues`.

        Builds a single aggregated SELECT over the file-revision/issue tables
        and map/reduces the rows into per-path-fragment issue counts.
        """
        # Resolve the concrete tables backing the documents involved.
        snapshot_file_revisions_table = self.backend.get_table(
            self.fields['file_revisions'].relationship_class)
        fr_table = self.backend.get_table(FileRevision)
        issue_table = self.backend.get_table(Issue)
        issue_occurrence_table = self.backend.get_table(IssueOccurrence)
        project_issue_class_table = self.backend.get_table(ProjectIssueClass)
        issue_class_table = self.backend.get_table(self.project.IssueClass)

        # Column types needed to cast the primary keys in raw comparisons.
        project_pk_type = self.backend.get_field_type(
            self.project.fields['pk'])
        snapshot_pk_type = self.backend.get_field_type(self.fields['pk'])

        # we group by file revision path, issue code and analyzer
        group_columns = [
            fr_table.c.language, fr_table.c.path, issue_table.c.code,
            issue_table.c.analyzer
        ]

        # we perform a JOIN of the file revision table to the issue tables
        table = fr_table\
            .join(issue_occurrence_table,
                  fr_table.c.pk == issue_occurrence_table.c.file_revision)\
            .join(issue_table,
                  and_(issue_table.c.pk == issue_occurrence_table.c.issue,
                       issue_table.c.ignore == ignore))

        # here we make sure that the given issue class is enabled for the project
        subselect = select([issue_class_table.c.pk])\
            .select_from(issue_class_table.join(project_issue_class_table))\
            .where(and_(
                issue_table.c.analyzer == issue_class_table.c.analyzer,
                issue_table.c.code == issue_class_table.c.code,
                issue_table.c.ignore == ignore,
                project_issue_class_table.c.project ==
                    expression.cast(self.project.pk, project_pk_type),
                project_issue_class_table.c.enabled == True))

        # Restrict to file revisions belonging to THIS snapshot.
        file_revisions_select = select(
            [snapshot_file_revisions_table.c.filerevision])\
            .where(snapshot_file_revisions_table.c.snapshot ==
                   expression.cast(self.pk, snapshot_pk_type))

        # we select the aggregated issues for all file revisions in this snapshot
        s = select(group_columns + [func.count().label('count')])\
            .select_from(table)\
            .where(and_(exists(subselect),
                        fr_table.c.pk.in_(file_revisions_select)))\
            .group_by(*group_columns)\
            .order_by(fr_table.c.path)

        # we fetch the result
        with self.backend.transaction():
            result = self.backend.connection.execute(s).fetchall()

        # we aggregate the issues by path fragments
        aggregator = lambda f: directory_splitter(
            f['path'], include_filename=include_filename)

        # we perform a map/reduce on the result
        # the resulting items will contain the number of files and the number
        # of issues in the file
        map_reducer = IssuesMapReducer(aggregators=[aggregator])
        return map_reducer.mapreduce(result)
class Project(BaseProject):
    """A project under analysis: status flags, scheduling metadata and
    authorization helpers."""

    class AnalysisPriority:
        # Queue priority levels — higher values are analyzed first.
        low = 0
        medium = 1
        high = 2
        do_it_now_i_say_exclamation_mark = 3

    class AnalysisStatus:
        # String constants stored in `analysis_status`.
        succeeded = 'succeeded'
        in_progress = 'in_progress'
        failed = 'failed'

    # Issue-class document used for this project (module-level IssueClass).
    IssueClass = IssueClass

    # Soft-delete request flag / completed-deletion flag.
    delete = BooleanField(indexed=True, default=False)
    deleted = BooleanField(indexed=True, default=False)
    name = CharField(indexed=True, length=100)
    description = CharField(indexed=True, length=2000)
    public = BooleanField(indexed=True, default=False)
    permalink = CharField(indexed=True, unique=True, nullable=False,
                          length=100)
    source = CharField(indexed=True, length=100, nullable=False)
    # Analysis scheduling state.
    analyze = BooleanField(indexed=True, default=False)
    analysis_priority = IntegerField(default=AnalysisPriority.low,
                                     indexed=True)
    analysis_requested_at = DateTimeField(indexed=True)
    analysis_status = CharField(indexed=True, length=50)
    analyzed_at = DateTimeField(indexed=True)
    reset = BooleanField(indexed=True, default=False)
    reset_requested_at = DateTimeField(indexed=True)
    fetched_at = DateTimeField(indexed=True, nullable=True)
    fetch_status = CharField(indexed=True, nullable=True)
    fetch_error = TextField(default='')
    tags = ManyToManyField('Tag')

    def get_analysis_queue_position(self, backend=None):
        """Return this project's 1-based position in the analysis queue, or
        None if the project is not queued for analysis.

        The position is 1 + the number of OTHER queued, non-deleted projects
        that either have a strictly higher priority, or the same priority
        with an earlier-or-equal request timestamp.
        """
        if backend is None:
            backend = self.backend
        # Same-priority tiebreaker: projects requested at or before ours.
        analysis_priority_query = [{
            'analysis_priority': self.analysis_priority
        }]
        if self.analysis_requested_at is not None:
            analysis_priority_query += [{
                'analysis_requested_at': {
                    '$lte': self.analysis_requested_at
                }
            }]
        # if the project is flagged for analysis we calculate its position in
        # the analysis queue...
        if self.get('analyze', False) and \
                self.get('analysis_priority', None) is not None:
            return len(
                backend.filter(
                    self.__class__,
                    {
                        '$and': [
                            # Only projects waiting for analysis...
                            {'analyze': True},
                            # ...excluding this project itself...
                            {'pk': {'$ne': self.pk}},
                            # ...that are not deleted (field absent or False)...
                            {'$or': [{'deleted': {'$exists': False}},
                                     {'deleted': False}]},
                            # ...and are ahead of us: higher priority, or same
                            # priority requested earlier (see above).
                            {'$or': [{'analysis_priority': {
                                         '$gt': self.analysis_priority}},
                                     {'$and': analysis_priority_query}]}
                        ]
                    })) + 1
        return None

    def is_authorized(self, user, roles=None, public_ok=False, backend=None):
        """
        Checks if a user is allowed to access a project.

        Returns True or False
        """
        if backend is None:
            backend = self.backend
        if roles is None:
            roles = ['admin', 'collaborator', 'owner']
        # super users can see everything
        if user.is_superuser():
            return True
        # Public projects are visible to anyone when the caller allows it.
        if public_ok and self.get("public"):
            return True
        # check if the user is authorized via a role
        user_roles = backend.filter(UserRole, {
            'project': self,
            'user': user,
            'role': {
                '$in': list(roles)
            }
        })
        if user_roles:
            return True
        return False