def __init__(self, field=None, **params):
    """Initialize an array field wrapping an element field.

    :param field: the field describing the array elements; a fresh
        ``Text()`` field is used when none is given.
    :param params: extra keyword arguments forwarded to the superclass.
    """
    super().__init__(**params)
    # Default the element type lazily to avoid a shared mutable default.
    self.__field = field if field is not None else Text()
    self.__column_type = ARRAY(self.__field.column_type)
class OAuthApplication(db.Model):
    """OAuth applications registered in Indico."""

    __tablename__ = 'applications'

    @declared_attr
    def __table_args__(cls):
        # Application names must be unique case-insensitively, and each
        # system app type other than `none` may exist at most once.
        return (db.Index('ix_uq_applications_name_lower', db.func.lower(cls.name), unique=True),
                db.Index(None, cls.system_app_type, unique=True,
                         postgresql_where=db.text(f'system_app_type != {SystemAppType.none.value}')),
                {'schema': 'oauth'})

    #: the unique id of the application
    id = db.Column(db.Integer, primary_key=True)
    #: human readable name
    name = db.Column(db.String, nullable=False)
    #: human readable description
    description = db.Column(db.Text, nullable=False, default='')
    #: the OAuth client_id
    client_id = db.Column(UUID, unique=True, nullable=False, default=lambda: str(uuid4()))
    #: the OAuth client_secret
    client_secret = db.Column(UUID, nullable=False, default=lambda: str(uuid4()))
    #: the OAuth default scopes the application may request access to
    default_scopes = db.Column(ARRAY(db.String), nullable=False)
    #: the OAuth absolute URIs that a application may use to redirect to after authorization
    redirect_uris = db.Column(ARRAY(db.String), nullable=False, default=[])
    #: whether the application is enabled or disabled
    is_enabled = db.Column(db.Boolean, nullable=False, default=True)
    #: whether the application can access user data without asking for permission
    is_trusted = db.Column(db.Boolean, nullable=False, default=False)
    #: the type of system app (if any); system apps cannot be deleted
    system_app_type = db.Column(PyIntEnum(SystemAppType), nullable=False, default=SystemAppType.none)

    # relationship backrefs:
    # - tokens (OAuthToken.application)

    @property
    def client_type(self):
        # Only public (non-confidential) clients are used here.
        return 'public'

    @property
    def default_redirect_uri(self):
        """The first whitelisted redirect URI, or ``None`` when none are set."""
        return self.redirect_uris[0] if self.redirect_uris else None

    @property
    def locator(self):
        return {'id': self.id}

    def __repr__(self):  # pragma: no cover
        return f'<OAuthApplication({self.id}, {self.name}, {self.client_id})>'

    def reset_client_secret(self):
        """Generate a fresh client secret, invalidating the previous one."""
        self.client_secret = str(uuid4())
        logger.info("Client secret for %s has been reset.", self)

    def validate_redirect_uri(self, redirect_uri):
        """Called by flask-oauthlib to validate the redirect_uri.

        Uses a logic similar to the one at GitHub, i.e. protocol and
        host/port must match exactly and if there is a path in the
        whitelisted URL, the path of the redirect_uri must start with
        that path.
        """
        uri_data = url_parse(redirect_uri)
        for valid_uri_data in map(url_parse, self.redirect_uris):
            # scheme and netloc must match exactly; path is a prefix match
            if (uri_data.scheme == valid_uri_data.scheme and
                    uri_data.netloc == valid_uri_data.netloc and
                    uri_data.path.startswith(valid_uri_data.path)):
                return True
        return False
def migrate(driver):
    """Run all pending manual schema migrations against ``driver``'s database.

    In order: widen ``expires`` to BIGINT on the refresh-token table, apply
    the OIDC client-table changes, adjust the Google proxy group / service
    account / bucket access group tables, run the authlib update and finally
    the delete-user cascade migration.  No-op when the engine dialect does
    not support ``ALTER``.

    :param driver: database driver wrapper exposing ``engine``, ``session``
        (context manager) and ``Session`` (session factory).
    """
    if not driver.engine.dialect.supports_alter:
        print(
            "This engine dialect doesn't support altering so we are not migrating even if necessary!"
        )
        return
    md = MetaData()

    _migrate_refresh_token_expires(driver, md)
    _migrate_oidc_client(driver, md)

    add_column_if_not_exist(
        table_name=GoogleProxyGroup.__tablename__,
        column=Column("email", String),
        driver=driver,
        metadata=md,
    )
    drop_foreign_key_column_if_exist(
        table_name=GoogleProxyGroup.__tablename__,
        column_name="user_id",
        driver=driver,
        metadata=md,
    )
    _add_google_project_id(driver, md)
    drop_unique_constraint_if_exist(
        table_name=GoogleServiceAccount.__tablename__,
        column_name="google_unique_id",
        driver=driver,
        metadata=md,
    )
    drop_unique_constraint_if_exist(
        table_name=GoogleServiceAccount.__tablename__,
        column_name="google_project_id",
        driver=driver,
        metadata=md,
    )
    add_column_if_not_exist(
        table_name=GoogleBucketAccessGroup.__tablename__,
        column=Column("privileges", ARRAY(String)),
        driver=driver,
        metadata=md,
    )
    _update_for_authlib(driver, md)
    _migrate_delete_user_cascades(driver, md)


def _migrate_refresh_token_expires(driver, md):
    """Widen ``expires`` on the refresh-token table to BIGINT (epoch seconds)."""
    table = Table(
        UserRefreshToken.__tablename__, md, autoload=True, autoload_with=driver.engine
    )
    if str(table.c.expires.type) != "BIGINT":
        print(
            "Altering table %s expires to BIGINT" % (UserRefreshToken.__tablename__)
        )
        # Install the datetime -> unix-timestamp conversion function first;
        # the ALTER below uses it in its USING clause.
        with driver.session as session:
            session.execute(to_timestamp)
        with driver.session as session:
            session.execute(
                "ALTER TABLE {} ALTER COLUMN expires TYPE BIGINT USING pc_datetime_to_timestamp(expires);"
                .format(UserRefreshToken.__tablename__)
            )


def _migrate_oidc_client(driver, md):
    """OIDC migration: unique client name plus the ``_allowed_scopes`` column."""
    table = Table(Client.__tablename__, md, autoload=True, autoload_with=driver.engine)
    if not any(index.name == "ix_name" for index in table.indexes):
        with driver.session as session:
            session.execute(
                "ALTER TABLE {} ADD constraint ix_name unique (name);".format(
                    Client.__tablename__
                )
            )
    if "_allowed_scopes" not in table.c:
        print(
            "Altering table {} to add _allowed_scopes column".format(
                Client.__tablename__
            )
        )
        with driver.session as session:
            session.execute(
                "ALTER TABLE {} ADD COLUMN _allowed_scopes VARCHAR;".format(
                    Client.__tablename__
                )
            )
            # Backfill existing clients with the configured default scopes
            # before tightening the column with NOT NULL.
            for client in session.query(Client):
                if not client._allowed_scopes:
                    client._allowed_scopes = " ".join(
                        config["CLIENT_ALLOWED_SCOPES"]
                    )
                    session.add(client)
            session.commit()
            session.execute(
                "ALTER TABLE {} ALTER COLUMN _allowed_scopes SET NOT NULL;".format(
                    Client.__tablename__
                )
            )


def _migrate_delete_user_cascades(driver, md):
    """Delete-user migration: fix ON DELETE behavior of user foreign keys.

    Skipped entirely when at least one constraint was already migrated.
    """
    user = Table(User.__tablename__, md, autoload=True, autoload_with=driver.engine)
    found_user_constraint_already_migrated = False

    # TODO: Once sqlalchemy is bumped to above 1.0.0, just use the first version
    try:
        for fkey in list(user.foreign_key_constraints):
            if (
                str(fkey.parent) == "User.google_proxy_group_id"
                and fkey.ondelete == "SET NULL"
            ):
                found_user_constraint_already_migrated = True
    except AttributeError:
        # Older sqlalchemy: Table has no `foreign_key_constraints` attribute.
        for fkey in list(user.foreign_keys):
            if (
                str(fkey.parent) == "User.google_proxy_group_id"
                and fkey.ondelete == "SET NULL"
            ):
                found_user_constraint_already_migrated = True

    if not found_user_constraint_already_migrated:
        # do delete user migration in one session
        delete_user_session = driver.Session()
        try:
            # Deleting google proxy group shouldn't delete user
            set_foreign_key_constraint_on_delete_setnull(
                table_name=User.__tablename__,
                column_name="google_proxy_group_id",
                fk_table_name=GoogleProxyGroup.__tablename__,
                fk_column_name="id",
                driver=driver,
                session=delete_user_session,
                metadata=md,
            )
            _set_on_delete_cascades(driver, delete_user_session, md)
            delete_user_session.commit()
        except BaseException:
            # Roll back on any failure (including interrupts), then re-raise.
            delete_user_session.rollback()
            raise
        finally:
            delete_user_session.close()
class Category(SearchableTitleMixin, DescriptionMixin, ProtectionManagersMixin, AttachedItemsMixin,
               db.Model):
    """An Indico category."""

    __tablename__ = 'categories'
    disallowed_protection_modes = frozenset()
    inheriting_have_acl = True
    possible_render_modes = {RenderMode.markdown}
    default_render_mode = RenderMode.markdown
    allow_no_access_contact = True
    ATTACHMENT_FOLDER_ID_COLUMN = 'category_id'

    @strict_classproperty
    @classmethod
    def __auto_table_args(cls):
        # icon/logo blobs must be set iff their metadata is; only the root
        # category (id 0) may have no parent, and the root can neither be
        # deleted nor inherit protection; visibility is 1-based when set.
        return (db.CheckConstraint("(icon IS NULL) = (icon_metadata::text = 'null')", 'valid_icon'),
                db.CheckConstraint("(logo IS NULL) = (logo_metadata::text = 'null')", 'valid_logo'),
                db.CheckConstraint("(parent_id IS NULL) = (id = 0)", 'valid_parent'),
                db.CheckConstraint("(id != 0) OR NOT is_deleted", 'root_not_deleted'),
                db.CheckConstraint("(id != 0) OR (protection_mode != {})".format(ProtectionMode.inheriting),
                                   'root_not_inheriting'),
                db.CheckConstraint('visibility IS NULL OR visibility > 0', 'valid_visibility'),
                {'schema': 'categories'})

    @declared_attr
    def __table_args__(cls):
        return auto_table_args(cls)

    id = db.Column(db.Integer, primary_key=True)
    parent_id = db.Column(db.Integer,
                          db.ForeignKey('categories.categories.id'),
                          index=True,
                          nullable=True)
    is_deleted = db.Column(db.Boolean, nullable=False, default=False)
    position = db.Column(db.Integer, nullable=False, default=_get_next_position)
    visibility = db.Column(db.Integer, nullable=True, default=None)
    icon_metadata = db.Column(JSONB, nullable=False, default=lambda: None)
    icon = db.deferred(db.Column(db.LargeBinary, nullable=True))
    logo_metadata = db.Column(JSONB, nullable=False, default=lambda: None)
    logo = db.deferred(db.Column(db.LargeBinary, nullable=True))
    timezone = db.Column(db.String, nullable=False, default=lambda: config.DEFAULT_TIMEZONE)
    default_event_themes = db.Column(JSONB, nullable=False, default=_get_default_event_themes)
    event_creation_restricted = db.Column(db.Boolean, nullable=False, default=True)
    event_creation_notification_emails = db.Column(ARRAY(db.String), nullable=False, default=[])
    event_message_mode = db.Column(PyIntEnum(EventMessageMode),
                                   nullable=False,
                                   default=EventMessageMode.disabled)
    _event_message = db.Column('event_message', db.Text, nullable=False, default='')
    suggestions_disabled = db.Column(db.Boolean, nullable=False, default=False)
    notify_managers = db.Column(db.Boolean, nullable=False, default=False)
    default_ticket_template_id = db.Column(db.ForeignKey('indico.designer_templates.id'),
                                           nullable=True,
                                           index=True)

    # self-referential parent/children relationship; deleted children are
    # excluded from `children` via the primaryjoin
    children = db.relationship('Category',
                               order_by='Category.position',
                               primaryjoin=(id == db.remote(parent_id)) & ~db.remote(is_deleted),
                               lazy=True,
                               backref=db.backref('parent',
                                                  primaryjoin=(db.remote(id) == parent_id),
                                                  lazy=True))
    acl_entries = db.relationship('CategoryPrincipal',
                                  backref='category',
                                  cascade='all, delete-orphan',
                                  collection_class=set)
    default_ticket_template = db.relationship('DesignerTemplate',
                                              lazy=True,
                                              foreign_keys=default_ticket_template_id,
                                              backref='default_ticket_template_of')

    # column properties:
    # - deep_events_count

    # relationship backrefs:
    # - attachment_folders (AttachmentFolder.category)
    # - designer_templates (DesignerTemplate.category)
    # - events (Event.category)
    # - favorite_of (User.favorite_categories)
    # - legacy_mapping (LegacyCategoryMapping.category)
    # - parent (Category.children)
    # - settings (CategorySetting.category)
    # - suggestions (SuggestedCategory.category)

    @hybrid_property
    def event_message(self):
        # Wrap the raw text so it renders as markdown.
        return MarkdownText(self._event_message)

    @event_message.setter
    def event_message(self, value):
        self._event_message = value

    @event_message.expression
    def event_message(cls):
        return cls._event_message

    @return_ascii
    def __repr__(self):
        return format_repr(self, 'id', is_deleted=False,
                           _text=text_to_repr(self.title, max_length=75))

    @property
    def protection_parent(self):
        # The root category has nothing to inherit protection from.
        return self.parent if not self.is_root else None

    @locator_property
    def locator(self):
        return {'category_id': self.id}

    @classmethod
    def get_root(cls):
        """Get the root category."""
        return cls.query.filter(cls.is_root).one()

    @property
    def url(self):
        return url_for('categories.display', self)

    @property
    def has_only_events(self):
        return self.has_events and not self.children

    @hybrid_property
    def is_root(self):
        return self.parent_id is None

    @is_root.expression
    def is_root(cls):
        return cls.parent_id.is_(None)

    @property
    def is_empty(self):
        return not self.deep_children_count and not self.deep_events_count

    @property
    def has_icon(self):
        return self.icon_metadata is not None

    @property
    def has_effective_icon(self):
        # Considers icons inherited from parent categories as well.
        return self.effective_icon_data['metadata'] is not None

    @property
    def has_logo(self):
        return self.logo_metadata is not None

    @property
    def tzinfo(self):
        return pytz.timezone(self.timezone)

    @property
    def display_tzinfo(self):
        """The tzinfo of the category or the one specified by the user."""
        return get_display_tz(self, as_timezone=True)

    def can_create_events(self, user):
        """Check whether the user can create events in the category."""
        # if creation is not restricted anyone who can access the category
        # can also create events in it, otherwise only people with the
        # creation role can
        return user and ((not self.event_creation_restricted and self.can_access(user)) or
                         self.can_manage(user, permission='create'))

    def move(self, target):
        """Move the category into another category."""
        assert not self.is_root
        old_parent = self.parent
        # append at the end of the target's children
        self.position = (max(x.position for x in target.children) + 1) if target.children else 1
        self.parent = target
        db.session.flush()
        signals.category.moved.send(self, old_parent=old_parent)

    @classmethod
    def get_tree_cte(cls, col='id'):
        """Create a CTE for the category tree.

        The CTE contains the following columns:

        - ``id`` -- the category id
        - ``path`` -- an array containing the path from the root to
          the category itself
        - ``is_deleted`` -- whether the category is deleted

        :param col: The name of the column to use in the path or a
                    callable receiving the category alias that must
                    return the expression used for the 'path'
                    retrieved by the CTE.
        """
        cat_alias = db.aliased(cls)
        if callable(col):
            path_column = col(cat_alias)
        else:
            path_column = getattr(cat_alias, col)
        # anchor: the root categories
        cte_query = (select([cat_alias.id, array([path_column]).label('path'), cat_alias.is_deleted])
                     .where(cat_alias.parent_id.is_(None))
                     .cte(recursive=True))
        # recursive step: append each child to its parent's path; a category
        # counts as deleted if any ancestor is deleted
        rec_query = (select([cat_alias.id,
                             cte_query.c.path.op('||')(path_column),
                             cte_query.c.is_deleted | cat_alias.is_deleted])
                     .where(cat_alias.parent_id == cte_query.c.id))
        return cte_query.union_all(rec_query)

    @classmethod
    def get_protection_cte(cls):
        # Resolves the effective protection mode of every category: an
        # inheriting category takes the mode computed for its parent.
        cat_alias = db.aliased(cls)
        cte_query = (select([cat_alias.id, cat_alias.protection_mode])
                     .where(cat_alias.parent_id.is_(None))
                     .cte(recursive=True))
        rec_query = (select([cat_alias.id,
                             db.case({ProtectionMode.inheriting.value: cte_query.c.protection_mode},
                                     else_=cat_alias.protection_mode,
                                     value=cat_alias.protection_mode)])
                     .where(cat_alias.parent_id == cte_query.c.id))
        return cte_query.union_all(rec_query)

    def get_protection_parent_cte(self):
        # Walks down from this category tracking which ancestor each
        # descendant effectively inherits its protection from.
        cte_query = (select([Category.id,
                             db.cast(literal(None), db.Integer).label('protection_parent')])
                     .where(Category.id == self.id)
                     .cte(recursive=True))
        rec_query = (select([Category.id,
                             db.case({ProtectionMode.inheriting.value:
                                      func.coalesce(cte_query.c.protection_parent, self.id)},
                                     else_=Category.id,
                                     value=Category.protection_mode)])
                     .where(Category.parent_id == cte_query.c.id))
        return cte_query.union_all(rec_query)

    @classmethod
    def get_icon_data_cte(cls):
        # For each category, finds the closest ancestor (or itself) that has
        # icon metadata set (jsonb_typeof != 'null') and carries its id and
        # metadata down the tree.
        cat_alias = db.aliased(cls)
        cte_query = (select([cat_alias.id,
                             cat_alias.id.label('source_id'),
                             cat_alias.icon_metadata])
                     .where(cat_alias.parent_id.is_(None))
                     .cte(recursive=True))
        rec_query = (select([cat_alias.id,
                             db.case({'null': cte_query.c.source_id},
                                     else_=cat_alias.id,
                                     value=db.func.jsonb_typeof(cat_alias.icon_metadata)),
                             db.case({'null': cte_query.c.icon_metadata},
                                     else_=cat_alias.icon_metadata,
                                     value=db.func.jsonb_typeof(cat_alias.icon_metadata))])
                     .where(cat_alias.parent_id == cte_query.c.id))
        return cte_query.union_all(rec_query)

    @property
    def deep_children_query(self):
        """Get a query object for all subcategories.

        This includes subcategories at any level of nesting.
        """
        cte = Category.get_tree_cte()
        return (Category.query.join(cte, Category.id == cte.c.id)
                .filter(cte.c.path.contains([self.id]),
                        cte.c.id != self.id,
                        ~cte.c.is_deleted))

    @staticmethod
    def _get_chain_query(start_criterion):
        # Builds a query walking upwards from the categories matching
        # `start_criterion`, ordered root-first via the descending level.
        cte_query = (select([Category.id, Category.parent_id, literal(0).label('level')])
                     .where(start_criterion)
                     .cte('category_chain', recursive=True))
        parent_query = (select([Category.id, Category.parent_id, cte_query.c.level + 1])
                        .where(Category.id == cte_query.c.parent_id))
        cte_query = cte_query.union_all(parent_query)
        return (Category.query.join(cte_query, Category.id == cte_query.c.id)
                .order_by(cte_query.c.level.desc()))

    @property
    def chain_query(self):
        """Get a query object for the category chain.

        The query retrieves the root category first and then all the
        intermediate categories up to (and including) this category.
        """
        return self._get_chain_query(Category.id == self.id)

    @property
    def parent_chain_query(self):
        """Get a query object for the category's parent chain.

        The query retrieves the root category first and then all the
        intermediate categories up to (excluding) this category.
        """
        return self._get_chain_query(Category.id == self.parent_id)

    def nth_parent(self, n_categs, fail_on_overflow=True):
        """Return the nth parent of the category.

        :param n_categs: the number of categories to go up
        :param fail_on_overflow: whether to fail if we try to go above
                                 the root category
        :return: `Category` object or None (only if ``fail_on_overflow``
                 is not set)
        """
        if n_categs == 0:
            return self
        chain = self.parent_chain_query.all()
        assert n_categs >= 0
        if n_categs > len(chain):
            if fail_on_overflow:
                raise IndexError("Root category has no parent!")
            else:
                return None
        # chain is root-first; reverse it so index 0 is the direct parent
        return chain[::-1][n_categs - 1]

    def is_descendant_of(self, categ):
        return categ != self and self.parent_chain_query.filter(Category.id == categ.id).has_rows()

    @property
    def visibility_horizon_query(self):
        """Get a query object that returns the highest category this one is visible from."""
        # `n` tracks how many more levels upwards this category remains
        # visible (NULL meaning unlimited); the walk stops when n hits 0.
        cte_query = (select([Category.id, Category.parent_id,
                             db.case([(Category.visibility.is_(None), None)],
                                     else_=(Category.visibility - 1)).label('n'),
                             literal(0).label('level')])
                     .where(Category.id == self.id)
                     .cte('visibility_horizon', recursive=True))
        parent_query = (select([Category.id, Category.parent_id,
                                db.case([(Category.visibility.is_(None) & cte_query.c.n.is_(None), None)],
                                        else_=db.func.least(Category.visibility, cte_query.c.n) - 1),
                                cte_query.c.level + 1])
                        .where(db.and_(Category.id == cte_query.c.parent_id,
                                       (cte_query.c.n > 0) | cte_query.c.n.is_(None))))
        cte_query = cte_query.union_all(parent_query)
        # keep only the topmost category reached
        return (db.session.query(cte_query.c.id, cte_query.c.n)
                .order_by(cte_query.c.level.desc())
                .limit(1))

    @property
    def own_visibility_horizon(self):
        """Get the highest category this one would like to be visible from (configured visibility)."""
        if self.visibility is None:
            return Category.get_root()
        else:
            return self.nth_parent(self.visibility - 1)

    @property
    def real_visibility_horizon(self):
        """Get the highest category this one is actually visible from (as limited by categories above)."""
        horizon_id, final_visibility = self.visibility_horizon_query.one()
        if final_visibility is not None and final_visibility < 0:
            return None  # Category is invisible
        return Category.get(horizon_id)

    @staticmethod
    def get_visible_categories_cte(category_id):
        """
        Get a sqlalchemy select for the visible categories within
        the given category, including the category itself.
        """
        cte_query = (select([Category.id, literal(0).label('level')])
                     .where((Category.id == category_id) &
                            (Category.visibility.is_(None) | (Category.visibility > 0)))
                     .cte(recursive=True))
        # descend only into children whose visibility reaches at least down
        # to the current depth
        parent_query = (select([Category.id, cte_query.c.level + 1])
                        .where(db.and_(Category.parent_id == cte_query.c.id,
                                       db.or_(Category.visibility.is_(None),
                                              Category.visibility > cte_query.c.level + 1))))
        return cte_query.union_all(parent_query)

    @property
    def visible_categories_query(self):
        """
        Get a query object for the visible categories within
        this category, including the category itself.
        """
        cte_query = Category.get_visible_categories_cte(self.id)
        return Category.query.join(cte_query, Category.id == cte_query.c.id)

    @property
    def icon_url(self):
        """Get the HTTP URL of the icon."""
        return url_for('categories.display_icon', self, slug=self.icon_metadata['hash'])

    @property
    def effective_icon_url(self):
        """Get the HTTP URL of the icon (possibly inherited)."""
        data = self.effective_icon_data
        return url_for('categories.display_icon', category_id=data['source_id'],
                       slug=data['metadata']['hash'])

    @property
    def logo_url(self):
        """Get the HTTP URL of the logo."""
        return url_for('categories.display_logo', self, slug=self.logo_metadata['hash'])
class Appointment(Base):
    """An advising appointment (drop-in or scheduled), soft-deletable."""

    __tablename__ = 'appointments'

    id = db.Column(db.Integer, nullable=False, primary_key=True)  # noqa: A003
    advisor_dept_codes = db.Column(ARRAY(db.String), nullable=True)
    advisor_name = db.Column(db.String(255), nullable=True)
    advisor_role = db.Column(db.String(255), nullable=True)
    advisor_uid = db.Column(db.String(255), nullable=True)
    appointment_type = db.Column(appointment_type_enum, nullable=False)
    created_by = db.Column(db.Integer, db.ForeignKey('authorized_users.id'), nullable=False)
    deleted_at = db.Column(db.DateTime, nullable=True)
    deleted_by = db.Column(db.Integer, db.ForeignKey('authorized_users.id'), nullable=True)
    dept_code = db.Column(db.String(80), nullable=False)
    details = db.Column(db.Text, nullable=True)
    scheduled_time = db.Column(db.DateTime, nullable=True)
    status = db.Column(appointment_event_type, nullable=False)
    student_contact_info = db.Column(db.String(255), nullable=True)
    student_contact_type = db.Column(appointment_student_contact_type_enum, nullable=True)
    student_sid = db.Column(db.String(80), nullable=False)
    updated_by = db.Column(db.Integer, db.ForeignKey('authorized_users.id'), nullable=True)

    # non-deleted topics only, ordered alphabetically
    topics = db.relationship(
        'AppointmentTopic',
        primaryjoin='and_(Appointment.id==AppointmentTopic.appointment_id, AppointmentTopic.deleted_at==None)',
        back_populates='appointment',
        order_by='AppointmentTopic.topic',
        lazy=True,
    )

    def __init__(
        self,
        appointment_type,
        created_by,
        dept_code,
        details,
        status,
        student_sid,
        updated_by,
        advisor_dept_codes=None,
        advisor_name=None,
        advisor_role=None,
        advisor_uid=None,
        scheduled_time=None,
        student_contact_info=None,
        student_contact_type=None,
    ):
        self.advisor_dept_codes = advisor_dept_codes
        self.advisor_name = advisor_name
        self.advisor_role = advisor_role
        self.advisor_uid = advisor_uid
        self.appointment_type = appointment_type
        self.created_by = created_by
        self.dept_code = dept_code
        self.details = details
        self.scheduled_time = scheduled_time
        self.status = status
        self.student_contact_info = student_contact_info
        self.student_contact_type = student_contact_type
        self.student_sid = student_sid
        self.updated_by = updated_by

    @classmethod
    def find_by_id(cls, appointment_id):
        # Soft-deleted appointments are excluded.
        return cls.query.filter(and_(cls.id == appointment_id, cls.deleted_at == None)).first()  # noqa: E711

    @classmethod
    def get_appointments_per_sid(cls, sid):
        return cls.query.filter(and_(cls.student_sid == sid, cls.deleted_at == None)).order_by(cls.updated_at, cls.id).all()  # noqa: E711

    @classmethod
    def get_drop_in_waitlist(cls, dept_code, statuses=()):
        """Today's drop-in appointments for a department, filtered by status."""
        local_today = localize_datetime(datetime.now()).strftime('%Y-%m-%d')
        start_of_today = localized_timestamp_to_utc(f'{local_today}T00:00:00')
        criterion = and_(
            cls.created_at >= start_of_today,
            cls.appointment_type == 'Drop-in',
            cls.status.in_(statuses),
            cls.deleted_at == None,  # noqa: E711
            cls.dept_code == dept_code,
        )
        return cls.query.filter(criterion).order_by(cls.created_at).all()

    @classmethod
    def get_scheduled(cls, dept_code, local_date, advisor_uid=None):
        """Scheduled appointments on ``local_date``, optionally for one advisor."""
        date_str = local_date.strftime('%Y-%m-%d')
        start_of_today = localized_timestamp_to_utc(f'{date_str}T00:00:00')
        end_of_today = localized_timestamp_to_utc(f'{date_str}T23:59:59')
        query = cls.query.filter(
            and_(
                cls.scheduled_time >= start_of_today,
                cls.scheduled_time <= end_of_today,
                cls.appointment_type == 'Scheduled',
                cls.deleted_at == None,  # noqa: E711
                cls.dept_code == dept_code,
            ),
        )
        if advisor_uid:
            query = query.filter(cls.advisor_uid == advisor_uid)
        return query.order_by(cls.scheduled_time).all()

    @classmethod
    def create(
        cls,
        created_by,
        dept_code,
        details,
        appointment_type,
        student_sid,
        advisor_attrs=None,
        topics=(),
        scheduled_time=None,
        student_contact_info=None,
        student_contact_type=None,
    ):
        """Create an appointment plus its topics and initial status event."""
        # If this appointment comes in already assigned to the intake desk, we treat it as resolved.
        if advisor_attrs and advisor_attrs['role'] == 'Intake Desk':
            status = 'checked_in'
        elif advisor_attrs:
            status = 'reserved'
        else:
            status = 'waiting'
        appointment = cls(
            advisor_uid=advisor_attrs and advisor_attrs['uid'],
            advisor_name=advisor_attrs and advisor_attrs['name'],
            advisor_role=advisor_attrs and advisor_attrs['role'],
            advisor_dept_codes=advisor_attrs and advisor_attrs['deptCodes'],
            appointment_type=appointment_type,
            created_by=created_by,
            dept_code=dept_code,
            details=details,
            scheduled_time=scheduled_time,
            status=status,
            student_contact_info=student_contact_info,
            student_contact_type=student_contact_type,
            student_sid=student_sid,
            updated_by=created_by,
        )
        for topic in topics:
            appointment.topics.append(
                AppointmentTopic.create(appointment, topic),
            )
        db.session.add(appointment)
        std_commit()
        # Record the initial status transition in the event log.
        AppointmentEvent.create(
            appointment_id=appointment.id,
            advisor_id=advisor_attrs and advisor_attrs['id'],
            user_id=created_by,
            event_type=status,
        )
        cls.refresh_search_index()
        return appointment

    @classmethod
    def check_in(cls, appointment_id, checked_in_by, advisor_attrs):
        """Mark the appointment checked-in and assign the advisor; None if not found."""
        appointment = cls.find_by_id(appointment_id=appointment_id)
        if appointment:
            appointment.status = 'checked_in'
            appointment.advisor_uid = advisor_attrs['uid']
            appointment.advisor_name = advisor_attrs['name']
            appointment.advisor_role = advisor_attrs['role']
            appointment.advisor_dept_codes = advisor_attrs['deptCodes']
            appointment.updated_by = checked_in_by
            std_commit()
            db.session.refresh(appointment)
            AppointmentEvent.create(
                appointment_id=appointment.id,
                user_id=checked_in_by,
                advisor_id=advisor_attrs['id'],
                event_type='checked_in',
            )
            return appointment
        else:
            return None

    @classmethod
    def cancel(cls, appointment_id, cancelled_by, cancel_reason, cancel_reason_explained):
        """Cancel the appointment, clearing the advisor; None if not found."""
        appointment = cls.find_by_id(appointment_id=appointment_id)
        if appointment:
            event_type = 'cancelled'
            appointment.status = event_type
            appointment.updated_by = cancelled_by
            appointment.advisor_uid = None
            appointment.advisor_name = None
            appointment.advisor_role = None
            appointment.advisor_dept_codes = None
            AppointmentEvent.create(
                appointment_id=appointment.id,
                user_id=cancelled_by,
                event_type=event_type,
                cancel_reason=cancel_reason,
                cancel_reason_explained=cancel_reason_explained,
            )
            std_commit()
            db.session.refresh(appointment)
            cls.refresh_search_index()
            return appointment
        else:
            return None

    @classmethod
    def reserve(cls, appointment_id, reserved_by, advisor_attrs):
        """Reserve the appointment for the given advisor; None if not found."""
        appointment = cls.find_by_id(appointment_id=appointment_id)
        if appointment:
            event_type = 'reserved'
            appointment.status = event_type
            appointment.updated_by = reserved_by
            appointment.advisor_uid = advisor_attrs['uid']
            appointment.advisor_name = advisor_attrs['name']
            appointment.advisor_role = advisor_attrs['role']
            appointment.advisor_dept_codes = advisor_attrs['deptCodes']
            AppointmentEvent.create(
                appointment_id=appointment.id,
                user_id=reserved_by,
                advisor_id=advisor_attrs['id'],
                event_type=event_type,
            )
            std_commit()
            db.session.refresh(appointment)
            return appointment
        else:
            return None

    def set_to_waiting(self, updated_by):
        """Put the appointment back on the waitlist, clearing the advisor."""
        event_type = 'waiting'
        self.status = event_type
        self.updated_by = updated_by
        self.advisor_uid = None
        self.advisor_name = None
        self.advisor_role = None
        self.advisor_dept_codes = None
        AppointmentEvent.create(
            appointment_id=self.id,
            user_id=updated_by,
            event_type=event_type,
        )
        std_commit()
        db.session.refresh(self)

    @classmethod
    def unreserve_all_for_advisor(cls, advisor_uid, updated_by):
        """Return every appointment reserved by the advisor to 'waiting'."""
        appointments = cls.query.filter(and_(cls.status == 'reserved', cls.advisor_uid == advisor_uid, cls.deleted_at == None)).all()  # noqa: E711
        event_type = 'waiting'
        for appointment in appointments:
            appointment.status = event_type
            appointment.advisor_uid = None
            appointment.advisor_name = None
            appointment.advisor_role = None
            appointment.advisor_dept_codes = None
            appointment.updated_by = updated_by
            AppointmentEvent.create(
                appointment_id=appointment.id,
                user_id=updated_by,
                event_type=event_type,
            )
        std_commit()

    @classmethod
    def search(
        cls,
        search_phrase,
        advisor_uid=None,
        student_csid=None,
        topic=None,
        datetime_from=None,
        datetime_to=None,
        limit=20,
        offset=0,
    ):
        """Full-text search over appointments with optional filters.

        Returns API-shaped dicts ranked by FTS relevance; when no search
        phrase is given, all non-deleted appointments match with rank 0.
        """
        if search_phrase:
            # Tokenize the phrase and AND the terms for to_tsquery.
            search_terms = [t.group(0) for t in list(re.finditer(TEXT_SEARCH_PATTERN, search_phrase)) if t]
            search_phrase = ' & '.join(search_terms)
            fts_selector = """SELECT id, ts_rank(fts_index, plainto_tsquery('english', :search_phrase)) AS rank FROM appointments_fts_index WHERE fts_index @@ plainto_tsquery('english', :search_phrase)"""
            params = {
                'search_phrase': search_phrase,
            }
        else:
            search_terms = []
            fts_selector = 'SELECT id, 0 AS rank FROM appointments WHERE deleted_at IS NULL'
            params = {}

        # All user-supplied values go through bind parameters, never into
        # the SQL string itself.
        if advisor_uid:
            advisor_filter = 'AND appointments.advisor_uid = :advisor_uid'
            params.update({'advisor_uid': advisor_uid})
        else:
            advisor_filter = ''

        if student_csid:
            student_filter = 'AND appointments.student_sid = :student_csid'
            params.update({'student_csid': student_csid})
        else:
            student_filter = ''

        date_filter = ''
        if datetime_from:
            date_filter += ' AND created_at >= :datetime_from'
            params.update({'datetime_from': datetime_from})
        if datetime_to:
            date_filter += ' AND created_at < :datetime_to'
            params.update({'datetime_to': datetime_to})

        if topic:
            topic_join = 'JOIN appointment_topics nt on nt.topic = :topic AND nt.appointment_id = appointments.id'
            params.update({'topic': topic})
        else:
            topic_join = ''

        query = text(f""" SELECT appointments.* FROM ({fts_selector}) AS fts JOIN appointments ON fts.id = appointments.id {advisor_filter} {student_filter} {date_filter} {topic_join} ORDER BY fts.rank DESC, appointments.id LIMIT {limit} OFFSET {offset} """).bindparams(**params)
        result = db.session.execute(query)
        keys = result.keys()
        return [_to_json(search_terms, dict(zip(keys, row))) for row in result.fetchall()]

    def update(
        self,
        updated_by,
        details=None,
        scheduled_time=None,
        student_contact_info=None,
        student_contact_type=None,
        topics=(),
    ):
        """Update editable fields; bumps updated_at only when details change."""
        if details != self.details:
            self.updated_at = utc_now()
            self.updated_by = updated_by
        self.details = details
        self.scheduled_time = scheduled_time
        self.student_contact_info = student_contact_info
        self.student_contact_type = student_contact_type
        _update_appointment_topics(self, topics, updated_by)
        std_commit()
        db.session.refresh(self)
        self.refresh_search_index()

    @classmethod
    def refresh_search_index(cls):
        # Refresh the FTS materialized views in a background session.
        def _refresh_search_index(db_session):
            db_session.execute(text('REFRESH MATERIALIZED VIEW appointments_fts_index'))
            db_session.execute(text('REFRESH MATERIALIZED VIEW advisor_author_index'))
            std_commit(session=db_session)
        bg_execute(_refresh_search_index)

    @classmethod
    def delete(cls, appointment_id):
        """Soft-delete the appointment and its topics."""
        appointment = cls.find_by_id(appointment_id)
        if appointment:
            now = utc_now()
            appointment.deleted_at = now
            for topic in appointment.topics:
                topic.deleted_at = now
            std_commit()
            cls.refresh_search_index()

    def status_change_available(self):
        return self.status in ['reserved', 'waiting']

    def to_api_json(self, current_user_id):
        """Serialize the appointment for the API, including read state."""
        topics = [t.to_api_json() for t in self.topics if not t.deleted_at]
        departments = None
        if self.advisor_dept_codes:
            departments = [{'code': c, 'name': BERKELEY_DEPT_CODE_TO_NAME.get(c, c)} for c in self.advisor_dept_codes]
        api_json = {
            'id': self.id,
            'advisorId': AuthorizedUser.get_id_per_uid(self.advisor_uid),
            'advisorName': self.advisor_name,
            'advisorRole': self.advisor_role,
            'advisorUid': self.advisor_uid,
            'advisorDepartments': departments,
            'appointmentType': self.appointment_type,
            'createdAt': _isoformat(self.created_at),
            'createdBy': self.created_by,
            'deptCode': self.dept_code,
            'details': self.details,
            'read': AppointmentRead.was_read_by(current_user_id, self.id),
            'student': {
                'sid': self.student_sid,
            },
            'topics': topics,
            'updatedAt': _isoformat(self.updated_at),
            'updatedBy': self.updated_by,
        }
        if self.appointment_type == 'Scheduled':
            api_json.update({
                'scheduledTime': _isoformat(self.scheduled_time),
                'studentContactInfo': self.student_contact_info,
                'studentContactType': self.student_contact_type,
            })
        return {
            **api_json,
            **appointment_event_to_json(self.id, self.status),
        }
class Flow(Model):
    '''Model for flows

    A Flow is the series of :py:class:`~purchasing.data.stages.Stage` objects
    that a contract will go through as part of Conductor. It is meant to be
    as configurable and flexible as possible. Because of the nature of Flows,
    it is best to not allow them to be edited or deleted once they are in
    use. Instead, there is an ``is_archived`` flag. This is because of the
    difficulty of knowing how to handle contracts that are currently in the
    middle of a flow if that flow is edited. Instead, it is better to create
    a new flow.

    Attributes:
        id: Primary key unique ID
        flow_name: Name of this flow
        contract: Many-to-one relationship with
            :py:class:`~purchasing.data.contracts.ContractBase` (many
            contracts can share a flow)
        stage_order: Array of stage_id integers
        is_archived: Boolean of whether the flow is archived or active
    '''
    __tablename__ = 'flow'

    id = Column(db.Integer, primary_key=True, index=True)
    flow_name = Column(db.Text, unique=True)
    contract = db.relationship('ContractBase', backref='flow', lazy='subquery')
    stage_order = Column(ARRAY(db.Integer))
    is_archived = Column(db.Boolean, default=False, nullable=False)

    def __unicode__(self):
        return self.flow_name

    @classmethod
    def all_flow_query_factory(cls):
        '''Query factory that returns query of all flows
        '''
        return cls.query

    @classmethod
    def nonarchived_query_factory(cls):
        '''Query factory that returns query of all non-archived flows
        '''
        # ``== False`` is deliberate: SQLAlchemy overloads ``==`` to build
        # SQL, so ``is False`` would not produce the intended filter.
        return cls.query.filter(cls.is_archived == False)  # noqa: E712

    def get_ordered_stages(self):
        '''Turns the flow's stage_order attribute into Stage objects

        Returns:
            Ordered list of :py:class:`~purchasing.data.stages.Stage` objects
            in the flow's ``stage_order``
        '''
        return [Stage.query.get(i) for i in self.stage_order]

    def create_contract_stages(self, contract):
        '''Creates new rows in contract_stage table.

        Extracts the rows out of the given flow, and creates new rows
        in the contract_stage table for each of them.

        If the stages already exist, that means that the contract is
        switching back into a flow that it had already been in. To handle
        this, the "revert" flag is set to true, which should signal to a
        downstream process to roll the stages back to the first one in the
        current flow.

        Arguments:
            contract: A :py:class:`~purchasing.data.contracts.ContractBase`
                object

        Returns:
            A three-tuple of (the flow's stage order, a list of the flow's
            :py:class:`~purchasing.data.contract_stages.ContractStage`
            objects, whether we are "reverting")
        '''
        revert = False
        contract_stages = []
        for stage_id in self.stage_order:
            try:
                contract_stages.append(ContractStage.create(
                    contract_id=contract.id,
                    flow_id=self.id,
                    stage_id=stage_id,
                ))
            except (IntegrityError, FlushError):
                # The row already exists: the contract has been in this flow
                # before. Re-use the existing stage row and flag a revert.
                revert = True
                db.session.rollback()
                stage = ContractStage.query.filter(
                    ContractStage.contract_id == contract.id,
                    ContractStage.flow_id == self.id,
                    ContractStage.stage_id == stage_id).first()
                if stage:
                    contract_stages.append(stage)
                else:
                    # BUG FIX: this previously did ``raise IntegrityError``,
                    # which instantiates the exception class with no
                    # arguments and itself fails with TypeError. Re-raise
                    # the original exception instead.
                    raise
            # NOTE: a redundant ``except Exception: raise`` was removed --
            # it was a no-op.

        contract.flow_id = self.id
        db.session.commit()
        return self.stage_order, contract_stages, revert

    def _build_row(self, row, exited, data_dict):
        '''Accumulate one metrics result row into ``data_dict``.

        Arguments:
            row: A result row from :py:meth:`get_metrics_csv_data`
            exited: Effective exit time for the row's stage (``row.exited``,
                or "now" for stages still open)
            data_dict: Per-contract accumulator keyed by contract id

        Returns:
            The updated ``data_dict``
        '''
        stage_entry = {
            'name': row.stage_name,
            'id': row.stage_id,
            'entered': localize_datetime(row.entered).isoformat(),
            'exited': localize_datetime(exited).isoformat(),
            # Clamp negative durations (clock skew / bad data) to zero.
            'seconds': max([(exited - row.entered).total_seconds(), 0]),
        }
        try:
            data_dict[row.contract_id]['stages'].append(stage_entry)
        except KeyError:
            # First stage seen for this contract: create its metadata entry.
            data_dict[row.contract_id] = {
                'description': row.description,
                'email': row.email,
                'department': row.department,
                'contract_id': row.contract_id,
                'stages': [stage_entry],
            }
        return data_dict

    def build_metrics_data(self):
        '''Build the raw data sets to be transformed client-side for metrics charts

        Example:
            .. code-block:: python

                results = {
                    'current': {
                        'contract id': {
                            'description': 'a contract description',
                            'email': 'the contract is assigned to this email',
                            'department': 'the primary department for the contract',
                            'contract_id': 'the contract id',
                            'stages': [{
                                'name': 'the stage name',
                                'id': 'the stage id',
                                'entered': 'when the stage was entered',
                                'exited': 'when the stage was exited',
                                'seconds': 'number of seconds the contract spent in this stage',
                            }, ...]
                        }, ...
                    },
                    'complete': {
                        'contract id': { }, ...
                    }
                }

        Returns:
            A results dictionary described in the example above.
        '''
        raw_data = self.get_metrics_csv_data()
        results = {'current': {}, 'complete': {}}

        for row in raw_data:
            # Stages not yet exited are measured against "now".
            exited = row.exited if row.exited else datetime.datetime.utcnow()
            if row.exited is None and row.is_archived:
                # Archived contracts stuck mid-stage are excluded entirely.
                pass
            elif row.exited is None:
                results['current'] = self._build_row(row, exited, results['current'])
            else:
                results['complete'] = self._build_row(row, exited, results['complete'])
        return results

    def reshape_metrics_granular(self, enter_and_exit=False):
        '''Transform long data from database into wide data for consumption

        Take in a result set (list of tuples), return a dictionary of
        results. The key for the dictionary is the contract id, and the
        values are a list of (fieldname, value). Metadata (common to all
        rows) is listed first, and timing information from each stage is
        listed afterwords. Sorting is assumed to be done on the database
        layer.

        Arguments:
            enter_and_exit: A boolean option of whether to add both the
                enter and exit times to the results list

        Returns:
            * Results - a dictionary of lists which can be used to generate
              a .csv or .tsv file to be downloaded by the client
            * Headers - A list of strings which can be used to create the
              headers for the downloadable file
        '''
        raw_data = self.get_metrics_csv_data()
        results = defaultdict(list)
        headers = []

        for ix, row in enumerate(raw_data):
            if ix == 0:
                headers.extend([
                    'item_number', 'description', 'assigned_to', 'department'
                ])

            # if this is a new contract row, append metadata
            if len(results[row.contract_id]) == 0:
                results[row.contract_id].extend([
                    row.contract_id,
                    row.description,
                    row.email,
                    row.department,
                ])

            # append the stage date data
            if enter_and_exit and row.exited:
                results[row.contract_id].extend([
                    localize_datetime(row.exited),
                    localize_datetime(row.entered)
                ])
                # BUG FIX: the membership test previously used the raw stage
                # name while the appended header had spaces replaced with
                # underscores, so stage names containing spaces got their
                # headers re-appended on every row.
                exit_header = row.stage_name.replace(' ', '_') + '_exit'
                if exit_header not in headers:
                    headers.append(exit_header)
                    headers.append(row.stage_name.replace(' ', '_') + '_enter')
            else:
                results[row.contract_id].extend(
                    [localize_datetime(row.exited)])
                if row.stage_name not in headers:
                    headers.append(row.stage_name)

        return results, headers

    def get_metrics_csv_data(self):
        '''Raw SQL query that returns the raw data to be reshaped for
        download or charting
        '''
        return db.session.execute(
            '''
            select
                x.contract_id, x.description, x.department,
                x.email, x.stage_name, x.rn, x.stage_id, x.is_archived,
                min(x.entered) as entered, max(x.exited) as exited
            from (
                select
                    c.id as contract_id, c.description, d.name as department,
                    c.is_archived, u.email, s.name as stage_name,
                    s.id as stage_id, cs.exited, cs.entered,
                    row_number() over
                        (partition by c.id
                         order by cs.entered asc, cs.id asc) as rn,
                    f.stage_order[s.id] as pos
                from contract_stage cs
                join stage s on cs.stage_id = s.id
                join contract c on cs.contract_id = c.id
                join users u on c.assigned_to = u.id
                left join department d on c.department_id = d.id
                join flow f on cs.flow_id = f.id
                where cs.entered is not null
                    and cs.flow_id = :flow_id
                    and c.has_metrics is true
            ) x
            group by 1,2,3,4,5,6,7,8, pos
            order by contract_id, pos, rn asc
            ''', {
                'flow_id': self.id
            }).fetchall()
class Contest(Base):
    """Class to store a contest (which is a single day of a
    programming competition).

    """
    __tablename__ = 'contests'
    # Sanity constraints on the contest timeline and token generation.
    __table_args__ = (
        CheckConstraint("start <= stop"),
        CheckConstraint("stop <= analysis_start"),
        CheckConstraint("analysis_start <= analysis_stop"),
        CheckConstraint("token_gen_initial <= token_gen_max"),
    )

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # Short name of the contest.
    name = Column(Unicode, CodenameConstraint("name"),
                  nullable=False, unique=True)
    # Description of the contest (human readable).
    description = Column(Unicode, nullable=False)

    # Presentation of the contest (human readable).
    presentation = Column(Unicode, default="", nullable=False)

    # The list of language codes of the localizations that contestants
    # are allowed to use (empty means all).
    allowed_localizations = Column(ARRAY(String), nullable=False, default=[])

    # The list of names of languages allowed in the contest.
    languages = Column(ARRAY(String), nullable=False,
                       default=["C11 / gcc", "C++11 / g++", "Pascal / fpc"])

    # Whether contestants allowed to download their submissions.
    submissions_download_allowed = Column(Boolean, nullable=False,
                                          default=True)

    # Whether the user question is enabled.
    allow_questions = Column(Boolean, nullable=False, default=True)

    # Whether the user test interface is enabled.
    allow_user_tests = Column(Boolean, nullable=False, default=True)

    # Whether to prevent hidden participations to log in.
    block_hidden_participations = Column(Boolean, nullable=False,
                                         default=False)

    # Whether to allow username/password authentication
    allow_password_authentication = Column(Boolean, nullable=False,
                                           default=True)

    # Whether to enforce that the IP address of the request matches
    # the IP address or subnet specified for the participation (if
    # present).
    ip_restriction = Column(Boolean, nullable=False, default=True)

    # Whether to automatically log in users connecting from an IP
    # address specified in the ip field of a participation to this
    # contest.
    ip_autologin = Column(Boolean, nullable=False, default=False)

    # The parameters that control contest-tokens follow. Note that
    # their effect during the contest depends on the interaction with
    # the parameters that control task-tokens, defined on each Task.

    # The "kind" of token rules that will be active during the contest.
    # - disabled: The user will never be able to use any token.
    # - finite: The user has a finite amount of tokens and can choose
    #   when to use them, subject to some limitations. Tokens may not
    #   be all available at start, but given periodically during the
    #   contest instead.
    # - infinite: The user will always be able to use a token.
    token_mode = Column(Enum("disabled", "finite", "infinite",
                             name="token_mode"),
                        nullable=False, default="infinite")

    # The maximum number of tokens a contestant is allowed to use
    # during the whole contest (on all tasks).
    token_max_number = Column(Integer,
                              CheckConstraint("token_max_number > 0"),
                              nullable=True)

    # The minimum interval between two successive uses of tokens for
    # the same user (on any task).
    token_min_interval = Column(
        Interval,
        CheckConstraint("token_min_interval >= '0 seconds'"),
        nullable=False, default=timedelta())

    # The parameters that control generation (if mode is "finite"):
    # the user starts with "initial" tokens and receives "number" more
    # every "interval", but their total number is capped to "max".
    token_gen_initial = Column(Integer,
                               CheckConstraint("token_gen_initial >= 0"),
                               nullable=False, default=2)
    token_gen_number = Column(Integer,
                              CheckConstraint("token_gen_number >= 0"),
                              nullable=False, default=2)
    token_gen_interval = Column(
        Interval,
        CheckConstraint("token_gen_interval > '0 seconds'"),
        nullable=False, default=timedelta(minutes=30))
    token_gen_max = Column(Integer,
                           CheckConstraint("token_gen_max > 0"),
                           nullable=True)

    # Beginning and ending of the contest.
    start = Column(DateTime, nullable=False, default=datetime(2000, 1, 1))
    stop = Column(DateTime, nullable=False, default=datetime(2100, 1, 1))

    # Beginning and ending of the contest anaylsis mode.
    analysis_enabled = Column(Boolean, nullable=False, default=False)
    analysis_start = Column(DateTime, nullable=False,
                            default=datetime(2100, 1, 1))
    analysis_stop = Column(DateTime, nullable=False,
                           default=datetime(2100, 1, 1))

    # Timezone for the contest. All timestamps in CWS will be shown
    # using the timezone associated to the logged-in user or (if it's
    # None or an invalid string) the timezone associated to the
    # contest or (if it's None or an invalid string) the local
    # timezone of the server. This value has to be a string like
    # "Europe/Rome", "Australia/Sydney", "America/New_York", etc.
    timezone = Column(Unicode, nullable=True)

    # Max contest time for each user in seconds.
    per_user_time = Column(Interval,
                           CheckConstraint("per_user_time >= '0 seconds'"),
                           nullable=True)

    # Maximum number of submissions or user_tests allowed for each user
    # during the whole contest or None to not enforce this limitation.
    max_submission_number = Column(
        Integer,
        CheckConstraint("max_submission_number > 0"),
        nullable=True)
    max_user_test_number = Column(
        Integer,
        CheckConstraint("max_user_test_number > 0"),
        nullable=True)

    # Minimum interval between two submissions or user_tests, or None to
    # not enforce this limitation.
    min_submission_interval = Column(
        Interval,
        CheckConstraint("min_submission_interval > '0 seconds'"),
        nullable=True)
    min_user_test_interval = Column(
        Interval,
        CheckConstraint("min_user_test_interval > '0 seconds'"),
        nullable=True)

    # The scores for this contest will be rounded to this number of
    # decimal places.
    score_precision = Column(Integer,
                             CheckConstraint("score_precision >= 0"),
                             nullable=False, default=0)

    # Path to the contest logo image (empty string means none).
    path_to_logo = Column(Unicode, default="", nullable=False)

    # Follows the description of the fields automatically added by
    # SQLAlchemy.
    # tasks (list of Task objects)
    # announcements (list of Announcement objects)
    # participations (list of Participation objects)

    # Moreover, we have the following methods.
    # get_submissions (defined in __init__.py)
    # get_submission_results (defined in __init__.py)
    # get_user_tests (defined in __init__.py)
    # get_user_test_results (defined in __init__.py)

    # FIXME - Use SQL syntax
    def get_task(self, task_name):
        """Return the first task in the contest with the given name.

        task_name (string): the name of the task we are interested in.

        return (Task): the corresponding task object.

        raise (KeyError): if no tasks with the given name are found.

        """
        for task in self.tasks:
            if task.name == task_name:
                return task
        raise KeyError("Task not found")

    # FIXME - Use SQL syntax
    def get_task_index(self, task_name):
        """Return the index of the first task in the contest with the
        given name.

        task_name (string): the name of the task we are interested in.

        return (int): the index of the corresponding task.

        raise (KeyError): if no tasks with the given name are found.

        """
        for idx, task in enumerate(self.tasks):
            if task.name == task_name:
                return idx
        raise KeyError("Task not found")

    def enumerate_files(self, skip_submissions=False,
                        skip_user_tests=False, skip_generated=False):
        """Enumerate all the files (by digest) referenced by the
        contest.

        return (set): a set of strings, the digests of the file
            referenced in the contest.

        """
        # Here we cannot use yield, because we want to detect
        # duplicates
        files = set()
        for task in self.tasks:
            # Enumerate statements
            for file_ in itervalues(task.statements):
                files.add(file_.digest)
            # Enumerate attachments
            for file_ in itervalues(task.attachments):
                files.add(file_.digest)
            # Enumerate managers
            for dataset in task.datasets:
                for file_ in itervalues(dataset.managers):
                    files.add(file_.digest)
            # Enumerate testcases
            for dataset in task.datasets:
                for testcase in itervalues(dataset.testcases):
                    files.add(testcase.input)
                    files.add(testcase.output)

        if not skip_submissions:
            for submission in self.get_submissions():
                # Enumerate files
                for file_ in itervalues(submission.files):
                    files.add(file_.digest)
                # Enumerate executables
                if not skip_generated:
                    for sr in submission.results:
                        for file_ in itervalues(sr.executables):
                            files.add(file_.digest)

        if not skip_user_tests:
            for user_test in self.get_user_tests():
                files.add(user_test.input)
                if not skip_generated:
                    for ur in user_test.results:
                        if ur.output is not None:
                            files.add(ur.output)
                # Enumerate files
                for file_ in itervalues(user_test.files):
                    files.add(file_.digest)
                # Enumerate managers
                for file_ in itervalues(user_test.managers):
                    files.add(file_.digest)
                # Enumerate executables
                if not skip_generated:
                    for ur in user_test.results:
                        for file_ in itervalues(ur.executables):
                            files.add(file_.digest)

        return files

    def phase(self, timestamp):
        """Return:
        -1 if contest isn't started yet at time timestamp,
         0 if the contest is active at time timestamp,
         1 if the contest has ended but analysis mode hasn't started yet
         2 if the contest has ended and analysis mode is active
         3 if the contest has ended and analysis mode is disabled or
           has ended

        timestamp (datetime): the time we are iterested in.

        return (int): contest phase as above.

        """
        if timestamp < self.start:
            return -1
        if timestamp <= self.stop:
            return 0
        if self.analysis_enabled:
            if timestamp < self.analysis_start:
                return 1
            elif timestamp <= self.analysis_stop:
                return 2
        return 3

    @staticmethod
    def _tokens_available(token_timestamps, token_mode,
                          token_max_number, token_min_interval,
                          token_gen_initial, token_gen_number,
                          token_gen_interval, token_gen_max,
                          start, timestamp):
        """Do exactly the same computation stated in tokens_available,
        but ensuring only a single set of token_* directive.
        Basically, tokens_available calls this twice for contest-wise
        and task-wise parameters and then assembles the result.

        token_timestamps ([datetime]): list of timestamps of used
            tokens, sorted in chronological order.
        token_* (int): the parameters we want to enforce.
        start (datetime): the time from which we start accumulating
            tokens.
        timestamp (datetime): the time relative to which make the
            calculation (has to be greater than or equal to all
            elements of token_timestamps).

        return ((int, datetime|None, datetime|None)): same as
            tokens_available.

        """
        # If tokens are disabled there are no tokens available.
        if token_mode == "disabled":
            return (0, None, None)

        # If tokens are infinite there are always tokens available.
        if token_mode == "infinite":
            return (-1, None, None)

        # expiration is the timestamp at which all min_intervals for
        # the tokens played up to now have expired (i.e. the first
        # time at which we can play another token). If no tokens have
        # been played so far, this time is the start of the contest.
        expiration = \
            token_timestamps[-1] + token_min_interval \
            if len(token_timestamps) > 0 else start

        # If we already played the total number allowed, we don't have
        # anything left.
        played_tokens = len(token_timestamps)
        if token_max_number is not None and played_tokens >= token_max_number:
            return (0, None, None)

        # avail is the current number of available tokens. We are
        # going to rebuild all the history to know how many of them we
        # have now.

        # We start with the initial number (it's already capped to max
        # by the DB). token_gen_initial can be ignored after this.
        avail = token_gen_initial

        def generate_tokens(prev_time, next_time):
            """Compute how many tokens have been generated between the
            two timestamps.

            prev_time (datetime): timestamp of begin of interval.
            next_time (datetime): timestamp of end of interval.

            return (int): number of tokens generated.

            """
            # How many generation times we passed from start to
            # the previous considered time?
            before_prev = ((prev_time - start).total_seconds()
                           // token_gen_interval.total_seconds())
            # And from start to the current considered time?
            before_next = ((next_time - start).total_seconds()
                           // token_gen_interval.total_seconds())
            # So...
            return token_gen_number * (before_next - before_prev)

        # Previous time we considered
        prev_token = start

        # Simulating!
        for token in token_timestamps:
            # Increment the number of tokens because of generation.
            avail += generate_tokens(prev_token, token)
            if token_gen_max is not None:
                avail = min(avail, token_gen_max)

            # Play the token.
            avail -= 1

            prev_token = token

        avail += generate_tokens(prev_token, timestamp)
        if token_gen_max is not None:
            avail = min(avail, token_gen_max)

        # Compute the time in which the next token will be generated.
        next_gen_time = None
        if token_gen_number > 0 and \
                (token_gen_max is None or avail < token_gen_max):
            next_gen_time = \
                start + token_gen_interval * \
                int((timestamp - start).total_seconds() /
                    token_gen_interval.total_seconds() + 1)

        # If we have more tokens than how many we are allowed to play,
        # cap it, and note that no more will be generated.
        if token_max_number is not None:
            if avail >= token_max_number - played_tokens:
                avail = token_max_number - played_tokens
                next_gen_time = None

        return (avail,
                next_gen_time,
                expiration if expiration > timestamp else None)

    def tokens_available(self, participation, task, timestamp=None):
        """Return three pieces of data:

        [0] the number of available tokens for the user to play on the
        task (independently from the fact that (s)he can play it right
        now or not due to a min_interval wating for expiration); -1
        means infinite tokens;

        [1] the next time in which a token will be generated (or
        None); from the user perspective, i.e.: if the user will do
        nothing, [1] is the first time in which their number of
        available tokens will be greater than [0];

        [2] the time when the min_interval will expire, or None

        In particular, let r the return value of this method. We can
        sketch the code in the following way.:

        if r[0] > 0 or r[0] == -1:
            we have tokens
            if r[2] is None:
                we can play a token
            else:
                we must wait till r[2] to play a token
            if r[1] is not None:
                next one will be generated at r[1]
            else:
                no other tokens will be generated (max/total reached ?)
        else:
            we don't have tokens right now
            if r[1] is not None:
                next one will be generated at r[1]
                if r[2] is not None and r[2] > r[1]:
                    but we must wait also till r[2] to play it
            else:
                no other tokens will be generated (max/total reached ?)

        Note also that this method assumes that all played tokens were
        regularly played, and that there are no tokens played in the
        future. Also, if r[0] == 0 and r[1] is None, then r[2] should
        be ignored.

        participation (Participation): the participation.
        task (Task): the task.
        timestamp (datetime|None): the time relative to which making
            the calculation, or None to use now.

        return ((int, datetime|None, datetime|None)): see description
            above.

        """
        if timestamp is None:
            timestamp = make_datetime()

        # Take the list of the tokens already played (sorted by time).
        tokens = participation.get_tokens()
        token_timestamps_contest = sorted(token.timestamp
                                          for token in tokens)
        token_timestamps_task = sorted(
            token.timestamp for token in tokens
            if token.submission.task.name == task.name)

        # If the contest is USACO-style (i.e., the time for each user
        # start when they log in for the first time), then we start
        # accumulating tokens from the user starting time; otherwise,
        # from the start of the contest.
        start = self.start
        if self.per_user_time is not None:
            start = participation.starting_time

        # Compute separately for contest-wise and task-wise.
        res_contest = Contest._tokens_available(
            token_timestamps_contest, self.token_mode,
            self.token_max_number, self.token_min_interval,
            self.token_gen_initial, self.token_gen_number,
            self.token_gen_interval, self.token_gen_max, start, timestamp)
        res_task = Contest._tokens_available(
            token_timestamps_task, task.token_mode,
            task.token_max_number, task.token_min_interval,
            task.token_gen_initial, task.token_gen_number,
            task.token_gen_interval, task.token_gen_max, start, timestamp)

        # Merge the results.

        # First, the "expiration".
        if res_contest[2] is None:
            expiration = res_task[2]
        elif res_task[2] is None:
            expiration = res_contest[2]
        else:
            expiration = max(res_task[2], res_contest[2])

        # Then, check if both are infinite
        if res_contest[0] == -1 and res_task[0] == -1:
            res = (-1, None, expiration)
        # Else, "combine" them appropriately.
        else:
            # Having infinite contest tokens, in this situation, is the
            # same as having a finite number that is strictly greater
            # than the task tokens. The same holds the other way, too.
            if res_contest[0] == -1:
                res_contest = (res_task[0] + 1, None, None)
            if res_task[0] == -1:
                res_task = (res_contest[0] + 1, None, None)

            # About next token generation time: we need to see when the
            # *minimum* between res_contest[0] and res_task[0] is
            # increased by one, so if there is an actual minimum we
            # need to consider only the next generation time for it.
            # Otherwise, if they are equal, we need both to generate an
            # additional token and we store the maximum between the two
            # next times of generation.
            if res_contest[0] < res_task[0]:
                # We have more task-tokens than contest-tokens.
                # We just need a contest-token to be generated.
                res = (res_contest[0], res_contest[1], expiration)
            elif res_task[0] < res_contest[0]:
                # We have more contest-tokens than task-tokens.
                # We just need a task-token to be generated.
                res = (res_task[0], res_task[1], expiration)
            else:
                # Darn, we need both!
                if res_contest[1] is None or res_task[1] is None:
                    res = (res_task[0], None, expiration)
                else:
                    res = (res_task[0], max(res_contest[1], res_task[1]),
                           expiration)

        return res
class Workflow(Base):
    """Execution-database model for a workflow.

    A workflow is a named graph of nodes (actions, conditions, transforms,
    triggers) connected by branches, with a designated starting node and a
    set of workflow-scoped variables. ``validate`` populates ``errors`` /
    ``is_valid`` on the workflow and each of its actions.
    """
    __tablename__ = "workflow"

    # Columns common to all DB models
    id_ = Column(UUID(as_uuid=True), primary_key=True, unique=True,
                 nullable=False, default=uuid4)

    # Columns common to validatable Workflow components
    errors = Column(ARRAY(String))
    is_valid = Column(Boolean, default=True)

    name = Column(String(80), nullable=False, unique=True)
    # UUID of the node where execution begins.
    start = Column(UUID(as_uuid=True))
    description = Column(String(), default="")
    tags = Column(JSON, default="")
    _walkoff_type = Column(String(80), default=__tablename__)
    permissions = Column(JSON)

    actions = relationship("Action", cascade="all, delete-orphan",
                           passive_deletes=True)
    branches = relationship("Branch", cascade="all, delete-orphan",
                            passive_deletes=True)
    conditions = relationship("Condition", cascade="all, delete-orphan",
                              passive_deletes=True)
    transforms = relationship("Transform", cascade="all, delete-orphan",
                              passive_deletes=True)
    workflow_variables = relationship("WorkflowVariable",
                                      cascade="save-update")
    triggers = relationship("Trigger", cascade="all, delete-orphan",
                            passive_deletes=True)

    # Attribute names checked recursively by is_valid_rec().
    children = ['actions', 'conditions', 'transforms', 'triggers']

    def __init__(self, **kwargs):
        super(Workflow, self).__init__(**kwargs)
        self._walkoff_type = self.__tablename__
        # Validate eagerly so errors/is_valid are set on construction.
        self.validate()

    def validate(self):
        """Validates the object"""
        node_ids = {
            node.id_
            for node in self.actions + self.conditions + self.transforms
            + self.triggers
        }
        wfv_ids = {
            workflow_var.id_ for workflow_var in self.workflow_variables
        }
        global_ids = set(
            id_ for id_, in
            current_app.running_context.execution_db.session.query(
                GlobalVariable.id_))

        self.errors = []
        if not self.start:
            self.errors.append("Workflows must have a starting action.")
        elif self.actions and self.start not in node_ids:
            self.errors.append(
                f"Workflow start ID '{self.start}' not found in nodes")

        # Drop branches whose endpoints no longer exist in this workflow.
        self.branches[:] = [
            branch for branch in self.branches
            if branch.source_id in node_ids
            and branch.destination_id in node_ids
        ]

        action: Action
        for action in self.actions:
            errors = []
            action_api = \
                current_app.running_context.execution_db.session.query(
                    ActionApi).filter(
                    ActionApi.location ==
                    f"{action.app_name}.{action.name}").first()
            if not action_api:
                self.errors.append(
                    f"Action {action.app_name}.{action.name} does not exist")
                continue

            # Pair up workflow parameters with their API declarations.
            params = {}
            for p in action_api.parameters:
                params[p.name] = {"api": p}

            count = 0
            for p in action.parameters:
                params.get(p.name, {})["wf"] = p
                if p.parallelized:
                    count += 1

            # BUG FIX: these used to be appended to ``action.errors``,
            # which is unconditionally overwritten by ``action.errors =
            # errors`` below, silently discarding them. Collect them in
            # the local list instead.
            if count == 0 and action.parallelized:
                errors.append("No parallelized parameter set.")
            elif count == 1 and not action.parallelized:
                errors.append("Set action to be parallelized.")
            elif count > 1:
                errors.append("Too many parallelized parameters")

            for name, pair in params.items():
                api = pair.get("api")
                wf = pair.get("wf")
                message = ""
                if not api:
                    message = (f"Parameter '{wf.name}' found in workflow but "
                               f"not in '{action.app_name}' API.")
                elif not wf:
                    if api.required:
                        message = (
                            f"Parameter '{api.name}' not found in workflow "
                            f"but is required in "
                            f"'{action.app_name}' API.")
                elif wf.variant == ParameterVariant.STATIC_VALUE:
                    # Static values must satisfy the API's JSON schema.
                    try:
                        Draft4Validator(api.schema).validate(wf.value)
                    except JSONSchemaValidationError as e:
                        message = (
                            f"Parameter {wf.name} value {wf.value} is not "
                            f"valid under given schema "
                            f"{api.schema}. JSONSchema output: {e}")
                elif wf.variant != ParameterVariant.STATIC_VALUE:
                    # References must be valid UUIDs pointing at an
                    # existing node / workflow variable / global.
                    wf_uuid = validate_uuid(wf.value)
                    if not wf_uuid:
                        message = (
                            f"Parameter '{wf.name}' is a reference but "
                            f"'{wf.value}' is not a valid "
                            f"uuid4")
                    elif (wf.variant == ParameterVariant.ACTION_RESULT
                            and wf_uuid not in node_ids):
                        message = (
                            f"Parameter '{wf.name}' refers to action "
                            f"'{wf.value}' "
                            f"which does not exist in this workflow.")
                    elif (wf.variant == ParameterVariant.WORKFLOW_VARIABLE
                            and wf_uuid not in wfv_ids):
                        message = (
                            f"Parameter '{wf.name}' refers to workflow "
                            f"variable '{wf.value}' "
                            f"which does not exist in this workflow.")
                    elif (wf.variant == ParameterVariant.GLOBAL
                            and wf_uuid not in global_ids):
                        message = (
                            f"Parameter '{wf.name}' refers to global "
                            f"variable '{wf.value}' "
                            f"which does not exist.")
                    elif wf.parallelized and not api.parallelizable:
                        # BUG FIX: also collected locally (see above).
                        errors.append(
                            f"Parameter {wf.name} is marked parallelized in "
                            f"workflow, but is not "
                            f"parallelizable in api")
                # BUG FIX: was ``if message is not ""`` -- an identity
                # comparison against a literal (SyntaxWarning on Python
                # 3.8+, implementation-dependent result). Truthiness is
                # the intended check.
                if message:
                    errors.append(message)

            action.errors = errors
            action.is_valid = action.is_valid_rec()
        self.is_valid = self.is_valid_rec()

    def is_valid_rec(self):
        """Return False if this node or any of its children has errors."""
        if self.errors:
            return False
        for child in self.children:
            child = getattr(self, child, None)
            if isinstance(child, list):
                for actual_child in child:
                    if not actual_child.is_valid_rec():
                        return False
            elif child is not None:
                if not child.is_valid_rec():
                    return False
        return True
class Event(SearchableTitleMixin, DescriptionMixin, LocationMixin, ProtectionManagersMixin, AttachedItemsMixin,
            AttachedNotesMixin, PersonLinkDataMixin, db.Model):
    """An Indico event

    This model contains the most basic information related to an event.

    Note that the ACL is currently only used for managers but not for
    view access!
    """
    __tablename__ = 'events'
    disallowed_protection_modes = frozenset()
    inheriting_have_acl = True
    allow_access_key = True
    allow_no_access_contact = True
    location_backref_name = 'events'
    allow_location_inheritance = False
    possible_render_modes = {RenderMode.html}
    default_render_mode = RenderMode.html
    # Name-mangled to _Event__logging_disabled; toggled by `logging_disabled`
    # below so `log()` can be turned into a no-op temporarily.
    __logging_disabled = False

    ATTACHMENT_FOLDER_ID_COLUMN = 'event_id'

    @strict_classproperty
    @classmethod
    def __auto_table_args(cls):
        # Indexes + CHECK constraints enforced at the DB level; combined into
        # `__table_args__` via `auto_table_args` below.
        return (db.Index('ix_events_start_dt_desc', cls.start_dt.desc()),
                db.Index('ix_events_end_dt_desc', cls.end_dt.desc()),
                db.Index('ix_events_not_deleted_category', cls.is_deleted, cls.category_id),
                db.Index('ix_events_not_deleted_category_dates', cls.is_deleted, cls.category_id, cls.start_dt,
                         cls.end_dt),
                db.Index('ix_uq_events_url_shortcut', db.func.lower(cls.url_shortcut), unique=True,
                         postgresql_where=db.text('NOT is_deleted')),
                db.CheckConstraint("category_id IS NOT NULL OR is_deleted", 'category_data_set'),
                db.CheckConstraint("(logo IS NULL) = (logo_metadata::text = 'null')", 'valid_logo'),
                db.CheckConstraint("(stylesheet IS NULL) = (stylesheet_metadata::text = 'null')",
                                   'valid_stylesheet'),
                db.CheckConstraint("end_dt >= start_dt", 'valid_dates'),
                db.CheckConstraint("url_shortcut != ''", 'url_shortcut_not_empty'),
                db.CheckConstraint("cloned_from_id != id", 'not_cloned_from_self'),
                db.CheckConstraint('visibility IS NULL OR visibility >= 0', 'valid_visibility'),
                {'schema': 'events'})

    @declared_attr
    def __table_args__(cls):
        return auto_table_args(cls)

    #: The ID of the event
    id = db.Column(db.Integer, primary_key=True)
    #: If the event has been deleted
    is_deleted = db.Column(db.Boolean, nullable=False, default=False)
    #: If the event is locked (read-only mode)
    is_locked = db.Column(db.Boolean, nullable=False, default=False)
    #: The ID of the user who created the event
    creator_id = db.Column(db.Integer, db.ForeignKey('users.users.id'), nullable=False, index=True)
    #: The ID of immediate parent category of the event
    category_id = db.Column(db.Integer, db.ForeignKey('categories.categories.id'), nullable=True, index=True)
    #: The ID of the series this events belongs to
    series_id = db.Column(db.Integer, db.ForeignKey('events.series.id'), nullable=True, index=True)
    #: If this event was cloned, the id of the parent event
    cloned_from_id = db.Column(db.Integer, db.ForeignKey('events.events.id'), nullable=True, index=True)
    #: The creation date of the event
    created_dt = db.Column(UTCDateTime, nullable=False, index=True, default=now_utc)
    #: The start date of the event
    start_dt = db.Column(UTCDateTime, nullable=False, index=True)
    #: The end date of the event
    end_dt = db.Column(UTCDateTime, nullable=False, index=True)
    #: The timezone of the event
    timezone = db.Column(db.String, nullable=False)
    #: The type of the event
    _type = db.Column('type', PyIntEnum(EventType), nullable=False)
    #: The visibility depth in category overviews
    visibility = db.Column(db.Integer, nullable=True, default=None)
    #: A list of tags/keywords for the event
    keywords = db.Column(ARRAY(db.String), nullable=False, default=[])
    #: The URL shortcut for the event
    url_shortcut = db.Column(db.String, nullable=True)
    #: The metadata of the logo (hash, size, filename, content_type)
    logo_metadata = db.Column(JSONB, nullable=False, default=lambda: None)
    #: The logo's raw image data
    logo = db.deferred(db.Column(db.LargeBinary, nullable=True))
    #: The metadata of the stylesheet (hash, size, filename)
    stylesheet_metadata = db.Column(JSONB, nullable=False, default=lambda: None)
    #: The stylesheet's raw image data
    stylesheet = db.deferred(db.Column(db.Text, nullable=True))
    #: The ID of the event's default page (conferences only)
    default_page_id = db.Column(db.Integer, db.ForeignKey('events.pages.id'), index=True, nullable=True)
    #: The last user-friendly registration ID
    _last_friendly_registration_id = db.deferred(db.Column('last_friendly_registration_id', db.Integer,
                                                           nullable=False, default=0))
    #: The last user-friendly contribution ID
    _last_friendly_contribution_id = db.deferred(db.Column('last_friendly_contribution_id', db.Integer,
                                                           nullable=False, default=0))
    #: The last user-friendly session ID
    _last_friendly_session_id = db.deferred(db.Column('last_friendly_session_id', db.Integer,
                                                      nullable=False, default=0))

    #: The category containing the event
    category = db.relationship('Category',
                               lazy=True,
                               backref=db.backref('events',
                                                  primaryjoin='(Category.id == Event.category_id) & ~Event.is_deleted',
                                                  order_by=(start_dt, id),
                                                  lazy=True))
    #: The user who created the event
    creator = db.relationship('User',
                              lazy=True,
                              backref=db.backref('created_events', lazy='dynamic'))
    #: The event this one was cloned from
    cloned_from = db.relationship('Event',
                                  lazy=True,
                                  remote_side='Event.id',
                                  backref=db.backref('clones', lazy=True, order_by=start_dt))
    #: The event's default page (conferences only)
    default_page = db.relationship('EventPage',
                                   lazy=True,
                                   foreign_keys=[default_page_id],
                                   post_update=True,
                                   # don't use this backref. we just need it so SA properly NULLs
                                   # this column when deleting the default page
                                   backref=db.backref('_default_page_of_event', lazy=True))
    #: The ACL entries for the event
    acl_entries = db.relationship('EventPrincipal',
                                  backref='event',
                                  cascade='all, delete-orphan',
                                  collection_class=set)
    #: External references associated with this event
    references = db.relationship('EventReference',
                                 lazy=True,
                                 cascade='all, delete-orphan',
                                 backref=db.backref('event', lazy=True))
    #: Persons associated with this event
    person_links = db.relationship('EventPersonLink',
                                   lazy=True,
                                   cascade='all, delete-orphan',
                                   backref=db.backref('event', lazy=True))
    #: The series this event is part of
    series = db.relationship('EventSeries',
                             lazy=True,
                             backref=db.backref('events',
                                                lazy=True,
                                                order_by=(start_dt, id),
                                                primaryjoin='(Event.series_id == EventSeries.id) & ~Event.is_deleted'))
    #: Users who can review on all tracks
    global_abstract_reviewers = db.relationship('User',
                                                secondary='events.track_abstract_reviewers',
                                                collection_class=set,
                                                lazy=True,
                                                backref=db.backref('global_abstract_reviewer_for_events',
                                                                   collection_class=set,
                                                                   lazy=True))
    #: Users who are conveners on all tracks
    global_conveners = db.relationship('User',
                                       secondary='events.track_conveners',
                                       collection_class=set,
                                       lazy=True,
                                       backref=db.backref('global_convener_for_events',
                                                          collection_class=set,
                                                          lazy=True))

    # relationship backrefs:
    # - abstract_email_templates (AbstractEmailTemplate.event)
    # - abstract_review_questions (AbstractReviewQuestion.event)
    # - abstracts (Abstract.event)
    # - agreements (Agreement.event)
    # - all_attachment_folders (AttachmentFolder.event)
    # - all_legacy_attachment_folder_mappings (LegacyAttachmentFolderMapping.event)
    # - all_legacy_attachment_mappings (LegacyAttachmentMapping.event)
    # - all_notes (EventNote.event)
    # - all_room_reservation_links (ReservationLink.event)
    # - all_vc_room_associations (VCRoomEventAssociation.event)
    # - attachment_folders (AttachmentFolder.linked_event)
    # - clones (Event.cloned_from)
    # - contribution_fields (ContributionField.event)
    # - contribution_types (ContributionType.event)
    # - contributions (Contribution.event)
    # - custom_pages (EventPage.event)
    # - designer_templates (DesignerTemplate.event)
    # - editing_file_types (EditingFileType.event)
    # - editing_tags (EditingTag.event)
    # - layout_images (ImageFile.event)
    # - legacy_contribution_mappings (LegacyContributionMapping.event)
    # - legacy_mapping (LegacyEventMapping.event)
    # - legacy_session_block_mappings (LegacySessionBlockMapping.event)
    # - legacy_session_mappings (LegacySessionMapping.event)
    # - legacy_subcontribution_mappings (LegacySubContributionMapping.event)
    # - log_entries (EventLogEntry.event)
    # - menu_entries (MenuEntry.event)
    # - note (EventNote.linked_event)
    # - paper_competences (PaperCompetence.event)
    # - paper_review_questions (PaperReviewQuestion.event)
    # - paper_templates (PaperTemplate.event)
    # - persons (EventPerson.event)
    # - registration_forms (RegistrationForm.event)
    # - registrations (Registration.event)
    # - reminders (EventReminder.event)
    # - requests (Request.event)
    # - roles (EventRole.event)
    # - room_reservation_links (ReservationLink.linked_event)
    # - session_types (SessionType.event)
    # - sessions (Session.event)
    # - settings (EventSetting.event)
    # - settings_principals (EventSettingPrincipal.event)
    # - static_list_links (StaticListLink.event)
    # - static_sites (StaticSite.event)
    # - surveys (Survey.event)
    # - timetable_entries (TimetableEntry.event)
    # - track_groups (TrackGroup.event)
    # - tracks (Track.event)
    # - vc_room_associations (VCRoomEventAssociation.linked_event)

    # Settings-backed attributes (stored in event settings, not columns)
    start_dt_override = _EventSettingProperty(event_core_settings, 'start_dt_override')
    end_dt_override = _EventSettingProperty(event_core_settings, 'end_dt_override')
    organizer_info = _EventSettingProperty(event_core_settings, 'organizer_info')
    additional_info = _EventSettingProperty(event_core_settings, 'additional_info')
    contact_title = _EventSettingProperty(event_contact_settings, 'title')
    contact_emails = _EventSettingProperty(event_contact_settings, 'emails')
    contact_phones = _EventSettingProperty(event_contact_settings, 'phones')

    @classmethod
    def category_chain_overlaps(cls, category_ids):
        """
        Create a filter that checks whether the event has any of the
        provided category ids in its parent chain.

        :param category_ids: A list of category ids or a single
                             category id
        """
        from indico.modules.categories import Category
        if not isinstance(category_ids, (list, tuple, set)):
            category_ids = [category_ids]
        cte = Category.get_tree_cte()
        return (cte.c.id == Event.category_id) & cte.c.path.overlap(category_ids)

    @classmethod
    def is_visible_in(cls, category_id):
        """
        Create a filter that checks whether the event is visible in
        the specified category.
        """
        # NOTE(review): unlike `category_chain_overlaps`, `Category` is not
        # imported locally here — confirm it is imported at module level.
        cte = Category.get_visible_categories_cte(category_id)
        return (db.exists(db.select([1])).where(db.and_(cte.c.id == Event.category_id,
                                                        db.or_(Event.visibility.is_(None),
                                                               Event.visibility > cte.c.level))))

    @property
    def event(self):
        """Convenience property so all event entities have it"""
        return self

    @property
    def has_logo(self):
        return self.logo_metadata is not None

    @property
    def has_stylesheet(self):
        return self.stylesheet_metadata is not None

    @property
    def theme(self):
        """The timetable theme, falling back to the default for the event type."""
        from indico.modules.events.layout import layout_settings, theme_settings
        theme = layout_settings.get(self, 'timetable_theme')
        if theme and theme in theme_settings.get_themes_for(self.type):
            return theme
        else:
            return theme_settings.defaults[self.type]

    @property
    def locator(self):
        return {'confId': self.id}

    @property
    def logo_url(self):
        return url_for('event_images.logo_display', self, slug=self.logo_metadata['hash'])

    @property
    def external_logo_url(self):
        return url_for('event_images.logo_display', self, slug=self.logo_metadata['hash'], _external=True)

    @property
    def participation_regform(self):
        """The registration form used for participation, if any."""
        return next((form for form in self.registration_forms if form.is_participation), None)

    @property
    @memoize_request
    def published_registrations(self):
        from indico.modules.events.registration.util import get_published_registrations
        return get_published_registrations(self)

    @property
    def protection_parent(self):
        return self.category

    @property
    def start_dt_local(self):
        return self.start_dt.astimezone(self.tzinfo)

    @property
    def end_dt_local(self):
        return self.end_dt.astimezone(self.tzinfo)

    @property
    def start_dt_display(self):
        """
        The 'displayed start dt', which is usually the actual start dt,
        but may be overridden for a conference.
        """
        if self.type_ == EventType.conference and self.start_dt_override:
            return self.start_dt_override
        else:
            return self.start_dt

    @property
    def end_dt_display(self):
        """
        The 'displayed end dt', which is usually the actual end dt,
        but may be overridden for a conference.
        """
        if self.type_ == EventType.conference and self.end_dt_override:
            return self.end_dt_override
        else:
            return self.end_dt

    @property
    def type(self):
        # XXX: this should eventually be replaced with the type_
        # property returning the enum - but there are too many places
        # right now that rely on the type string
        return self.type_.name

    @hybrid_property
    def type_(self):
        return self._type

    @type_.setter
    def type_(self, value):
        # Fire a signal when the type actually changes (not on first assignment)
        old_type = self._type
        self._type = value
        if old_type is not None and old_type != value:
            signals.event.type_changed.send(self, old_type=old_type)

    @property
    def url(self):
        return url_for('events.display', self)

    @property
    def external_url(self):
        return url_for('events.display', self, _external=True)

    @property
    def short_url(self):
        id_ = self.url_shortcut or self.id
        return url_for('events.shorturl', confId=id_)

    @property
    def short_external_url(self):
        id_ = self.url_shortcut or self.id
        return url_for('events.shorturl', confId=id_, _external=True)

    @property
    def tzinfo(self):
        return pytz.timezone(self.timezone)

    @property
    def display_tzinfo(self):
        """The tzinfo of the event as preferred by the current user"""
        return get_display_tz(self, as_timezone=True)

    @property
    @contextmanager
    def logging_disabled(self):
        """Temporarily disables event logging

        This is useful when performing actions e.g. during event
        creation or at other times where adding entries to the event
        log doesn't make sense.
        """
        self.__logging_disabled = True
        try:
            yield
        finally:
            self.__logging_disabled = False

    @hybrid_method
    def happens_between(self, from_dt=None, to_dt=None):
        """Check whether the event takes place within two dates"""
        if from_dt is not None and to_dt is not None:
            # any event that takes place during the specified range
            return overlaps((self.start_dt, self.end_dt), (from_dt, to_dt), inclusive=True)
        elif from_dt is not None:
            # any event that starts on/after the specified date
            return self.start_dt >= from_dt
        elif to_dt is not None:
            # any event that ends on/before the specifed date
            return self.end_dt <= to_dt
        else:
            return True

    @happens_between.expression
    def happens_between(cls, from_dt=None, to_dt=None):
        # SQL-expression counterpart of the Python version above
        if from_dt is not None and to_dt is not None:
            # any event that takes place during the specified range
            return db_dates_overlap(cls, 'start_dt', from_dt, 'end_dt', to_dt, inclusive=True)
        elif from_dt is not None:
            # any event that starts on/after the specified date
            return cls.start_dt >= from_dt
        elif to_dt is not None:
            # any event that ends on/before the specifed date
            return cls.end_dt <= to_dt
        else:
            return True

    @hybrid_method
    def starts_between(self, from_dt=None, to_dt=None):
        """Check whether the event starts within two dates"""
        if from_dt is not None and to_dt is not None:
            return from_dt <= self.start_dt <= to_dt
        elif from_dt is not None:
            return self.start_dt >= from_dt
        elif to_dt is not None:
            return self.start_dt <= to_dt
        else:
            return True

    @starts_between.expression
    def starts_between(cls, from_dt=None, to_dt=None):
        if from_dt is not None and to_dt is not None:
            return cls.start_dt.between(from_dt, to_dt)
        elif from_dt is not None:
            return cls.start_dt >= from_dt
        elif to_dt is not None:
            return cls.start_dt <= to_dt
        else:
            return True

    @hybrid_method
    def ends_after(self, dt):
        """Check whether the event ends on/after the specified date"""
        return self.end_dt >= dt if dt is not None else True

    @ends_after.expression
    def ends_after(cls, dt):
        return cls.end_dt >= dt if dt is not None else True

    @hybrid_property
    def duration(self):
        return self.end_dt - self.start_dt

    def can_lock(self, user):
        """Check whether the user can lock/unlock the event"""
        return user and (user.is_admin or user == self.creator or self.category.can_manage(user))

    def get_relative_event_ids(self):
        """Get the first, last, previous and next event IDs.

        Any of those values may be ``None`` if there is no matching
        event or if it would be the current event.

        :return: A dict containing ``first``, ``last``, ``prev`` and ``next``.
        """
        # Window functions over all non-deleted events in the same category,
        # ordered chronologically (ties broken by id).
        subquery = (select([Event.id,
                            db.func.first_value(Event.id).over(order_by=(Event.start_dt, Event.id)).label('first'),
                            db.func.last_value(Event.id).over(order_by=(Event.start_dt, Event.id),
                                                              range_=(None, None)).label('last'),
                            db.func.lag(Event.id).over(order_by=(Event.start_dt, Event.id)).label('prev'),
                            db.func.lead(Event.id).over(order_by=(Event.start_dt, Event.id)).label('next')])
                    .where((Event.category_id == self.category_id) & ~Event.is_deleted)
                    .alias())
        rv = (db.session.query(subquery.c.first, subquery.c.last, subquery.c.prev, subquery.c.next)
              .filter(subquery.c.id == self.id)
              .one()
              ._asdict())
        if rv['first'] == self.id:
            rv['first'] = None
        if rv['last'] == self.id:
            rv['last'] = None
        return rv

    def get_verbose_title(self, show_speakers=False, show_series_pos=False):
        """Get the event title with some additional information

        :param show_speakers: Whether to prefix the title with the
                              speakers of the event.
        :param show_series_pos: Whether to suffix the title with the
                                position and total count in the event's
                                series.
        """
        title = self.title
        if show_speakers and self.person_links:
            speakers = ', '.join(sorted([pl.full_name for pl in self.person_links], key=unicode.lower))
            title = '{}, "{}"'.format(speakers, title)
        if show_series_pos and self.series and self.series.show_sequence_in_title:
            title = '{} ({}/{})'.format(title, self.series_pos, self.series_count)
        return title

    def get_non_inheriting_objects(self):
        """Get a set of child objects that do not inherit protection"""
        return get_non_inheriting_objects(self)

    def get_contribution(self, id_):
        """Get a contribution of the event"""
        return get_related_object(self, 'contributions', {'id': id_})

    def get_sorted_tracks(self):
        """Return tracks and track groups in the correct order"""
        track_groups = self.track_groups
        tracks = [track for track in self.tracks if not track.track_group]
        return sorted(tracks + track_groups, key=attrgetter('position'))

    def get_session(self, id_=None, friendly_id=None):
        """Get a session of the event"""
        if friendly_id is None and id_ is not None:
            criteria = {'id': id_}
        elif id_ is None and friendly_id is not None:
            criteria = {'friendly_id': friendly_id}
        else:
            raise ValueError('Exactly one kind of id must be specified')
        return get_related_object(self, 'sessions', criteria)

    def get_session_block(self, id_, scheduled_only=False):
        """Get a session block of the event"""
        from indico.modules.events.sessions.models.blocks import SessionBlock
        query = SessionBlock.query.filter(SessionBlock.id == id_,
                                          SessionBlock.session.has(event=self, is_deleted=False))
        if scheduled_only:
            # NOTE(review): the filtered query is not reassigned here, so this
            # line has no effect — looks like it should be `query = query.filter(...)`.
            query.filter(SessionBlock.timetable_entry != None)  # noqa
        return query.first()

    def get_allowed_sender_emails(self, include_current_user=True, include_creator=True, include_managers=True,
                                  include_contact=True, include_chairs=True, extra=None):
        """
        Return the emails of people who can be used as senders (or
        rather Reply-to contacts) in emails sent from within an event.

        :param include_current_user: Whether to include the email of
                                     the currently logged-in user
        :param include_creator: Whether to include the email of the
                                event creator
        :param include_managers: Whether to include the email of all
                                 event managers
        :param include_contact: Whether to include the "event contact"
                                emails
        :param include_chairs: Whether to include the emails of event
                               chairpersons (or lecture speakers)
        :param extra: An email address that is always included, even
                      if it is not in any of the included lists.
        :return: An OrderedDict mapping emails to pretty names
        """
        emails = {}
        # Contact/Support
        if include_contact:
            for email in self.contact_emails:
                emails[email] = self.contact_title
        # Current user
        if include_current_user and has_request_context() and session.user:
            emails[session.user.email] = session.user.full_name
        # Creator
        if include_creator:
            emails[self.creator.email] = self.creator.full_name
        # Managers
        if include_managers:
            emails.update((p.principal.email, p.principal.full_name)
                          for p in self.acl_entries
                          if p.type == PrincipalType.user and p.full_access)
        # Chairs
        if include_chairs:
            emails.update((pl.email, pl.full_name) for pl in self.person_links if pl.email)
        # Extra email (e.g. the current value in an object from the DB)
        if extra:
            emails.setdefault(extra, extra)
        # Sanitize and format emails
        emails = {to_unicode(email.strip().lower()): '{} <{}>'.format(to_unicode(name), to_unicode(email))
                  for email, name in emails.iteritems()
                  if email and email.strip()}
        # The current user's own address sorts first
        own_email = session.user.email if has_request_context() and session.user else None
        return OrderedDict(sorted(emails.items(), key=lambda x: (x[0] != own_email, x[1].lower())))

    @memoize_request
    def has_feature(self, feature):
        """Checks if a feature is enabled for the event"""
        from indico.modules.events.features.util import is_feature_enabled
        return is_feature_enabled(self, feature)

    @property
    @memoize_request
    def scheduled_notes(self):
        from indico.modules.events.notes.util import get_scheduled_notes
        return get_scheduled_notes(self)

    def log(self, realm, kind, module, summary, user=None, type_='simple', data=None):
        """Creates a new log entry for the event

        :param realm: A value from :class:`.EventLogRealm` indicating
                      the realm of the action.
        :param kind: A value from :class:`.EventLogKind` indicating
                     the kind of the action that was performed.
        :param module: A human-friendly string describing the module
                       related to the action.
        :param summary: A one-line summary describing the logged action.
        :param user: The user who performed the action.
        :param type_: The type of the log entry. This is used for custom
                      rendering of the log message/data
        :param data: JSON-serializable data specific to the log type.
        :return: The newly created `EventLogEntry`

        In most cases the ``simple`` log type is fine. For this type,
        any items from data will be shown in the detailed view of the
        log entry.  You may either use a dict (which will be sorted)
        alphabetically or a list of ``key, value`` pairs which will
        be displayed in the given order.
        """
        # No-op while inside the `logging_disabled` context manager
        if self.__logging_disabled:
            return
        entry = EventLogEntry(user=user, realm=realm, kind=kind, module=module, type=type_, summary=summary,
                              data=data or {})
        self.log_entries.append(entry)
        return entry

    def get_contribution_field(self, field_id):
        # Returns '' (not None) when no field matches
        return next((v for v in self.contribution_fields if v.id == field_id), '')

    def move_start_dt(self, start_dt):
        """Set event start_dt and adjust its timetable entries"""
        diff = start_dt - self.start_dt
        for entry in self.timetable_entries.filter(TimetableEntry.parent_id.is_(None)):
            new_dt = entry.start_dt + diff
            entry.move(new_dt)
        self.start_dt = start_dt

    def iter_days(self, tzinfo=None):
        """Iterate over the calendar days the event spans, as `date` objects."""
        start_dt = self.start_dt
        end_dt = self.end_dt
        if tzinfo:
            start_dt = start_dt.astimezone(tzinfo)
            end_dt = end_dt.astimezone(tzinfo)
        # Normalize to day boundaries so partial days count as full days
        duration = (end_dt.replace(hour=23, minute=59) - start_dt.replace(hour=0, minute=0)).days
        for offset in xrange(duration + 1):
            yield (start_dt + timedelta(days=offset)).date()

    def preload_all_acl_entries(self):
        db.m.Contribution.preload_acl_entries(self)
        db.m.Session.preload_acl_entries(self)

    def move(self, category):
        old_category = self.category
        self.category = category
        db.session.flush()
        signals.event.moved.send(self, old_parent=old_category)

    def delete(self, reason, user=None):
        """Soft-delete the event (sets is_deleted; nothing is removed from the DB)."""
        from indico.modules.events import logger, EventLogRealm, EventLogKind
        self.is_deleted = True
        signals.event.deleted.send(self, user=user)
        db.session.flush()
        logger.info('Event %r deleted [%s]', self, reason)
        self.log(EventLogRealm.event, EventLogKind.negative, 'Event', 'Event deleted', user,
                 data={'Reason': reason})

    @property
    @memoize_request
    def cfa(self):
        from indico.modules.events.abstracts.models.call_for_abstracts import CallForAbstracts
        return CallForAbstracts(self)

    @property
    @memoize_request
    def cfp(self):
        from indico.modules.events.papers.models.call_for_papers import CallForPapers
        return CallForPapers(self)

    @property
    def reservations(self):
        return [link.reservation for link in self.all_room_reservation_links]

    @property
    def has_ended(self):
        return self.end_dt <= now_utc()

    @return_ascii
    def __repr__(self):  # pragma: no cover
        return format_repr(self, 'id', 'start_dt', 'end_dt', is_deleted=False, is_locked=False,
                           _text=text_to_repr(self.title, max_length=75))
class CacheUser(Base):
    """Per-user cache of derived text artifacts.

    ``paras`` and ``clean`` are stored encrypted (see the ``Encrypt``
    column factory); ``vectors`` is a 2-D float array — presumably one
    row per paragraph, TODO confirm against the code that populates it.
    """
    __tablename__ = 'cache_users'

    # One row per user; the PK doubles as the FK to users.id.
    user_id = FKCol('users.id', primary_key=True)
    # Encrypted array of raw paragraph strings.
    paras = Encrypt(array=True)
    # Encrypted array of (presumably cleaned/normalized) paragraph strings.
    clean = Encrypt(array=True)
    # 2-dimensional float array.
    vectors = Column(ARRAY(Float, dimensions=2))
def Encrypt(Col=Unicode, array=False, **args):
    """Build a SQLAlchemy ``Column`` whose values are stored encrypted.

    ``Col`` is the underlying (plaintext) column type; when ``array`` is
    true the encrypted type is wrapped in a Postgres ``ARRAY``.  Any extra
    keyword arguments are forwarded to ``Column`` unchanged.
    """
    encrypted = StringEncryptedType(Col, vars.FLASK_KEY, FernetEngine)
    column_type = ARRAY(encrypted) if array else encrypted
    return Column(column_type, **args)
class Post(Base):
    """ General Post """
    __tablename__ = "posts"

    slug = db.Column(db.String, nullable=False, unique=True, default=_make_slug)
    comment = db.Column(db.String)
    public = db.Column(db.Boolean, nullable=False, default=False)
    location_lat = db.Column(db.Float)
    location_lon = db.Column(db.Float)
    location_name = db.Column(db.String)
    review = db.Column(db.Integer)
    link_name = db.Column(db.String)
    link_uri = db.Column(db.String)
    love_count = db.Column(db.Integer, default=0)
    media = db.Column(ARRAY(db.String, dimensions=1))
    topics = db.Column(ARRAY(db.String, dimensions=1))
    tweet_id = db.Column(db.String)
    user_id = db.Column(db.ForeignKey(User.id), nullable=False)
    site_id = db.Column(db.ForeignKey(Site.id), nullable=False)

    def _fetch_friendly_location(self):
        """Reverse-geocode (location_lat, location_lon) into ``location_name``.

        Best-effort: silently returns without changes when no API key is
        configured, the post has no coordinates, or Google returns no
        usable locality.
        """
        GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY", None)
        # Check to make sure we have a key
        if GOOGLE_API_KEY is None:
            return
        # Check that this call is necessary
        if self.location_lat is None or self.location_lon is None:
            return
        loc_resp = requests.get(
            "https://maps.googleapis.com/maps/api/geocode/json?"
            "latlng={},{}&key={}".format(
                self.location_lat, self.location_lon, GOOGLE_API_KEY
            ),
            # Bound the upstream call so a slow geocoder cannot hang the caller
            timeout=10,
        ).json()
        # Check for valid results
        if not loc_resp["results"]:
            return
        address_comps = loc_resp["results"][0]["address_components"]
        # Pick the first component tagged as a locality (city/town), if any
        locality = next(
            (ac for ac in address_comps if "locality" in ac["types"]), None
        )
        if locality is None:
            return
        self.location_name = locality["long_name"]

    def increment_love_count(self, factor=1):
        """Increment the love counter; an unset counter is treated as 0.

        ``default=0`` on the column is only applied at INSERT time, so a
        freshly created (unflushed) instance has ``love_count is None`` —
        the ``or 0`` guard prevents a ``TypeError`` in that case.
        """
        self.love_count = (self.love_count or 0) + factor
class PostgresEngineSpec(PostgresBaseEngineSpec, BasicParametersMixin):
    """Superset engine spec for PostgreSQL (psycopg2 driver)."""

    engine = "postgresql"
    engine_aliases = {"postgres"}
    default_driver = "psycopg2"
    sqlalchemy_uri_placeholder = (
        "postgresql://*****:*****@host:port/dbname[?key=value&key=value...]")
    # https://www.postgresql.org/docs/9.1/libpq-ssl.html#LIBQ-SSL-CERTIFICATES
    encryption_parameters = {"sslmode": "require"}
    max_column_name_length = 63
    try_remove_schema_from_table_name = False

    # Maps a native column type (matched by regex) to a SQLAlchemy type and a
    # generic Superset data type.
    column_type_mappings = (
        (
            re.compile(r"^double precision", re.IGNORECASE),
            DOUBLE_PRECISION(),
            GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^array.*", re.IGNORECASE),
            # NOTE(review): this pattern defines no capture groups, so
            # `match[2]` would raise IndexError if this callable ever runs —
            # verify against the mapping-resolution code in the base class.
            lambda match: ARRAY(int(match[2])) if match[2] else String(),
            utils.GenericDataType.STRING,
        ),
        (
            re.compile(r"^json.*", re.IGNORECASE),
            JSON(),
            utils.GenericDataType.STRING,
        ),
        (
            re.compile(r"^enum.*", re.IGNORECASE),
            ENUM(),
            utils.GenericDataType.STRING,
        ),
    )

    @classmethod
    def get_allow_cost_estimate(cls, extra: Dict[str, Any]) -> bool:
        # Postgres always supports EXPLAIN-based cost estimation.
        return True

    @classmethod
    def estimate_statement_cost(cls, statement: str, cursor: Any) -> Dict[str, Any]:
        """Run ``EXPLAIN`` and parse the planner's cost range from its output."""
        sql = f"EXPLAIN {statement}"
        cursor.execute(sql)
        result = cursor.fetchone()[0]
        # EXPLAIN output contains e.g. "cost=0.00..12.34"
        match = re.search(r"cost=([\d\.]+)\.\.([\d\.]+)", result)
        if match:
            return {
                "Start-up cost": float(match.group(1)),
                "Total cost": float(match.group(2)),
            }
        return {}

    @classmethod
    def query_cost_formatter(
            cls, raw_cost: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        # Stringify all cost values for display.
        return [{k: str(v) for k, v in row.items()} for row in raw_cost]

    @classmethod
    def get_table_names(cls, database: "Database", inspector: PGInspector,
                        schema: Optional[str]) -> List[str]:
        """Need to consider foreign tables for PostgreSQL"""
        tables = inspector.get_table_names(schema)
        tables.extend(inspector.get_foreign_table_names(schema))
        return sorted(tables)

    @classmethod
    def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
        """Render ``dttm`` as a SQL literal for the given target column type.

        Returns ``None`` when the type is not a date/timestamp type.
        """
        tt = target_type.upper()
        # NOTE(review): comparing a str against utils.TemporalType.DATE —
        # assumes TemporalType is a str-valued enum; confirm.
        if tt == utils.TemporalType.DATE:
            return f"TO_DATE('{dttm.date().isoformat()}', 'YYYY-MM-DD')"
        if "TIMESTAMP" in tt or "DATETIME" in tt:
            dttm_formatted = dttm.isoformat(sep=" ", timespec="microseconds")
            return f"""TO_TIMESTAMP('{dttm_formatted}', 'YYYY-MM-DD HH24:MI:SS.US')"""
        return None

    @staticmethod
    def get_extra_params(database: "Database") -> Dict[str, Any]:
        """
        For Postgres, the path to a SSL certificate is placed in `connect_args`.

        :param database: database instance from which to extract extras
        :raises CertificateException: If certificate is not valid/unparseable
        :raises SupersetException: If database extra json payload is unparseable
        """
        try:
            extra = json.loads(database.extra or "{}")
        except json.JSONDecodeError:
            raise SupersetException("Unable to parse database extras")

        if database.server_cert:
            engine_params = extra.get("engine_params", {})
            connect_args = engine_params.get("connect_args", {})
            # Preserve a user-supplied sslmode; default to verify-full
            connect_args["sslmode"] = connect_args.get("sslmode", "verify-full")
            path = utils.create_ssl_cert_file(database.server_cert)
            connect_args["sslrootcert"] = path
            engine_params["connect_args"] = connect_args
            extra["engine_params"] = engine_params
        return extra

    @classmethod
    def get_column_spec(  # type: ignore
        cls,
        native_type: Optional[str],
        source: utils.ColumnTypeSource = utils.ColumnTypeSource.GET_TABLE,
        column_type_mappings: Tuple[Tuple[Pattern[str],
                                          Union[TypeEngine,
                                                Callable[[Match[str]], TypeEngine]],
                                          GenericDataType, ], ...,
                                    ] = column_type_mappings,
    ) -> Union[ColumnSpec, None]:
        # Try the base class's generic resolution first, then fall back to
        # this engine's own column_type_mappings.
        column_spec = super().get_column_spec(native_type)
        if column_spec:
            return column_spec
        return super().get_column_spec(
            native_type, column_type_mappings=column_type_mappings)
def _add_acls(name, acls):
    """Append ``acls`` to the ACL array of the webservice row named ``name``."""
    new_entries = sql.cast(acls, ARRAY(VARCHAR))
    combined = sql.func.array_cat(webservice.c.acl, new_entries)
    stmt = (
        webservice.update()
        .values(acl=combined)
        .where(webservice.c.name == name)
    )
    op.execute(stmt)
async def contractor_list(request):  # noqa: C901 (ignore complexity)
    """List a company's contractors as JSON, with optional filtering,
    geo-distance search, sorting and pagination.

    Query params used: sort, subject, qual_level, label, label_exclude,
    max_distance, plus whatever ``geocode``/``get_pagination`` read.
    """
    sort_val = request.query.get('sort')
    sort_col = SORT_OPTIONS.get(sort_val, SORT_OPTIONS['last_updated'])
    pagination, offset = get_pagination(request, 100, 100)

    company = request['company']
    options = company.options or {}
    # Base columns always returned; optional columns are appended below
    # depending on the company's display options.
    fields = (
        c.id,
        c.first_name,
        c.last_name,
        c.tag_line,
        c.primary_description,
        c.town,
        c.country,
        c.photo_hash,
    )
    show_labels = options.get('show_labels')
    if show_labels:
        fields += (c.labels, )
    show_stars = options.get('show_stars')
    if show_stars:
        fields += (c.review_rating, )
    show_hours_reviewed = options.get('show_hours_reviewed')
    if show_hours_reviewed:
        fields += (c.review_duration, )

    where = (c.company == company.id, )

    # Subject/qualification filters require joining through the skills table.
    subject_filter = get_arg(request, 'subject')
    qual_level_filter = get_arg(request, 'qual_level')
    select_from = None
    if subject_filter or qual_level_filter:
        select_from = sa_contractors.join(sa_con_skills)
        if subject_filter:
            select_from = select_from.join(sa_subjects)
            where += (sa_subjects.c.id == subject_filter, )
        if qual_level_filter:
            select_from = select_from.join(sa_qual_levels)
            where += (sa_qual_levels.c.id == qual_level_filter, )

    # Label include/exclude filters on the contractor's label array.
    labels_filter = request.query.getall('label', [])
    labels_exclude_filter = request.query.getall('label_exclude', [])
    if labels_filter:
        where += (c.labels.contains(cast(labels_filter, ARRAY(String(255)))), )
    if labels_exclude_filter:
        # NULL labels pass the exclusion filter too
        where += (or_(
            ~c.labels.overlap(cast(labels_exclude_filter, ARRAY(String(255)))),
            c.labels.is_(None)), )

    location = await geocode(request)
    inc_distance = None
    if location:
        if location.get('error'):
            return json_response(
                request,
                location=location,
                results=[],
                count=0,
            )
        # Geo search: limit to max_distance metres (default 80km) and sort
        # by distance (overrides any requested sort column).
        max_distance = get_arg(request, 'max_distance', default=80_000)
        inc_distance = True
        request_loc = func.ll_to_earth(location['lat'], location['lng'])
        con_loc = func.ll_to_earth(c.latitude, c.longitude)
        distance_func = func.earth_distance(request_loc, con_loc)
        where += (distance_func < max_distance, )
        fields += (distance_func.label('distance'), )
        sort_col = distance_func

    distinct_cols = sort_col, c.id
    # NOTE(review): `sort_col == c.review_rating` compares SQLAlchemy column
    # objects with `==` in a boolean context — confirm this behaves as an
    # identity check under the SQLAlchemy version in use.
    if sort_col == c.review_rating:
        sort_on = c.review_rating.desc().nullslast(), c.review_duration.desc().nullslast(), c.id
        distinct_cols = c.review_rating, c.review_duration, c.id
    elif sort_col == c.last_updated:
        sort_on = sort_col.desc(), c.id
    else:
        sort_on = sort_col.asc(), c.id

    q_iter = (select(fields).where(and_(*where)).order_by(*sort_on).distinct(
        *distinct_cols).offset(offset).limit(pagination))
    q_count = select([sql_f.count(distinct(c.id))]).where(and_(*where))
    if select_from is not None:
        q_iter = q_iter.select_from(select_from)
        q_count = q_count.select_from(select_from)

    results = []
    name_display = company.name_display
    conn = await request['conn_manager'].get_connection()
    async for row in conn.execute(q_iter):
        name = _get_name(name_display, row)
        con = dict(
            id=row.id,
            url=route_url(request, 'contractor-get', company=company.public_key, id=row.id),
            link=f'{row.id}-{slugify(name)}',
            name=name,
            tag_line=row.tag_line,
            primary_description=row.primary_description,
            town=row.town,
            country=row.country,
            photo=_photo_url(request, row, True),
            # None when no geo search was performed
            distance=inc_distance and int(row.distance),
        )
        if show_labels:
            con['labels'] = row.labels or []
        if show_stars:
            con['review_rating'] = row.review_rating
        if show_hours_reviewed:
            con['review_duration'] = row.review_duration
        results.append(con)

    cur_count = await conn.execute(q_count)
    return json_response(
        request,
        location=location,
        results=results,
        count=(await cur_count.first())[0],
    )
class SubmissionResult(Base):
    """Class to store the evaluation results of a submission.

    A result is specific to one (submission, dataset) pair and moves
    through the status constants below as compilation, evaluation and
    scoring complete.
    """

    # Possible statuses of a submission result. COMPILING and
    # EVALUATING do not necessarily imply we are going to schedule
    # compilation and evalution for these submission results: for
    # example, they might be for datasets not scheduled for
    # evaluation, or they might have passed the maximum number of
    # tries. If a submission result does not exists for a pair
    # (submission, dataset), its status can be implicitly assumed to
    # be COMPILING.
    COMPILING = 1
    COMPILATION_FAILED = 2
    EVALUATING = 3
    SCORING = 4
    SCORED = 5

    __tablename__ = 'submission_results'
    __table_args__ = (UniqueConstraint('submission_id', 'dataset_id'), )

    # Primary key is (submission_id, dataset_id).
    submission_id = Column(Integer,
                           ForeignKey(Submission.id,
                                      onupdate="CASCADE",
                                      ondelete="CASCADE"),
                           primary_key=True)
    submission = relationship(Submission,
                              backref=backref("results",
                                              cascade="all, delete-orphan",
                                              passive_deletes=True))
    dataset_id = Column(Integer,
                        ForeignKey(Dataset.id,
                                   onupdate="CASCADE",
                                   ondelete="CASCADE"),
                        primary_key=True)
    dataset = relationship(Dataset)

    # Now below follow the actual result fields.

    # Compilation outcome (can be None = yet to compile, "ok" =
    # compilation successful and we can evaluate, "fail" =
    # compilation unsuccessful, throw it away).
    compilation_outcome = Column(Enum("ok", "fail",
                                      name="compilation_outcome"),
                                 nullable=True)

    # The output from the sandbox (to allow localization the first item
    # of the list is a format string, possibly containing some "%s",
    # that will be filled in using the remaining items of the list).
    compilation_text = Column(ARRAY(String), nullable=False, default=[])

    # Number of failures during compilation.
    compilation_tries = Column(Integer, nullable=False, default=0)

    # The compiler stdout and stderr.
    compilation_stdout = Column(Unicode, nullable=True)
    compilation_stderr = Column(Unicode, nullable=True)

    # Other information about the compilation.
    compilation_time = Column(Float, nullable=True)
    compilation_wall_clock_time = Column(Float, nullable=True)
    compilation_memory = Column(Integer, nullable=True)

    # Worker shard and sandbox where the compilation was performed.
    compilation_shard = Column(Integer, nullable=True)
    compilation_sandbox = Column(Unicode, nullable=True)

    # Evaluation outcome (can be None = yet to evaluate, "ok" =
    # evaluation successful). At any time, this should be equal to
    # evaluations != [].
    evaluation_outcome = Column(Enum("ok", name="evaluation_outcome"),
                                nullable=True)

    # Number of failures during evaluation.
    evaluation_tries = Column(Integer, nullable=False, default=0)

    # Score as computed by ScoringService. Null means not yet scored.
    score = Column(Float, nullable=True)

    # Score details. It's a JSON-like structure containing information
    # that is given to ScoreType.get_html_details to generate an HTML
    # snippet that is shown on AWS and, if the user used a token, on
    # CWS to display the details of the submission.
    # For example, results for each testcases, subtask, etc.
    score_details = Column(JSONB, nullable=True)

    # The same as the last two fields, but only showing information
    # visible to the user (assuming they did not use a token on this
    # submission).
    public_score = Column(Float, nullable=True)
    public_score_details = Column(JSONB, nullable=True)

    # Ranking score details. It is a list of strings that are going to
    # be shown in a single row in the table of submission in RWS.
    ranking_score_details = Column(ARRAY(String), nullable=True)

    # Follows the description of the fields automatically added by
    # SQLAlchemy.
    # executables (dict of Executable objects indexed by filename)
    # evaluations (list of Evaluation objects)

    def get_status(self):
        """Return the status of this object.

        return (int): one of the COMPILING...SCORED constants above.

        """
        if not self.compiled():
            return SubmissionResult.COMPILING
        elif self.compilation_failed():
            return SubmissionResult.COMPILATION_FAILED
        elif not self.evaluated():
            return SubmissionResult.EVALUATING
        elif not self.scored():
            return SubmissionResult.SCORING
        else:
            return SubmissionResult.SCORED

    def get_evaluation(self, testcase):
        """Return the Evaluation of this SR on the given Testcase, if any

        testcase (Testcase): the testcase the returned evaluation will
            belong to.

        return (Evaluation|None): the (only!) evaluation of this
            submission result on the given testcase, or None if there
            isn't any.

        """
        # Use IDs to avoid triggering a lazy-load query.
        assert self.dataset_id == testcase.dataset_id

        # XXX If self.evaluations is already loaded we can walk over it
        # and spare a query.
        # (We could use .one() and avoid a LIMIT but we would need to
        # catch a NoResultFound exception.)
        return self.sa_session.query(Evaluation)\
            .filter(Evaluation.submission_result == self)\
            .filter(Evaluation.testcase == testcase)\
            .first()

    def get_max_evaluation_resources(self):
        """Return the maximum time and memory used by this result

        return (float|None, int|None): max used time in seconds and
            memory in bytes, or None if data is incomplete or
            unavailable.

        """
        t, m = None, None
        if self.evaluated() and self.evaluations:
            for ev in self.evaluations:
                if ev.execution_time is not None \
                        and (t is None or t < ev.execution_time):
                    t = ev.execution_time
                if ev.execution_memory is not None \
                        and (m is None or m < ev.execution_memory):
                    m = ev.execution_memory
        return (t, m)

    def compiled(self):
        """Return whether the submission result has been compiled.

        return (bool): True if compiled, False otherwise.

        """
        return self.compilation_outcome is not None

    @staticmethod
    def filter_compiled():
        """Return a filtering expression for compiled submission results.

        """
        # SQL expression context: "!= None" renders as IS NOT NULL, so the
        # usual "is not None" idiom does not apply here.
        return SubmissionResult.compilation_outcome != None  # noqa

    def compilation_failed(self):
        """Return whether the submission result did not compile.

        return (bool): True if the compilation failed (in the sense
            that there is a problem in the user's source), False if
            not yet compiled or compilation was successful.

        """
        return self.compilation_outcome == "fail"

    @staticmethod
    def filter_compilation_failed():
        """Return a filtering expression for submission results
        failing compilation.

        """
        return SubmissionResult.compilation_outcome == "fail"

    def compilation_succeeded(self):
        """Return whether the submission compiled.

        return (bool): True if the compilation succeeded (in the sense
            that an executable was created), False if not yet compiled
            or compilation was unsuccessful.

        """
        return self.compilation_outcome == "ok"

    @staticmethod
    def filter_compilation_succeeded():
        """Return a filtering expression for submission results
        passing compilation.

        """
        return SubmissionResult.compilation_outcome == "ok"

    def evaluated(self):
        """Return whether the submission result has been evaluated.

        return (bool): True if evaluated, False otherwise.

        """
        return self.evaluation_outcome is not None

    @staticmethod
    def filter_evaluated():
        """Return a filtering lambda for evaluated submission results.

        """
        return SubmissionResult.evaluation_outcome != None  # noqa

    def needs_scoring(self):
        """Return whether the submission result needs to be scored.

        return (bool): True if in need of scoring, False otherwise.

        """
        return (self.compilation_failed() or self.evaluated()) and \
            not self.scored()

    def scored(self):
        """Return whether the submission result has been scored.

        return (bool): True if scored, False otherwise.

        """
        # Scored means ALL five score fields have been filled in.
        return all(
            getattr(self, k) is not None for k in [
                "score", "score_details", "public_score",
                "public_score_details", "ranking_score_details"
            ])

    @staticmethod
    def filter_scored():
        """Return a filtering lambda for scored submission results.

        """
        return ((SubmissionResult.score != None)
                & (SubmissionResult.score_details != None)
                & (SubmissionResult.public_score != None)
                & (SubmissionResult.public_score_details != None)
                & (SubmissionResult.ranking_score_details != None))  # noqa

    def invalidate_compilation(self):
        """Blank all compilation and evaluation outcomes, and the score.

        """
        self.invalidate_evaluation()
        self.compilation_outcome = None
        self.compilation_text = []
        self.compilation_tries = 0
        self.compilation_time = None
        self.compilation_wall_clock_time = None
        self.compilation_memory = None
        self.compilation_shard = None
        self.compilation_sandbox = None
        self.executables = {}

    def invalidate_evaluation(self):
        """Blank the evaluation outcomes and the score.

        """
        self.invalidate_score()
        self.evaluation_outcome = None
        self.evaluation_tries = 0
        self.evaluations = []

    def invalidate_score(self):
        """Blank the score.

        """
        self.score = None
        self.score_details = None
        self.public_score = None
        self.public_score_details = None
        self.ranking_score_details = None

    def set_compilation_outcome(self, success):
        """Set the compilation outcome based on the success.

        success (bool): if the compilation was successful.

        """
        self.compilation_outcome = "ok" if success else "fail"

    def set_evaluation_outcome(self):
        """Set the evaluation outcome (always ok now).

        """
        self.evaluation_outcome = "ok"
def test_compare_array_of_integer_text(self):
    """Round-trip an ARRAY(Integer) column against the server default
    ``(ARRAY[]::integer[])`` and assert the comparison matches."""
    column_type = ARRAY(Integer)
    server_default = text("(ARRAY[]::integer[])")
    self._compare_default_roundtrip(column_type, server_default)
class Evaluation(Base):
    """Class to store information about the outcome of the evaluation
    of a submission against one testcase.

    There is at most one evaluation per (submission, dataset,
    testcase) triple, enforced by the unique constraint below.
    """
    __tablename__ = 'evaluations'
    __table_args__ = (
        # Composite FK ties each evaluation to its owning
        # SubmissionResult row, not just to submission and dataset
        # independently.
        ForeignKeyConstraint(
            ('submission_id', 'dataset_id'),
            (SubmissionResult.submission_id, SubmissionResult.dataset_id),
            onupdate="CASCADE",
            ondelete="CASCADE"),
        UniqueConstraint('submission_id', 'dataset_id', 'testcase_id'),
    )

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # Submission (id and object) owning the evaluation.
    submission_id = Column(Integer,
                           ForeignKey(Submission.id,
                                      onupdate="CASCADE",
                                      ondelete="CASCADE"),
                           nullable=False,
                           index=True)
    submission = relationship(Submission, viewonly=True)

    # Dataset (id and object) owning the evaluation.
    dataset_id = Column(Integer,
                        ForeignKey(Dataset.id,
                                   onupdate="CASCADE",
                                   ondelete="CASCADE"),
                        nullable=False,
                        index=True)
    dataset = relationship(Dataset, viewonly=True)

    # SubmissionResult owning the evaluation.
    submission_result = relationship(SubmissionResult,
                                     backref=backref(
                                         'evaluations',
                                         cascade="all, delete-orphan",
                                         passive_deletes=True))

    # Testcase (id and object) this evaluation was performed on.
    testcase_id = Column(Integer,
                         ForeignKey(Testcase.id,
                                    onupdate="CASCADE",
                                    ondelete="CASCADE"),
                         nullable=False,
                         index=True)
    testcase = relationship(Testcase)

    # String containing the outcome of the evaluation (usually 1.0,
    # ...) not necessary the points awarded, that will be computed by
    # the score type.
    outcome = Column(Unicode, nullable=True)

    # The output from the grader, usually "Correct", "Time limit", ...
    # (to allow localization the first item of the list is a format
    # string, possibly containing some "%s", that will be filled in
    # using the remaining items of the list).
    text = Column(ARRAY(String), nullable=False, default=[])

    # Evaluation's time and wall-clock time, in seconds.
    execution_time = Column(Float, nullable=True)
    execution_wall_clock_time = Column(Float, nullable=True)

    # Memory used by the evaluation, in bytes.
    execution_memory = Column(Integer, nullable=True)

    # Worker shard and sandbox where the evaluation was performed.
    evaluation_shard = Column(Integer, nullable=True)
    evaluation_sandbox = Column(Unicode, nullable=True)

    @property
    def codename(self):
        """Return the codename of the testcase."""
        return self.testcase.codename
def _clamped_int_arg(raw, default, lo, hi):
    """Parse *raw* as an int, falling back to *default* when it is
    missing/empty or not numeric, then clamp into the inclusive [lo, hi]."""
    try:
        value = int(raw) if raw else default
    except ValueError:
        value = default
    return min(hi, max(lo, value))


def search_json() -> Response:
    """Search logs by player ids, uploader, title, map and team format.

    All filters are read from the request query string; malformed values
    produce a ``400`` JSON error. Returns a JSON document with the
    matching logs, the total match count, and the applied parameters.
    """
    playerid_string = request.args.get('player')
    uploaderid_string = request.args.get('uploader')
    log_string = request.args.get('title')
    map_string = request.args.get('map')
    format_string = request.args.get('format')
    limit_string = request.args.get('limit')
    offset_string = request.args.get('offset')

    if log_string and len(log_string) < 3:
        return jsonify(success=False,
                       error='Title should be longer than 2 characters'), 400
    if log_string:
        # Escape LIKE metacharacters, then wrap for substring matching.
        log_string = like_string(log_string)
        log_search_string = '%' + log_string + '%'
    if map_string and len(map_string) < 3:
        return jsonify(
            success=False,
            error='Map name should be longer than 2 characters'), 400

    players = []
    if playerid_string:
        # Narrowed from "except Exception": int() is the only raiser here.
        try:
            players = [int(x.strip()) for x in playerid_string.split(",")]
        except ValueError:
            return jsonify(
                success=False,
                error='Invalid format. Use comma-separated numerical player ids.'
            ), 400
    if len(players) > 18:
        return jsonify(success=False,
                       error="Maximum amount of players is 18."), 400

    uploader_id = None
    if uploaderid_string:
        uploader_id = to_steam64(uploaderid_string)
        if uploader_id == 0:
            # Not a recognizable steam id; accept a raw numeric id instead.
            try:
                uploader_id = int(uploaderid_string)
            except ValueError:
                return jsonify(success=False,
                               error='Uploader id is invalid'), 400

    logs = Log.query
    if len(players) > 0:
        # Postgres array "contains": every requested player must appear
        # in the log's cached player list.
        logs = logs.filter(
            Log.player_cache.op('@>')(cast(players, ARRAY(BigInteger))))
    if uploader_id:
        logs = logs.filter(Log.uploader_id == uploader_id)
    if log_string:
        logs = logs.filter(Log.logname.ilike(log_search_string, escape=None))
    if map_string:
        logs = logs.filter(Log.tf2map.like(f'{like_string(map_string)}%'))
    # Team format is inferred from the total player count range.
    if format_string == "highlander":
        logs = logs.filter(Log.player_count > 17)
    elif format_string == "6v6":
        logs = logs.filter((Log.player_count < 18) & (Log.player_count > 10))
    elif format_string == "4v4":
        logs = logs.filter((Log.player_count < 11) & (Log.player_count > 7))
    elif format_string == "2v2":
        logs = logs.filter((Log.player_count < 8) & (Log.player_count > 3))
    elif format_string == "1v1":
        logs = logs.filter(Log.player_count < 4)
    logs = logs.order_by(Log.id.desc())

    # Shared parse-with-fallback + clamp logic (was duplicated inline).
    limit = _clamped_int_arg(limit_string, default=1000, lo=0, hi=10000)
    offset = _clamped_int_arg(offset_string, default=0, lo=0, hi=100000000)

    logs_count = logs.count()
    logs_query = logs.limit(limit).offset(offset).all()
    logs = [{
        'id': log.id,
        'title': log.logname,
        'map': log.tf2map,
        'date': log.date,
        'views': log.views,
        'players': log.player_count,
    } for log in logs_query]
    # NOTE(review): 'title' echoes the LIKE-escaped string rather than the
    # raw query parameter — preserved as-is for compatibility.
    parameters = {
        "player": playerid_string,
        "uploader": uploaderid_string,
        "title": log_string,
        "map": map_string,
        "limit": limit,
        "offset": offset,
    }
    data = {
        "success": True,
        "results": len(logs),
        "total": logs_count,
        "parameters": parameters,
        "logs": logs,
    }
    return Response(json.dumps(data, indent=4), mimetype='application/json')
class Scheduled(db.Model):
    """A Kaltura recording schedule for one course section in one term.

    Rows are soft-deleted via ``deleted_at``; all read helpers filter on
    ``deleted_at IS NULL``.
    """
    __tablename__ = 'scheduled'

    id = db.Column(db.Integer, nullable=False, primary_key=True)  # noqa: A003
    section_id = db.Column(db.Integer, nullable=False)
    term_id = db.Column(db.Integer, nullable=False)
    # Email template types for which an alert has already been sent.
    alerts = db.Column(ARRAY(email_template_type))
    instructor_uids = db.Column(ARRAY(db.String(80)), nullable=False)
    kaltura_schedule_id = db.Column(db.Integer, nullable=False)
    meeting_days = db.Column(db.String, nullable=False)
    meeting_end_date = db.Column(db.DateTime, nullable=False)
    meeting_end_time = db.Column(db.String, nullable=False)
    meeting_start_date = db.Column(db.DateTime, nullable=False)
    meeting_start_time = db.Column(db.String, nullable=False)
    publish_type = db.Column(publish_type, nullable=False)
    recording_type = db.Column(recording_type, nullable=False)
    room_id = db.Column(db.Integer, db.ForeignKey('rooms.id'), nullable=False)
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.now)
    # Soft-delete marker; NULL means the schedule is active.
    deleted_at = db.Column(db.DateTime, nullable=True)

    def __init__(
        self,
        section_id,
        term_id,
        instructor_uids,
        kaltura_schedule_id,
        meeting_days,
        meeting_end_date,
        meeting_end_time,
        meeting_start_date,
        meeting_start_time,
        publish_type_,
        recording_type_,
        room_id,
    ):
        self.section_id = section_id
        self.term_id = term_id
        self.instructor_uids = instructor_uids
        self.kaltura_schedule_id = kaltura_schedule_id
        self.meeting_days = meeting_days
        self.meeting_end_date = meeting_end_date
        self.meeting_end_time = meeting_end_time
        self.meeting_start_date = meeting_start_date
        self.meeting_start_time = meeting_start_time
        self.publish_type = publish_type_
        self.recording_type = recording_type_
        self.room_id = room_id

    def __repr__(self):
        return f"""<Scheduled
                    id={self.id},
                    section_id={self.section_id},
                    term_id={self.term_id},
                    alerts={', '.join(self.alerts or [])},
                    instructor_uids={', '.join(self.instructor_uids)},
                    kaltura_schedule_id={self.kaltura_schedule_id}
                    meeting_days={self.meeting_days},
                    meeting_end_date={self.meeting_end_date},
                    meeting_end_time={self.meeting_end_time},
                    meeting_start_date={self.meeting_start_date},
                    meeting_start_time={self.meeting_start_time},
                    publish_type={self.publish_type},
                    recording_type={self.recording_type},
                    room_id={self.room_id},
                    created_at={self.created_at}>
                """

    @classmethod
    def create(
        cls,
        section_id,
        term_id,
        instructor_uids,
        kaltura_schedule_id,
        meeting_days,
        meeting_end_date,
        meeting_end_time,
        meeting_start_date,
        meeting_start_time,
        publish_type_,
        recording_type_,
        room_id,
    ):
        # Persist a new schedule row and commit immediately.
        scheduled = cls(
            instructor_uids=instructor_uids,
            kaltura_schedule_id=kaltura_schedule_id,
            meeting_days=meeting_days,
            meeting_end_date=meeting_end_date,
            meeting_end_time=meeting_end_time,
            meeting_start_date=meeting_start_date,
            meeting_start_time=meeting_start_time,
            publish_type_=publish_type_,
            recording_type_=recording_type_,
            room_id=room_id,
            section_id=section_id,
            term_id=term_id,
        )
        db.session.add(scheduled)
        std_commit()
        return scheduled

    @classmethod
    def get_all_scheduled(cls, term_id):
        # Active (non-deleted) schedules for a term.
        return cls.query.filter_by(term_id=term_id, deleted_at=None).all()

    @classmethod
    def get_scheduled_per_section_ids(cls, section_ids, term_id):
        criteria = and_(cls.section_id.in_(section_ids), cls.term_id == term_id, cls.deleted_at == None)  # noqa: E711
        return cls.query.filter(criteria).order_by(cls.created_at).all()

    @classmethod
    def get_scheduled(cls, section_id, term_id):
        return cls.query.filter_by(section_id=section_id, term_id=term_id, deleted_at=None).first()

    @classmethod
    def delete(cls, section_id, term_id):
        # Soft delete: stamp deleted_at rather than removing the row.
        sql = """UPDATE scheduled SET deleted_at = now()
            WHERE term_id = :term_id AND section_id = :section_id AND deleted_at IS NULL"""
        db.session.execute(
            text(sql),
            {
                'section_id': section_id,
                'term_id': term_id,
            },
        )

    @classmethod
    def add_alert(cls, scheduled_id, template_type):
        # NOTE(review): raises AttributeError if scheduled_id matches no
        # row (row is None) — confirm callers always pass a valid id.
        row = cls.query.filter_by(id=scheduled_id).first()
        if row.alerts:
            # De-duplicate alert types.
            row.alerts = list(set(row.alerts + [template_type]))
        else:
            row.alerts = [template_type]
        db.session.add(row)
        std_commit()

    def to_api_json(self, rooms_by_id=None):
        """Serialize this schedule for the API, optionally resolving the
        room from a prefetched ``rooms_by_id`` mapping."""
        room_feed = None
        if self.room_id:
            if rooms_by_id:
                # NOTE(review): .get(..., None).to_api_json() raises
                # AttributeError when room_id is absent from the mapping —
                # presumably callers prefetch all referenced rooms; verify.
                room_feed = rooms_by_id.get(self.room_id, None).to_api_json()
            else:
                room_feed = Room.get_room(self.room_id).to_api_json()
        formatted_days = format_days(self.meeting_days)
        return {
            'id': self.id,
            'alerts': self.alerts or [],
            'createdAt': to_isoformat(self.created_at),
            'instructorUids': self.instructor_uids,
            'kalturaScheduleId': self.kaltura_schedule_id,
            'meetingDays': formatted_days,
            'meetingDaysNames': get_names_of_days(formatted_days),
            'meetingEndDate': datetime.strftime(self.meeting_end_date, '%Y-%m-%d'),
            'meetingEndTime': self.meeting_end_time,
            'meetingEndTimeFormatted': format_time(self.meeting_end_time),
            'meetingStartDate': datetime.strftime(self.meeting_start_date, '%Y-%m-%d'),
            'meetingStartTime': self.meeting_start_time,
            'meetingStartTimeFormatted': format_time(self.meeting_start_time),
            'publishType': self.publish_type,
            'publishTypeName': NAMES_PER_PUBLISH_TYPE[self.publish_type],
            'recordingType': self.recording_type,
            'recordingTypeName': NAMES_PER_RECORDING_TYPE[self.recording_type],
            'room': room_feed,
            'sectionId': self.section_id,
            'termId': self.term_id,
        }
class Idf(db.Model):
    """Per-term document list backing an inverse-document-frequency
    lookup (presumably doc ids per term — TODO confirm against writers).
    """
    __tablename__ = 'idf'

    # The indexed term itself; one row per distinct term.
    term = db.Column(db.String(1000), primary_key=True)
    # MutableList wrapper lets in-place list mutations mark the row dirty.
    docs = db.Column(MutableList.as_mutable(ARRAY(Integer())))
class Organisation(ModelBase):
    """
    Establishes organisation object that resources can be associated with.

    At most one organisation may be the "master" organisation; creating a
    second one raises in ``__init__``.
    """
    __tablename__       = 'organisation'

    is_master           = db.Column(db.Boolean, default=False, index=True)

    name                = db.Column(db.String)

    external_auth_username = db.Column(db.String)

    # One-dimensional array of role names this organisation may assign.
    valid_roles = db.Column(ARRAY(db.String, dimensions=1))

    # Stored encrypted; see the external_auth_password hybrid property.
    _external_auth_password = db.Column(db.String)

    default_lat = db.Column(db.Float())
    default_lng = db.Column(db.Float())

    _timezone = db.Column(db.String)
    _country_code = db.Column(db.String, nullable=False)
    # Default disbursement stored in wei-like fixed point (1e16 per unit).
    _default_disbursement_wei = db.Column(db.Numeric(27), default=0)
    require_transfer_card = db.Column(db.Boolean, default=False)

    # TODO: Create a mixin so that both user and organisation can use the same definition here
    # This is the blockchain address used for transfer accounts, unless overridden
    primary_blockchain_address = db.Column(db.String)

    # This is the 'behind the scenes' blockchain address used for paying gas fees
    system_blockchain_address = db.Column(db.String)

    auto_approve_externally_created_users = db.Column(db.Boolean, default=False)

    users               = db.relationship(
        "User",
        secondary=organisation_association_table,
        back_populates="organisations")

    token_id            = db.Column(db.Integer, db.ForeignKey('token.id'))

    org_level_transfer_account_id = db.Column(
        db.Integer,
        db.ForeignKey('transfer_account.id', name="fk_org_level_account"))

    # We use this weird join pattern because SQLAlchemy
    # doesn't play nice when doing multiple joins of the same table over different declerative bases
    org_level_transfer_account = db.relationship(
        "TransferAccount",
        post_update=True,
        primaryjoin="Organisation.org_level_transfer_account_id==TransferAccount.id",
        uselist=False)

    @hybrid_property
    def timezone(self):
        return self._timezone

    @timezone.setter
    def timezone(self, val):
        # Validate against pendulum's canonical timezone list.
        if val is not None and val not in pendulum.timezones:
            raise Exception(f"{val} is not a valid timezone")
        self._timezone = val

    @hybrid_property
    def country_code(self):
        return self._country_code

    @country_code.setter
    def country_code(self, val):
        if val is not None:
            val = val.upper()
            if len(val) != 2:
                # will try handle 'AD: Andorra'
                val = val.split(':')[0]
            if val not in ISO_COUNTRIES:
                raise Exception(f"{val} is not a valid country code")
        self._country_code = val

    @property
    def default_disbursement(self):
        # Convert from the 1e16 fixed-point column back to whole units.
        return Decimal((self._default_disbursement_wei or 0) / int(1e16))

    @default_disbursement.setter
    def default_disbursement(self, val):
        if val is not None:
            self._default_disbursement_wei = int(val) * int(1e16)

    # TODO: This is a hack to get around the fact that org level TAs don't always show up. Super not ideal
    @property
    def queried_org_level_transfer_account(self):
        if self.org_level_transfer_account_id:
            return server.models.transfer_account.TransferAccount\
                .query.execution_options(show_all=True).get(self.org_level_transfer_account_id)
        return None

    @hybrid_property
    def external_auth_password(self):
        return decrypt_string(self._external_auth_password)

    @external_auth_password.setter
    def external_auth_password(self, value):
        self._external_auth_password = encrypt_string(value)

    credit_transfers = db.relationship(
        "CreditTransfer",
        secondary=organisation_association_table,
        back_populates="organisations")

    transfer_accounts = db.relationship('TransferAccount',
                                        backref='organisation',
                                        lazy=True,
                                        foreign_keys='TransferAccount.organisation_id')

    blockchain_addresses = db.relationship('BlockchainAddress',
                                           backref='organisation',
                                           lazy=True,
                                           foreign_keys='BlockchainAddress.organisation_id')

    email_whitelists = db.relationship('EmailWhitelist',
                                       backref='organisation',
                                       lazy=True,
                                       foreign_keys='EmailWhitelist.organisation_id')

    kyc_applications = db.relationship('KycApplication',
                                       backref='organisation',
                                       lazy=True,
                                       foreign_keys='KycApplication.organisation_id')

    attribute_maps = db.relationship('AttributeMap',
                                     backref='organisation',
                                     lazy=True,
                                     foreign_keys='AttributeMap.organisation_id')

    custom_welcome_message_key = db.Column(db.String)

    @staticmethod
    def master_organisation() -> "Organisation":
        """Return the single master organisation, or None if none exists."""
        return Organisation.query.filter_by(is_master=True).first()

    def _setup_org_transfer_account(self):
        """Create and attach the org-level transfer account, and expose it
        to existing ADMIN-tier users."""
        transfer_account = server.models.transfer_account.TransferAccount(
            bound_entity=self,
            is_approved=True
        )
        db.session.add(transfer_account)
        self.org_level_transfer_account = transfer_account

        # Back setup for delayed organisation transfer account instantiation
        for user in self.users:
            if AccessControl.has_any_tier(user.roles, 'ADMIN'):
                user.transfer_accounts.append(self.org_level_transfer_account)

    def bind_token(self, token):
        """Bind *token* to the organisation and create its transfer account."""
        self.token = token
        self._setup_org_transfer_account()

    def __init__(self, token=None, is_master=False, valid_roles=None, **kwargs):
        super(Organisation, self).__init__(**kwargs)

        # NOTE(review): requires self.name to be provided via kwargs,
        # otherwise .lower() raises AttributeError on None.
        self.external_auth_username = '******'+ self.name.lower().replace(' ', '_')
        self.external_auth_password = secrets.token_hex(16)
        self.valid_roles = valid_roles or list(ASSIGNABLE_TIERS.keys())

        if is_master:
            # Enforce the single-master invariant before creating wallets.
            if Organisation.query.filter_by(is_master=True).first():
                raise Exception("A master organisation already exists")

            self.is_master = True
            self.system_blockchain_address = bt.create_blockchain_wallet(
                private_key=current_app.config['MASTER_WALLET_PRIVATE_KEY'],
                wei_target_balance=0,
                wei_topup_threshold=0,
            )
            self.primary_blockchain_address = self.system_blockchain_address or bt.create_blockchain_wallet()
        else:
            self.is_master = False
            self.system_blockchain_address = bt.create_blockchain_wallet(
                wei_target_balance=current_app.config['SYSTEM_WALLET_TARGET_BALANCE'],
                wei_topup_threshold=current_app.config['SYSTEM_WALLET_TOPUP_THRESHOLD'],
            )
            self.primary_blockchain_address = bt.create_blockchain_wallet()

        if token:
            self.bind_token(token)
class Collection(db.Model, IdModel, SoftDeleteModel):
    """A set of documents and entities against which access control is
    enforced."""

    # Category schema for collections.
    # TODO: add extra weight info.
    # TODO: should this be configurable?
    CATEGORIES = {
        'news': 'News archives',
        'leak': 'Leaks',
        'land': 'Land registry',
        'gazette': 'Gazettes',
        'court': 'Court archives',
        'company': 'Company registries',
        'watchlist': 'Watchlists',
        'investigation': 'Personal collections',
        'sanctions': 'Sanctions lists',
        'scrape': 'Scrapes',
        'procurement': 'Procurement',
        'grey': 'Grey literature',
        'license': 'Licenses and concessions',
        'regulatory': 'Regulatory filings',
        'other': 'Other material'
    }

    label = db.Column(db.Unicode)
    summary = db.Column(db.Unicode, nullable=True)
    # Expected to hold one of the CATEGORIES keys.
    category = db.Column(db.Unicode, nullable=True)
    countries = db.Column(ARRAY(db.Unicode()), default=[])
    languages = db.Column(ARRAY(db.Unicode()), default=[])
    # External identifier used to re-find/revive a collection in create().
    foreign_id = db.Column(db.Unicode, unique=True, nullable=False)

    # Managed collections are generated by API crawlers and thus UI users
    # shouldn't be allowed to add entities or documents to them. They also
    # don't use advanced entity extraction features for performance reasons.
    managed = db.Column(db.Boolean, default=False)

    creator_id = db.Column(db.Integer, db.ForeignKey('role.id'), nullable=True)
    creator = db.relationship(Role)

    def update(self, data, creator=None):
        """Apply *data* to this collection, set the creator and grant them
        read/write permission; flushes (does not commit) the session."""
        self.label = data.get('label', self.label)
        self.summary = data.get('summary', self.summary)
        self.category = data.get('category', self.category)
        # NOTE(review): unlike label/summary/category, 'managed' and
        # 'countries' are RESET to defaults when absent from data —
        # confirm this asymmetry is intentional.
        self.managed = data.get('managed', False)
        self.countries = data.get('countries', [])
        if creator is None:
            creator = Role.by_id(data.get('creator_id'))
        self.creator = creator
        self.updated_at = datetime.utcnow()
        db.session.add(self)
        db.session.flush()
        if creator is not None:
            Permission.grant(self, creator, True, True)

    @property
    def roles(self):
        """Role ids with read permission on this collection (cached on the
        instance after the first query)."""
        if not hasattr(self, '_roles'):
            q = db.session.query(Permission.role_id)
            q = q.filter(Permission.deleted_at == None)  # noqa
            q = q.filter(Permission.collection_id == self.id)  # noqa
            q = q.filter(Permission.read == True)  # noqa
            self._roles = [e.role_id for e in q.all()]
        return self._roles

    @classmethod
    def by_foreign_id(cls, foreign_id, deleted=False):
        """Look up a collection by its foreign_id; optionally include
        soft-deleted rows."""
        if foreign_id is None:
            return
        q = cls.all(deleted=deleted)
        return q.filter(cls.foreign_id == foreign_id).first()

    @classmethod
    def all_by_ids(cls, ids, deleted=False, authz=None):
        """Restrict the base ids query to collections readable under *authz*
        (admins see everything)."""
        q = super(Collection, cls).all_by_ids(ids, deleted=deleted)
        if authz is not None and not authz.is_admin:
            q = q.join(Permission, cls.id == Permission.collection_id)
            q = q.filter(Permission.deleted_at == None)  # noqa
            q = q.filter(Permission.read == True)  # noqa
            q = q.filter(Permission.role_id.in_(authz.roles))
        return q

    @classmethod
    def create(cls, data, role=None):
        """Create a collection — or revive a soft-deleted one with the same
        foreign_id — and apply *data* to it."""
        foreign_id = data.get('foreign_id') or make_textid()
        collection = cls.by_foreign_id(foreign_id, deleted=True)
        if collection is None:
            collection = cls()
            collection.foreign_id = foreign_id
        collection.update(data, creator=role)
        # Undelete in case we matched a soft-deleted row above.
        collection.deleted_at = None
        return collection

    def __repr__(self):
        return '<Collection(%r, %r, %r)>' % \
            (self.id, self.foreign_id, self.label)
class CompanyOrm(Base):
    """ORM row for a company, addressable by its unique public key."""
    __tablename__ = 'companies'

    id = Column(Integer, primary_key=True, nullable=False)
    # Short external identifier used in URLs/lookups; indexed and unique.
    public_key = Column(String(20), index=True, nullable=False, unique=True)
    name = Column(String(63), unique=True)
    # Hostnames associated with the company.
    domains = Column(ARRAY(String(255)))
class Event(DescriptionMixin, LocationMixin, ProtectionManagersMixin, AttachedItemsMixin,
            AttachedNotesMixin, PersonLinkDataMixin, db.Model):
    """An Indico event

    This model contains the most basic information related to an event.

    Note that the ACL is currently only used for managers but not for
    view access!
    """
    __tablename__ = 'events'
    disallowed_protection_modes = frozenset()
    inheriting_have_acl = True
    location_backref_name = 'events'
    allow_location_inheritance = False
    description_wrapper = RichMarkup
    # flag toggled by the `logging_disabled` context manager (name-mangled
    # so subclasses cannot clobber it by accident)
    __logging_disabled = False

    ATTACHMENT_FOLDER_ID_COLUMN = 'event_id'

    @strict_classproperty
    @classmethod
    def __auto_table_args(cls):
        return (db.Index(None, 'category_chain', postgresql_using='gin'),
                db.Index('ix_events_title_fts', db.func.to_tsvector('simple', cls.title),
                         postgresql_using='gin'),
                db.Index('ix_events_start_dt_desc', cls.start_dt.desc()),
                db.Index('ix_events_end_dt_desc', cls.end_dt.desc()),
                db.CheckConstraint("(category_id IS NOT NULL AND category_chain IS NOT NULL) OR is_deleted",
                                   'category_data_set'),
                db.CheckConstraint("category_id = category_chain[1]",
                                   'category_id_matches_chain'),
                db.CheckConstraint("category_chain[array_length(category_chain, 1)] = 0",
                                   'category_chain_has_root'),
                db.CheckConstraint("(logo IS NULL) = (logo_metadata::text = 'null')", 'valid_logo'),
                db.CheckConstraint("(stylesheet IS NULL) = (stylesheet_metadata::text = 'null')",
                                   'valid_stylesheet'),
                db.CheckConstraint("end_dt >= start_dt", 'valid_dates'),
                db.CheckConstraint("title != ''", 'valid_title'),
                db.CheckConstraint("cloned_from_id != id", 'not_cloned_from_self'),
                {'schema': 'events'})

    @declared_attr
    def __table_args__(cls):
        return auto_table_args(cls)

    #: The ID of the event
    id = db.Column(db.Integer, primary_key=True)
    #: If the event has been deleted
    is_deleted = db.Column(db.Boolean, nullable=False, default=False)
    #: The ID of the user who created the event
    creator_id = db.Column(db.Integer, db.ForeignKey('users.users.id'), nullable=False, index=True)
    #: The ID of immediate parent category of the event
    category_id = db.Column(db.Integer, nullable=True, index=True)
    #: The category chain of the event (from immediate parent to root)
    category_chain = db.Column(ARRAY(db.Integer), nullable=True)
    #: If this event was cloned, the id of the parent event
    cloned_from_id = db.Column(db.Integer, db.ForeignKey('events.events.id'), nullable=True, index=True)
    #: The start date of the event
    start_dt = db.Column(UTCDateTime, nullable=False, index=True)
    #: The end date of the event
    end_dt = db.Column(UTCDateTime, nullable=False, index=True)
    #: The timezone of the event
    timezone = db.Column(db.String, nullable=False)
    #: The title of the event
    title = db.Column(db.String, nullable=False)
    #: The metadata of the logo (hash, size, filename, content_type)
    logo_metadata = db.Column(JSON, nullable=False, default=None)
    #: The logo's raw image data
    logo = db.deferred(db.Column(db.LargeBinary, nullable=True))
    #: The metadata of the stylesheet (hash, size, filename)
    stylesheet_metadata = db.Column(JSON, nullable=False, default=None)
    #: The stylesheet's raw image data
    stylesheet = db.deferred(db.Column(db.Text, nullable=True))
    #: The ID of the event's default page (conferences only)
    default_page_id = db.Column(db.Integer, db.ForeignKey('events.pages.id'), index=True, nullable=True)
    #: The last user-friendly registration ID
    _last_friendly_registration_id = db.deferred(db.Column('last_friendly_registration_id', db.Integer,
                                                           nullable=False, default=0))
    #: The last user-friendly contribution ID
    _last_friendly_contribution_id = db.deferred(db.Column('last_friendly_contribution_id', db.Integer,
                                                           nullable=False, default=0))
    #: The last user-friendly session ID
    _last_friendly_session_id = db.deferred(db.Column('last_friendly_session_id', db.Integer,
                                                      nullable=False, default=0))

    #: The user who created the event
    creator = db.relationship('User', lazy=True,
                              backref=db.backref('created_events', lazy='dynamic'))
    #: The event this one was cloned from
    cloned_from = db.relationship('Event', lazy=True, remote_side='Event.id',
                                  backref=db.backref('clones', lazy=True, order_by=start_dt))
    #: The event's default page (conferences only)
    default_page = db.relationship('EventPage', lazy=True, foreign_keys=[default_page_id],
                                   # don't use this backref. we just need it so SA properly NULLs
                                   # this column when deleting the default page
                                   backref=db.backref('_default_page_of_event', lazy=True))
    #: The ACL entries for the event
    acl_entries = db.relationship('EventPrincipal', backref='event_new',
                                  cascade='all, delete-orphan', collection_class=set)
    #: External references associated with this event
    references = db.relationship('EventReference', lazy=True, cascade='all, delete-orphan',
                                 backref=db.backref('event_new', lazy=True))
    #: Persons associated with this event
    person_links = db.relationship('EventPersonLink', lazy=True, cascade='all, delete-orphan',
                                   backref=db.backref('event', lazy=True))

    # relationship backrefs:
    # - abstracts (Abstract.event_new)
    # - agreements (Agreement.event_new)
    # - all_attachment_folders (AttachmentFolder.event_new)
    # - all_legacy_attachment_folder_mappings (LegacyAttachmentFolderMapping.event_new)
    # - all_legacy_attachment_mappings (LegacyAttachmentMapping.event_new)
    # - all_notes (EventNote.event_new)
    # - all_vc_room_associations (VCRoomEventAssociation.event_new)
    # - attachment_folders (AttachmentFolder.linked_event)
    # - clones (Event.cloned_from)
    # - contribution_fields (ContributionField.event_new)
    # - contribution_types (ContributionType.event_new)
    # - contributions (Contribution.event_new)
    # - custom_pages (EventPage.event_new)
    # - layout_images (ImageFile.event_new)
    # - legacy_contribution_mappings (LegacyContributionMapping.event_new)
    # - legacy_mapping (LegacyEventMapping.event_new)
    # - legacy_session_block_mappings (LegacySessionBlockMapping.event_new)
    # - legacy_session_mappings (LegacySessionMapping.event_new)
    # - legacy_subcontribution_mappings (LegacySubContributionMapping.event_new)
    # - log_entries (EventLogEntry.event_new)
    # - menu_entries (MenuEntry.event_new)
    # - note (EventNote.linked_event)
    # - persons (EventPerson.event_new)
    # - registration_forms (RegistrationForm.event_new)
    # - registrations (Registration.event_new)
    # - reminders (EventReminder.event_new)
    # - report_links (ReportLink.event_new)
    # - requests (Request.event_new)
    # - reservations (Reservation.event_new)
    # - sessions (Session.event_new)
    # - settings (EventSetting.event_new)
    # - settings_principals (EventSettingPrincipal.event_new)
    # - static_sites (StaticSite.event_new)
    # - surveys (Survey.event_new)
    # - timetable_entries (TimetableEntry.event_new)
    # - vc_room_associations (VCRoomEventAssociation.linked_event)

    @property
    @memoize_request
    def as_legacy(self):
        """Returns a legacy `Conference` object (ZODB)"""
        from MaKaC.conference import ConferenceHolder
        return ConferenceHolder().getById(self.id, True)

    @property
    def event_new(self):
        """Convenience property so all event entities have it"""
        return self

    @property
    def category(self):
        from MaKaC.conference import CategoryManager
        return CategoryManager().getById(str(self.category_id), True) if self.category_id else None

    @property
    def has_logo(self):
        return self.logo_metadata is not None

    @property
    def has_stylesheet(self):
        return self.stylesheet_metadata is not None

    @property
    def theme(self):
        from indico.modules.events.layout import layout_settings, theme_settings
        return (layout_settings.get(self, 'timetable_theme') or
                theme_settings.defaults[self.type])

    @property
    def is_protected(self):
        return self.as_legacy.isProtected()

    @property
    def locator(self):
        return {'confId': self.id}

    @property
    def logo_url(self):
        return url_for('event_images.logo_display', self, slug=self.logo_metadata['hash'])

    @property
    def participation_regform(self):
        return self.registration_forms.filter_by(is_participation=True, is_deleted=False).first()

    @property
    @memoize_request
    def published_registrations(self):
        from indico.modules.events.registration.util import get_published_registrations
        return get_published_registrations(self)

    @property
    def protection_parent(self):
        return self.as_legacy.getOwner()

    @property
    def start_dt_local(self):
        return self.start_dt.astimezone(self.tzinfo)

    @property
    def end_dt_local(self):
        return self.end_dt.astimezone(self.tzinfo)

    @property
    def type(self):
        event_type = self.as_legacy.getType()
        if event_type == 'simple_event':
            event_type = 'lecture'
        return event_type

    @property
    def tzinfo(self):
        return pytz.timezone(self.timezone)

    @property
    def display_tzinfo(self):
        """The tzinfo of the event as preferred by the current user"""
        from MaKaC.common.timezoneUtils import DisplayTZ
        return DisplayTZ(conf=self).getDisplayTZ(as_timezone=True)

    @property
    @contextmanager
    def logging_disabled(self):
        """Temporarily disables event logging

        This is useful when performing actions e.g. during event
        creation or at other times where adding entries to the event
        log doesn't make sense.
        """
        self.__logging_disabled = True
        try:
            yield
        finally:
            self.__logging_disabled = False

    @classmethod
    def title_matches(cls, search_string, exact=False):
        """Check whether the title matches a search string.

        To be used in a SQLAlchemy `filter` call.

        :param search_string: A string to search for
        :param exact: Whether to search for the exact string
        """
        crit = db.func.to_tsvector('simple', cls.title).match(preprocess_ts_string(search_string),
                                                              postgresql_regconfig='simple')
        if exact:
            crit = crit & cls.title.ilike('%{}%'.format(escape_like(search_string)))
        return crit

    @hybrid_method
    def happens_between(self, from_dt=None, to_dt=None):
        """Check whether the event takes place within two dates"""
        if from_dt is not None and to_dt is not None:
            # any event that takes place during the specified range
            return overlaps((self.start_dt, self.end_dt), (from_dt, to_dt), inclusive=True)
        elif from_dt is not None:
            # any event that starts on/after the specified date
            return self.start_dt >= from_dt
        elif to_dt is not None:
            # any event that ends on/before the specified date
            return self.end_dt <= to_dt
        else:
            return True

    @happens_between.expression
    def happens_between(cls, from_dt=None, to_dt=None):
        if from_dt is not None and to_dt is not None:
            # any event that takes place during the specified range
            return db_dates_overlap(cls, 'start_dt', from_dt, 'end_dt', to_dt, inclusive=True)
        elif from_dt is not None:
            # any event that starts on/after the specified date
            return cls.start_dt >= from_dt
        elif to_dt is not None:
            # any event that ends on/before the specified date
            return cls.end_dt <= to_dt
        else:
            return True

    @hybrid_method
    def starts_between(self, from_dt=None, to_dt=None):
        """Check whether the event starts within two dates"""
        if from_dt is not None and to_dt is not None:
            return from_dt <= self.start_dt <= to_dt
        elif from_dt is not None:
            return self.start_dt >= from_dt
        elif to_dt is not None:
            return self.start_dt <= to_dt
        else:
            return True

    @starts_between.expression
    def starts_between(cls, from_dt=None, to_dt=None):
        if from_dt is not None and to_dt is not None:
            return cls.start_dt.between(from_dt, to_dt)
        elif from_dt is not None:
            return cls.start_dt >= from_dt
        elif to_dt is not None:
            return cls.start_dt <= to_dt
        else:
            return True

    @hybrid_property
    def duration(self):
        return self.end_dt - self.start_dt

    def can_access(self, user, allow_admin=True):
        if not allow_admin:
            raise NotImplementedError('can_access(..., allow_admin=False) is unsupported until ACLs are migrated')
        from MaKaC.accessControl import AccessWrapper
        return self.as_legacy.canAccess(AccessWrapper(user.as_avatar if user else None))

    def can_manage(self, user, role=None, allow_key=False, *args, **kwargs):
        # XXX: Remove this method once modification keys are gone!
        return (super(Event, self).can_manage(user, role, *args, **kwargs) or
                bool(allow_key and user and self.as_legacy.canKeyModify()))

    def get_non_inheriting_objects(self):
        """Get a set of child objects that do not inherit protection"""
        return get_non_inheriting_objects(self)

    def get_contribution(self, id_):
        """Get a contribution of the event"""
        return get_related_object(self, 'contributions', {'id': id_})

    def get_session(self, id_=None, friendly_id=None):
        """Get a session of the event"""
        if friendly_id is None and id_ is not None:
            criteria = {'id': id_}
        elif id_ is None and friendly_id is not None:
            criteria = {'friendly_id': friendly_id}
        else:
            raise ValueError('Exactly one kind of id must be specified')
        return get_related_object(self, 'sessions', criteria)

    def get_session_block(self, id_, scheduled_only=False):
        """Get a session block of the event"""
        from indico.modules.events.sessions.models.blocks import SessionBlock
        query = SessionBlock.query.filter(SessionBlock.id == id_,
                                          SessionBlock.session.has(event_new=self.event_new, is_deleted=False))
        if scheduled_only:
            # BUG FIX: `Query.filter` returns a *new* query; the original code
            # discarded the result, so `scheduled_only=True` had no effect.
            query = query.filter(SessionBlock.timetable_entry != None)  # noqa
        return query.first()

    @memoize_request
    def has_feature(self, feature):
        """Checks if a feature is enabled for the event"""
        from indico.modules.events.features.util import is_feature_enabled
        return is_feature_enabled(self, feature)

    @property
    @memoize_request
    def scheduled_notes(self):
        from indico.modules.events.notes.util import get_scheduled_notes
        return get_scheduled_notes(self)

    def log(self, realm, kind, module, summary, user=None, type_='simple', data=None):
        """Creates a new log entry for the event

        :param realm: A value from :class:`.EventLogRealm` indicating
                      the realm of the action.
        :param kind: A value from :class:`.EventLogKind` indicating
                     the kind of the action that was performed.
        :param module: A human-friendly string describing the module
                       related to the action.
        :param summary: A one-line summary describing the logged action.
        :param user: The user who performed the action.
        :param type_: The type of the log entry. This is used for custom
                      rendering of the log message/data
        :param data: JSON-serializable data specific to the log type.

        In most cases the ``simple`` log type is fine. For this type,
        any items from data will be shown in the detailed view of the
        log entry.  You may either use a dict (which will be sorted)
        alphabetically or a list of ``key, value`` pairs which will
        be displayed in the given order.
        """
        if self.__logging_disabled:
            return
        entry = EventLogEntry(user=user, realm=realm, kind=kind, module=module, type=type_,
                              summary=summary, data=data or {})
        self.log_entries.append(entry)

    # XXX: Delete once event ACLs are in the new DB
    def get_access_list(self, skip_managers=False, skip_self_acl=False):
        return {x.as_new for x in self.as_legacy.getRecursiveAllowedToAccessList(skip_managers, skip_self_acl)}

    def get_contribution_field(self, field_id):
        # NOTE: falls back to '' (not None) when no field matches
        return next((v for v in self.contribution_fields if v.id == field_id), '')

    def move_start_dt(self, start_dt):
        """Set event start_dt and adjust its timetable entries"""
        diff = start_dt - self.start_dt
        # only move top-level entries; children follow their parent
        for entry in self.timetable_entries.filter(TimetableEntry.parent_id.is_(None)):
            new_dt = entry.start_dt + diff
            entry.move(new_dt)
        self.start_dt = start_dt

    def preload_all_acl_entries(self):
        db.m.Contribution.preload_acl_entries(self)
        db.m.Session.preload_acl_entries(self)

    @return_ascii
    def __repr__(self):
        # TODO: add self.protection_repr once we use it
        return format_repr(self, 'id', 'start_dt', 'end_dt', is_deleted=False,
                           _text=text_to_repr(self.title, max_length=75))

    # TODO: Remove the next block of code once event acls (read access) are migrated
    def _fail(self, *args, **kwargs):
        raise NotImplementedError('These properties are not usable until event ACLs are in the new DB')

    is_public = classproperty(classmethod(_fail))
    is_inheriting = classproperty(classmethod(_fail))
    is_self_protected = classproperty(classmethod(_fail))
    protection_repr = property(_fail)
    del _fail
class Cinema(DeclarativeBase):
    """A cinema aggregated from several crawled sources.

    Because each crawled site may use a different name and screen list for
    the same physical cinema, names and screens are stored as collections
    and merged incrementally (see :meth:`merge`).
    """
    __tablename__ = "cinema"

    id = Column(Integer, primary_key=True)
    # name may differ depends on crawled site, so we collect all names
    # in order to make query easier.
    names = Column('names', ARRAY(String), nullable=False)
    county = Column('county', String, nullable=False)
    company = Column('company', String)
    site = Column('site', String)
    # screens are handled as same as names
    screens = Column('screens', JSONB, nullable=False)
    # as screens may contain multiple versions of single screen,
    # we use next two column to help identify a cinema
    screen_count = Column('screen_count', Integer, nullable=False)
    total_seats = Column('total_seats', Integer, nullable=False)
    # site that data mainly crawled from
    source = Column('source', String, nullable=False)

    @staticmethod
    def get_cinema_if_exist(item):
        """
        Get cinema if it already exists in database, otherwise return None

        As data crawled from those sites often differs between each other,
        we have several rules to use to find exist cinema:
        - first of all, same "county", then:
        - have "site", same "site";
        - have name in "names", same name in "names";
        Some cinemas may be treated as different cinemas when crawled from
        different site but we will leave them there now.
        """
        query = Session.query(Cinema).filter(and_(
            Cinema.county == item.county,
            or_(
                and_(item.site is not None, Cinema.site == item.site),
                and_(item.names is not None,
                     Cinema.names.overlap(cast(item.names, ARRAY(String)))))))
        result = query.first()
        return result

    @staticmethod
    def get_by_name(cinema_name):
        """Return the first cinema whose names array contains *cinema_name*."""
        query = Session.query(Cinema).filter(Cinema.names.any(cinema_name))
        cinema = query.first()
        return cinema

    @staticmethod
    def get_screen_seat_count(cinema_name, cinema_site, screen):
        """Return the seat count of *screen*, or 0 if the cinema is unknown."""
        query = Session.query(Cinema).filter(or_(
            and_(cinema_site is not None, Cinema.site == cinema_site),
            and_(cinema_name is not None,
                 Cinema.names.overlap(cast([cinema_name], ARRAY(String))))))
        cinema = query.first()
        if not cinema:
            return 0
        screens = cinema.screens
        # get screen data from cinema data in database.
        # this is a bit difficult as there is no standard screen name exist.
        return ScreenUtils.get_seat_count(screens, cinema_name, screen)

    class MergeMethod(Enum):
        info_only = 1  # update names and screens only
        update_count = 2  # also update screen count and total seat number
        replace = 3  # replace all data

    def merge(self, new_cinema, merge_method):
        """
        merge data from new crawled cinema data depends on strategy
        """
        if merge_method == self.MergeMethod.info_only:
            self.names.extend(x for x in new_cinema.names if x not in self.names)
            # existing screen data wins over freshly crawled data
            new_cinema.screens.update(self.screens)
            self.screens = new_cinema.screens
        elif merge_method == self.MergeMethod.update_count:
            self.names.extend(x for x in new_cinema.names if x not in self.names)
            for new_screen in new_cinema.screens:
                if new_screen not in self.screens:
                    curr_seat_count = int(new_cinema.screens[new_screen])
                    self.screens[new_screen] = curr_seat_count
                    self.screen_count += 1
                    self.total_seats += curr_seat_count
        else:
            # BUG FIX: the original did `self = new_cinema`, which merely
            # rebinds the local name and leaves the mapped instance (and the
            # database row) untouched, making `replace` a silent no-op.
            # Copy the new data onto this persistent instance instead,
            # keeping its primary key.
            self.names = new_cinema.names
            self.county = new_cinema.county
            self.company = new_cinema.company
            self.site = new_cinema.site
            self.screens = new_cinema.screens
            self.screen_count = new_cinema.screen_count
            self.total_seats = new_cinema.total_seats
            self.source = new_cinema.source
class Person(Base):
    """A politician/person record with profile fields and political relations."""
    __tablename__ = 'person'

    id = Column(Integer, primary_key=True)

    ### Fields ###
    name = Column(Unicode(20), nullable=False, index=True)
    name_en = Column(String(80), index=True)
    name_cn = Column(Unicode(20), index=True)
    gender = Column(Enum('m', 'f', name='enum_gender'), index=True)
    # 8 characters; parsed below as YYYY[:4] MM[4:6] DD[6:8]
    birthday = Column(CHAR(8), index=True)
    education = deferred(Column(ARRAY(Unicode(60))), group='profile')
    education_id = deferred(Column(ARRAY(String(20))), group='profile')
    address = deferred(Column(ARRAY(Unicode(20))), group='profile')
    address_id = deferred(Column(ARRAY(String(16))), group='profile')
    image = Column(String(1024))
    email = deferred(Column(Text), group='extra')
    twitter = deferred(Column(String(20)), group='extra')
    facebook = deferred(Column(String(80)), group='extra')
    blog = deferred(Column(String(255)), group='extra')
    homepage = deferred(Column(String(255)), group='extra')
    wiki = deferred(Column(Text), group='extra')
    extra_vars = deferred(Column(Text), group='extra')

    ### Relations ###
    candidacies = relationship('Candidacy',
                               order_by='desc(Candidacy.assembly_id)',
                               backref='person')
    bills_ = relationship('Bill', secondary=cosponsorship,
                          order_by='desc(Bill.proposed_date)',
                          backref='cosponsors')
    withdrawed_bills = relationship('Bill', secondary=bill_withdrawal,
                                    backref='withdrawers')
    parties = relationship('Party', secondary=PartyAffiliation.__table__,
                           order_by='desc(PartyAffiliation.date)',
                           backref=backref('members', lazy='dynamic'),
                           lazy='dynamic')

    @hybrid_property
    def birthday_year(self):
        # Python side: parse the year out of the YYYYMMDD string
        return int(self.birthday[:4])

    @birthday_year.expression
    def birthday_year(cls):
        # SQL side: substring of the stored CHAR(8)
        return func.substr(cls.birthday, 1, 4)

    @property
    def birthday_month(self):
        # '00' placeholder falls back to January
        return int(self.birthday[4:6]) or 1

    @property
    def birthday_day(self):
        # '00' placeholder falls back to the 1st
        return int(self.birthday[6:8]) or 1

    @property
    def birthday_date(self):
        return date(self.birthday_year, self.birthday_month, self.birthday_day)

    @property
    def birthday_formatted(self):
        return format_date(self.birthday_date)

    @property
    def age(self):
        # NOTE(review): `current_year + 1 - birth_year` looks like East Asian
        # (inclusive) age reckoning rather than Western age — confirm intent.
        return date.today().year + 1 - self.birthday_year

    @property
    def ages(self):
        """Age bucketed to a decade, clamped to the [30, 70] range."""
        if self.age < 30:
            return 30
        elif self.age >= 70:
            return 70
        else:
            # NOTE(review): `/` here is integer division only under Python 2;
            # under Python 3 this yields a float (e.g. 45 -> 40.0 vs 40).
            return (self.age / 10) * 10

    @property
    def cur_party(self):
        # parties is ordered by affiliation date (desc), so first() is current
        return self.parties.first()
        # NOTE(review): this chunk is the tail of an Alembic
        # `op.create_table(...)` call; the table name and the leading
        # columns are outside this view.
        nullable=True,
    ),
    # timestamps are maintained by the database server
    sa.Column("creation_date", sa.DateTime(), nullable=False, server_default=func.now()),
    sa.Column(
        "last_change_date",
        sa.DateTime(),
        nullable=False,
        server_default=func.now(),
        onupdate=func.now(),  # this will auto-update on modification
    ),
    sa.Column("access_rights", JSONB, nullable=False, server_default=sa.text("'{}'::jsonb")),
    sa.Column("workbench", sa.JSON, nullable=False),
    sa.Column(
        "classifiers",
        ARRAY(sa.String, dimensions=1),
        nullable=False,
        server_default="{}",  # empty Postgres array literal
    ),
    sa.Column("dev", JSONB, nullable=False, server_default=sa.text("'{}'::jsonb")),
    sa.Column("published", sa.Boolean, nullable=False, default=False),
)
def create_sql_filter(self, data_list):
    """Build a filter clause matching rows whose data contains any of *data_list*."""
    values_as_array = db.func.cast(data_list, ARRAY(db.String))
    return RegistrationData.data.has_any(values_as_array)
class CohortFilter(Base):
    """A saved student cohort: named filter criteria plus cached query results.

    ``sids``, ``student_count`` and ``alert_count`` are caches derived from
    ``filter_criteria``; they are invalidated (set to None) when the criteria
    change and repopulated lazily by :meth:`to_api_json`.
    """
    __tablename__ = 'cohort_filters'

    id = db.Column(db.Integer, nullable=False, primary_key=True)  # noqa: A003
    name = db.Column(db.String(255), nullable=False)
    filter_criteria = db.Column(JSONB, nullable=False)
    # Fetching a large array literal from Postgres can be expensive. We defer until invoking code demands it.
    sids = deferred(db.Column(ARRAY(db.String(80))))
    student_count = db.Column(db.Integer)
    alert_count = db.Column(db.Integer)
    owners = db.relationship('AuthorizedUser', secondary=cohort_filter_owners, back_populates='cohort_filters')

    def __init__(self, name, filter_criteria):
        self.name = name
        self.filter_criteria = filter_criteria

    def __repr__(self):
        return f"""<CohortFilter {self.id}, name={self.name}, owners={self.owners}, filter_criteria={self.filter_criteria}, sids={self.sids}, student_count={self.student_count}, alert_count={self.alert_count}, updated_at={self.updated_at}, created_at={self.created_at}>"""

    @classmethod
    def create(cls, uid, name, filter_criteria, **kwargs):
        """Create a cohort owned by the user with *uid*; returns its API JSON."""
        # reject criteria where every value is empty/None (bools are allowed)
        if all(not isinstance(value, bool) and not value for value in filter_criteria.values()):
            raise InternalServerError('Cohort creation requires at least one filter specification.')
        cohort = cls(name=name, filter_criteria=filter_criteria)
        user = AuthorizedUser.find_by_uid(uid)
        user.cohort_filters.append(cohort)
        db.session.flush()
        std_commit()
        return cohort.to_api_json(**kwargs)

    @classmethod
    def update(cls, cohort_id, name=None, filter_criteria=None, alert_count=None, **kwargs):
        """Update name/criteria/alert count of an existing cohort."""
        cohort = cls.query.filter_by(id=cohort_id).first()
        if name:
            cohort.name = name
        if filter_criteria:
            cohort.filter_criteria = filter_criteria
            # criteria changed: invalidate the cached query results
            # NOTE(review): nesting reconstructed from mangled source — confirm
            # whether the cache reset was meant to be unconditional.
            cohort.sids = None
            cohort.student_count = None
        if alert_count is not None:
            cohort.alert_count = alert_count
        else:
            # Alert count will be refreshed
            cohort.update_alert_count(None)
        std_commit()
        return cohort.to_api_json(**kwargs)

    @classmethod
    def get_sids(cls, cohort_id):
        """Return the cached SIDs of a cohort (undeferring the column)."""
        query = db.session.query(cls).options(undefer('sids'))
        cohort = query.filter_by(id=cohort_id).first()
        return cohort and cohort.sids

    def update_sids_and_student_count(self, sids, student_count):
        self.sids = sids
        self.student_count = student_count
        std_commit()
        return self

    def update_alert_count(self, count):
        self.alert_count = count
        std_commit()
        return self

    @classmethod
    def share(cls, cohort_id, user_id):
        """Add another owner to an existing cohort."""
        cohort = cls.query.filter_by(id=cohort_id).first()
        user = AuthorizedUser.find_by_uid(user_id)
        user.cohort_filters.append(cohort)
        std_commit()

    @classmethod
    def get_cohorts_of_user_id(cls, user_id):
        """Summaries of all cohorts owned by *user_id*, ordered by name."""
        query = text(f"""
            SELECT id, name, filter_criteria, alert_count, student_count
            FROM cohort_filters c
            LEFT JOIN cohort_filter_owners o ON o.cohort_filter_id = c.id
            WHERE o.user_id = :user_id
            ORDER BY c.name
        """)
        results = db.session.execute(query, {'user_id': user_id})

        def transform(row):
            return {
                'id': row['id'],
                'name': row['name'],
                'criteria': row['filter_criteria'],
                'alertCount': row['alert_count'],
                'totalStudentCount': row['student_count'],
            }
        return [transform(row) for row in results]

    @classmethod
    def get_cohorts_owned_by_uids(cls, uids):
        """Summaries (with owner UIDs) of cohorts owned by any of *uids*."""
        query = text(f"""
            SELECT c.id, c.name, c.filter_criteria, c.alert_count, c.student_count,
                ARRAY_AGG(uid) authorized_users
            FROM cohort_filters c
            INNER JOIN cohort_filter_owners o ON c.id = o.cohort_filter_id
            INNER JOIN authorized_users u ON o.user_id = u.id
            WHERE u.uid = ANY(:uids)
            GROUP BY c.id, c.name, c.filter_criteria, c.alert_count, c.student_count
        """)
        results = db.session.execute(query, {'uids': uids})

        def transform(row):
            return {
                'id': row['id'],
                'name': row['name'],
                'criteria': row['filter_criteria'],
                'owners': row['authorized_users'],
                'alertCount': row['alert_count'],
                'totalStudentCount': row['student_count'],
            }
        return [transform(row) for row in results]

    @classmethod
    def is_cohort_owned_by(cls, cohort_id, user_id):
        """Return a truthy count if *user_id* owns the cohort."""
        query = text(f"""
            SELECT count(*) FROM cohort_filters c
            LEFT JOIN cohort_filter_owners o ON o.cohort_filter_id = c.id
            WHERE o.user_id = :user_id
            AND c.id = :cohort_id
        """)
        results = db.session.execute(
            query,
            {
                'cohort_id': cohort_id,
                'user_id': user_id,
            },
        )
        return results.first()['count']

    @classmethod
    def refresh_alert_counts_for_owner(cls, owner_id):
        """Recompute per-cohort counts of undismissed, active current-term alerts."""
        query = text(f"""
            UPDATE cohort_filters
            SET alert_count = updated_cohort_counts.alert_count
            FROM (
                SELECT cohort_filters.id AS cohort_filter_id, count(*) AS alert_count
                FROM alerts
                JOIN cohort_filters
                    ON alerts.sid = ANY(cohort_filters.sids)
                    AND alerts.key LIKE :key
                    AND alerts.active IS TRUE
                JOIN cohort_filter_owners
                    ON cohort_filters.id = cohort_filter_owners.cohort_filter_id
                    AND cohort_filter_owners.user_id = :owner_id
                LEFT JOIN alert_views
                    ON alert_views.alert_id = alerts.id
                    AND alert_views.viewer_id = :owner_id
                WHERE alert_views.dismissed_at IS NULL
                GROUP BY cohort_filters.id
            ) updated_cohort_counts
            WHERE cohort_filters.id = updated_cohort_counts.cohort_filter_id
        """)
        result = db.session.execute(query, {'owner_id': owner_id, 'key': current_term_id() + '_%'})
        std_commit()
        return result

    @classmethod
    def find_by_id(cls, cohort_id, **kwargs):
        cohort = cls.query.filter_by(id=cohort_id).first()
        return cohort and cohort.to_api_json(**kwargs)

    @classmethod
    def delete(cls, cohort_id):
        cohort_filter = cls.query.filter_by(id=cohort_id).first()
        db.session.delete(cohort_filter)
        std_commit()

    def to_api_json(
        self,
        order_by=None,
        offset=0,
        limit=50,
        alert_offset=None,
        alert_limit=None,
        include_sids=False,
        include_students=True,
        include_profiles=False,
        include_alerts_for_user_id=None,
    ):
        """Serialize the cohort, optionally running the students/alerts queries
        and refreshing the cached sids/student_count/alert_count on the way."""
        benchmark = get_benchmarker(f'CohortFilter {self.id} to_api_json')
        benchmark('begin')
        c = self.filter_criteria
        # criteria may be stored as a JSON string or a dict
        c = c if isinstance(c, dict) else json.loads(c)
        coe_advisor_ldap_uids = util.get(c, 'coeAdvisorLdapUids')
        if not isinstance(coe_advisor_ldap_uids, list):
            coe_advisor_ldap_uids = [coe_advisor_ldap_uids] if coe_advisor_ldap_uids else None
        cohort_name = self.name
        cohort_json = {
            'id': self.id,
            'code': self.id,
            'name': cohort_name,
            'owners': [],
            'alertCount': self.alert_count,
        }
        for owner in self.owners:
            cohort_json['owners'].append({
                'uid': owner.uid,
                'deptCodes': [m.university_dept.dept_code for m in owner.department_memberships],
            })
        coe_ethnicities = c.get('coeEthnicities')
        coe_genders = c.get('coeGenders')
        coe_prep_statuses = c.get('coePrepStatuses')
        coe_probation = util.to_bool_or_none(c.get('coeProbation'))
        coe_underrepresented = util.to_bool_or_none(c.get('coeUnderrepresented'))
        cohort_owner_academic_plans = util.get(c, 'cohortOwnerAcademicPlans')
        entering_terms = c.get('enteringTerms')
        ethnicities = c.get('ethnicities')
        expected_grad_terms = c.get('expectedGradTerms')
        genders = c.get('genders')
        gpa_ranges = c.get('gpaRanges')
        group_codes = c.get('groupCodes')
        in_intensive_cohort = util.to_bool_or_none(c.get('inIntensiveCohort'))
        is_inactive_asc = util.to_bool_or_none(c.get('isInactiveAsc'))
        is_inactive_coe = util.to_bool_or_none(c.get('isInactiveCoe'))
        last_name_ranges = c.get('lastNameRanges')
        last_term_gpa_ranges = c.get('lastTermGpaRanges')
        levels = c.get('levels')
        majors = c.get('majors')
        midpoint_deficient_grade = util.to_bool_or_none(c.get('midpointDeficient'))
        team_groups = athletics.get_team_groups(group_codes) if group_codes else []
        transfer = util.to_bool_or_none(c.get('transfer'))
        underrepresented = util.to_bool_or_none(c.get('underrepresented'))
        unit_ranges = c.get('unitRanges')
        cohort_json.update({
            'criteria': {
                'coeAdvisorLdapUids': coe_advisor_ldap_uids,
                'coeEthnicities': coe_ethnicities,
                'coeGenders': coe_genders,
                'coePrepStatuses': coe_prep_statuses,
                'coeProbation': coe_probation,
                'coeUnderrepresented': coe_underrepresented,
                'cohortOwnerAcademicPlans': cohort_owner_academic_plans,
                'enteringTerms': entering_terms,
                'ethnicities': ethnicities,
                'expectedGradTerms': expected_grad_terms,
                'genders': genders,
                'gpaRanges': gpa_ranges,
                'groupCodes': group_codes,
                'inIntensiveCohort': in_intensive_cohort,
                'isInactiveAsc': is_inactive_asc,
                'isInactiveCoe': is_inactive_coe,
                'lastNameRanges': last_name_ranges,
                'lastTermGpaRanges': last_term_gpa_ranges,
                'levels': levels,
                'majors': majors,
                'midpointDeficient': midpoint_deficient_grade,
                'transfer': transfer,
                'unitRanges': unit_ranges,
                'underrepresented': underrepresented,
            },
            'teamGroups': team_groups,
        })
        if not include_students and not include_alerts_for_user_id and self.student_count is not None:
            # No need for a students query; return the database-stashed student count.
            cohort_json.update({
                'totalStudentCount': self.student_count,
            })
            benchmark('end')
            return cohort_json
        benchmark('begin students query')
        sids_only = not include_students
        # Translate the "My Students" filter, if present, into queryable criteria. Although our
        # database relationships allow for multiple cohort owners, we assume a single owner here
        # since the "My Students" filter makes no sense in any other scenario.
        if cohort_owner_academic_plans:
            if self.owners:
                owner_sid = get_csid_for_uid(app, self.owners[0].uid)
            else:
                owner_sid = current_user.get_csid()
            advisor_plan_mappings = [{'advisor_sid': owner_sid, 'academic_plan_code': plan}
                                     for plan in cohort_owner_academic_plans]
        else:
            advisor_plan_mappings = None
        results = query_students(
            advisor_plan_mappings=advisor_plan_mappings,
            coe_advisor_ldap_uids=coe_advisor_ldap_uids,
            coe_ethnicities=coe_ethnicities,
            coe_genders=coe_genders,
            coe_prep_statuses=coe_prep_statuses,
            coe_probation=coe_probation,
            coe_underrepresented=coe_underrepresented,
            entering_terms=entering_terms,
            ethnicities=ethnicities,
            expected_grad_terms=expected_grad_terms,
            genders=genders,
            gpa_ranges=gpa_ranges,
            group_codes=group_codes,
            in_intensive_cohort=in_intensive_cohort,
            include_profiles=(include_students and include_profiles),
            is_active_asc=None if is_inactive_asc is None else not is_inactive_asc,
            is_active_coe=None if is_inactive_coe is None else not is_inactive_coe,
            last_name_ranges=last_name_ranges,
            last_term_gpa_ranges=last_term_gpa_ranges,
            levels=levels,
            limit=limit,
            majors=majors,
            midpoint_deficient_grade=midpoint_deficient_grade,
            offset=offset,
            order_by=order_by,
            sids_only=sids_only,
            transfer=transfer,
            underrepresented=underrepresented,
            unit_ranges=unit_ranges,
        )
        benchmark('end students query')
        if results:
            # Cohort might have tens of thousands of SIDs.
            if include_sids:
                cohort_json['sids'] = results['sids']
            cohort_json.update({
                'totalStudentCount': results['totalStudentCount'],
            })
            # If the cohort is new or cache refresh is underway then store student_count and sids in the db.
            if self.student_count is None:
                self.update_sids_and_student_count(results['sids'], results['totalStudentCount'])
            if include_students:
                cohort_json.update({
                    'students': results['students'],
                })
            if include_alerts_for_user_id:
                benchmark('begin alerts query')
                alert_count_per_sid = Alert.include_alert_counts_for_students(
                    viewer_user_id=include_alerts_for_user_id,
                    group=results,
                    offset=alert_offset,
                    limit=alert_limit,
                )
                benchmark('end alerts query')
                cohort_json.update({
                    'alerts': alert_count_per_sid,
                })
                if self.alert_count is None:
                    alert_count = sum(student['alertCount'] for student in alert_count_per_sid)
                    self.update_alert_count(alert_count)
                    cohort_json.update({
                        'alertCount': alert_count,
                    })
        benchmark('end')
        return cohort_json