def create_mapper(barcoded_location_tbl, rack_barcoded_location_tbl):
    """Mapper factory: configure the ORM mapper for ``BarcodedLocation``.

    :param barcoded_location_tbl: table holding the barcoded locations.
    :param rack_barcoded_location_tbl: association table linking racks to
        locations; aliased here so the EXISTS subquery can reference it
        independently of any outer query.
    :return: the configured mapper.
    """
    bl = barcoded_location_tbl
    rbl = rack_barcoded_location_tbl.alias()
    m = mapper(BarcodedLocation, barcoded_location_tbl,
               id_attribute='barcoded_location_id',
               slug_expression=lambda cls: as_slug_expression(cls.name),
               properties=dict(
                    # case-insensitive comparisons on the label column
                    label=column_property(
                        barcoded_location_tbl.c.label,
                        comparator_factory=CaseInsensitiveComparator
                        ),
                    device=relationship(Device, uselist=False,
                                        back_populates='locations'),
                    location_rack=relationship(
                        BarcodedLocationRack,
                        uselist=False,
                        back_populates='location',
                        # lazy='joined',
                        cascade='all,delete,delete-orphan',
                        single_parent=True
                        ),
                    # deferred flag: True when no rack occupies this location
                    empty=column_property(
                        ~exists(  # the symbol "~" negates the clause
                            select([rbl.c.barcoded_location_id],
                                   bl.c.barcoded_location_id ==
                                                rbl.c.barcoded_location_id
                                   )),
                        deferred=True
                        ),
                    )
               )
    return m
def create_mapper(tag_tbl, tag_domain_tbl, tag_predicate_tbl, tag_value_tbl,
                  tagging_tbl):
    """Mapper factory: configure the ORM mapper for ``Tag``.

    A tag is modelled as domain:predicate=value, with each part normalized
    into its own table; the three ``column_property`` attributes below pull
    the denormalized strings back in via correlated subqueries.
    """
    m = mapper(Tag, tag_tbl,
               id_attribute='tag_id',
               # slug is the full "domain:predicate=value" string
               slug_expression=lambda cls: as_slug_expression(
                   func.concatenate(cls.domain, ':', cls.predicate,
                                    '=', cls.value)),
               # extension keeps the domain/predicate/value tables in sync
               extension=TagMapperExtension(tag_domain_tbl,
                                            tag_predicate_tbl,
                                            tag_value_tbl),
               properties=dict(
                   tagged=relationship(Tagged, secondary=tagging_tbl,
                                       back_populates='tags'),
                   domain=column_property(
                       select([tag_domain_tbl.c.domain])
                       .where(tag_tbl.c.tag_domain_id ==
                              tag_domain_tbl.c.tag_domain_id)
                       ),
                   predicate=column_property(
                       select([tag_predicate_tbl.c.predicate])
                       .where(tag_tbl.c.tag_predicate_id ==
                              tag_predicate_tbl.c.tag_predicate_id)
                       ),
                   value=column_property(
                       select([tag_value_tbl.c.value])
                       .where(tag_tbl.c.tag_value_id ==
                              tag_value_tbl.c.tag_value_id)
                       ),
                   )
               )
    return m
def _mappers_configured(): event_alias = db.aliased(Event) # Event.category_chain -- the category ids of the event, starting # with the root category down to the event's immediate parent. cte = Category.get_tree_cte() query = select([cte.c.path]).where(cte.c.id == Event.category_id).correlate_except(cte) Event.category_chain = column_property(query, deferred=True) # Event.effective_protection_mode -- the effective protection mode # (public/protected) of the event, even if it's inheriting it from its # parent category query = (select([db.case({ProtectionMode.inheriting.value: Category.effective_protection_mode}, else_=Event.protection_mode, value=Event.protection_mode)]) .where(Category.id == Event.category_id)) Event.effective_protection_mode = column_property(query, deferred=True) # Event.series_pos -- the position of the event in its series subquery = (select([event_alias.id, db.func.row_number().over(order_by=(event_alias.start_dt, event_alias.id)).label('pos')]) .where((event_alias.series_id == Event.series_id) & ~event_alias.is_deleted) .correlate(Event) .alias()) query = select([subquery.c.pos]).where(subquery.c.id == Event.id).correlate_except(subquery) Event.series_pos = column_property(query, group='series', deferred=True) # Event.series_count -- the number of events in the event's series query = (db.select([db.func.count(event_alias.id)]) .where((event_alias.series_id == Event.series_id) & ~event_alias.is_deleted) .correlate_except(event_alias)) Event.series_count = column_property(query, group='series', deferred=True)
def test_cascading_extensions(self):
    """Verify that a column_property's AttributeExtension is inherited by
    subclasses and can be overridden per-subclass.

    NOTE: ``sa.orm.AttributeExtension`` is a legacy hook; this test pins
    its cascading behavior across a three-level single-table hierarchy.
    """
    t1 = self.tables.t1
    ext_msg = []  # records every set() call, in order, across both extensions

    class Ex1(sa.orm.AttributeExtension):
        def set(self, state, value, oldvalue, initiator):
            ext_msg.append("Ex1 %r" % value)
            return "ex1" + value

    class Ex2(sa.orm.AttributeExtension):
        def set(self, state, value, oldvalue, initiator):
            ext_msg.append("Ex2 %r" % value)
            return "ex2" + value

    class A(fixtures.BasicEntity):
        pass

    class B(A):
        pass

    class C(B):
        pass

    # A defines 'data' with Ex1; B inherits it untouched; C overrides with Ex2.
    mapper(
        A,
        t1,
        polymorphic_on=t1.c.type,
        polymorphic_identity="a",
        properties={"data": column_property(t1.c.data, extension=Ex1())},
    )
    mapper(B, polymorphic_identity="b", inherits=A)
    mc = mapper(  # mapper handle kept, though unused below
        C,
        polymorphic_identity="c",
        inherits=B,
        properties={"data": column_property(t1.c.data, extension=Ex2())}
    )

    a1 = A(data="a1")
    b1 = B(data="b1")
    c1 = C(data="c1")

    # Ex1 applies to A and B; Ex2 only to C.
    eq_(a1.data, "ex1a1")
    eq_(b1.data, "ex1b1")
    eq_(c1.data, "ex2c1")

    a1.data = "a2"
    b1.data = "b2"
    c1.data = "c2"
    eq_(a1.data, "ex1a2")
    eq_(b1.data, "ex1b2")
    eq_(c1.data, "ex2c2")

    eq_(ext_msg,
        ["Ex1 'a1'", "Ex1 'b1'", "Ex2 'c1'",
         "Ex1 'a2'", "Ex1 'b2'", "Ex2 'c2'"])
def create_mapper(refseq_gene_tbl, molecule_design_gene_tbl,
                  molecule_design_set_gene_tbl, molecule_design_tbl,
                  molecule_design_pool_tbl):
    """Mapper factory: configure the ORM mapper for ``Gene``.

    Both many-to-many relationships are view-only with explicit join
    conditions because the association tables carry no real FK metadata
    usable for automatic join inference (hence ``foreign_keys=...``).
    """
    rsg = refseq_gene_tbl
    mdg = molecule_design_gene_tbl
    mdsg = molecule_design_set_gene_tbl
    md = molecule_design_tbl
    mdp = molecule_design_pool_tbl
    m = mapper(Gene, rsg,
               id_attribute='gene_id',
               slug_expression=lambda cls: as_slug_expression(cls.accession),
               properties=dict(
                    accession=column_property(
                        rsg.c.accession,
                        comparator_factory=CaseInsensitiveComparator
                        ),
                    locus_name=column_property(
                        rsg.c.locus_name,
                        comparator_factory=CaseInsensitiveComparator
                        ),
                    molecule_designs=relationship(
                            MoleculeDesign,
                            viewonly=True,
                            secondary=mdg,
                            primaryjoin=(mdg.c.gene_id == rsg.c.gene_id),
                            secondaryjoin=(md.c.molecule_design_id ==
                                           mdg.c.molecule_design_id),
                            foreign_keys=(mdg.c.molecule_design_id,
                                          mdg.c.gene_id),
                            back_populates='genes',
                            ),
                    molecule_design_pools=relationship(
                            MoleculeDesignPool,
                            viewonly=True,
                            secondary=mdsg,
                            primaryjoin=(mdsg.c.gene_id == rsg.c.gene_id),
                            secondaryjoin=(mdsg.c.molecule_design_set_id ==
                                           mdp.c.molecule_design_set_id),
                            foreign_keys=(mdsg.c.molecule_design_set_id,
                                          mdsg.c.gene_id),
                            ),
                    species=relationship(Species,
                                         uselist=False,
                                         back_populates='genes',
                                         # lazy='joined'
                                         ),
                    ),
               )
    # Expose 'locus_name' under the conventional 'name' attribute as well.
    Gene.name = synonym('locus_name')
    return m
def test_attrs_props_prop_added_after_configure(self):
    """Verify that inspect().attrs and .all_orm_descriptors reflect
    properties and hybrids added *after* the mapper is configured."""
    class AnonClass(object):
        pass

    from sqlalchemy.orm import mapper, column_property
    from sqlalchemy.ext.hybrid import hybrid_property

    m = mapper(AnonClass, self.tables.users)

    # baseline: only the mapped columns are visible
    eq_(set(inspect(AnonClass).attrs.keys()),
        set(['id', 'name']))
    eq_(set(inspect(AnonClass).all_orm_descriptors.keys()),
        set(['id', 'name']))

    # add an ORM property post-configuration...
    m.add_property('q', column_property(self.tables.users.c.name))

    # ...and a hybrid attached directly to the class
    def desc(self):
        return self.name
    AnonClass.foob = hybrid_property(desc)

    # 'q' shows up in both views; 'foob' only among descriptors
    eq_(set(inspect(AnonClass).attrs.keys()),
        set(['id', 'name', 'q']))
    eq_(set(inspect(AnonClass).all_orm_descriptors.keys()),
        set(['id', 'name', 'q', 'foob']))
def iri_class_decorator(klass):
    """Class decorator exposing an IRI view of a numeric id column.

    Adds a hidden column_property (``'_' + iri_propname``) holding the
    IRI computed from the id column, and a hybrid property
    (``iri_propname``) that reads/writes/deletes through it.
    ``iri_propname``, ``iri_id_colname``, ``id_to_iri`` and ``iri_to_id``
    are module-level names — presumably configured elsewhere; confirm
    against the enclosing module.
    """
    iri_hpropname = '_' + iri_propname
    setattr(klass, iri_hpropname,
            column_property(id_to_iri(getattr(klass, iri_id_colname))))

    def iri_accessor(self):
        return getattr(self, iri_hpropname)

    def iri_expression(klass):
        # class-level (SQL) form of the accessor
        return id_to_iri(getattr(klass, iri_id_colname))

    def iri_setter(self, val):
        # keep both the cached IRI and the underlying id in sync
        setattr(self, iri_hpropname, val)
        setattr(self, iri_id_colname, iri_to_id(val))

    def iri_deleter(self):
        setattr(self, iri_id_colname, None)

    col = getattr(klass, iri_id_colname)
    if not col.property.columns[0].nullable:
        # a non-nullable id column cannot be "deleted" to None,
        # so the hybrid gets no deleter at all
        iri_deleter = None
    prop = hybrid_property(
        iri_accessor, iri_setter, iri_deleter, iri_expression)
    setattr(klass, iri_propname, prop)
    return klass
def GeometryColumn(*args, **kw):
    """Define a declarative column property with GIS behavior.

    This just produces orm.column_property() with the appropriate
    extension and comparator_factory arguments.  The given arguments are
    passed through to Column.  The declarative module extracts the Column
    for inclusion in the mapped table.

    This method can also be used for non-declarative mappings to
    set the properties for a geometry column when defining the mapping.

    :param comparator: (keyword-only) comparator class to use; defaults
        to ``SpatialComparator``.
    :return: an ``orm.column_property`` wrapping the geometry column.
    """
    # dict.has_key() was removed in Python 3; pop() with a default is the
    # equivalent, version-agnostic idiom.
    comparator = kw.pop("comparator", SpatialComparator)
    if isinstance(args[0], GeometryExtensionColumn):
        # non-declarative use: the caller passed the table's column directly
        column = args[0]
        args = args[1:]
    else:
        # declarative use: create a new column
        column = GeometryExtensionColumn(*args, **kw)
    return column_property(
        column,
        extension=SpatialAttribute(),
        comparator_factory=comparator
    )
def create_mapper(rack_tbl, rack_barcoded_location_tbl):
    """Mapper factory: configure the polymorphic base mapper for ``Rack``.

    ``_location`` is a view-only shortcut across the rack/location
    association table; writes go through ``location_rack`` instead.
    """
    rbl = rack_barcoded_location_tbl
    m = mapper(Rack, rack_tbl,
               id_attribute='rack_id',
               slug_expression=lambda cls: as_slug_expression(cls.barcode),
               properties=dict(
                    label=column_property(
                        rack_tbl.c.label,
                        comparator_factory=CaseInsensitiveComparator
                        ),
                    specs=relationship(RackSpecs,
                                       innerjoin=True, uselist=False),
                    status=relationship(ItemStatus,
                                        innerjoin=True, uselist=False),
                    location_rack=relationship(
                        BarcodedLocationRack,
                        uselist=False,
                        back_populates='rack',
                        cascade='all,delete,delete-orphan'),
                    _location=relationship(
                        BarcodedLocation,
                        viewonly=True,
                        uselist=False,
                        secondary=rbl,
                        foreign_keys=(rbl.c.rack_id,
                                      rbl.c.barcoded_location_id),
                        ),
                    ),
               polymorphic_on=rack_tbl.c.rack_type,
               polymorphic_identity=RACK_TYPES.RACK,
               )
    return m
def address_count(cls):
    """Return a scalar column_property counting the addresses of ``cls``.

    NOTE(review): ``counter(cls.id)`` is called purely for its side effect
    — presumably registering the column with an external counter helper;
    confirm against the definition of ``counter``.
    """
    counter(cls.id)
    return column_property(
        select([func.count(Address.id)]).
        where(Address.user_id == cls.id).
        as_scalar()
    )
def create_mapper(rack_specs_tbl):
    """Mapper factory: configure the polymorphic base mapper for
    ``RackSpecs``.

    There is no discriminator column in the table, so one is synthesized:
    a CASE over ``has_movable_subitems`` labels each row as tube-rack or
    plate specs, and the mapper maps against that select.
    """
    rs = rack_specs_tbl
    polymorphic_select = select([
        rs,
        (case([(rs.c.has_movable_subitems,
                literal(RACK_SPECS_TYPES.TUBE_RACK_SPECS))],
              else_=literal(RACK_SPECS_TYPES.PLATE_SPECS))).label(
                                                    'rackspecs_type')
        ],
        ).alias('rackspecs')
    m = mapper(RackSpecs, polymorphic_select,
               id_attribute='rack_specs_id',
               slug_expression=lambda cls: as_slug_expression(cls.name),
               properties=dict(
                   manufacturer=relationship(Organization),
                   shape=relationship(RackShape, uselist=False,
                                      back_populates='specs'),
                   rack_specs_type=column_property(
                       polymorphic_select.c.rackspecs_type),
                   ),
               polymorphic_on=polymorphic_select.c.rackspecs_type,
               polymorphic_identity=RACK_SPECS_TYPES.RACK_SPECS,
               )
    # Conventional alias for the underlying flag column.
    RackSpecs.has_tubes = synonym('has_movable_subitems')
    return m
def __declare_last__(cls):
    """Attach ``calc_is_current_version``: a column_property that is true
    exactly when this row carries the highest version_id for its id."""
    history = cls.__table__.alias()
    newest_check = (
        select([func.max(history.c.version_id) == cls.version_id])
        .where(history.c.id == cls.id)
    )
    cls.calc_is_current_version = column_property(newest_check)
def create_mapper(experiment_metadata_tbl, experiment_metadata_iso_request_tbl):
    """Mapper factory: configure the ORM mapper for ``ExperimentMetadata``.

    ``lab_iso_request`` goes through an association table but behaves as a
    scalar owned child (``single_parent`` + delete-orphan cascade).
    """
    m = mapper(
          ExperimentMetadata, experiment_metadata_tbl,
          id_attribute='experiment_metadata_id',
          slug_expression=lambda cls: as_slug_expression(cls.label),
          properties=dict(
              label=column_property(
                  experiment_metadata_tbl.c.label,
                  comparator_factory=CaseInsensitiveComparator
                  ),
              experiment_design=relationship(
                  ExperimentDesign,
                  uselist=False,
                  cascade='all,delete,delete-orphan',
                  back_populates='experiment_metadata'),
              subproject=relationship(Subproject, uselist=False),
              lab_iso_request=relationship(
                  LabIsoRequest,
                  uselist=False,
                  secondary=experiment_metadata_iso_request_tbl,
                  back_populates='experiment_metadata',
                  cascade='all,delete,delete-orphan',
                  single_parent=True),
              experiment_metadata_type=relationship(ExperimentMetadataType,
                                                    uselist=False),
              ),
          )
    return m
def create_mapper(iso_request_tbl, worklist_series_iso_request_tbl,
                  iso_request_pool_set_tbl):
    """Mapper factory: configure the polymorphic base mapper for
    ``IsoRequest`` (discriminated on ``iso_type``)."""
    wsir = worklist_series_iso_request_tbl
    m = mapper(IsoRequest, iso_request_tbl,
               id_attribute='iso_request_id',
               properties=dict(
                   owner=column_property(
                       iso_request_tbl.c.owner,
                       comparator_factory=CaseInsensitiveComparator
                       ),
                   isos=relationship(Iso, back_populates='iso_request'),
                   # scalar child across association table, owned exclusively
                   worklist_series=relationship(
                       WorklistSeries,
                       uselist=False,
                       secondary=wsir,
                       cascade='all,delete,delete-orphan',
                       single_parent=True),
                   molecule_design_pool_set=relationship(
                       MoleculeDesignPoolSet,
                       secondary=iso_request_pool_set_tbl,
                       uselist=False,
                       single_parent=True,
                       cascade='all,delete,delete-orphan')
                   ),
               polymorphic_on=iso_request_tbl.c.iso_type,
               polymorphic_identity=ISO_TYPES.BASE,
               )
    return m
def sessionmaker(dbconfig):
    """Build a session factory from a configuration dict.

    :param dbconfig: dict containing ``url`` (required), optional
        ``schema``, and any remaining keys, which are forwarded to
        ``create_engine``.  The dict is copied, not mutated.
    :return: the ``Session`` factory produced by ``reflect``, with mapper
        relationships attached and the mapper registry exposed on the
        session class.
    """
    dbconfig = dbconfig.copy()
    conn_str = dbconfig.pop('url')
    # dict.pop with a default replaces the membership-check-then-pop dance.
    schema = dbconfig.pop('schema', None)
    engine = create_engine(conn_str, **dbconfig)
    mappers, tables, Session = reflect(engine, models, schema)

    # add mapper relationships
    mappers['Data'].add_properties({
        'versions': relationship(
            models.Version,
            lazy='dynamic',
            backref=backref('ref', lazy='joined'))
    })
    mappers['Version'].add_properties({
        # defer the (potentially large) blob; expose its size cheaply
        'data': deferred(tables['version'].c['data']),
        'size': column_property(func.length(tables['version'].c['data']))
    })

    # make the mapper registry reachable from session instances
    Session.class_.mappers = mappers
    return Session
def mapper_kw(self):
    """Return the keyword arguments used to map this layer element.

    Joined-table inheritance from the 'element' class: ``_id`` maps the
    child id column together with the parent's id column.
    """
    return {'inherits': self.classes['element'],
            'polymorphic_identity': self.name,
            'properties': {
                '_id': column_property(self.table.c.id,
                                       self.tables['element'].c.id),
                '_charges': self.table.c.charges,
                '_errors': self.table.c.errors,
                'io': relationship(
                    self.classes['neural_network_io'],
                    foreign_keys=self.tables['neural_network_io'].c.layer_id)}}
def mapper_kw(self):
    """Return the keyword arguments used to map this element subtype.

    Joined-table inheritance from the 'element' class: ``_id`` maps the
    child id column together with the parent's id column.
    """
    return {'inherits': self.classes['element'],
            'polymorphic_identity': self.name,
            'properties': {
                '_id': column_property(self.table.c.id,
                                       self.tables['element'].c.id),
                '_layer_id': self.table.c.layer_id,
                '_model_id': self.table.c.model_id,
                '_model': relationship(self.classes['seq']),
                '_json': self.table.c.json}}
def GISColumn(*args, **kw):
    """Define a declarative column property with GIS behavior.

    Produces ``orm.column_property()`` wired with the GIS attribute
    extension and comparator factory.  All arguments are forwarded to
    ``Column``; the declarative machinery extracts that Column for
    inclusion in the mapped table.
    """
    gis_column = Column(*args, **kw)
    return column_property(
        gis_column,
        extension=GisAttribute(),
        comparator_factory=GisComparator,
    )
def create_mapper(organization_tbl):
    """Mapper factory: configure the ORM mapper for ``Organization``."""
    # name compares case-insensitively
    name_prop = column_property(
        organization_tbl.c.name,
        comparator_factory=CaseInsensitiveComparator,
    )
    return mapper(
        Organization,
        organization_tbl,
        id_attribute='organization_id',
        slug_expression=lambda cls: as_slug_expression(cls.name),
        properties=dict(name=name_prop),
    )
def __declare_last__(cls):
    """Attach ``normalized_score``: this row's score min-max normalized to
    0..100 over all rows sharing the same criterion and assignment.

    NOTE(review): when every score in the group is equal, the denominator
    is zero — in SQL that yields NULL (or a division error, depending on
    the backend); confirm the intended handling.
    """
    super(cls, cls).__declare_last__()
    s_alias = cls.__table__.alias()
    cls.normalized_score = column_property(
        select([
            (cls.score - func.min(s_alias.c.score)) /
            (func.max(s_alias.c.score) - func.min(s_alias.c.score)) * 100
        ]).
        where(and_(
            s_alias.c.criterion_id == cls.criterion_id,
            s_alias.c.assignment_id == cls.assignment_id,
        ))
    )
def GeometryColumn(*args, **kw):
    """Define a declarative column property with GIS behavior.

    This just produces orm.column_property() with the appropriate
    extension and comparator_factory arguments.  The given arguments are
    passed through to Column.  The declarative module extracts the Column
    for inclusion in the mapped table.

    :param sfs: (keyword-only) when true, use the SFS comparator instead
        of the SQL/MM one; defaults to False.
    """
    # dict.has_key() was removed in Python 3; pop() with a default works on
    # both Python 2 and 3.  Selecting the comparator first also collapses
    # the two previously duplicated column_property(...) calls into one.
    sfs = kw.pop("sfs", False)
    comparator = SFSComparator if sfs else SQLMMComparator
    return column_property(
        Column(*args, **kw),
        extension=SpatialAttribute(),
        comparator_factory=comparator
    )
def includeme(config):
    """Map ``Account`` onto the reflected 'account' table, adding a
    deferred ``count_content`` property (number of content rows owned)."""
    tables = config.registry['metadata'].tables
    account_tbl = tables['account']
    content_tbl = tables['content']
    count_query = sql.select(
        [sql.func.count()],
        account_tbl.c.id == content_tbl.c.owner_id
    ).label('count_content')
    orm.mapper(Account, account_tbl, properties={
        'count_content': orm.column_property(count_query, deferred=True)
    })
def __declare_last__(cls):
    """Attach ``normalized_score``: this row's score min-max normalized to
    0..100 over active answers within the same assignment.

    NOTE(review): when every score in the group is equal, the denominator
    is zero — in SQL that yields NULL (or a division error, depending on
    the backend); confirm the intended handling.
    """
    super(cls, cls).__declare_last__()
    s_alias = cls.__table__.alias()
    cls.normalized_score = column_property(
        select([
            (cls.score - func.min(s_alias.c.score)) /
            (func.max(s_alias.c.score) - func.min(s_alias.c.score)) * 100
        ]).
        # join through Answer so inactive answers are excluded from the
        # min/max window
        select_from(join(Answer, s_alias,
                         s_alias.c.answer_id == Answer.id)).
        where(and_(
            Answer.active == True,
            s_alias.c.assignment_id == cls.assignment_id,
        ))
    )
def __declare_last__(cls):
    """Attach deferred comment-count column properties to the class.

    All three counters share the same core query — count active,
    non-draft comments on this answer — differing only in extra filters,
    so the select is built by a single local helper.  All are deferred
    and grouped under 'counts' so they load together on first access.
    """
    super(cls, cls).__declare_last__()

    def _count_property(*criteria):
        # Count active, non-draft comments on this answer that also
        # match any additional *criteria*.
        return column_property(
            select([func.count(AnswerComment.id)]).
            where(and_(
                AnswerComment.answer_id == cls.id,
                AnswerComment.active == True,
                AnswerComment.draft == False,
                *criteria
            )),
            deferred=True,
            group='counts'
        )

    cls.comment_count = _count_property()
    cls.public_comment_count = _count_property(
        AnswerComment.comment_type == AnswerCommentType.public)
    cls.self_evaluation_count = _count_property(
        AnswerComment.comment_type == AnswerCommentType.self_evaluation)
def joined(mapped, other):
    '''
    Creates a joined mapped for the provided mapped class with other class. The joined
    mapped class will be cached on the other class.

    @param mapped: class
        The mapped class to create the joined mapped class with.
    @param other: class
        The other class to create the joined mapping with.
    @return: class
        The joined mapped class.
    '''
    assert isclass(mapped), 'Invalid mapped class %s' % mapped
    assert isclass(other), 'Invalid other class %s' % other
    name = '%s%s' % (mapped.__name__, other.__name__)
    try:
        # reuse a previously built joined class cached on `mapped`
        # (NOTE: the docstring says cached on `other`; the code caches on
        # `mapped` — see the setattr at the end)
        return getattr(mapped, name)
    except AttributeError:
        pass

    # Prefix each side's column names with its class name to avoid
    # collisions between the two mappings.
    properties = {}
    mapping, omapping = mappingFor(mapped), mappingFor(other)
    for cp in mapping.iterate_properties:
        if not isinstance(cp, ColumnProperty) or not cp.key:
            continue
        assert isinstance(cp, ColumnProperty)
        properties['%s_%s' % (mapped.__name__, cp.key)] = \
            column_property(getattr(mapping.c, cp.key))
    for cp in omapping.iterate_properties:
        if not isinstance(cp, ColumnProperty) or not cp.key:
            continue
        assert isinstance(cp, ColumnProperty)
        properties['%s_%s' % (other.__name__, cp.key)] = \
            column_property(getattr(omapping.c, cp.key))

    # Map a fresh class onto the natural join of both tables, then cache it.
    clazz = type(name, (object,), {})
    mapper(clazz, join(tableFor(mapped), tableFor(other)),
           properties=properties)
    setattr(mapped, name, clazz)
    return clazz
def get_inherited_denormalized_columns(self, table):
    """Return mapper property args for versioning bookkeeping columns that
    a joined-table child version class shares with its versioned parent.

    Each bookkeeping column (operation type, transaction id, and — under
    the 'validity' strategy — end transaction id) is mapped as a single
    property spanning both the child and parent version tables.  Single-
    and concrete-table inheritance need no such merging.
    """
    parent = find_closest_versioned_parent(self.manager, self.model)
    mapper = sa.inspect(self.model)  # NOTE: shadows any imported `mapper`
    args = {}
    if parent and not (mapper.single or mapper.concrete):
        columns = [
            self.manager.option(self.model, "operation_type_column_name"),
            self.manager.option(self.model, "transaction_column_name"),
        ]
        if self.manager.option(self.model, "strategy") == "validity":
            columns.append(
                self.manager.option(self.model,
                                    "end_transaction_column_name"))
        for column in columns:
            # map child + parent columns under one attribute
            args[column] = column_property(table.c[column],
                                           parent.__table__.c[column])
    return args
def String(maxlen: int=None, minlen: int=None, args: tuple=None,
           as_property: bool=True, info: dict=None, **kwargs):
    """
    SQLAlchemy Column representing a string

    :param minlen: minimum string len
    :param maxlen: maximum string len
    :param as_property: Returns :class:`sqlalchemy.ColumnProperty` otherwise :class:`sqlalchemy.Column`
    :param args: positional arguments passed to the sqlalchemy.Column constructor
    :param info: Passed into info dictionary of the column for usage with form maker or similiar libaries
    :param kwargs:
        * min: passed to formencode.validators.String (and overrides minlen)
        * max: passed to formencode.validators.String (and overrides maxlen)
        * length: passed to sqlalchemy.String (and overrides maxlen)
        * collation: passed to sqlalchemy.Unicode
        * convert_unicode: passed to sqlalchemy.Unicode
        * unicode_error: passed to sqlalchemy.Unicode
        * else: passed to sqlalchemy.Column
    :returns:
        >>> column_property(Column(Unicode(maxlen), nullable=False, server_default=''),
                            info={type='string', validator=String(min=minlen, max=maxlen)})
    :rtype: :class:`str`
    """
    # Split **kwargs three ways: formencode validator args, sqlalchemy type
    # args, and everything else (Column args).
    fvargs = dict(min=minlen, max=maxlen)
    fvargs.update({k: v for k, v in kwargs.items() if k in fvargs})

    sqargs = dict(length=maxlen, collation=None, convert_unicode=True,
                  unicode_error=None)
    sqargs.update({k: v for k, v in kwargs.items() if k in sqargs})

    dwargs = dict(nullable=False, server_default='')
    dwargs.update({k: v for k, v in kwargs.items()
                   if k not in fvargs and k not in sqargs})

    # The old ``isinstance(args, tuple) and args or [args]`` idiom broke for
    # an empty tuple (``() and x or y`` evaluates to ``y``, wrapping it as
    # ``[()]``).  The conditional expression has no such pitfall; the default
    # ``None`` still becomes ``(None,)``, which Column silently ignores.
    args = args if isinstance(args, tuple) else (args,)

    column = Column(sqUnicode(**sqargs), *args, **dwargs)
    column.validator = fvString(**fvargs)

    fmargs = {'type': 'Text', 'dataType': 'text'}
    fmargs.update(info or {})

    if as_property:
        return column_property(column, info=fmargs)
    else:
        # caller wants the raw column; hand back the form-maker info too
        column.info.update(fmargs)
        return column, fmargs
def create_mapper(sample_tbl, sample_molecule_tbl, molecule_tbl,
                  molecule_design_pool_tbl):
    """Mapper factory: configure the polymorphic base mapper for ``Sample``.

    ``molecule_design_pool_id`` is computed in SQL: the md5 of the
    semicolon-joined, ordered molecule design ids of this sample is matched
    against the pools' precomputed ``member_hash``.
    """
    s = sample_tbl
    sm = sample_molecule_tbl
    m = molecule_tbl
    mdp = molecule_design_pool_tbl
    s1 = sample_tbl.alias()
    # FIXME: The following construct introduces a dependency on string_agg
    #        in the SQL engine. Consider a materialized view instead.
    mds_sel = select(
        [mdp.c.molecule_design_set_id],
        mdp.c.member_hash ==
          select([func.md5(
                      string_agg(cast(m.c.molecule_design_id, String),
                                 literal(';'),
                                 order_by=m.c.molecule_design_id))
                  ],
                 from_obj=[s1.join(sm,
                                   and_(sm.c.sample_id == s1.c.sample_id,
                                        s1.c.sample_id == s.c.sample_id))
                           .join(m, m.c.molecule_id == sm.c.molecule_id)
                           ])
                 .group_by(sm.c.sample_id))
    # NOTE: `m` is rebound here from the molecule table to the mapper; the
    # table alias is no longer needed past this point.
    m = mapper(Sample, sample_tbl,
               id_attribute='sample_id',
               properties=dict(
                   container=relationship(Container,
                                          uselist=False,
                                          back_populates='sample'),
                   sample_molecules=relationship(
                       SampleMolecule,
                       back_populates='sample',
                       cascade='all,delete,delete-orphan',
                       ),
                   # deferred scalar: pool id, or NULL when no pool matches
                   molecule_design_pool_id=column_property(
                       coalesce(mds_sel.as_scalar(), null()),
                       deferred=True),
                   ),
               polymorphic_on=sample_tbl.c.sample_type,
               polymorphic_identity=SAMPLE_TYPES.BASIC
               )
    # Listen to changes to the sample_type attribute.
    event.listen(Sample.sample_type, "set", check_set_sample_type)  # pylint: disable=E1101
    return m
def Boolean(default: bool=False, nullable=False, args: tuple=None,
            as_property: bool=True, info: dict=None, **kwargs):
    """
    SQLAlchemy Column representing a boolean

    :param default: default of this column
    :param nullable: accept None -> NULL values
    :param as_property: Returns :class:`sqlalchemy.ColumnProperty` otherwise :class:`sqlalchemy.Column`
    :param args: positional arguments passed to the sqlalchemy.Column constructor
    :param info: Passed into info dictionary of the column for usage with form maker or similiar libaries
    :param kwargs:
        * true_values: list of true string values, passed to formencode validator
        * false_values: list of false string values, passed to formencode validator
        * create_constraint: create a CHECK constraint if required, passed to sqlalchemy.Boolean
        * else: passed to sqlalchemy.Column
    :returns:
        >>> column_property(Column(Boolean, nullable=False, default=default, server_default=default),
                            info={type='boolean', validator=Boolean()})
    :rtype: :class:`bool`
    """
    # Split **kwargs three ways: formencode validator args, sqlalchemy type
    # args, and everything else (Column args).
    fvargs = dict(true_values=fvBoolean.true_values,
                  false_values=fvBoolean.false_values)
    fvargs.update({k: v for k, v in kwargs.items() if k in fvargs})

    sqargs = dict(create_constraint=True)
    sqargs.update({k: v for k, v in kwargs.items() if k in sqargs})

    dwargs = dict(default=default, nullable=nullable)
    if default is not None:
        # server-side default must be a DDL string literal ('0'/'1')
        dwargs['server_default'] = "%s" % int(default)
    dwargs.update({k: v for k, v in kwargs.items()
                   if k not in fvargs and k not in sqargs})

    # The old ``isinstance(args, tuple) and args or [args]`` idiom broke for
    # an empty tuple (``() and x or y`` evaluates to ``y``, wrapping it as
    # ``[()]``).  The conditional expression has no such pitfall; the default
    # ``None`` still becomes ``(None,)``, which Column silently ignores.
    args = args if isinstance(args, tuple) else (args,)

    column = Column(sqBoolean(**sqargs), *args, **dwargs)
    column.validator = fvBoolean(**fvargs)

    fmargs = {'type': 'CheckBox'}
    fmargs.update(info or {})

    if as_property:
        return column_property(column, info=fmargs)
    else:
        # caller wants the raw column; hand back the form-maker info too
        column.info.update(fmargs)
        return column, fmargs
def create_mapper(dbuser_tbl):
    """Mapper factory: configure the ORM mapper for ``User``."""
    m = mapper(User, dbuser_tbl,
               id_attribute='db_user_id',
               slug_expression=
                   lambda cls: as_slug_expression(cls.directory_user_id),
               properties=dict(
                   username=column_property(
                       dbuser_tbl.c.username,
                       comparator_factory=CaseInsensitiveComparator
                       ),
                   # (sic) attribute name 'user_preferenceses' is part of
                   # the public interface and kept as-is
                   user_preferenceses=relationship(
                       UserPreferences,
                       back_populates='user',
                       cascade='all, delete-orphan'
                       ),
                   ),
               )
    return m
# Expression for hybrid ------------------------------------ @classmethod @IsLive.expression def _IsLive(cls): return case([((cls.LiveOn != None) & (cls.ClosedOn == None), True)], else_=False) validateManaged(BlogMapped.CreatedOn) # -------------------------------------------------------------------- from livedesk.meta.blog_post import BlogPostMapped BlogMapped.UpdatedOn = column_property( select([func.max(BlogPostMapped.UpdatedOn) ]).where(BlogPostMapped.Blog == BlogMapped.Id)) # -------------------------------------------------------------------- class BlogSourceDB(Base): ''' Provides the mapping for BlogSource. ''' __tablename__ = 'livedesk_blog_source' __table_args__ = (UniqueConstraint('fk_blog', 'fk_source', name='uix_blog_source'), dict(mysql_engine='InnoDB', mysql_charset='utf8'))
def test_should_columproperty_convert():
    """A column_property over an integer-summing select should convert to
    a graphene Int field.

    NOTE(review): ``id`` here resolves to the Python *builtin* — no column
    named ``id`` is in scope in this snippet.  ``func.cast``/``==`` accept
    it as an opaque SQL operand, so the conversion still exercises the
    Integer path, but confirm this is intentional.
    """
    field = get_field_from_column(
        column_property(
            select([func.sum(func.cast(id, types.Integer))])
            .where(id == 1)))

    assert field.type == graphene.Int
def __declare_last__(cls):
    """Attach ``calc_is_current_version``: a column_property that is true
    exactly when this row carries the highest version_id for its id."""
    history = cls.__table__.alias()
    newest_check = select(
        func.max(history.c.version_id) == cls.version_id
    ).where(history.c.id == cls.id)
    cls.calc_is_current_version = column_property(newest_check)
polymorphic_identity=polymorphic_identity(domain.GroupMembership)) # !+HEAD_DOCUMENT_ITEM(mr, sep-2011) standardize name, "head", "document", "item" domain.GroupMembership.head = domain.GroupMembership.user mapper(domain.GroupMembershipRole, schema.group_membership_role, properties={"member": relation(domain.GroupMembership)}) # !+RENAME ParliamentMember mapper(domain.MemberOfParliament, schema.parliament_membership, inherits=domain.GroupMembership, primary_key=[schema.user_group_membership.c.membership_id], properties={ "start_date": column_property( schema.user_group_membership.c.start_date.label("start_date")), "end_date": column_property( schema.user_group_membership.c.end_date.label("end_date")), }, polymorphic_on=schema.user_group_membership.c.membership_type, polymorphic_identity=polymorphic_identity(domain.MemberOfParliament)) mapper(domain.Minister, inherits=domain.GroupMembership, polymorphic_on=schema.user_group_membership.c.membership_type, polymorphic_identity=polymorphic_identity(domain.Minister)) mapper(domain.CommitteeMember, inherits=domain.GroupMembership, polymorphic_on=schema.user_group_membership.c.membership_type,
class CustomRelease(Release):
    # Release subclass exposing alias/meta rows and a medium count.
    aliases = relationship("ReleaseAlias")
    asin = relationship("ReleaseMeta")
    # Scalar subquery: number of Medium rows belonging to this release.
    medium_count = column_property(
        select([func.count(Medium.id)]).where(Medium.release_id == Release.id))
class Invoice(Base):
    """A payment invoice: billed line items, tax, and payment state.

    Monetary totals are SQL column properties computed from the invoice's
    line items and completed billing transactions.
    """
    __tablename__ = "payment_invoice"

    id = Column(Integer(), primary_key=True)
    payment_account_id = Column(Integer(), ForeignKey('payment_account.id'),
                                nullable=False)
    _invoice_status = Enum("unbilled", "processing", "billed", "disregarded",
                           "waiting_payment_processing", "paid",
                           name="invoice_status")
    status = Column(_invoice_status, nullable=False, default="unbilled")
    timestamp = Column(DateTime(), nullable=False, default=now)
    due_date = Column(Date(), nullable=False)
    # invoice this one was cloned from, if any
    copied_from_id = Column(Integer(), ForeignKey('payment_invoice.id'),
                            nullable=True)
    # NOTE(review): default={} is a shared mutable default — SQLAlchemy
    # reuses the same dict instance across rows; a callable (dict) would
    # be safer.  Confirm before changing, JSONType(mutable=True) may copy.
    extra_fields = Column(JSONType(mutable=True), nullable=False, default={})
    currency = Column(Unicode(), default="GBP", nullable=False)
    taxable = Column(Boolean(), nullable=False, default=True)
    _tax_rate_code = Enum("GB", "EU", "US", name="tax_rate_code")
    tax_rate_code = Column(_tax_rate_code, nullable=True, default="GB")
    tax_rate = Column(Numeric(precision=10, scale=4), nullable=False,
                      default=0)

    lines = relationship("InvoiceLine", backref=backref('invoice'))

    # Serialization templates: 'default' is the compact form, 'full' adds
    # related objects via per-field lambdas.
    __to_dict__ = copy.deepcopy(Base.__to_dict__)
    __to_dict__.update({
        'default': {
            'id'                : None,
            'payment_account_id': None,
            'status'            : None,
            'timestamp'         : None,
            'due_date'          : None,
            'copied_from_id'    : None,
            'total_pre_tax'     : None,
            'total_tax'         : None,
            'total'             : None,
            'paid_total'        : None,
            'total_due'         : None,
            'currency'          : None,
        },
    })
    __to_dict__.update({'full': copy.deepcopy(__to_dict__['default'])})
    __to_dict__['full'].update({
        'payment_account': lambda invoice: invoice.payment_account.to_dict(),
        'lines'          : lambda invoice: [line.to_dict() for line in invoice.lines],
        'transactions'   : lambda invoice: [trans.to_dict() for trans in invoice.transactions],
        'processing'     : None,
        #'config'         : None,
        'payment_options': None,
    })

    def __init__(self, payment_account=None):
        # Seed currency/tax settings from the owning account, when given.
        if payment_account:
            self.payment_account_id = payment_account.id
            self.currency = payment_account.currency
            self.taxable = payment_account.taxable
            self.tax_rate_code = payment_account.tax_rate_code
            if payment_account.taxable:
                self.tax_rate = tax_rates.get(self.tax_rate_code)
            # snapshot the account config onto the invoice
            self.config.update({'payment_account_config':
                                payment_account.to_dict('invoice')})

    _config = None  # lazily-created _ConfigManager over extra_fields

    # --- SQL-computed totals ------------------------------------------
    # sum of final line prices (0 when there are no lines)
    total_pre_tax = column_property(
        select(
            [func.coalesce(func.sum(InvoiceLine.price_final), 0)]
        ).where(InvoiceLine.invoice_id == id)
    )
    # tax on the pre-tax total, rounded to 2 places
    total_tax = column_property(
        select(
            [func.round(total_pre_tax.columns[0] * tax_rate, 2)]
        )
    )
    total = column_property(
        select(
            [func.round(total_pre_tax.columns[0] + total_tax.columns[0], 2)]
        )
    )
    # sum of completed billing transactions against this invoice
    paid_total = column_property(
        select(
            [func.coalesce(func.sum(BillingTransaction.amount), 0)]
        ).where(and_(BillingTransaction.invoice_id == id,
                     BillingTransaction.status == "complete"))
    )
    total_due = column_property(
        select(
            [total.columns[0] - paid_total.columns[0]]
        )
    )

    @property
    def processing(self):
        # True while any transaction on this invoice is still pending.
        return len([txn for txn in self.transactions
                    if txn.status == 'pending']) != 0

    @property
    def config(self):
        # Lazily wrap extra_fields in a config manager (created once).
        if not self.extra_fields:
            self.extra_fields = {}
        if not self._config:
            self._config = _ConfigManager(self.extra_fields)
        return self._config

    @property
    def payment_options(self):
        # Only a billed invoice with an outstanding balance is payable.
        if self.status != 'billed' or self.total_due == 0:
            return {}
        # local import avoids a circular dependency with the payment module
        from civicboom.lib.payment import get_payment_options
        return get_payment_options(self.payment_account,
                                   check_key="invoice_button")
class Release(db.ModelBase):
    """A single released version of a project (composite PK name+version)."""

    __tablename__ = "releases"

    @declared_attr
    def __table_args__(cls):  # noqa
        return (
            Index("release_created_idx", cls.created.desc()),
            Index("release_name_created_idx", cls.name, cls.created.desc()),
            Index("release_name_idx", cls.name),
            Index("release_pypi_hidden_idx", cls._pypi_hidden),
            Index("release_version_idx", cls.version),
        )

    __repr__ = make_repr("name", "version")

    name = Column(
        Text,
        ForeignKey("packages.name", onupdate="CASCADE", ondelete="CASCADE"),
        primary_key=True,
    )
    version = Column(Text, primary_key=True)
    canonical_version = Column(Text, nullable=False)
    # computed in the database by the pep440_is_prerelease SQL function
    is_prerelease = orm.column_property(func.pep440_is_prerelease(version))
    author = Column(Text)
    author_email = Column(Text)
    maintainer = Column(Text)
    maintainer_email = Column(Text)
    home_page = Column(Text)
    license = Column(Text)
    summary = Column(Text)
    description_content_type = Column(Text)
    keywords = Column(Text)
    platform = Column(Text)
    download_url = Column(Text)
    _pypi_ordering = Column(Integer)
    _pypi_hidden = Column(Boolean)
    cheesecake_installability_id = Column(
        Integer, ForeignKey("cheesecake_main_indices.id")
    )
    cheesecake_documentation_id = Column(
        Integer, ForeignKey("cheesecake_main_indices.id")
    )
    cheesecake_code_kwalitee_id = Column(
        Integer, ForeignKey("cheesecake_main_indices.id")
    )
    requires_python = Column(Text)
    description_from_readme = Column(Boolean)
    created = Column(
        DateTime(timezone=False), nullable=False, server_default=sql.func.now()
    )

    # We defer this column because it is a very large column (it can be MB in
    # size) and we very rarely actually want to access it. Typically we only
    # need it when rendering the page for a single project, but many of our
    # queries only need to access a few of the attributes of a Release.
    # Instead of playing whack-a-mole and using load_only() or defer() on
    # each of those queries, deferring this here makes the default case more
    # performant.
    description = orm.deferred(Column(Text))

    _classifiers = orm.relationship(
        Classifier,
        backref="project_releases",
        secondary=lambda: release_classifiers,
        order_by=Classifier.classifier,
        passive_deletes=True,
    )
    classifiers = association_proxy("_classifiers", "classifier")

    files = orm.relationship(
        "File",
        backref="release",
        cascade="all, delete-orphan",
        lazy="dynamic",
        order_by=lambda: File.filename,
        passive_deletes=True,
    )

    dependencies = orm.relationship("Dependency")

    # Each dependency kind gets a filtered relationship plus a string proxy.
    _requires = _dependency_relation(DependencyKind.requires)
    requires = association_proxy("_requires", "specifier")

    _provides = _dependency_relation(DependencyKind.provides)
    provides = association_proxy("_provides", "specifier")

    _obsoletes = _dependency_relation(DependencyKind.obsoletes)
    obsoletes = association_proxy("_obsoletes", "specifier")

    _requires_dist = _dependency_relation(DependencyKind.requires_dist)
    requires_dist = association_proxy("_requires_dist", "specifier")

    _provides_dist = _dependency_relation(DependencyKind.provides_dist)
    provides_dist = association_proxy("_provides_dist", "specifier")

    _obsoletes_dist = _dependency_relation(DependencyKind.obsoletes_dist)
    obsoletes_dist = association_proxy("_obsoletes_dist", "specifier")

    _requires_external = _dependency_relation(DependencyKind.requires_external)
    requires_external = association_proxy("_requires_external", "specifier")

    _project_urls = _dependency_relation(DependencyKind.project_url)
    project_urls = association_proxy("_project_urls", "specifier")

    # The uploading user, resolved through the journal's "new release" entry.
    uploader = orm.relationship(
        "User",
        secondary=lambda: JournalEntry.__table__,
        primaryjoin=lambda: (
            (JournalEntry.name == orm.foreign(Release.name)) &
            (JournalEntry.version == orm.foreign(Release.version)) &
            (JournalEntry.action == "new release")
        ),
        secondaryjoin=lambda: (
            (User.username == orm.foreign(JournalEntry._submitted_by))
        ),
        order_by=lambda: JournalEntry.id.desc(),
        # TODO: We have uselist=False here which raises a warning because
        # multiple items were returned. This should only be temporary because
        # we should add a nullable FK to JournalEntry so we don't need to
        # rely on ordering and implicitly selecting the first object to make
        # this happen,
        uselist=False,
        viewonly=True,
    )

    def __acl__(self):
        """Build the ACL: admins, then project owners (manage+upload) and
        maintainers (upload only), owners first."""
        session = orm.object_session(self)
        acls = [(Allow, "group:admins", "admin")]

        # Get all of the users for this project.
        query = session.query(Role).filter(Role.project == self)
        query = query.options(orm.lazyload("project"))
        query = query.options(orm.joinedload("user").lazyload("emails"))
        for role in sorted(
            query.all(),
            key=lambda x: ["Owner", "Maintainer"].index(x.role_name)
        ):
            if role.role_name == "Owner":
                acls.append((Allow, str(role.user.id), ["manage", "upload"]))
            else:
                acls.append((Allow, str(role.user.id), ["upload"]))
        return acls

    @property
    def urls(self):
        """Ordered mapping of display name -> URL for this release."""
        _urls = OrderedDict()

        if self.home_page:
            _urls["Homepage"] = self.home_page

        # project_urls entries are "name, url" strings
        for urlspec in self.project_urls:
            name, url = [x.strip() for x in urlspec.split(",", 1)]
            _urls[name] = url

        # explicit download URL wins only if no "Download" entry exists yet
        if self.download_url and "Download" not in _urls:
            _urls["Download"] = self.download_url

        return _urls

    @property
    def github_repo_info_url(self):
        """GitHub API repo URL derived from the first GitHub link among the
        release's URLs, or None when no GitHub URL is present."""
        for parsed in [urlparse(url) for url in self.urls.values()]:
            segments = parsed.path.strip("/").rstrip("/").split("/")
            if (
                parsed.netloc == "github.com" or
                parsed.netloc == "www.github.com"
            ) and len(segments) >= 2:
                user_name, repo_name = segments[:2]
                return f"https://api.github.com/repos/{user_name}/{repo_name}"

    @property
    def has_meta(self):
        """True when any optional metadata field is populated."""
        return any(
            [
                self.license,
                self.keywords,
                self.author,
                self.author_email,
                self.maintainer,
                self.maintainer_email,
                self.requires_python,
            ]
        )
class Item(db.Model):
    """
    Meant to model a specific item, like an instance of a security group.

    Scores are aggregated from the related ItemAudit rows; issues whose
    auditor has been disabled (AuditorSettings.disabled) are ignored.
    """
    __tablename__ = "item"
    id = Column(Integer, primary_key=True)
    region = Column(String(32), index=True)
    name = Column(
        String(303), index=True
    )  # Max AWS name = 255 chars. Add 48 chars for ' (sg-12345678901234567 in vpc-12345678901234567)'
    arn = Column(Text(), nullable=True, index=True, unique=True)
    latest_revision_complete_hash = Column(String(32), index=True)
    latest_revision_durable_hash = Column(String(32), index=True)
    tech_id = Column(Integer, ForeignKey("technology.id"), nullable=False, index=True)
    account_id = Column(Integer, ForeignKey("account.id"), nullable=False, index=True)
    latest_revision_id = Column(Integer, nullable=True)
    # NOTE(review): the backref is named "revision" although it points back at
    # an Item -- looks copy-pasted. Renaming would change the attribute exposed
    # on ItemComment, so it is left as-is; confirm before changing.
    comments = relationship("ItemComment", backref="revision",
                            cascade="all, delete, delete-orphan",
                            order_by="ItemComment.date_created")
    revisions = relationship("ItemRevision", backref="item",
                             cascade="all, delete, delete-orphan",
                             order_by="desc(ItemRevision.date_created)",
                             lazy="dynamic")
    # Fix: `issues` used to be assigned twice; the first assignment (without
    # foreign_keys) was silently shadowed by the later one and declared a
    # duplicate backref. Keep only the explicit definition.
    issues = relationship("ItemAudit", backref="item",
                          cascade="all, delete, delete-orphan",
                          foreign_keys="ItemAudit.item_id")
    cloudtrail_entries = relationship("CloudTrailEntry", backref="item",
                                      cascade="all, delete, delete-orphan",
                                      order_by="CloudTrailEntry.event_time")
    exceptions = relationship("ExceptionLogs", backref="item",
                              cascade="all, delete, delete-orphan")

    @hybrid_property
    def score(self):
        """Sum of the scores of all enabled audit issues for this item
        (0 when there are none)."""
        return db.session.query(func.cast(func.sum(
            ItemAudit.score), Integer)).filter(
            ItemAudit.item_id == self.id,
            ItemAudit.auditor_setting_id == AuditorSettings.id,
            AuditorSettings.disabled == False).one()[0] or 0

    @score.expression
    def score(cls):
        """SQL expression equivalent of `score`, usable in queries."""
        return select([func.sum(ItemAudit.score)]). \
            where(ItemAudit.item_id == cls.id). \
            where(ItemAudit.auditor_setting_id == AuditorSettings.id). \
            where(AuditorSettings.disabled == False). \
            label('item_score')

    @hybrid_property
    def unjustified_score(self):
        """Like `score`, but counting only issues that have not been
        justified."""
        return db.session.query(func.cast(func.sum(
            ItemAudit.score), Integer)).filter(
            ItemAudit.item_id == self.id,
            ItemAudit.justified == False,
            ItemAudit.auditor_setting_id == AuditorSettings.id,
            AuditorSettings.disabled == False).one()[0] or 0

    @unjustified_score.expression
    def unjustified_score(cls):
        """SQL expression equivalent of `unjustified_score`."""
        return select([func.sum(ItemAudit.score)]). \
            where(ItemAudit.item_id == cls.id). \
            where(ItemAudit.justified == False). \
            where(ItemAudit.auditor_setting_id == AuditorSettings.id). \
            where(AuditorSettings.disabled == False). \
            label('item_unjustified_score')

    # Number of enabled audit issues attached to this item; deferred so the
    # subquery only runs when the attribute is actually accessed.
    issue_count = column_property(
        select([func.count(ItemAudit.id)])
        .where(ItemAudit.item_id == id)
        .where(ItemAudit.auditor_setting_id == AuditorSettings.id)
        .where(AuditorSettings.disabled == False),
        deferred=True)
def _extract_model_attrs(metadata, model, sa_models):
    """Build the SQLAlchemy mapper properties for one Django model.

    Maps plain fields to column_property instances and FK / OneToOne /
    ManyToMany fields to relationship() objects (with backrefs derived from
    Django's related_name rules).

    :param metadata: SQLAlchemy MetaData whose tables mirror the Django schema
    :param model: the Django model class being mapped
    :param sa_models: dict mapping Django model -> already-built SA class
    :returns: dict of attribute name -> mapper property
    """
    tables = metadata.tables
    name = model._meta.db_table
    # Tables are keyed by "schema.table" when a schema is configured.
    qualname = (metadata.schema + "." + name) if metadata.schema else name
    table = tables[qualname]
    fks = [
        t for t in model._meta.fields
        if isinstance(t, (ForeignKey, OneToOneField))
    ]
    attrs = {}
    rel_fields = fks + list(model._meta.many_to_many)
    # Plain (non-relational) fields become simple column properties.
    for f in model._meta.fields:
        if not isinstance(f, (ForeignKey, OneToOneField)):
            if f.model != model or f.column not in table.c:
                continue  # Fields from parent model are not supported
            attrs[f.name] = orm.column_property(table.c[f.column])
    for fk in rel_fields:
        # M2M fields have no column on this table; anything else missing
        # from the reflected table is skipped.
        if not fk.column in table.c and not isinstance(fk, ManyToManyField):
            continue
        # Django moved the related-model accessor between releases.
        if django.VERSION < (1, 8):
            parent_model = fk.related.parent_model
        else:
            parent_model = get_remote_field(fk).model
        parent_model_meta = parent_model._meta
        if parent_model_meta.proxy:
            continue
        p_table_name = parent_model_meta.db_table
        p_table_qualname = (metadata.schema + "." + p_table_name
                            if metadata.schema else p_table_name)
        p_table = tables[p_table_qualname]
        p_name = parent_model_meta.pk.column
        # A related_name ending in "+" is Django's "no reverse accessor".
        if django.VERSION < (1, 9):
            disable_backref = fk.rel.related_name and fk.rel.related_name.endswith(
                "+")
            backref = (fk.rel.related_name.lower().strip("+")
                       if fk.rel.related_name else None)
        else:
            disable_backref = (fk.remote_field.related_name
                               and fk.remote_field.related_name.endswith("+"))
            backref = (fk.remote_field.related_name.lower().strip("+")
                       if fk.remote_field.related_name else None)
        if not backref and not disable_backref:
            # Default reverse name: "<model>_set" (Django convention),
            # or just "<model>" for one-to-one links.
            backref = model._meta.object_name.lower()
            if not isinstance(fk, OneToOneField):
                backref = backref + "_set"
        elif backref and isinstance(fk, OneToOneField):
            backref = orm.backref(backref, uselist=False)
        kwargs = {}
        if isinstance(fk, ManyToManyField):
            # Join through the m2m table on both sides.
            model_pk = model._meta.pk.column
            sec_table_name = get_remote_field(fk).field.m2m_db_table()
            sec_table_qualname = (metadata.schema + "."
                                  + sec_table_name if metadata.schema
                                  else sec_table_name)
            sec_table = tables[sec_table_qualname]
            sec_column = fk.m2m_column_name()
            p_sec_column = fk.m2m_reverse_name()
            kwargs.update(
                secondary=sec_table,
                primaryjoin=(sec_table.c[sec_column] == table.c[model_pk]),
                secondaryjoin=(sec_table.c[p_sec_column] == p_table.c[p_name]),
            )
            if fk.model() != model:
                backref = None
        else:
            kwargs.update(
                foreign_keys=[table.c[fk.column]],
                primaryjoin=(table.c[fk.column] == p_table.c[p_name]),
                remote_side=p_table.c[p_name],
            )
        if backref and not disable_backref:
            kwargs.update(backref=backref)
        attrs[fk.name] = orm.relationship(sa_models[parent_model], **kwargs)
    return attrs
    def __declare_last__(cls):
        """Attach deferred count column_properties once mappers are ready.

        All counts live in property group "counts" and are deferred, so they
        are only loaded when explicitly undeferred.  Answers are counted when
        they belong either to a non-dropped course member or to an active
        group of the course.
        """
        from . import UserCourse, CourseRole, LTIResourceLink, Group

        super(cls, cls).__declare_last__()

        # Total non-draft, non-practice answers for the assignment.
        cls.answer_count = column_property(select([
            func.count(Answer.id)
        ]).select_from(
            join(Answer, UserCourse, UserCourse.user_id == Answer.user_id,
                 isouter=True).join(
                     Group, Group.id == Answer.group_id, isouter=True)).where(
                         and_(
                             Answer.assignment_id == cls.id,
                             Answer.active == True,
                             Answer.draft == False,
                             Answer.practice == False,
                             or_(
                                 and_(
                                     UserCourse.course_id == cls.course_id,
                                     UserCourse.course_role != CourseRole.dropped,
                                     UserCourse.id != None),
                                 and_(Group.course_id == cls.course_id,
                                      Group.active == True,
                                      Group.id != None),
                             ))),
            deferred=True,
            group="counts")

        # Same as answer_count but restricted to the student course role.
        cls.student_answer_count = column_property(select([
            func.count(Answer.id)
        ]).select_from(
            join(Answer, UserCourse, UserCourse.user_id == Answer.user_id,
                 isouter=True).join(
                     Group, Group.id == Answer.group_id, isouter=True)).where(
                         and_(
                             Answer.assignment_id == cls.id,
                             Answer.active == True,
                             Answer.draft == False,
                             Answer.practice == False,
                             or_(
                                 and_(
                                     UserCourse.course_id == cls.course_id,
                                     UserCourse.course_role == CourseRole.student,
                                     UserCourse.id != None),
                                 and_(Group.course_id == cls.course_id,
                                      Group.active == True,
                                      Group.id != None),
                             ))),
            deferred=True,
            group="counts")

        # Comparable answer count
        # To be consistent with student_answer_count, we are not counting
        # answers from sys admin here
        cls.comparable_answer_count = column_property(select([
            func.count(Answer.id)
        ]).select_from(
            join(Answer, UserCourse, UserCourse.user_id == Answer.user_id,
                 isouter=True).join(
                     Group, Group.id == Answer.group_id, isouter=True)).where(
                         and_(
                             Answer.assignment_id == cls.id,
                             Answer.active == True,
                             Answer.draft == False,
                             Answer.practice == False,
                             Answer.comparable == True,
                             or_(
                                 and_(
                                     UserCourse.course_id == cls.course_id,
                                     UserCourse.course_role != CourseRole.dropped,
                                     UserCourse.id != None),
                                 and_(Group.course_id == cls.course_id,
                                      Group.active == True,
                                      Group.id != None),
                             ))),
            deferred=True,
            group="counts")

        # Active comparison examples attached to the assignment.
        cls.comparison_example_count = column_property(select(
            [func.count(ComparisonExample.id)]).where(
                and_(ComparisonExample.assignment_id == cls.id,
                     ComparisonExample.active == True)),
            deferred=True,
            group="counts")

        # All comparisons, including ones still in progress.
        cls.all_compare_count = column_property(select([
            func.count(Comparison.id)
        ]).where(and_(Comparison.assignment_id == cls.id)),
            deferred=True,
            group="counts")

        # Completed comparisons only.
        cls.compare_count = column_property(select([func.count(
            Comparison.id)]).where(
                and_(Comparison.assignment_id == cls.id,
                     Comparison.completed == True)),
            deferred=True,
            group="counts")

        # Non-draft self-evaluation comments on this assignment's answers.
        cls.self_evaluation_count = column_property(select(
            [func.count(AnswerComment.id)]).select_from(
                join(AnswerComment, Answer,
                     AnswerComment.answer_id == Answer.id)).where(
                         and_(
                             AnswerComment.comment_type == AnswerCommentType.self_evaluation,
                             AnswerComment.active == True,
                             AnswerComment.answer_id == Answer.id,
                             AnswerComment.draft == False,
                             Answer.assignment_id == cls.id)),
            deferred=True,
            group="counts")

        # LTI resource links bound to this assignment.
        cls.lti_resource_link_count = column_property(select([
            func.count(LTIResourceLink.id)
        ]).where(LTIResourceLink.compair_assignment_id == cls.id),
            deferred=True,
            group="counts")
tutorial = relationship(Tutorial, backref='tutorial_preferences') penalty = Column(Integer) class LectureStudent(Base): __tablename__ = 'lecture_students' lecture_id = Column('lecture', Integer, ForeignKey(Lecture.id), primary_key=True) lecture = relationship(Lecture, backref=backref('lecture_students', lazy='dynamic')) student_id = Column('student', Integer, ForeignKey(User.id), primary_key=True) student = relationship(User, backref=backref('lecture_students', lazy='dynamic')) tutorial_id = Column('tutorial', Integer, ForeignKey(Tutorial.id)) tutorial = relationship(Tutorial, backref='lecture_students') Tutorial.student_count = column_property( sa.select([sa.func.count(LectureStudent.student_id)]).\ where(LectureStudent.tutorial_id==Tutorial.id), deferred=True ) class LectureRemovedStudent(Base): __tablename__ = 'lecture_removed_students' lecture_id = Column('lecture', Integer, ForeignKey(Lecture.id), primary_key=True) lecture = relationship(Lecture, backref=backref('lecture_removed_students', lazy='dynamic')) student_id = Column('student', Integer, ForeignKey(User.id), primary_key=True) student = relationship(User, backref=backref('lecture_removed_students', lazy='dynamic')) tutorial_id = Column('tutorial', Integer, ForeignKey(Tutorial.id)) tutorial = relationship(Tutorial, backref='lecture_removed_students') # class LectureTutor(Base): # __tablename__ = 'lecture_tutors'
def _mappers_configured():
    # These column properties cannot be written with `declared_attr`: that
    # code runs at import time, which makes importing other modules (or
    # referencing the class itself) risky.  Each property behaves like a
    # regular read-only column backed by a subquery, so it can be loaded
    # together with the row itself.  All of them are deferred; request them
    # explicitly, e.g. `.options(undefer('chain_titles'))`.
    from indico.modules.events import Event

    def _deferred(expr):
        # Shorthand: every property below is a deferred column_property.
        return column_property(expr, deferred=True)

    # Effective protection mode (public/protected), taking inheritance from
    # the parent chain into account.
    protection = Category.get_protection_cte()
    Category.effective_protection_mode = _deferred(
        select([protection.c.protection_mode])
        .where(protection.c.id == Category.id)
        .correlate_except(protection))

    # Effective icon data: the category's own icon or the inherited one.
    icon = Category.get_icon_data_cte()
    Category.effective_icon_data = _deferred(
        select([db.func.json_build_object('source_id', icon.c.source_id,
                                          'metadata', icon.c.icon_metadata)])
        .where(icon.c.id == Category.id)
        .correlate_except(icon))

    # Number of non-deleted events directly inside the category.
    Category.event_count = _deferred(
        select([db.func.count(Event.id)])
        .where((Event.category_id == Category.id) & ~Event.is_deleted)
        .correlate_except(Event))

    # Whether the category directly contains any non-deleted event.
    Category.has_events = _deferred(
        exists([1])
        .where((Event.category_id == Category.id) & ~Event.is_deleted)
        .correlate_except(Event))

    # Titles of the parent chain, root category first, down to this one.
    titles = Category.get_tree_cte('title')
    Category.chain_titles = _deferred(
        select([titles.c.path])
        .where(titles.c.id == Category.id)
        .correlate_except(titles))

    # Parent chain as {'id': ..., 'title': ...} entries, root first.
    chain = Category.get_tree_cte(lambda cat: db.func.json_build_object(
        'id', cat.id, 'title', cat.title))
    Category.chain = _deferred(
        select([chain.c.path])
        .where(chain.c.id == Category.id)
        .correlate_except(chain))

    # Number of non-deleted events in this category or any subcategory.
    tree = Category.get_tree_cte()
    Category.deep_events_count = _deferred(
        select([db.func.count()])
        .where(db.and_(tree.c.id == Event.category_id,
                       tree.c.path.contains(array([Category.id])),
                       ~tree.c.is_deleted,
                       ~Event.is_deleted))
        .correlate_except(Event))

    # Number of non-deleted subcategories anywhere below this category.
    subtree = Category.get_tree_cte()
    Category.deep_children_count = _deferred(
        select([db.func.count()])
        .where(db.and_(subtree.c.path.contains(array([Category.id])),
                       subtree.c.id != Category.id,
                       ~subtree.c.is_deleted))
        .correlate_except(subtree))
class Hsp(DBBASE):
    r"""
    This class serializes and stores into the DB the various HSPs.
    It is directly connected to the Hit table, through the "hit_id"
    reference key.
    The Hit reference can be accessed through the hit_object attribute;
    back-reference (Hit to Hsps) is given by the "hsps" attribute.

    Keys:

    :return hit_id: Reference key for the Hit table
    :rtype hit_id: int

    :return counter: It indicates the progressive number of the HSP for the hit
    :rtype counter: int

    :return query_hsp_start: Start position on the query
    :rtype query_hsp_start: int

    :return query_hsp_end: End position on the query
    :rtype query_hsp_end: int

    :return target_hsp_start: Start position on the target
    :rtype target_hsp_start: int

    :return target_hsp_end: End position on the target
    :rtype target_hsp_end: int

    :return hsp_evalue: Evalue of the HSP
    :rtype hsp_evalue: float

    :return hsp_bits: Bit-score of the HSP
    :rtype hsp_bits: float

    :return hsp_identity: Identity (in %) of the alignment
    :rtype hsp_identity: float

    :return hsp_length: Length of the HSP
    :rtype hsp_length: int

    :return match: the match line between query and target, with the
                   following specs:
        - If the position is a match/positive, keep the original value
        - If the position is a gap *for the query*, insert a - (dash)
        - If the position is a gap *for the target*, insert a _ (underscore)
        - If the position is a gap *for both*, insert a \ (backslash)

    An HSP row has the following constraints:
    - Counter,hit_id must be unique (and are primary keys)
    - The combination ("Hit_id","query_hsp_start","query_hsp_end",
      "target_hsp_start", "target_hsp_end") must be unique

    Moreover, the following properties are also present:

    :return query_object: The referenced Query
    :rtype query_object: Query

    :return target_object: The reference Target
    :rtype target_object: Target
    """

    __tablename__ = "hsp"
    counter = Column(Integer)  # Indicates the number of the HSP inside the hit
    query_id = Column(Integer, ForeignKey(Query.query_id), unique=False)
    target_id = Column(Integer, ForeignKey(Target.target_id), unique=False)
    pk_constraint = PrimaryKeyConstraint("counter", "query_id", "target_id",
                                         name="hsp_constraint")
    query_index = Index("hsp_query_idx", "query_id", unique=False)
    target_index = Index("hsp_target_idx", "target_id", unique=False)
    hsp_evalue_index = Index('hsp_evalue_idx', 'hsp_evalue', unique=False)
    combined_index = Index("hsp_combined_idx", "query_id", "target_id",
                           unique=False)
    # NOTE(review): full_index and uni_constraint are defined but never added
    # to __table_args__, so they are not emitted to the DB -- confirm whether
    # that is intentional before registering them.
    full_index = Index("hsp_full_idx", "counter", "query_id", "target_id",
                       unique=True)
    query_hsp_start = Column(Integer)
    query_hsp_end = Column(Integer)
    query_frame = Column(Integer)
    target_hsp_start = Column(Integer)
    target_hsp_end = Column(Integer)
    target_frame = Column(Integer)
    uni_constraint = UniqueConstraint("query_id", "target_id",
                                      "query_hsp_start", "query_hsp_end",
                                      "target_hsp_start", "target_hsp_end")
    match = Column(String(10000))
    hsp_evalue = Column(Float)
    hsp_bits = Column(Float)
    hsp_identity = Column(Float)
    hsp_positives = Column(Float)
    hsp_length = Column(Integer)

    query_object = relationship(Query, uselist=False)
    target_object = relationship(Target, uselist=False)

    # Scalar lookups of the joined Query/Target rows.
    query = column_property(
        select([Query.query_name]).where(Query.query_id == query_id))
    query_length = column_property(
        select([Query.query_length]).where(Query.query_id == query_id))
    # Fix: `target` and `target_length` were bare Select objects (unlike
    # their query counterparts above), so `self.target` evaluated to the
    # SELECT statement itself and `target_hsp_cov` could not compute a
    # value. Wrap them in column_property like query/query_length.
    target = column_property(
        select([Target.target_name]).where(Target.target_id == target_id))
    target_length = column_property(
        select([Target.target_length]).where(Target.target_id == target_id))

    __table_args__ = (pk_constraint, query_index, target_index,
                      combined_index, hsp_evalue_index)

    def __init__(self, hsp, counter, query_id, target_id, qmultiplier=1,
                 tmultiplier=1):
        """
        :param hsp: an hsp object from the serialized XML
        :param counter: integer which indicates the HSP position in the HSP
                        list for the hit
        :type counter: int
        :param query_id: Foreign key for the Query table
        :type query_id: int
        :param target_id: Foreign key for the Target table
        :type target_id: int
        """
        self.counter = counter
        hsp_dict, _, _ = prepare_hsp(hsp, counter, qmultiplier=qmultiplier,
                                     tmultiplier=tmultiplier)
        for key in hsp_dict:
            setattr(self, key, hsp_dict[key])
        self.query_id = query_id
        self.target_id = target_id

    def __str__(self):
        """Simple printing function."""
        line = [
            self.query, self.target, self.query_hsp_start, self.query_hsp_end,
            self.target_hsp_start, self.target_hsp_end, self.hsp_evalue
        ]
        return "\t".join([str(x) for x in line])

    # @profile
    @classmethod
    def as_dict_static(cls, state_obj):
        """Method to return a dict representation of the object.
        Necessary for storing.

        This method returns a dictionary *without any attribute that requires
        joined data*. As a static method, it is useful to be used outside of
        the class.

        :param state_obj: an instance of the HSP class or a
                          collections.namedtuple object from a direct query
        :rtype : dict
        """
        keys = [
            "query_hsp_start", "query_hsp_end", "target_hsp_start",
            "target_hsp_end", "hsp_evalue", "hsp_bits", "match",
            "query_frame", "target_frame"
        ]
        # Build directly instead of dict().fromkeys() + fill loop.
        return {key: getattr(state_obj, key) for key in keys}

    def as_dict(self):
        """Method to return a dict representation of the object.
        Necessary for storing.

        This method returns a dictionary *without any attribute that requires
        joined data*. It is meant to be used only by the method as_dict of
        the Hit class."""
        return self.as_dict_static(self)

    def as_full_dict(self):
        """Method to return a dict representation of the object.
        This method also checks query name and hit name, so it is slower
        than as_dict and used when it is necessary to retrieve data
        independently from Hit.
        """
        state = self.as_dict()
        state["query"] = self.query
        state["target"] = self.target
        state["query_hsp_cov"] = self.query_hsp_cov
        state["target_hsp_cov"] = self.target_hsp_cov
        state["match"] = self.match
        return state

    @hybrid_property
    def query_hsp_cov(self):
        """This property returns the percentage of the query which is covered
        by the HSP."""
        val = (self.query_hsp_end - self.query_hsp_start + 1)
        val /= self.query_length
        return val

    @hybrid_property
    def target_hsp_cov(self):
        """This property returns the percentage of the target which is
        covered by the HSP."""
        val = (self.target_hsp_end - self.target_hsp_start + 1)
        val /= self.target_length
        return val
class Release(db.Model):
    """A single released version of a project."""

    __tablename__ = "releases"

    @declared_attr
    def __table_args__(cls):  # noqa
        return (
            Index("release_created_idx", cls.created.desc()),
            Index("release_project_created_idx", cls.project_id, cls.created.desc()),
            Index("release_version_idx", cls.version),
            # A project may publish each version only once.
            UniqueConstraint("project_id", "version"),
        )

    __repr__ = make_repr("project", "version")
    __parent__ = dotted_navigator("project")
    __name__ = dotted_navigator("version")

    project_id = Column(
        ForeignKey("projects.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    version = Column(Text, nullable=False)
    canonical_version = Column(Text, nullable=False)
    # Computed in the database via the pep440_is_prerelease SQL function.
    is_prerelease = orm.column_property(func.pep440_is_prerelease(version))
    author = Column(Text)
    author_email = Column(Text)
    maintainer = Column(Text)
    maintainer_email = Column(Text)
    home_page = Column(Text)
    license = Column(Text)
    summary = Column(Text)
    keywords = Column(Text)
    platform = Column(Text)
    download_url = Column(Text)
    _pypi_ordering = Column(Integer)
    requires_python = Column(Text)
    created = Column(
        DateTime(timezone=False), nullable=False, server_default=sql.func.now()
    )

    description_id = Column(
        ForeignKey("release_descriptions.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    description = orm.relationship(
        "Description",
        backref=orm.backref(
            "release",
            cascade="all, delete-orphan",
            passive_deletes=True,
            passive_updates=True,
            single_parent=True,
            uselist=False,
        ),
    )

    yanked = Column(Boolean, nullable=False, server_default=sql.false())
    yanked_reason = Column(Text, nullable=False, server_default="")

    _classifiers = orm.relationship(
        Classifier,
        backref="project_releases",
        secondary=lambda: release_classifiers,
        order_by=Classifier.classifier,
        passive_deletes=True,
    )
    # Expose the plain classifier strings instead of Classifier objects.
    classifiers = association_proxy("_classifiers", "classifier")

    files = orm.relationship(
        "File",
        backref="release",
        cascade="all, delete-orphan",
        lazy="dynamic",
        order_by=lambda: File.filename,
        passive_deletes=True,
    )

    dependencies = orm.relationship(
        "Dependency",
        backref="release",
        cascade="all, delete-orphan",
        passive_deletes=True,
    )

    # One (relationship, association proxy) pair per dependency kind; each
    # proxy exposes only the specifier string of the Dependency rows.
    _requires = _dependency_relation(DependencyKind.requires)
    requires = association_proxy("_requires", "specifier")

    _provides = _dependency_relation(DependencyKind.provides)
    provides = association_proxy("_provides", "specifier")

    _obsoletes = _dependency_relation(DependencyKind.obsoletes)
    obsoletes = association_proxy("_obsoletes", "specifier")

    _requires_dist = _dependency_relation(DependencyKind.requires_dist)
    requires_dist = association_proxy("_requires_dist", "specifier")

    _provides_dist = _dependency_relation(DependencyKind.provides_dist)
    provides_dist = association_proxy("_provides_dist", "specifier")

    _obsoletes_dist = _dependency_relation(DependencyKind.obsoletes_dist)
    obsoletes_dist = association_proxy("_obsoletes_dist", "specifier")

    _requires_external = _dependency_relation(DependencyKind.requires_external)
    requires_external = association_proxy("_requires_external", "specifier")

    _project_urls = _dependency_relation(DependencyKind.project_url)
    project_urls = association_proxy("_project_urls", "specifier")

    uploader_id = Column(
        ForeignKey("users.id", onupdate="CASCADE", ondelete="SET NULL"),
        nullable=True,
        index=True,
    )
    uploader = orm.relationship(User)
    uploaded_via = Column(Text)

    @property
    def urls(self):
        """Ordered mapping of URL label -> URL: home page first, then the
        download URL, then the "label, url" project_urls entries."""
        _urls = OrderedDict()

        if self.home_page:
            _urls["Homepage"] = self.home_page
        if self.download_url:
            _urls["Download"] = self.download_url

        for urlspec in self.project_urls:
            # partition() tolerates entries with no comma (url stays empty).
            name, _, url = urlspec.partition(",")
            name = name.strip()
            url = url.strip()
            if name and url:
                _urls[name] = url

        return _urls

    @property
    def github_repo_info_url(self):
        """GitHub API repo URL derived from the first release URL pointing
        at github.com, or None when no URL matches."""
        for url in self.urls.values():
            parsed = urlparse(url)
            segments = parsed.path.strip("/").split("/")
            if parsed.netloc in {"github.com", "www.github.com"} and len(segments) >= 2:
                user_name, repo_name = segments[:2]
                return f"https://api.github.com/repos/{user_name}/{repo_name}"

    @property
    def has_meta(self):
        """True when any optional metadata field is set."""
        return any(
            [
                self.license,
                self.keywords,
                self.author,
                self.author_email,
                self.maintainer,
                self.maintainer_email,
                self.requires_python,
            ]
        )
class User(db.Model):
    """User account with rank, admin, purchase, deposit and refund history.

    Admin state and rank are never stored directly on the row: both are
    derived from the most recent AdminUpdate/RankUpdate audit entries.
    """

    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    creation_date = db.Column(db.DateTime, default=func.now(), nullable=False)
    firstname = db.Column(db.String(32), unique=False, nullable=True)
    lastname = db.Column(db.String(32), unique=False, nullable=False)
    password = db.Column(db.String(256), unique=False, nullable=True)
    is_verified = db.Column(db.Boolean, nullable=False, default=False)

    # Column property for the active state: the `active` flag of the rank
    # assigned by the most recent RankUpdate for this user.
    active = column_property(select([Rank.active])
                             .where(and_(RankUpdate.user_id == id,
                                         Rank.id == RankUpdate.rank_id))
                             .order_by(RankUpdate.id.desc())
                             .limit(1)
                             .as_scalar())

    # Link to all purchases of a user.
    purchases = db.relationship(
        'Purchase', lazy='dynamic', foreign_keys='Purchase.user_id'
    )

    # Link to all deposits of a user.
    deposits = db.relationship(
        'Deposit', lazy='dynamic', foreign_keys='Deposit.user_id'
    )

    # Link to all refunds of a user.
    refunds = db.relationship(
        'Refund', lazy='dynamic', foreign_keys='Refund.user_id'
    )

    def __repr__(self):
        return f'<User {self.id}: {self.lastname}, {self.firstname}>'

    @hybrid_property
    def is_admin(self):
        """Admin state from the most recent AdminUpdate entry; False when
        no entry exists."""
        au = (AdminUpdate.query
              .filter_by(user_id=self.id)
              .order_by(AdminUpdate.id.desc())
              .first())
        if au is None:
            return False
        return au.is_admin

    @hybrid_property
    def verification_date(self):
        """Timestamp of the user's verification entry, or None."""
        verification = (UserVerification.query
                        .filter(UserVerification.user_id == self.id)
                        .first())
        if verification:
            return verification.timestamp
        return None

    @hybrid_method
    def set_admin(self, is_admin, admin_id):
        """Record an admin-state change performed by `admin_id`.

        :raises UserNeedsPassword: promoting a user without a password.
        :raises NothingHasChanged: the state would not change.
        """
        if is_admin and self.password is None:
            raise UserNeedsPassword()
        if self.is_admin == is_admin:
            raise NothingHasChanged()
        au = AdminUpdate(is_admin=is_admin, admin_id=admin_id, user_id=self.id)
        db.session.add(au)

    @hybrid_method
    def verify(self, admin_id, rank_id):
        """Mark the user as verified and assign the initial rank.

        :raises UserAlreadyVerified: the user is already verified.
        """
        if self.is_verified:
            raise UserAlreadyVerified()
        self.is_verified = True
        uv = UserVerification(user_id=self.id, admin_id=admin_id)
        self.set_rank_id(rank_id, admin_id)
        db.session.add(uv)

    @hybrid_property
    def rank_id(self):
        """Id of the most recently assigned rank, or None."""
        ru = (RankUpdate.query
              .filter_by(user_id=self.id)
              .order_by(RankUpdate.id.desc())
              .first())
        if ru:
            return ru.rank_id
        return None

    @hybrid_method
    def set_rank_id(self, rank_id, admin_id):
        """Record a rank change performed by `admin_id`.

        :raises NothingHasChanged: the rank would not change.
        """
        if self.rank_id == rank_id:
            raise NothingHasChanged()
        ru = RankUpdate(rank_id=rank_id, admin_id=admin_id, user_id=self.id)
        db.session.add(ru)

    @hybrid_property
    def rank(self):
        """The Rank object for `rank_id`, or None."""
        if self.rank_id:
            rank = Rank.query.filter(Rank.id == self.rank_id).first()
            if rank:
                return rank
        return None

    @hybrid_property
    def credit(self):
        """Current balance: deposits + refunds - purchases, skipping
        revoked entries."""
        p_amount = (db.session.query(func.sum(Purchase.price))
                    .filter(Purchase.user_id == self.id)
                    .filter(Purchase.revoked.is_(False))
                    .scalar()) or 0
        d_amount = (db.session.query(func.sum(Deposit.amount))
                    .filter(Deposit.user_id == self.id)
                    .filter(Deposit.revoked.is_(False))
                    .scalar()) or 0
        r_amount = (db.session.query(func.sum(Refund.total_price))
                    .filter(Refund.user_id == self.id)
                    .filter(Refund.revoked.is_(False))
                    .scalar()) or 0
        return d_amount + r_amount - p_amount

    @hybrid_property
    def favorites(self):
        """
        Returns the product ids of the user's favorite products in descending
        order of number. Inactive products are ignored.

        Args:
            self: self

        Returns:
            ids: A list of the favorite product ids in descending order.
        """
        result = (db.session.query(Purchase, Product)
                  .filter(Product.id == Purchase.product_id)
                  .filter(Product.active.is_(True))
                  .filter(Purchase.user_id == self.id)
                  .filter(Purchase.revoked.is_(False))
                  .group_by(Purchase.product_id)
                  .order_by(func.sum(Purchase.amount).desc())
                  .all())
        return [purchase.product_id for purchase, _ in result]
class TRealisations(DB.Model):
    '''
    Realisation of a circuit
    '''
    __tablename__ = 't_realisations'
    __table_args__ = {'schema': 'oeasc_in', 'extend_existing': True}
    id_realisation = DB.Column(DB.Integer, primary_key=True)
    id_circuit = DB.Column(DB.Integer,
                           DB.ForeignKey('oeasc_in.t_circuits.id_circuit'))
    serie = DB.Column(DB.Integer)
    groupes = DB.Column(DB.Integer)
    vent = DB.Column(DB.Unicode)
    temps = DB.Column(DB.Unicode)
    temperature = DB.Column(DB.Unicode)
    date_realisation = DB.Column(DB.Date)
    # Read-only convenience relation: the secteur reached via the circuit.
    secteur = DB.relationship(
        TSecteurs,
        secondary="oeasc_in.t_circuits",
        primaryjoin="TRealisations.id_circuit == TCircuits.id_circuit",
        secondaryjoin="TCircuits.id_secteur == TSecteurs.id_secteur",
        uselist=False)
    circuit = DB.relationship(TCircuits, lazy='joined')
    observations = DB.relationship(
        TObservations,
        cascade="save-update, merge, delete, delete-orphan",
        lazy='joined')
    observers = DB.relationship(
        TObservers,
        secondary=cor_realisation_observer,
        lazy='joined',
    )
    tags = DB.relationship(CorRealisationTag,
                           cascade="save-update, merge, delete, delete-orphan",
                           lazy='joined')

    # Comma-separated observer names for display.
    # Fix: the join previously compared `id_realisation` against
    # CorRealisationObserver.id_observer, matching realisation ids with
    # observer ids; the association must be joined on its realisation key.
    observers_table = column_property(
        select([func.string_agg(TObservers.nom_observer, ', ')]).where(
            and_(TObservers.id_observer == CorRealisationObserver.id_observer,
                 id_realisation == CorRealisationObserver.id_realisation)))

    # Comma-separated "<tag> : o|x" pairs ('o' = valid, 'x' = not valid).
    tags_table = column_property(
        select([
            func.string_agg(
                func.concat(TTags.nom_tag, ' : ',
                            case([(CorRealisationTag.valid, 'o')],
                                 else_='x')),
                ', ')
        ]).where(
            and_(CorRealisationTag.id_realisation == id_realisation,
                 CorRealisationTag.id_tag == TTags.id_tag)))

    # Per-species observation counts, looked up by species name.
    cerfs = column_property(
        select([TObservations.nb]).where(
            and_(TObservations.id_realisation == id_realisation,
                 TObservations.id_espece == TEspeces.id_espece,
                 TEspeces.nom_espece == 'Cerf')))
    lievres = column_property(
        select([TObservations.nb]).where(
            and_(TObservations.id_realisation == id_realisation,
                 TObservations.id_espece == TEspeces.id_espece,
                 TEspeces.nom_espece == 'Lièvre')))
    chevreuils = column_property(
        select([TObservations.nb]).where(
            and_(TObservations.id_realisation == id_realisation,
                 TObservations.id_espece == TEspeces.id_espece,
                 TEspeces.nom_espece == 'Chevreuil')))
    renards = column_property(
        select([TObservations.nb]).where(
            and_(TObservations.id_realisation == id_realisation,
                 TObservations.id_espece == TEspeces.id_espece,
                 TEspeces.nom_espece == 'Renard')))
class Product(db.Model):
    """A purchasable product; its price is derived from the most recent
    ProductPrice history entry."""

    __tablename__ = 'products'
    id = db.Column(db.Integer, primary_key=True)
    creation_date = db.Column(db.DateTime, default=func.now(), nullable=False)
    created_by = db.Column(db.Integer, db.ForeignKey('users.id'),
                           nullable=False)
    name = db.Column(db.String(64), unique=True, nullable=False)
    barcode = db.Column(db.String(32), unique=True, nullable=True)
    active = db.Column(db.Boolean, nullable=False, default=True)
    countable = db.Column(db.Boolean, nullable=False, default=True)
    revocable = db.Column(db.Boolean, nullable=False, default=True)
    image_upload_id = db.Column(db.Integer, db.ForeignKey('uploads.id'),
                                nullable=True)
    tags = db.relationship('Tag', secondary=product_tag_assignments,
                           backref=db.backref('products', lazy='dynamic'))

    # Column property for the price: the most recent ProductPrice entry.
    price = column_property(select([ProductPrice.price])
                            .where(ProductPrice.product_id == id)
                            .order_by(ProductPrice.id.desc())
                            .limit(1)
                            .as_scalar())

    @validates('created_by')
    def validate_admin(self, key, created_by):
        """Only administrators may create products.

        :raises UnauthorizedAccess: when `created_by` is not an admin user.
        """
        user = User.query.filter(User.id == created_by).first()
        if not user or not user.is_admin:
            raise UnauthorizedAccess()
        return created_by

    @hybrid_property
    def imagename(self):
        """Filename of the product image upload, or None when unset."""
        upload = Upload.query.filter_by(id=self.image_upload_id).first()
        if upload:
            return upload.filename
        return None

    @hybrid_method
    def get_pricehistory(self, start_date=None, end_date=None):
        """Return all price changes within the given range as dicts.

        :param start_date: optional POSIX timestamp; defaults to the
                           product's creation date.
        :param end_date: optional POSIX timestamp; defaults to now.
        :raises InvalidData: when a timestamp cannot be converted.
        """
        # If the time range parameters are not set, we use the creation date
        # and the current date as range.
        try:
            start = (datetime.datetime.fromtimestamp(start_date)
                     if start_date else self.creation_date)
            end = (datetime.datetime.fromtimestamp(end_date)
                   if end_date else datetime.datetime.now())
        except ValueError:
            raise InvalidData()

        # Make sure that we select the whole day by shifting the selected
        # range to the very beginning of the start day and to the end of the
        # end day.
        start = start.replace(hour=0, minute=0, second=0, microsecond=0)
        end = end.replace(hour=23, minute=59, second=59, microsecond=999999)

        # Query the pricehistory in the given range.
        res = (db.session.query(ProductPrice)
               .filter(ProductPrice.product_id == self.id)
               .filter(ProductPrice.timestamp.between(start, end))
               .all())

        # Idiom: list comprehension instead of list(map(lambda ...)).
        return [{'id': p.id, 'timestamp': p.timestamp, 'price': p.price}
                for p in res]

    @hybrid_method
    def set_price(self, price, admin_id):
        """Append a new price entry; the existing history is kept."""
        productprice = ProductPrice(
            price=price,
            product_id=self.id,
            admin_id=admin_id
        )
        db.session.add(productprice)

    def __repr__(self):
        return f'<Product {self.name}>'
class Project(omp.proj):
    """An OMP project, enriched with relationships into the JCMT archive.

    Adds observation/MSB/fault/feedback relationships, derived completion
    statistics, and helper queries for papers and observing activity.
    """

    __tablename__ = 'ompproj'
    __table_args__ = (
        {'schema': 'omp', 'extend_existing': False}
    )

    observations = relationship(Obs,
                                primaryjoin=omp.proj.projectid == jcmt.COMMON.project,
                                foreign_keys=[jcmt.COMMON.project],
                                order_by=jcmt.COMMON.date_obs)
    projusers = relationship(ProjUser,
                             primaryjoin=omp.proj.projectid == ProjUser.projectid,
                             foreign_keys=[ProjUser.projectid],
                             order_by=[ProjUser.capacity.desc(), ProjUser.capacity_order.asc()],
                             lazy='joined')
    queueinfo = relationship(ProjQueue,
                             primaryjoin=omp.proj.projectid == omp.projqueue.projectid,
                             foreign_keys=[omp.projqueue.projectid],
                             lazy='joined')
    # Percentage of the allocation that has been used (charged + pending).
    completion = column_property(
        100 * (omp.proj.allocated - (omp.proj.remaining - omp.proj.pending)) / omp.proj.allocated)
    # Absolute amount of the allocation used.
    used = column_property(omp.proj.allocated - (omp.proj.remaining - omp.proj.pending))
    timecharged = relationship(TimeAcct,
                               primaryjoin=omp.proj.projectid == omp.timeacct.projectid,
                               foreign_keys=[omp.timeacct.projectid],
                               order_by=omp.timeacct.date,
                               backref=backref("project", uselist=False))
    faults = relationship(omp.fault, secondary=omp.faultassoc.__table__,
                          primaryjoin=omp.proj.projectid == omp.faultassoc.projectid,
                          secondaryjoin=omp.fault.faultid == omp.faultassoc.faultid,
                          foreign_keys=[omp.faultassoc.projectid, omp.fault.faultid])
    msbs_done = relationship(MSBDone,
                             primaryjoin=omp.proj.projectid == omp.msbdone.projectid,
                             foreign_keys=omp.msbdone.projectid,
                             order_by=omp.msbdone.date)
    msbs_scheduled = relationship(MSBScheduled,
                                  primaryjoin=omp.proj.projectid == omp.msb.projectid,
                                  foreign_keys=[omp.msb.projectid])
    feedback = relationship(Feedback,
                            primaryjoin=omp.proj.projectid == omp.feedback.projectid,
                            foreign_keys=[omp.feedback.projectid],
                            order_by=omp.feedback.date)

    def get_msb_done_summary(self):
        """Return a summary of the msbs done, with observation counts.

        Returns a list of ``msbdone_summary`` tuples (empty if no MSBs
        have been done for this project).
        """
        # Handle large number of results by breaking it down into separate
        # queries with no join.
        obs_rows = (object_session(self)
                    .query(Obs.msbid, func.count(Obs.obsid).label('obscount'))
                    .filter(Obs.project == self.projectid)
                    .filter(Obs.msbid != 'CAL')
                    .filter(Obs.obs_type == 'science')
                    .group_by(Obs.msbid)
                    .all())
        # BUGFIX: the original used zip(*rows), which raises when the query
        # returns no rows; dict() over (msbid, count) pairs is equivalent
        # and handles the empty case.
        obscount_dict = dict(obs_rows)

        msbquery = object_session(self).query(MSBDone).group_by(MSBDone.checksum, MSBDone.status)
        msbquery = msbquery.add_column(func.count('*').label('msb_count'))
        msbquery = msbquery.filter(MSBDone.projectid == self.projectid)
        msbquery = msbquery.filter(MSBDone.checksum != 'CAL')
        msbquery = msbquery.order_by(MSBDone.title, MSBDone.status)
        msbs = msbquery.all()

        # BUGFIX: the original returned an undefined name `results` when
        # `msbs` was empty; return an empty list instead.
        if not msbs:
            return []
        return [msbdone_summary(*(i[0].checksum, i[0].status, i[0].title,
                                  i[0].instrument, i[0].waveband, i[0].target,
                                  i[0].projectid, i[1],
                                  obscount_dict.get(i[0].checksum, 0)))
                for i in msbs]

    def __repr__(self):
        return "<{}({})>".format(self.__class__.__name__, self.projectid)

    @property
    def queues(self):
        """Names of the queues (countries) this project belongs to."""
        return [i.country for i in self.queueinfo]

    @property
    def obs_without_msb(self):
        """Observations whose msbid does not match any completed MSB."""
        checksums = [i.checksum for i in self.msbs_done]
        return [i for i in self.observations if i.msbid not in checksums]

    @property
    def pis(self):
        """Project users acting as principal investigator."""
        return [i for i in self.projusers if i.capacity == 'PI']

    @property
    def fops(self):
        """Project users acting as friend-of-project (support)."""
        return [i for i in self.projusers if i.capacity == 'SUPPORT']

    @property
    def cois(self):
        """Project users acting as co-investigators."""
        return [i for i in self.projusers if i.capacity == 'COI']

    def get_processing_jobs(self, tasks=None, obsinfo=None, instinfo=None):
        """Return processing jobs for this project's observations.

        :param tasks: list of task names to keep; defaults to
            ``['jcmt-nightly']``.  An empty list keeps all tasks.
        :param obsinfo: optional attribute filter applied to observations.
        :param instinfo: optional attribute filter applied to subsystems.
        """
        # BUGFIX: mutable default argument replaced with None sentinel;
        # default behavior is unchanged.
        if tasks is None:
            tasks = ['jcmt-nightly']
        observations = self.observations
        if obsinfo:
            observations = [o for o in observations if has_all_attr(o, obsinfo)]
        if instinfo:
            subsystems = [s for o in observations for s in o._inst_info
                          if has_all_attr(s, instinfo)]
        else:
            subsystems = [s for o in observations for s in o._inst_info]
        return [i for s in subsystems for i in s.processing_jobs
                if (i.task in tasks or not tasks)]

    def msbs_repeats(self):
        """Get msbs as dictionary with all observations done by msb checksum?.

        Not yet implemented.
        """
        # BUGFIX: was `proj.msbs_done` (undefined name); use self.
        msb_checksums = set([i.checksum for i in self.msbs_done])
        raise NotImplementedError

    def get_dates_obsactivity(self, byshift=False, observations=None):
        """Get the dates where observations happened/time was charged.

        Return an ordered dictionary with the keys as days that either
        observations were taken or time was charged (or both). The items
        contain a tuple, first item is lists of times charged that day,
        second item is list of observations.
        """
        if not observations:
            observations = self.observations
        if not self.timecharged and not observations:
            return None

        timeresults = dict()
        for t in self.timecharged:
            date = t.date.date()
            r = timeresults.get(date, {'all-confirmed': 0.0, 'all-pending': 0.0})
            if t.confirmed:
                r['all-confirmed'] = r['all-confirmed'] + t.timespent / (60.0 * 60)
            else:
                r['all-pending'] = r['all-pending'] + t.timespent / (60.0 * 60)
            # Ensure it works if multiple shifts in one night: shouldn't
            # happen, but could.
            if byshift:
                shifttop = r.get('shifts', {})
                shiftr = shifttop.get(t.shifttype, {'pending': 0.0, 'confirmed': 0.0})
                if t.confirmed:
                    shiftr['confirmed'] += t.timespent / (60.0 * 60)
                else:
                    shiftr['pending'] += t.timespent / (60.0 * 60.)
                if t.comment:
                    shiftr['comment'] = t.comment
                shifttop[t.shifttype] = shiftr
                r['shifts'] = shifttop
            timeresults[date] = r

        # Now for observations: want time spent on sky by ompstatus (and
        # shift? and band?).
        # BUGFIX: iterate the (possibly caller-filtered) `observations`
        # rather than always self.observations, and drop the debug print.
        obsresults = dict()
        for o in observations:
            date = o.date_obs.date()
            r = obsresults.get(date, {})
            key = (date, o.instrument, o.band, o.ompstatus)
            r[key] = r.get(key, 0.0) + o.duration / (60 * 60.0)
            obsresults[date] = r

        dates = timeresults.keys() | obsresults.keys()
        results = OrderedDict()
        for d in sorted(list(dates)):
            results[d] = (timeresults.get(d, None), obsresults.get(d, None))
        return results

    def get_secondary_jcmt_papers(self, direct_papers):
        """Get all secondary JCMT papers that list a direct paper of this
        project as the source."""
        bibcodes = [i.bibcode for i in direct_papers]
        from eaodb import pigwidgeon as pig
        from sqlalchemy import select, func, alias
        session = self._sa_instance_state.session
        searchid = 1
        JCMT_PAPERTYPES = [i.id for i in session.query(pig.paper_types).filter(
            pig.paper_types.name_.in_(['JCMT Science Paper', 'JCMT Theory Paper'])).all()]
        BIBCODE_INFOSECTION = [i.id for i in session.query(pig.info_sections).filter(
            pig.info_sections.name_ == 'Data Source').all()]

        query = session.query(pig.papers)
        query = query.join(pig.comments, pig.papers.id == pig.comments.paper_id)
        query = query.join(pig.info_section_values,
                           pig.info_section_values.comment_id == pig.comments.id)
        # Only consider the most recent comment per paper.
        query = query.filter(pig.comments.id.in_(
            select([func.max(pig.comments.id)]).group_by(pig.comments.paper_id)))
        query = query.filter(func.upper(pig.info_section_values.entered_text).in_(bibcodes))
        query = query.filter(pig.comments.search_id == searchid)
        query = query.join(pig.papertype_value,
                           pig.comments.id == pig.papertype_value.comment_id)
        query = query.filter(pig.papertype_value.papertype_id.in_(JCMT_PAPERTYPES))
        query = query.filter(pig.info_section_values.info_section_id.in_(BIBCODE_INFOSECTION))
        query = query.order_by(pig.papers.refereed, pig.papers.pubdate)
        query = query.options(joinedload(pig.papers.authors))
        papers = query.all()
        return papers

    def get_direct_jcmt_papers(self):
        """Get JCMT Science/Theory papers where this project is listed as
        the source of data."""
        from eaodb import pigwidgeon as pig
        from sqlalchemy import select, func, alias
        session = self._sa_instance_state.session
        searchid = 1
        JCMT_PAPERTYPES = [i.id for i in session.query(pig.paper_types).filter(
            pig.paper_types.name_.in_(['JCMT Science Paper', 'JCMT Theory Paper'])).all()]
        PROJECT_INFOSECTIONS = [i.id for i in session.query(pig.info_sections).filter(
            pig.info_sections.name_.in_(["Project Codes (PI/JLS/LAP)",
                                         "Project Codes (archival/other)"])).all()]

        all_projectid = [self.projectid]
        extra_project_names = lap_project_aliases.get(self.projectid, None)
        if extra_project_names:
            # NOTE(review): if the alias table maps to a list this nests a
            # list inside all_projectid (behavior preserved) — confirm
            # whether `extend` was intended.
            all_projectid.append(extra_project_names)

        query = session.query(pig.papers)
        query = query.join(pig.comments, pig.papers.id == pig.comments.paper_id)
        query = query.join(pig.info_section_values,
                           pig.info_section_values.comment_id == pig.comments.id)
        # Only consider the most recent comment per paper.
        query = query.filter(pig.comments.id.in_(
            select([func.max(pig.comments.id)]).group_by(pig.comments.paper_id)))
        query = query.filter(func.upper(pig.info_section_values.entered_text).in_(all_projectid))
        query = query.filter(pig.comments.search_id == searchid)
        query = query.join(pig.papertype_value,
                           pig.comments.id == pig.papertype_value.comment_id)
        query = query.filter(pig.papertype_value.papertype_id.in_(JCMT_PAPERTYPES))
        query = query.filter(pig.info_section_values.info_section_id.in_(PROJECT_INFOSECTIONS))
        query = query.order_by(pig.papers.refereed, pig.papers.pubdate)
        query = query.options(joinedload(pig.papers.authors))
        papers = query.all()
        return papers
class InvoiceLine(Base): __tablename__ = "payment_invoice_line" id = Column(Integer(), primary_key=True) invoice_id = Column(Integer(), ForeignKey('payment_invoice.id'), nullable=False) service_id = Column(Integer(), ForeignKey('payment_service.id'), nullable=False) title = Column(Unicode(), nullable=False) price = Column(Numeric(precision=10, scale=2), nullable=False) quantity = Column(Integer(), nullable=False, default=1) discount = Column(Numeric(precision=10, scale=2), nullable=False, default=0) start_date = Column(Date(), nullable=True) note = Column(Unicode(), nullable=True) extra_fields = Column(JSONType(mutable=True), nullable=False, default={}) service = relationship("Service") __to_dict__ = copy.deepcopy(Base.__to_dict__) __to_dict__.update({ 'default': { 'id' : None , 'invoice_id' : None , 'service_id' : None , 'title' : None , 'price' : None , 'quantity' : None , 'discount' : None , 'note' : None , 'price_final' : None , }, }) def __init__(self, invoice=None, service=None, payment_account_service=None, start_date=None): if payment_account_service: self.service = payment_account_service.service self.note = payment_account_service.note self.discount = payment_account_service.discount if service: self.service = service self.title = self.service.title self.price = self.service.get_price(invoice.currency, invoice.payment_account.frequency) self.extra_fields = self.service.extra_fields if invoice: self.invoice = invoice self.start_date = start_date _config = None price_final = column_property( func.round(price * (1-discount) * quantity,2) ) # @property # def price_final(self): # return (self.price * (Decimal('1.00')-self.discount) * Decimal(self.quantity)).quantize(Decimal('1.00')) @property def config(self): if not self.extra_fields: self.extra_fields = {} if not self._config: self._config = _ConfigManager(self.extra_fields) return self._config
class Project(SitemapMixin, db.ModelBase): __tablename__ = "packages" __table_args__ = (CheckConstraint( "name ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'::text", name="packages_valid_name", ), ) __repr__ = make_repr("name") name = Column(Text, primary_key=True, nullable=False) normalized_name = orm.column_property(func.normalize_pep426_name(name)) stable_version = Column(Text) autohide = Column(Boolean, server_default=sql.true()) comments = Column(Boolean, server_default=sql.true()) bugtrack_url = Column(Text) hosting_mode = Column(Text, nullable=False, server_default="pypi-only") created = Column( DateTime(timezone=False), nullable=False, server_default=sql.func.now(), ) has_docs = Column(Boolean) upload_limit = Column(Integer, nullable=True) users = orm.relationship( User, secondary=Role.__table__, backref="projects", ) releases = orm.relationship( "Release", backref="project", cascade="all, delete-orphan", order_by=lambda: Release._pypi_ordering.desc(), ) def __getitem__(self, version): session = orm.object_session(self) try: return (session.query(Release).filter((Release.project == self) & ( Release.version == version)).one()) except NoResultFound: raise KeyError from None def __acl__(self): session = orm.object_session(self) acls = [] # Get all of the users for this project. query = session.query(Role).filter(Role.project == self) query = query.options(orm.lazyload("project")) query = query.options(orm.joinedload("user").lazyload("emails")) for role in sorted( query.all(), key=lambda x: ["Owner", "Maintainer"].index(x.role_name)): acls.append((Allow, role.user.id, ["upload"])) return acls @property def documentation_url(self): # TODO: Move this into the database and elimnate the use of the # threadlocal here. request = get_current_request() # If the project doesn't have docs, then we'll just return a None here. if not self.has_docs: return return request.route_url("legacy.docs", project=self.name)
class Proposal(db.Model):
    """A funding proposal and its status/stage state machine.

    Lifecycle: DRAFT/REJECTED -> PENDING (submit_for_approval) ->
    APPROVED or REJECTED (approve_pending) -> LIVE (publish); stage moves
    PREVIEW -> WIP and may end in CANCELED/FAILED.
    """

    __tablename__ = "proposal"

    id = db.Column(db.Integer(), primary_key=True)
    date_created = db.Column(db.DateTime)
    rfp_id = db.Column(db.Integer(), db.ForeignKey('rfp.id'), nullable=True)

    # Content info
    status = db.Column(db.String(255), nullable=False)
    title = db.Column(db.String(255), nullable=False)
    brief = db.Column(db.String(255), nullable=False)
    stage = db.Column(db.String(255), nullable=False)
    content = db.Column(db.Text, nullable=False)
    category = db.Column(db.String(255), nullable=False)
    date_approved = db.Column(db.DateTime)
    date_published = db.Column(db.DateTime)
    reject_reason = db.Column(db.String())
    private = db.Column(db.Boolean, default=False, nullable=False)

    # Payment info
    # NOTE(review): target is stored as a string and parsed with float()
    # in validation — presumably to avoid float columns for money.
    target = db.Column(db.String(255), nullable=False)

    # Relations
    team = db.relationship(
        "User",
        secondary=proposal_team
    )
    comments = db.relationship(
        Comment,
        backref="proposal",
        lazy=True,
        cascade="all, delete-orphan"
    )
    updates = db.relationship(
        ProposalUpdate,
        backref="proposal",
        lazy=True,
        cascade="all, delete-orphan"
    )
    milestones = db.relationship(
        "Milestone",
        backref="proposal",
        order_by="asc(Milestone.index)",
        lazy=True,
        cascade="all, delete-orphan"
    )
    invites = db.relationship(
        ProposalTeamInvite,
        backref="proposal",
        lazy=True,
        cascade="all, delete-orphan"
    )
    followers = db.relationship(
        "User",
        secondary=proposal_follower,
        back_populates="followed_proposals"
    )

    # DB-side count of followers via the association table.
    followers_count = column_property(
        select([func.count(proposal_follower.c.proposal_id)])
        .where(proposal_follower.c.proposal_id == id)
        .correlate_except(proposal_follower)
    )
    # DB-side count of visible (non-hidden) comments.
    comments_count = column_property(
        select([func.count(Comment.proposal_id)])
        .where(Comment.proposal_id == id)
        .where(Comment.hidden != True)
        .correlate_except(Comment)
    )

    def __init__(
            self,
            status: str = ProposalStatus.DRAFT,
            title: str = '',
            brief: str = '',
            content: str = '',
            stage: str = ProposalStage.PREVIEW,
            target: str = '0',
            category: str = ''
    ):
        # IDs are random rather than sequential (see gen_random_id).
        self.id = gen_random_id(Proposal)
        self.date_created = datetime.datetime.now()
        self.status = status
        self.title = title
        self.brief = brief
        self.content = content
        self.category = category
        self.target = target
        self.stage = stage

    @staticmethod
    def simple_validate(proposal):
        """Validate a dict of fields well enough to be database save-able.

        Stricter validation is done in validate_publishable.
        """
        stage = proposal.get('stage')
        category = proposal.get('category')

        if stage and not ProposalStage.includes(stage):
            raise ValidationException("Proposal stage {} is not a valid stage".format(stage))
        if category and not Category.includes(category):
            raise ValidationException("Category {} not a valid category".format(category))

    def validate_publishable_milestones(self):
        """Validate milestones; payouts must be positive integers summing
        to the proposal target."""
        payout_total = 0.0
        for i, milestone in enumerate(self.milestones):
            if milestone.immediate_payout and i != 0:
                raise ValidationException("Only the first milestone can have an immediate payout")
            if len(milestone.title) > 60:
                raise ValidationException("Milestone title cannot be longer than 60 chars")
            if len(milestone.content) > 200:
                raise ValidationException("Milestone content cannot be longer than 200 chars")
            try:
                p = float(milestone.payout_amount)
                if not p.is_integer():
                    raise ValidationException("Milestone payout must be whole numbers, no decimals")
                if p <= 0:
                    raise ValidationException("Milestone payout must be greater than zero")
            except ValueError:
                raise ValidationException("Milestone payout percent must be a number")

            payout_total += p

        if payout_total != float(self.target):
            raise ValidationException("Payout of milestones must add up to proposal target")

    def validate_publishable(self):
        """Full validation gate run before submit/approve/publish."""
        self.validate_publishable_milestones()

        # Require certain fields
        required_fields = ['title', 'content', 'brief', 'category', 'target']
        for field in required_fields:
            if not hasattr(self, field):
                raise ValidationException("Proposal must have a {}".format(field))

        # Stricter limits on certain fields
        if len(self.title) > 60:
            raise ValidationException("Proposal title cannot be longer than 60 characters")
        if len(self.brief) > 140:
            raise ValidationException("Brief cannot be longer than 140 characters")
        # NOTE(review): this caps content at 250,000 chars but update()
        # truncates at 300,000 — confirm which limit is intended.
        if len(self.content) > 250000:
            raise ValidationException("Content cannot be longer than 250,000 characters")

        # Then run through regular validation
        Proposal.simple_validate(vars(self))

    # only do this when user submits for approval, there is a chance the dates will
    # be passed by the time admin approval / user publishing occurs
    def validate_milestone_dates(self):
        # Truncate "now" to the first of the month so estimates within the
        # current month still pass.
        present = datetime.datetime.today().replace(day=1, hour=0, minute=0, second=0, microsecond=0)
        for milestone in self.milestones:
            if present > milestone.date_estimated:
                raise ValidationException("Milestone date estimate must be in the future ")

    @staticmethod
    def create(**kwargs):
        """Validate kwargs, create, add and flush a new Proposal."""
        Proposal.simple_validate(kwargs)
        proposal = Proposal(
            **kwargs
        )
        db.session.add(proposal)
        db.session.flush()
        return proposal

    @staticmethod
    def get_by_user(user, statuses=[ProposalStatus.LIVE]):
        """Return a user's proposals in the given statuses, hiding private
        proposals from non-team viewers.

        NOTE(review): mutable default argument — benign here since
        `statuses` is only iterated, never mutated.
        """
        from grant.utils.auth import get_authed_user
        authed = get_authed_user()
        status_filter = or_(Proposal.status == v for v in statuses)
        res = Proposal.query \
            .join(proposal_team) \
            .filter(proposal_team.c.user_id == user.id) \
            .filter(status_filter) \
            .all()
        # only team members get to see private proposals
        without_priv = []
        for p in res:
            if p.private:
                if authed and authed.id in [t.id for t in p.team]:
                    without_priv.append(p)
            else:
                without_priv.append(p)
        return without_priv

    def update(
            self,
            title: str = '',
            brief: str = '',
            category: str = '',
            content: str = '',
            target: str = '0',
    ):
        """Update editable fields, truncating to column limits."""
        self.title = title[:255]
        self.brief = brief[:255]
        self.category = category
        self.content = content[:300000]
        self.target = target[:255] if target != '' else '0'
        Proposal.simple_validate(vars(self))

    def send_admin_email(self, type: str):
        """Notify admins about this proposal (e.g. pending approval)."""
        send_admin_email(type, {
            'proposal': self,
            'proposal_url': make_admin_url(f'/proposals/{self.id}'),
        })

    def send_follower_email(self, type: str, email_args={}, url_suffix=''):
        """Send a templated email to every follower.

        NOTE(review): mutable default `email_args={}` — benign since it is
        only unpacked, never mutated.
        """
        for u in self.followers:
            send_email(u.email_address, type, {
                'user': u,
                'proposal': self,
                'proposal_url': make_url(f'/proposals/{self.id}{url_suffix}'),
                **email_args
            })

    # state: status (DRAFT || REJECTED) -> PENDING
    def submit_for_approval(self):
        self.validate_publishable()
        self.validate_milestone_dates()
        allowed_statuses = [ProposalStatus.DRAFT, ProposalStatus.REJECTED]
        # specific validation
        if self.status not in allowed_statuses:
            raise ValidationException(f"Proposal status must be draft or rejected to submit for approval")
        self.send_admin_email('admin_approval')
        self.status = ProposalStatus.PENDING
        db.session.add(self)
        db.session.flush()

    # state: status PENDING -> (APPROVED || REJECTED)
    def approve_pending(self, is_approve, reject_reason=None):
        """Approve or reject a pending proposal and email the team."""
        self.validate_publishable()
        # specific validation
        if not self.status == ProposalStatus.PENDING:
            raise ValidationException(f"Proposal must be pending to approve or reject")

        if is_approve:
            self.status = ProposalStatus.APPROVED
            self.date_approved = datetime.datetime.now()
            for t in self.team:
                send_email(t.email_address, 'proposal_approved', {
                    'user': t,
                    'proposal': self,
                    'proposal_url': make_url(f'/proposals/{self.id}'),
                    'admin_note': 'Congratulations! Your proposal has been approved.'
                })
        else:
            if not reject_reason:
                raise ValidationException("Please provide a reason for rejecting the proposal")
            self.status = ProposalStatus.REJECTED
            self.reject_reason = reject_reason
            for t in self.team:
                send_email(t.email_address, 'proposal_rejected', {
                    'user': t,
                    'proposal': self,
                    'proposal_url': make_url(f'/proposals/{self.id}'),
                    'admin_note': reject_reason
                })

    # state: status APPROVE -> LIVE, stage PREVIEW -> WIP
    def publish(self):
        self.validate_publishable()
        # specific validation
        if not self.status == ProposalStatus.APPROVED:
            raise ValidationException(f"Proposal status must be approved")
        self.date_published = datetime.datetime.now()
        self.status = ProposalStatus.LIVE
        self.stage = ProposalStage.WIP

    def cancel(self):
        """Cancel a live proposal and notify the team."""
        if self.status != ProposalStatus.LIVE:
            raise ValidationException("Cannot cancel a proposal until it's live")
        self.stage = ProposalStage.CANCELED
        db.session.add(self)
        db.session.flush()

        # Send emails to team & contributors
        for u in self.team:
            send_email(u.email_address, 'proposal_canceled', {
                'proposal': self,
                'support_url': make_url('/contact'),
            })

    def follow(self, user, is_follow):
        """Add or remove ``user`` from this proposal's followers."""
        if is_follow:
            self.followers.append(user)
        else:
            self.followers.remove(user)
        db.session.flush()

    @hybrid_property
    def is_failed(self):
        # Only a published, live proposal can be considered failed.
        if not self.status == ProposalStatus.LIVE or not self.date_published:
            return False
        if self.stage == ProposalStage.FAILED or self.stage == ProposalStage.CANCELED:
            return True
        return False

    @hybrid_property
    def current_milestone(self):
        """First unpaid milestone, or the last one if all are paid."""
        if self.milestones:
            for ms in self.milestones:
                if ms.stage != MilestoneStage.PAID:
                    return ms
            return self.milestones[-1]  # return last one if all PAID
        return None

    @hybrid_property
    def authed_follows(self):
        """Whether the currently authenticated user follows this proposal."""
        from grant.utils.auth import get_authed_user
        authed = get_authed_user()
        if not authed:
            return False
        res = db.session.query(proposal_follower) \
            .filter_by(user_id=authed.id, proposal_id=self.id) \
            .count()
        if res:
            return True
        return False
class Orf(DBBASE):
    """
    Serialization class for ORFs derived from BED12 files.
    """

    __tablename__ = "orf"

    orf_id = Column(Integer, primary_key=True)
    query_id = Column(Integer, ForeignKey(Query.query_id), unique=False)
    start = Column(Integer, nullable=False)
    end = Column(Integer, nullable=False)
    orf_name = Column(String(200))
    strand = Column(CHAR)
    thick_start = Column(Integer, nullable=False)
    thick_end = Column(Integer, nullable=False)
    score = Column(Float)
    has_start_codon = Column(Boolean, nullable=True)
    has_stop_codon = Column(Boolean, nullable=True)
    cds_len = Column(Integer)
    phase = Column(Integer, nullable=False)

    __table_args__ = (Index("orf_index", "query_id", "thick_start", "thick_end"),
                      Index("query_index", "query_id"))

    query_object = relationship(Query,
                                uselist=False,
                                backref=backref("orfs"),
                                lazy="immediate")

    # Query name resolved DB-side from the Query table.
    query = column_property(
        select([Query.query_name]).where(Query.query_id == query_id))

    def __init__(self, bed12_object, query_id):
        """Populate the row from a :class:`bed12.BED12` record.

        :param bed12_object: parsed BED12 line; must be a bed12.BED12.
        :param query_id: foreign key of the associated Query row.
        :raises TypeError: if ``bed12_object`` is not a BED12 instance.
        """
        if not isinstance(bed12_object, bed12.BED12):
            raise TypeError("Invalid data type!")
        self.query_id = query_id
        self.start = bed12_object.start
        self.end = bed12_object.end
        self.thick_start = bed12_object.thick_start
        self.thick_end = bed12_object.thick_end
        self.orf_name = bed12_object.name
        self.strand = bed12_object.strand
        self.score = bed12_object.score
        self.has_start_codon = bed12_object.has_start_codon
        self.has_stop_codon = bed12_object.has_stop_codon
        self.cds_len = bed12_object.cds_len
        self.phase = bed12_object.phase

    def __str__(self):
        return "{chrom}\t{start}\t{end}".format(chrom=self.query,
                                                start=self.start,
                                                end=self.end)

    @classmethod
    def as_bed12_static(cls, state, query_name):
        """Class method to transform the mapper into a BED12 object.

        Usable from outside the class.

        :param state: the original state derived from the mapping.
        :param query_name: the name of the query, retrieved from the Query
            associated object/table.
        """
        __bed12 = bed12.BED12()
        __bed12.header = False
        __bed12.query = __bed12.chrom = query_name
        __bed12.start = state.start
        __bed12.end = state.end
        __bed12.name = state.orf_name
        __bed12.score = state.score
        __bed12.strand = state.strand
        __bed12.thick_start = state.thick_start
        __bed12.thick_end = state.thick_end
        __bed12.rgb = 0
        __bed12.block_count = 1
        __bed12.block_sizes = [state.end]
        __bed12.block_starts = [0]
        __bed12.transcriptomic = True
        __bed12.phase = state.phase
        # Raw extraction from SQL yields 0/1 instead of True/False, so
        # normalise to real booleans (bool() handles 0/1/None identically
        # to the previous verbose if/else block).
        __bed12.has_start_codon = bool(state.has_start_codon)
        __bed12.has_stop_codon = bool(state.has_stop_codon)
        return __bed12

    def as_bed12(self):
        """Method to transform the mapper into a BED12 object."""
        return self.as_bed12_static(self, self.query)
def send_post_to_socket(mapper, connection, target): target.post.send_to_changes() @event.listens_for(LikedPost, 'after_update', propagate=True) def send_post_to_socket_ts(mapper, connection, target): if not inspect(target).unmodified_intersection(('tombstone_date')): target.db.expire(target.post, ['like_count']) target.post.send_to_changes() _lpt = LikedPost.__table__ _actt = Action.__table__ Content.like_count = column_property( select([func.count(_actt.c.id)]).where( (_lpt.c.id == _actt.c.id) & (_lpt.c.post_id == Content.__table__.c.id) & (_actt.c.type == LikedPost.__mapper_args__['polymorphic_identity']) & (_actt.c.tombstone_date == None)).correlate_except(_actt, _lpt)) class ExpandPost(UniqueActionOnPost): """ An expansion action on a post. """ __mapper_args__ = {'polymorphic_identity': 'version:ExpandPost_P'} verb = 'expanded' class CollapsePost(UniqueActionOnPost): """
class Comment(db.Model): __tablename__ = "comment" id = db.Column(db.Integer(), primary_key=True) date_created = db.Column(db.DateTime) content = db.Column(db.Text, nullable=False) hidden = db.Column(db.Boolean, nullable=False, default=False, server_default=db.text("FALSE")) reported = db.Column(db.Boolean, nullable=True, default=False, server_default=db.text("FALSE")) parent_comment_id = db.Column(db.Integer, db.ForeignKey("comment.id"), nullable=True) proposal_id = db.Column(db.Integer, db.ForeignKey("proposal.id"), nullable=False) user_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=False) author = db.relationship("User", back_populates="comments") replies = db.relationship("Comment") likes = db.relationship( "User", secondary=comment_liker, back_populates="liked_comments" ) likes_count = column_property( select([func.count(comment_liker.c.comment_id)]) .where(comment_liker.c.comment_id == id) .correlate_except(comment_liker) ) def __init__(self, proposal_id, user_id, parent_comment_id, content): self.id = gen_random_id(Comment) self.proposal_id = proposal_id self.user_id = user_id self.parent_comment_id = parent_comment_id self.content = content[:5000] self.date_created = datetime.datetime.now() @staticmethod def get_by_user(user): return Comment.query \ .options(raiseload(Comment.replies)) \ .filter(Comment.user_id == user.id) \ .order_by(Comment.date_created.desc()) \ .all() def report(self, reported: bool): self.reported = reported db.session.add(self) def hide(self, hidden: bool): self.hidden = hidden db.session.add(self) @hybrid_property def authed_liked(self): from grant.utils.auth import get_authed_user authed = get_authed_user() if not authed: return False res = ( db.session.query(comment_liker) .filter_by(user_id=authed.id, comment_id=self.id) .count() ) if res: return True return False def like(self, user, is_liked): if is_liked: self.likes.append(user) else: self.likes.remove(user) db.session.flush()
def evaluate_property(self, prop): return column_property(prop.label(None), *self.args, **self.kwargs)
class TorsionDriveProcedureORM(ProcedureMixin, BaseResultORM):
    """
        A torsion drive procedure
    """

    __tablename__ = 'torsiondrive_procedure'

    id = Column(Integer, ForeignKey('base_result.id', ondelete='cascade'), primary_key=True)

    def __init__(self, **kwargs):
        # Default the schema version; procedure/program are fixed for this
        # subclass before the mixin/base initialisation runs.
        kwargs.setdefault("version", 1)
        self.procedure = "torsiondrive"
        self.program = "torsiondrive"
        super().__init__(**kwargs)

    # input data (along with the mixin)

    # ids of the many to many relation: aggregated DB-side from the
    # association table.
    initial_molecule = column_property(
        select([func.array_agg(torsion_init_mol_association.c.molecule_id)])
        .where(torsion_init_mol_association.c.torsion_id == id)
    )

    # actual objects relation M2M, never loaded here
    initial_molecule_obj = relationship(MoleculeORM,
                                        secondary=torsion_init_mol_association,
                                        uselist=True,
                                        lazy='noload')

    optimization_spec = Column(JSON)

    # Output data
    final_energy_dict = Column(JSON)
    minimum_positions = Column(JSON)

    optimization_history_obj = relationship(
        OptimizationHistory,
        cascade="all, delete-orphan",
        # backref="torsiondrive_procedure",
        order_by=OptimizationHistory.position,
        collection_class=ordering_list('position'),
        lazy='selectin')

    @hybrid_property
    def optimization_history(self):
        """calculated property when accessed, not saved in the DB
        A view of the many to many relation in the form of a dict"""
        ret = {}
        try:
            for opt_history in self.optimization_history_obj:
                if opt_history.key in ret:
                    ret[opt_history.key].append(str(opt_history.opt_id))
                else:
                    ret[opt_history.key] = [str(opt_history.opt_id)]
        except Exception as err:
            # raises exception of first access!!
            # NOTE(review): broad catch with print — presumably tolerates
            # detached-instance access; confirm before narrowing.
            print(err)

        return ret

    @optimization_history.setter
    def optimization_history(self, dict_values):
        """A private copy of the opt history as a dict
        Key: list of optimization procedures

        NOTE(review): this setter is a no-op (the return value of a
        property setter is discarded and nothing is stored); it appears to
        exist only so assignment does not raise. Actual persistence goes
        through update_relations().
        """
        return dict_values

    __table_args__ = (
        Index('ix_torsion_drive_program', 'program'),  # todo: needed for procedures?
    )

    __mapper_args__ = {
        'polymorphic_identity': 'torsiondrive_procedure',
        # to have separate select when querying BaseResultsORM
        'polymorphic_load': 'selectin',
    }

    def update_relations(self, initial_molecule=None, optimization_history=None, **kwarg):
        """Rebuild the M2M association rows for this procedure.

        ``optimization_history`` maps key -> list of optimization ids; the
        existing history rows are replaced wholesale.
        """
        # update torsion molecule relation
        self._update_many_to_many(torsion_init_mol_association, 'torsion_id', 'molecule_id',
                                  self.id, initial_molecule, self.initial_molecule)

        self.optimization_history_obj = []
        for key in optimization_history:
            for opt_id in optimization_history[key]:
                opt_history = OptimizationHistory(torsion_id=int(self.id),
                                                  opt_id=int(opt_id),
                                                  key=key)
                self.optimization_history_obj.append(opt_history)
class Callfilter(Base):
    """ORM for the ``callfilter`` table (boss/secretary call filtering).

    Exposes API-friendly views of the raw columns via hybrid properties
    (``strategy``, ``surrogates_timeout``, ``enabled``) and association
    proxies for the caller id.
    """

    __tablename__ = 'callfilter'
    __table_args__ = (
        PrimaryKeyConstraint('id'),
        UniqueConstraint('name'),
    )

    id = Column(Integer, nullable=False)
    tenant_uuid = Column(String(36), ForeignKey('tenant.uuid', ondelete='CASCADE'), nullable=False)
    name = Column(String(128), nullable=False, server_default='')
    type = Column(enum.callfilter_type, nullable=False)
    bosssecretary = Column(enum.callfilter_bosssecretary)
    callfrom = Column(enum.callfilter_callfrom)
    ringseconds = Column(Integer, nullable=False, server_default='0')
    commented = Column(Integer, nullable=False, server_default='0')
    description = Column(Text)

    # Scalar subquery giving the non-commented 'bsfilter' extension.
    # NOTE(review): the subquery has no condition tying it back to this
    # Callfilter row, so it does not appear to correlate per-row — confirm
    # this is intended (a single feature extension shared by all filters).
    exten = column_property(
        select([Extension.exten])
        .where(and_(Extension.typeval == 'bsfilter',
                    Extension.commented == '0'))
        .correlate_except(Extension)
        .as_scalar()
    )

    # Dialactions attached to this filter, keyed by their 'event' value.
    callfilter_dialactions = relationship(
        'Dialaction',
        primaryjoin="""and_(
            Dialaction.category == 'callfilter',
            Dialaction.categoryval == cast(Callfilter.id, String
        ))""",
        cascade='all, delete-orphan',
        collection_class=attribute_mapped_collection('event'),
        foreign_keys='Dialaction.categoryval',
    )

    # Single Callerid row of type 'callfilter' owned by this filter.
    caller_id = relationship(
        'Callerid',
        primaryjoin="""and_(
            Callerid.type == 'callfilter',
            Callerid.typeval == Callfilter.id
        )""",
        foreign_keys='Callerid.typeval',
        cascade='all, delete-orphan',
        uselist=False,
    )

    # Read/write shortcuts onto the caller_id row; the creator builds the
    # row on first assignment.
    caller_id_mode = association_proxy(
        'caller_id', 'mode',
        creator=lambda _mode: Callerid(type='callfilter', mode=_mode),
    )
    caller_id_name = association_proxy(
        'caller_id', 'name',
        creator=lambda _name: Callerid(type='callfilter', name=_name),
    )

    # 'boss' members, ordered by priority (reordered on append).
    recipients = relationship(
        'Callfiltermember',
        primaryjoin="""and_(
            Callfiltermember.bstype == 'boss',
            Callfiltermember.callfilterid == Callfilter.id
        )""",
        foreign_keys='Callfiltermember.callfilterid',
        order_by='Callfiltermember.priority',
        collection_class=ordering_list('priority', reorder_on_append=True),
        cascade='all, delete-orphan',
    )

    # 'secretary' members, ordered by priority (reordered on append).
    surrogates = relationship(
        'Callfiltermember',
        primaryjoin="""and_(
            Callfiltermember.bstype == 'secretary',
            Callfiltermember.callfilterid == Callfilter.id
        )""",
        foreign_keys='Callfiltermember.callfilterid',
        order_by='Callfiltermember.priority',
        collection_class=ordering_list('priority', reorder_on_append=True),
        cascade='all, delete-orphan',
    )

    @property
    def fallbacks(self):
        # Alias kept for API symmetry with other models.
        return self.callfilter_dialactions

    @hybrid_property
    def strategy(self):
        """Map the stored bosssecretary enum to the public strategy name.

        Unknown values pass through unchanged.
        """
        if self.bosssecretary == 'bossfirst-serial':
            return 'all-recipients-then-linear-surrogates'
        elif self.bosssecretary == 'bossfirst-simult':
            return 'all-recipients-then-all-surrogates'
        elif self.bosssecretary == 'secretary-serial':
            return 'linear-surrogates-then-all-recipients'
        elif self.bosssecretary == 'secretary-simult':
            return 'all-surrogates-then-all-recipients'
        else:
            return self.bosssecretary

    @strategy.setter
    def strategy(self, value):
        # Inverse of the getter mapping; unknown values stored as-is.
        if value == 'all-recipients-then-linear-surrogates':
            self.bosssecretary = 'bossfirst-serial'
        elif value == 'all-recipients-then-all-surrogates':
            self.bosssecretary = 'bossfirst-simult'
        elif value == 'linear-surrogates-then-all-recipients':
            self.bosssecretary = 'secretary-serial'
        elif value == 'all-surrogates-then-all-recipients':
            self.bosssecretary = 'secretary-simult'
        else:
            self.bosssecretary = value

    @hybrid_property
    def surrogates_timeout(self):
        # 0 in the DB means "no timeout", surfaced as None.
        if self.ringseconds == 0:
            return None
        return self.ringseconds

    @surrogates_timeout.setter
    def surrogates_timeout(self, value):
        if value is None:
            self.ringseconds = 0
        else:
            self.ringseconds = value

    @hybrid_property
    def enabled(self):
        # 'commented' is the legacy disable flag: 0 means enabled.
        if self.commented is None:
            return None
        return self.commented == 0

    @enabled.expression
    def enabled(cls):
        # SQL-side equivalent: NOT CAST(commented AS BOOLEAN)
        return not_(cast(cls.commented, Boolean))

    @enabled.setter
    def enabled(self, value):
        # Note: only an explicit False disables; None/True both enable.
        self.commented = int(value is False)
class OptimizationProcedureORM(ProcedureMixin, BaseResultORM):
    """ORM for an optimization procedure (table ``optimization_procedure``).

    Joined-table inheritance child of ``BaseResultORM``; common procedure
    columns come from ``ProcedureMixin``.
    """

    __tablename__ = 'optimization_procedure'

    # Shares its primary key with base_result (joined-table inheritance).
    id = Column(Integer, ForeignKey('base_result.id', ondelete='cascade'), primary_key=True)

    def __init__(self, **kwargs):
        """Create an optimization record; the procedure name is fixed."""
        kwargs.setdefault("version", 1)
        self.procedure = "optimization"
        super().__init__(**kwargs)

    schema_version = Column(Integer, default=1)
    initial_molecule = Column(Integer, ForeignKey('molecule.id'))
    initial_molecule_obj = relationship(MoleculeORM, lazy='select', foreign_keys=initial_molecule)

    # # Results
    energies = Column(JSON)  # Column(ARRAY(Float))
    final_molecule = Column(Integer, ForeignKey('molecule.id'))
    final_molecule_obj = relationship(MoleculeORM, lazy='select', foreign_keys=final_molecule)

    # ids, calculated not stored in this table
    # NOTE: this won't work in SQLite since it returns ARRAYS, aggregate_order_by
    trajectory = column_property(
        select([
            func.array_agg(
                aggregate_order_by(Trajectory.result_id, Trajectory.position))
        ]).where(Trajectory.opt_id == id))

    # array of objects (results) - Lazy - raise error of accessed
    trajectory_obj = relationship(
        Trajectory,
        cascade="all, delete-orphan",
        # backref="optimization_procedure",
        order_by=Trajectory.position,
        collection_class=ordering_list('position'))

    __mapper_args__ = {
        'polymorphic_identity': 'optimization_procedure',
        # to have separate select when querying BaseResultsORM
        'polymorphic_load': 'selectin',
    }

    __table_args__ = (
        Index('ix_optimization_program', 'program'),  # todo: needed for procedures?
    )

    def update_relations(self, trajectory=None, **kwarg):
        """Rebuild the Trajectory association rows from a list of result ids."""
        # update optimization_results relations
        # self._update_many_to_many(opt_result_association, 'opt_id', 'result_id',
        #                           self.id, trajectory, self.trajectory)

        self.trajectory_obj = []
        # Guard: the parameter defaults to None; treat that as "no results".
        trajectory = [] if not trajectory else trajectory
        for result_id in trajectory:
            traj = Trajectory(opt_id=int(self.id), result_id=int(result_id))
            self.trajectory_obj.append(traj)
class Project(SitemapMixin, db.ModelBase):
    """ORM for a package/project (legacy table name ``packages``)."""

    __tablename__ = "packages"
    __table_args__ = (
        # Enforce PEP 426-style valid project names at the DB level.
        CheckConstraint(
            "name ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'::text",
            name="packages_valid_name",
        ),
    )

    __repr__ = make_repr("name")

    name = Column(Text, primary_key=True, nullable=False)
    # Computed in SQL via the normalize_pep426_name() DB function.
    normalized_name = orm.column_property(func.normalize_pep426_name(name))
    stable_version = Column(Text)
    autohide = Column(Boolean, server_default=sql.true())
    comments = Column(Boolean, server_default=sql.true())
    bugtrack_url = Column(Text)
    hosting_mode = Column(Text, nullable=False, server_default="pypi-only")
    created = Column(
        DateTime(timezone=False), nullable=False, server_default=sql.func.now()
    )
    has_docs = Column(Boolean)
    upload_limit = Column(Integer, nullable=True)
    last_serial = Column(Integer, nullable=False, server_default=sql.text("0"))
    allow_legacy_files = Column(Boolean, nullable=False, server_default=sql.false())
    zscore = Column(Float, nullable=True)

    # Users with a Role on this project.
    users = orm.relationship(User, secondary=Role.__table__, backref="projects")
    releases = orm.relationship(
        "Release",
        backref="project",
        cascade="all, delete-orphan",
        order_by=lambda: Release._pypi_ordering.desc(),
        passive_deletes=True,
    )

    def __getitem__(self, version):
        """Return the Release matching *version* (canonicalized), or raise KeyError.

        First tries a canonical-version match; if several releases share the
        canonical form (uploaded before equivalence was enforced), falls back
        to an exact version-string match.
        """
        session = orm.object_session(self)
        canonical_version = packaging.utils.canonicalize_version(version)

        try:
            return (
                session.query(Release)
                .filter(
                    (Release.project == self)
                    & (Release.canonical_version == canonical_version)
                )
                .one()
            )
        except MultipleResultsFound:
            # There are multiple releases of this project which have the same
            # canonical version that were uploaded before we checked for
            # canonical version equivalence, so return the exact match instead
            try:
                return (
                    session.query(Release)
                    .filter((Release.project == self) & (Release.version == version))
                    .one()
                )
            except NoResultFound:
                # There are multiple releases of this project which have the
                # same canonical version, but none that have the exact version
                # specified, so just 404
                raise KeyError from None
        except NoResultFound:
            raise KeyError from None

    def __acl__(self):
        """Build the Pyramid ACL: admins plus per-role user permissions."""
        session = orm.object_session(self)
        acls = [(Allow, "group:admins", "admin")]

        # Get all of the users for this project.
        query = session.query(Role).filter(Role.project == self)
        query = query.options(orm.lazyload("project"))
        query = query.options(orm.joinedload("user").lazyload("emails"))
        # Owners sort before Maintainers; Owners may also manage.
        for role in sorted(
            query.all(), key=lambda x: ["Owner", "Maintainer"].index(x.role_name)
        ):
            if role.role_name == "Owner":
                acls.append((Allow, str(role.user.id), ["manage", "upload"]))
            else:
                acls.append((Allow, str(role.user.id), ["upload"]))
        return acls

    @property
    def documentation_url(self):
        # TODO: Move this into the database and eliminate the use of the
        # threadlocal here.
        request = get_current_request()

        # If the project doesn't have docs, then we'll just return a None here.
        if not self.has_docs:
            return

        return request.route_url("legacy.docs", project=self.name)

    @property
    def all_versions(self):
        """All (version, created, is_prerelease) tuples, newest first."""
        return (
            orm.object_session(self)
            .query(Release.version, Release.created, Release.is_prerelease)
            .filter(Release.project == self)
            .order_by(Release._pypi_ordering.desc())
            .all()
        )

    @property
    def latest_version(self):
        """Newest release tuple, preferring non-prereleases; None if no releases."""
        return (
            orm.object_session(self)
            .query(Release.version, Release.created, Release.is_prerelease)
            .filter(Release.project == self)
            .order_by(Release.is_prerelease.nullslast(), Release._pypi_ordering.desc())
            .first()
        )
class Post(Content, TaggableEntity):
    """
    A Post represents input into the broader discussion taking place on
    Assembl. It may be a response to another post, it may have responses, and
    its content may be of any type.
    """
    __tablename__ = "post"

    id = Column(Integer, ForeignKey(
        'content.id',
        ondelete='CASCADE',
        onupdate='CASCADE'
    ), primary_key=True)

    # This is usually an email, but we won't enforce it because we get some
    # weird stuff from outside.
    message_id = Column(CoerceUnicode,
                        nullable=False,
                        index=True,
                        doc=docs.Post.message_id)

    # Comma-terminated list of ancestor ids, root first (e.g. "1,4,12,").
    ancestry = Column(String, default="")

    __table_args__ = (Index('ix_%s_post_ancestry' % (Content.full_schema, ),
                            'ancestry', unique=False,
                            postgresql_ops={'ancestry': 'varchar_pattern_ops'}), )

    parent_id = Column(Integer, ForeignKey(
        'post.id', ondelete='CASCADE', onupdate='SET NULL'), index=True)

    children = relationship(
        "Post",
        foreign_keys=[parent_id],
        backref=backref('parent', remote_side=[id]),
    )

    publication_state = Column(
        PublicationStates.db_type(),
        nullable=False,
        server_default=PublicationStates.PUBLISHED.name)

    moderator_id = Column(Integer, ForeignKey(
        'user.id', ondelete='SET NULL', onupdate='CASCADE'),
        nullable=True,)

    moderated_on = Column(DateTime)

    moderation_text = Column(UnicodeText)

    moderator_comment = Column(UnicodeText)  # For other moderators

    moderator = relationship(
        "User",
        foreign_keys=[moderator_id],
        backref=backref('posts_moderated'),
    )

    # All the idea content links of the ancestors of this post
    # (computed by a DB function; not reloaded on every flush).
    idea_content_links_above_post = column_property(
        func.idea_content_links_above_post(id),
        deferred=True, expire_on_flush=False)

    creator_id = Column(Integer, ForeignKey('agent_profile.id'),
                        nullable=False, index=True)
    creator = relationship(AgentProfile, foreign_keys=[creator_id],
                           backref="posts_created",
                           doc=docs.PostInterface.creator)

    share_count = Column(Integer, nullable=False, default=0, server_default='0')

    tags_associations_cls = PostsTagsAssociation

    __mapper_args__ = {'polymorphic_identity': 'post',
                       'with_polymorphic': '*'}

    def is_owner(self, user_id):
        """True iff *user_id* is the creator of this post."""
        return self.creator_id == user_id

    def get_descendants(self):
        """Query for all descendants of this post, by creation date."""
        assert self.id
        descendants = self.db.query(Post).filter(
            Post.parent_id == self.id).order_by(Content.creation_date)
        # NOTE(review): despite the name this only matches direct children
        # (parent_id == self.id), not the full subtree — confirm intended.
        return descendants

    def get_top_post_in_thread(self):
        """Walk parents up to the thread's root post."""
        post = self
        while post.parent_id is not None:
            post = Post.get(post.parent_id)
        return post

    def increment_share_count(self):
        self.share_count += 1

    def is_read(self):
        # TODO: Make it user-specific.
        return self.views is not None

    def get_url(self):
        from assembl.lib.frontend_urls import FrontendUrls
        frontendUrls = FrontendUrls(self.discussion)
        return frontendUrls.get_post_url(self)

    @staticmethod
    def shorten_text(text, target_len=120):
        """Truncate *text* to about *target_len* chars on a word boundary,
        appending a trailing space as the truncation marker."""
        if len(text) > target_len:
            text = text[:target_len].rsplit(' ', 1)[0].rstrip() + ' '
        return text

    @staticmethod
    def shorten_html_text(text, target_len=120):
        """Truncate HTML to ~target_len chars of *plain text*, growing the
        HTML window until enough text survives sanitization."""
        shortened = False
        html_len = 2 * target_len
        while True:
            pure_text = sanitize_text(text[:html_len])
            if html_len >= len(text) or len(pure_text) > target_len:
                shortened = html_len < len(text)
                text = pure_text
                break
            html_len += target_len
        text = Post.shorten_text(text)
        if shortened and text[-1] != ' ':
            text += ' '
        return text

    def get_body_preview(self):
        """Return a shortened LangString preview of the body, honoring
        moderation/deletion state."""
        if self.publication_state in moderated_publication_states:
            # TODO: Handle multilingual moderation
            return LangString.create(
                self.moderation_text, self.discussion.main_locale)
        elif self.publication_state in deleted_publication_states:
            return LangString.EMPTY(self.db)
        body = self.get_body()
        is_html = self.get_body_mime_type() == 'text/html'
        ls = LangString()
        shortened = False
        for entry in body.entries:
            if not entry.value:
                short = entry.value
            elif is_html:
                short = self.shorten_html_text(entry.value)
            else:
                short = self.shorten_text(entry.value)
            if short != entry.value:
                shortened = True
            # create a LangStringEntry object
            LangStringEntry(value=short, locale_id=entry.locale_id, langstring=ls)
        if shortened or is_html:
            return ls
        else:
            return body

    def get_original_body_preview(self):
        """Like get_body_preview, but on the first original entry only,
        returning plain text rather than a LangString."""
        if self.publication_state in moderated_publication_states:
            # TODO: Handle multilingual moderation
            return self.moderation_text
        elif self.publication_state in deleted_publication_states:
            # NOTE(review): returns a LangString here but a plain string on
            # the other paths — callers must handle both; confirm intended.
            return LangString.EMPTY(self.db)
        body = self.get_body().first_original().value
        is_html = self.get_body_mime_type() == 'text/html'
        shortened = False
        if not body:
            short = body
        elif is_html:
            short = self.shorten_html_text(body)
        else:
            short = self.shorten_text(body)
        if short != body:
            shortened = True
        if shortened or is_html:
            return short
        else:
            return body

    def _set_ancestry(self, new_ancestry):
        """Recursively rewrite the ancestry strings of this subtree."""
        self.ancestry = new_ancestry
        descendant_ancestry = "%s%d," % (self.ancestry, self.id)
        for descendant in self.get_descendants():
            descendant._set_ancestry(descendant_ancestry)

    def set_parent(self, parent):
        """Reparent this post and recompute its ancestry (flushes first so
        parent.id is available)."""
        self.parent = parent
        self.db.add(self)
        self.db.flush()
        self._set_ancestry("%s%d," % (parent.ancestry or '', parent.id))

    def last_updated(self):
        """Most recent creation_date in this post's subtree (self included)."""
        ancestry_query_string = "%s%d,%%" % (self.ancestry or '', self.id)
        query = self.db.query(func.max(
            Content.creation_date)).select_from(Post).join(Content).filter(
            or_(Post.ancestry.like(ancestry_query_string),
                Post.id == self.id))
        return query.scalar()

    def ancestor_ids(self):
        """Ancestry string parsed into a list of int ids, root first."""
        return [
            int(ancestor_id) for ancestor_id in self.ancestry.split(',')
            if ancestor_id
        ]

    def ancestors(self):
        # NOTE(review): self.ancestor_ids is a method; iterating it without
        # calling it would raise TypeError — should likely be
        # self.ancestor_ids(). Confirm against upstream before fixing.
        return [Post.get(ancestor_id) for ancestor_id in self.ancestor_ids]

    def prefetch_descendants(self):
        pass  # TODO

    def visit_posts_depth_first(self, post_visitor):
        """Depth-first traversal of the subtree, calling visit_post on each."""
        self.prefetch_descendants()
        self._visit_posts_depth_first(post_visitor, set())

    def _visit_posts_depth_first(self, post_visitor, visited):
        if self in visited:
            # not necessary in a tree, but let's start to think graph.
            return False
        result = post_visitor.visit_post(self)
        visited.add(self)
        if result is not PostVisitor.CUT_VISIT:
            for child in self.children:
                child._visit_posts_depth_first(post_visitor, visited)

    def visit_posts_breadth_first(self, post_visitor):
        """Breadth-first traversal of the subtree; CUT_VISIT prunes a branch."""
        self.prefetch_descendants()
        result = post_visitor.visit_post(self)
        visited = {self}
        if result is not PostVisitor.CUT_VISIT:
            self._visit_posts_breadth_first(post_visitor, visited)

    def _visit_posts_breadth_first(self, post_visitor, visited):
        children = []
        for child in self.children:
            if child in visited:
                continue
            result = post_visitor.visit_post(child)
            visited.add(child)
            if result != PostVisitor.CUT_VISIT:
                children.append(child)
        for child in children:
            child._visit_posts_breadth_first(post_visitor, visited)

    def has_next_sibling(self):
        """True iff this post is not its parent's last child."""
        if self.parent_id:
            return self != self.parent.children[-1]
        return False

    @property
    def has_live_child(self):
        # Returns True if any child is not a tombstone; implicitly returns
        # None (falsy) otherwise.
        for child in self.children:
            if not child.is_tombstone:
                return True

    def delete_post(self, cause):
        """Set the publication state to a deleted state

        Includes an optimization whereby deleted posts without live descendants
        are tombstoned.
        Should be resilient to deletion order."""
        self.publication_state = cause
        if not self.has_live_child:
            self.is_tombstone = True
            # If ancestor is deleted without being tombstone, make it tombstone
            ancestor = self.parent
            while (ancestor and
                    ancestor.publication_state in deleted_publication_states and
                    not ancestor.is_tombstone and
                    not ancestor.has_live_child):
                ancestor.is_tombstone = True
                ancestor = ancestor.parent
        self.tags = []

    # As tombstones are an optimization in this case,
    # allow necromancy.
    can_be_resurrected = True

    def undelete_post(self):
        """Republish this post and un-tombstone its tombstoned ancestors."""
        self.publication_state = PublicationStates.PUBLISHED
        ancestor = self
        while ancestor and ancestor.is_tombstone:
            ancestor.is_tombstone = False
            ancestor = ancestor.parent

    def get_subject(self):
        # Blocked posts expose an empty subject; otherwise defer to Content.
        if self.publication_state in blocking_publication_states:
            return LangString.EMPTY()
        if self.subject:
            return super(Post, self).get_subject()

    def get_body(self):
        # Blocked posts expose an empty body; otherwise defer to Content.
        if self.publication_state in blocking_publication_states:
            return LangString.EMPTY()
        if self.body:
            return super(Post, self).get_body()

    def get_original_body_as_html(self):
        if self.publication_state in blocking_publication_states:
            return LangString.EMPTY(self.db)
        return super(Post, self).get_original_body_as_html()

    def get_body_as_text(self):
        if self.publication_state in blocking_publication_states:
            return LangString.EMPTY(self.db)
        return super(Post, self).get_body_as_text()

    def indirect_idea_content_links(self):
        # Use the request-scoped cache when inside a request.
        from pyramid.threadlocal import get_current_request
        request = get_current_request()
        if request:
            return self.indirect_idea_content_links_with_cache()
        else:
            return self.indirect_idea_content_links_without_cache()

    def indirect_idea_content_links_without_cache(self):
        "Return all ideaContentLinks related to this post or its ancestors"
        from .idea_content_link import IdeaContentLink
        ancestors = filter(None, self.ancestry.split(","))
        ancestors = [int(x) for x in ancestors]
        ancestors.append(self.id)
        return self.db.query(IdeaContentLink).filter(
            IdeaContentLink.content_id.in_(ancestors)).all()

    def filter_idea_content_links_r(self, idea_content_links):
        """Exclude positive links if a negative link points from the same
        idea to the same post or a post below.

        Works on dict representations of IdeaContentLink,
        a version with instances is TODO."""
        from .idea_content_link import IdeaContentNegativeLink
        from collections import defaultdict
        # External type names of all negative-link subclasses.
        icnl_polymap = {
            cls.external_typename()
            for cls in IdeaContentNegativeLink.get_subclasses()
        }
        neg_links = [
            icl for icl in idea_content_links
            if icl["@type"] in icnl_polymap
        ]
        if not neg_links:
            return idea_content_links
        pos_links = [
            icl for icl in idea_content_links
            if icl["@type"] not in icnl_polymap
        ]
        links = []
        # Ancestor chain of this post, self last (index order = depth).
        ancestor_ids = self.ancestry.split(",")
        ancestor_ids = [int(x or 0) for x in ancestor_ids]
        ancestor_ids[-1] = self.id
        neg_link_post_ids = defaultdict(list)
        for icl in neg_links:
            neg_link_post_ids[icl["idIdea"]].append(
                self.get_database_id(icl["idPost"]))
        for link in pos_links:
            idea_id = link["idIdea"]
            if idea_id in neg_link_post_ids:
                pos_post_id = self.get_database_id(link["idPost"])
                # Keep the positive link only if no negative link from the
                # same idea sits at or below it in the ancestor chain.
                for neg_post_id in neg_link_post_ids[idea_id]:
                    if (ancestor_ids.index(neg_post_id) >
                            ancestor_ids.index(pos_post_id)):
                        break
                else:
                    links.append(link)
            else:
                links.append(link)
        links.extend(neg_links)
        return links

    def indirect_idea_content_links_with_cache(
            self, links_above_post=None, filter=True):
        "Return all ideaContentLinks related to this post or its ancestors"
        # WIP: idea_content_links_above_post is still loaded separately
        # despite not being deferred. Deferring it hits a sqlalchemy bug.
        # Still appreciable performance gain using it instead of the orm,
        # and the ICL cache below.
        # TODO: move in path_utils?
        links_above_post = (self.idea_content_links_above_post
                            if links_above_post is None else links_above_post)
        if not links_above_post:
            return []
        from pyramid.threadlocal import get_current_request
        from .idea_content_link import IdeaContentLink
        from .idea import Idea
        icl_polymap = IdeaContentLink.__mapper__.polymorphic_map
        request = get_current_request()
        # cache1: raw tuples for every ICL in the discussion, keyed by id.
        # cache2: JSON-ready dict representations, built lazily from cache1.
        if getattr(request, "_idea_content_link_cache2", None) is None:
            if getattr(request, "_idea_content_link_cache1", None) is None:
                icl = with_polymorphic(IdeaContentLink, IdeaContentLink)
                co = with_polymorphic(Content, Content)
                request._idea_content_link_cache1 = {
                    x[0]: x for x in self.db.query(
                        icl.id, icl.idea_id, icl.content_id, icl.creator_id,
                        icl.type, icl.creation_date).join(co).filter(
                        co.discussion_id == self.discussion_id)
                }
            request._idea_content_link_cache2 = {}

        def icl_representation(id):
            # Build (and memoize) the dict representation of one link.
            if id not in request._idea_content_link_cache2:
                data = request._idea_content_link_cache1.get(id, None)
                if data is None:
                    return None
                request._idea_content_link_cache2[id] = {
                    "@id": IdeaContentLink.uri_generic(data[0]),
                    "idIdea": Idea.uri_generic(data[1]),
                    "idPost": Content.uri_generic(data[2]),
                    "idCreator": AgentProfile.uri_generic(data[3]),
                    "@type": icl_polymap[data[4]].class_.external_typename(),
                    "created": data[5].isoformat() + "Z"
                }
            return request._idea_content_link_cache2[id]

        icls = [
            icl_representation(int(id))
            for id in links_above_post.strip(',').split(',')
        ]
        if filter:
            icls = self.filter_idea_content_links_r(icls)
        return icls

    def language_priors(self, translation_service):
        """Weight the inherited language priors by the creator's known
        languages (known languages keep weight 1, others are dampened)."""
        from .auth import User, UserLanguagePreferenceCollection
        priors = super(Post, self).language_priors(translation_service)
        creator = self.creator or AgentProfile.get(self.creator_id)
        if creator and isinstance(creator, User):
            # probably a language that the user knows
            try:
                prefs = UserLanguagePreferenceCollection(creator.id)
                known_languages = prefs.known_languages()
            except AssertionError:
                # user without prefs
                from pyramid.threadlocal import get_current_request
                request = get_current_request()
                if request:
                    known_languages = [request.locale_name]
                else:
                    return priors
        # NOTE(review): this unconditional reset discards any preferences
        # gathered above — it looks like it belongs in an `else:` branch of
        # the isinstance check (non-User creators). Confirm against upstream.
        known_languages = []
        known_languages = {
            translation_service.asKnownLocale(loc)
            for loc in known_languages
        }
        # NOTE: iteritems() implies this module targets Python 2.
        priors = {
            k: v * (1 if k in known_languages else 0.7)
            for (k, v) in priors.iteritems()
        }
        for lang in known_languages:
            if lang not in priors:
                priors[lang] = 1
        return priors

    @classmethod
    def extra_collections(cls):
        """Expose the indirect idea content links as a traversal collection."""
        from .idea_content_link import IdeaContentLink

        class IdeaContentLinkCollection(AbstractCollectionDefinition):
            # Collection of ICLs attached to this post or any ancestor.
            def __init__(self, cls):
                super(IdeaContentLinkCollection, self
                      ).__init__(cls, IdeaContentLink)

            def decorate_query(self, query, owner_alias, last_alias,
                               parent_instance, ctx):
                parent = owner_alias
                children = last_alias
                ancestors = filter(None, parent_instance.ancestry.split(","))
                ancestors = [int(x) for x in ancestors]
                ancestors.append(parent_instance.id)
                return query.join(
                    parent, children.content_id.in_(ancestors))

            def decorate_instance(self, instance, parent_instance, assocs,
                                  user_id, ctx, kwargs):
                pass

            def contains(self, parent_instance, instance):
                return instance.content_id == parent_instance.id or (
                    str(instance.content_id)
                    in parent_instance.ancestry.split(","))

        return {'indirect_idea_content_links': IdeaContentLinkCollection(cls)}

    @classmethod
    def restrict_to_owners(cls, query, user_id):
        "filter query according to object owners"
        return query.filter(cls.creator_id == user_id)

    def is_bright_mirror_fiction(self):
        """True iff the first related idea uses the brightMirror view."""
        parent_ideas = self.get_ideas()
        if not parent_ideas:
            return False
        return parent_ideas[
            0].message_view_override == MessageView.brightMirror.value

    def get_ideas(self):
        """Ideas linked to this post (or its ancestors) via IdeaRelatedPostLink."""
        ideas = [
            link.idea
            for link in self.indirect_idea_content_links_without_cache()
            if link.__class__.__name__ == 'IdeaRelatedPostLink'
        ]
        return ideas

    def get_created_phase(self):
        """Timeline phase in which this post was created."""
        from assembl.lib.frontend_urls import get_timeline_for_date
        ideas = self.get_ideas()
        if not ideas:
            # orphan post
            return get_timeline_for_date(self.discussion, self.creation_date)
        first_idea = ideas[0]
        return first_idea.get_associated_phase()

    @classmethod
    def graphene_type(cls):
        return 'Post'