class OdLink(BaseObject):
    """Link to OndernemingsDossier."""

    __tablename__ = "od_link"

    id = schema.Column(types.Integer(), primary_key=True, autoincrement=True)
    # Owning survey session; the link row follows the session on update/delete.
    session_id = schema.Column(
        types.Integer(),
        schema.ForeignKey("session.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    # One-to-one: a session has at most one od_link (uselist=False).
    session = orm.relation(
        "SurveySession", backref=orm.backref("od_link", uselist=False)
    )
    # Establishment key ("vestigingssleutel"); indexed for lookups.
    vestigings_sleutel = schema.Column(types.String(), nullable=False, index=True)
    # Base URL of the remote webservice endpoint.
    webservice = schema.Column(types.String(), nullable=False)
    version = schema.Column(types.Integer(), default=0, nullable=False)

    @property
    def wsdl_url(self):
        """WSDL endpoint derived by appending the '&wsdl' query flag."""
        return self.webservice + "&wsdl"
class Course(Model):
    """A course; the extra columns exercise many column types for form testing."""

    __tablename__ = "course"

    id = Column(sqla_types.Integer, primary_key=True)
    name = Column(sqla_types.String(255), nullable=False)

    # These are for better model form testing
    cost = Column(sqla_types.Numeric(5, 2), nullable=False)
    description = Column(sqla_types.Text, nullable=False)
    level = Column(sqla_types.Enum('Primary', 'Secondary'))
    has_prereqs = Column(sqla_types.Boolean, nullable=False)
    boolean_nullable = Column(sqla_types.Boolean, nullable=True)
    started = Column(sqla_types.DateTime, nullable=False)
    # AnotherInteger is a project-defined column type — TODO confirm semantics.
    grade = Column(AnotherInteger, nullable=False)
class EmailAddress(types.TypeDecorator):
    """Column type storing e-mail addresses normalized by ``email_address``.

    The second flag passed to ``normalize`` differs between the bind (True)
    and result (False) directions; its exact meaning is defined by the
    ``email_address`` helper.
    """

    impl = types.String(255)

    def process_bind_param(self, value, dialect):
        # None passes through untouched; everything else is normalized
        # before being written to the database.
        if value is None:
            return None
        return email_address.normalize(value, True)

    def process_result_value(self, value, dialect):
        # None passes through untouched; stored values are normalized
        # again on the way out.
        if value is None:
            return None
        return email_address.normalize(value, False)
class Flowcell(Model):
    """A sequencing flowcell, unique by name."""

    id = Column(types.Integer, primary_key=True) if False else None  # noqa
class SomeModel(Base):
    """Fixture model exercising a broad spread of column types."""

    id = Column(types.Integer, primary_key=True)
    a_boolean = Column(types.Boolean, nullable=False)
    a_date = Column(types.Date, unique=True)
    a_datetime = Column(types.DateTime)
    a_float = Column(types.Float)
    a_json = Column(types.JSON)
    # Dialect-specific JSON alongside the generic one above.
    another_json = Column(postgresql.JSON)
    a_pickle = Column(types.PickleType)
    a_string = Column(types.String(42), default='H2G2')
    # Leading underscore: not part of the public attribute surface.
    _internal = Column(types.Integer)
    # Default applied by the database itself (DDL), not by the ORM.
    a_server_default_col = Column(types.Integer, server_default=sa.text('0'))
class AirmaterialCategory(Model, AuditModel): "航材类别的模型定义" # 为了兼容原外包实现的名称 __tablename__ = 'airmaterial_category' id = schema.Column(types.Integer, primary_key=True) # partNumber 航材件号 partNumber = schema.Column(types.String(255), nullable=False, unique=True) # name 航材名称 name = schema.Column(types.String(255), nullable=False) # category 航材类型 category = schema.Column(types.String(255), nullable=False) # miniStock 最低库存 minStock = schema.Column(types.Integer) # unit 航材单位 unit = schema.Column(types.String(255)) # applicableModel 适用机型 applicableModel = schema.Column(types.String(255)) # 是否有库存有效期 isOrNotHaveEffectiveDate = schema.Column(types.Boolean) # 是否有定期检查 isOrNotHavePeriodCheck = schema.Column(types.Boolean) # statusName 状态值 statusName = schema.Column(types.String(255)) @property def status(self): return self.statusName @status.setter def status(self, value): self.statusName = value
class ContentBlock(AbstractModel):
    """A block of binary content with its binding, linked to data collections."""

    __tablename__ = 'content_blocks'

    id = schema.Column(types.Integer, primary_key=True)
    message = schema.Column(types.Text, nullable=True)
    timestamp_label = schema.Column(
        types.DateTime(timezone=True),
        default=datetime.utcnow, index=True)
    # Optional link to the inbox message this block arrived in.
    inbox_message_id = schema.Column(
        types.Integer,
        schema.ForeignKey(
            'inbox_messages.id', onupdate='CASCADE', ondelete='CASCADE'),
        nullable=True)
    content = schema.Column(types.LargeBinary, nullable=False)
    binding_id = schema.Column(types.String(300), index=True)
    binding_subtype = schema.Column(types.String(300), index=True)

    collections = relationship(
        'DataCollection',
        secondary=collection_to_content_block,
        backref='content_blocks',
        lazy='dynamic')

    @validates('collections', include_removes=True, include_backrefs=True)
    def _update_volume(self, key, collection, is_remove):
        # Keep each collection's denormalized `volume` counter in sync with
        # membership changes.  Arithmetic on the *class* attribute renders as
        # a SQL expression (volume = volume ± 1) rather than a Python
        # read-modify-write on the instance.
        if is_remove:
            collection.volume = collection.__class__.volume - 1
        else:
            collection.volume = collection.__class__.volume + 1
        return collection

    def __repr__(self):
        return ('ContentBlock(id={obj.id}, '
                'inbox_message={obj.inbox_message_id}, '
                'binding={obj.binding_subtype})').format(obj=self)
class Profile(DBModel):
    """A person profile row (database schema: gentem)."""

    __tablename__ = 'profile'
    __table_args__ = {"schema": 'gentem'}

    # BUG FIX: the default was `datetime.utcnow()` — that *calls* the function
    # once at import time, so every row would get the same stale timestamp.
    # Passing the callable makes SQLAlchemy evaluate it on each INSERT.
    # NOTE(review): given the name, `onupdate=datetime.utcnow` is probably also
    # intended — confirm with callers before adding it.
    updated_at = Column(types.DateTime, default=datetime.utcnow)
    first_name = Column(types.String(255))
    last_name = Column(types.String(255))
    middle_initial = Column(types.String(255))
    address = Column(types.String(255))
    phone = Column(types.String(255))
    dob = Column(types.Date)
    gender = Column(types.String(255))
    member_id = Column(types.String(255))
    email = Column(types.String(255))
class TrainingArchive(Model, AuditModel): "培训档案的模型定义" # 为了兼容原外包实现的名称 __tablename__ = 'train_record' def _id_generator(): return id_generator('PXDA', TrainingArchive, 'trainNumber') id = schema.Column(types.Integer, primary_key=True) trainNumber = schema.Column(types.String(255), default=_id_generator) userName = schema.Column(types.String(255)) trainRecordTime = schema.Column(types.DateTime) quarters = schema.Column(types.String(255)) trainRecordName = schema.Column(types.String(255)) trainRecordScore = schema.Column(types.String(255)) statusName = schema.Column(types.String(255)) trainRecordContent = schema.Column(types.Text) @property def status(self): return self.statusName @status.setter def status(self, value): self.statusName = value
class Notice(Model): "短消息的模型定义" # 为了兼容原外包实现的名称 __tablename__ = 'mynotice' id = schema.Column(types.Integer, primary_key=True) title = schema.Column(types.String(500), nullable=False) content = schema.Column(types.String(500), nullable=False) role = schema.Column(types.String(500)) recieveId = db.relationship('User', secondary=notification_users, backref=db.backref('uesrs', lazy='dynamic')) recieveName = schema.Column(types.String(255)) sendName = schema.Column(types.String(255)) stateName = schema.Column(types.String(255)) updateTime = schema.Column(types.DateTime) @property def status(self): return self.stateName @status.setter def status(self, value): self.stateName = value
def __init__(self, dbpath):
    """Open (or create) the SQLite benchmark database at *dbpath* and define its tables."""
    log.info("Initializing DB at %s" % dbpath)
    self.dbpath = dbpath
    self._engine = create_engine('sqlite:///%s' % dbpath)
    self._metadata = MetaData()
    self._metadata.bind = self._engine
    # Benchmark definitions, keyed by a 32-char content checksum.
    self._benchmarks = Table(
        'benchmarks', self._metadata,
        Column('checksum', sqltypes.String(32), primary_key=True),
        Column('name', sqltypes.String(200), nullable=False),
        Column('description', sqltypes.Text))
    # One result per (benchmark checksum, revision) pair.
    self._results = Table(
        'results', self._metadata,
        Column('checksum', sqltypes.String(32),
               ForeignKey('benchmarks.checksum'), primary_key=True),
        Column('revision', sqltypes.String(50), primary_key=True),
        Column('timestamp', sqltypes.DateTime, nullable=False),
        Column('ncalls', sqltypes.String(50)),
        Column('timing', sqltypes.Float),
        # Presumably holds the failure traceback when a run errors — confirm.
        Column('traceback', sqltypes.Text),
    )
    # Revisions to be skipped entirely.
    self._blacklist = Table(
        'blacklist', self._metadata,
        Column('revision', sqltypes.String(50), primary_key=True))
    self._ensure_tables_created()
class DutchCompany(BaseObject):
    """Information about a Dutch company."""

    __tablename__ = "dutch_company"

    id = schema.Column(types.Integer(), primary_key=True, autoincrement=True)
    # Owning survey session; rows follow the session on update/delete.
    session_id = schema.Column(
        types.Integer(),
        schema.ForeignKey("session.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    # One-to-one with the session; orphaned company rows are deleted.
    session = orm.relation(
        "SurveySession",
        cascade="all,delete-orphan",
        single_parent=True,
        backref=orm.backref("dutch_company", uselist=False, cascade="all"),
    )

    title = schema.Column(types.Unicode(128))
    # Visiting address.
    address_visit_address = schema.Column(types.UnicodeText())
    address_visit_postal = schema.Column(types.Unicode(16))
    address_visit_city = schema.Column(types.Unicode(64))
    # Postal address (may differ from the visiting address).
    address_postal_address = schema.Column(types.UnicodeText())
    address_postal_postal = schema.Column(types.Unicode(16))
    address_postal_city = schema.Column(types.Unicode(64))
    email = schema.Column(types.String(128))
    phone = schema.Column(types.String(32))
    activity = schema.Column(types.Unicode(64))
    submitter_name = schema.Column(types.Unicode(64))
    submitter_function = schema.Column(types.Unicode(64))
    department = schema.Column(types.Unicode(64))
    location = schema.Column(types.Unicode(64))
    # functions.now() is a SQL function expression evaluated per INSERT.
    submit_date = schema.Column(types.Date(), default=functions.now())
    # Employee-count band; project-defined Enum type that accepts None.
    employees = schema.Column(Enum([None, "40h", "max25", "over25"]))
    absentee_percentage = schema.Column(types.Numeric(precision=5, scale=2))
    accidents = schema.Column(types.Integer())
    incapacitated_workers = schema.Column(types.Integer())
    arbo_expert = schema.Column(types.Unicode(128))
    works_council_approval = schema.Column(types.Date())
class AlleleSet(Base, AlleleSetMixIn):
    """A set of alleles for one (channel, sample, marker) combination."""

    __tablename__ = 'allelesets'

    id = Column(types.Integer, primary_key=True)

    channel_id = Column(types.Integer,
                        ForeignKey('channels.id', ondelete='CASCADE'),
                        nullable=False)
    channel = relationship(Channel, uselist=False,
                           backref=backref('allelesets', lazy='dynamic',
                                           passive_deletes=True))
    # a channel can have several allele set for different revision numbers

    sample_id = Column(types.Integer,
                       ForeignKey('samples.id', ondelete='CASCADE'),
                       nullable=False)
    sample = relationship(Sample, uselist=False,
                          backref=backref('allelesets', lazy='dynamic',
                                          passive_deletes=True))
    """ link to sample """

    marker_id = Column(types.Integer, ForeignKey('markers.id'), nullable=False)
    marker = relationship(Marker, uselist=False,
                          backref=backref('allelesets', lazy='dynamic'))
    """ link to marker """

    scanning_method = deferred(Column(types.String(32), nullable=False))
    """ method used for scanning and generating this alleleset """

    calling_method = deferred(Column(types.String(32), nullable=False))
    """ method used for calling this alleleset """

    binning_method = deferred(Column(types.String(32), nullable=False))
    """ method used for binning this alleleset """

    def new_allele(self, rtime, height, area, brtime, ertime, wrtime, srtime,
                   beta, theta, type, method):
        """Create an Allele from the given peak measurements and attach it to this set."""
        allele = Allele(rtime=rtime, height=height, area=area, brtime=brtime,
                        ertime=ertime, wrtime=wrtime, srtime=srtime,
                        beta=beta, theta=theta, type=type, method=method)
        allele.alleleset = self
        return allele
class Sample(Base, SampleMixIn):
    """A sample within a batch; (code, batch) and (altcode, batch) are unique."""

    __tablename__ = 'samples'

    id = Column(types.Integer, primary_key=True)
    code = Column(types.String(64), nullable=False)
    # Default 'S' — presumably "sample"; confirm the type-code vocabulary.
    type = Column(types.String(1), default='S')
    altcode = Column(types.String(16), nullable=True)  # custom usage
    category = Column(types.Integer, nullable=False, default=0)  # custom usage
    batch_id = Column(types.Integer,
                      ForeignKey('batches.id', ondelete='CASCADE'),
                      nullable=False)
    int1 = Column(types.Integer, nullable=False, default=-1)  # custom usage
    int2 = Column(types.Integer, nullable=False, default=-1)  # custom usage
    string1 = Column(types.String(16), nullable=False, default='')  # custom usage
    string2 = Column(types.String(16), nullable=False, default='')  # custom usage
    batch = relationship(Batch, uselist=False,
                         backref=backref('samples', lazy='dynamic',
                                         cascade='save-update,delete'))
    # Deferred: only loaded when accessed.
    remark = deferred(Column(types.String(1024), nullable=True))

    __table_args__ = (UniqueConstraint('code', 'batch_id'),
                      UniqueConstraint('altcode', 'batch_id'))

    def new_assay(self, raw_data, filename, status, panel=None):
        """Create an Assay attached to this sample.

        Falls back to the sentinel 'undefined' panel (looked up in this
        object's session) when no panel is given.
        """
        assay = Assay(raw_data=raw_data, filename=filename)
        if panel is None:
            panel = Panel.search('undefined', object_session(self))
        assay.panel = panel
        assay.sample = self
        assay.status = status
        return assay
def upgrade_1032_to_1033(operations, metadata): from libres.db.models.types import JSON, UTCDateTime # if the quota limit has been renamed, the migration already went # through on this database (sites may share databases) allocations_table = Table('allocations', metadata, autoload=True) if 'quota_limit' in allocations_table.columns: return # add user-data json field to allocations operations.add_column('allocations', Column('data', JSON(), nullable=True)) # add timezone to allocations (required) operations.add_column('allocations', Column('timezone', types.String())) # add timezone to reservations (*not* required) operations.add_column('reservations', Column('timezone', types.String(), nullable=True)) # change type to try: operations.get_bind().execute("SET timezone='UTC'") for table in ('allocations', 'reserved_slots', 'reservations'): for column in ('created', 'modified'): operations.alter_column(table, column, type_=UTCDateTime(timezone=False)) finally: operations.get_bind().execute("RESET timezone") operations.execute("UPDATE allocations SET timezone = 'UTC'") operations.execute( "UPDATE reservations SET timezone = 'UTC' WHERE start IS NOT NULL") # rename reservation_quota_limit to quota_limit operations.alter_column('allocations', 'reservation_quota_limit', new_column_name='quota_limit')
class TrainigMaterial(Model, AuditModel): "培训资料的模型定义" # 为了兼容原外包实现的名称 __tablename__ = 'train_file_resource' def _id_generator(): return id_generator('PXZL', TrainigMaterial, 'trainNumber') id = schema.Column(types.Integer, primary_key=True) trainNumber = schema.Column(types.String(255), default=_id_generator) trainFileResourceType = schema.Column(types.String(255)) trainFileResourceName = schema.Column(types.String(255)) trainFileResourceContent = schema.Column(types.Text) addTime = schema.Column(types.DateTime) updateUser = schema.Column(types.String(255)) updTime = schema.Column(types.DateTime) statusName = schema.Column(types.String(255)) trainFileResourceUrl = schema.Column(types.String(1000)) @property def status(self): return self.statusName @status.setter def status(self, value): self.statusName = value
class Residence(Base):
    """A residence within a community, with occupants and billing relationships."""

    __tablename__ = "residence"
    # id alone is already unique; the composite constraint presumably lets
    # other tables foreign-key on (id, community_id) — TODO confirm intent.
    __table_args__ = (sql_schema.UniqueConstraint("id", "community_id"), )

    id = Column(sql_types.Integer, primary_key=True)
    community_id = Column(sql_types.Integer, ForeignKey("community.id"),
                          nullable=False)
    unit_no = Column(sql_types.String(40))
    street = Column(sql_types.String(80), nullable=False)
    locality = Column(sql_types.String(40), nullable=False)
    postcode = Column(sql_types.String(20), nullable=False)
    region = Column(sql_types.String(40), nullable=False)
    # Project-defined type — presumably keeps Decimal semantics on SQLite; confirm.
    ownership_stake = Column(SqliteSafeDecimal)

    # Users living here (many-to-many through residence_occupancy).
    occupants = orm.relationship(
        "User",
        secondary="residence_occupancy",
        lazy=True,
        backref=orm.backref("residences", lazy=True),
    )
    residence_charges = orm.relationship("BillingCharge", lazy=True,
                                         backref=orm.backref(
                                             "charged_residence", lazy=True))
    recurring_charges = orm.relationship(
        "RecurringCharge",
        lazy=True,
        backref=orm.backref("charged_residence", lazy=True),
    )

    def __repr__(self):
        return (f"Residence("
                f"unit_no={self.unit_no},"
                f"street={self.street},"
                f"locality={self.locality},"
                f"postcode={self.postcode},"
                f"region={self.region}"
                f")")
def test_index_reflection_filtered_and_clustered(
    self, metadata, connection
):
    """
    table with one filtered index and one clustered index so each index
    will have different dialect_options keys
    """
    t1 = Table(
        "t",
        metadata,
        Column("id", Integer),
        Column("x", types.String(20)),
        Column("y", types.Integer),
    )
    Index("idx_x", t1.c.x, mssql_clustered=True)
    Index("idx_y", t1.c.y, mssql_where=t1.c.y >= 5)
    metadata.create_all(connection)
    ind = testing.db.dialect.get_indexes(connection, "t", None)

    # The clustered flag must round-trip through reflection.
    clustered_index = ""
    for ix in ind:
        if ix["dialect_options"]["mssql_clustered"]:
            clustered_index = ix["name"]

    eq_(clustered_index, "idx_x")

    # The WHERE clause must round-trip too, in the bracketed/parenthesized
    # form SQL Server itself renders.
    filtered_indexes = []
    for ix in ind:
        if "dialect_options" in ix:
            if "mssql_where" in ix["dialect_options"]:
                filtered_indexes.append(
                    ix["dialect_options"]["mssql_where"]
                )

    eq_(sorted(filtered_indexes), ["([y]>=(5))"])

    # Reflect the table fresh and check both indexes compile back to the
    # expected CREATE INDEX statements (sorted by name: idx_x, then idx_y).
    t2 = Table("t", MetaData(), autoload_with=connection)
    clustered_idx = list(
        sorted(t2.indexes, key=lambda clustered_idx: clustered_idx.name)
    )[0]
    filtered_idx = list(
        sorted(t2.indexes, key=lambda filtered_idx: filtered_idx.name)
    )[1]
    self.assert_compile(
        CreateIndex(clustered_idx), "CREATE CLUSTERED INDEX idx_x ON t (x)"
    )
    self.assert_compile(
        CreateIndex(filtered_idx),
        "CREATE NONCLUSTERED INDEX idx_y ON t (y) WHERE ([y]>=(5))",
    )
class TareaModel(bd.Model):
    """Task ('tarea') model backed by table t_tarea."""

    __tablename__ = 't_tarea'

    tareaId = Column(name='tarea_id', nullable=False, type_=types.Integer,
                     primary_key=True, unique=True, autoincrement=True)
    tareaTitulo = Column(name='tarea_titulo', nullable=False,
                         type_=types.String(20))
    tareaDescripcion = Column(name='tarea_descripcion', nullable=False,
                              type_=types.TEXT)
    tareaEstado = Column(name='tarea_estado', nullable=False,
                         type_=types.BOOLEAN)
    # BUG FIX: the default was `datetime.now()` — evaluated once at import
    # time, stamping every new row with the process start time.  Passing the
    # callable makes SQLAlchemy evaluate it per INSERT.
    tareaFecha = Column(name='tarea_fecha', nullable=False,
                        default=datetime.now, type_=types.DateTime())
    usuario = Column(ForeignKey('t_usuario.usuario_id'), name='usuario_id',
                     nullable=False, type_=types.Integer)

    def __init__(self, titulo, descripcion, estado, usuario):
        """Create a task; the timestamp is supplied by the column default."""
        self.tareaTitulo = titulo
        self.tareaDescripcion = descripcion
        self.tareaEstado = estado
        self.usuario = usuario

    def save(self):
        """Add this task to the session and commit."""
        bd.session.add(self)
        bd.session.commit()

    def json(self):
        """Dict representation for API responses.

        NOTE(review): `usuario` is not included — confirm that is intentional.
        """
        return {
            'tarea_id': self.tareaId,
            'tarea_titulo': self.tareaTitulo,
            'tarea_descripcion': self.tareaDescripcion,
            'tarea_estado': self.tareaEstado,
            'tarea_fecha': str(self.tareaFecha)
        }

    def update(self, titulo, descripcion, estado, usuario):
        """Overwrite the task's fields, refresh the timestamp and persist."""
        self.tareaTitulo = titulo
        self.tareaDescripcion = descripcion
        self.tareaEstado = estado
        self.usuario = usuario
        # Here the explicit call is correct: we want "now" at update time.
        self.tareaFecha = datetime.now()
        self.save()
class Subscription(AbstractModel):
    """A subscription to a data collection, delivered through a service."""

    __tablename__ = 'subscriptions'

    id = schema.Column(types.String(150), primary_key=True)
    collection_id = schema.Column(
        types.Integer,
        schema.ForeignKey(
            'data_collections.id', onupdate='CASCADE', ondelete='CASCADE'))
    collection = relationship('DataCollection', backref='subscriptions')

    # Serialized subscription parameters.
    params = schema.Column(types.Text, nullable=True)

    # FIXME: proper enum type
    status = schema.Column(types.String(150))

    service_id = schema.Column(
        types.String(150),
        schema.ForeignKey(
            'services.id', onupdate="CASCADE", ondelete="CASCADE"))
    service = relationship('Service', backref='subscriptions')
class BizCircle(AlchemyMixin, Base):
    """A business circle (commercial area), possibly spanning several districts."""

    __tablename__ = 'biz_circles'

    id = Column(types.Integer, primary_key=True)
    # A business circle may border several districts — e.g. Andingmen appears
    # under both Xicheng and Dongcheng — hence an array of district ids below.
    city_id = Column(types.Integer, ForeignKey(City.id), nullable=False)
    district_id = Column(types.ARRAY(types.Integer, dimensions=1, as_tuple=True),
                         nullable=False)
    name = Column(types.String(32), nullable=False)
    # Full pinyin spelling of the name.
    quan_pin = Column(types.String(100), nullable=False)
    communities_count = Column(types.Integer, nullable=False, default=0)
    updated_at = Column(types.DateTime, nullable=False, default=datetime.now)
    communities_updated_at = Column(types.DateTime)

    def __init__(self, city_id, district_id, info):
        """Build from a scraped info dict; starts with a single district."""
        self.id = int(info['bizcircle_id'])
        self.city_id = city_id
        self.district_id = [district_id]
        self.name = info['bizcircle_name']
        self.quan_pin = info['bizcircle_quanpin']
class Student(Model):
    """A student with a current school and many-to-many course enrolments."""

    __tablename__ = "student"

    id = Column(sqla_types.Integer, primary_key=True)
    full_name = Column(sqla_types.String(255), nullable=False, unique=True)
    dob = Column(sqla_types.Date(), nullable=True)
    current_school_id = Column(sqla_types.Integer, ForeignKey(School.id),
                               nullable=False)
    current_school = relationship(School, backref=backref('students'))

    # Enrolled courses (through the student_course association table).
    courses = relationship(
        "Course",
        secondary=student_course,
        backref=backref("students", lazy='dynamic')
    )
def visit_column_comment(element: "ColumnComment", compiler: "PGDDLCompiler",
                         **kw) -> str:
    """Render the COMMENT ON COLUMN DDL statement for *element*.

    A None comment is rendered as the SQL keyword NULL (which clears the
    comment); otherwise the text is rendered as a quoted SQL string literal.
    """
    if element.comment is None:
        rendered_comment = "NULL"
    else:
        rendered_comment = compiler.sql_compiler.render_literal_value(
            element.comment, sqltypes.String())

    qualified_table = format_table_name(
        compiler, element.table_name, element.schema)
    column = format_column_name(compiler, element.column_name)
    return f"COMMENT ON COLUMN {qualified_table}.{column} IS {rendered_comment}"
class IPAddress(types.TypeDecorator):
    """An SQLAlchemy type representing an IP-address."""

    impl = types.String(39).with_variant(postgresql.INET(), 'postgresql')

    def process_bind_param(self, value, dialect):
        """Format the value before inserting it into the database."""
        # PostgreSQL's INET variant takes the value unchanged.
        if dialect.name == 'postgresql':
            return value
        # NOTE(maurosr): shorten IPv6 addresses here — this is a formatting
        # step, not validation.
        if utils.is_valid_ipv6(value):
            return utils.get_shortened_ipv6(value)
        # Anything else (e.g. IPv4) is stored as-is.
        return value
def create_table(url, tablename, create=False):
    """Define the cache table on *url* and return (engine, metadata, table).

    When *create* is true the table is created in the database if it does
    not already exist.
    """
    engine = sa.create_engine(url, strategy='threadlocal')
    metadata = sa.MetaData(engine)
    cache_table = sa.Table(
        tablename, metadata,
        sa.Column('id', types.Integer, primary_key=True),
        sa.Column('key', types.String(64), nullable=False),
        sa.Column('stored_time', types.Integer, nullable=False),
        sa.Column('expiry_time', types.Integer, nullable=False),
        sa.Column('data', types.PickleType, nullable=False),
        sa.UniqueConstraint('key'))
    if create:
        cache_table.create(checkfirst=True)
    return engine, metadata, cache_table
class EnumType(types.TypeDecorator):
    """Persist enum members by name, mapping back through the bound enum table."""

    impl = types.String(255)

    def __init__(self, enumTable=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Kept under the original (name-mangled) attribute for compatibility.
        self.__enum__ = enumTable

    def process_bind_param(self, value, dialect):
        # Store the member's name; None stays None.
        return None if value is None else value.name

    def process_result_value(self, value, dialect):
        # Resolve the stored name via the enum table's lookup; None stays None.
        return None if value is None else self.__enum__.__enum__[value]
def __init__(self, db=Defaults.DB, binary_fmt='b64', disk_threshold=1000,
             disk_path=Defaults.DATA_PATH, **kwargs):
    """
    See :class:`.Database` for description of available arguments
    """
    # Adds two string columns (pass_id, alert) to the base schema and gives
    # this table its own subdirectory under disk_path — presumably where
    # payloads over disk_threshold are spilled; see Database for details.
    super(MonitorDb, self).__init__(table=self._TABLE,
                                    db=db,
                                    ncols=['pass_id', 'alert'],
                                    ntypes=[
                                        sa_t.String(_MAX_STRING_LEN),
                                        sa_t.String(_MAX_STRING_LEN)
                                    ],
                                    binary_fmt=binary_fmt,
                                    disk_threshold=disk_threshold,
                                    disk_path=disk_path + '/' + self._TABLE,
                                    **kwargs)
class Transcript(BASE):
    """Set of non-overlapping exons.

    A :class:`Transcript` can *only* be related to a single gene.

    Args:
        id (str): unique transcript id (e.g. CCDS)
        gene_id (int): related gene
        chromosome (str): related contig id
        length (int): number of exon bases in transcript
    """

    __tablename__ = 'transcript'

    id = Column(types.String(32), primary_key=True)
    gene_id = Column(types.Integer, index=True, nullable=False)
    gene_name = Column(types.String(32), index=True)
    chromosome = Column(types.String(10))
    length = Column(types.Integer)

    # Statistics rows attached to this transcript — see TranscriptStat.
    stats = orm.relationship('TranscriptStat', backref='transcript')
def post_create_table(self, table):
    """Emit the trailing ENGINE (and optional COMMENT) clause for CREATE TABLE.

    Raises CompileError when the table carries no engine, since this dialect
    requires one.
    """
    table_engine = getattr(table, 'engine', None)
    if not table_engine:
        raise exc.CompileError("No engine for table '%s'" % table.name)

    clauses = [' ENGINE = ' + self.process(table_engine)]
    if table.comment is not None:
        rendered = self.sql_compiler.render_literal_value(
            table.comment, sqltypes.String())
        clauses.append(' COMMENT ' + rendered)
    return ''.join(clauses)
class ISO3166Country(types.TypeDecorator):
    """Store an ISO 3166 country as its two-letter (alpha-2) code."""

    impl = types.String(2)

    # FIX: `python_type` is a *property* on SQLAlchemy's TypeEngine; defining
    # it as a plain method shadowed that property, so `coltype.python_type`
    # returned a bound method instead of the Country class.
    @property
    def python_type(self) -> Type:
        return Country

    def process_bind_param(self, value: Optional[Country],
                           dialect: Dialect) -> Optional[str]:
        """Convert a Country to its alpha-2 code for storage; None passes through."""
        if value is not None:
            return value.alpha2
        return None

    def process_result_value(self, value: Optional[str],
                             dialect: Dialect) -> Optional[Country]:
        """Look a stored alpha-2 code back up as a Country; None passes through."""
        if value is not None:
            return countries.get(value)
        return None