Code example #1
File: table.py Project: reubano/dataset
    def create_index(self, columns, name=None):
        """
        Create an index to speed up queries on a table. If no ``name`` is given a random name is created.
        ::

            table.create_index(['name', 'country'])
        """
        self._check_dropped()
        if not name:
            sig = '||'.join(columns)

            # This is a work-around for a bug in <=0.6.1 which would create
            # indexes based on hash() rather than a proper hash.
            key = abs(hash(sig))
            name = 'ix_%s_%s' % (self.table.name, key)
            if name in self.indexes:
                return self.indexes[name]

            key = sha1(sig.encode('utf-8')).hexdigest()[:16]
            name = 'ix_%s_%s' % (self.table.name, key)

        if name in self.indexes:
            return self.indexes[name]
        try:
            self.database._acquire()
            columns = [self.table.c[c] for c in columns]
            idx = Index(name, *columns)
            idx.create(self.database.engine)
        except Exception:
            idx = None
        finally:
            self.database._release()
        self.indexes[name] = idx
        return idx
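
For context, here is a minimal usage sketch of this method through the dataset package's public API; the connection URL and the table and column names are illustrative, not taken from the project above.

# Hedged usage sketch for the method above (dataset's public API; names illustrative).
import dataset

db = dataset.connect('sqlite:///:memory:')
table = db['people']
table.insert({'name': 'Ada', 'country': 'UK'})
# Without an explicit name, create_index derives one from a sha1 of the columns.
table.create_index(['name', 'country'])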
Code example #2
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    service_statuses = Table('service_statuses', meta, autoload=True)
    idx = Index("service_statuses_instance_id", service_statuses.c.instance_id)
    idx.drop()
Code example #3
File: table.py Project: smnorris/pgdb
    def create_index(self, columns, name=None, index_type="btree"):
        """
        Create an index to speed up queries on a table.
        If no ``name`` is given a random name is created.
        ::
            table.create_index(['name', 'country'])
        """
        self._check_dropped()
        if not name:
            sig = "||".join(columns + [index_type])
            # This is a work-around for a bug in <=0.6.1 which would create
            # indexes based on hash() rather than a proper hash.
            key = abs(hash(sig))
            name = "ix_%s_%s" % (self.table.name, key)
            if name in self.indexes:
                return self.indexes[name]
            key = sha1(sig.encode("utf-8")).hexdigest()[:16]
            name = "ix_%s_%s" % (self.table.name, key)
        if name in self.indexes:
            return self.indexes[name]
        # self.db._acquire()
        columns = [self.table.c[col] for col in columns]
        idx = Index(name, *columns, postgresql_using=index_type)
        idx.create(self.engine)
        # finally:
        #    self.db._release()
        self.indexes[name] = idx
        return idx
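
The `postgresql_using` keyword is plain SQLAlchemy; below is a self-contained sketch of the same mechanism, assuming a PostgreSQL engine (table and index names are illustrative).

# Sketch of Index with postgresql_using, independent of the wrapper above.
from sqlalchemy import Column, Integer, Index, MetaData, Table
from sqlalchemy.dialects.postgresql import JSONB

metadata = MetaData()
docs = Table('docs', metadata,
             Column('id', Integer, primary_key=True),
             Column('payload', JSONB))
idx = Index('ix_docs_payload', docs.c.payload, postgresql_using='gin')
# idx.create(engine) emits: CREATE INDEX ix_docs_payload ON docs USING gin (payload)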
Code example #4
File: model.py Project: backgroundcheck/linkage
    def generate_key_index(self):
        for index in self.key.table.indexes:
            if len(index.columns) == 1:
                for col in index.columns:
                    if col == self.key:
                        return
        index = Index(self.index_name, self.key)
        index.create(self.config.engine)
Code example #5
def upgrade(migrate_engine):
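    # `meta` (a MetaData instance), `LOG` and `_LI` are module-level in the
    # original migration script (assumption based on similar migrations below).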
    LOG.info(_LI("Adding boolean column delayed_notify to table 'zones'"))
    meta.bind = migrate_engine
    zones_table = Table('zones', meta, autoload=True)
    col = Column('delayed_notify', Boolean(), default=False)
    col.create(zones_table)
    index = Index('delayed_notify', zones_table.c.delayed_notify)
    index.create(migrate_engine)
Code example #6
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    service_statuses = Table('service_statuses', meta, autoload=True)
    idx = Index("service_statuses_instance_id", service_statuses.c.instance_id)

    try:
        idx.create()
    except OperationalError as e:
        logger.info(e)
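
A note that applies to this and the similar migration snippets below: `logger` is module-level in the original scripts, `OperationalError` comes from `sqlalchemy.exc`, and logging it rather than re-raising makes the upgrade tolerant of the index already existing (an assumption about intent, but consistent with the comments in examples #21 and #24).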
Code example #7
File: 032_clusters.py Project: Hopebaytech/trove
def upgrade(migrate_engine):
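    # `meta` and `clusters` are module-level objects in the original
    # 032_clusters.py (assumption; compare examples #21 and #24, forks of
    # the same file).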
    meta.bind = migrate_engine
    Table('datastores', meta, autoload=True)
    Table('datastore_versions', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)
    create_tables([clusters])
    instances.create_column(Column('cluster_id', String(36),
                                   ForeignKey("clusters.id")))
    instances.create_column(Column('shard_id', String(36)))
    instances.create_column(Column('type', String(64)))
    cluster_id_idx = Index("instances_cluster_id", instances.c.cluster_id)
    cluster_id_idx.create()
Code example #8
File: model.py Project: pudo/linkage
    def generate_key_index(self):
        key = self.key
        table = key.table
        if isinstance(table, Alias):
            table = table.original
            key = table.c[key.name]

        for index in table.indexes:
            if len(index.columns) == 1:
                for col in index.columns:
                    if col == key:
                        return
        index = Index(self.index_name, key)
        index.create(self.config.engine)
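
The `Alias` unwrap above matters because an aliased table reports no indexes of its own; here is a small sketch of the attribute the method relies on (names illustrative).

# Sketch of Alias.original, the attribute the method above relies on.
from sqlalchemy import Column, Integer, MetaData, Table

metadata = MetaData()
users = Table('users', metadata, Column('id', Integer, primary_key=True))
aliased = users.alias('u')
assert aliased.original is users               # the underlying Table
assert aliased.original.c['id'] is users.c.id  # real column, not the alias proxy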
Code example #9
File: schema.py Project: rossjones/sqlaload
def create_index(engine, table, columns, name=None):
    with lock:
        if not name:
            sig = abs(hash('||'.join(columns)))
            name = 'ix_%s_%s' % (table.name, sig)
        if name in INDEXES:
            return INDEXES[name]
        try:
            columns = [table.c[c] for c in columns]
            idx = Index(name, *columns)
            idx.create(engine)
        except Exception:
            idx = None
        INDEXES[name] = idx
        return idx
Code example #10
File: schema.py Project: jljs/sqlaload
def create_index(engine, table, columns, name=None):
    table = get_table(engine, table)
    with lock:
        if not name:
            sig = abs(hash("||".join(columns)))
            name = "ix_%s_%s" % (table.name, sig)
        if name in engine._indexes:
            return engine._indexes[name]
        try:
            columns = [table.c[c] for c in columns]
            idx = Index(name, *columns)
            idx.create(engine)
        except Exception:
            idx = None
        engine._indexes[name] = idx
        return idx
Code example #11
class Content(BASE, ModelBase):
    """Represents a content"""
    __tablename__ = 'contents'
    content_id = Column(BigInteger().with_variant(Integer, "sqlite"),
                        Sequence('CONTENT_ID_SEQ', schema=DEFAULT_SCHEMA_NAME),
                        primary_key=True)
    transform_id = Column(BigInteger().with_variant(Integer, "sqlite"))
    coll_id = Column(BigInteger().with_variant(Integer, "sqlite"))
    map_id = Column(BigInteger().with_variant(Integer, "sqlite"), default=0)
    scope = Column(String(SCOPE_LENGTH))
    name = Column(String(NAME_LENGTH))
    min_id = Column(Integer())
    max_id = Column(Integer())
    content_type = Column(EnumWithValue(ContentType))
    status = Column(EnumWithValue(ContentStatus))
    substatus = Column(EnumWithValue(ContentStatus))
    locking = Column(EnumWithValue(ContentLocking))
    bytes = Column(Integer())
    md5 = Column(String(32))
    adler32 = Column(String(8))
    processing_id = Column(Integer())
    storage_id = Column(Integer())
    retries = Column(Integer(), default=0)
    path = Column(String(4000))
    created_at = Column("created_at",
                        DateTime,
                        default=datetime.datetime.utcnow)
    updated_at = Column("updated_at",
                        DateTime,
                        default=datetime.datetime.utcnow,
                        onupdate=datetime.datetime.utcnow)
    accessed_at = Column("accessed_at",
                         DateTime,
                         default=datetime.datetime.utcnow,
                         onupdate=datetime.datetime.utcnow)
    expired_at = Column("expired_at", DateTime)
    content_metadata = Column(JSON())

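    # Single-underscore _table_args is presumably merged into __table_args__
    # by ModelBase in this project (assumption); plain declarative models
    # would use __table_args__ directly.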
    _table_args = (
        PrimaryKeyConstraint('content_id', name='CONTENTS_PK'),
        # UniqueConstraint('name', 'scope', 'coll_id', 'content_type', 'min_id', 'max_id', name='CONTENT_SCOPE_NAME_UQ'),
        # UniqueConstraint('name', 'scope', 'coll_id', 'min_id', 'max_id', name='CONTENT_SCOPE_NAME_UQ'),
        # UniqueConstraint('content_id', 'coll_id', name='CONTENTS_UQ'),
        UniqueConstraint('transform_id',
                         'coll_id',
                         'map_id',
                         name='CONTENT_ID_UQ'),
        ForeignKeyConstraint(['transform_id'], ['transforms.transform_id'],
                             name='CONTENTS_TRANSFORM_ID_FK'),
        ForeignKeyConstraint(['coll_id'], ['collections.coll_id'],
                             name='CONTENTS_COLL_ID_FK'),
        CheckConstraint('status IS NOT NULL', name='CONTENTS_STATUS_ID_NN'),
        CheckConstraint('coll_id IS NOT NULL', name='CONTENTS_COLL_ID_NN'),
        Index('CONTENTS_STATUS_UPDATED_IDX', 'status', 'locking', 'updated_at',
              'created_at'))
Code example #12
File: testartifact.py Project: simudream/changes
class TestArtifact(db.Model):
    """
    Represents any artifacts generated by a single run of a single test. Used
    e.g. in server-selenium to store screenshots and large log dumps for
    later debugging.

    """
    __tablename__ = 'testartifact'
    __table_args__ = (
        Index('idx_test_id', 'test_id'),
    )

    id = Column(GUID, nullable=False, primary_key=True, default=uuid.uuid4)
    test_id = Column(GUID, ForeignKey('test.id', ondelete="CASCADE"), nullable=False)
    name = Column('name', String(length=256), nullable=False)
    type = Column(EnumType(TestArtifactType),
                  default=TestArtifactType.unknown,
                  nullable=False, server_default='0')
    file = Column(FileStorage(**TESTARTIFACT_STORAGE_OPTIONS))
    date_created = Column(DateTime, default=datetime.utcnow, nullable=False)

    test = relationship('TestCase', backref='artifacts')

    __repr__ = model_repr('name', 'type', 'file')

    def __init__(self, **kwargs):
        super(TestArtifact, self).__init__(**kwargs)
        if self.id is None:
            self.id = uuid.uuid4()
        if self.date_created is None:
            self.date_created = datetime.utcnow()
        if isinstance(self.type, str):
            self.type = TestArtifactType[self.type]
        if self.file is None:
            # TODO(dcramer): this is super hacky but not sure a better way to
            # do it with SQLAlchemy
            self.file = FileData({}, TESTARTIFACT_STORAGE_OPTIONS)

    def save_base64_content(self, base64):
        content = b64decode(base64)
        self.file.save(
            StringIO(content),
            '{0}/{1}_{2}'.format(
                self.test_id, self.id.hex, self.name
            ),
            self._get_content_type()
        )

    def _get_content_type(self):
        content_type, encoding = mimetypes.guess_type(self.name)
        if content_type == 'text/html':
            # upload html artifacts as plain text so the browser doesn't try to
            # render them when viewing them raw
            content_type = 'text/plain'
        return content_type
Code example #13
class OutcallTrunk(Base):

    __tablename__ = 'outcalltrunk'
    __table_args__ = (
        PrimaryKeyConstraint('outcallid', 'trunkfeaturesid'),
        Index('outcalltrunk__idx__priority', 'priority'),
    )

    outcallid = Column(Integer, nullable=False, server_default='0')
    trunkfeaturesid = Column(Integer, nullable=False, server_default='0')
    priority = Column(Integer, nullable=False, server_default='0')
Code example #14
File: models.py Project: zghcxq/TelegramPython
class UserTAGInfo(db.Model):
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    tag_id = db.Column(db.Integer)
    user_id = db.Column(db.BigInteger)
    user_name = db.Column(db.String(255))
    user_hash = db.Column(db.BigInteger)
    group_id = db.Column(db.BigInteger)
    group_name = db.Column(db.String(255))
    group_hash = db.Column(db.BigInteger)
    # Index() expects a string name as its first argument, so bare
    # Index(tag_id, group_name) raises; declaring both via __table_args__
    # is the idiomatic form (the index name below is illustrative).
    __table_args__ = (
        Index('ix_user_tag_info_tag_id_group_name', 'tag_id', 'group_name'),
        UniqueConstraint('user_id', 'group_id'),
    )
Code example #15
File: task.py Project: XiChenn/pybossa
class Task(db.Model, DomainObject):
    '''An individual Task which can be performed by a user. A Task is
    associated to a project.
    '''
    __tablename__ = 'task'

    #: Task.ID
    id = Column(Integer, primary_key=True)
    #: UTC timestamp when the task was created.
    created = Column(Text, default=make_timestamp)
    #: Project.ID that this task is associated with.
    project_id = Column(Integer,
                        ForeignKey('project.id', ondelete='CASCADE'),
                        nullable=False)
    #: Task.state: ongoing or completed.
    state = Column(UnicodeText, default=u'ongoing')
    quorum = Column(Integer, default=0)
    #: If the task is a calibration task
    calibration = Column(Integer, default=0)
    #: Priority of the task from 0.0 to 1.0
    priority_0 = Column(Float, default=0)
    #: Task.info field in JSONB with the data for the task.
    info = Column(JSONB)
    #: Number of answers to collect for this task.
    n_answers = Column(Integer, default=1)
    #: Array of User IDs that favorited this task
    fav_user_ids = Column(MutableList.as_mutable(ARRAY(Integer)))
    #: completed task can be marked as exported=True after its exported
    exported = Column(Boolean, default=False)
    #: Task.user_pref field in JSONB with user preference data for the task.
    user_pref = Column(JSONB)
    #: Task.worker_pref field in JSONB with worker preference data for the task.
    worker_pref = Column(JSONB)
    #: Task.worker_filter field in JSONB with worker filter data for the task.
    worker_filter = Column(JSONB)
    #: Task.gold_answers field in JSONB to record golden answers for fields under Task.info.
    gold_answers = Column(JSONB)
    #: Task.expiration field to determine when a task should no longer be scheduled. As UTC timestamp without timezone
    expiration = Column(DateTime, nullable=True)

    task_runs = relationship(TaskRun,
                             cascade='all, delete, delete-orphan',
                             backref='task')

    def pct_status(self):
        """Returns the percentage of Tasks that are completed"""
        if self.n_answers != 0 and self.n_answers is not None:
            return float(len(self.task_runs)) / self.n_answers
        else:  # pragma: no cover
            return float(0)

    __table_args__ = (Index('task_info_idx',
                            sqlalchemy.text('md5(info::text)')), )
    __mapper_args__ = {"order_by": id}
Code example #16
File: models.py Project: ptouati/banner-creator
class User(db.Model):
    class Gender(enum.Enum):
        male = 0
        female = 1

    class UserRole(enum.Enum):
        user = 0
        designer = 1
        admin = 2

    class SocialNetwork(enum.Enum):
        google = 0
        facebook = 1

    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)
    social_id = db.Column(db.String(255))
    social_type = db.Column(Enum(SocialNetwork), nullable=False)
    first_name = db.Column(db.String(255))
    last_name = db.Column(db.String(255))
    gender = db.Column(Enum(Gender))
    email = db.Column(db.String(255), unique=True)
    role = db.Column(Enum(UserRole), nullable=False)
    created_at = db.Column(db.DateTime,
                           nullable=False,
                           default=datetime.datetime.utcnow)
    active = db.Column(db.BOOLEAN, default=True, nullable=False)
    banners = db.relationship('Banner', backref='user')

    __table_args__ = (Index('ix_user_id_social_type', "social_type", "id"), )

    def is_authenticated(self):
        return True

    def is_active(self):
        return self.active

    def is_anonymous(self):
        return False

    def is_user(self):
        return self.role == User.UserRole.user

    def is_designer(self):
        return self.role == User.UserRole.designer

    def is_admin(self):
        return self.role == User.UserRole.admin

    def get_id(self):
        return unicode(self.id)

    def __repr__(self):
        return '<User %r>' % self.first_name
Code example #17
File: paginguser.py Project: jaunis/xivo-dao
class PagingUser(Base):

    __tablename__ = 'paginguser'
    __table_args__ = (
        PrimaryKeyConstraint('pagingid', 'userfeaturesid', 'caller'),
        Index('paginguser__idx__pagingid', 'pagingid'),
    )

    pagingid = Column(Integer, nullable=False, autoincrement=False)
    userfeaturesid = Column(Integer, nullable=False, autoincrement=False)
    caller = Column(Integer, nullable=False, autoincrement=False)
Code example #18
File: models.py Project: x0rzkov/hoaxy-backend
class AssTweetUrl(TableMixin, Base):
    """Association table to connect table `tweet` and `url`."""
    tweet_id = Column(
        Integer, ForeignKey('tweet.id', ondelete='CASCADE',
                            onupdate='CASCADE'))
    url_id = Column(
        Integer, ForeignKey('url.id', ondelete='CASCADE', onupdate='CASCADE'))
    __table_args__ = (UniqueConstraint('tweet_id',
                                       'url_id',
                                       name='tweet_url_uq'),
                      Index('url_tweet_idx', 'url_id', 'tweet_id'))
Code example #19
class CallLogParticipant(Base):

    # NOTE: Until bullseye upgrade, this schema must match the one migrated from manage-db and the
    # one injected in integration tests
    __tablename__ = 'call_logd_call_log_participant'
    __table_args__ = (Index('call_logd_call_log_participant__idx__user_uuid',
                            'user_uuid'), )

    uuid = Column(
        UUIDType,
        server_default=text('uuid_generate_v4()'),
        primary_key=True,
    )
    call_log_id = Column(
        Integer,
        ForeignKey(
            'call_logd_call_log.id',
            name='call_logd_call_log_participant_call_log_id_fkey',
            ondelete='CASCADE',
        ),
    )
    user_uuid = Column(UUIDType, nullable=False)
    line_id = Column(Integer)
    role = Column(
        Enum(
            'source',
            'destination',
            name='call_logd_call_log_participant_role',
        ),
        nullable=False,
    )
    tags = Column(ARRAY(String(128)), nullable=False, server_default='{}')
    answered = Column(Boolean, nullable=False, server_default='false')

    call_log = relationship('CallLog', uselist=False, viewonly=True)

    @hybrid_property
    def peer_exten(self):
        if self.role == 'source':
            return self.call_log.requested_exten
        else:
            return self.call_log.source_exten

    @peer_exten.expression
    def peer_exten(cls):
        return case(
            [(
                cls.role == 'source',
                select([CallLog.requested_exten
                        ]).where(cls.call_log_id == CallLog.id).as_scalar(),
            )],
            else_=select([CallLog.source_exten
                          ]).where(cls.call_log_id == CallLog.id).as_scalar(),
        )
Code example #20
class DbGroup(Base):
    """Class to store groups using SQLA backend."""

    __tablename__ = 'db_dbgroup'

    id = Column(Integer, primary_key=True)  # pylint: disable=invalid-name

    uuid = Column(UUID(as_uuid=True), default=get_new_uuid, unique=True)
    label = Column(String(255), index=True)

    type_string = Column(String(255), default='', index=True)

    time = Column(DateTime(timezone=True), default=timezone.now)
    description = Column(Text, nullable=True)

    extras = Column(JSONB, default=dict, nullable=False)

    user_id = Column(
        Integer,
        ForeignKey('db_dbuser.id',
                   ondelete='CASCADE',
                   deferrable=True,
                   initially='DEFERRED'))
    user = relationship('DbUser', backref=backref('dbgroups', cascade='merge'))

    dbnodes = relationship('DbNode',
                           secondary=table_groups_nodes,
                           backref='dbgroups',
                           lazy='dynamic')

    __table_args__ = (UniqueConstraint('label', 'type_string'), )

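    # These standalone Index() calls attach to table_groups_nodes through the
    # columns passed in, so metadata.create_all() emits them with that table.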
    Index('db_dbgroup_dbnodes_dbnode_id_idx', table_groups_nodes.c.dbnode_id)
    Index('db_dbgroup_dbnodes_dbgroup_id_idx', table_groups_nodes.c.dbgroup_id)

    @property
    def pk(self):
        return self.id

    def __str__(self):
        return '<DbGroup [type: {}] "{}">'.format(self.type_string, self.label)
Code example #21
File: 032_clusters.py Project: rumale/trove
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    Table('datastores', meta, autoload=True)
    Table('datastore_versions', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)

    # since the downgrade is a no-op, an upgrade after a downgrade will
    # cause an exception because the tables already exist
    # we will catch that case and log an info message
    try:
        create_tables([clusters])

        instances.create_column(Column('cluster_id', String(36),
                                       ForeignKey("clusters.id")))
        instances.create_column(Column('shard_id', String(36)))
        instances.create_column(Column('type', String(64)))

        cluster_id_idx = Index("instances_cluster_id", instances.c.cluster_id)
        cluster_id_idx.create()
    except OperationalError as e:
        logger.info(e)
Code example #22
File: groupfeatures.py Project: jaunis/xivo-dao
class GroupFeatures(Base):

    __tablename__ = 'groupfeatures'
    __table_args__ = (
        PrimaryKeyConstraint('id'),
        Index('groupfeatures__idx__context', 'context'),
        Index('groupfeatures__idx__name', 'name'),
        Index('groupfeatures__idx__number', 'number'),
    )

    id = Column(Integer)
    name = Column(String(128), nullable=False)
    number = Column(String(40), nullable=False, server_default='')
    context = Column(String(39), nullable=False)
    transfer_user = Column(Integer, nullable=False, server_default='0')
    transfer_call = Column(Integer, nullable=False, server_default='0')
    write_caller = Column(Integer, nullable=False, server_default='0')
    write_calling = Column(Integer, nullable=False, server_default='0')
    timeout = Column(Integer, nullable=False, server_default='0')
    preprocess_subroutine = Column(String(39))
    deleted = Column(Integer, nullable=False, server_default='0')
Code example #23
class Deploy(db.Model):
    __tablename__ = 'deploy'
    __table_args__ = (
        Index('idx_deploy_task_id', 'task_id'),
        Index('idx_deploy_app_id', 'app_id'),
        UniqueConstraint('task_id',
                         'app_id',
                         'environment',
                         'number',
                         name='unq_deploy_number'),
    )

    id = Column(Integer, primary_key=True)
    task_id = Column(Integer,
                     ForeignKey('task.id', ondelete='CASCADE'),
                     nullable=False)
    app_id = Column(Integer,
                    ForeignKey('app.id', ondelete='CASCADE'),
                    nullable=False)
    environment = Column(String(64), nullable=False, default='production')
    number = Column(Integer, nullable=False)
Code example #24
File: 032_clusters.py Project: vdialani/trove
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    Table('datastores', meta, autoload=True)
    Table('datastore_versions', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)

    # since the downgrade is a no-op, an upgrade after a downgrade will
    # cause an exception because the tables already exist
    # we will catch that case and log an info message
    try:
        create_tables([clusters])

        instances.create_column(
            Column('cluster_id', String(36), ForeignKey("clusters.id")))
        instances.create_column(Column('shard_id', String(36)))
        instances.create_column(Column('type', String(64)))

        cluster_id_idx = Index("instances_cluster_id", instances.c.cluster_id)
        cluster_id_idx.create()
    except OperationalError as e:
        logger.info(e)
Code example #25
class SchedulePath(Base):

    __tablename__ = 'schedule_path'
    __table_args__ = (
        PrimaryKeyConstraint('schedule_id', 'path', 'pathid'),
        Index('schedule_path_path', 'path', 'pathid'),
    )

    schedule_id = Column(Integer, autoincrement=False)
    path = Column(enum.schedule_path_type, nullable=False, autoincrement=False)
    pathid = Column(Integer, autoincrement=False)
    order = Column(Integer, nullable=False)
Code example #26
File: deploy.py Project: sandrociceros/freight
class Deploy(db.Model):
    __tablename__ = "deploy"
    __table_args__ = (
        Index("idx_deploy_task_id", "task_id"),
        Index("idx_deploy_app_id", "app_id"),
        UniqueConstraint("task_id",
                         "app_id",
                         "environment",
                         "number",
                         name="unq_deploy_number"),
    )

    id = Column(Integer, primary_key=True)
    task_id = Column(Integer,
                     ForeignKey("task.id", ondelete="CASCADE"),
                     nullable=False)
    app_id = Column(Integer,
                    ForeignKey("app.id", ondelete="CASCADE"),
                    nullable=False)
    environment = Column(String(64), nullable=False, default="production")
    number = Column(Integer, nullable=False)
Code example #27
class SwitchboardMemberUser(Base):

    __tablename__ = 'switchboard_member_user'
    __table_args__ = (
        PrimaryKeyConstraint('switchboard_uuid', 'user_uuid'),
        Index('switchboard_member_user__idx__switchboard_uuid', 'switchboard_uuid'),
    )

    switchboard_uuid = Column(String(38), ForeignKey('switchboard.uuid'), nullable=False)
    user_uuid = Column(String(38), ForeignKey('userfeatures.uuid'), nullable=False)

    switchboard = relationship('Switchboard')
    user = relationship('UserFeatures')
Code example #28
class Extension(Base):

    __tablename__ = 'extensions'
    __table_args__ = (
        PrimaryKeyConstraint('id'),
        UniqueConstraint('exten', 'context'),
        Index('extensions__idx__context', 'context'),
        Index('extensions__idx__exten', 'exten'),
        Index('extensions__idx__type', 'type'),
        Index('extensions__idx__typeval', 'typeval'),
    )

    id = Column(Integer)
    commented = Column(Integer, nullable=False, server_default='0')
    context = Column(String(39), nullable=False, server_default='')
    exten = Column(String(40), nullable=False, server_default='')
    type = Column(enum.extenumbers_type, nullable=False)
    typeval = Column(String(255), nullable=False, server_default='')

    @property
    def name(self):
        return self.typeval
Code example #29
def init(db_user, db_passwd, db_host, db_name):
    DBBase.Engine = create_engine("mysql://%s:%s@%s/%s?charset=utf8" %
                                  (db_user, db_passwd, db_host, db_name),
                                  pool_recycle=-1,
                                  echo=False)

    Index('ix_followedInfo_col23456', FollowedInfo.device_token,
          FollowedInfo.flight_no, FollowedInfo.takeoff_airport,
          FollowedInfo.arrival_airport, FollowedInfo.schedule_takeoff_date)

    DBBase.Base.metadata.create_all(DBBase.Engine)
    DBBase.Session = scoped_session(
        sessionmaker(bind=DBBase.Engine, expire_on_commit=False))
Code example #30
File: table.py Project: aklaver/dataset
    def create_index(self, columns, name=None):
        """
        Create an index to speed up queries on a table. If no ``name`` is given a random name is created.
        ::

            table.create_index(['name', 'country'])
        """
        self._check_dropped()
        with self.database.lock:
            if not name:
                sig = abs(hash('||'.join(columns)))
                name = 'ix_%s_%s' % (self.table.name, sig)
            if name in self.indexes:
                return self.indexes[name]
            try:
                columns = [self.table.c[c] for c in columns]
                idx = Index(name, *columns)
                idx.create(self.database.engine)
            except Exception:
                idx = None
            self.indexes[name] = idx
            return idx
Code example #31
class DeptMembership(MagModel):
    is_dept_head = Column(Boolean, default=False)
    is_poc = Column(Boolean, default=False)
    is_checklist_admin = Column(Boolean, default=False)
    attendee_id = Column(UUID, ForeignKey('attendee.id'))
    department_id = Column(UUID, ForeignKey('department.id'))

    __mapper_args__ = {'confirm_deleted_rows': False}
    __table_args__ = (
        UniqueConstraint('attendee_id', 'department_id'),
        Index('ix_dept_membership_attendee_id', 'attendee_id'),
        Index('ix_dept_membership_department_id', 'department_id'),
    )

    @hybrid_property
    def has_role(self):
        return self.has_inherent_role or self.has_dept_role

    @has_role.expression
    def has_role(cls):
        return or_(cls.has_inherent_role, cls.has_dept_role)

    @hybrid_property
    def has_inherent_role(self):
        return self.is_dept_head or self.is_poc or self.is_checklist_admin

    @has_inherent_role.expression
    def has_inherent_role(cls):
        return or_(cls.is_dept_head == True, cls.is_poc == True,
                   cls.is_checklist_admin == True)  # noqa: E712

    @hybrid_property
    def has_dept_role(self):
        return bool(self.dept_roles)

    @has_dept_role.expression
    def has_dept_role(cls):
        return exists().select_from(dept_membership_dept_role) \
            .where(cls.id == dept_membership_dept_role.c.dept_membership_id)
Code example #32
File: sqlalchemy.py Project: zhuhj89/spyne
def _convert_fake_table(cls, table):
    metadata = cls.Attributes.sqla_metadata
    table_name = cls.Attributes.table_name

    _table = table
    table_args, table_kwargs = sanitize_args(cls.Attributes.sqla_table_args)
    table = Table(table_name, metadata, *(tuple(table.columns) + table_args),
                  **table_kwargs)

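    # `_table` is spyne's pre-conversion fake table; its `.indexes` apparently
    # holds (args, kwargs) tuples rather than sqlalchemy Index objects
    # (assumption inferred from the unpacking below).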
    for index_args, index_kwargs in _table.indexes:
        Index(*index_args, **index_kwargs)

    return table
Code example #33
File: groups.py Project: mpastyl/websocket-console
def create_tables(engine):
    metadata = MetaData()
    columns = []
    columns.append(Column('owner', String(256), primary_key=True))
    columns.append(Column('name', String(256), primary_key=True))
    columns.append(Column('member', String(256), primary_key=True))
    groups = Table('groups', metadata, *columns, mysql_engine='InnoDB')

    # place an index on member
    Index('idx_groups_member', groups.c.member)

    metadata.create_all(engine)
    return metadata.sorted_tables
Code example #34
    def _create_storm_database(self, revision):
        version_table = Table(
            'version', Model.metadata,
            Column('id', Integer, primary_key=True),
            Column('component', Unicode),
            Column('version', Unicode),
        )
        version_table.create(config.db.engine)
        config.db.store.execute(version_table.insert().values(
            component='schema', version=revision))
        config.db.commit()
        # Other Storm specific changes, those SQL statements hopefully work on
        # all DB engines...
        config.db.engine.execute(
            'ALTER TABLE mailinglist ADD COLUMN acceptable_aliases_id INT')
        Index('ix_user__user_id').drop(bind=config.db.engine)
        # Don't pollute our main metadata object, create a new one.
        md = MetaData()
        user_table = Model.metadata.tables['user'].tometadata(md)
        Index('ix_user_user_id', user_table.c._user_id).create(
            bind=config.db.engine)
        config.db.commit()
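
Note on the pattern above: `Index('ix_user__user_id').drop(bind=...)` builds an Index from a bare name with no table or columns; whether `DROP INDEX` can be emitted from that depends on the SQLAlchemy version and dialect in use, so treat it as version-specific rather than general-purpose.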
Code example #35
File: table.py Project: brettbeaudoin/dataset
    def create_index(self, columns, name=None):
        """
        Create an index to speed up queries on a table. If no ``name`` is given a random name is created.
        ::

            table.create_index(['name', 'country'])
        """
        self._check_dropped()
        with self.database.lock:
            if not name:
                sig = abs(hash('||'.join(columns)))
                name = 'ix_%s_%s' % (self.table.name, sig)
            if name in self.indexes:
                return self.indexes[name]
            try:
                columns = [self.table.c[c] for c in columns]
                idx = Index(name, *columns)
                idx.create(self.database.engine)
            except Exception:
                idx = None
            self.indexes[name] = idx
            return idx
Code example #36
File: queue_log.py Project: wazo-platform/xivo-dao
class QueueLog(Base):

    __tablename__ = 'queue_log'
    __table_args__ = (
        PrimaryKeyConstraint('id'),
        Index('queue_log__idx_agent', 'agent'),
        Index('queue_log__idx_callid', 'callid'),
        Index('queue_log__idx_event', 'event'),
        Index('queue_log__idx_time', 'time'),
    )

    time = Column(DateTime(timezone=True))
    callid = Column(String(80))
    queuename = Column(String(256))
    agent = Column(Text)
    event = Column(String(20))
    data1 = Column(Text)
    data2 = Column(Text)
    data3 = Column(Text)
    data4 = Column(Text)
    data5 = Column(Text)
    id = Column(Integer)
Code example #37
def define_artifacts_table(meta):
    artifacts = Table('artifacts',
                      meta,
                      Column('id',
                             String(36),
                             primary_key=True,
                             nullable=False),
                      Column('name', String(255), nullable=False),
                      Column('type_name', String(255), nullable=False),
                      Column('type_version_prefix',
                             BigInteger(),
                             nullable=False),
                      Column('type_version_suffix', String(255)),
                      Column('type_version_meta', String(255)),
                      Column('version_prefix', BigInteger(), nullable=False),
                      Column('version_suffix', String(255)),
                      Column('version_meta', String(255)),
                      Column('description', Text()),
                      Column('visibility', String(32), nullable=False),
                      Column('state', String(32), nullable=False),
                      Column('owner', String(255), nullable=False),
                      Column('created_at', DateTime(), nullable=False),
                      Column('updated_at', DateTime(), nullable=False),
                      Column('deleted_at', DateTime()),
                      Column('published_at', DateTime()),
                      mysql_engine='InnoDB',
                      mysql_charset='utf8',
                      extend_existing=True)

    Index('ix_artifact_name_and_version', artifacts.c.name,
          artifacts.c.version_prefix, artifacts.c.version_suffix)
    Index('ix_artifact_type', artifacts.c.type_name,
          artifacts.c.type_version_prefix, artifacts.c.type_version_suffix)
    Index('ix_artifact_state', artifacts.c.state)
    Index('ix_artifact_owner', artifacts.c.owner)
    Index('ix_artifact_visibility', artifacts.c.visibility)

    return artifacts
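
The five Index objects attach themselves to the artifacts table via the columns they receive, so creating the table also creates them; a hedged usage sketch follows (the engine is assumed, not part of the original).

# engine = create_engine('mysql://...')  # assumed
meta = MetaData()
artifacts = define_artifacts_table(meta)
meta.create_all(engine)  # creates the table together with the five indexes above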
Code example #38
File: ansisql.py Project: hgroll/yocto-autobuilder
    def visit_column(self, column):
        """Create a column (table already exists).

        :param column: column object
        :type column: :class:`sqlalchemy.Column` instance
        """
        if column.default is not None:
            self.traverse_single(column.default)

        table = self.start_alter_table(column)
        self.append("ADD ")
        self.append(self.get_column_specification(column))

        for cons in column.constraints:
            self.traverse_single(cons)
        self.execute()

        # ALTER TABLE STATEMENTS

        # add indexes and unique constraints
        if column.index_name:
            ix = Index(column.index_name,
                       column,
                       unique=bool(column.index_name or column.index))
            ix.create()
        elif column.unique_name:
            constraint.UniqueConstraint(column,
                                        name=column.unique_name).create()

        # SA bounds FK constraints to table, add manually
        for fk in column.foreign_keys:
            self.add_foreignkey(fk.constraint)

        # add primary key constraint if needed
        if column.primary_key_name:
            cons = constraint.PrimaryKeyConstraint(column,
                                                   name=column.primary_key_name)
            cons.create()
Code example #39
class DbGroup(Base):
    __tablename__ = "db_dbgroup"

    id = Column(Integer, primary_key=True)

    uuid = Column(UUID(as_uuid=True), default=uuid_func)
    name = Column(String(255), index=True)

    type = Column(String(255), default="", index=True)

    time = Column(DateTime(timezone=True), default=timezone.now)
    description = Column(Text, nullable=True)

    user_id = Column(Integer, ForeignKey('db_dbuser.id', ondelete='CASCADE', deferrable=True, initially="DEFERRED"))
    user = relationship('DbUser', backref=backref('dbgroups', cascade='merge'))

    dbnodes = relationship('DbNode', secondary=table_groups_nodes, backref="dbgroups", lazy='dynamic')

    __table_args__ = (
        UniqueConstraint('name', 'type'),
    )

    Index('db_dbgroup_dbnodes_dbnode_id_idx', table_groups_nodes.c.dbnode_id)
    Index('db_dbgroup_dbnodes_dbgroup_id_idx', table_groups_nodes.c.dbgroup_id)

    @property
    def pk(self):
        return self.id

    def __str__(self):
        if self.type:
            return '<DbGroup [type: {}] "{}">'.format(self.type, self.name)
        else:
            return '<DbGroup [user-defined] "{}">'.format(self.name)

    def get_aiida_class(self):
        from aiida.orm.implementation.sqlalchemy.group import Group
        return Group(dbgroup=self)
Code example #40
File: log.py Project: davej/changes
class LogChunk(db.Model):
    __tablename__ = 'logchunk'
    __table_args__ = (
        Index('idx_logchunk_project_id', 'project_id'),
        Index('idx_logchunk_build_id', 'job_id'),
        Index('idx_logchunk_source_id', 'source_id'),
        UniqueConstraint('source_id',
                         'offset',
                         name='unq_logchunk_source_offset'),
    )

    id = Column(GUID, primary_key=True, default=uuid.uuid4)
    job_id = Column(GUID,
                    ForeignKey('job.id', ondelete="CASCADE"),
                    nullable=False)
    project_id = Column(GUID,
                        ForeignKey('project.id', ondelete="CASCADE"),
                        nullable=False)
    source_id = Column(GUID,
                       ForeignKey('logsource.id', ondelete="CASCADE"),
                       nullable=False)
    # offset is sum(c.size for c in chunks_before_this)
    offset = Column(Integer, nullable=False)
    # size is len(text)
    size = Column(Integer, nullable=False)
    text = Column(Text, nullable=False)
    date_created = Column(DateTime, default=datetime.utcnow)

    job = relationship('Job')
    project = relationship('Project')
    source = relationship('LogSource')

    def __init__(self, **kwargs):
        super(LogChunk, self).__init__(**kwargs)
        if self.id is None:
            self.id = uuid.uuid4()
        if self.date_created is None:
            self.date_created = datetime.utcnow()
Code example #41
class CobraWhiteList(db.Model):
    """
    Whitelist for project and rule
    """
    __tablename__ = 'whitelist'

    id = db.Column(INTEGER(unsigned=True),
                   primary_key=True,
                   autoincrement=True,
                   nullable=False)
    project_id = db.Column(db.Integer, default=None, nullable=False)
    rule_id = db.Column(db.Integer, default=None, nullable=False)
    path = db.Column(db.String(512), default=None, nullable=False)
    reason = db.Column(db.String(512), default=None, nullable=False)
    status = db.Column(TINYINT, default=None, nullable=False)
    created_at = db.Column(db.DateTime, default=None, nullable=False)
    updated_at = db.Column(db.DateTime, default=None, nullable=False)

    __table_args__ = (Index('ix_project_id_rule_id', project_id, rule_id), {
        "mysql_charset": "utf8mb4"
    })

    def __init__(self,
                 project_id,
                 rule_id,
                 path,
                 reason,
                 status,
                 created_at=None,
                 updated_at=None):
        self.project_id = project_id
        self.rule_id = rule_id
        self.path = path
        self.reason = reason
        self.status = status
        current_time = time.strftime('%Y-%m-%d %X', time.localtime())
        self.created_at = created_at if created_at is not None else current_time
        self.updated_at = updated_at if updated_at is not None else current_time

    def __repr__(self):
        return "<CobraWhiteList %r-%r:%r>" % (self.project_id, self.rule_id,
                                              self.reason)
Code example #42
class StaticVoicemail(Base):

    __tablename__ = 'staticvoicemail'
    __table_args__ = (PrimaryKeyConstraint('id'),
                      Index('staticvoicemail__idx__category', 'category'))

    id = Column(Integer, nullable=False)
    cat_metric = Column(Integer, nullable=False, server_default='0')
    var_metric = Column(Integer, nullable=False, server_default='0')
    commented = Column(Integer, nullable=False, server_default='0')
    filename = Column(String(128), nullable=False)
    category = Column(String(128), nullable=False)
    var_name = Column(String(128), nullable=False)
    var_val = Column(Text)
Code example #43
File: table.py Project: hx-yellowstar/dataset
    def create_index(self, columns, name=None, **kw):
        """Create an index to speed up queries on a table.

        If no ``name`` is given a random name is created.
        ::

            table.create_index(['name', 'country'])
        """
        columns = [normalize_column_name(c) for c in ensure_tuple(columns)]
        with self.db.lock:
            if not self.exists:
                raise DatasetException("Table has not been created yet.")

            for column in columns:
                if not self.has_column(column):
                    return

            if not self.has_index(columns):
                self._threading_warn()
                name = name or index_name(self.name, columns)
                columns = [self.table.c[c] for c in columns]
                idx = Index(name, *columns, **kw)
                idx.create(self.db.executable)
Code example #44
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)

    tenant_id_idx = Index("instances_tenant_id", instances.c.tenant_id)
    tenant_id_idx.drop()

    deleted_idx = Index("instances_deleted", instances.c.deleted)
    deleted_idx.drop()
Code example #45
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    backups = Table('backups', meta, autoload=True)
    backups_instance_id_idx = Index("backups_instance_id",
                                    backups.c.instance_id)
    backups_deleted_idx = Index("backups_deleted", backups.c.deleted)

    backups_instance_id_idx.drop()
    backups_deleted_idx.drop()
Code example #46
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)

    tenant_id_idx = Index("instances_tenant_id", instances.c.tenant_id)

    try:
        tenant_id_idx.create()
    except OperationalError as e:
        logger.info(e)

    deleted_idx = Index("instances_deleted", instances.c.deleted)
    try:
        deleted_idx.create()
    except OperationalError as e:
        logger.info(e)
Code example #47
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    backups = Table('backups', meta, autoload=True)
    backups_instance_id_idx = Index("backups_instance_id",
                                    backups.c.instance_id)
    backups_deleted_idx = Index("backups_deleted", backups.c.deleted)

    try:
        backups_instance_id_idx.create()
    except OperationalError as e:
        logger.info(e)

    try:
        backups_deleted_idx.create()
    except OperationalError as e:
        logger.info(e)
Code example #48
File: store.py Project: NoemiNahomy/cubes
    def create_cube_aggregate(self, cube, table_name=None, dimensions=None,
                                 replace=False, create_index=False,
                                 schema=None):
        """Creates an aggregate table. If dimensions is `None` then all cube's
        dimensions are considered.

        Arguments:

        * `dimensions`: list of dimensions to use in the aggregated cuboid, if
          `None` then all cube dimensions are used
        """

        browser = SQLBrowser(cube, self, schema=schema)

        if browser.safe_labels:
            raise ConfigurationError("Aggregation does not work with "
                                     "safe_labels turned on")

        schema = schema or self.naming.aggregate_schema \
                    or self.naming.schema

        # TODO: this is very similar to the denormalization prep.
        table_name = table_name or self.naming.aggregate_table_name(cube.name)
        fact_name = cube.fact or self.naming.fact_table_name(cube.name)

        dimensions = dimensions or [dim.name for dim in cube.dimensions]

        if fact_name == table_name and schema == self.naming.schema:
            raise StoreError("Aggregation target is the same as fact")

        drilldown = []
        keys = []
        for dimref in dimensions:
            (dimname, hiername, level) = string_to_dimension_level(dimref)
            dimension = cube.dimension(dimname)
            hierarchy = dimension.hierarchy(hiername)
            levels = hierarchy.levels
            drilldown.append((dimension, hierarchy, levels[-1]))
            keys += [l.key for l in levels]

        cell = Cell(cube)
        drilldown = Drilldown(drilldown, cell)

        # Create statement of all dimension level keys for
        # getting structure for table creation
        (statement, _) = browser.aggregation_statement(
            cell,
            drilldown=drilldown,
            aggregates=cube.aggregates
        )

        # Create table
        table = self.create_table_from_statement(
            table_name,
            statement,
            schema=schema,
            replace=replace,
            insert=False
        )

        self.logger.info("Inserting...")

        insert = table.insert().from_select(statement.columns, statement)
        self.execute(insert)

        self.logger.info("Done")

        if create_index:
            self.logger.info("Creating indexes...")
            aggregated_columns = [a.name for a in cube.aggregates]
            for column in table.columns:
                if column.name in aggregated_columns:
                    continue

                name = "%s_%s_idx" % (table_name, column)
                self.logger.info("creating index: %s" % name)
                index = Index(name, column)
                index.create(self.connectable)

        self.logger.info("Done")
Code example #49
def drop_index(index):
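    # `index_exists` and the (name, *columns) tuple convention for `index`
    # are project-local helpers not shown in this snippet (assumption).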
    if index_exists(index):
        index = Index(*index)
        index.drop()
Code example #50
File: store.py Project: dustinromey/cubes
    def create_cube_aggregate(self, browser, table_name=None, dimensions=None,
                              dimension_links=None, schema=None,
                              replace=False):
        """Creates an aggregate table. If dimensions is `None` then all cube's
        dimensions are considered.

        Arguments:

        * `dimensions`: list of dimensions to use in the aggregated cuboid, if
          `None` then all cube dimensions are used
        * `dimension_links`: list of dimensions that are required for each
          aggregation (for example a date dimension in most of the cases). The
          list should be a subset of `dimensions`.
        * `aggregates_prefix`: aggregated table prefix
        * `aggregates_schema`: schema where aggregates are stored

        """

        if browser.store != self:
            raise ArgumentError("Can create aggregate table only within "
                                "the same store")

        schema = schema or self.options.get("aggregates_schema", self.schema)
        # Just a shortcut
        cube = browser.cube

        prefix = self.options.get("aggregates_prefix", "")
        table_name = table_name or "%s_%s" % (prefix, cube.name)

        if dimensions:
            dimensions = [cube.dimension(dimension) for dimension in dimensions]
        else:
            dimensions = cube.dimensions

        builder = QueryBuilder(browser)

        if builder.snowflake.fact_name == table_name and builder.snowflake.schema == schema:
            raise ArgumentError("target is the same as source fact table")

        drilldown = []
        keys = None
        for dimension in dimensions:
            levels = dimension.hierarchy().levels
            drilldown.append((dimension, dimension.hierarchy(), levels[-1]))
            keys = [l.key for l in levels]

        cell = Cell(cube)
        drilldown = Drilldown(drilldown, cell)

        # Create statement of all dimension level keys for
        # getting structure for table creation
        statement = builder.aggregation_statement(
            cell,
            drilldown=drilldown,
            aggregates=cube.aggregates,
            attributes=keys
        )

        # Create table
        table = self.create_table_from_statement(
            table_name,
            statement,
            schema=schema,
            replace=replace,
            insert=False
        )

        self.logger.info("Inserting...")

        with self.connectable.begin() as connection:

            insert = InsertIntoAsSelect(table, statement,
                                        columns=statement.columns)

            connection.execute(insert)
        self.logger.info("Done")

        self.logger.info("Creating indexes...")

        aggregated_columns = [a.name for a in cube.aggregates]
        for column in table.columns:
            if column.name in aggregated_columns:
                continue

            name = "%s_%s_idx" % (table_name, column)
            self.logger.info("creating index: %s" % name)
            index = Index(name, column)
            index.create(self.connectable)

        self.logger.info("Done")