Пример #1
0
class Genre(db.Model):
    """A game genre, linked to games, events and developers via association tables."""

    # Columns exposed to full-text search (project convention; comma-separated).
    __ftcolumns__ = 'name, raw_desc'

    id = db.Column(db.Integer, primary_key=True)

    name = db.Column(db.String(4096))
    image = db.Column(db.String(4096))
    desc = db.Column(db.Text)
    raw_desc = db.Column(db.Text)

    games = db.relationship('Game', secondary=game_genre_assoc, back_populates='genres')
    direct_events = db.relationship('Event', secondary=event_genre_assoc)
    # Events reached indirectly through this genre's games; viewonly because
    # the secondary is a joined selectable rather than a plain association table.
    indirect_events = db.relationship('Event',
                                      viewonly=True,
                                      secondary=join(game_genre_assoc, event_game_assoc,
                                                     game_genre_assoc.c.game_id == event_game_assoc.c.game_id).alias())

    developers = db.relationship('Developer',
                                 viewonly=True,
                                 secondary=join(game_genre_assoc, game_developer_assoc,
                                                game_genre_assoc.c.game_id == game_developer_assoc.c.game_id).alias())

    def json(self):
        """Serialize this genre and id/name pairs of its relations to a JSON-ready dict."""
        return {'id': self.id,
                'img': self.image,
                'name': self.name,
                'desc': self.desc,
                'games': [(game.id, game.primary_name) for game in self.games],
                'events': [(event.id, event.name) for event in self.events],
                'developers': [(developer.id, developer.name) for developer in self.developers]
                }

    @hybrid_property
    def events(self):
        # Direct events win; falls back to indirect ones when the direct
        # list is empty (an empty list is falsy).
        return self.direct_events or self.indirect_events
Пример #2
0
def _get_select_waypoints_for_routes():
    """Build the 'all_waypoints' CTE of waypoints associated to routes.

    Unions three association levels: waypoints directly linked to a route,
    the parents of those waypoints, and their grandparents.
    """
    wp_type = text("'" + WAYPOINT_TYPE + "'")
    r_type = text("'" + ROUTE_TYPE + "'")

    # Waypoints directly associated to a route.
    linked = select([
        Association.child_document_id.label('route_id'),
        Association.parent_document_id.label('waypoint_id')
    ]).where(
        and_(
            Association.parent_document_type == wp_type,
            Association.child_document_type == r_type)
    ).cte('linked_waypoints')

    # Parents of the directly linked waypoints.
    parents = select([
        linked.c.route_id,
        Association.parent_document_id.label('waypoint_id')
    ]).select_from(join(
        linked,
        Association,
        and_(
            Association.child_document_id == linked.c.waypoint_id,
            Association.parent_document_type == wp_type
        ))
    ).cte('waypoint_parents')

    # Grandparents: one more association level up.
    grandparents = select([
        parents.c.route_id,
        Association.parent_document_id.label('waypoint_id')
    ]).select_from(join(
        parents,
        Association,
        and_(
            Association.child_document_id == parents.c.waypoint_id,
            Association.parent_document_type == wp_type
        ))
    ).cte('waypoint_grandparents')

    return union(
        linked.select(),
        parents.select(),
        grandparents.select()
    ).cte('all_waypoints')
Пример #3
0
def _get_select_waypoints_for_routes():
    """Build the 'all_waypoints' CTE of waypoints associated to routes.

    Unions three association levels: waypoints directly linked to a route,
    the parents of those waypoints, and their grandparents.
    """
    # SQL string literals for the document-type discriminators.
    waypoint_type = text('\'' + WAYPOINT_TYPE + '\'')
    route_type = text('\'' + ROUTE_TYPE + '\'')

    # Waypoints directly associated to a route (waypoint parent, route child).
    select_linked_waypoints = \
        select([
            Association.child_document_id.label('route_id'),
            Association.parent_document_id.label('waypoint_id')
        ]). \
        where(
            and_(
                Association.parent_document_type == waypoint_type,
                Association.child_document_type == route_type)). \
        cte('linked_waypoints')

    # Parents of the directly linked waypoints (one association level up).
    select_waypoint_parents = \
        select([
            select_linked_waypoints.c.route_id,
            Association.parent_document_id.label('waypoint_id')
        ]). \
        select_from(join(
            select_linked_waypoints,
            Association,
            and_(
                Association.child_document_id ==
                select_linked_waypoints.c.waypoint_id,
                Association.parent_document_type == waypoint_type
            ))). \
        cte('waypoint_parents')

    # Grandparents of the linked waypoints (two association levels up).
    select_waypoint_grandparents = \
        select([
            select_waypoint_parents.c.route_id,
            Association.parent_document_id.label('waypoint_id')
        ]). \
        select_from(join(
            select_waypoint_parents,
            Association,
            and_(
                Association.child_document_id ==
                select_waypoint_parents.c.waypoint_id,
                Association.parent_document_type == waypoint_type
            ))). \
        cte('waypoint_grandparents')

    return union(
            select_linked_waypoints.select(),
            select_waypoint_parents.select(),
            select_waypoint_grandparents.select()
        ). \
        cte('all_waypoints')
Пример #4
0
def _resource_consumption_stats_view():
    """Selectable with per-package/arch Koji task time and its share of the total."""
    task_time = func.sum(KojiTask.finished - KojiTask.started)
    task_seconds = extract('EPOCH', task_time)
    # Scalar subquery: total time spent across all Koji tasks.
    all_seconds = select([task_seconds]).select_from(KojiTask)
    stats = select([
        Package.name,
        KojiTask.arch,
        task_time.label('time'),
        cast(task_seconds / all_seconds, Float).label('time_percentage'),
    ])
    stats = stats.select_from(
        join(
            join(Package, Build, Package.id == Build.package_id),
            KojiTask,
        ))
    return stats.group_by(Package.name, KojiTask.arch)
Пример #5
0
    def _build_query(self, filter_values):
        """Build the aggregate report query for the given filter values.

        Joins two aliased subselects over "fluff_FarmerRecordFluff": 'x'
        (per doc_id/group_id rows with a maxmin column) and 'y' (per-group
        gender aggregate), applying HAVING clauses derived from self.filters.
        """
        having = []
        filter_cols = []
        external_cols = _get_grouping(filter_values)

        # Collect filter expressions, skipping the pseudo-filters handled
        # separately below; track which columns must appear in the subselect.
        for fil in self.filters:
            if fil.column_name not in ['group', 'gender', 'group_leadership', 'disaggregate_by', 'table_card_group_by']:
                if fil.column_name not in external_cols and fil.column_name != 'maxmin':
                    filter_cols.append(fil.column_name)
                having.append(fil.build_expression())

        # Group-level HAVING clause for the 'y' subselect; depends on which
        # of the mutually exclusive group filters is active.
        group_having = ''
        having_group_by = []
        if ('disaggregate_by' in filter_values and filter_values['disaggregate_by'] == 'group') or ('table_card_group_by' in filter_values and filter_values['table_card_group_by']):
            group_having = "group_leadership=\'Y\'"
            having_group_by.append('group_leadership')
        elif 'group_leadership' in filter_values and filter_values['group_leadership']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) = :group_leadership and group_leadership=\'Y\'"
            having_group_by.append('group_leadership')
            filter_cols.append('group_leadership')
        elif 'gender' in filter_values and filter_values['gender']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) = :gender"

        table_card_group = []
        if 'group_name' in self.group_by:
            table_card_group.append('group_name')
        # 'x': per-document rows plus the maxmin aggregate used in grouping.
        s1 = alias(select(['doc_id', 'group_id', 'MAX(prop_value) + MIN(prop_value) as maxmin'] + filter_cols + external_cols,
                                from_obj='"fluff_FarmerRecordFluff"',
                                group_by=['doc_id', 'group_id'] + filter_cols + external_cols), name='x')
        # 'y': per-group gender aggregate, restricted by group_having.
        s2 = alias(select(['group_id', '(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) as gender'] + table_card_group, from_obj='"fluff_FarmerRecordFluff"',
                                group_by=['group_id'] + table_card_group + having_group_by, having=group_having), name='y')
        # Final count per group, with bound parameters from filter_values.
        return select(['COUNT(x.doc_id) as %s' % self.key] + self.group_by,
               group_by=['maxmin'] + filter_cols + self.group_by,
               having=" and ".join(having),
               from_obj=join(s1, s2, s1.c.group_id==s2.c.group_id)).params(filter_values)
Пример #6
0
def list_files(project_name):
    """Return a JSON array of file names in the current user's named project."""
    rows = (db.session.query(File.name)
            .select_from(join(File, Project))
            .filter(Project.user_id == current_user.id,
                    Project.name == project_name)
            .all())
    return jsonify([name for (name,) in rows])
Пример #7
0
    def build_query_to_populate(self, query, full_table, aggregate_table):
        """Return (insert_columns, select_query) binning rows by geometry intersection."""
        insert_columns = [aggregate_table.c.join_key]
        bin_key = Column(self.key, Integer)
        bin_geom = Column(self.geometry_column, Geometry())
        bins_table = Table(self.table, full_table.metadata, bin_key, bin_geom)

        if not self.join_custom_data:
            location = full_table.c.location
        else:
            extra_data = Table(
                "extra_data", full_table.metadata,
                Column("verified", Boolean),
                Column("timestamp", DateTime),
                Column("client_ip", Integer),
                Column("server_ip", Integer),
                Column("location", Geometry("Point", srid=4326)),
                keep_existing=True)

            # Left outer join the user-submitted extra data onto the
            # measurement rows by (client_ip, server_ip, timestamp).
            joined = join(
                full_table, extra_data,
                and_(extra_data.c.client_ip == full_table.c.client_ip,
                     extra_data.c.server_ip == full_table.c.server_ip,
                     extra_data.c.timestamp == full_table.c.time),
                isouter=True)
            query = query.select_from(joined)
            # Prefer the verified extra-data location when present,
            # otherwise fall back to the measured location.
            location = case(
                [(extra_data.c.verified,
                  func.coalesce(extra_data.c.location, full_table.c.location))],
                else_=full_table.c.location)

        select_query = (query.select_from(bins_table)
                        .where(ST_Intersects(location, bin_geom))
                        .column(bin_key)
                        .group_by(bin_key))
        return insert_columns, select_query
Пример #8
0
    def build_query_to_populate(self, query, full_table, aggregate_table):
        """Return (insert_columns, select_query) binning rows by geometry intersection.

        When join_custom_data is set, measurement rows are left-joined to the
        user-submitted "extra_data" table on bigquery_key, and a verified
        extra-data location overrides the measured one.
        """
        insert_columns = [aggregate_table.c.join_key]
        fk = Column(self.key, Integer)
        geom = Column(self.geometry_column, Geometry())
        bins_table = Table(self.table, full_table.metadata, fk, geom)

        if self.join_custom_data: 
            extra_data = Table("extra_data", full_table.metadata, 
                    Column("timestamp", DateTime),
                    Column("verified", Boolean),
                    Column("bigquery_key", String),
                    Column("connection_type", String),
                    Column("advertised_download", Integer),
                    Column("actual_download", Float),
                    Column("advertised_upload", Integer),
                    Column("actual_upload", Float),
                    Column("min_rtt", Integer),
                    Column("location_type", String),
                    Column("cost_of_service", Integer),
                    Column("location", Geometry("Point", srid=4326)),
                    keep_existing = True)

            # Left outer join so rows without extra data are kept.
            joining = join(full_table, extra_data,
                    and_(extra_data.c.bigquery_key == full_table.c.bigquery_key),
                    isouter = True)
            query = query.select_from(joining)
            # Prefer verified extra-data location; fall back to measured one.
            location = case([(extra_data.c.verified, func.coalesce(extra_data.c.location, full_table.c.location))], else_ = full_table.c.location)
        else:
            location = full_table.c.location

        select_query = (query.select_from(bins_table)
             .where(ST_Intersects(location, geom))
             .column(fk)
             .group_by(fk))
        return insert_columns, select_query
Пример #9
0
class Developer(db.Model):
    """A game developer, linked to games directly and to genres through games."""

    # Columns exposed to full-text search (project convention; comma-separated).
    __ftcolumns__ = 'name, raw_desc, website'

    id = db.Column(db.Integer, primary_key=True)

    name = db.Column(db.String(4096))
    image = db.Column(db.String(4096))
    desc = db.Column(db.Text)
    raw_desc = db.Column(db.Text)
    website = db.Column(db.String(4096))

    games = db.relationship('Game', secondary=game_developer_assoc, back_populates='developers')

    # Genres reached through this developer's games; viewonly because the
    # secondary is a joined selectable rather than a plain association table.
    genres = db.relationship('Genre',
                             viewonly=True,
                             secondary=join(game_developer_assoc, game_genre_assoc,
                                            game_developer_assoc.c.game_id == game_genre_assoc.c.game_id).alias())

    def json(self):
        """Serialize this developer and id/name pairs of its relations to a JSON-ready dict."""
        return {'id': self.id,
                'img': self.image,
                'name': self.name,
                'desc': self.desc,
                'games': [(game.id, game.primary_name) for game in self.games],
                'genres': [(genre.id, genre.name) for genre in self.genres],
                'website': self.website
                }
Пример #10
0
    def _build_query(self, table, filter_values):
        """Build the aggregate report query over `table` for the filter values.

        Joins two aliased subselects: 'x' (per-document rows with a maxmin
        aggregate) and 'y' (per-group-case gender aggregate), then counts
        documents per group with HAVING clauses built from self.filters.
        """
        having = []
        filter_cols = []
        external_cols = _get_grouping(filter_values)

        # Collect filter objects for the HAVING clause; pseudo-filters
        # handled separately below are skipped.
        for fil in self.filters:
            if isinstance(fil, ANDFilter):
                filter_cols.append(fil.filters[0].column_name)
                having.append(fil)
            elif isinstance(fil, RawFilter):
                having.append(fil)
            elif fil.column_name not in ['group', 'gender', 'group_leadership', 'disaggregate_by',
                                         'table_card_group_by']:
                if fil.column_name not in external_cols and fil.column_name != 'maxmin':
                    filter_cols.append(fil.column_name)
                having.append(fil)

        # Group-level HAVING clause for the 'y' subselect; the three group
        # filters are mutually exclusive.
        group_having = ''
        having_group_by = []
        if ('disaggregate_by' in filter_values and filter_values['disaggregate_by'] == 'group') or \
                (filter_values.get('table_card_group_by') == 'group_leadership'):
            having_group_by.append('group_leadership')
        elif 'group_leadership' in filter_values and filter_values['group_leadership']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) " \
                           "= :group_leadership and group_leadership=\'Y\'"
            having_group_by.append('group_leadership')
            filter_cols.append('group_leadership')
        elif 'gender' in filter_values and filter_values['gender']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) = :gender"

        table_card_group = []
        if 'group_name' in self.group_by:
            table_card_group.append('group_name')
        # 'x': per-document rows plus the maxmin aggregate used in grouping.
        s1 = alias(select([table.c.doc_id, table.c.group_case_id, table.c.group_name, table.c.group_id,
                           (sqlalchemy.func.max(table.c.prop_value) +
                            sqlalchemy.func.min(table.c.prop_value)).label('maxmin')] + filter_cols +
                          external_cols, from_obj=table,
                          group_by=([table.c.doc_id, table.c.group_case_id, table.c.group_name, table.c.group_id] +
                                    filter_cols + external_cols)), name='x')
        # 'y': per-group-case gender aggregate, restricted by group_having.
        s2 = alias(
            select(
                [table.c.group_case_id,
                 sqlalchemy.cast(
                     cast(func.max(table.c.gender), Integer) + cast(func.min(table.c.gender), Integer), VARCHAR
                 ).label('gender')] + table_card_group,
                from_obj=table,
                group_by=[table.c.group_case_id] + table_card_group + having_group_by, having=group_having
            ), name='y'
        )
        # Rewrite string group-by entries to columns of the 'x' alias.
        group_by = list(self.group_by)
        if 'group_case_id' in group_by:
            group_by[group_by.index('group_case_id')] = s1.c.group_case_id
            group_by[group_by.index('group_name')] = s1.c.group_name
        return select(
            [sqlalchemy.func.count(s1.c.doc_id).label(self.key)] + group_by,
            group_by=[s1.c.maxmin] + filter_cols + group_by,
            having=AND(having).build_expression(s1),
            from_obj=join(s1, s2, s1.c.group_case_id == s2.c.group_case_id)
        ).params(filter_values)
Пример #11
0
    def rebuild_idnum_index(cls, session: SqlASession,
                            indexed_at_utc: Pendulum) -> None:
        """
        Rebuilds the index entirely. Uses SQLAlchemy Core (not ORM) for speed.

        Deletes all existing index rows, then repopulates via a single
        INSERT ... FROM SELECT joining ID numbers to their patients.

        Args:
            session: an SQLAlchemy Session
            indexed_at_utc: current time in UTC
        """
        log.info("Rebuilding patient ID number index")
        # noinspection PyUnresolvedReferences
        indextable = PatientIdNumIndexEntry.__table__  # type: Table
        indexcols = indextable.columns
        # noinspection PyUnresolvedReferences
        idnumtable = PatientIdNum.__table__  # type: Table
        idnumcols = idnumtable.columns
        # noinspection PyUnresolvedReferences
        patienttable = Patient.__table__  # type: Table
        patientcols = patienttable.columns

        # Delete all entries
        session.execute(
            indextable.delete()
        )

        # Create new ones
        # noinspection PyProtectedMember,PyPep8
        session.execute(
            indextable.insert().from_select(
                # Target:
                [indexcols.idnum_pk,
                 indexcols.indexed_at_utc,
                 indexcols.patient_pk,
                 indexcols.which_idnum,
                 indexcols.idnum_value],
                # Source:
                (
                    select([idnumcols._pk,
                            literal(indexed_at_utc),
                            patientcols._pk,
                            idnumcols.which_idnum,
                            idnumcols.idnum_value])
                    .select_from(
                        # Match ID numbers to patients within the same
                        # device and era.
                        join(
                            idnumtable,
                            patienttable,
                            and_(
                                idnumcols._device_id == patientcols._device_id,
                                idnumcols._era == patientcols._era,
                                idnumcols.patient_id == patientcols.id,
                            )
                        )
                    )
                    # '== True' is intentional: SQL comparison, not Python.
                    .where(idnumcols._current == True)
                    .where(patientcols._current == True)
                )
            )
        )
Пример #12
0
def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    # connection = op.get_bind()
    # Relationships whose destination is a "Snapshot" of the source's child type.
    dst_snapshot_join = join(relationships, snapshots,
                             relationships.c.destination_id == snapshots.c.id)
    ids_dst = select([relationships.c.id]).select_from(dst_snapshot_join)\
      .where(and_(snapshots.c.child_type == relationships.c.source_type,  # noqa
             relationships.c.destination_type == "Snapshot"))

    # Mirror case: the source side is the "Snapshot".
    src_snapshot_join = join(relationships, snapshots,
                             relationships.c.source_id == snapshots.c.id)
    ids_src = select([relationships.c.id]).select_from(src_snapshot_join)\
      .where(and_(snapshots.c.child_type == relationships.c.destination_type,  # noqa
             relationships.c.source_type == "Snapshot"))

    # Delete every relationship matched by either direction.
    doomed = ids_dst.union_all(ids_src).alias("delete_it").select()
    op.execute(relationships.delete().where(relationships.c.id.in_(doomed)))
Пример #13
0
def upgrade():
  """Upgrade database schema and/or data, creating a new revision."""
  # connection = op.get_bind()
  # Relationships whose destination is a "Snapshot" of the source's child type.
  src_dst_snapshots = join(relationships, snapshots,
                           relationships.c.destination_id == snapshots.c.id)
  sel1 = select([relationships.c.id]).select_from(src_dst_snapshots)\
    .where(and_(snapshots.c.child_type == relationships.c.source_type,  # noqa
           relationships.c.destination_type == "Snapshot"))

  # Mirror case: the source side is the "Snapshot".
  dst_src_snapshots = join(relationships, snapshots,
                           relationships.c.source_id == snapshots.c.id)
  sel2 = select([relationships.c.id]).select_from(dst_src_snapshots)\
    .where(and_(snapshots.c.child_type == relationships.c.destination_type,  # noqa
           relationships.c.source_type == "Snapshot"))

  # Delete every relationship matched by either direction.
  op.execute(relationships.delete().where(relationships.c.id.in_(
      sel1.union_all(sel2).alias("delete_it").select()
  )))
Пример #14
0
def _last_build():
    """Relationship resolving to the build with the highest id per package."""
    # Subquery of the maximum Build.id for each package.
    latest_ids = (select([func.max(Build.id).label('mx')])
                  .group_by(Build.package_id)
                  .alias())
    # Builds restricted to those maximal ids.
    latest_builds = (select([Build])
                     .select_from(join(Build, latest_ids,
                                       Build.id == latest_ids.c.mx))
                     .alias())
    return relationship(
        mapper(Build, latest_builds, non_primary=True),
        uselist=False,
        primaryjoin=(Package.id == latest_builds.c.package_id))
Пример #15
0
    def migrate_data(self):
        """Run the history-entry migrator over every joined proposal-history row."""
        joined = join(
            proposalhistory_table, proposal_table,
            proposalhistory_table.c.proposal_id == proposal_table.c.id)
        query = select(
            [proposalhistory_table, proposal_table]).select_from(joined)
        for row in self.execute(query).fetchall():
            PropsosalHistoryEntryMigrator(row).run()
Пример #16
0
    def migrate_data(self):
        """Run the history-entry migrator over every joined proposal-history row."""
        # Join each history row to its proposal so the migrator sees both.
        join_expr = join(
            proposalhistory_table, proposal_table,
            proposalhistory_table.c.proposal_id == proposal_table.c.id)
        statement = select([proposalhistory_table,
                            proposal_table]).select_from(join_expr)
        result_rows = self.execute(statement).fetchall()

        for result_row in result_rows:
            PropsosalHistoryEntryMigrator(result_row).run()
Пример #17
0
def _resource_consumption_stats_view():
    """Selectable with per-package/arch Koji task time and its share of the total."""
    time_difference_expr = func.sum(KojiTask.finished - KojiTask.started)
    time_difference = extract('EPOCH', time_difference_expr)
    # Scalar subquery: total seconds spent across all Koji tasks.
    time_difference_all = select([time_difference]).select_from(KojiTask)
    return (
        select([
            Package.name,
            KojiTask.arch,
            time_difference_expr.label('time'),
            cast(time_difference / time_difference_all, Float).label('time_percentage'),
        ])
        .select_from(
            join(
                join(Package, Build, Package.id == Build.package_id),
                KojiTask,
            )
        )
        .group_by(Package.name, KojiTask.arch)
    )
Пример #18
0
def unsubscribe(path):
    """Leave the socket room for the file identified by a 'project/file' path."""
    path = str(path)
    project_name, file_name = path.split('/', 1)
    file_id = (db.session.query(File.id)
               .select_from(join(File, Project))
               .filter(File.name == file_name,
                       Project.name == project_name,
                       Project.user_id == current_user.id)
               .scalar())
    if file_id:
        room = "file:" + str(file_id)
        leave_room(room, namespace="/files")
        print('>User {} unsubscribed from {} ({})'.format(
            current_user.username, path, room))
Пример #19
0
    def get_projects(self):
        """Return Project instances linked to this user via user_projects."""
        result = join(
            db.user_projects, db.projects,
            db.user_projects.c.project_id == db.projects.c.id
        ).select(use_labels=True).where(
            db.user_projects.c.user_id == self.id
        ).execute()
        return [Project.init(**row) for row in result.fetchall()]
Пример #20
0
    def get_all_for_project(cls, project_id):
        """Return User instances linked to the project via user_projects."""
        result = join(
            db.user_projects, db.users,
            db.user_projects.c.user_id == db.users.c.id
        ).select(use_labels=True).where(
            db.user_projects.c.project_id == project_id
        ).execute()
        return [User.init(**row) for row in result.fetchall()]
Пример #21
0
    def access_list_paths(self,
                          member,
                          prefix=None,
                          include_owned=False,
                          include_containers=True):
        """Return the list of paths granted to member.

        Keyword arguments:
        prefix -- return only paths starting with prefix (default None)
        include_owned -- return also paths owned by member (default False)
        include_containers -- return also container paths owned by member
                              (default True)

        """

        xfeatures_xfeaturevals = self.xfeatures.join(self.xfeaturevals)

        # Grants match the member itself, any group the member belongs to
        # (as 'owner:name'), or the '*' wildcard.
        selectable = (self.groups.c.owner + ':' + self.groups.c.name)
        member_groups = select([selectable.label('value')],
                               self.groups.c.member == member)
        member_value = select([literal(member).label('value')])
        # Renamed from 'any' so the builtin is not shadowed.
        wildcard = select([literal('*').label('value')])

        grantees = union(member_groups, member_value, wildcard).alias()
        inner_join = join(xfeatures_xfeaturevals, grantees,
                          self.xfeaturevals.c.value == grantees.c.value)
        s = select([self.xfeatures.c.path], from_obj=[inner_join]).distinct()
        if prefix:
            # Named function instead of a lambda bound to a name (PEP 8);
            # escapes LIKE metacharacters in the prefix before matching.
            def like(p):
                return self.xfeatures.c.path.like(
                    self.escape_like(p) + '%', escape=ESCAPE_CHAR)
            s = s.where(
                or_(*map(like,
                         self.access_inherit(prefix) or [prefix])))
        r = self.conn.execute(s)
        paths = [row[0] for row in r.fetchall()]
        r.close()

        if include_owned:
            container_nodes = select(
                [self.nodes.c.node],
                self.nodes.c.parent == self.node_lookup(member))
            condition = self.nodes.c.parent.in_(container_nodes)
            if include_containers:
                condition = or_(condition,
                                self.nodes.c.node.in_(container_nodes))
            s = select([self.nodes.c.path], condition)
            r = self.conn.execute(s)
            # Append only paths not already collected (preserves order).
            paths += [row[0] for row in r.fetchall() if row[0] not in paths]
            r.close()
        return paths
Пример #22
0
    def _collect_joins(self):
        """Collect joins and register joined tables. All tables used should be collected in this
        function."""

        self.logger.info("collecting joins and registering tables...")

        self.tables = {}
        self.expression = self.fact_table
        self.tables[self.fact_name] = self.fact_table

        if not self.cube.joins:
            self.logger.info("no joins")
            return

        # 'join_spec' avoids shadowing the sqlalchemy 'join' helper used
        # elsewhere in this module.
        for join_spec in self.cube.joins:
            self.logger.debug("join: %s" % join_spec)

            # Get master and detail table names and their respective keys that
            # will be used for join; a missing master defaults to the fact table.
            master_name, master_key = self.split_field(join_spec["master"])
            if not master_name:
                master_name = self.fact_name

            detail_name, detail_key = self.split_field(join_spec["detail"])
            alias = join_spec.get("alias")

            if not detail_name or detail_name == self.fact_name:
                raise ValueError(
                    "Detail table name should be present and should not be a fact table"
                )

            master_table = self.table(master_name)
            detail_table = self.register_table(detail_name,
                                               alias=alias,
                                               schema=self.schema)

            # Catch only the missing-column case; a bare 'except:' would
            # silently swallow unrelated errors (e.g. KeyboardInterrupt).
            try:
                master_column = master_table.c[master_key]
            except KeyError:
                raise Exception('Unable to find master key "%s"."%s" ' %
                                (master_name, master_key))
            try:
                detail_column = detail_table.c[detail_key]
            except KeyError:
                # Fixed message: this lookup is for the *detail* key, not the
                # master key as the original text claimed.
                raise Exception('Unable to find detail key "%s"."%s" ' %
                                (detail_name, detail_key))

            onclause = master_column == detail_column

            self.expression = expression.join(self.expression,
                                              detail_table,
                                              onclause=onclause)
    def test_sql_expression_join(self):
        """An inner join on matching columns compiles to INNER JOIN ... ON."""
        left = self._make_table('table_1',
                                Column('x', types.UInt32, primary_key=True))
        right = self._make_table('table_2',
                                 Column('x', types.UInt32, primary_key=True))

        # 'joined' rather than 'join' so the sqlalchemy helper isn't shadowed.
        joined = expression.join(left,
                                 right,
                                 onclause=left.c.x == right.c.x,
                                 isouter=False)

        self.assertEqual(
            self.compile(joined),
            'table_1 INNER JOIN table_2 ON table_1.x = table_2.x')
Пример #24
0
def associate(config):
    """Backfill extra_data.bigquery_test_id from matching cached records.

    Joins the cache table to extra_data on bigquery_key, strips the
    ':<port>.<side>_snaplog.gz' suffix from each record's test_id, and
    writes the result back to the matching extra_data row.
    """
    engine = create_engine(config.database_uri)
    metadata = MetaData()
    records = config.make_cache_table(metadata)
    extra_data = config.make_extra_data_table(metadata)
    metadata.create_all(engine)

    joining = join(records, extra_data, and_(extra_data.c.bigquery_key == records.c.bigquery_key))
    query = select([joining, extra_data.c.id.label('extra_data_id')])
    results = engine.execute(query)
    # Raw string: '\.' in a plain literal is an invalid escape sequence
    # (DeprecationWarning today, a SyntaxError in future Python versions).
    p = re.compile(r'^(.*):[0-9]{4,5}\.[cs]2[cs]_snaplog\.gz$')
    for result in results.fetchall():
        test_id = p.match(result.test_id).group(1)
        query = extra_data.update().where(extra_data.c.id == result.extra_data_id).values(bigquery_test_id = test_id)
        engine.execute(query)
Пример #25
0
def associate(config):
    """Backfill ``extra_data.bigquery_test_id`` from the cached records table.

    Joins the cache table and the extra-data table on ``bigquery_key``,
    strips the ``:<port>.<c|s>2<c|s>_snaplog.gz`` suffix from each record's
    test_id, and writes the bare test id back onto the matching extra_data
    row.
    """
    engine = create_engine(config.database_uri)
    metadata = MetaData()
    records = config.make_cache_table(metadata)
    extra_data = config.make_extra_data_table(metadata)
    metadata.create_all(engine)

    joining = join(records, extra_data,
                   and_(extra_data.c.bigquery_key == records.c.bigquery_key))
    query = select([joining, extra_data.c.id.label('extra_data_id')])
    results = engine.execute(query)
    # Raw string: '\.' inside a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python) even though re happens to accept it.
    p = re.compile(r'^(.*):[0-9]{4,5}\.[cs]2[cs]_snaplog\.gz$')
    for result in results.fetchall():
        match = p.match(result.test_id)
        if match is None:
            # Skip rows whose test_id does not follow the snaplog naming
            # scheme rather than crashing the whole backfill run with an
            # AttributeError on None.
            continue
        test_id = match.group(1)
        query = extra_data.update().where(
            extra_data.c.id == result.extra_data_id).values(
                bigquery_test_id=test_id)
        engine.execute(query)
Пример #26
0
    def access_list_paths(self, member, prefix=None, include_owned=False,
                          include_containers=True):
        """Return the list of paths granted to member.

        Keyword arguments:
        prefix -- return only paths starting with prefix (default None)
        include_owned -- return also paths owned by member (default False)
        include_containers -- return also container paths owned by member
                              (default True)

        """

        xfeatures_xfeaturevals = self.xfeatures.join(self.xfeaturevals)

        # Grant values that can match this member: every "owner:group" the
        # member belongs to, the member name itself, and the '*' wildcard.
        selectable = (self.groups.c.owner + ':' + self.groups.c.name)
        member_groups = select([selectable.label('value')],
                               self.groups.c.member == member)

        members = select([literal(member).label('value')])
        any = select([literal('*').label('value')])  # NOTE: shadows builtins.any

        u = union(member_groups, members, any).alias()
        # Paths whose stored feature value matches one of the grant values.
        inner_join = join(xfeatures_xfeaturevals, u,
                          self.xfeaturevals.c.value == u.c.value)
        s = select([self.xfeatures.c.path], from_obj=[inner_join]).distinct()
        if prefix:
            like = lambda p: self.xfeatures.c.path.like(
                self.escape_like(p) + '%', escape=ESCAPE_CHAR)
            # Match the prefix itself or any inherited (parent) prefix.
            s = s.where(or_(*map(like,
                                 self.access_inherit(prefix) or [prefix])))
        r = self.conn.execute(s)
        l = [row[0] for row in r.fetchall()]
        r.close()

        if include_owned:
            # Nodes directly under the member's own node.
            container_nodes = select(
                [self.nodes.c.node],
                self.nodes.c.parent == self.node_lookup(member))
            condition = self.nodes.c.parent.in_(container_nodes)
            if include_containers:
                condition = or_(condition,
                                self.nodes.c.node.in_(container_nodes))
            s = select([self.nodes.c.path], condition)
            r = self.conn.execute(s)
            # Append only paths not already collected above (dedupe).
            l += [row[0] for row in r.fetchall() if row[0] not in l]
            r.close()
        return l
Пример #27
0
    def get_existing_id_lookup(self):
        """Map each url row's identifier to its existing database id.

        Joins ``urls`` to ``contacts`` so the identifier can draw on the
        owning contact's former contact id.
        """
        url_table = table(
            "urls", column('id'), column('label'), column('contact_id'))
        contact_table = table(
            "contacts",
            column('id'), column('former_contact_id'))

        joined = join(url_table, contact_table,
                      url_table.c.contact_id == contact_table.c.id)
        stmt = select([
            url_table.c.id, url_table.c.label, url_table.c.contact_id,
            contact_table.c.former_contact_id]).select_from(joined)

        lookup = {}
        for gever_row in self.db_session.execute(stmt):
            lookup[self.get_identifier(gever_row)] = gever_row.id
        return lookup
Пример #28
0
    def get_existing_id_lookup(self):
        """Map each address row's identifier to its existing database id.

        Joins ``addresses`` to ``contacts`` so the identifier can draw on
        the owning contact's former contact id.
        """
        address_table = table("addresses", column('id'), column('label'),
                              column('contact_id'))
        contact_table = table("contacts", column('id'),
                              column('former_contact_id'))

        joined = join(address_table, contact_table,
                      address_table.c.contact_id == contact_table.c.id)
        stmt = select([
            address_table.c.id, address_table.c.label,
            address_table.c.contact_id, contact_table.c.former_contact_id
        ]).select_from(joined)

        lookup = {}
        for gever_row in self.db_session.execute(stmt):
            lookup[self.get_identifier(gever_row)] = gever_row.id
        return lookup
Пример #29
0
    def _collect_joins(self):
        """Collect joins and register joined tables. All tables used should be collected in this
        function."""

        self.logger.info("collecting joins and registering tables...")

        self.tables = {}
        self.expression = self.fact_table
        self.tables[self.fact_name] = self.fact_table

        if not self.cube.joins:
            self.logger.info("no joins")
            return

        # Named join_spec (not "join") so the loop variable does not shadow
        # the SQL expression helper of the same name.
        for join_spec in self.cube.joins:
            self.logger.debug("join: %s" % join_spec)

            # Get master and detail table names and their respective keys
            # that will be used for the join condition.
            master_name, master_key = self.split_field(join_spec["master"])
            if not master_name:
                # An unqualified master key refers to the fact table.
                master_name = self.fact_name

            detail_name, detail_key = self.split_field(join_spec["detail"])
            alias = join_spec.get("alias")

            if not detail_name or detail_name == self.fact_name:
                raise ValueError("Detail table name should be present and should not be a fact table")

            master_table = self.table(master_name)
            detail_table = self.register_table(detail_name, alias=alias, schema=self.schema)

            # Narrowed from bare except: a missing column raises KeyError,
            # and that is the only failure we want to translate here.
            try:
                master_column = master_table.c[master_key]
            except KeyError:
                raise Exception('Unable to find master key "%s"."%s" ' % (master_name, master_key))
            try:
                detail_column = detail_table.c[detail_key]
            except KeyError:
                # Bug fix: this message previously claimed "master key" while
                # reporting the detail table/key.
                raise Exception('Unable to find detail key "%s"."%s" ' % (detail_name, detail_key))

            onclause = master_column == detail_column

            self.expression = expression.join(self.expression, detail_table, onclause=onclause)
Пример #30
0
class Event(db.Model):
    """A gaming event, linked to games and (directly or via games) genres."""

    # Columns indexed for full-text search.
    __ftcolumns__ = 'name, raw_desc, location, link'

    id = db.Column(db.Integer, primary_key=True)

    name = db.Column(db.String(4096))
    desc = db.Column(db.Text)
    raw_desc = db.Column(db.Text)  # presumably the unprocessed source of desc — TODO confirm

    location = db.Column(db.String(4096))
    link = db.Column(db.String(4096))
    time = db.Column(db.DateTime)

    games = db.relationship('Game', secondary=event_game_assoc)
    direct_genres = db.relationship('Genre', secondary=event_genre_assoc)
    # Genres reached transitively through this event's games: join the
    # event->game and game->genre association tables on game_id.
    indirect_genres = db.relationship('Genre',
                                      viewonly=True,
                                      secondary=join(event_game_assoc, game_genre_assoc,
                                                     event_game_assoc.c.game_id == game_genre_assoc.c.game_id).alias())

    def json(self):
        """Serialize the event to a JSON-ready dict.

        The image is chosen at random from the images of the event's games
        and genres, skipping the known placeholder URL; the placeholder is
        used as a fallback when no other image is available.
        """
        image = 'https://cf.geekdo-images.com/images/pic1657689_t.jpg'
        images = []
        images.extend(game.image for game in self.games if game.image != image)
        images.extend(genre.image for genre in self.genres if genre.image != image)
        if images:
            image = images[random.randrange(len(images))]

        return {'id': self.id,
                'name': self.name,
                'img': image,
                'desc': self.desc,
                'location': self.location,
                'time': self.time,
                'games': [(game.id, game.primary_name) for game in self.games],
                'genres': [(genre.id, genre.name) for genre in self.genres],
                'link': self.link
                }

    @hybrid_property
    def genres(self):
        # Prefer explicitly assigned genres; fall back to genres implied
        # by the event's games.
        return self.direct_genres or self.indirect_genres
Пример #31
0
    def build_query_to_populate(self, query, full_table, aggregate_table):
        """Build the insert columns and select used to populate aggregate_table.

        Spatially buckets rows of full_table into geometry bins and, when
        configured, joins user-submitted "extra_data", preferring a verified
        user-supplied location over the measured one.

        Returns a (insert_columns, select_query) pair.
        """
        insert_columns = [aggregate_table.c.join_key]
        fk = Column(self.key, Integer)
        geom = Column(self.geometry_column, Geometry())
        bins_table = Table(self.table, full_table.metadata, fk, geom)

        if self.join_custom_data:
            # keep_existing=True: reuse the table object if it is already
            # registered in this metadata instead of redefining it.
            extra_data = Table("extra_data",
                               full_table.metadata,
                               Column("timestamp", DateTime),
                               Column("verified", Boolean),
                               Column("bigquery_key", String),
                               Column("bigquery_test_id", String),
                               Column("connection_type", String),
                               Column("advertised_download", Integer),
                               Column("actual_download", Float),
                               Column("advertised_upload", Integer),
                               Column("actual_upload", Float),
                               Column("min_rtt", Integer),
                               Column("location_type", String),
                               Column("cost_of_service", Integer),
                               Column("location", Geometry("Point",
                                                           srid=4326)),
                               keep_existing=True)

            # Left join on test-id prefix: extra_data stores a truncated id,
            # so compare it to the same-length prefix of full_table.test_id.
            joining = join(full_table,
                           extra_data,
                           and_(extra_data.c.bigquery_test_id == func.left(
                               full_table.c.test_id,
                               func.length(extra_data.c.bigquery_test_id))),
                           isouter=True)
            query = query.select_from(joining)
            # Use the user-supplied location only when verified; otherwise
            # fall back to the measured location.
            location = case([(extra_data.c.verified,
                              func.coalesce(extra_data.c.location,
                                            full_table.c.location))],
                            else_=full_table.c.location)
        else:
            location = full_table.c.location

        # Group rows by the bin whose geometry intersects the location.
        select_query = (query.select_from(bins_table).where(
            ST_Intersects(location, geom)).column(fk).group_by(fk))
        return insert_columns, select_query
Пример #32
0
def joined(mapped, other):
    '''
    Creates a joined mapped for the provided mapped class with other class. The joined mapped class will be cached on
    the other class.
    
    @param mapped: class
        The mapped class to create the joined mapped class with.
    @param other: class
        The other class to create the joined mapping with.
    @return: class
        The joined mapped class.
    '''
    assert isclass(mapped), 'Invalid mapped class %s' % mapped
    assert isclass(other), 'Invalid other class %s' % other

    name = '%s%s' % (mapped.__name__, other.__name__)
    try:
        # Return the cached joined class if one was created before.
        # NOTE(review): the docstring says the cache lives on the *other*
        # class, but it is actually stored on `mapped` — confirm intent.
        return getattr(mapped, name)
    except AttributeError:
        pass

    properties = {}
    mapping, omapping = mappingFor(mapped), mappingFor(other)
    # Prefix each side's column properties with its class name so columns
    # from the two tables cannot collide on the joined mapping.
    for cp in mapping.iterate_properties:
        if not isinstance(cp, ColumnProperty) or not cp.key: continue
        assert isinstance(cp, ColumnProperty)
        properties['%s_%s' % (mapped.__name__, cp.key)] = column_property(
            getattr(mapping.c, cp.key))
    for cp in omapping.iterate_properties:
        if not isinstance(cp, ColumnProperty) or not cp.key: continue
        assert isinstance(cp, ColumnProperty)
        properties['%s_%s' % (other.__name__, cp.key)] = column_property(
            getattr(omapping.c, cp.key))

    # Map a fresh empty class onto the join of the two underlying tables.
    clazz = type(name, (object, ), {})
    mapper(clazz,
           join(tableFor(mapped), tableFor(other)),
           properties=properties)
    setattr(mapped, name, clazz)

    return clazz
Пример #33
0
 def _loadAttributes(self):
     """Load this inventory class's attribute metadata from the database.

     Joins InventoryClassAttributes to InventoryClasses, filters to this
     instance's namespace and class name, then records the class id and one
     metadata dict per attribute key into self._attributes.
     """
     for row in self._connection.execute(
             ex.select([
                 md.InventoryClasses.c.class_namespace,
                 md.InventoryClasses.c.class_name,
                 md.InventoryClassAttributes
             ]).select_from(
                 ex.join(
                     md.InventoryClassAttributes, md.InventoryClasses,
                     md.InventoryClassAttributes.c.class_id ==
                     md.InventoryClasses.c.class_id)).where(
                         and_(
                             md.InventoryClasses.c.class_namespace ==
                             self._namespace,
                             md.InventoryClasses.c.class_name ==
                             self._class_name))):
         # Same class_id on every row; repeated assignment is harmless.
         self._classId = row["class_id"]
         self._attributes[row["attr_key"]] = {}
         for i in [
                 "attr_name", "attr_type", "attr_default", "attr_mandatory"
         ]:
             self._attributes[row["attr_key"]][i] = row[i]
Пример #34
0
    def _build_query(self, table, filter_values):
        """Build the aggregate select for this column from the report filters.

        Joins two derived tables over "fluff_FarmerRecordFluff": x (per-doc
        maxmin of prop_value) and y (per-group gender aggregate), then counts
        docs grouped by maxmin plus the configured grouping columns.
        """
        having = []
        filter_cols = []
        external_cols = _get_grouping(filter_values)

        for fil in self.filters:
            if isinstance(fil, ANDFilter):
                filter_cols.append(fil.filters[0].column_name)
                having.append(fil)
            elif fil.column_name not in ['group', 'gender', 'group_leadership', 'disaggregate_by',
                                         'table_card_group_by']:
                # Pseudo-columns above are handled via the group_having SQL
                # below rather than as plain filter columns.
                if fil.column_name not in external_cols and fil.column_name != 'maxmin':
                    filter_cols.append(fil.column_name)
                having.append(fil)

        # Raw HAVING fragment applied to the y subquery; bound parameters
        # (:gender, :group_leadership) are supplied via .params() below.
        group_having = ''
        having_group_by = []
        if ('disaggregate_by' in filter_values and filter_values['disaggregate_by'] == 'group') or ('table_card_group_by' in filter_values and filter_values['table_card_group_by']):
            group_having = "group_leadership=\'Y\'"
            having_group_by.append('group_leadership')
        elif 'group_leadership' in filter_values and filter_values['group_leadership']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) = :group_leadership and group_leadership=\'Y\'"
            having_group_by.append('group_leadership')
            filter_cols.append('group_leadership')
        elif 'gender' in filter_values and filter_values['gender']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) = :gender"

        table_card_group = []
        if 'group_name' in self.group_by:
            table_card_group.append('group_name')
        s1 = alias(select(['doc_id', 'group_id', 'MAX(prop_value) + MIN(prop_value) as maxmin'] + filter_cols + external_cols,
                                from_obj='"fluff_FarmerRecordFluff"',
                                group_by=['doc_id', 'group_id'] + filter_cols + external_cols), name='x')
        s2 = alias(select(['group_id', '(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) as gender'] + table_card_group, from_obj='"fluff_FarmerRecordFluff"',
                                group_by=['group_id'] + table_card_group + having_group_by, having=group_having), name='y')
        return select(['COUNT(x.doc_id) as %s' % self.key] + self.group_by,
               group_by=['maxmin'] + filter_cols + self.group_by,
               having=AND(having).build_expression(table),
               from_obj=join(s1, s2, s1.c.group_id==s2.c.group_id)).params(filter_values)
Пример #35
0
def get_trip_pk_to_path_map(route_pk):
    """
    Get a map of trip PK to the path of the trip for every trip in a route.

    The path here is a list of stop PKs.

    :param route_pk: the route's PK
    :return: map described above
    """
    statement = (
        sql.select(models.TripStopTime.trip_pk, models.TripStopTime.stop_pk)
        .select_from(sql.join(models.TripStopTime, models.Trip))
        .where(models.Trip.route_pk == route_pk)
        .order_by(models.TripStopTime.trip_pk,
                  models.TripStopTime.stop_sequence)
    )
    session = dbconnection.get_session()
    # Rows arrive ordered by trip then stop sequence, so appending in
    # order yields each trip's path.
    path_by_trip = {}
    for trip_pk, stop_pk in session.execute(statement):
        path_by_trip.setdefault(trip_pk, []).append(stop_pk)
    return path_by_trip
Пример #36
0
def _get_select_waypoints_for_outings_aggregated():
    """ Returns a select which retrieves for every outing the ids for the
    waypoints that are associated to routes associated to the outing. It
    also returns the parent and grand-parent of waypoints, so that when
    searching for outings for a waypoint, you also get the outings associated
    to child waypoints.

    E.g. when searching for outings in Calanques, you also get the outings
    associated to sub-sectors.
    """
    outing_type = text('\'' + OUTING_TYPE + '\'')
    route_type = text('\'' + ROUTE_TYPE + '\'')
    all_waypoints_for_routes = _get_select_waypoints_for_routes()
    # One (outing_id, waypoint_id) row per waypoint of every route that is
    # the parent of an outing in the Association table.
    waypoints_for_outings = \
        select([
            Association.child_document_id.label('outing_id'),
            all_waypoints_for_routes.c.waypoint_id
        ]). \
        select_from(join(
            Association,
            all_waypoints_for_routes,
            and_(
                Association.parent_document_id ==
                all_waypoints_for_routes.c.route_id,
                Association.parent_document_type == route_type,
                Association.child_document_type == outing_type
            ))). \
        cte('waypoints_for_outings')
    # Aggregate the waypoint ids into one array per outing.
    return \
        select([
            waypoints_for_outings.c.outing_id.label('outing_id'),
            func.array_agg(
                waypoints_for_outings.c.waypoint_id,
                type_=postgresql.ARRAY(Integer)).label('waypoint_ids')
        ]). \
        select_from(waypoints_for_outings). \
        group_by(waypoints_for_outings.c.outing_id)
Пример #37
0
def _get_select_waypoints_for_outings_aggregated():
    """ Returns a select which retrieves for every outing the ids for the
    waypoints that are associated to routes associated to the outing. It
    also returns the parent and grand-parent of waypoints, so that when
    searching for outings for a waypoint, you also get the outings associated
    to child waypoints.

    E.g. when searching for outings in Calanques, you also get the outings
    associated to sub-sectors.
    """
    outing_type = text('\'' + OUTING_TYPE + '\'')
    route_type = text('\'' + ROUTE_TYPE + '\'')
    all_waypoints_for_routes = _get_select_waypoints_for_routes()
    # One (outing_id, waypoint_id) row per waypoint of every route that is
    # the parent of an outing in the Association table.
    waypoints_for_outings = \
        select([
            Association.child_document_id.label('outing_id'),
            all_waypoints_for_routes.c.waypoint_id
        ]). \
        select_from(join(
            Association,
            all_waypoints_for_routes,
            and_(
                Association.parent_document_id ==
                all_waypoints_for_routes.c.route_id,
                Association.parent_document_type == route_type,
                Association.child_document_type == outing_type
            ))). \
        cte('waypoints_for_outings')
    # Aggregate the waypoint ids into one array per outing.
    return \
        select([
            waypoints_for_outings.c.outing_id.label('outing_id'),
            func.array_agg(
                waypoints_for_outings.c.waypoint_id,
                type_=postgresql.ARRAY(Integer)).label('waypoint_ids')
        ]). \
        select_from(waypoints_for_outings). \
        group_by(waypoints_for_outings.c.outing_id)
Пример #38
0
def joined(mapped, other):
    '''
    Creates a joined mapped class for the provided mapped class with the other
    class, caching the result so repeated calls return the same class.

    @param mapped: class
        The mapped class to create the joined mapped class with.
    @param other: class
        The other class to create the joined mapping with.
    @return: class
        The joined mapped class.
    '''
    assert isclass(mapped), 'Invalid mapped class %s' % mapped
    assert isclass(other), 'Invalid other class %s' % other

    name = '%s%s' % (mapped.__name__, other.__name__)
    try:
        return getattr(mapped, name)
    except AttributeError:
        pass

    mapping, omapping = mappingFor(mapped), mappingFor(other)
    properties = {}
    # Prefix each side's column properties with its class name so the two
    # tables' columns cannot collide on the joined mapping.
    for prefix, source in ((mapped.__name__, mapping), (other.__name__, omapping)):
        for prop in source.iterate_properties:
            if not isinstance(prop, ColumnProperty) or not prop.key:
                continue
            properties['%s_%s' % (prefix, prop.key)] = column_property(
                getattr(source.c, prop.key))

    clazz = type(name, (object,), {})
    mapper(clazz, join(tableFor(mapped), tableFor(other)), properties=properties)
    setattr(mapped, name, clazz)
    return clazz
Пример #39
0
    def _build_query(self, table, filter_values):
        """Build the aggregate select for this column from the report filters.

        Joins two derived tables over ``table``: x (per-doc maxmin of
        prop_value) and y (per-group-case gender aggregate), then counts
        docs grouped by maxmin plus the configured grouping columns.
        """
        having = []
        filter_cols = []
        external_cols = _get_grouping(filter_values)

        for fil in self.filters:
            if isinstance(fil, ANDFilter):
                filter_cols.append(fil.filters[0].column_name)
                having.append(fil)
            elif isinstance(fil, RawFilter):
                having.append(fil)
            elif fil.column_name not in [
                    'group', 'gender', 'group_leadership', 'disaggregate_by',
                    'table_card_group_by'
            ]:
                # Pseudo-columns above are handled through group_having
                # below rather than as plain filter columns.
                if fil.column_name not in external_cols and fil.column_name != 'maxmin':
                    filter_cols.append(fil.column_name)
                having.append(fil)

        # Raw HAVING fragment applied to the y subquery; bound parameters
        # (:gender, :group_leadership) are supplied via .params() below.
        group_having = ''
        having_group_by = []
        if ('disaggregate_by' in filter_values and filter_values['disaggregate_by'] == 'group') or \
                (filter_values.get('table_card_group_by') == 'group_leadership'):
            having_group_by.append('group_leadership')
        elif 'group_leadership' in filter_values and filter_values[
                'group_leadership']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) " \
                           "= :group_leadership and group_leadership=\'Y\'"
            having_group_by.append('group_leadership')
            filter_cols.append('group_leadership')
        elif 'gender' in filter_values and filter_values['gender']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) = :gender"

        table_card_group = []
        if 'group_name' in self.group_by:
            table_card_group.append('group_name')
        # x: one row per doc with the combined max+min of prop_value.
        s1 = alias(select([
            table.c.doc_id, table.c.group_case_id, table.c.group_name,
            table.c.group_id,
            (sqlalchemy.func.max(table.c.prop_value) +
             sqlalchemy.func.min(table.c.prop_value)).label('maxmin')
        ] + filter_cols + external_cols,
                          from_obj=table,
                          group_by=([
                              table.c.doc_id, table.c.group_case_id,
                              table.c.group_name, table.c.group_id
                          ] + filter_cols + external_cols)),
                   name='x')
        # y: one row per group case with the combined gender aggregate.
        s2 = alias(select([
            table.c.group_case_id,
            sqlalchemy.cast(
                cast(func.max(table.c.gender), Integer) +
                cast(func.min(table.c.gender), Integer),
                VARCHAR).label('gender')
        ] + table_card_group,
                          from_obj=table,
                          group_by=[table.c.group_case_id] + table_card_group +
                          having_group_by,
                          having=group_having),
                   name='y')
        # Rewrite grouping names to refer to the x subquery's columns.
        group_by = list(self.group_by)
        if 'group_case_id' in group_by:
            group_by[group_by.index('group_case_id')] = s1.c.group_case_id
            group_by[group_by.index('group_name')] = s1.c.group_name
        return select([sqlalchemy.func.count(s1.c.doc_id).label(self.key)] +
                      group_by,
                      group_by=[s1.c.maxmin] + filter_cols + group_by,
                      having=AND(having).build_expression(s1),
                      from_obj=join(s1, s2, s1.c.group_case_id ==
                                    s2.c.group_case_id)).params(filter_values)
Пример #40
0
 def identity_join_r(cls):
     """Join the identity table to this class, restricted to live rows.

     NOTE: ``== None`` is intentional — SQLAlchemy renders it as IS NULL.
     """
     return join(cls.identity_table, cls,
         (cls.identity_table.c.id == cls.base_id) & (cls.tombstone_date == None))
Пример #41
0
 def identity_join_r(cls):
     """Join the identity table to this class, restricted to live rows.

     NOTE: ``== None`` is intentional — SQLAlchemy renders it as IS NULL.
     """
     return join(cls.identity_table, cls,
                 (cls.identity_table.c.id == cls.base_id) &
                 (cls.tombstone_date == None))
Пример #42
0
 def _loadAttributes(self):
     """Load this inventory class's attribute metadata from the database."""
     query = ex.select([
         md.InventoryClasses.c.class_namespace,
         md.InventoryClasses.c.class_name,
         md.InventoryClassAttributes,
     ]).select_from(
         ex.join(
             md.InventoryClassAttributes,
             md.InventoryClasses,
             md.InventoryClassAttributes.c.class_id ==
             md.InventoryClasses.c.class_id,
         )
     ).where(
         and_(
             md.InventoryClasses.c.class_namespace == self._namespace,
             md.InventoryClasses.c.class_name == self._class_name,
         )
     )
     for row in self._connection.execute(query):
         self._classId = row["class_id"]
         self._attributes[row["attr_key"]] = {}
         for field in ["attr_name", "attr_type", "attr_default", "attr_mandatory"]:
             self._attributes[row["attr_key"]][field] = row[field]
Пример #43
0
    def generateCountsJs(self, target_fn):
        """Write a JavaScript data file of marker counts per species and
        per species overlap (for a Venn-style visualization) to target_fn.

        Species are encoded as single letters starting at 'A' (chr(64+id)).
        """
        # Marker counts per species (distinct orthologs per Species row).
        n_markers_in = (self.session.query(
            Species, func.count(distinct(
                Sequence.id_ortholog))).join(Sequence).group_by(Species).all())
        n_species = len(n_markers_in)  # how many species are there?

        out_str = "var species_key = [\n"
        out_str += ',\n'.join([
            "\t['%s', '%s', %d]" % (chr(64 + x[0].id), x[0].name, x[1])
            for x in n_markers_in
        ])
        out_str += "\n];\n\n"

        out_str += "var marker_sets_input = [ "

        # output single-species marker counts
        out_str += "{sets: ['%s'], size: %d}" % (
            chr(64 + n_markers_in[0][0].id), n_markers_in[0][1])
        for rec in n_markers_in[1:]:
            out_str += ",\n\t{sets: ['%s'], size: %d}" % (chr(64 + rec[0].id),
                                                          rec[1])

        # determine overlaps
        from sqlalchemy.sql.expression import alias, join, select
        seq_tab = Base.metadata.tables['sequences']
        # One alias of the sequences table per species for the self-joins.
        aka = [alias(seq_tab) for n in range(n_species)]

        for n_levels in range(2, n_species + 1):
            cols = [
                func.count(distinct(aka[0].c.id_ortholog)), aka[0].c.id_species
            ]
            joins = aka[0]

            # build selected columns and joins
            for i in range(1, n_levels):
                cols += [aka[i].c.id_species]
                joins = join(joins, aka[i],
                             aka[0].c.id_ortholog == aka[i].c.id_ortholog)
            # create select statement on columns and joins
            stmt = select(cols).select_from(joins)
            # add filtering clauses (strictly increasing species ids avoid
            # counting the same combination more than once)
            for i in range(1, n_levels):
                stmt = stmt.where(
                    aka[i - 1].c.id_species < aka[i].c.id_species)
            # add grouping clauses
            for i in range(n_levels):
                stmt = stmt.group_by(aka[i].c.id_species)
            # execute query statement
            result = self.session.execute(stmt).fetchall()
            for rec in result:
                out_str += ",\n\t{sets: [%s], size: %d}" % (','.join(
                    ["'%s'" % chr(64 + rec[i + 1])
                     for i in range(n_levels)]), rec[0])

        out_str += "\n];\n\n"

        # load number of markers found for each species
        n_markers_out = (self.session.query(
            Species.name,
            func.count(distinct(PrimerSet.id_ortholog))).outerjoin(
                PrimerSet, Species.primer_sets).group_by(Species).all())
        out_str += "var species_markers_output = [\n"
        out_str += "\t{name: '%s', value: %d}" % (n_markers_out[0][0],
                                                  n_markers_out[0][1])
        for rec in n_markers_out[1:]:
            out_str += ",\n\t{name: '%s', value: %d}" % (rec[0], rec[1])
        out_str += "\n];\n"

        with open(target_fn, 'wt') as outfile:
            print(outfile.name)
            outfile.write(out_str)
Пример #44
0
    def generateCountsJs(self, target_fn):
        """Write a JavaScript data file of marker counts per species and
        per species overlap (for a Venn-style visualization) to target_fn.

        Species are encoded as single letters starting at 'A' (chr(64+id)).
        """
        # Marker counts per species (distinct orthologs per Species row).
        n_markers_in = (self.session.query(
            Species,
            func.count(distinct(Sequence.id_ortholog)))
            .join(Sequence)
            .group_by(Species)
            .all()
        )
        n_species = len(n_markers_in) # how many species are there?

        out_str  = "var species_key = [\n"
        out_str += ',\n'.join(["\t['%s', '%s', %d]" % (chr(64+x[0].id), x[0].name, x[1]) for x in n_markers_in])
        out_str += "\n];\n\n"

        out_str += "var marker_sets_input = [ "

        # output single-species marker counts
        out_str += "{sets: ['%s'], size: %d}" % (chr(64+n_markers_in[0][0].id), n_markers_in[0][1])
        for rec in n_markers_in[1:]:
            out_str += ",\n\t{sets: ['%s'], size: %d}" % (chr(64+rec[0].id), rec[1])

        # determine overlaps
        from sqlalchemy.sql.expression import alias, join, select
        seq_tab = Base.metadata.tables['sequences']
        # One alias of the sequences table per species for the self-joins.
        aka = [alias(seq_tab) for n in range(n_species)]

        for n_levels in range(2,n_species+1):
            cols = [func.count(distinct(aka[0].c.id_ortholog)), aka[0].c.id_species]
            joins = aka[0]

            # build selected columns and joins
            for i in range(1,n_levels):
                cols += [aka[i].c.id_species]
                joins = join(joins, aka[i], aka[0].c.id_ortholog == aka[i].c.id_ortholog)
            # create select statement on columns and joins
            stmt = select(cols).select_from(joins)
            # add filtering clauses (strictly increasing species ids avoid
            # counting the same combination more than once)
            for i in range(1,n_levels):
                stmt = stmt.where(aka[i-1].c.id_species < aka[i].c.id_species)
            # add grouping clauses
            for i in range(n_levels):
                stmt = stmt.group_by(aka[i].c.id_species)
            # execute query statement
            result = self.session.execute(stmt).fetchall()
            for rec in result:
                out_str += ",\n\t{sets: [%s], size: %d}" % (','.join(["'%s'" % chr(64+rec[i+1]) for i in range(n_levels)]), rec[0])

        out_str += "\n];\n\n"

        # load number of markers found for each species
        n_markers_out = (self.session.query(
            Species.name,
            func.count(distinct(PrimerSet.id_ortholog)))
            .outerjoin(PrimerSet, Species.primer_sets)
            .group_by(Species)
            .all()
        )
        out_str += "var species_markers_output = [\n"
        out_str += "\t{name: '%s', value: %d}" % (n_markers_out[0][0], n_markers_out[0][1])
        for rec in n_markers_out[1:]:
            out_str += ",\n\t{name: '%s', value: %d}" % (rec[0], rec[1])
        out_str += "\n];\n"

        with open(target_fn, 'wt') as outfile:
            print(outfile.name)
            outfile.write(out_str)
Пример #45
0
 def join_(self, *args, **kwargs):
     """Thin pass-through to ``join`` (trailing underscore avoids shadowing it)."""
     return join(*args, **kwargs)
Пример #46
0
                    ob, 'first_name'), getattr(ob, 'last_name'))
        terms = []
        for t in tdict.keys():
            terms.append(
                vocabulary.SimpleTerm(value=t, token=t, title=tdict[t]))
        return vocabulary.SimpleVocabulary(terms)


# !+MODELS(mr, oct-2011) shouldn't this be elsewhere?
class PartyMembership(object):
    """Mapped view of a user's membership in a political party."""
    pass


# Three-way join: political_parties -> groups (a party is a group) ->
# user_group_memberships (the users belonging to that group).
party_membership = sql.join(
    schema.political_parties, schema.groups,
    schema.political_parties.c.party_id == schema.groups.c.group_id).join(
        schema.user_group_memberships,
        schema.groups.c.group_id == schema.user_group_memberships.c.group_id)

mapper(PartyMembership, party_membership)


class PIAssignmentSource(SpecializedSource):
    def constructQuery(self, context):
        session = Session()
        trusted = removeSecurityProxy(context)
        parliament_id = self._get_parliament_id(context)
        item_id = getattr(context, self.value_field, None)
        trusted = removeSecurityProxy(context)
        existing_item_ids = [assn.item_id for assn in trusted.values()]
        if item_id:
Пример #47
0
    def _build_query(self, filter_values):
        """Assemble the aggregate SQL select for this report.

        Builds two aliased subqueries over ``"fluff_FarmerRecordFluff"``
        (``x``: per (doc_id, group_id) aggregates of ``prop_value`` plus any
        filter columns; ``y``: per-group gender aggregate restricted by
        ``group_having``) and joins them on ``group_id``.  ``filter_values``
        supplies the bound parameters applied via ``.params()`` at the end.

        NOTE: columns are raw SQL strings, so the append ORDER of
        ``filter_cols``/``having`` is significant to the emitted SQL.
        """
        having = []       # HAVING fragments, joined with " and " in the outer select
        filter_cols = []  # extra columns carried through x's SELECT/GROUP BY
        external_cols = _get_grouping(filter_values)

        for fil in self.filters:
            if isinstance(fil, ANDFilter):
                # Compound filter: only the first sub-filter's column is
                # projected, but the whole expression constrains the result.
                filter_cols.append(fil.filters[0].column_name)
                having.append(fil.build_expression())
            elif fil.column_name not in [
                "group",
                "gender",
                "group_leadership",
                "disaggregate_by",
                "table_card_group_by",
            ]:
                if fil.column_name not in external_cols and fil.column_name != "maxmin":
                    filter_cols.append(fil.column_name)
                having.append(fil.build_expression())

        # HAVING clause for the per-group subquery (y); the branch taken
        # depends on how the report is disaggregated.
        group_having = ""
        having_group_by = []
        if ("disaggregate_by" in filter_values and filter_values["disaggregate_by"] == "group") or (
            "table_card_group_by" in filter_values and filter_values["table_card_group_by"]
        ):
            group_having = "group_leadership='Y'"
            having_group_by.append("group_leadership")
        elif "group_leadership" in filter_values and filter_values["group_leadership"]:
            group_having = (
                "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) = :group_leadership and group_leadership='Y'"
            )
            having_group_by.append("group_leadership")
            filter_cols.append("group_leadership")
        elif "gender" in filter_values and filter_values["gender"]:
            # MAX+MIN over the cast gender column classifies the group's
            # composition (assumes gender is a 0/1 flag — TODO confirm).
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) = :gender"

        table_card_group = []
        if "group_name" in self.group_by:
            table_card_group.append("group_name")
        # x: per (doc_id, group_id) prop_value aggregate plus filter columns.
        s1 = alias(
            select(
                ["doc_id", "group_id", "MAX(prop_value) + MIN(prop_value) as maxmin"] + filter_cols + external_cols,
                from_obj='"fluff_FarmerRecordFluff"',
                group_by=["doc_id", "group_id"] + filter_cols + external_cols,
            ),
            name="x",
        )
        # y: per group_id gender aggregate, restricted by group_having.
        s2 = alias(
            select(
                ["group_id", "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) as gender"] + table_card_group,
                from_obj='"fluff_FarmerRecordFluff"',
                group_by=["group_id"] + table_card_group + having_group_by,
                having=group_having,
            ),
            name="y",
        )
        return select(
            ["COUNT(x.doc_id) as %s" % self.key] + self.group_by,
            group_by=["maxmin"] + filter_cols + self.group_by,
            having=" and ".join(having),
            from_obj=join(s1, s2, s1.c.group_id == s2.c.group_id),
        ).params(filter_values)
Пример #48
0
def get_data(project_name, file_name):
    """Send the stored file *file_name* belonging to *project_name*.

    The lookup is restricted to projects owned by the current user;
    ``Query.one()`` raises ``NoResultFound``/``MultipleResultsFound`` when
    the match is not unique, surfacing as an error upstream.
    """
    # Renamed from ``file`` (shadowed the builtin) and removed the leftover
    # debug print of the record.
    file_record = db.session.query(File).select_from(join(File, Project)).filter(
        File.name == file_name, Project.name == project_name,
        Project.user_id == current_user.id).one()
    return send_file(file_record.get_path())
Пример #49
0
    def __init__(self,
                 db,
                 cohortClass,
                 dtcols=('feature_start_date', 'person_start_date',
                         'person_end_date'),
                 id_col='person_id',
                 time_col='feature_start_date',
                 feature_col='concept_name',
                 unique_id_col='example_id'):
        """Prepare SQLAlchemy feature queries over an OMOP-style database.

        Args:
            db: InspectOMOP database wrapper; table objects are taken from
                ``db.inspector.tables``.
            cohortClass: mapped class/selectable with ``example_id``,
                ``person_id``, ``start_date`` and ``end_date`` columns
                defining each cohort example's observation window.
            dtcols: column labels used for (feature time, window start,
                window end) in every feature query.
            id_col, time_col, feature_col, unique_id_col: column names that
                downstream code reads from query results — TODO confirm
                against the consumer of ``self.feature_dict``.
        """
        self._db = db
        self.cohortClass = cohortClass
        self._dtcols = dtcols

        self.id_col = id_col
        self.time_col = time_col
        self.feature_col = feature_col
        self.unique_id_col = unique_id_col

        # Accumulators for feature definitions registered later.
        self._temporal_features = []
        self._nontemporal_features = []

        self._temporal_feature_names = []
        self._temporal_feature_names_set = set()

        self._nontemporal_feature_names = []
        self._nontemporal_feature_names_set = set()

        # Sparse-matrix list and forward/reverse lookup maps; populated once
        # data is actually extracted (None until then).
        self._spm_arr = []
        self.id_map = None
        self.id_map_rev = None
        self.concept_map = None
        self.concept_map_rev = None
        self.time_map = None
        self.time_map_rev = None

        # Map InspectOMOP tables to names we were already using with our own class definitions.
        ConditionOccurrence = db.inspector.tables['condition_occurrence']
        ProcedureOccurrence = db.inspector.tables['procedure_occurrence']
        DrugExposure = db.inspector.tables['drug_exposure']
        VisitOccurrence = db.inspector.tables['visit_occurrence']
        Provider = db.inspector.tables['provider']
        Concept = db.inspector.tables['concept']

        # SqlAlchemy feature definitions
        # Each select yields: example_id, person_id, a "<concept id> - <kind>
        # - <concept name>" label, the event datetime, and the cohort window
        # bounds.  ``Concept.concept_name == None`` is intentional SQLAlchemy
        # syntax (renders as IS NULL), substituting 'no match' for missing
        # concept rows.
        condition_features = select([
            self.cohortClass.example_id, ConditionOccurrence.person_id,
            (cast(ConditionOccurrence.condition_concept_id, String) +
             ' - condition - ' +
             case([(Concept.concept_name == None, 'no match')], else_ = Concept.concept_name)).label('concept_name'),
            ConditionOccurrence.condition_start_datetime.label(dtcols[0]),
            self.cohortClass.start_date.label(dtcols[1]),
            self.cohortClass.end_date.label(dtcols[2])
        ])\
        .select_from(
            join(ConditionOccurrence, self.cohortClass, ConditionOccurrence.person_id == self.cohortClass.person_id)\
            .join(Concept, Concept.concept_id == ConditionOccurrence.condition_concept_id)
        )

        # Same shape as condition_features, over procedure occurrences.
        procedure_features = select([
            self.cohortClass.example_id, ProcedureOccurrence.person_id,
            (cast(ProcedureOccurrence.procedure_concept_id, String) +
             ' - procedure - ' +
             case([(Concept.concept_name == None, 'no match')], else_ = Concept.concept_name)).label('concept_name'),
            ProcedureOccurrence.procedure_datetime.label(dtcols[0]),
            self.cohortClass.start_date.label(dtcols[1]),
            self.cohortClass.end_date.label(dtcols[2])
        ])\
        .select_from(
            join(ProcedureOccurrence, self.cohortClass, ProcedureOccurrence.person_id == self.cohortClass.person_id)\
            .join(Concept, Concept.concept_id == ProcedureOccurrence.procedure_concept_id)
        )

        # Same shape, over drug exposures.
        drug_features = select([
            self.cohortClass.example_id, DrugExposure.person_id,
            (cast(DrugExposure.drug_concept_id, String) +
             ' - drug - ' +
             case([(Concept.concept_name == None, 'no match')], else_ = Concept.concept_name)).label('concept_name'),
            DrugExposure.drug_exposure_start_datetime.label(dtcols[0]),
            self.cohortClass.start_date.label(dtcols[1]),
            self.cohortClass.end_date.label(dtcols[2])
        ])\
        .select_from(
            join(DrugExposure, self.cohortClass, DrugExposure.person_id == self.cohortClass.person_id)\
            .join(Concept, Concept.concept_id == DrugExposure.drug_concept_id)
        )

        # Provider specialty seen at each visit; needs the extra hop through
        # Provider before resolving the specialty concept name.
        specialty_features = select([
            self.cohortClass.example_id, VisitOccurrence.person_id,
            (cast(Provider.specialty_concept_id, String) +
             ' - specialty - ' +
             case([(Concept.concept_name == None, 'no match')], else_ = Concept.concept_name)).label('concept_name'),
            VisitOccurrence.visit_start_date.label(dtcols[0]),
            self.cohortClass.start_date.label(dtcols[1]),
            self.cohortClass.end_date.label(dtcols[2])
        ])\
        .select_from(
            join(VisitOccurrence, self.cohortClass, VisitOccurrence.person_id == self.cohortClass.person_id)\
            .join(Provider, VisitOccurrence.provider_id == Provider.provider_id)\
            .join(Concept, Concept.concept_id == Provider.specialty_concept_id)
        )

        # Registry of the available feature queries; all are event-level
        # (temporal) features.
        self.feature_dict = {
            'Conditions': {
                'sql': condition_features,
                'is_temporal': True
            },
            'Procedures': {
                'sql': procedure_features,
                'is_temporal': True
            },
            'Drugs': {
                'sql': drug_features,
                'is_temporal': True
            },
            'Specialty': {
                'sql': specialty_features,
                'is_temporal': True
            }
        }
Пример #50
0
# Inspect the reflected table metadata.  NOTE: the local name ``time`` is a
# table object here, shadowing the stdlib module name.
print('time table:      {columns}'.format(columns=time.columns))
print('host table:      {columns}'.format(columns=host.columns))
print('account table:   {columns}'.format(columns=account.columns))
print('host_time table: {columns}'.format(columns=host_time.columns))

# Select all accounts.
print('Select all accounts:')
stmt = select([account.columns.id])
results = connection.execute(stmt).fetchall()
print('Number of accounts: {amnt}'.format(amnt=len(results)))

# Select all accounts and times & group by account.
user_accounts = account.columns.id.label('users')
total_seconds = func.sum(time.columns.seconds).label('seconds')
joined_on = join(
    account, time,
    account.columns.id == time.columns.account_id
)
# Total seconds per account, largest first.
stmt = select([user_accounts, total_seconds]) \
    .select_from(joined_on) \
    .group_by(user_accounts) \
    .order_by(desc(total_seconds))
results = connection.execute(stmt).fetchall()

# Create Pandas DataFrame from results.
# NOTE(review): assumes at least one row — results[0].keys() raises
# IndexError on an empty result set.
index_value = list(range(1, len(results) + 1))
columns = results[0].keys()
df = pd.DataFrame(
    data=results,
    index=index_value,
    columns=columns
Пример #51
0
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.sqltypes import String
from blueshed.model_helpers.sqla_views import view
from sqlalchemy.sql.expression import select, join
from sqlalchemy.sql.functions import func


"""
    Because the library requires attributes in Person and Permission
    you can extend them like this:
"""

Person._token = Column(String(80))


'''
    An example View
'''
q = select([Person.id.label('id'), 
            Person.email.label('email'),
            func.count(Permission.id).label('permission_count')]).\
            select_from(join(Person,
                             person_permissions_permission,
                             Person.id==person_permissions_permission.c.permissions_id).\
                        join(Permission,
                             Permission.id==person_permissions_permission.c.permission_id)).\
            group_by(Permission.id)

PersonReport = view("person_report", Base.metadata, q)
                        
Пример #52
0
    def update_idnum_index_for_upload(
            cls,
            session: SqlASession,
            indexed_at_utc: Pendulum,
            tablechanges: UploadTableChanges) -> None:
        """
        Updates the index for a device's upload.

        - Deletes index entries for records that are on the way out.
        - Creates index entries for records that are on the way in.
        - Should be called after both the Patient and PatientIdNum tables are
          committed; see special ordering in
          :func:`camcops_server.cc_modules.client_api.commit_all`.

        Both operations work at the Core (Table) level — a bulk DELETE and an
        INSERT ... FROM SELECT — so no ORM objects are loaded.

        Args:
            session:
                an SQLAlchemy Session
            indexed_at_utc:
                current time in UTC
            tablechanges:
                a :class:`camcops_server.cc_modules.cc_client_api_core.UploadTableChanges`
                object describing the changes to a table
        """  # noqa
        # noinspection PyUnresolvedReferences
        indextable = PatientIdNumIndexEntry.__table__  # type: Table
        indexcols = indextable.columns
        # noinspection PyUnresolvedReferences
        idnumtable = PatientIdNum.__table__  # type: Table
        idnumcols = idnumtable.columns
        # noinspection PyUnresolvedReferences
        patienttable = Patient.__table__  # type: Table
        patientcols = patienttable.columns

        # Delete the old
        removal_pks = tablechanges.idnum_delete_index_pks
        if removal_pks:
            log.debug("Deleting old ID number indexes: server PKs {}",
                      removal_pks)
            session.execute(
                indextable.delete()
                .where(indextable.c.idnum_pk.in_(removal_pks))
            )

        # Create the new
        addition_pks = tablechanges.idnum_add_index_pks
        if addition_pks:
            log.debug("Adding ID number indexes: server PKs {}", addition_pks)
            # noinspection PyPep8,PyProtectedMember
            session.execute(
                indextable.insert().from_select(
                    # Target:
                    [indexcols.idnum_pk,
                     indexcols.indexed_at_utc,
                     indexcols.patient_pk,
                     indexcols.which_idnum,
                     indexcols.idnum_value],
                    # Source: join ID numbers to their patient row within the
                    # same device/era, taking only current patients.
                    (
                        select([idnumcols._pk,
                                literal(indexed_at_utc),
                                patientcols._pk,
                                idnumcols.which_idnum,
                                idnumcols.idnum_value])
                        .select_from(
                            join(
                                idnumtable,
                                patienttable,
                                and_(
                                    idnumcols._device_id == patientcols._device_id,  # noqa
                                    idnumcols._era == patientcols._era,
                                    idnumcols.patient_id == patientcols.id,
                                )
                            )
                        )
                        .where(idnumcols._pk.in_(addition_pks))
                        .where(patientcols._current == True)  # "== True": SQL comparison, not Python identity
                    )
                )
            )
    dump_query,
    production_session,
    LicensePool,
    DataSource,
    Edition,
    PresentationCalculationPolicy,
)

# Find all books where the edition associated with the LicensePool has a
# different medium from the presentation edition.
# NOTE(review): Python 2 script (bare ``print`` statement below); run under
# a py2 interpreter.
_db = production_session()

# Find all the LicensePools that aren't books.
# (LicensePool is joined to its source Edition via data source + identifier,
# not via presentation_edition_id.)
subq = select([LicensePool.id]).select_from(
    join(LicensePool, Edition,
         and_(LicensePool.data_source_id==Edition.data_source_id,
              LicensePool.identifier_id==Edition.primary_identifier_id)
    )
).where(Edition.medium != Edition.BOOK_MEDIUM)

# Of those LicensePools, find every LicensePool whose presentation
# edition says it _is_ a book.
qu = _db.query(LicensePool).join(
    Edition, LicensePool.presentation_edition_id==Edition.id
).filter(LicensePool.id.in_(subq)).filter(Edition.medium == Edition.BOOK_MEDIUM)

print "Recalculating presentation edition for %d LicensePools." % qu.count()

for lp in qu:
    # Recalculate that LicensePool's presentation edition, and then its
    # work presentation.
    lp.set_presentation_edition()
Пример #54
0
        for t in tdict.keys():
            terms.append(
                vocabulary.SimpleTerm(
                    value = t, 
                    token = t,
                    title = tdict[t]
                   ))
        return vocabulary.SimpleVocabulary(terms)


# !+MODELS(mr, oct-2011) shouldn't this be elsewhere?
class PartyMembership(object):
    """Placeholder class; its attributes are supplied by the join selectable
    it is mapped onto via the ``mapper()`` call below."""
    pass

# Selectable used as the mapped "table" for PartyMembership: parties joined
# to groups, then to the user memberships of those groups.
_party_group_join = sql.join(
    schema.political_parties,
    schema.groups,
    schema.political_parties.c.party_id == schema.groups.c.group_id,
)
party_membership = _party_group_join.join(
    schema.user_group_memberships,
    schema.groups.c.group_id == schema.user_group_memberships.c.group_id,
)

mapper(PartyMembership, party_membership)


class PIAssignmentSource(SpecializedSource):
    
    def constructQuery(self, context):
        session= Session()
        trusted=removeSecurityProxy(context)
        parliament_id = self._get_parliament_id(context)
        item_id = getattr(context, self.value_field, None)
        trusted = removeSecurityProxy(context)
        existing_item_ids = [assn.item_id for assn in trusted.values()]
        if item_id:
Пример #55
0
# Wire relationships/properties onto the already-declared model classes.

# All dependency changes applied to a build, nearest dependency first
# (NULL distances sort last).
Build.dependency_changes = relationship(
    AppliedChange,
    backref='build',
    primaryjoin=(Build.id == AppliedChange.build_id),
    order_by=AppliedChange.distance.nullslast(),
    passive_deletes=True,
)
ResolutionChange.problems = relationship(
    ResolutionProblem,
    backref='result',
    passive_deletes=True,
)
# Scalar subquery counting the group's non-blocked base packages; deferred so
# it is only emitted when the attribute is accessed.
# NOTE(review): uses the legacy positional select(columns, whereclause,
# from_obj) signature — confirm against the SQLAlchemy version in use.
PackageGroup.package_count = column_property(
    select([func.count()],
           PackageGroupRelation.group_id == PackageGroup.id,
           join(BasePackage, PackageGroupRelation,
                PackageGroupRelation.base_id == BasePackage.id))
    .where(~BasePackage.all_blocked)
    .correlate(PackageGroup).as_scalar(),
    deferred=True)
# pylint: disable=E1101
# Many-to-many: groups a base package belongs to, ordered by group name.
BasePackage.groups = relationship(
    PackageGroup,
    secondary=PackageGroupRelation.__table__,
    secondaryjoin=(PackageGroup.id == PackageGroupRelation.group_id),
    primaryjoin=(PackageGroupRelation.base_id == BasePackage.id),
    order_by=PackageGroup.name,
    passive_deletes=True,
)
Package.groups = relationship(
    PackageGroup,
    secondary=PackageGroupRelation.__table__,