    def test_annotate_unique_traversal(self):
        """test that items are copied only once during
        annotate, deannotate traversal

        #2453
        """
        table1 = table('table1', column('x'))
        table2 = table('table2', column('y'))
        a1 = table1.alias()
        s = select([a1.c.x]).select_from(
                a1.join(table2, a1.c.x==table2.c.y)
            )

        for sel in (
            sql_util._deep_deannotate(s),
            sql_util._deep_annotate(s, {'foo':'bar'}),
            visitors.cloned_traverse(s, {}, {}),
            visitors.replacement_traverse(s, {}, lambda x:None)
        ):
            # the columns clause isn't changed at all
            assert sel._raw_columns[0].table is a1
            # the from objects are internally consistent,
            # i.e. the Alias at position 0 is the same
            # Alias in the Join object in position 1
            assert sel._froms[0] is sel._froms[1].left
            eq_(str(s), str(sel))
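The traversal tests in this excerpt rely on imports that the snippet does not show. A minimal sketch of the assumed imports (module paths follow SQLAlchemy's test-suite conventions and may differ between versions):

# Assumed imports for the annotate/traversal tests above (hypothetical; adjust
# to the SQLAlchemy version actually under test).
from sqlalchemy.sql import column, select, table, visitors
from sqlalchemy.sql import util as sql_util
from sqlalchemy.testing import eq_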
def _insert_operation_form_translation():
    tb = table(
        'operation_form_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String)
    )

    columns = [c.name for c in tb.columns]
    data = [
        (102, 'en', 'Execution'),
        (102, 'pt', 'Execução'),

        (103, 'en', 'Execution'),
        (103, 'pt', 'Execução'),

        (104, 'en', 'Execution'),
        (104, 'pt', 'Execução'),

        (105, 'en', 'Execution'),
        (105, 'pt', 'Execução'),

        (106, 'en', 'Execution'),
        (106, 'pt', 'Execução'),

        (107, 'en', 'Execution'),
        (107, 'pt', 'Execução'),

    ]
    rows = [dict(zip(columns, row)) for row in data]

    op.bulk_insert(tb, rows)
def _insert_operation_form_field_translation():
    tb = table(
        'operation_form_field_translation',
        column('id', Integer),
        column('locale', String),
        column('label', String),
        column('help', String),
    )

    columns = ('id', 'locale', 'label', 'help')
    data = [
        # normalize
        (3078, 'en', 'Attributes', 'Attributes'),
        (3078, 'pt', 'Atributos', 'Colunas para serem consideradas'),
        (3079, 'en', 'Alias', 'Name of the new column'),
        (3079, 'pt', 'Alias', 'Nome para a nova coluna criada'),
        (3080, 'en', 'Normalization Type', 'Type of Normalization to perform.'),
        (3080, 'pt', 'Tipo de Normalização', 'Tipo de Normalização para ser feita.'),

        # feature-indexer
        (3027, 'en', 'Attributes', 'Attributes (features) to be indexed'),
        (3027, 'pt', 'Atributos', 'Atributos (features) a serem indexados'),
        (3028, 'en', 'Indexer type', 'Indexer type'),
        (3028, 'pt', 'Tipo de indexador', 'Tipo de indexador'),
        (3029, 'en', 'Name for new indexed attribute(s)', 'Name for new indexed attribute(s)'),
        (3029, 'pt', 'Nome para novo(s) atributo(s) indexado(s)', 'Nome para novo(s) atributo(s) indexado(s)'),
    ]
    rows = [dict(zip(columns, row)) for row in data]
    op.bulk_insert(tb, rows)
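These bulk-insert helpers are normally wired into the revision's upgrade() hook. A minimal sketch under that assumption (the actual migration may call more helpers than the two shown above):

def upgrade():
    # Hypothetical wiring of the translation helpers defined above.
    _insert_operation_form_translation()
    _insert_operation_form_field_translation()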
    def test_annotate_fromlist_preservation(self):
        """test the FROM list in select still works
        even when multiple annotate runs have created
        copies of the same selectable

        #2453, continued

        """
        table1 = table('table1', column('x'))
        table2 = table('table2', column('y'))
        a1 = table1.alias()
        s = select([a1.c.x]).select_from(
                a1.join(table2, a1.c.x==table2.c.y)
            )

        assert_s = select([select([s])])
        for fn in (
            sql_util._deep_deannotate,
            lambda s: sql_util._deep_annotate(s, {'foo':'bar'}),
            lambda s:visitors.cloned_traverse(s, {}, {}),
            lambda s:visitors.replacement_traverse(s, {}, lambda x:None)
        ):

            sel = fn(select([fn(select([fn(s)]))]))
            eq_(str(assert_s), str(sel))
def _insert_operation_operation_form():
    tb = table(
        'operation_operation_form',
        column('operation_id', Integer),
        column('operation_form_id', Integer))

    columns = [c.name for c in tb.columns]
    data = [
        [REGRESSION_MODEL, 102],
        [ISOTONIC_REGRESSION, 103],
        [AFT_SURVIVAL_REGRESSION, 104],
        [GBT_REGRESSOR, 105],
        [RANDOM_FOREST_REGRESSOR, 106],
        [GENERALIZED_LINEAR_REGRESSOR, 107],

        [REGRESSION_MODEL, 41],
        [ISOTONIC_REGRESSION, 41],
        [AFT_SURVIVAL_REGRESSION, 41],
        [GBT_REGRESSOR, 41],
        [RANDOM_FOREST_REGRESSOR, 41],
        [GENERALIZED_LINEAR_REGRESSOR, 41],
    ]
    rows = [dict(zip(columns, row)) for row in data]

    op.bulk_insert(tb, rows)
def upgrade():
    # Insert G-Cloud 9 lot records

    lot_table = table(
        'lots',
        column('name', sa.String),
        column('slug', sa.String),
        column('one_service_limit', sa.Boolean),
        column('data', sa.JSON)
    )

    op.bulk_insert(lot_table, [
        {
            'name': 'Cloud hosting', 'slug': 'cloud-hosting', 'one_service_limit': False,
            'data': {"unitSingular": "service", "unitPlural": "services"}
        },
        {
            'name': 'Cloud software', 'slug': 'cloud-software', 'one_service_limit': False,
            'data': {"unitSingular": "service", "unitPlural": "services"}
        },
        {
            'name': 'Cloud support', 'slug': 'cloud-support', 'one_service_limit': False,
            'data': {"unitSingular": "service", "unitPlural": "services"}
        },
    ])
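The excerpt stops at the insert; a corresponding downgrade is not shown. A hedged sketch that removes the same rows by slug, re-declaring the ad-hoc table, might look like this:

def downgrade():
    # Hypothetical reversal of the bulk insert above (not from the original migration).
    lot_table = table('lots', column('slug', sa.String))
    op.execute(
        lot_table.delete().where(
            lot_table.c.slug.in_(
                ['cloud-hosting', 'cloud-software', 'cloud-support']))
    )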
def _insert_operation_operation_form():
    tb = table(
        'operation_operation_form',
        column('operation_id', Integer),
        column('operation_form_id', Integer))

    columns = ('operation_id', 'operation_form_id')
    data = [
        (3022, 39),  # normalize
        (3022, 40),
        (3022, 41),
        (3022, 43),
        (3022, 110),
        (3022, 3022),

        (3012, 39),  # feature-indexer
        (3012, 40),
        (3012, 41),
        (3012, 43),
        (3012, 110),
        (3012, 3012),
    ]

    rows = [dict(zip(columns, row)) for row in data]
    op.bulk_insert(tb, rows)
Example #8
    def test_update_returning(self):
        table1 = table(
            'mytable',
            column('myid', Integer),
            column('name', String(128)),
            column('description', String(128)))
        u = update(
            table1,
            values=dict(name='foo')).returning(table1.c.myid, table1.c.name)
        self.assert_compile(u,
                            'UPDATE mytable SET name=:name OUTPUT '
                            'inserted.myid, inserted.name')
        u = update(table1, values=dict(name='foo')).returning(table1)
        self.assert_compile(u,
                            'UPDATE mytable SET name=:name OUTPUT '
                            'inserted.myid, inserted.name, '
                            'inserted.description')
        u = update(
            table1,
            values=dict(
                name='foo')).returning(table1).where(table1.c.name == 'bar')
        self.assert_compile(u,
                            'UPDATE mytable SET name=:name OUTPUT '
                            'inserted.myid, inserted.name, '
                            'inserted.description WHERE mytable.name = '
                            ':name_1')
        u = update(table1, values=dict(name='foo'
                                       )).returning(func.length(table1.c.name))
        self.assert_compile(u,
                            'UPDATE mytable SET name=:name OUTPUT '
                            'LEN(inserted.name) AS length_1')
Example #9
    def test_limit_offset_with_correlated_order_by(self):
        t1 = table('t1', column('x', Integer), column('y', Integer))
        t2 = table('t2', column('x', Integer), column('y', Integer))

        order_by = select([t2.c.y]).where(t1.c.x == t2.c.x).as_scalar()
        s = select([t1]).where(t1.c.x == 5).order_by(order_by) \
            .limit(10).offset(20)

        self.assert_compile(
            s,
            "SELECT anon_1.x, anon_1.y "
            "FROM (SELECT t1.x AS x, t1.y AS y, "
            "ROW_NUMBER() OVER (ORDER BY "
            "(SELECT t2.y FROM t2 WHERE t1.x = t2.x)"
            ") AS mssql_rn "
            "FROM t1 "
            "WHERE t1.x = :x_1) AS anon_1 "
            "WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
            checkparams={'param_1': 20, 'param_2': 10, 'x_1': 5}
        )

        c = s.compile(dialect=mssql.MSDialect())
        eq_(len(c._result_columns), 2)
        assert t1.c.x in set(c._create_result_map()['x'][1])
        assert t1.c.y in set(c._create_result_map()['y'][1])
Example #10
    def test_aliases_schemas(self):
        metadata = MetaData()
        table1 = table('mytable',
            column('myid', Integer),
            column('name', String),
            column('description', String),
        )

        table4 = Table(
            'remotetable', metadata,
            Column('rem_id', Integer, primary_key=True),
            Column('datatype_id', Integer),
            Column('value', String(20)),
            schema='remote_owner'
        )

        s = table4.select()
        c = s.compile(dialect=self.__dialect__)
        assert table4.c.rem_id in set(c.result_map['rem_id'][1])

        s = table4.select(use_labels=True)
        c = s.compile(dialect=self.__dialect__)
        print(c.result_map)
        assert table4.c.rem_id in set(c.result_map['remote_owner_remotetable_rem_id'][1])

        self.assert_compile(table4.select(), "SELECT remotetable_1.rem_id, remotetable_1.datatype_id, remotetable_1.value FROM remote_owner.remotetable AS remotetable_1")
        
        self.assert_compile(table4.select(use_labels=True), "SELECT remotetable_1.rem_id AS remote_owner_remotetable_rem_id, remotetable_1.datatype_id AS remote_owner_remotetable_datatype_id, remotetable_1.value AS remote_owner_remotetable_value FROM remote_owner.remotetable AS remotetable_1")

        self.assert_compile(table1.join(table4, table1.c.myid==table4.c.rem_id).select(), "SELECT mytable.myid, mytable.name, mytable.description, remotetable_1.rem_id, remotetable_1.datatype_id, remotetable_1.value FROM mytable JOIN remote_owner.remotetable AS remotetable_1 ON remotetable_1.rem_id = mytable.myid")
def _insert_operation_form_translation():
    tb = table(
        'operation_form_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String))

    columns = ('id', 'locale', 'name')
    data = [
        (3024, 'en', 'Execution'),
        (3024, 'pt', 'Execução'),
        (3025, 'en', 'Execution'),
        (3025, 'pt', 'Execução'),
        (3026, 'en', 'Execution'),
        (3026, 'pt', 'Execução'),
        (3027, 'en', 'Execution'),
        (3027, 'pt', 'Execução'),
        (3028, 'en', 'Execution'),
        (3028, 'pt', 'Execução'),
        (3029, 'en', 'Execution'),
        (3029, 'pt', 'Execução'),
        (3030, 'en', 'Execution'),
        (3030, 'pt', 'Execução'),
        (3031, 'en', 'Execution'),
        (3031, 'pt', 'Execução'),
    ]
    rows = [dict(zip(columns, row)) for row in data]

    op.bulk_insert(tb, rows)
Example #12
    def test_recursive(self):
        parts = table('parts',
            column('part'),
            column('sub_part'),
            column('quantity'),
        )

        included_parts = select([
                            parts.c.sub_part,
                            parts.c.part,
                            parts.c.quantity]).\
                            where(parts.c.part=='our part').\
                                cte(recursive=True)

        incl_alias = included_parts.alias()
        parts_alias = parts.alias()
        included_parts = included_parts.union(
            select([
                parts_alias.c.part,
                parts_alias.c.sub_part,
                parts_alias.c.quantity]).\
                where(parts_alias.c.part==incl_alias.c.sub_part)
            )

        s = select([
            included_parts.c.sub_part,
            func.sum(included_parts.c.quantity).label('total_quantity')]).\
            select_from(included_parts.join(
                    parts,included_parts.c.part==parts.c.part)).\
            group_by(included_parts.c.sub_part)
        self.assert_compile(s,
                "WITH RECURSIVE anon_1(sub_part, part, quantity) "
                "AS (SELECT parts.sub_part AS sub_part, parts.part "
                "AS part, parts.quantity AS quantity FROM parts "
                "WHERE parts.part = :part_1 UNION SELECT parts_1.part "
                "AS part, parts_1.sub_part AS sub_part, parts_1.quantity "
                "AS quantity FROM parts AS parts_1, anon_1 AS anon_2 "
                "WHERE parts_1.part = anon_2.sub_part) "
                "SELECT anon_1.sub_part, "
                "sum(anon_1.quantity) AS total_quantity FROM anon_1 "
                "JOIN parts ON anon_1.part = parts.part "
                "GROUP BY anon_1.sub_part"
            )

        # quick check that the "WITH RECURSIVE" varies per
        # dialect
        self.assert_compile(s,
                "WITH anon_1(sub_part, part, quantity) "
                "AS (SELECT parts.sub_part AS sub_part, parts.part "
                "AS part, parts.quantity AS quantity FROM parts "
                "WHERE parts.part = :part_1 UNION SELECT parts_1.part "
                "AS part, parts_1.sub_part AS sub_part, parts_1.quantity "
                "AS quantity FROM parts AS parts_1, anon_1 AS anon_2 "
                "WHERE parts_1.part = anon_2.sub_part) "
                "SELECT anon_1.sub_part, "
                "sum(anon_1.quantity) AS total_quantity FROM anon_1 "
                "JOIN parts ON anon_1.part = parts.part "
                "GROUP BY anon_1.sub_part",
                dialect=mssql.dialect()
            )
Example #13
    def test_cube_operators(self):

        t = table('t', column('value'),
                  column('x'), column('y'), column('z'), column('q'))

        stmt = select([func.sum(t.c.value)])

        self.assert_compile(
            stmt.group_by(func.cube(t.c.x, t.c.y)),
            "SELECT sum(t.value) AS sum_1 FROM t GROUP BY CUBE(t.x, t.y)"
        )

        self.assert_compile(
            stmt.group_by(func.rollup(t.c.x, t.c.y)),
            "SELECT sum(t.value) AS sum_1 FROM t GROUP BY ROLLUP(t.x, t.y)"
        )

        self.assert_compile(
            stmt.group_by(
                func.grouping_sets(t.c.x, t.c.y)
            ),
            "SELECT sum(t.value) AS sum_1 FROM t "
            "GROUP BY GROUPING SETS(t.x, t.y)"
        )

        self.assert_compile(
            stmt.group_by(
                func.grouping_sets(
                    sql.tuple_(t.c.x, t.c.y),
                    sql.tuple_(t.c.z, t.c.q),
                )
            ),
            "SELECT sum(t.value) AS sum_1 FROM t GROUP BY "
            "GROUPING SETS((t.x, t.y), (t.z, t.q))"
        )
Example #14
File: query.py (project: champeric/powa-web)
    def get(self, database, query):
        bs = block_size.c.block_size
        stmt = powa_getstatdata_detailed_db()
        stmt = stmt.where(
            (column("datname") == bindparam("database")) &
            (column("queryid") == bindparam("query")))
        stmt = stmt.alias()
        from_clause = outerjoin(
            powa_statements, stmt,
            and_(powa_statements.c.queryid == stmt.c.queryid,
                 powa_statements.c.dbid == stmt.c.dbid))
        c = stmt.c
        rblk = mulblock(sum(c.shared_blks_read).label("shared_blks_read"))
        wblk = mulblock(sum(c.shared_blks_hit).label("shared_blks_hit"))
        stmt = (select([
            column("query"),
            sum(c.calls).label("calls"),
            sum(c.runtime).label("runtime"),
            rblk,
            wblk,
            (rblk + wblk).label("total_blks")])
            .select_from(from_clause)
            .where(powa_statements.c.queryid == bindparam("query"))
            .group_by(column("query"), bs))

        value = self.execute(stmt, params={
            "query": query,
            "database": database,
            "from": self.get_argument("from"),
            "to": self.get_argument("to")
        })
        if value.rowcount < 1:
            self.render("xhr.html", content="No data")
            return
        self.render("database/query/detail.html", stats=value.first())
Example #15
    def test_annotations(self):
        """test that annotated clause constructs use the 
        decorated class' compiler.

        """

        t1 = table('t1', column('c1'), column('c2'))

        dispatch = Select._compiler_dispatch
        try:
            @compiles(Select)
            def compile(element, compiler, **kw):
                return "OVERRIDE"

            s1 = select([t1])
            self.assert_compile(
                s1, "OVERRIDE"
            )
            self.assert_compile(
                s1._annotate({}),
                "OVERRIDE"
            )
        finally:
            Select._compiler_dispatch = dispatch
            if hasattr(Select, '_compiler_dispatcher'):
                del Select._compiler_dispatcher
Example #16
    def by_stream(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        stream = get_stream(operand, self.user_profile.realm)
        if stream is None:
            raise BadNarrowOperator('unknown stream ' + operand)

        if self.user_profile.realm.is_zephyr_mirror_realm:
            # MIT users expect narrowing to "social" to also show messages to /^(un)*social(.d)*$/
            # (unsocial, ununsocial, social.d, etc)
            m = re.search(r'^(?:un)*(.+?)(?:\.d)*$', stream.name, re.IGNORECASE)
            # Since the regex has a `.+` in it and "" is invalid as a
            # stream name, this will always match
            assert(m is not None)
            base_stream_name = m.group(1)

            matching_streams = get_active_streams(self.user_profile.realm).filter(
                name__iregex=r'^(un)*%s(\.d)*$' % (self._pg_re_escape(base_stream_name),))
            matching_stream_ids = [matching_stream.id for matching_stream in matching_streams]
            recipients_map = bulk_get_recipients(Recipient.STREAM, matching_stream_ids)
            cond = column("recipient_id").in_([recipient.id for recipient in recipients_map.values()])
            return query.where(maybe_negate(cond))

        recipient = get_recipient(Recipient.STREAM, type_id=stream.id)
        cond = column("recipient_id") == recipient.id
        return query.where(maybe_negate(cond))
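maybe_negate is a ConditionTransform supplied by the caller and is not defined in this excerpt. A minimal sketch of such a transform (an assumption about the surrounding narrow-building code):

from sqlalchemy import not_

def make_condition_transform(negated):
    # type: (bool) -> ConditionTransform
    # Hypothetical helper: return the condition unchanged, or wrapped in NOT().
    if negated:
        return lambda cond: not_(cond)
    return lambda cond: cond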
Example #17
    def _by_search_tsearch(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        tsquery = func.plainto_tsquery(literal("zulip.english_us_search"), literal(operand))
        ts_locs_array = func.ts_match_locs_array
        query = query.column(ts_locs_array(literal("zulip.english_us_search"),
                                           column("rendered_content"),
                                           tsquery).label("content_matches"))
        # We HTML-escape the subject in Postgres to avoid doing a server round-trip
        query = query.column(ts_locs_array(literal("zulip.english_us_search"),
                                           func.escape_html(column("subject")),
                                           tsquery).label("subject_matches"))

        # Do quoted string matching.  We really want phrase
        # search here so we can ignore punctuation and do
        # stemming, but there isn't a standard phrase search
        # mechanism in Postgres
        for term in re.findall(r'"[^"]+"|\S+', operand):
            if term[0] == '"' and term[-1] == '"':
                term = term[1:-1]
                term = '%' + connection.ops.prep_for_like_query(term) + '%'
                cond = or_(column("content").ilike(term),
                           column("subject").ilike(term))
                query = query.where(maybe_negate(cond))

        cond = column("search_tsvector").op("@@")(tsquery)
        return query.where(maybe_negate(cond))
Example #18
    def test_insert_returning(self):
        dialect = postgresql.dialect()
        table1 = table('mytable',
                       column('myid', Integer),
                       column('name', String(128)),
                       column('description', String(128)),
                       )

        i = insert(
            table1,
            values=dict(
                name='foo')).returning(
            table1.c.myid,
            table1.c.name)
        self.assert_compile(i,
                            'INSERT INTO mytable (name) VALUES '
                            '(%(name)s) RETURNING mytable.myid, '
                            'mytable.name', dialect=dialect)
        i = insert(table1, values=dict(name='foo')).returning(table1)
        self.assert_compile(i,
                            'INSERT INTO mytable (name) VALUES '
                            '(%(name)s) RETURNING mytable.myid, '
                            'mytable.name, mytable.description',
                            dialect=dialect)
        i = insert(table1, values=dict(name='foo'
                                       )).returning(func.length(table1.c.name))
        self.assert_compile(i,
                            'INSERT INTO mytable (name) VALUES '
                            '(%(name)s) RETURNING length(mytable.name) '
                            'AS length_1', dialect=dialect)
Example #19
def upgrade():
  op.add_column('task_groups', sa.Column('sort_index',
                sa.String(length=250), nullable=False))
  op.add_column('cycle_task_groups', sa.Column('sort_index',
                sa.String(length=250), nullable=False))

  workflows_table = table(
      'workflows',
      column('id', sa.Integer)
  )

  task_groups_table = table(
      'task_groups',
      column('id', sa.Integer),
      column('sort_index', sa.String),
      column('workflow_id', sa.Integer),
  )

  cycles_table = table(
      'cycles',
      column('id', sa.Integer),
      column('sort_index', sa.String),
      column('workflow_id', sa.Integer),
  )

  cycle_task_groups_table = table(
      'cycle_task_groups',
      column('id', sa.Integer),
      column('sort_index', sa.String),
      column('cycle_id', sa.Integer),
  )

  _set_sort_index(workflows_table, 'workflow_id', task_groups_table)
  _set_sort_index(cycles_table, 'cycle_id', cycle_task_groups_table)
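_set_sort_index() is defined elsewhere in this revision and is not shown here. A hypothetical sketch, assuming it seeds sort_index for each child row grouped by its parent:

def _set_sort_index(parent_table, fk_name, child_table):
  # Hypothetical implementation (the real helper is not part of this excerpt).
  connection = op.get_bind()
  for parent in connection.execute(sa.select([parent_table.c.id])):
    children = connection.execute(
        sa.select([child_table.c.id]).where(
            getattr(child_table.c, fk_name) == parent.id))
    for index, child in enumerate(children):
      connection.execute(
          child_table.update()
          .where(child_table.c.id == child.id)
          .values(sort_index=str(index)))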
def upgrade():
    op.execute('SET search_path TO mineturer')
    conn = op.get_bind()
    res = conn.execute('select userid, password from users')
    results = res.fetchall()

    users = table(
        'users',
        column('bcrypt_pwd', sa.String),
        column('userid', sa.Integer)
    )

    for result in results:
        userid = result[0]
        pwd = result[1]
        bcrypt_pwd = bcrypt.hashpw(
            pwd.encode('utf-8'),
            bcrypt.gensalt()
        )
        op.execute(
            users.update().where(
                users.c.userid == op.inline_literal(userid)
            ).values({
                'bcrypt_pwd': op.inline_literal(bcrypt_pwd)}
            )
        )

    op.execute('SET search_path TO public')
Example #21
    def test_update_returning(self):
        dialect = postgresql.dialect()
        table1 = table(
            'mytable',
            column('myid', Integer),
            column('name', String(128)),
            column('description', String(128)))
        u = update(
            table1,
            values=dict(name='foo')).returning(
                table1.c.myid, table1.c.name)
        self.assert_compile(u,
                            'UPDATE mytable SET name=%(name)s '
                            'RETURNING mytable.myid, mytable.name',
                            dialect=dialect)
        u = update(table1, values=dict(name='foo')).returning(table1)
        self.assert_compile(u,
                            'UPDATE mytable SET name=%(name)s '
                            'RETURNING mytable.myid, mytable.name, '
                            'mytable.description', dialect=dialect)
        u = update(table1, values=dict(name='foo'
                                       )).returning(func.length(table1.c.name))
        self.assert_compile(
            u,
            'UPDATE mytable SET name=%(name)s '
            'RETURNING length(mytable.name) AS length_1',
            dialect=dialect)
def upgrade():
    userexternalid = table('userexternalid',
        column('updated_at', sa.DateTime),
        column('last_used_at', sa.DateTime))
    op.add_column('userexternalid', sa.Column('last_used_at', sa.DateTime(), nullable=True))
    op.execute(userexternalid.update().values(last_used_at=userexternalid.c.updated_at))
    op.alter_column('userexternalid', 'last_used_at', nullable=False)
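The matching downgrade is not included in this excerpt; under the usual Alembic pattern it would simply drop the added column (a sketch, not the original revision):

def downgrade():
    op.drop_column('userexternalid', 'last_used_at')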
Example #23
    def test_noorderby_parameters_insubquery(self):
        """test that the ms-sql dialect does not include ORDER BY
        positional parameters in subqueries"""

        table1 = table(
            "mytable",
            column("myid", Integer),
            column("name", String),
            column("description", String),
        )

        q = select(
            [table1.c.myid, sql.literal('bar').label('c1')],
            order_by=[table1.c.name + '-']
        ).alias("foo")
        crit = q.c.myid == table1.c.myid
        dialect = mssql.dialect()
        dialect.paramstyle = "qmark"
        dialect.positional = True
        self.assert_compile(
            select(["*"], crit),
            "SELECT * FROM (SELECT mytable.myid AS "
            "myid, ? AS c1 FROM mytable) AS foo, mytable WHERE "
            "foo.myid = mytable.myid",
            dialect=dialect,
            checkparams={'param_1': 'bar'},
            # if name_1 is included, too many parameters are passed to dbapi
            checkpositional=('bar', )
        )
def upgrade():
    ### commands auto generated by Alembic  ###
    op.create_table('employees_attendance',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('attendanceDate', sa.Date(), nullable=False),
    sa.Column('arriveTime', sa.Time(), nullable=False),
    sa.Column('leaveTime', sa.Time(), nullable=False),
    sa.Column('employee_id', sa.Integer(), nullable=False),
    sa.Column('createdBy_id', sa.Integer(), nullable=True),
    sa.Column('issueDateTime', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['createdBy_id'], ['users.id'], ondelete='CASCADE'),
    sa.ForeignKeyConstraint(['employee_id'], ['users.id'], ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id')
    )



    abilities_table = table('abilities',
    column('id', Integer),
    column('name', String),
    )

    op.bulk_insert(abilities_table,
        [
            {'name': "employeeAttendances.list"},
            {'name': "employeeAttendances.show"},
            {'name': "employeeAttendances.delete"},
            {'name': "employeeAttendances.update"},
            {'name': "employeeAttendances.create"},

            {'name': "feedbacks.list"},
            {'name': "feedbacks.show"}

        ]
    )
Example #25
    def test_delete_extra_froms(self):
        t1 = table("t1", column("c1"))
        t2 = table("t2", column("c1"))
        q = sql.delete(t1).where(t1.c.c1 == t2.c.c1)
        self.assert_compile(
            q, "DELETE FROM t1 FROM t1, t2 WHERE t1.c1 = t2.c1"
        )
Example #26
def genericize_imapaccount():
    class ImapAccount_(Base):
        __table__ = Base.metadata.tables['imapaccount']

    # Get data from columns-to-be-dropped
    with session_scope() as db_session:
        results = db_session.query(ImapAccount_.id,
                                   ImapAccount_.imap_host).all()

    to_insert = [dict(id=r[0], imap_host=r[1]) for r in results]

    # Rename table, add new columns.
    op.rename_table('imapaccount', 'account')
    op.add_column('account', sa.Column('type', sa.String(16)))

    # Create new table, insert data
    # The table
    op.create_table('imapaccount',
                    sa.Column('imap_host', sa.String(512)),
                    sa.Column('id', sa.Integer()),
                    sa.ForeignKeyConstraint(['id'], ['account.id'],
                                            ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('id'))

    # The ad-hoc table for insert
    table_ = table('imapaccount',
                   column('imap_host', sa.String()),
                   column('id', sa.Integer))
    if to_insert:
        op.bulk_insert(table_, to_insert)

    # Drop columns now
    op.drop_column('account', 'imap_host')
Example #27
def genericize_thread():
    class Thread_(Base):
        __table__ = Base.metadata.tables['thread']

    # Get data from columns-to-be-dropped
    with session_scope() as db_session:
        results = db_session.query(Thread_.id, Thread_.g_thrid).all()

    to_insert = [dict(id=r[0], g_thrid=r[1]) for r in results]

    # Add new columns
    op.add_column('thread', sa.Column('type', sa.String(16)))

    # Create new table, insert data
    # The table
    op.create_table('imapthread',
                    sa.Column('g_thrid', sa.BigInteger(), nullable=True,
                              index=True),
                    sa.Column('id', sa.Integer()),
                    sa.ForeignKeyConstraint(['id'], ['thread.id'],
                                            ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('id'))

    # The ad-hoc table for insert
    table_ = table('imapthread',
                   column('g_thrid', sa.BigInteger),
                   column('id', sa.Integer))
    if to_insert:
        op.bulk_insert(table_, to_insert)

    # Drop columns now
    op.drop_column('thread', 'g_thrid')
Example #28
    def test_insert_returning(self):
        table1 = table(
            "mytable",
            column("myid", Integer),
            column("name", String(128)),
            column("description", String(128)),
        )
        i = insert(table1, values=dict(name="foo")).returning(
            table1.c.myid, table1.c.name
        )
        self.assert_compile(
            i,
            "INSERT INTO mytable (name) OUTPUT "
            "inserted.myid, inserted.name VALUES "
            "(:name)",
        )
        i = insert(table1, values=dict(name="foo")).returning(table1)
        self.assert_compile(
            i,
            "INSERT INTO mytable (name) OUTPUT "
            "inserted.myid, inserted.name, "
            "inserted.description VALUES (:name)",
        )
        i = insert(table1, values=dict(name="foo")).returning(
            func.length(table1.c.name)
        )
        self.assert_compile(
            i,
            "INSERT INTO mytable (name) OUTPUT "
            "LEN(inserted.name) AS length_1 VALUES "
            "(:name)",
        )
Example #29
def downgrade_imapthread():
    class ImapThread_(Base):
        __table__ = Base.metadata.tables['imapthread']

    # Get data from table-to-be-dropped
    with session_scope() as db_session:
        results = db_session.query(ImapThread_.id, ImapThread_.g_thrid).all()
    to_insert = [dict(id=r[0], g_thrid=r[1]) for r in results]

    # Drop columns, add new columns + insert data
    op.drop_column('thread', 'type')
    op.add_column('thread', sa.Column('g_thrid', sa.BigInteger(),
                                      nullable=True, index=True))
    table_ = table('thread',
                   column('g_thrid', sa.BigInteger),
                   column('id', sa.Integer))

    for r in to_insert:
        op.execute(
            table_.update().
            where(table_.c.id == r['id']).
            values({'g_thrid': r['g_thrid']})
        )

    # Drop table
    op.drop_table('imapthread')
Example #30
    def test_delete_extra_froms(self):
        t1 = table('t1', column('c1'))
        t2 = table('t2', column('c1'))
        q = sql.delete(t1).where(t1.c.c1 == t2.c.c1)
        self.assert_compile(
            q, "DELETE FROM t1 USING t1, t2 WHERE t1.c1 = t2.c1"
        )
import sqlalchemy as sa

import lifeloopweb.db.models
import lifeloopweb.db.utils
from sqlalchemy.dialects import mysql

from sqlalchemy.sql import table, column
from sqlalchemy import String, Binary, Integer

from alembic import op  # needed for op.get_bind() / op.bulk_insert below

# revision identifiers, used by Alembic.
revision = '314e77a901e4'
down_revision = '8caf035d7fea'
branch_labels = None
depends_on = None

age_ranges = table('age_ranges', column('id', Binary),
                   column('priority', Integer), column('description', String))


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    conn = op.get_bind()
    conn.execute(age_ranges.update().where(
        age_ranges.c.description == 'Any Age: 13+').values(
            description='Any Age'))
    op.bulk_insert(age_ranges, [{
        'id': lifeloopweb.db.utils.generate_guid().bytes,
        'priority': 150,
        'description': 'Teens'
    }, {
        'id': lifeloopweb.db.utils.generate_guid().bytes,
def _insert_operation_form_field():
    tb = table(
        'operation_form_field',
        column('id', Integer),
        column('name', String),
        column('type', String),
        column('required', Integer),
        column('order', Integer),
        column('default', Text),
        column('suggested_widget', String),
        column('values_url', String),
        column('values', String),
        column('scope', String),
        column('form_id', Integer),
    )

    columns = ('id', 'name', 'type', 'required', 'order', 'default',
               'suggested_widget', 'values_url', 'values', 'scope', 'form_id')
    data = [
        #Dataset
        (5171, 'dataset', 'TEXT', 1, 1, None, 'text', None, None, 'EXECUTION',
         5171),
        #Train/Validation/test split
        (5172, 'train_validation_test_split', 'TEXT', 0, 2, '60%-20%-20%',
         'text', None, None, 'EXECUTION', 5172),
        #K-fold cross validation
        (5173, 'use_k_fold_cross_validation', 'INTEGER', 0, 3, None,
         'checkbox', None, None, 'EXECUTION', 5173),
        #Train/Validation/test split
        (5174, 'percent_of_train_data', 'INTEGER', 0, 4, None, 'integer', None,
         None, 'EXECUTION', 5174),
        #Shuffle data
        (5175, 'shuffle_data', 'INTEGER', 0, 5, None, 'checkbox', None, None,
         'EXECUTION', 5175),
        (5176, 'load_dataset_in_memory', 'TEXT', 0, 6, 'one batch at a time',
         'dropdown', None,
         json.dumps([
             {
                 "key": "one batch at a time",
                 "value": "one batch at a time"
             },
             {
                 "key": "full dataset",
                 "value": "full dataset"
             },
         ]), 'EXECUTION', 5176),
        (5177, 'seed', 'INTEGER', 0, 7, 17, 'integer', None, None, 'EXECUTION',
         5177),
    ]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('course',
                    'isStandard',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=True)
    op.alter_column('event',
                    'isOnCalendar',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=True)
    op.alter_column('service',
                    'isCalendarBlocked',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=True)
    op.alter_column('sponsor',
                    'isLogoReceived',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=True)
    op.alter_column('sponsor',
                    'isSponsorThankedFB',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=True)
    op.alter_column('sponsor',
                    'isWebsiteUpdated',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=True)
    op.add_column('sponsorlevel',
                  sa.Column('treatment', sa.String(length=32), nullable=True))
    op.alter_column('sponsorlevel',
                    'display',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=True)
    op.alter_column('sponsorrace',
                    'display',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=True)
    op.alter_column('sponsorrace',
                    'isRDCertified',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=True)
    op.alter_column('sponsortag',
                    'isBuiltIn',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=True)
    op.alter_column('tag',
                    'isBuiltIn',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=True)
    op.alter_column('user',
                    'active',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=True)
    # ### end Alembic commands ###

    sponsorlevel = table(
        'sponsorlevel',
        column('treatment', sa.String()),
    )
    op.execute(
        sponsorlevel.update().\
            values({'treatment':op.inline_literal('summarize')})
    )
Example #34
    def get_sqla_query(  # sqla
        self,
        groupby,
        metrics,
        granularity,
        from_dttm,
        to_dttm,
        filter=None,  # noqa
        is_timeseries=True,
        timeseries_limit=15,
        timeseries_limit_metric=None,
        row_limit=None,
        inner_from_dttm=None,
        inner_to_dttm=None,
        orderby=None,
        extras=None,
        columns=None,
        form_data=None,
        order_desc=True,
        prequeries=None,
        is_prequery=False,
    ):
        """Querying any sqla table from this common interface"""
        template_kwargs = {
            'from_dttm': from_dttm,
            'groupby': groupby,
            'metrics': metrics,
            'row_limit': row_limit,
            'to_dttm': to_dttm,
            'form_data': form_data,
        }
        template_processor = self.get_template_processor(**template_kwargs)
        db_engine_spec = self.database.db_engine_spec

        orderby = orderby or []

        # For backward compatibility
        if granularity not in self.dttm_cols:
            granularity = self.main_dttm_col

        # Database spec supports join-free timeslot grouping
        time_groupby_inline = db_engine_spec.time_groupby_inline

        cols = {col.column_name: col for col in self.columns}
        metrics_dict = {m.metric_name: m for m in self.metrics}

        if not granularity and is_timeseries:
            raise Exception(
                _('Datetime column not provided as part of table configuration '
                  'and is required by this type of chart'))
        if not groupby and not metrics and not columns:
            raise Exception(_('Empty query?'))
        for m in metrics:
            if m not in metrics_dict:
                raise Exception(_("Metric '{}' is not valid".format(m)))
        metrics_exprs = [metrics_dict.get(m).sqla_col for m in metrics]
        if metrics_exprs:
            main_metric_expr = metrics_exprs[0]
        else:
            main_metric_expr = literal_column('COUNT(*)').label('ccount')

        select_exprs = []
        groupby_exprs = []

        if groupby:
            select_exprs = []
            inner_select_exprs = []
            inner_groupby_exprs = []
            for s in groupby:
                col = cols[s]
                outer = col.sqla_col
                inner = col.sqla_col.label(col.column_name + '__')

                groupby_exprs.append(outer)
                select_exprs.append(outer)
                inner_groupby_exprs.append(inner)
                inner_select_exprs.append(inner)
        elif columns:
            for s in columns:
                select_exprs.append(cols[s].sqla_col)
            metrics_exprs = []

        if granularity:
            dttm_col = cols[granularity]
            time_grain = extras.get('time_grain_sqla')
            time_filters = []

            if is_timeseries:
                timestamp = dttm_col.get_timestamp_expression(time_grain)
                select_exprs += [timestamp]
                groupby_exprs += [timestamp]

            # Use main dttm column to support index with secondary dttm columns
            if db_engine_spec.time_secondary_columns and \
                    self.main_dttm_col in self.dttm_cols and \
                    self.main_dttm_col != dttm_col.column_name:
                time_filters.append(cols[self.main_dttm_col].get_time_filter(
                    from_dttm, to_dttm))
            time_filters.append(dttm_col.get_time_filter(from_dttm, to_dttm))

        select_exprs += metrics_exprs
        qry = sa.select(select_exprs)

        tbl = self.get_from_clause(template_processor, db_engine_spec)

        if not columns:
            qry = qry.group_by(*groupby_exprs)

        where_clause_and = []
        having_clause_and = []
        for flt in filter:
            if not all([flt.get(s) for s in ['col', 'op', 'val']]):
                continue
            col = flt['col']
            op = flt['op']
            eq = flt['val']
            col_obj = cols.get(col)
            if col_obj:
                if op in ('in', 'not in'):
                    values = []
                    for v in eq:
                        # For backwards compatibility and edge cases
                        # where a column data type might have changed
                        if isinstance(v, basestring):
                            v = v.strip("'").strip('"')
                            if col_obj.is_num:
                                v = utils.string_to_num(v)

                        # Removing empty strings and non numeric values
                        # targeting numeric columns
                        if v is not None:
                            values.append(v)
                    cond = col_obj.sqla_col.in_(values)
                    if op == 'not in':
                        cond = ~cond
                    where_clause_and.append(cond)
                else:
                    if col_obj.is_num:
                        eq = utils.string_to_num(flt['val'])
                    if op == '==':
                        where_clause_and.append(col_obj.sqla_col == eq)
                    elif op == '!=':
                        where_clause_and.append(col_obj.sqla_col != eq)
                    elif op == '>':
                        where_clause_and.append(col_obj.sqla_col > eq)
                    elif op == '<':
                        where_clause_and.append(col_obj.sqla_col < eq)
                    elif op == '>=':
                        where_clause_and.append(col_obj.sqla_col >= eq)
                    elif op == '<=':
                        where_clause_and.append(col_obj.sqla_col <= eq)
                    elif op == 'LIKE':
                        where_clause_and.append(col_obj.sqla_col.like(eq))
        if extras:
            where = extras.get('where')
            if where:
                where = template_processor.process_template(where)
                where_clause_and += [sa.text('({})'.format(where))]
            having = extras.get('having')
            if having:
                having = template_processor.process_template(having)
                having_clause_and += [sa.text('({})'.format(having))]
        if granularity:
            qry = qry.where(and_(*(time_filters + where_clause_and)))
        else:
            qry = qry.where(and_(*where_clause_and))
        qry = qry.having(and_(*having_clause_and))

        if not orderby and not columns:
            orderby = [(main_metric_expr, not order_desc)]

        for col, ascending in orderby:
            direction = asc if ascending else desc
            qry = qry.order_by(direction(col))

        if row_limit:
            qry = qry.limit(row_limit)

        if is_timeseries and \
                timeseries_limit and groupby and not time_groupby_inline:
            if self.database.db_engine_spec.inner_joins:
                # some sql dialects require for order by expressions
                # to also be in the select clause -- others, e.g. vertica,
                # require a unique inner alias
                inner_main_metric_expr = main_metric_expr.label('mme_inner__')
                inner_select_exprs += [inner_main_metric_expr]
                subq = select(inner_select_exprs)
                subq = subq.select_from(tbl)
                inner_time_filter = dttm_col.get_time_filter(
                    inner_from_dttm or from_dttm,
                    inner_to_dttm or to_dttm,
                )
                subq = subq.where(
                    and_(*(where_clause_and + [inner_time_filter])))
                subq = subq.group_by(*inner_groupby_exprs)

                ob = inner_main_metric_expr
                if timeseries_limit_metric:
                    timeseries_limit_metric = metrics_dict.get(
                        timeseries_limit_metric)
                    ob = timeseries_limit_metric.sqla_col
                direction = desc if order_desc else asc
                subq = subq.order_by(direction(ob))
                subq = subq.limit(timeseries_limit)

                on_clause = []
                for i, gb in enumerate(groupby):
                    on_clause.append(groupby_exprs[i] == column(gb + '__'))

                tbl = tbl.join(subq.alias(), and_(*on_clause))
            else:
                # run subquery to get top groups
                subquery_obj = {
                    'prequeries': prequeries,
                    'is_prequery': True,
                    'is_timeseries': False,
                    'row_limit': timeseries_limit,
                    'groupby': groupby,
                    'metrics': metrics,
                    'granularity': granularity,
                    'from_dttm': inner_from_dttm or from_dttm,
                    'to_dttm': inner_to_dttm or to_dttm,
                    'filter': filter,
                    'orderby': orderby,
                    'extras': extras,
                    'columns': columns,
                    'form_data': form_data,
                    'order_desc': True,
                }
                result = self.query(subquery_obj)
                dimensions = [c for c in result.df.columns if c not in metrics]
                top_groups = self._get_top_groups(result.df, dimensions)
                qry = qry.where(top_groups)

        return qry.select_from(tbl)
Example #35
def upgrade():
    # Create an ad-hoc table to use for the insert statement.
    item_table = table('item',
                       column('model', String),
                       column('brand_id', Integer),
                       column('gender_id', Integer),
                       column('small_image_url', String),
                       column('medium_image_url', String),
                       column('type', String)
                       )
    # 5|scarpa|EUR
    op.bulk_insert(
        item_table, [
            {'model': 'quantic', 'brand_id': '5',
                'gender_id': '1', 'type': 'rock'},
            {'model': 'quantic', 'brand_id': '5',
                'gender_id': '2', 'type': 'rock'},
            {'model': 'rapid gtx', 'brand_id': '5',
                'gender_id': '1', 'type': 'approach'},
            {'model': 'rapid gtx', 'brand_id': '5',
                'gender_id': '2', 'type': 'approach'},
            {'model': 'rapid', 'brand_id': '5',
                'gender_id': '1', 'type': 'approach'},
            {'model': 'rapid', 'brand_id': '5',
                'gender_id': '2', 'type': 'approach'},
            {'model': 'kalipe gtx', 'brand_id': '5',
                'gender_id': '1', 'type': 'approach'},
            {'model': 'kalipe gtx', 'brand_id': '5',
                'gender_id': '2', 'type': 'approach'},
            {'model': 'kalipe', 'brand_id': '5',
                'gender_id': '1', 'type': 'approach'},
            {'model': 'kalipe', 'brand_id': '5',
                'gender_id': '2', 'type': 'approach'},   
            {'model': 'crux air', 'brand_id': '5',
                'gender_id': '1', 'type': 'approach'},
            {'model': 'crux air', 'brand_id': '5',
                'gender_id': '2', 'type': 'approach'},                               
        ])

    # 3|la sportiva|EUR
    op.bulk_insert(
        item_table, [
            {'model': 'kubo', 'brand_id': '3',
                'gender_id': '1', 'type': 'rock'},
            {'model': 'kubo', 'brand_id': '3',
                'gender_id': '2', 'type': 'rock'},
        ])
    # 12|butora|US
    op.bulk_insert(
        item_table, [
            {'model': 'wing', 'brand_id': '12',
                'gender_id': '3', 'type': 'approach'},
            {'model': 'icarus', 'brand_id': '12',
                'gender_id': '3', 'type': 'approach'},
        ])
    # 2|five ten|US
    op.bulk_insert(
        item_table, [
            {'model': 'crawe', 'brand_id': '2',
                'gender_id': '1', 'type': 'rock'},
            {'model': 'crawe', 'brand_id': '2',
                'gender_id': '2', 'type': 'rock'},
            {'model': 'NIAD Lace', 'brand_id': '2',
                'gender_id': '1', 'type': 'rock'},
            {'model': 'NIAD Lace', 'brand_id': '2',
                'gender_id': '2', 'type': 'rock'},      
            {'model': 'NIAD VCS', 'brand_id': '2',
                'gender_id': '1', 'type': 'rock'},
            {'model': 'NIAD VCS', 'brand_id': '2',
                'gender_id': '2', 'type': 'rock'},
        ])

    # 27|black diamond|US
    op.bulk_insert(
        item_table, [
            {'model': 'fuel', 'brand_id': '27',
                'gender_id': '1', 'type': 'approach'},
            {'model': 'fuel', 'brand_id': '27',
                'gender_id': '2', 'type': 'approach'},      
            {'model': 'prime', 'brand_id': '27',
                'gender_id': '1', 'type': 'approach'},
            {'model': 'prime', 'brand_id': '27',
                'gender_id': '2', 'type': 'approach'},     
            {'model': 'session', 'brand_id': '27',
                'gender_id': '1', 'type': 'approach'},
            {'model': 'session', 'brand_id': '27',
                'gender_id': '2', 'type': 'approach'},       
            {'model': 'session suede', 'brand_id': '27',
                'gender_id': '1', 'type': 'approach'},
            {'model': 'session suede', 'brand_id': '27',
                'gender_id': '2', 'type': 'approach'},    
            {'model': 'mission lt', 'brand_id': '27',
                'gender_id': '1', 'type': 'approach'},
            {'model': 'mission lt', 'brand_id': '27',
                'gender_id': '2', 'type': 'approach'},     
            {'model': 'circuit', 'brand_id': '27',
                'gender_id': '1', 'type': 'approach'},
            {'model': 'circuit', 'brand_id': '27',
                'gender_id': '2', 'type': 'approach'},      
            {'model': 'technician', 'brand_id': '27',
                'gender_id': '1', 'type': 'approach'},
            {'model': 'technician', 'brand_id': '27',
                'gender_id': '2', 'type': 'approach'},                                                                                                      
        ])
Example #36
    def test_quoting_initial_chars(self):
        self.assert_compile(column("_somecol"), '"_somecol"')
        self.assert_compile(column("$somecol"), '"$somecol"')
Example #37
    def get_sqla_query(  # sqla
        self,
        metrics,
        granularity,
        from_dttm,
        to_dttm,
        columns=None,
        groupby=None,
        filter=None,
        is_timeseries=True,
        timeseries_limit=15,
        timeseries_limit_metric=None,
        row_limit=None,
        inner_from_dttm=None,
        inner_to_dttm=None,
        orderby=None,
        extras=None,
        order_desc=True,
    ) -> SqlaQuery:
        """Querying any sqla table from this common interface"""
        template_kwargs = {
            "from_dttm": from_dttm,
            "groupby": groupby,
            "metrics": metrics,
            "row_limit": row_limit,
            "to_dttm": to_dttm,
            "filter": filter,
            "columns": {col.column_name: col
                        for col in self.columns},
        }
        is_sip_38 = is_feature_enabled("SIP_38_VIZ_REARCHITECTURE")
        template_kwargs.update(self.template_params_dict)
        extra_cache_keys: List[Any] = []
        template_kwargs["extra_cache_keys"] = extra_cache_keys
        template_processor = self.get_template_processor(**template_kwargs)
        db_engine_spec = self.database.db_engine_spec
        prequeries: List[str] = []

        orderby = orderby or []

        # For backward compatibility
        if granularity not in self.dttm_cols:
            granularity = self.main_dttm_col

        # Database spec supports join-free timeslot grouping
        time_groupby_inline = db_engine_spec.time_groupby_inline

        cols: Dict[str,
                   Column] = {col.column_name: col
                              for col in self.columns}
        metrics_dict: Dict[str, SqlMetric] = {
            m.metric_name: m
            for m in self.metrics
        }

        if not granularity and is_timeseries:
            raise Exception(
                _("Datetime column not provided as part table configuration "
                  "and is required by this type of chart"))
        if (not metrics and not columns
                and (is_sip_38 or (not is_sip_38 and not groupby))):
            raise Exception(_("Empty query?"))
        metrics_exprs: List[ColumnElement] = []
        for m in metrics:
            if utils.is_adhoc_metric(m):
                metrics_exprs.append(self.adhoc_metric_to_sqla(m, cols))
            elif m in metrics_dict:
                metrics_exprs.append(metrics_dict[m].get_sqla_col())
            else:
                raise Exception(
                    _("Metric '%(metric)s' does not exist", metric=m))
        if metrics_exprs:
            main_metric_expr = metrics_exprs[0]
        else:
            main_metric_expr, label = literal_column("COUNT(*)"), "ccount"
            main_metric_expr = self.make_sqla_column_compatible(
                main_metric_expr, label)

        select_exprs: List[Column] = []
        groupby_exprs_sans_timestamp: OrderedDict = OrderedDict()

        if (is_sip_38 and metrics and columns) or (not is_sip_38 and groupby):
            # dedup columns while preserving order
            groupby = list(dict.fromkeys(columns if is_sip_38 else groupby))

            select_exprs = []
            for s in groupby:
                if s in cols:
                    outer = cols[s].get_sqla_col()
                else:
                    outer = literal_column(f"({s})")
                    outer = self.make_sqla_column_compatible(outer, s)

                groupby_exprs_sans_timestamp[outer.name] = outer
                select_exprs.append(outer)
        elif columns:
            for s in columns:
                select_exprs.append(
                    cols[s].get_sqla_col() if s in cols else self.
                    make_sqla_column_compatible(literal_column(s)))
            metrics_exprs = []

        time_range_endpoints = extras.get("time_range_endpoints")
        groupby_exprs_with_timestamp = OrderedDict(
            groupby_exprs_sans_timestamp.items())
        if granularity:
            dttm_col = cols[granularity]
            time_grain = extras.get("time_grain_sqla")
            time_filters = []

            if is_timeseries:
                timestamp = dttm_col.get_timestamp_expression(time_grain)
                select_exprs += [timestamp]
                groupby_exprs_with_timestamp[timestamp.name] = timestamp

            # Use main dttm column to support index with secondary dttm columns.
            if (db_engine_spec.time_secondary_columns
                    and self.main_dttm_col in self.dttm_cols
                    and self.main_dttm_col != dttm_col.column_name):
                time_filters.append(cols[self.main_dttm_col].get_time_filter(
                    from_dttm, to_dttm, time_range_endpoints))
            time_filters.append(
                dttm_col.get_time_filter(from_dttm, to_dttm,
                                         time_range_endpoints))

        select_exprs += metrics_exprs

        labels_expected = [c._df_label_expected for c in select_exprs]

        select_exprs = db_engine_spec.make_select_compatible(
            groupby_exprs_with_timestamp.values(), select_exprs)
        qry = sa.select(select_exprs)

        tbl = self.get_from_clause(template_processor)

        if (is_sip_38 and metrics) or (not is_sip_38 and not columns):
            qry = qry.group_by(*groupby_exprs_with_timestamp.values())

        where_clause_and = []
        having_clause_and: List = []
        for flt in filter:
            if not all([flt.get(s) for s in ["col", "op"]]):
                continue
            col = flt["col"]
            op = flt["op"].upper()
            col_obj = cols.get(col)
            if col_obj:
                is_list_target = op in (
                    utils.FilterOperator.IN.value,
                    utils.FilterOperator.NOT_IN.value,
                )
                eq = self.filter_values_handler(
                    values=flt.get("val"),
                    target_column_is_numeric=col_obj.is_numeric,
                    is_list_target=is_list_target,
                )
                if op in (
                        utils.FilterOperator.IN.value,
                        utils.FilterOperator.NOT_IN.value,
                ):
                    cond = col_obj.get_sqla_col().in_(eq)
                    if isinstance(eq, str) and NULL_STRING in eq:
                        cond = or_(cond, col_obj.get_sqla_col() == None)  # noqa
                    if op == utils.FilterOperator.NOT_IN.value:
                        cond = ~cond
                    where_clause_and.append(cond)
                else:
                    if col_obj.is_numeric:
                        eq = utils.cast_to_num(flt["val"])
                    if op == utils.FilterOperator.EQUALS.value:
                        where_clause_and.append(col_obj.get_sqla_col() == eq)
                    elif op == utils.FilterOperator.NOT_EQUALS.value:
                        where_clause_and.append(col_obj.get_sqla_col() != eq)
                    elif op == utils.FilterOperator.GREATER_THAN.value:
                        where_clause_and.append(col_obj.get_sqla_col() > eq)
                    elif op == utils.FilterOperator.LESS_THAN.value:
                        where_clause_and.append(col_obj.get_sqla_col() < eq)
                    elif op == utils.FilterOperator.GREATER_THAN_OR_EQUALS.value:
                        where_clause_and.append(col_obj.get_sqla_col() >= eq)
                    elif op == utils.FilterOperator.LESS_THAN_OR_EQUALS.value:
                        where_clause_and.append(col_obj.get_sqla_col() <= eq)
                    elif op == utils.FilterOperator.LIKE.value:
                        where_clause_and.append(
                            col_obj.get_sqla_col().like(eq))
                    elif op == utils.FilterOperator.IS_NULL.value:
                        where_clause_and.append(col_obj.get_sqla_col() == None)
                    elif op == utils.FilterOperator.IS_NOT_NULL.value:
                        where_clause_and.append(col_obj.get_sqla_col() != None)
                    else:
                        raise Exception(
                            _("Invalid filter operation type: %(op)s", op=op))
        if config["ENABLE_ROW_LEVEL_SECURITY"]:
            where_clause_and += self._get_sqla_row_level_filters(
                template_processor)
        if extras:
            where = extras.get("where")
            if where:
                where = template_processor.process_template(where)
                where_clause_and += [sa.text("({})".format(where))]
            having = extras.get("having")
            if having:
                having = template_processor.process_template(having)
                having_clause_and += [sa.text("({})".format(having))]
        if granularity:
            qry = qry.where(and_(*(time_filters + where_clause_and)))
        else:
            qry = qry.where(and_(*where_clause_and))
        qry = qry.having(and_(*having_clause_and))

        if not orderby and ((is_sip_38 and metrics) or
                            (not is_sip_38 and not columns)):
            orderby = [(main_metric_expr, not order_desc)]

        # To ensure correct handling of the ORDER BY labeling we need to reference the
        # metric instance if defined in the SELECT clause.
        metrics_exprs_by_label = {m._label: m for m in metrics_exprs}

        for col, ascending in orderby:
            direction = asc if ascending else desc
            if utils.is_adhoc_metric(col):
                col = self.adhoc_metric_to_sqla(col, cols)
            elif col in cols:
                col = cols[col].get_sqla_col()

            if isinstance(col, Label) and col._label in metrics_exprs_by_label:
                col = metrics_exprs_by_label[col._label]

            qry = qry.order_by(direction(col))

        if row_limit:
            qry = qry.limit(row_limit)

        if (is_timeseries and timeseries_limit and not time_groupby_inline
                and ((is_sip_38 and columns) or (not is_sip_38 and groupby))):
            if self.database.db_engine_spec.allows_joins:
                # some SQL dialects require ORDER BY expressions to also
                # appear in the SELECT clause -- others, e.g. Vertica,
                # require a unique inner alias
                inner_main_metric_expr = self.make_sqla_column_compatible(
                    main_metric_expr, "mme_inner__")
                inner_groupby_exprs = []
                inner_select_exprs = []
                for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
                    inner = self.make_sqla_column_compatible(
                        gby_obj, gby_name + "__")
                    inner_groupby_exprs.append(inner)
                    inner_select_exprs.append(inner)

                inner_select_exprs += [inner_main_metric_expr]
                subq = select(inner_select_exprs).select_from(tbl)
                inner_time_filter = dttm_col.get_time_filter(
                    inner_from_dttm or from_dttm,
                    inner_to_dttm or to_dttm,
                    time_range_endpoints,
                )
                subq = subq.where(
                    and_(*(where_clause_and + [inner_time_filter])))
                subq = subq.group_by(*inner_groupby_exprs)

                ob = inner_main_metric_expr
                if timeseries_limit_metric:
                    ob = self._get_timeseries_orderby(timeseries_limit_metric,
                                                      metrics_dict, cols)
                direction = desc if order_desc else asc
                subq = subq.order_by(direction(ob))
                subq = subq.limit(timeseries_limit)

                on_clause = []
                for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
                    # in this case the column name, not the alias, needs to be
                    # conditionally mutated, as it refers to the column alias in
                    # the inner query
                    col_name = db_engine_spec.make_label_compatible(gby_name +
                                                                    "__")
                    on_clause.append(gby_obj == column(col_name))

                tbl = tbl.join(subq.alias(), and_(*on_clause))
            else:
                if timeseries_limit_metric:
                    orderby = [(
                        self._get_timeseries_orderby(timeseries_limit_metric,
                                                     metrics_dict, cols),
                        False,
                    )]

                # run prequery to get top groups
                prequery_obj = {
                    "is_timeseries": False,
                    "row_limit": timeseries_limit,
                    "metrics": metrics,
                    "granularity": granularity,
                    "from_dttm": inner_from_dttm or from_dttm,
                    "to_dttm": inner_to_dttm or to_dttm,
                    "filter": filter,
                    "orderby": orderby,
                    "extras": extras,
                    "columns": columns,
                    "order_desc": True,
                }
                if not is_sip_38:
                    prequery_obj["groupby"] = groupby

                result = self.query(prequery_obj)
                prequeries.append(result.query)
                dimensions = [
                    c for c in result.df.columns
                    if c not in metrics and c in groupby_exprs_sans_timestamp
                ]
                top_groups = self._get_top_groups(
                    result.df, dimensions, groupby_exprs_sans_timestamp)
                qry = qry.where(top_groups)
        return SqlaQuery(
            extra_cache_keys=extra_cache_keys,
            labels_expected=labels_expected,
            sqla_query=qry.select_from(tbl),
            prequeries=prequeries,
        )
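# The method above ultimately assembles a plain SQLAlchemy Core statement out of
# lightweight table()/column() constructs. Below is a minimal, self-contained
# sketch of that pattern; the table and column names ("events", "region", "ts")
# are made up for illustration and are not part of the original code.
import sqlalchemy as sa
from sqlalchemy.sql import table, column, and_, literal_column

events = table("events", column("region"), column("ts"), column("value"))

sketch_qry = (
    sa.select([events.c.region, literal_column("COUNT(*)").label("ccount")])
    .select_from(events)
    .where(and_(events.c.ts >= "2014-01-01", events.c.ts < "2014-01-02"))
    .group_by(events.c.region)
    .order_by(sa.desc("ccount"))
    .limit(10)
)

# str() renders a dialect-agnostic version of the statement.
print(str(sketch_qry))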
Example #38
def load_world_bank_health_n_pop(only_metadata=False, force=False):
    """Loads the world bank health dataset, slices and a dashboard"""
    tbl_name = "wb_health_population"
    database = utils.get_example_database()
    table_exists = database.has_table_by_name(tbl_name)

    if not only_metadata and (not table_exists or force):
        data = get_example_data("countries.json.gz")
        pdf = pd.read_json(data)
        pdf.columns = [col.replace(".", "_") for col in pdf.columns]
        pdf.year = pd.to_datetime(pdf.year)
        pdf.to_sql(
            tbl_name,
            database.get_sqla_engine(),
            if_exists="replace",
            chunksize=50,
            dtype={
                "year": DateTime(),
                "country_code": String(3),
                "country_name": String(255),
                "region": String(255),
            },
            index=False,
        )

    print("Creating table [wb_health_population] reference")
    tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
    if not tbl:
        tbl = TBL(table_name=tbl_name)
    tbl.description = utils.readfile(
        os.path.join(EXAMPLES_FOLDER, "countries.md"))
    tbl.main_dttm_col = "year"
    tbl.database = database
    tbl.filter_select_enabled = True

    metrics = [
        "sum__SP_POP_TOTL",
        "sum__SH_DYN_AIDS",
        "sum__SH_DYN_AIDS",
        "sum__SP_RUR_TOTL_ZS",
        "sum__SP_DYN_LE00_IN",
        "sum__SP_RUR_TOTL",
    ]
    for m in metrics:
        if not any(col.metric_name == m for col in tbl.metrics):
            aggr_func = m[:3]
            col = str(column(m[5:]).compile(db.engine))
            tbl.metrics.append(
                SqlMetric(metric_name=m, expression=f"{aggr_func}({col})"))

    db.session.merge(tbl)
    db.session.commit()
    tbl.fetch_metadata()

    defaults = {
        "compare_lag": "10",
        "compare_suffix": "o10Y",
        "limit": "25",
        "granularity_sqla": "year",
        "groupby": [],
        "metric": "sum__SP_POP_TOTL",
        "metrics": ["sum__SP_POP_TOTL"],
        "row_limit": config["ROW_LIMIT"],
        "since": "2014-01-01",
        "until": "2014-01-02",
        "time_range": "2014-01-01 : 2014-01-02",
        "where": "",
        "markup_type": "markdown",
        "country_fieldtype": "cca3",
        "secondary_metric": {
            "aggregate": "SUM",
            "column": {
                "column_name": "SP_RUR_TOTL",
                "optionName": "_col_SP_RUR_TOTL",
                "type": "DOUBLE",
            },
            "expressionType": "SIMPLE",
            "hasCustomLabel": True,
            "label": "Rural Population",
        },
        "entity": "country_code",
        "show_bubbles": True,
    }

    print("Creating slices")
    slices = [
        Slice(
            slice_name="Region Filter",
            viz_type="filter_box",
            datasource_type="table",
            datasource_id=tbl.id,
            params=get_slice_json(
                defaults,
                viz_type="filter_box",
                date_filter=False,
                filter_configs=[
                    {
                        "asc": False,
                        "clearable": True,
                        "column": "region",
                        "key": "2s98dfu",
                        "metric": "sum__SP_POP_TOTL",
                        "multiple": True,
                    },
                    {
                        "asc": False,
                        "clearable": True,
                        "key": "li3j2lk",
                        "column": "country_name",
                        "metric": "sum__SP_POP_TOTL",
                        "multiple": True,
                    },
                ],
            ),
        ),
        Slice(
            slice_name="World's Population",
            viz_type="big_number",
            datasource_type="table",
            datasource_id=tbl.id,
            params=get_slice_json(
                defaults,
                since="2000",
                viz_type="big_number",
                compare_lag="10",
                metric="sum__SP_POP_TOTL",
                compare_suffix="over 10Y",
            ),
        ),
        Slice(
            slice_name="Most Populated Countries",
            viz_type="table",
            datasource_type="table",
            datasource_id=tbl.id,
            params=get_slice_json(
                defaults,
                viz_type="table",
                metrics=["sum__SP_POP_TOTL"],
                groupby=["country_name"],
            ),
        ),
        Slice(
            slice_name="Growth Rate",
            viz_type="line",
            datasource_type="table",
            datasource_id=tbl.id,
            params=get_slice_json(
                defaults,
                viz_type="line",
                since="1960-01-01",
                metrics=["sum__SP_POP_TOTL"],
                num_period_compare="10",
                groupby=["country_name"],
            ),
        ),
        Slice(
            slice_name="% Rural",
            viz_type="world_map",
            datasource_type="table",
            datasource_id=tbl.id,
            params=get_slice_json(
                defaults,
                viz_type="world_map",
                metric="sum__SP_RUR_TOTL_ZS",
                num_period_compare="10",
            ),
        ),
        Slice(
            slice_name="Life Expectancy VS Rural %",
            viz_type="bubble",
            datasource_type="table",
            datasource_id=tbl.id,
            params=get_slice_json(
                defaults,
                viz_type="bubble",
                since="2011-01-01",
                until="2011-01-02",
                series="region",
                limit=0,
                entity="country_name",
                x="sum__SP_RUR_TOTL_ZS",
                y="sum__SP_DYN_LE00_IN",
                size="sum__SP_POP_TOTL",
                max_bubble_size="50",
                filters=[{
                    "col":
                    "country_code",
                    "val": [
                        "TCA",
                        "MNP",
                        "DMA",
                        "MHL",
                        "MCO",
                        "SXM",
                        "CYM",
                        "TUV",
                        "IMY",
                        "KNA",
                        "ASM",
                        "ADO",
                        "AMA",
                        "PLW",
                    ],
                    "op":
                    "not in",
                }],
            ),
        ),
        Slice(
            slice_name="Rural Breakdown",
            viz_type="sunburst",
            datasource_type="table",
            datasource_id=tbl.id,
            params=get_slice_json(
                defaults,
                viz_type="sunburst",
                groupby=["region", "country_name"],
                since="2011-01-01",
                until="2011-01-01",
            ),
        ),
        Slice(
            slice_name="World's Pop Growth",
            viz_type="area",
            datasource_type="table",
            datasource_id=tbl.id,
            params=get_slice_json(
                defaults,
                since="1960-01-01",
                until="now",
                viz_type="area",
                groupby=["region"],
            ),
        ),
        Slice(
            slice_name="Box plot",
            viz_type="box_plot",
            datasource_type="table",
            datasource_id=tbl.id,
            params=get_slice_json(
                defaults,
                since="1960-01-01",
                until="now",
                whisker_options="Min/max (no outliers)",
                x_ticks_layout="staggered",
                viz_type="box_plot",
                groupby=["region"],
            ),
        ),
        Slice(
            slice_name="Treemap",
            viz_type="treemap",
            datasource_type="table",
            datasource_id=tbl.id,
            params=get_slice_json(
                defaults,
                since="1960-01-01",
                until="now",
                viz_type="treemap",
                metrics=["sum__SP_POP_TOTL"],
                groupby=["region", "country_code"],
            ),
        ),
        Slice(
            slice_name="Parallel Coordinates",
            viz_type="para",
            datasource_type="table",
            datasource_id=tbl.id,
            params=get_slice_json(
                defaults,
                since="2011-01-01",
                until="2011-01-01",
                viz_type="para",
                limit=100,
                metrics=[
                    "sum__SP_POP_TOTL", "sum__SP_RUR_TOTL_ZS",
                    "sum__SH_DYN_AIDS"
                ],
                secondary_metric="sum__SP_POP_TOTL",
                series="country_name",
            ),
        ),
    ]
    misc_dash_slices.add(slices[-1].slice_name)
    for slc in slices:
        merge_slice(slc)

    print("Creating a World's Health Bank dashboard")
    dash_name = "World Bank's Data"
    slug = "world_health"
    dash = db.session.query(Dash).filter_by(slug=slug).first()

    if not dash:
        dash = Dash()
    dash.published = True
    js = textwrap.dedent("""\
{
    "CHART-36bfc934": {
        "children": [],
        "id": "CHART-36bfc934",
        "meta": {
            "chartId": 40,
            "height": 25,
            "sliceName": "Region Filter",
            "width": 2
        },
        "type": "CHART"
    },
    "CHART-37982887": {
        "children": [],
        "id": "CHART-37982887",
        "meta": {
            "chartId": 41,
            "height": 25,
            "sliceName": "World's Population",
            "width": 2
        },
        "type": "CHART"
    },
    "CHART-17e0f8d8": {
        "children": [],
        "id": "CHART-17e0f8d8",
        "meta": {
            "chartId": 42,
            "height": 92,
            "sliceName": "Most Populated Countries",
            "width": 3
        },
        "type": "CHART"
    },
    "CHART-2ee52f30": {
        "children": [],
        "id": "CHART-2ee52f30",
        "meta": {
            "chartId": 43,
            "height": 38,
            "sliceName": "Growth Rate",
            "width": 6
        },
        "type": "CHART"
    },
    "CHART-2d5b6871": {
        "children": [],
        "id": "CHART-2d5b6871",
        "meta": {
            "chartId": 44,
            "height": 52,
            "sliceName": "% Rural",
            "width": 7
        },
        "type": "CHART"
    },
    "CHART-0fd0d252": {
        "children": [],
        "id": "CHART-0fd0d252",
        "meta": {
            "chartId": 45,
            "height": 50,
            "sliceName": "Life Expectancy VS Rural %",
            "width": 8
        },
        "type": "CHART"
    },
    "CHART-97f4cb48": {
        "children": [],
        "id": "CHART-97f4cb48",
        "meta": {
            "chartId": 46,
            "height": 38,
            "sliceName": "Rural Breakdown",
            "width": 3
        },
        "type": "CHART"
    },
    "CHART-b5e05d6f": {
        "children": [],
        "id": "CHART-b5e05d6f",
        "meta": {
            "chartId": 47,
            "height": 50,
            "sliceName": "World's Pop Growth",
            "width": 4
        },
        "type": "CHART"
    },
    "CHART-e76e9f5f": {
        "children": [],
        "id": "CHART-e76e9f5f",
        "meta": {
            "chartId": 48,
            "height": 50,
            "sliceName": "Box plot",
            "width": 4
        },
        "type": "CHART"
    },
    "CHART-a4808bba": {
        "children": [],
        "id": "CHART-a4808bba",
        "meta": {
            "chartId": 49,
            "height": 50,
            "sliceName": "Treemap",
            "width": 8
        },
        "type": "CHART"
    },
    "COLUMN-071bbbad": {
        "children": [
            "ROW-1e064e3c",
            "ROW-afdefba9"
        ],
        "id": "COLUMN-071bbbad",
        "meta": {
            "background": "BACKGROUND_TRANSPARENT",
            "width": 9
        },
        "type": "COLUMN"
    },
    "COLUMN-fe3914b8": {
        "children": [
            "CHART-36bfc934",
            "CHART-37982887"
        ],
        "id": "COLUMN-fe3914b8",
        "meta": {
            "background": "BACKGROUND_TRANSPARENT",
            "width": 2
        },
        "type": "COLUMN"
    },
    "GRID_ID": {
        "children": [
            "ROW-46632bc2",
            "ROW-3fa26c5d",
            "ROW-812b3f13"
        ],
        "id": "GRID_ID",
        "type": "GRID"
    },
    "HEADER_ID": {
        "id": "HEADER_ID",
        "meta": {
            "text": "World's Bank Data"
        },
        "type": "HEADER"
    },
    "ROOT_ID": {
        "children": [
            "GRID_ID"
        ],
        "id": "ROOT_ID",
        "type": "ROOT"
    },
    "ROW-1e064e3c": {
        "children": [
            "COLUMN-fe3914b8",
            "CHART-2d5b6871"
        ],
        "id": "ROW-1e064e3c",
        "meta": {
            "background": "BACKGROUND_TRANSPARENT"
        },
        "type": "ROW"
    },
    "ROW-3fa26c5d": {
        "children": [
            "CHART-b5e05d6f",
            "CHART-0fd0d252"
        ],
        "id": "ROW-3fa26c5d",
        "meta": {
            "background": "BACKGROUND_TRANSPARENT"
        },
        "type": "ROW"
    },
    "ROW-46632bc2": {
        "children": [
            "COLUMN-071bbbad",
            "CHART-17e0f8d8"
        ],
        "id": "ROW-46632bc2",
        "meta": {
            "background": "BACKGROUND_TRANSPARENT"
        },
        "type": "ROW"
    },
    "ROW-812b3f13": {
        "children": [
            "CHART-a4808bba",
            "CHART-e76e9f5f"
        ],
        "id": "ROW-812b3f13",
        "meta": {
            "background": "BACKGROUND_TRANSPARENT"
        },
        "type": "ROW"
    },
    "ROW-afdefba9": {
        "children": [
            "CHART-2ee52f30",
            "CHART-97f4cb48"
        ],
        "id": "ROW-afdefba9",
        "meta": {
            "background": "BACKGROUND_TRANSPARENT"
        },
        "type": "ROW"
    },
    "DASHBOARD_VERSION_KEY": "v2"
}
    """)
    pos = json.loads(js)
    update_slice_ids(pos, slices)

    dash.dashboard_title = dash_name
    dash.position_json = json.dumps(pos, indent=4)
    dash.slug = slug

    dash.slices = slices[:-1]
    db.session.merge(dash)
    db.session.commit()
Example #39
def mute_cond(row: Dict[str, Any]) -> Selectable:
    recipient_id = row['recipient_id']
    topic_name = row['topic_name']
    stream_cond = column("recipient_id") == recipient_id
    topic_cond = topic_match_sa(topic_name)
    return and_(stream_cond, topic_cond)
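# In the surrounding caller (not part of this snippet) such per-row conditions
# are typically OR-ed together and negated so that muted stream/topic pairs are
# excluded from the query. A hedged sketch of that combining step, substituting a
# plain column comparison for topic_match_sa and using made-up muted pairs:
from sqlalchemy.sql import and_, or_, not_, column

muted_pairs = [(17, "social"), (42, "random")]  # hypothetical (recipient_id, topic)

conditions = [
    and_(column("recipient_id") == recipient_id,
         column("subject") == topic_name)
    for recipient_id, topic_name in muted_pairs
]

# Exclude every muted stream/topic combination.
exclude_muted = not_(or_(*conditions))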
Example #40
Create Date: 2014-03-21 18:27:16.802081

"""

# revision identifiers, used by Alembic.
revision = '476e9e882141'
down_revision = '387cf5a9a3ad'

from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select, insert, and_, or_
import sqlalchemy as sa

context_implications_table = table(
    'context_implications',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('source_context_id', sa.Integer),
    column('context_scope', sa.String),
    column('source_context_scope', sa.String),
    column('updated_at', sa.DateTime),
    column('modified_by_id', sa.Integer),
)

contexts_table = table(
    'contexts',
    column('id', sa.Integer),
    column('related_object_id', sa.Integer),
    column('related_object_type', sa.String),
)
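# The upgrade()/downgrade() bodies are not included in this snippet. A hedged
# sketch of how lightweight table definitions like the ones above are typically
# used inside upgrade(): read rows through one table and bulk-insert derived rows
# into the other. The mapping below is purely illustrative, not the real migration.
def upgrade():
    connection = op.get_bind()
    contexts = connection.execute(
        select([contexts_table.c.id, contexts_table.c.related_object_type]))
    rows = [
        {
            'context_id': ctx.id,
            'source_context_id': None,
            'context_scope': ctx.related_object_type,
            'source_context_scope': None,
            'updated_at': datetime.now(),
            'modified_by_id': 1,
        }
        for ctx in contexts
    ]
    if rows:
        op.bulk_insert(context_implications_table, rows)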
Example #41
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###

    contacttype_table = table(
        'ContactTypes',
        column('name', String),
        column('description', String),
        column('isactive', Boolean),
    )
    op.execute(
        'Truncate table public."ContactTypes" RESTART IDENTITY CASCADE;commit;'
    )
    op.bulk_insert(contacttype_table, [
        {
            'name': 'Email',
            'description': 'Email',
            'isactive': True
        },
        {
            'name': 'Street Address',
            'description': 'Street Address',
            'isactive': True
        },
        {
            'name': 'Home Phone',
            'description': 'Home Phone',
            'isactive': True
        },
        {
            'name': 'Work Phone',
            'description': 'Work Phone',
            'isactive': True
        },
        {
            'name': 'Work Phone 2',
            'description': 'second work phone 2',
            'isactive': True
        },
        {
            'name': 'Mobile Phone',
            'description': 'Mobile Phone',
            'isactive': True
        },
        {
            'name': 'Other',
            'description': 'Other Contact information, if any',
            'isactive': True
        },
    ])

    op.create_table(
        'FOIRequestContactInformation',
        sa.Column('foirequestcontactid',
                  sa.Integer(),
                  autoincrement=True,
                  nullable=False),
        sa.Column('contactinformation', sa.String(length=500), nullable=False),
        sa.Column('dataformat', sa.String(length=40), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('createdby', sa.String(length=120), nullable=True),
        sa.Column('updatedby', sa.String(length=120), nullable=True),
        sa.Column('contacttypeid', sa.Integer(), nullable=True),
        sa.Column('foirequest_id', sa.Integer(), nullable=True),
        sa.Column('foirequestversion_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(
            ['contacttypeid'],
            ['ContactTypes.contacttypeid'],
        ),
        sa.ForeignKeyConstraint(
            ['foirequest_id', 'foirequestversion_id'],
            ['FOIRequests.foirequestid', 'FOIRequests.version'],
        ), sa.PrimaryKeyConstraint('foirequestcontactid'))
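# A corresponding downgrade is not shown in the snippet. A minimal sketch of what
# it would typically look like for this upgrade (illustrative only; the real
# migration may differ): drop the new table, then clear the seeded contact types.
def downgrade():
    op.drop_table('FOIRequestContactInformation')
    op.execute('Truncate table public."ContactTypes" RESTART IDENTITY CASCADE;')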
Example #42
    def get_sqla_query(  # sqla
        self,
        groupby,
        metrics,
        granularity,
        from_dttm,
        to_dttm,
        filter=None,  # noqa
        is_timeseries=True,
        timeseries_limit=15,
        timeseries_limit_metric=None,
        row_limit=None,
        inner_from_dttm=None,
        inner_to_dttm=None,
        orderby=None,
        extras=None,
        columns=None,
        order_desc=True,
    ):
        """Querying any sqla table from this common interface"""
        template_kwargs = {
            "from_dttm": from_dttm,
            "groupby": groupby,
            "metrics": metrics,
            "row_limit": row_limit,
            "to_dttm": to_dttm,
            "filter": filter,
            "columns": {col.column_name: col
                        for col in self.columns},
        }
        template_kwargs.update(self.template_params_dict)
        extra_cache_keys: List[Any] = []
        template_kwargs["extra_cache_keys"] = extra_cache_keys
        template_processor = self.get_template_processor(**template_kwargs)
        db_engine_spec = self.database.db_engine_spec
        prequeries: List[str] = []

        orderby = orderby or []

        # For backward compatibility
        if granularity not in self.dttm_cols:
            granularity = self.main_dttm_col

        # Database spec supports join-free timeslot grouping
        time_groupby_inline = db_engine_spec.time_groupby_inline

        cols = {col.column_name: col for col in self.columns}
        metrics_dict = {m.metric_name: m for m in self.metrics}

        if not granularity and is_timeseries:
            raise Exception(
                _("Datetime column not provided as part table configuration "
                  "and is required by this type of chart"))
        if not groupby and not metrics and not columns:
            raise Exception(_("Empty query?"))
        metrics_exprs = []
        for m in metrics:
            if utils.is_adhoc_metric(m):
                metrics_exprs.append(self.adhoc_metric_to_sqla(m, cols))
            elif m in metrics_dict:
                metrics_exprs.append(metrics_dict.get(m).get_sqla_col())
            else:
                raise Exception(
                    _("Metric '%(metric)s' does not exist", metric=m))
        if metrics_exprs:
            main_metric_expr = metrics_exprs[0]
        else:
            main_metric_expr, label = literal_column("COUNT(*)"), "ccount"
            main_metric_expr = self.make_sqla_column_compatible(
                main_metric_expr, label)

        select_exprs = []
        groupby_exprs_sans_timestamp = OrderedDict()

        if groupby:
            select_exprs = []
            for s in groupby:
                if s in cols:
                    outer = cols[s].get_sqla_col()
                else:
                    outer = literal_column(f"({s})")
                    outer = self.make_sqla_column_compatible(outer, s)

                groupby_exprs_sans_timestamp[outer.name] = outer
                select_exprs.append(outer)
        elif columns:
            for s in columns:
                select_exprs.append(
                    cols[s].get_sqla_col() if s in cols
                    else self.make_sqla_column_compatible(literal_column(s)))
            metrics_exprs = []

        groupby_exprs_with_timestamp = OrderedDict(
            groupby_exprs_sans_timestamp.items())
        if granularity:
            dttm_col = cols[granularity]
            time_grain = extras.get("time_grain_sqla")
            time_filters = []

            if is_timeseries:
                timestamp = dttm_col.get_timestamp_expression(time_grain)
                select_exprs += [timestamp]
                groupby_exprs_with_timestamp[timestamp.name] = timestamp

            # Use main dttm column to support index with secondary dttm columns
            if (db_engine_spec.time_secondary_columns
                    and self.main_dttm_col in self.dttm_cols
                    and self.main_dttm_col != dttm_col.column_name):
                time_filters.append(cols[self.main_dttm_col].get_time_filter(
                    from_dttm, to_dttm))
            time_filters.append(dttm_col.get_time_filter(from_dttm, to_dttm))

        select_exprs += metrics_exprs

        labels_expected = [c._df_label_expected for c in select_exprs]

        select_exprs = db_engine_spec.make_select_compatible(
            groupby_exprs_with_timestamp.values(), select_exprs)
        qry = sa.select(select_exprs)

        tbl = self.get_from_clause(template_processor)

        if not columns:
            qry = qry.group_by(*groupby_exprs_with_timestamp.values())

        where_clause_and = []
        having_clause_and = []
        for flt in filter:
            if not all([flt.get(s) for s in ["col", "op"]]):
                continue
            col = flt["col"]
            op = flt["op"]
            col_obj = cols.get(col)
            if col_obj:
                is_list_target = op in ("in", "not in")
                eq = self.filter_values_handler(
                    flt.get("val"),
                    target_column_is_numeric=col_obj.is_num,
                    is_list_target=is_list_target,
                )
                if op in ("in", "not in"):
                    cond = col_obj.get_sqla_col().in_(eq)
                    if "<NULL>" in eq:
                        cond = or_(cond,
                                   col_obj.get_sqla_col() == None)  # noqa
                    if op == "not in":
                        cond = ~cond
                    where_clause_and.append(cond)
                else:
                    if col_obj.is_num:
                        eq = utils.string_to_num(flt["val"])
                    if op == "==":
                        where_clause_and.append(col_obj.get_sqla_col() == eq)
                    elif op == "!=":
                        where_clause_and.append(col_obj.get_sqla_col() != eq)
                    elif op == ">":
                        where_clause_and.append(col_obj.get_sqla_col() > eq)
                    elif op == "<":
                        where_clause_and.append(col_obj.get_sqla_col() < eq)
                    elif op == ">=":
                        where_clause_and.append(col_obj.get_sqla_col() >= eq)
                    elif op == "<=":
                        where_clause_and.append(col_obj.get_sqla_col() <= eq)
                    elif op == "LIKE":
                        where_clause_and.append(
                            col_obj.get_sqla_col().like(eq))
                    elif op == "IS NULL":
                        where_clause_and.append(
                            col_obj.get_sqla_col() == None)  # noqa
                    elif op == "IS NOT NULL":
                        where_clause_and.append(
                            col_obj.get_sqla_col() != None)  # noqa
        if extras:
            where = extras.get("where")
            if where:
                where = template_processor.process_template(where)
                where_clause_and += [sa.text("({})".format(where))]
            having = extras.get("having")
            if having:
                having = template_processor.process_template(having)
                having_clause_and += [sa.text("({})".format(having))]
        if granularity:
            qry = qry.where(and_(*(time_filters + where_clause_and)))
        else:
            qry = qry.where(and_(*where_clause_and))
        qry = qry.having(and_(*having_clause_and))

        if not orderby and not columns:
            orderby = [(main_metric_expr, not order_desc)]

        for col, ascending in orderby:
            direction = asc if ascending else desc
            if utils.is_adhoc_metric(col):
                col = self.adhoc_metric_to_sqla(col, cols)
            elif col in cols:
                col = cols[col].get_sqla_col()
            qry = qry.order_by(direction(col))

        if row_limit:
            qry = qry.limit(row_limit)

        if is_timeseries and timeseries_limit and groupby and not time_groupby_inline:
            if self.database.db_engine_spec.allows_joins:
                # some SQL dialects require ORDER BY expressions to also
                # appear in the SELECT clause -- others, e.g. Vertica,
                # require a unique inner alias
                inner_main_metric_expr = self.make_sqla_column_compatible(
                    main_metric_expr, "mme_inner__")
                inner_groupby_exprs = []
                inner_select_exprs = []
                for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
                    inner = self.make_sqla_column_compatible(
                        gby_obj, gby_name + "__")
                    inner_groupby_exprs.append(inner)
                    inner_select_exprs.append(inner)

                inner_select_exprs += [inner_main_metric_expr]
                subq = select(inner_select_exprs).select_from(tbl)
                inner_time_filter = dttm_col.get_time_filter(
                    inner_from_dttm or from_dttm, inner_to_dttm or to_dttm)
                subq = subq.where(
                    and_(*(where_clause_and + [inner_time_filter])))
                subq = subq.group_by(*inner_groupby_exprs)

                ob = inner_main_metric_expr
                if timeseries_limit_metric:
                    ob = self._get_timeseries_orderby(timeseries_limit_metric,
                                                      metrics_dict, cols)
                direction = desc if order_desc else asc
                subq = subq.order_by(direction(ob))
                subq = subq.limit(timeseries_limit)

                on_clause = []
                for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
                    # in this case the column name, not the alias, needs to be
                    # conditionally mutated, as it refers to the column alias in
                    # the inner query
                    col_name = db_engine_spec.make_label_compatible(gby_name +
                                                                    "__")
                    on_clause.append(gby_obj == column(col_name))

                tbl = tbl.join(subq.alias(), and_(*on_clause))
            else:
                if timeseries_limit_metric:
                    orderby = [(
                        self._get_timeseries_orderby(timeseries_limit_metric,
                                                     metrics_dict, cols),
                        False,
                    )]

                # run prequery to get top groups
                prequery_obj = {
                    "is_timeseries": False,
                    "row_limit": timeseries_limit,
                    "groupby": groupby,
                    "metrics": metrics,
                    "granularity": granularity,
                    "from_dttm": inner_from_dttm or from_dttm,
                    "to_dttm": inner_to_dttm or to_dttm,
                    "filter": filter,
                    "orderby": orderby,
                    "extras": extras,
                    "columns": columns,
                    "order_desc": True,
                }
                result = self.query(prequery_obj)
                prequeries.append(result.query)
                dimensions = [
                    c for c in result.df.columns
                    if c not in metrics and c in groupby_exprs_sans_timestamp
                ]
                top_groups = self._get_top_groups(
                    result.df, dimensions, groupby_exprs_sans_timestamp)
                qry = qry.where(top_groups)

        return SqlaQuery(
            extra_cache_keys=extra_cache_keys,
            labels_expected=labels_expected,
            sqla_query=qry.select_from(tbl),
            prequeries=prequeries,
        )
Example #43
    def fetch_metadata(self):
        """Fetches the metadata for the table and merges it in"""
        try:
            table = self.get_sqla_table_object()
        except Exception:
            raise Exception(
                "Table doesn't seem to exist in the specified database, "
                "couldn't fetch column information")

        TC = TableColumn  # noqa shortcut to class
        M = SqlMetric  # noqa
        metrics = []
        any_date_col = None
        for col in table.columns:
            try:
                datatype = "{}".format(col.type).upper()
            except Exception as e:
                datatype = "UNKNOWN"
                logging.error(
                    "Unrecognized data type in {}.{}".format(table, col.name))
                logging.exception(e)
            dbcol = (
                db.session
                .query(TC)
                .filter(TC.table == self)
                .filter(TC.column_name == col.name)
                .first()
            )
            db.session.flush()
            if not dbcol:
                dbcol = TableColumn(column_name=col.name, type=datatype)
                dbcol.groupby = dbcol.is_string
                dbcol.filterable = dbcol.is_string
                dbcol.sum = dbcol.is_num
                dbcol.avg = dbcol.is_num
                dbcol.is_dttm = dbcol.is_time

            db.session.merge(self)
            self.columns.append(dbcol)

            if not any_date_col and dbcol.is_time:
                any_date_col = col.name

            quoted = "{}".format(
                column(dbcol.column_name).compile(dialect=db.engine.dialect))
            if dbcol.sum:
                metrics.append(M(
                    metric_name='sum__' + dbcol.column_name,
                    verbose_name='sum__' + dbcol.column_name,
                    metric_type='sum',
                    expression="SUM({})".format(quoted)
                ))
            if dbcol.avg:
                metrics.append(M(
                    metric_name='avg__' + dbcol.column_name,
                    verbose_name='avg__' + dbcol.column_name,
                    metric_type='avg',
                    expression="AVG({})".format(quoted)
                ))
            if dbcol.max:
                metrics.append(M(
                    metric_name='max__' + dbcol.column_name,
                    verbose_name='max__' + dbcol.column_name,
                    metric_type='max',
                    expression="MAX({})".format(quoted)
                ))
            if dbcol.min:
                metrics.append(M(
                    metric_name='min__' + dbcol.column_name,
                    verbose_name='min__' + dbcol.column_name,
                    metric_type='min',
                    expression="MIN({})".format(quoted)
                ))
            if dbcol.count_distinct:
                metrics.append(M(
                    metric_name='count_distinct__' + dbcol.column_name,
                    verbose_name='count_distinct__' + dbcol.column_name,
                    metric_type='count_distinct',
                    expression="COUNT(DISTINCT {})".format(quoted)
                ))
            dbcol.type = datatype
            db.session.merge(self)
            db.session.commit()

        metrics.append(M(
            metric_name='count',
            verbose_name='COUNT(*)',
            metric_type='count',
            expression="COUNT(*)"
        ))
        for metric in metrics:
            m = (
                db.session.query(M)
                .filter(M.metric_name == metric.metric_name)
                .filter(M.table_id == self.id)
                .first()
            )
            metric.table_id = self.id
            if not m:
                db.session.add(metric)
                db.session.commit()
        if not self.main_dttm_col:
            self.main_dttm_col = any_date_col
"""Make cti directory fields format strings

Revision ID: 2bb6543883a6
Revises: 444b39e9aa32

"""

# revision identifiers, used by Alembic.
revision = '2bb6543883a6'
down_revision = '444b39e9aa32'

from alembic import op
from sqlalchemy import sql, and_

fields_table = sql.table('ctidirectoryfields', sql.column('dir_id'),
                         sql.column('fieldname'), sql.column('value'))


def upgrade():
    conn = op.get_bind()
    rows = conn.execute(
        sql.select([
            fields_table.c.dir_id, fields_table.c.fieldname,
            fields_table.c.value
        ]).where(~fields_table.c.value.like('{%}')))
    to_upgrade = {(row.dir_id, row.fieldname): row.value for row in rows}
    for (dir_id, fieldname), value in to_upgrade.items():
        new_value = '{%s}' % '} {'.join(value.split(' '))
        op.execute(fields_table.update().where(
            and_(fields_table.c.dir_id == dir_id,
                 fields_table.c.fieldname == fieldname)).values(
                     value=new_value))
Example #45
def _table(name):
    return table(name, column('col1'), column('col2'), column('col3'))
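# A hedged usage sketch of the helper above: the lightweight tables it returns can
# be joined and selected from directly. The statement built here is illustrative.
from sqlalchemy.sql import select, table, column  # imports also assumed by the helper

t1 = _table('t1')
t2 = _table('t2')

stmt = select([t1.c.col1, t2.c.col2]).select_from(
    t1.join(t2, t1.c.col1 == t2.c.col1))
print(str(stmt))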
Example #46
def upgrade():
    # We repeat the data migration from migration c92f30c03b62, so we're sure
    # there are no NULL values left in the source column.
    connection = op.get_bind()

    # Inline table definition we can use in this migration.
    references = sql.table(
        'references', sql.column('id', sa.Integer()),
        sql.column('accession', sa.String(20)),
        sql.column(
            'source',
            sa.Enum('ncbi',
                    'ncbi_slice',
                    'lrg',
                    'url',
                    'upload',
                    name='reference_source')),
        sql.column('source_data', sa.String(255)),
        sql.column('geninfo_identifier', sa.String(13)),
        sql.column('slice_accession', sa.String(20)),
        sql.column('slice_start', sa.Integer()),
        sql.column('slice_stop', sa.Integer()),
        sql.column('slice_orientation',
                   sa.Enum('forward', 'reverse', name='slice_orientation')),
        sql.column('download_url', sa.String(255)))

    # Get all rows.
    result = connection.execute(references.select().with_only_columns([
        references.c.id, references.c.accession, references.c.source,
        references.c.source_data, references.c.geninfo_identifier,
        references.c.slice_accession, references.c.slice_start,
        references.c.slice_stop, references.c.slice_orientation,
        references.c.download_url
    ]))

    # Generate parameter values for the UPDATE query below.
    def update_params(r):
        data = None
        if r.source:
            source = r.source
            data = r.source_data
        if r.accession.startswith('LRG_'):
            source = 'lrg'
        elif r.slice_accession:
            source = 'ncbi_slice'
            data = '{}:{}:{}:{}'.format(r.slice_accession, r.slice_start,
                                        r.slice_stop, r.slice_orientation)
        elif r.download_url:
            source = 'url'
            data = r.download_url
        elif r.geninfo_identifier:
            source = 'ncbi'
        else:
            source = 'upload'
        return {'r_id': r.id, 'r_source': source, 'r_source_data': data}

    # Process a few rows at a time, since they will be read in memory.
    while True:
        chunk = result.fetchmany(1000)
        if not chunk:
            break

        # Populate `source` and `source_data` based on existing column values.
        statement = references.update().where(
            references.c.id == sql.bindparam('r_id')).values({
                'source':
                sql.bindparam('r_source'),
                'source_data':
                sql.bindparam('r_source_data')
            })

        # Execute UPDATE query for fetched rows.
        connection.execute(statement, [update_params(r) for r in chunk])

    # Unfortunately, SQLite doesn't support adding the NOT NULL constraint on
    # an existing column. We use batch_alter_table to workaround this.
    with op.batch_alter_table('references') as batch_op:
        batch_op.alter_column('source',
                              nullable=False,
                              existing_type=sa.Enum('ncbi',
                                                    'ncbi_slice',
                                                    'lrg',
                                                    'url',
                                                    'upload',
                                                    name='reference_source'))
"""Set Port.pxe_enabled to True if NULL

Revision ID: f6fdb920c182
Revises: 5ea1b0d310e
Create Date: 2016-02-12 16:53:21.008580

"""

from alembic import op
from sqlalchemy import Boolean, String
from sqlalchemy.sql import table, column, null

# revision identifiers, used by Alembic.
revision = 'f6fdb920c182'
down_revision = '5ea1b0d310e'

port = table('ports', column('uuid', String(36)),
             column('pxe_enabled', Boolean()))


def upgrade():
    op.execute(port.update().where(port.c.pxe_enabled == null()).values(
        {'pxe_enabled': True}))
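# For reference, the statement built above renders to a single UPDATE; printing it
# outside of a migration run is a quick way to inspect it (a sketch, not part of
# the original file). The expected output, roughly:
#   UPDATE ports SET pxe_enabled=:pxe_enabled WHERE ports.pxe_enabled IS NULL
print(port.update().where(port.c.pxe_enabled == null()).values(
    {'pxe_enabled': True}))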
Example #48
    def get_query_str(  # sqla
            self, engine, qry_start_dttm,
            groupby, metrics,
            granularity,
            from_dttm, to_dttm,
            filter=None,  # noqa
            is_timeseries=True,
            timeseries_limit=15,
            timeseries_limit_metric=None,
            row_limit=None,
            inner_from_dttm=None,
            inner_to_dttm=None,
            orderby=None,
            extras=None,
            columns=None):
        """Querying any sqla table from this common interface"""

        template_kwargs = {
            'from_dttm': from_dttm,
            'groupby': groupby,
            'metrics': metrics,
            'row_limit': row_limit,
            'to_dttm': to_dttm,
        }
        template_processor = self.get_template_processor(**template_kwargs)

        # For backward compatibility
        if granularity not in self.dttm_cols:
            granularity = self.main_dttm_col

        cols = {col.column_name: col for col in self.columns}
        metrics_dict = {m.metric_name: m for m in self.metrics}

        if not granularity and is_timeseries:
            raise Exception(_(
                "Datetime column not provided as part table configuration "
                "and is required by this type of chart"))
        for m in metrics:
            if m not in metrics_dict:
                raise Exception(_("Metric '{}' is not valid".format(m)))
        metrics_exprs = [metrics_dict.get(m).sqla_col for m in metrics]
        timeseries_limit_metric = metrics_dict.get(timeseries_limit_metric)
        timeseries_limit_metric_expr = None
        if timeseries_limit_metric:
            timeseries_limit_metric_expr = \
                timeseries_limit_metric.sqla_col
        if metrics:
            main_metric_expr = metrics_exprs[0]
        else:
            main_metric_expr = literal_column("COUNT(*)").label("ccount")

        select_exprs = []
        groupby_exprs = []

        if groupby:
            select_exprs = []
            inner_select_exprs = []
            inner_groupby_exprs = []
            for s in groupby:
                col = cols[s]
                outer = col.sqla_col
                inner = col.sqla_col.label(col.column_name + '__')

                groupby_exprs.append(outer)
                select_exprs.append(outer)
                inner_groupby_exprs.append(inner)
                inner_select_exprs.append(inner)
        elif columns:
            for s in columns:
                select_exprs.append(cols[s].sqla_col)
            metrics_exprs = []

        if granularity:
            @compiles(ColumnClause)
            def visit_column(element, compiler, **kw):
                """Patch for sqlalchemy bug

                TODO: sqlalchemy 1.2 release should be doing this on its own.
                Patch only if the column clause is specific for DateTime
                set and granularity is selected.
                """
                text = compiler.visit_column(element, **kw)
                try:
                    if (
                            element.is_literal and
                            hasattr(element.type, 'python_type') and
                            type(element.type) is DateTime
                    ):
                        text = text.replace('%%', '%')
                except NotImplementedError:
                    # Some elements raise NotImplementedError for python_type
                    pass
                return text

            dttm_col = cols[granularity]
            time_grain = extras.get('time_grain_sqla')

            if is_timeseries:
                timestamp = dttm_col.get_timestamp_expression(time_grain)
                select_exprs += [timestamp]
                groupby_exprs += [timestamp]

            time_filter = dttm_col.get_time_filter(from_dttm, to_dttm)

        select_exprs += metrics_exprs
        qry = sa.select(select_exprs)

        tbl = table(self.table_name)
        if self.schema:
            tbl.schema = self.schema

        # Supporting arbitrary SQL statements in place of tables
        if self.sql:
            from_sql = template_processor.process_template(self.sql)
            tbl = TextAsFrom(sa.text(from_sql), []).alias('expr_qry')

        if not columns:
            qry = qry.group_by(*groupby_exprs)

        where_clause_and = []
        having_clause_and = []
        for flt in filter:
            if not all([flt.get(s) for s in ['col', 'op', 'val']]):
                continue
            col = flt['col']
            op = flt['op']
            eq = flt['val']
            col_obj = cols.get(col)
            if col_obj:
                if op in ('in', 'not in'):
                    values = [v.strip("'").strip('"') for v in eq]
                    if col_obj.is_num:
                        values = [utils.js_string_to_num(s) for s in values]
                    cond = col_obj.sqla_col.in_(values)
                    if op == 'not in':
                        cond = ~cond
                    where_clause_and.append(cond)
                elif op == '==':
                    where_clause_and.append(col_obj.sqla_col == eq)
                elif op == '!=':
                    where_clause_and.append(col_obj.sqla_col != eq)
                elif op == '>':
                    where_clause_and.append(col_obj.sqla_col > eq)
                elif op == '<':
                    where_clause_and.append(col_obj.sqla_col < eq)
                elif op == '>=':
                    where_clause_and.append(col_obj.sqla_col >= eq)
                elif op == '<=':
                    where_clause_and.append(col_obj.sqla_col <= eq)
                elif op == 'LIKE':
                    where_clause_and.append(
                        col_obj.sqla_col.like(eq.replace('%', '%%')))
        if extras:
            where = extras.get('where')
            if where:
                where_clause_and += [wrap_clause_in_parens(
                    template_processor.process_template(where))]
            having = extras.get('having')
            if having:
                having_clause_and += [wrap_clause_in_parens(
                    template_processor.process_template(having))]
        if granularity:
            qry = qry.where(and_(*([time_filter] + where_clause_and)))
        else:
            qry = qry.where(and_(*where_clause_and))
        qry = qry.having(and_(*having_clause_and))
        if groupby:
            qry = qry.order_by(desc(main_metric_expr))
        elif orderby:
            for col, ascending in orderby:
                direction = asc if ascending else desc
                qry = qry.order_by(direction(col))

        qry = qry.limit(row_limit)

        if is_timeseries and timeseries_limit and groupby:
            # some SQL dialects require ORDER BY expressions to also
            # appear in the SELECT clause -- others, e.g. Vertica,
            # require a unique inner alias
            inner_main_metric_expr = main_metric_expr.label('mme_inner__')
            inner_select_exprs += [inner_main_metric_expr]
            subq = select(inner_select_exprs)
            subq = subq.select_from(tbl)
            inner_time_filter = dttm_col.get_time_filter(
                inner_from_dttm or from_dttm,
                inner_to_dttm or to_dttm,
            )
            subq = subq.where(and_(*(where_clause_and + [inner_time_filter])))
            subq = subq.group_by(*inner_groupby_exprs)
            ob = inner_main_metric_expr
            if timeseries_limit_metric_expr is not None:
                ob = timeseries_limit_metric_expr
            subq = subq.order_by(desc(ob))
            subq = subq.limit(timeseries_limit)
            on_clause = []
            for i, gb in enumerate(groupby):
                on_clause.append(
                    groupby_exprs[i] == column(gb + '__'))

            tbl = tbl.join(subq.alias(), and_(*on_clause))

        qry = qry.select_from(tbl)

        sql = "{}".format(
            qry.compile(
                engine, compile_kwargs={"literal_binds": True},),
        )
        logging.info(sql)
        sql = sqlparse.format(sql, reindent=True)
        return sql
Example #49
def mute_cond(muted):
    # `recipient_map` is defined in the enclosing scope of the original
    # source; `muted` is a (stream_name, topic) pair.
    stream_cond = column("recipient_id") == recipient_map[muted[0].lower()]
    topic_cond = func.upper(column("subject")) == func.upper(muted[1])
    return and_(stream_cond, topic_cond)
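
# Hedged usage sketch (not from the original source): the per-(stream, topic)
# conditions built by mute_cond would typically be OR-ed together and negated
# to exclude all muted topics from a query, e.g.:
#
#     muted_topics = [('devel', 'noise'), ('social', 'lunch')]
#     query = query.where(not_(or_(*[mute_cond(m) for m in muted_topics])))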
def _add_version_column(table_name):
    op.add_column(table_name, sa.Column('version', sa.Integer))
    table = sql.table(table_name, sql.column('version'))
    op.execute(table.update().values(version=1))
    op.alter_column(table_name, 'version', nullable=False)
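
# Hedged usage sketch: _add_version_column is intended to be called from an
# Alembic upgrade() once per table that needs the column; the table name below
# is hypothetical.
#
#     def upgrade():
#         _add_version_column('service')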
def _insert_operation_form_field():
    tb = table(
        'operation_form_field',
        column('id', Integer),
        column('name', String),
        column('type', String),
        column('required', Integer),
        column('order', Integer),
        column('default', Text),
        column('suggested_widget', String),
        column('values_url', String),
        column('values', String),
        column('scope', String),
        column('form_id', Integer),
        column('enable_conditions', String),
    )

    columns = ('id', 'name', 'type', 'required', 'order', 'default',
               'suggested_widget', 'values_url', 'values', 'scope', 'form_id',
               'enable_conditions')

    enabled_condition = 'this.loss.internalValue === "huber" || this.loss.internalValue === "quantile"'
    enabled_condition2 = 'this.n_iter_no_change.internalValue !== "0"'

    data = [
        # Gradient boosting regressor fields (form_id 4006)
        (4357, 'features', 'TEXT', 1, 1, None, 'attribute-selector', None,
         None, 'EXECUTION', 4006, None),
        (4358, 'label', 'TEXT', 1, 2, None, 'attribute-selector', None, None,
         'EXECUTION', 4006, None),
        (4359, 'prediction', 'TEXT', 0, 3, 'prediction', 'text', None, None,
         'EXECUTION', 4006, None),
        (4360, 'validation_fraction', 'FLOAT', 0, 6, 0.1, 'decimal', None,
         None, 'EXECUTION', 4006, enabled_condition2),
        (4361, 'max_depth', 'INTEGER', 0, 8, 3, 'integer', None, None,
         'EXECUTION', 4006, None),
        (4362, 'n_iter_no_change', 'INTEGER', 0, 5, None, 'integer', None,
         None, 'EXECUTION', 4006, None),
        (4363, 'tol', 'DECIMAL', 0, 12, 1e-4, 'decimal', None, None,
         'EXECUTION', 4006, None),
        (4364, 'criterion', 'TEXT', 0, 13, 'friedman_mse', 'dropdown', None,
         json.dumps([
             {
                 "key": "friedman_mse",
                 "value":
                 "Mean squared error with improvement score by Friedman"
             },
             {
                 "key": "mse",
                 "value": "Mean squared error"
             },
             {
                 "key": "mae",
                 "value": "Mean absolute error"
             },
         ]), 'EXECUTION', 4006, None),
        (4365, 'loss', 'TEXT', 0, 13, 'ls', 'dropdown', None,
         json.dumps([
             {
                 "key": "ls",
                 "value": "Least squares regression"
             },
             {
                 "key": "lad",
                 "value": "Least absolute deviation"
             },
             {
                 "key": "huber",
                 "value": "Combination of the two above"
             },
             {
                 "key": "quantile",
                 "value": "Quantile regression"
             },
         ]), 'EXECUTION', 4006, None),
        (4366, 'subsample', 'DECIMAL', 0, 14, 1.0, 'decimal', None, None,
         'EXECUTION', 4006, None),
        (4367, 'alpha', 'DECIMAL', 0, 15, 0.9, 'decimal', None, None,
         'EXECUTION', 4006, enabled_condition),
        (4368, 'min_weight_fraction_leaf', 'DECIMAL', 0, 16, 0, 'decimal',
         None, None, 'EXECUTION', 4006, None),
        (4369, 'max_leaf_nodes', 'INTEGER', 0, 17, None, 'integer', None, None,
         'EXECUTION', 4006, None),
        (4370, 'min_impurity_decrease', 'DECIMAL', 0, 18, 0, 'decimal', None,
         None, 'EXECUTION', 4006, None),
        (4371, 'max_features', 'TEXT', 0, 22, None, 'dropdown', None,
         json.dumps([
             {
                 "key": "auto",
                 "value": "auto"
             },
             {
                 "key": "sqrt",
                 "value": "sqrt"
             },
             {
                 "key": "log2",
                 "value": "log2"
             },
         ]), 'EXECUTION', 4006, None),
    ]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
Example #52
def get_old_messages_backend(request,
                             user_profile,
                             anchor=REQ(converter=int),
                             num_before=REQ(converter=to_non_negative_int),
                             num_after=REQ(converter=to_non_negative_int),
                             narrow=REQ('narrow',
                                        converter=narrow_parameter,
                                        default=None),
                             use_first_unread_anchor=REQ(
                                 default=False, converter=ujson.loads),
                             apply_markdown=REQ(default=True,
                                                converter=ujson.loads)):
    include_history = ok_to_include_history(narrow, user_profile.realm)

    if include_history and not use_first_unread_anchor:
        query = select([column("id").label("message_id")], None,
                       "zerver_message")
        inner_msg_id_col = literal_column("zerver_message.id")
    elif narrow is None:
        query = select(
            [column("message_id"), column("flags")],
            column("user_profile_id") == literal(user_profile.id),
            "zerver_usermessage")
        inner_msg_id_col = column("message_id")
    else:
        # TODO: Don't do this join if we're not doing a search
        query = select(
            [column("message_id"), column("flags")],
            column("user_profile_id") == literal(user_profile.id),
            join(
                "zerver_usermessage", "zerver_message",
                literal_column("zerver_usermessage.message_id") ==
                literal_column("zerver_message.id")))
        inner_msg_id_col = column("message_id")

    num_extra_messages = 1
    is_search = False

    if narrow is not None:
        # Add some metadata to our logging data for narrows
        verbose_operators = []
        for term in narrow:
            if term['operator'] == "is":
                verbose_operators.append("is:" + term['operand'])
            else:
                verbose_operators.append(term['operator'])
        request._log_data['extra'] = "[%s]" % (",".join(verbose_operators), )

        # Build the query for the narrow
        num_extra_messages = 0
        builder = NarrowBuilder(user_profile, inner_msg_id_col)
        for term in narrow:
            if term['operator'] == 'search' and not is_search:
                query = query.column("subject").column("rendered_content")
                is_search = True
            query = builder.add_term(query, term)

    # We add 1 to the number of messages requested if no narrow was
    # specified to ensure that the resulting list always contains the
    # anchor message.  If a narrow was specified, the anchor message
    # might not match the narrow anyway.
    if num_after != 0:
        num_after += num_extra_messages
    else:
        num_before += num_extra_messages

    sa_conn = get_sqlalchemy_connection()
    if use_first_unread_anchor:
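        # Bitwise test: a message counts as unread when its flags value has no
        # overlap with the "read" bit mask.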
        condition = column("flags").op("&")(UserMessage.flags.read.mask) == 0

        # We exclude messages on muted topics when finding the first unread
        # message in this narrow
        muting_conditions = exclude_muting_conditions(user_profile, narrow)
        if muting_conditions:
            condition = and_(condition, *muting_conditions)

        first_unread_query = query.where(condition)
        first_unread_query = first_unread_query.order_by(
            inner_msg_id_col.asc()).limit(1)
        first_unread_result = list(
            sa_conn.execute(first_unread_query).fetchall())
        if len(first_unread_result) > 0:
            anchor = first_unread_result[0][0]
        else:
            anchor = 10000000000000000

    before_query = None
    after_query = None
    if num_before != 0:
        before_anchor = anchor
        if num_after != 0:
            # Don't include the anchor in both the before query and the after query
            before_anchor = anchor - 1
        before_query = query.where(inner_msg_id_col <= before_anchor) \
                            .order_by(inner_msg_id_col.desc()).limit(num_before)
    if num_after != 0:
        after_query = query.where(inner_msg_id_col >= anchor) \
                           .order_by(inner_msg_id_col.asc()).limit(num_after)

    if num_before == 0 and num_after == 0:
        # This can happen when a narrow is specified.
        after_query = query.where(inner_msg_id_col == anchor)

    if before_query is not None:
        if after_query is not None:
            query = union_all(before_query.self_group(),
                              after_query.self_group())
        else:
            query = before_query
    else:
        query = after_query
    main_query = alias(query)
    query = select(main_query.c, None,
                   main_query).order_by(column("message_id").asc())
    # This is a hack to tag the query we use for testing
    query = query.prefix_with("/* get_old_messages */")
    query_result = list(sa_conn.execute(query).fetchall())

    # The following is a little messy, but ensures that the code paths
    # are similar regardless of the value of include_history.  The
    # 'user_messages' dictionary maps each message to the user's
    # UserMessage object for that message, which we will attach to the
    # rendered message dict before returning it.  We attempt to
    # bulk-fetch rendered message dicts from remote cache using the
    # 'messages' list.
    search_fields = dict()  # type: Dict[int, Dict[str, text_type]]
    message_ids = []  # type: List[int]
    user_message_flags = {}  # type: Dict[int, List[str]]
    if include_history:
        message_ids = [row[0] for row in query_result]

        # TODO: This could be done with an outer join instead of two queries
        user_message_flags = dict(
            (user_message.message_id, user_message.flags_list())
            for user_message in UserMessage.objects.filter(
                user_profile=user_profile, message__id__in=message_ids))
        for row in query_result:
            message_id = row[0]
            if user_message_flags.get(message_id) is None:
                user_message_flags[message_id] = ["read", "historical"]
            if is_search:
                (_, subject, rendered_content, content_matches,
                 subject_matches) = row
                search_fields[message_id] = get_search_fields(
                    rendered_content, subject, content_matches,
                    subject_matches)
    else:
        for row in query_result:
            message_id = row[0]
            flags = row[1]
            user_message_flags[message_id] = parse_usermessage_flags(flags)

            message_ids.append(message_id)

            if is_search:
                (_, _, subject, rendered_content, content_matches,
                 subject_matches) = row
                search_fields[message_id] = get_search_fields(
                    rendered_content, subject, content_matches,
                    subject_matches)

    cache_transformer = lambda row: Message.build_dict_from_raw_db_row(
        row, apply_markdown)
    id_fetcher = lambda row: row['id']

    message_dicts = generic_bulk_cached_fetch(
        lambda message_id: to_dict_cache_key_id(message_id, apply_markdown),
        Message.get_raw_db_rows,
        message_ids,
        id_fetcher=id_fetcher,
        cache_transformer=cache_transformer,
        extractor=extract_message_dict,
        setter=stringify_message_dict)

    message_list = []
    for message_id in message_ids:
        msg_dict = message_dicts[message_id]
        msg_dict.update({"flags": user_message_flags[message_id]})
        msg_dict.update(search_fields.get(message_id, {}))
        message_list.append(msg_dict)

    statsd.incr('loaded_old_messages', len(message_list))
    ret = {'messages': message_list, "result": "success", "msg": ""}
    return json_success(ret)
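
# A minimal, standalone sketch (separate from the view above) of the
# anchor-window pattern it uses: fetch rows before and after an anchor id,
# combine the two limited queries with union_all, and re-sort the union.
# Table and column names are illustrative; the pre-2.0 SQLAlchemy
# select(columns, whereclause, from_obj) style from the view is assumed.
from sqlalchemy.sql import alias, column, select, table, union_all

msgs = table('messages', column('id'), column('flags'))
anchor_id = 1000
before_q = (select([msgs.c.id, msgs.c.flags])
            .where(msgs.c.id <= anchor_id - 1)
            .order_by(msgs.c.id.desc()).limit(50))
after_q = (select([msgs.c.id, msgs.c.flags])
           .where(msgs.c.id >= anchor_id)
           .order_by(msgs.c.id.asc()).limit(50))
window = alias(union_all(before_q.self_group(), after_q.self_group()))
final_q = select(window.c, None, window).order_by(column('id').asc())
print(final_q)  # UNION ALL of the two windows, ordered by id ascending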
def upgrade():
    corp_type_table = table("corp_type", column("code", String),
                            column("description", String),
                            column("service_fee_code", String),
                            column("bcol_fee_code", String),
                            column("gl_memo", String),
                            column("service_gl_memo", String))

    filing_type_table = table("filing_type", column("code", String),
                              column("description", String))
    fee_schedule_table = table("fee_schedule",
                               column("filing_type_code", String),
                               column("corp_type_code", String),
                               column("fee_code", String),
                               column("fee_start_date", Date),
                               column("fee_end_date", Date),
                               column("future_effective_fee_code", String),
                               column("priority_fee_code", String))

    # Corp Type
    op.bulk_insert(corp_type_table,
                   [{
                       "code": "NRO",
                       "description": "Name Requests Online",
                       "service_fee_code": "TRF01",
                       "bcol_fee_code": None,
                       "gl_memo": "Benefit Companies",
                       "service_gl_memo": "SBC Modernization Service Charge"
                   }])

    # Filing Types
    op.bulk_insert(filing_type_table, [{
        'code': 'NM620',
        'description': 'Reg. Submission Online'
    }, {
        'code': 'NM606',
        'description': 'Upgrade to Priority'
    }])

    # Fee Schedule
    op.bulk_insert(fee_schedule_table, [{
        "filing_type_code": "NM620",
        "corp_type_code": "NRO",
        "fee_code": "EN103",
        "fee_start_date": date.today(),
        "fee_end_date": None,
        "future_effective_fee_code": None,
        "priority_fee_code": "PRI01"
    }, {
        "filing_type_code": "NM606",
        "corp_type_code": "NRO",
        "fee_code": "EN105",
        "fee_start_date": date.today(),
        "fee_end_date": None,
        "future_effective_fee_code": None,
        "priority_fee_code": None
    }])
Example #54
def by_has(self, query, operand, maybe_negate):
    # Excerpted from a narrow-builder class (hence `self`): narrows the query
    # to messages that have an attachment, image or link.
    if operand not in ['attachment', 'image', 'link']:
        raise BadNarrowOperator("unknown 'has' operand " + operand)
    col_name = 'has_' + operand
    cond = column(col_name)
    return query.where(maybe_negate(cond))

def long2ip(ip_int):
    '''Converts a saved integer/long back into an IP address'''
    return str(netaddr.IPAddress(ip_int))
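
# Example: long2ip(3232235777) returns '192.168.1.1'.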


# revision identifiers, used by Alembic.
revision = '1ec4a28fe0ff'
down_revision = 'c7225db614c1'
branch_labels = None
depends_on = None

solves_table = table(
    'solves',
    column('id', db.Integer),
    column('ip', db.Integer),
)

tracking_table = table(
    'tracking',
    column('id', db.Integer),
    column('ip', db.String(46)),
    column('team', db.Integer),
    column('date', db.DateTime),
)


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    connection = op.get_bind()
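    # The rest of this upgrade() is not shown in this excerpt. A hypothetical
    # sketch (not the original migration code) of how the bound connection,
    # the table constructs above and long2ip() could fit together:
    #
    #     for row in connection.execute(solves_table.select()):
    #         ip_string = long2ip(row.ip)  # stored integer -> dotted string
    #         ...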
def _insert_operation_form_field_translation():
    tb = table(
        'operation_form_field_translation',
        column('id', Integer),
        column('locale', String),
        column('label', String),
        column('help', String),
    )

    columns = ('id', 'locale', 'label', 'help')
    data = [
        # Gradient boosting regressor field translations (form_id 4006)
        (4357, 'en', 'Features', 'Features.'),
        (4357, 'pt', 'Atributo(s) previsor(es)', 'Atributo(s) previsor(es).'),
        (4358, 'en', 'Label attribute', 'Label attribute.'),
        (4358, 'pt', 'Atributo com o rótulo', 'Atributo com o rótulo.'),
        (4359, 'en', 'Prediction attribute (new)',
         'Prediction attribute (new).'),
        (4359, 'pt', 'Atributo com a predição (novo)',
         'Atributo usado para predição (novo).'),
        (4360, 'en', 'Validation fraction',
         'The proportion of training data to set aside as validation set for early'
         ' stopping.'),
        (4360, 'pt', 'Fração de validação',
         'A proporção de dados de treinamento a serem retirados como validação'
         ' definida para parada antecipada.'),
        (4361, 'en', 'Maximum depth',
         'Maximum depth of the individual regression estimators. The maximum depth limits'
         ' the number of nodes in the tree. Tune this parameter for best performance; the'
         ' best value depends on the interaction of the input variables.'),
        (4361, 'pt', 'Profundidade máxima', 'Profundidade máxima na árvore.'),
        (4362, 'en', 'Early stopping',
         'Used to decide if early stopping will be used to terminate training when'
         ' validation score is not improving.'),
        (4362, 'pt', 'Parada antecipada',
         'Usada para decidir se a parada antecipada vai ser usada para terminar treino'
         ' quando a pontuação de validação não está melhorando.'),
        (4363, 'en', 'Tolerance', 'Tolerance for the early stopping.'),
        (4363, 'pt', 'Tolerância', 'Tolerância para a parada antecipada.'),
        (4364, 'en', 'Criterion',
         'The function to measure the quality of a split.'),
        (4364, 'pt', 'Critério',
         'A função para medir a qualidade de um split.'),
        (4365, 'en', 'Loss', 'Loss function to be optimized.'),
        (4365, 'pt', 'Perda', 'Função de perda a ser otimizada.'),
        (4366, 'en', 'Subsample',
         'The fraction of samples to be used for fitting the individual base learners.'
         ),
        (4366, 'pt', 'Subamostra',
         'A fração de amostras para serem usadas para fazer o fitting em learners de base'
         ' individual.'),
        (4367, 'en', 'Alpha',
         'The alpha-quantile of the huber loss function and the quantiles loss function.'
         ),
        (4367, 'pt', 'Alfa',
         'O alfa-quantil da função huber loss e a função de perda de quantis.'
         ),
        (4368, 'en', 'Min. weight fraction leaf',
         'The minimum weighted fraction of the sum total of weights (of all'
         ' the input samples) required to be at a leaf node.'),
        (4368, 'pt', 'Fração ponderada mínima',
         'A fração ponderada mínima da soma total de pesos (de todas as amostras'
         ' de entrada) necessária para estar em um nó folha.'),
        (4369, 'en', 'Max. leaf nodes',
         'Grow trees with max_leaf_nodes in best-first fashion.'),
        (4369, 'pt', 'Max. nós folha',
         'Cresça árvores com max_leaf_nodes da melhor maneira possível.'),
        (4370, 'en', 'Min. impurity decrease',
         'A node will be split if this split induces a decrease of the impurity'
         ' greater than or equal to this value.'),
        (4370, 'pt', 'Redução mínima da impureza',
         'Um nó será dividido se essa divisão induzir uma redução da impureza'
         ' maior ou igual a esse valor.'),
        (4371, 'en', 'Max. features',
         'The number of features to consider when looking for the best split.'
         ),
        (4371, 'pt', 'Número máximo de atributos',
         'Número de atributos a serem considerados ao procurar a melhor '
         'divisão.'),
    ]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
"""

from alembic import op
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import column, table
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '41a4531be082'
down_revision = 'e8665a81606d'
branch_labels = None
depends_on = None

account_name = table(
    'account_name',
    column('id', postgresql.UUID()),
    column('created_at', sa.TIMESTAMP(timezone=True)),
    column('updated_at', sa.TIMESTAMP(timezone=True)),
    column('name', sa.Unicode(63)),
    column('user_id', sa.Integer),
    column('organization_id', sa.Integer),
    column('reserved', sa.Boolean),
)

profile = table(
    'profile',
    column('uuid', postgresql.UUID()),
    column('created_at', sa.TIMESTAMP(timezone=True)),
    column('updated_at', sa.TIMESTAMP(timezone=True)),
    column('name', sa.Unicode(63)),
    column('user_id', sa.Integer),
)
import sqlalchemy as sa

from progressbar import ProgressBar
import progressbar.widgets

from coaster.utils import buid2uuid, uuid2buid

# revision identifiers, used by Alembic.
revision = '87fc422c81f9'
down_revision = '039d2745e628'
branch_labels = None
depends_on = None

user_session = table(
    'user_session',
    column('id', sa.Integer()),
    column('buid', sa.Unicode(22)),
    column('uuid', UUIDType(binary=False)),
)


def get_progressbar(label, maxval):
    return ProgressBar(
        maxval=maxval,
        widgets=[
            label,
            ': ',
            progressbar.widgets.Percentage(),
            ' ',
            progressbar.widgets.Bar(),
            ' ',
        ],
    )
def upgrade():
    # Create Lots table and Lot to Framework relationship
    op.create_table(
        'lots', sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('slug', sa.String(), nullable=False),
        sa.Column('name', sa.String(), nullable=False),
        sa.Column('one_service_limit', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('id'))

    op.create_index(op.f('ix_lots_slug'), 'lots', ['slug'], unique=False)

    op.create_table(
        'framework_lots',
        sa.Column('framework_id', sa.Integer(), nullable=False),
        sa.Column('lot_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ['framework_id'],
            ['frameworks.id'],
        ), sa.ForeignKeyConstraint(
            ['lot_id'],
            ['lots.id'],
        ))

    # Insert G-Cloud lot records
    lot_table = table('lots', column('name', sa.String),
                      column('slug', sa.String),
                      column('one_service_limit', sa.Boolean))

    op.bulk_insert(lot_table, [
        {
            'name': 'Software as a Service',
            'slug': 'saas',
            'one_service_limit': False
        },
        {
            'name': 'Platform as a Service',
            'slug': 'paas',
            'one_service_limit': False
        },
        {
            'name': 'Infrastructure as a Service',
            'slug': 'iaas',
            'one_service_limit': False
        },
        {
            'name': 'Specialist Cloud Services',
            'slug': 'scs',
            'one_service_limit': False
        },
    ])

    framework_lots_table = table('framework_lots',
                                 column('framework_id', sa.Integer),
                                 column('lot_id', sa.Integer))

    # Add 4 lots (ids 1-4) to all G-Cloud frameworks (ids 1-4)
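    # itertools.product(range(1, 5), range(1, 5)) expands to all 16
    # (framework_id, lot_id) pairs for ids 1 through 4.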
    op.bulk_insert(framework_lots_table, [{
        'framework_id': framework_id,
        'lot_id': lot_id
    } for framework_id, lot_id in itertools.product(range(1, 5), range(1, 5))])

    op.add_column(u'archived_services',
                  sa.Column('lot_id', sa.BigInteger(), nullable=True))
    op.create_index(op.f('ix_archived_services_lot_id'),
                    'archived_services', ['lot_id'],
                    unique=False)
    op.create_foreign_key(None, 'archived_services', 'lots', ['lot_id'],
                          ['id'])

    op.add_column(u'draft_services',
                  sa.Column('lot_id', sa.BigInteger(), nullable=True))
    op.create_index(op.f('ix_draft_services_lot_id'),
                    'draft_services', ['lot_id'],
                    unique=False)
    op.create_foreign_key(None, 'draft_services', 'lots', ['lot_id'], ['id'])

    op.add_column(u'services',
                  sa.Column('lot_id', sa.BigInteger(), nullable=True))
    op.create_index(op.f('ix_services_lot_id'),
                    'services', ['lot_id'],
                    unique=False)
    op.create_foreign_key(None, 'services', 'lots', ['lot_id'], ['id'])
Example #60
    def query(self):
        query = powa_getstatdata_sample("query")
        query = query.where((column("datname") == bindparam("database"))
                            & (column("queryid") == bindparam("query")))
        query = query.alias()
        c = query.c
        total_blocks = ((c.shared_blks_read +
                         c.shared_blks_hit).label("total_blocks"))

        def bps(col):
            ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
            return (mulblock(col) / ts).label(col.name)

        cols = [
            to_epoch(c.ts), c.rows, c.calls,
            case([(total_blocks == 0, 0)],
                 else_=cast(c.shared_blks_hit, Numeric) * 100 /
                 total_blocks).label("hit_ratio"),
            bps(c.shared_blks_read),
            bps(c.shared_blks_hit),
            bps(c.shared_blks_dirtied),
            bps(c.shared_blks_written),
            bps(c.local_blks_read),
            bps(c.local_blks_hit),
            bps(c.local_blks_dirtied),
            bps(c.local_blks_written),
            bps(c.temp_blks_read),
            bps(c.temp_blks_written), c.blk_read_time, c.blk_write_time,
            (c.runtime / greatest(c.calls, 1)).label("avg_runtime")
        ]

        from_clause = query
        if self.has_extension("pg_stat_kcache"):
            # Add system metrics from pg_stat_kcache,
            # and detailed hit ratio.
            kcache_query = kcache_getstatdata_sample()
            kc = inner_cc(kcache_query)
            kcache_query = (kcache_query.where(
                kc.queryid == bindparam("query")).alias())
            kc = kcache_query.c
            sys_hits = (greatest(mulblock(c.shared_blks_read) - kc.reads,
                                 0).label("kcache_hitblocks"))
            sys_hitratio = (cast(sys_hits, Numeric) * 100 /
                            mulblock(total_blocks))
            disk_hit_ratio = (kc.reads / mulblock(total_blocks))
            total_time = greatest(c.runtime, 1)
            cols.extend([
                kc.reads, kc.writes,
                ((kc.user_time * 1000 * 100) / total_time).label("user_time"),
                ((kc.system_time * 1000 * 100) /
                 total_time).label("system_time"),
                case([(total_blocks == 0, 0)],
                     else_=disk_hit_ratio).label("disk_hit_ratio"),
                case([(total_blocks == 0, 0)],
                     else_=sys_hitratio).label("sys_hit_ratio")
            ])
            from_clause = from_clause.join(kcache_query,
                                           kcache_query.c.ts == c.ts)
        else:
            cols.extend([
                case([(total_blocks == 0, 0)],
                     else_=cast(c.shared_blks_read, Numeric) * 100 /
                     total_blocks).label("miss_ratio")
            ])

        return (select(cols).select_from(from_clause).where(
            c.calls != None).order_by(c.ts).params(samples=100))
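
# A minimal, standalone sketch of the guarded-division pattern used for the
# hit/miss ratio columns above, assuming a pre-1.4 SQLAlchemy where case()
# accepts a list of (condition, value) tuples. Column names are illustrative.
from sqlalchemy import Numeric, case, cast
from sqlalchemy.sql import column, select

blks_hit = column("shared_blks_hit")
blks_read = column("shared_blks_read")
total_blocks = blks_hit + blks_read
hit_ratio = case(
    [(total_blocks == 0, 0)],  # avoid dividing by zero when there is no traffic
    else_=cast(blks_hit, Numeric) * 100 / total_blocks,
).label("hit_ratio")
print(select([total_blocks.label("total_blocks"), hit_ratio]))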