示例#1
0
def reset_sequence_query(table, column):
    """Build a query that resets *table*'s serial sequence for *column*.

    Produces ``SELECT setval(pg_get_serial_sequence(...), max(column)+1, false)``
    so the next value drawn from the sequence follows the current maximum.
    Returns ``None`` when the arguments are not a ``sa.Table``/``sa.Column`` pair.
    """
    if not (isinstance(table, sa.Table) and isinstance(column, sa.Column)):
        return None
    # COALESCE guards the empty-table case, where max() is NULL.
    next_value = sa.select(
        [sa.func.coalesce(sa.func.max(column), 0) + 1]).as_scalar()
    sequence_name = sa.func.pg_get_serial_sequence(table.key, column.key)
    return sa.select([sa.func.setval(sequence_name, next_value, sa.false())])
示例#2
0
File: db.py  Project: vadimber18/ksg
async def get_pure_recipe_list(conn, limit, offset, where_list, usr, many,
                               favored):
    """Fetch recipes joined with their source and category.

    Counts value=True votes into a ``likes`` field.  When *usr* is given,
    a ``liked`` field reflects whether that user liked each recipe.  When
    *favored* is true, only recipes liked by *usr* are returned (inner
    join); otherwise the full list is returned (outer join).

    With many=False a single matching row is returned in a one-element
    list and RecordNotFound is raised when nothing matches; with
    many=True a limit/offset page of rows is returned.

    Note: the original body had three consecutive string literals, of
    which only the first was an actual docstring — the rest were no-op
    expression statements; they are merged here.
    """
    # We fetch only liked recipes if favored is true, otherwise the full
    # list: isouter=False makes the vote joins inner joins.
    fetch_all_recipes = not favored

    if usr:
        # alias1 counts all positive votes; alias2 carries this user's vote.
        alias1 = vote.alias('alias1')
        alias2 = vote.alias('alias2')
        query = sa.select([recipe, source, category, alias2.c.value.label('liked'), sa.func.count(alias1.c.recipe_id).label('likes')], use_labels=True).\
            select_from(
                recipe.join(source, source.c.id == recipe.c.source_id)
                .join(category, category.c.id == recipe.c.category_id)
                .join(
                    alias1,
                    sa.and_(alias1.c.recipe_id == recipe.c.id, alias1.c.value == True),
                    isouter=fetch_all_recipes)
                .join(
                    alias2,
                    sa.and_(alias2.c.recipe_id == recipe.c.id, alias2.c.value == True, alias2.c.user_id == usr['id']),
                    isouter=fetch_all_recipes)
            ).group_by(recipe.c.id, source.c.id, category.c.id, alias2.c.value)
    else:
        query = sa.select([recipe, source, category, sa.func.count(vote.c.recipe_id).label('likes')], use_labels=True).\
            select_from(
                recipe.join(source, source.c.id == recipe.c.source_id)
                .join(category, category.c.id == recipe.c.category_id)
                .join(vote, sa.and_(vote.c.recipe_id == recipe.c.id, vote.c.value == True), isouter=True)
            ).group_by(recipe.c.id, source.c.id, category.c.id)

    for where in where_list:
        query = query.where(where)

    if not many:
        cursor = await conn.execute(query)
        recipe_record = await cursor.fetchone()
        if not recipe_record:
            raise RecordNotFound('No recipe with such id')
        return [dict(recipe_record)]

    query = query.limit(limit).offset(offset)

    cursor = await conn.execute(query)
    recipe_records = await cursor.fetchall()
    return [dict(q) for q in recipe_records]
示例#3
0
File: db.py  Project: vadimber18/ksg
async def get_recipe_list_count(conn, where_list, favored=False, usr=None):
    """Count recipes, optionally restricted to those *usr* voted up."""
    if favored:
        # Inner join keeps only recipes with a positive vote by this user.
        query = sa.select([sa.func.count()]).select_from(
            recipe.join(
                vote,
                sa.and_(
                    vote.c.recipe_id == recipe.c.id,
                    vote.c.value == True,
                    vote.c.user_id == usr['id'])))
    else:
        query = sa.select([sa.func.count()]).select_from(recipe)
    for clause in where_list:
        query = query.where(clause)
    result = await conn.execute(query)
    row = await result.fetchone()
    return row[0]
示例#4
0
async def get_file_list_count(conn, where_list):
    """Return the number of ``file`` rows matching all *where_list* clauses."""
    query = sa.select([sa.func.count()]).select_from(file)
    for clause in where_list:
        query = query.where(clause)
    result = await conn.execute(query)
    row = await result.fetchone()
    return row[0]
示例#5
0
async def get_pastes(conn):
    """Fetch id and creation time of every paste.

    Raises:
        PasteNotFound: when no pastes exist at all.
    """
    result = await conn.execute(
        sa.select([paste.c.id, paste.c.created_on]))
    rows = await result.fetchall()
    if not rows:
        raise PasteNotFound("Paste Not Found")
    return [Paste(row) for row in rows]
示例#6
0
async def account(request, account_id):
    """Return the AccountRoles row for *account_id* (or None if absent)."""
    table = metadata().tables['AccountRoles']
    query = sa.select([table]).where(table.c.account_id == account_id)
    async with request.app['engine'].acquire() as conn:
        result_proxy = await conn.execute(query)
        return await result_proxy.fetchone()
示例#7
0
async def get_pure_file_list(conn, limit, offset, where_list, many):
    """Fetch files joined with their owning user.

    With many=False a single matching row is returned in a one-element
    list and RecordNotFound is raised when nothing matches; with
    many=True a limit/offset page of rows is returned.
    """
    query = sa.select(
        [file, users.c.username, users.c.email], use_labels=True
    ).select_from(
        file.join(users, users.c.id == file.c.user_id)
    ).group_by(file.c.id, users.c.id)

    for clause in where_list:
        query = query.where(clause)

    if not many:
        result = await conn.execute(query)
        row = await result.fetchone()
        if not row:
            raise RecordNotFound('No file with such id')
        return [dict(row)]

    result = await conn.execute(query.limit(limit).offset(offset))
    rows = await result.fetchall()
    return [dict(row) for row in rows]
示例#8
0
async def get_paste(conn, paste_id):
    """Fetch a single paste together with its comments.

    Raises:
        PasteNotFound: no paste with *paste_id* exists.
    """
    query = sa.select([paste]).where(paste.c.id == paste_id)
    result = await conn.execute(query)
    p = await result.first()
    # Check existence before fetching comments: the original ran the
    # comments query even when the paste was missing, wasting a round trip.
    if not p:
        raise PasteNotFound("Paste Not Found")
    comments = await get_comments(conn, paste_id)
    return Paste(p, comments)
示例#9
0
    async def put_file(self, resource_path, row_id, subpath, body, revision,
                       content_type):
        """Atomically store a file blob under *subpath* of resource *row_id*.

        Uses optimistic concurrency: the main row is updated only when its
        stored revision equals *revision*; on success the blob is upserted
        into the per-resource files table inside the same transaction.

        Raises:
            ResourceNotFound: no row with *row_id* exists.
            WrongRevision: the row exists but *revision* is stale.
            UnexpectedError: the update matched more than one row.
        """
        resource_type = self._get_resource_type(resource_path)
        table = self._get_table(resource_path)
        files_table = self.files_tables[resource_type]

        new_revision = get_new_id(resource_type)
        old_revision = revision

        # Only the content type is kept in the main table; the blob itself
        # lives in the files table.
        data = {
            'content-type': content_type,
        }

        async with self.pool.acquire() as conn:
            async with conn.begin():
                # Compare-and-swap: bump the revision only if the caller's
                # revision is still current.
                result = await conn.execute(
                    table.update().where(table.c.id == row_id).where(
                        table.c.revision == old_revision).values({
                            'revision':
                            new_revision,
                            'data_' + subpath:
                            data,
                        }))

                if result.rowcount == 1:
                    data = {
                        'id': row_id,
                        'subpath': subpath,
                        'blob': body,
                    }
                    # Upsert: replace an existing blob for (id, subpath).
                    await conn.execute(
                        insert(files_table).values(data).on_conflict_do_update(
                            constraint=self._get_file_unique_idx_name(
                                resource_type),
                            set_=data))

                elif result.rowcount == 0:
                    # Nothing matched: distinguish "row missing" from
                    # "revision stale" with a second lookup.
                    result = await conn.execute(
                        sa.select([table.c.revision
                                   ]).where(table.c.id == row_id))
                    row = await result.first()
                    if row is None:
                        raise ResourceNotFound("Resource %s not found." %
                                               row_id)
                    else:
                        raise WrongRevision(
                            "Expected revision is %s, got %s." %
                            (row.revision, old_revision),
                            current=row.revision,
                            update=old_revision)

                else:
                    raise UnexpectedError((
                        "Update query returned %r rowcount, expected values are 0 or 1. Don't know how to handle that."
                    ) % result.rowcount)

        return {'id': row_id, 'revision': new_revision}
示例#10
0
File: db.py  Project: vadimber18/ksg
async def get_pure_comment_list(conn, recipes):
    """Fetch comments (with author username) for the given recipe rows."""
    # Loop variable named so it does not shadow the module-level ``recipe``
    # table.
    recipe_ids = [item['recipe_id'] for item in recipes]
    query = sa.select(
        [comment.c.recipe_id, comment.c.body, comment.c.pub_date,
         users.c.username]
    ).select_from(
        comment.join(users, users.c.id == comment.c.user_id)
    ).where(comment.c.recipe_id.in_(recipe_ids))
    result = await conn.execute(query)
    rows = await result.fetchall()
    return [dict(row) for row in rows]
示例#11
0
async def accounts(request, role_ids=None):
    """Yield AccountRoles rows; filter to rows containing *role_ids* if given."""
    table = metadata().tables['AccountRoles']
    statement = sa.select([table])
    if role_ids is not None:
        wanted = sa.cast(role_ids, postgresql.ARRAY(sa.String(32)))
        statement = statement.where(table.c.role_ids.contains(wanted))
    async with request.app['engine'].acquire() as conn:
        async for record in conn.execute(statement):
            yield record
示例#12
0
async def get_accumulated_error(date: datetime.date) -> Optional[Decimal]:
    """Return the most recent non-null accumulated_error on or before *date*.

    Returns None when no such row exists.
    """
    async with connection() as cur:
        # ``!= None`` is intentional here: SQLAlchemy renders it as
        # ``IS NOT NULL`` in the generated SQL.
        row = await anext(
            cur.execute(
                sa.select([trade_date.c.accumulated_error
                           ]).select_from(trade_date).where(
                               (trade_date.c.accumulated_error != None)
                               & (trade_date.c.date <= date)).order_by(
                                   sa.desc(trade_date.c.date)).limit(1)), None)

        # ``row and ...`` yields None when no row was found.
        return row and row.accumulated_error
示例#13
0
 async def get_subpath(self, resource_path, row_id, subpath):
     """Return the data stored under *subpath* of resource *row_id*.

     The returned dict is the subpath's data with ``revision`` merged in.

     Raises:
         ResourceNotFound: no row with *row_id* exists.
     """
     table = self._get_table(resource_path)
     async with self.pool.acquire() as conn:
         result = await conn.execute(
             sa.select([
                 table.c.revision,
                 table.c['data_' + subpath],
             ]).where(table.c.id == row_id))
         row = await result.first()
     if row:
         return dict(row['data_' + subpath], revision=row.revision)
     else:
         raise ResourceNotFound("Resource %s not found." % row_id)
示例#14
0
async def get_bcse_in(
        currency: str,
        start_dt: datetime.datetime,
        end_dt: datetime.datetime = None) -> Iterable[Tuple[int, Decimal]]:
    """Return (timestamp, rate) tuples for *currency* in [start_dt, end_dt)."""
    # A missing end bound means "far future".
    if end_dt is None:
        end_dt = datetime.datetime(2035, 1, 1)

    # The column stores unix timestamps, so convert the bounds once here.
    start_ts = start_dt.timestamp()
    end_ts = end_dt.timestamp()

    query = sa.select([bcse.c.timestamp, bcse.c.rate]).select_from(bcse).where(
        (bcse.c.currency == currency)
        & (bcse.c.timestamp >= start_ts)
        & (bcse.c.timestamp < end_ts))

    async with connection() as cur:
        rows = []
        async for record in cur.execute(query):
            rows.append(record.as_tuple())
        return rows
示例#15
0
    async def put_subpath(self, resource_path, row_id, subpath, data):
        """Validate and store *data* under *subpath* of resource *row_id*.

        Uses optimistic concurrency: the row is updated only when its stored
        revision equals ``data['revision']``; on success the auxiliary
        search tables are refreshed inside the same transaction.

        Raises:
            ResourceNotFound: no row with *row_id* exists.
            WrongRevision: the row exists but the given revision is stale.
            UnexpectedError: the update matched more than one row.
        """
        table = self._get_table(resource_path)

        resource_type = self._get_resource_type(resource_path)
        new_revision = get_new_id(resource_type)
        old_revision = data.get('revision')

        # Validate against the subpath's prototype schema before writing.
        data = validated(
            resource_type,
            self.schema[resource_type]['subpaths'][subpath]['prototype'], data)

        async with self.pool.acquire() as conn:
            async with conn.begin():
                # Compare-and-swap on the revision column.
                result = await conn.execute(
                    table.update().where(table.c.id == row_id).where(
                        table.c.revision == old_revision).values({
                            'revision':
                            new_revision,
                            'data_' + subpath:
                            data,
                        }))

                if result.rowcount == 1:
                    await self._update_aux_tables(conn, resource_type, row_id)

                elif result.rowcount == 0:
                    # Nothing matched: distinguish "row missing" from
                    # "revision stale" with a second lookup.
                    result = await conn.execute(
                        sa.select([table.c.revision
                                   ]).where(table.c.id == row_id))
                    row = await result.first()
                    if row is None:
                        raise ResourceNotFound("Resource %s not found." %
                                               row_id)
                    else:
                        raise WrongRevision(
                            "Expected revision is %s, got %s." %
                            (row.revision, old_revision),
                            current=row.revision,
                            update=old_revision)

                else:
                    raise UnexpectedError((
                        "Update query returned %r rowcount, expected values are 0 or 1. Don't know how to handle that."
                    ) % result.rowcount)

        return dict(data, revision=new_revision)
示例#16
0
    async def _update_aux_tables(self,
                                 conn,
                                 resource_type,
                                 row_id,
                                 create=None):
        """Refresh the search column and aux (list) table for *row_id*.

        When *create* is None the data is re-read from the main table (the
        main ``data`` column plus every subpath column) and the row's
        ``search`` field is rebuilt; otherwise *create* is taken as the data
        of a freshly-created row and only the aux table is populated.
        """
        # TODO: should be defered

        if create is None:
            # Get data from main table and all subpaths.
            # We need this, because search look for data everywhere including all subpaths.
            table = self.tables[resource_type]
            subpaths = self._get_subpaths(resource_type)
            result = await conn.execute(
                sa.select([table.c.data] +
                          [table.c['data_' + subpath]
                           for subpath in subpaths]).where(
                               table.c.id == row_id))
            row = await result.first()
            # Skip subpath columns that are empty/NULL.
            data = ([row.data] + [
                row['data_' + subpath]
                for subpath in subpaths if row['data_' + subpath]
            ])

            # Update search field containing data from resource and all subpaths in a convinient shape for searches.
            await conn.execute(
                table.update().where(table.c.id == row_id).values(search=list(
                    itertools.chain.from_iterable(
                        flatten_for_gin(x) for x in data))))

        else:
            data = create

        # Update list tables
        aux_table = self.aux_tables[resource_type]

        # Delete old rows, before inserting new ones.
        if create is None:
            await conn.execute(
                aux_table.delete().where(aux_table.c.id == row_id))

        # Populate aux table with data from lists, for searches.
        await conn.execute(aux_table.insert().values([{
            'id': row_id,
            'data': item,
        } for item in flatten_for_lists(data)]))
示例#17
0
async def initialize_database(engine,
                              required_accounts: T.Dict[str, T.Iterable[str]]):
    # language=rst
    """
    Ensure every required account exists in the ``AccountRoles`` table.

    For each ``account_id -> role_ids`` pair that is not yet present, an
    audit-log entry is written and the account row is inserted in one
    transaction.

    Raises:
        PreconditionFailed: if a race condition exists between this process and
            another process trying to initialize the database at the same time.

    """
    accountroles_table = metadata().tables['AccountRoles']
    async with engine.acquire() as conn:
        for account_id, role_ids in required_accounts.items():
            result_proxy = await conn.execute(
                sa.select([sa_functions.count('*')
                           ]).select_from(accountroles_table).where(
                               accountroles_table.c.account_id == account_id))
            row = await result_proxy.fetchone()
            if row[0] == 0:
                _logger.info(
                    "Required account '%s' not found. Creating this account with roles %s",
                    account_id, repr(role_ids))
                async with conn.begin():
                    # Insert the audit entry first; an IntegrityError on the
                    # account insert below means another process created the
                    # same account concurrently.
                    log_id = await _accountroleslog_insert(
                        conn,
                        created_by='authz_admin_service',
                        request_info='Initialization',
                        account_id=account_id,
                        action='C',
                        role_ids=role_ids)
                    try:
                        await conn.execute(accountroles_table.insert().values(
                            account_id=account_id,
                            role_ids=role_ids,
                            log_id=log_id))
                    except sa_exceptions.IntegrityError:
                        raise PreconditionFailed() from None
示例#18
0
 async def get_file(self, resource_path, row_id, subpath):
     """Return the subpath data of *row_id* with its stored blob merged in.

     The inner join against the files table means a row with no blob for
     *subpath* is treated the same as a missing resource.

     Raises:
         ResourceNotFound: no matching row (or no blob for *subpath*).
     """
     resource_type = self._get_resource_type(resource_path)
     table = self._get_table(resource_path)
     files_table = self.files_tables[resource_type]
     async with self.pool.acquire() as conn:
         result = await conn.execute(
             sa.select([
                 table.c.revision,
                 table.c['data_' + subpath],
                 files_table.c.blob,
             ]).select_from(
                 table.join(
                     files_table,
                     sa.and_(
                         files_table.c.id == table.c.id,
                         files_table.c.subpath == subpath,
                     ))).where(table.c.id == row_id))
         row = await result.first()
     if row:
         return dict(row['data_' + subpath],
                     revision=row.revision,
                     blob=row.blob)
     else:
         raise ResourceNotFound("Resource %s not found." % row_id)
示例#19
0
File: db.py  Project: me-laman/postgis
# PostGIS-backed polygon table: _created/_updated are maintained by the
# database (server_default / onupdate).
gis_polygon = Table(
    'gis_polygon',
    meta,
    Column('_created', DateTime(timezone=False), server_default=func.now()),
    Column('_updated', DateTime(timezone=False), onupdate=func.now()),
    Column('id', Integer, primary_key=True, autoincrement=True,
           nullable=False),
    Column('class_id', Integer),
    Column('name', String),
    Column('props', JSON),
    Column('geom', Geometry('POLYGON', srid=4326)),  # SRID 4326 = WGS 84
)

# Shared SELECT used by the accessors below: every column, with the
# geometry rendered as WKT text via ST_AsText.
base_query = sa.select([
    gis_polygon.c.id, gis_polygon.c.name, gis_polygon.c._created,
    gis_polygon.c._updated, gis_polygon.c.class_id, gis_polygon.c.props,
    functions.ST_AsText(gis_polygon.c.geom).label('geom')
])


async def get_all(connection: SAConnection) -> list:
    """Return every gis_polygon row (geometry as WKT text).

    Note: annotation corrected — ``fetchall`` returns a list of rows,
    not a single RowProxy.
    """

    result = await connection.execute(base_query)
    records = await result.fetchall()
    return records


async def get_record(connection: SAConnection, record_id: int) -> RowProxy:
    query = base_query.where(gis_polygon.c.id == record_id)

    result = await connection.execute(query)
示例#20
0
 async def list(self, resource_path):
     """Return the id of every row in the table for *resource_path*."""
     table = self._get_table(resource_path)
     ids = []
     async with self.pool.acquire() as conn:
         async for row in conn.execute(sa.select([table.c.id])):
             ids.append(row.id)
     return ids
示例#21
0
async def get_comments(conn, paste_id):
    """Fetch all comments attached to the paste with *paste_id*."""
    result = await conn.execute(
        sa.select([comment]).where(comment.c.paste_id == paste_id))
    rows = await result.fetchall()
    return [Comment(row) for row in rows]
示例#22
0
    async def search(self, resource_path, search_path):
        """Run a search described by *search_path* against a resource table.

        *search_path* is a slash-separated sequence of operators and their
        URL-quoted arguments, e.g. ``exact/name/foo/sort/id/limit/10``.
        Returns a list of dicts: full rows for ``show_all``, only the
        requested fields for ``show``, otherwise just ``{'id': ...}``.

        Raises:
            Exception: on an unknown operator or missing arguments.
        """
        # Number of arguments each operator consumes from the path.
        operator_args = {
            'contains': 2,
            'exact': 2,
            'ge': 2,
            'gt': 2,
            'le': 2,
            'lt': 2,
            'ne': 2,
            'startswith': 2,
            'show': 1,
            'show_all': 0,
            'sort': 1,
            'offset': 1,
            'limit': 1,
        }

        # The five comparison operators differ only in the SQL comparison
        # applied to the aux-table JSON value; dispatch through this map
        # instead of five copy-pasted branches.
        comparisons = {
            'ge': lambda col, val: col >= val,
            'gt': lambda col, val: col > val,
            'le': lambda col, val: col <= val,
            'lt': lambda col, val: col < val,
            'ne': lambda col, val: col != val,
        }

        # --- Parse the path into (operator, args) pairs. ---
        operators = []
        words = map(urllib.parse.unquote, search_path.split('/'))
        operator = next(words, None)
        while operator:
            if operator not in operator_args:
                raise Exception("Unknown operator %r." % operator)
            args_count = operator_args[operator]
            try:
                args = [next(words) for _ in range(args_count)]
            except StopIteration:
                raise Exception("Operator %r requires at least %d arguments." %
                                (operator, args_count))
            operators.append((operator, args))
            operator = next(words, None)

        sort_keys = []
        show_all = False
        show = []
        offset = None
        limit = None
        where = []
        gin = []
        joins = []

        table = self._get_table(resource_path)
        resource_type = self._get_resource_type(resource_path)
        aux_table = self.aux_tables[resource_type]
        schema = self._get_prototype_schema(resource_type)

        def join_aux():
            # Each predicate gets its own aux-table alias (t1, t2, ...) so
            # multiple predicates can constrain different rows independently.
            alias = aux_table.alias('t' + str(len(joins) + 1))
            joins.append(table.join(alias, table.c.id == alias.c.id))
            return alias

        # --- Translate operators into query fragments. ---
        for operator, args in operators:

            if operator == 'show_all':
                show_all = True

            elif operator == 'show':
                show.extend(args)

            elif operator == 'sort':
                sort_keys.extend(args)

            elif operator == 'offset':
                offset = int(args[0])

            elif operator == 'limit':
                limit = int(args[0])

            elif operator == 'exact':
                # Exact matches go through the GIN-indexed search column.
                key, value = args
                value = schema[key].search(value, cast=False)
                gin.append({key: value})

            elif operator in ('startswith', 'contains'):
                # Textual predicates compare the JSON value as text.
                key, value = args
                value = schema[key].search(value, cast=False)
                alias = join_aux()
                text = alias.c.data[key].astext
                where.append(text.startswith(value) if operator ==
                             'startswith' else text.contains(value))

            elif operator in comparisons:
                key, value = args
                value = schema[key].search(value)
                alias = join_aux()
                where.append(comparisons[operator](alias.c.data[key], value))

            else:
                raise Exception("Operator %r is not yet implemented." %
                                operator)

        # Only the id column is needed unless data fields were requested.
        if show_all is False and len(show) == 0:
            query = sa.select([table.c.id], distinct=table.c.id)
        else:
            query = sa.select([table.c.id, table.c.revision, table.c.data],
                              distinct=table.c.id)

        for join in joins:
            query = query.select_from(join)

        if gin:
            where.append(table.c.search.contains(gin))

        if where:
            query = query.where(sa.and_(*where))

        if sort_keys:
            db_sort_keys = []
            for sort_key in sort_keys:
                if sort_key == 'id':
                    db_sort_keys.append(table.c.id)
                else:
                    db_sort_keys.append(table.c.data[sort_key])
            query = query.order_by(*db_sort_keys)

        if limit:
            query = query.limit(limit)

        if offset:
            query = query.offset(offset)

        async with self.pool.acquire() as conn:
            result = conn.execute(query)

            if show_all:
                return [
                    dict(row.data, id=row.id, revision=row.revision)
                    async for row in result
                ]
            elif show:
                return [
                    dict(
                        {
                            field: row.data[field]
                            for field in show if field in row.data
                        },
                        id=row.id) async for row in result
                ]
            else:
                return [{'id': row.id} async for row in result]
示例#23
0
async def interpolate_data_gen(get_conn):
    """Stream batches of rows with interpolated odometer values.

    Iterates ``pre_train_filtered`` ordered by (client_name, vin, yyy, mmm),
    groups rows by ``client_name|vin``, interpolates odometer readings over
    the ``r_n`` axis within each group, and yields accumulated insert dicts
    roughly every 10000 completed groups (plus a final partial batch).
    """
    query = sa.select([db.pre_train_filtered]).order_by(db.pre_train_filtered.c.client_name,
                                                        db.pre_train_filtered.c.vin,
                                                        db.pre_train_filtered.c.yyy,
                                                        db.pre_train_filtered.c.mmm)

    new_values = []       # rows of the group currently being accumulated
    insert_values = []    # finished rows waiting to be yielded
    x = tuple()           # r_n positions with a non-zero odometer reading
    x_new = tuple()       # all r_n positions of the current group
    y = tuple()           # odometer readings matching x
    prev_key = None
    rows_counter = 10000  # groups remaining until the next yield
    total_rows = 0
    async for row in get_conn.execute(query):
        current_key = '|'.join([row.client_name, row.vin])

        # Group boundary: interpolate and flush the finished group.
        if prev_key and current_key != prev_key:
            filtered_x_y = filter_x_y(x, y)
            if filtered_x_y[0].size > 1 and filtered_x_y[1].size > 1:  # Remove clients with the only one presence
                rows_counter -= 1
                # x = filtered_x_y[0], y = filtered_x_y[1]
                y_new_arr = interpolate(filtered_x_y[0], filtered_x_y[1], x_new)
                # km: per-step odometer delta; the leading -1 marks the
                # first reading, which has no predecessor.
                km_arr = np.append([-1], np.diff(y_new_arr))
                assert y_new_arr.size == len(new_values)
                for new_values_line in new_values:
                    # new_values_line['r_n'] = int(r_n)
                    # NOTE(review): indexing with r_n - 1 assumes r_n is a
                    # 1-based position within the group — confirm upstream.
                    r_n = new_values_line['r_n']
                    new_odometer = int(round(y_new_arr[r_n - 1], 0))
                    new_values_line['odometer'] = new_odometer
                    new_values_line['exp_work_type'] = calc_exp_work_type(new_odometer)
                    new_values_line['km'] = int(round(km_arr[r_n - 1], 0)) if km_arr[r_n - 1] != -1 else None

                insert_values.extend(new_values)
                if rows_counter == 0:
                    print('Insert rows {}'.format(len(insert_values)))
                    yield insert_values
                    rows_counter = 10000
                    total_rows += len(insert_values)
                    insert_values = []
            new_values = []

            x = tuple()
            y = tuple()
            x_new = tuple()

        new_values.append({'region': row.region,
                           'bir': row.bir,
                           'client_name': row.client_name,
                           'vin': row.vin,
                           'model': row.model,
                           'yyy': row.yyy,
                           'mmm': row.mmm,
                           'presence': row.presence,
                           'r_n': row.r_n})

        # Zero odometer readings are treated as missing and interpolated.
        if row.odometer != 0:
            x += (row.r_n,)
            y += (row.odometer,)
        x_new += (row.r_n,)

        prev_key = current_key

    # Flush the remaining partial batch.
    if insert_values:
        print('Insert rows {}'.format(len(insert_values)))
        yield insert_values
        total_rows += len(insert_values)
        print('Total inserted rows {}'.format(total_rows))
示例#24
0
import click
import aiopg.sa
import sqlalchemy as sa
from aiohttp import web

from att.db.schema import t_user

# Prepared login query: yields the user's id when the login exists and the
# password matches.  ``crypt(candidate, stored_hash)`` re-hashes the candidate
# using the salt embedded in the stored hash (PostgreSQL pgcrypto), so the
# equality check succeeds only for the correct password.
CHECK_PASSWORD = (sa.select([
    t_user.c.id.label('user_id'),
]).where(t_user.c.login == sa.bindparam('login')).where(
    t_user.c.password == sa.func.crypt(sa.bindparam('password'),
                                       t_user.c.password)))


def make_login_response(user_id):
    """Build the JSON HTTP response for a login attempt."""
    if user_id is None:
        return web.json_response({'error': 'auth failed'})
    return web.json_response({'user_id': str(user_id)})


async def login(request):
    params = await request.json()
    login = params.get('login')
    password = params.get('password')

    async with request.app['db'].acquire() as conn:
        result = await conn.execute(CHECK_PASSWORD,
                                    login=login,
                                    password=password)
        user_id = await result.scalar()