Example #1
    def test_count_with_bindparams(self):
        User = self.classes.User

        bq = self.bakery(lambda s: s.query(User))

        sess = Session()

        eq_(
            bq(sess).count(),
            4
        )

        bq += lambda q: q.filter(User.name == bindparam("uname"))
        # a single bound parameter, passed to params() as a keyword
        eq_(
            bq(sess).params(uname='fred').count(), 1
        )
        # with multiple bound parameters, pass each one as a keyword
        bq += lambda q: q.filter(User.id == bindparam("anid"))
        eq_(
            bq(sess).params(uname='fred', anid=9).count(), 1
        )
        eq_(
            # wrong id, so 0 results:
            bq(sess).params(uname='fred', anid=8).count(), 0
        )
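Each `bq += lambda ...` step above extends the baked query's cache key permanently, so once a bound parameter is added it must be supplied on every call. `params()` also accepts a single positional dict; a minimal sketch:

    # equivalent to the keyword form above
    eq_(bq(sess).params({"uname": "fred", "anid": 9}).count(), 1)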
Example #2
def delete(jobs):
    """Delete a job or a list of jobs. This does **NOT** resolve any
    dependencies but removes the relationships.

    **Note** that no search of the job's dependencies is performed. You
    have to manually build the job list with all the jobs you want deleted.
    You can use :py:func:`jip.jobs.get_subgraph` to get a full subgraph of a
    job, or :py:func:`jip.jobs.get_group_jobs` to create a list of all jobs
    that are related due to grouping or piping.

    :param jobs: single job or list of jobs
    """
    if not isinstance(jobs, (list, tuple)):
        jobs = [jobs]
    # create delete statement for the job
    stmt = [Job.__table__.delete().where(Job.id == bindparam("_id"))]
    # delete entries in the relationship tables
    for relation_table in [job_dependencies, job_pipes, job_groups]:
        dep = relation_table.delete().where(
            (relation_table.c.source == bindparam("_id")) |
            (relation_table.c.target == bindparam("_id"))
        )
        stmt.append(dep)
    # delete entries in file tables
    for relation_table in [InputFile.__table__, OutputFile.__table__]:
        dep = relation_table.delete().where(
            relation_table.c.job_id == bindparam("_id")
        )
        stmt.append(dep)

    # convert the job values
    values = [{"_id": j.id} for j in jobs if j.id is not None]
    if values:
        _execute(stmt[::-1], values)  # reversed so relationship and file rows are deleted before the job itself
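A brief usage sketch for the function above (assuming `job` is a persisted Job instance):

    # remove a single job plus its relationship and file entries
    delete(job)

    # remove a job together with its full subgraph, as suggested in the docstring
    delete(jip.jobs.get_subgraph(job))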
Example #3
    def test_update(self):
        with self.engine.connect() as conn:
            conn.execute(
                self.tables.data.insert(),
                [
                    {"x": "x1", "y": "y1"},
                    {"x": "x2", "y": "y2"},
                    {"x": "x3", "y": "y3"}
                ]
            )

            conn.execute(
                self.tables.data.update().
                where(self.tables.data.c.x == bindparam('xval')).
                values(y=bindparam('yval')),
                [
                    {"xval": "x1", "yval": "y5"},
                    {"xval": "x3", "yval": "y6"}
                ]
            )
            eq_(
                conn.execute(
                    select([self.tables.data]).
                    order_by(self.tables.data.c.id)).
                fetchall(),
                [
                    (1, "x1", "y5", 5),
                    (2, "x2", "y2", 5),
                    (3, "x3", "y6", 5)
                ]
            )
Example #4
def search_for_scraped(session, argument_oyez_id=None, section_number=None, number=None):
    baked_query = bakery(lambda session: session.query(Turn).join(Section, Turn.section).join(Argument, Section.argument))
    baked_query += lambda q: q.filter(Argument.oyez_id == bindparam('argument_oyez_id'))
    baked_query += lambda q: q.filter(Section.number == bindparam('section_number'))
    baked_query += lambda q: q.filter(Turn.number == bindparam('number'))
    result = baked_query(session).params(argument_oyez_id=argument_oyez_id, section_number=section_number, number=number).one_or_none()
    return result
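A hypothetical call; the identifier values below are placeholders, not real Oyez ids:

    turn = search_for_scraped(session,
                              argument_oyez_id='1999/99-1234',  # placeholder
                              section_number=1,
                              number=2)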
Example #5
    def fast_load_module_id(cls, name, version, path, code_hash,                 # pylint: disable=too-many-arguments
                            session=None):
        """Load module id by name, version and code_hash

        Compile SQLAlchemy core query into string for optimization

        Keyword arguments:
        session -- specify session for loading (default=relational.session)
        """
        session = session or relational.session
        if not hasattr(cls, "_load_or_create_module_id"):
            tmodule = cls.t
            _query = select([tmodule.c.id]).where(
                (tmodule.c.name == bindparam("name")) &
                ((is_none(tmodule.c.version)) |
                 (tmodule.c.version == bindparam("version"))) &
                ((is_none(tmodule.c.code_hash)) |
                 (tmodule.c.code_hash == bindparam("code_hash")))
            )
            cls._load_or_create_module_id = str(_query)

        info = dict(name=name, path=path, version=version, code_hash=code_hash)
        an_id = session.execute(
            cls._load_or_create_module_id, info).fetchone()
        if an_id:
            return an_id[0]
Example #6
File: tdb_sql.py, Project: rfurman/arxaliv
def update_data(table, thing_id, **vals):
    transactions.add_engine(table.bind)

    u = table.update(sa.and_(table.c.thing_id == thing_id, table.c.key == sa.bindparam("_key")))
    d = table.delete(sa.and_(table.c.thing_id == thing_id, table.c.key == sa.bindparam("_key")))

    inserts = []
    for key, val in vals.iteritems():
        if key[0:5] == "multi":
            # vks = [py2db(v, return_kind=True) for v in vs]
            # vals = [vk[0] for vk in vks]
            val = [py2db(v, return_kind=False) for v in val]
            kind = "num"

            d.execute(_key=key)
            for v in val:
                inserts.append({"key": key, "value": v, "kind": kind})
        else:
            val, kind = py2db(val, return_kind=True)

            uresult = u.execute(_key=key, value=val, kind=kind)
            if not uresult.rowcount:
                inserts.append({"key": key, "value": val, "kind": kind})

    # do one insert
    if inserts:
        i = table.insert(values=dict(thing_id=thing_id))
        i.execute(*inserts)
Example #7
    def insert_dataset(self, metadata_doc, dataset_id, dataset_type_id):
        """
        Insert dataset if not already indexed.
        :type metadata_doc: dict
        :type dataset_id: str or uuid.UUID
        :type dataset_type_id: int
        :return: whether it was inserted
        :rtype: bool
        """
        try:
            dataset_type_ref = bindparam('dataset_type_ref')
            ret = self._connection.execute(
                DATASET.insert().from_select(
                    ['id', 'dataset_type_ref', 'metadata_type_ref', 'metadata'],
                    select([
                        bindparam('id'), dataset_type_ref,
                        select([
                            DATASET_TYPE.c.metadata_type_ref
                        ]).where(
                            DATASET_TYPE.c.id == dataset_type_ref
                        ).label('metadata_type_ref'),
                        bindparam('metadata', type_=JSONB)
                    ])
                ),
                id=dataset_id,
                dataset_type_ref=dataset_type_id,
                metadata=metadata_doc
            )
            return ret.rowcount > 0
        except IntegrityError as e:
            if e.orig.pgcode == PGCODE_UNIQUE_CONSTRAINT:
                raise DuplicateRecordError('Duplicate dataset, not inserting: %s' % dataset_id)
            raise
Example #8
File: models.py, Project: itsonlycode/Mailu
    def resolve(cls, localpart, domain_name):
        alias_preserve_case = cls.query.filter(
                sqlalchemy.and_(cls.domain_name == domain_name,
                    sqlalchemy.or_(
                        sqlalchemy.and_(
                            cls.wildcard == False,
                            cls.localpart == localpart
                        ), sqlalchemy.and_(
                            cls.wildcard == True,
                            sqlalchemy.bindparam("l", localpart).like(cls.localpart)
                        )
                    )
                )
            ).order_by(cls.wildcard, sqlalchemy.func.char_length(cls.localpart).desc()).first()
        if alias_preserve_case:
            return alias_preserve_case

        if localpart:
            localpart = localpart.lower()
        return cls.query.filter(
                sqlalchemy.and_(cls.domain_name == domain_name,
                    sqlalchemy.or_(
                        sqlalchemy.and_(
                            cls.wildcard == False,
                            sqlalchemy.func.lower(cls.localpart) == localpart
                        ), sqlalchemy.and_(
                            cls.wildcard == True,
                            sqlalchemy.bindparam("l", localpart).like(sqlalchemy.func.lower(cls.localpart))
                        )
                    )
                )
            ).order_by(cls.wildcard, sqlalchemy.func.char_length(sqlalchemy.func.lower(cls.localpart)).desc()).first()
Example #9
    def get_original_details(self, iid=None, detail_name=None, os=None):
        """
        tested by: TestItemTable.test_get_original_details_* functions
        :param iid: get detail for specific iid or all if None
        :param detail_name: get detail with specific name or all names if None
        :param os: get detail for os name or for all oses if None
        :return: list of original details in the order they were inserted
        """
        if "get_original_details" not in self.baked_queries_map:
            the_query = self.bakery(lambda session: session.query(IndexItemDetailRow))
            the_query += lambda q: q.join(IndexItemRow)
            the_query += lambda q: q.filter(IndexItemRow.iid.like(bindparam('iid')))
            the_query += lambda q: q.filter(IndexItemDetailRow.detail_name.like(bindparam('detail_name')))
            the_query += lambda q: q.filter(IndexItemDetailRow.os_id.like(bindparam('os')))
            the_query += lambda q: q.order_by(IndexItemDetailRow._id)
            self.baked_queries_map["get_original_details"] = the_query
        else:
            the_query = self.baked_queries_map["get_original_details"]

        # params with None are turned to '%'
        params = [iid, detail_name, os]
        for iparam in range(len(params)):
            if params[iparam] is None:
                params[iparam] = '%'
        retVal = the_query(self.session).params(iid=params[0], detail_name=params[1], os=params[2]).all()
        return retVal
Example #10
def updateRegion(mapper, connection, target):
    sitePosition = target.newPosition
    stmt = text('''SELECT dbo.[fn_GetRegionFromLatLon] (:lat,:lon)
        ''').bindparams(bindparam('lat', sitePosition.LAT),
                        bindparam('lon', sitePosition.LON))
    regionID = connection.execute(stmt).scalar()
    target.FK_Region = regionID
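Here the values are attached up front with `.bindparams()`, so `execute()` needs no parameter dict. An equivalent sketch that defers binding to execution time:

    stmt = text('SELECT dbo.[fn_GetRegionFromLatLon] (:lat,:lon)')
    regionID = connection.execute(
        stmt, {'lat': sitePosition.LAT, 'lon': sitePosition.LON}).scalar()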
Example #11
    def test_functions_with_cols(self):
        users = table(
            'users',
            column('id'),
            column('name'),
            column('fullname'))
        calculate = select([column('q'), column('z'), column('r')], from_obj=[
                           func.calculate(
                               bindparam('x', None), bindparam('y', None)
                           )])

        self.assert_compile(select([users], users.c.id > calculate.c.z),
                            "SELECT users.id, users.name, users.fullname "
                            "FROM users, (SELECT q, z, r "
                            "FROM calculate(:x, :y)) "
                            "WHERE users.id > z"
                            )

        s = select([users], users.c.id.between(
            calculate.alias('c1').unique_params(x=17, y=45).c.z,
            calculate.alias('c2').unique_params(x=5, y=12).c.z))

        self.assert_compile(
            s, "SELECT users.id, users.name, users.fullname "
            "FROM users, (SELECT q, z, r "
            "FROM calculate(:x_1, :y_1)) AS c1, (SELECT q, z, r "
            "FROM calculate(:x_2, :y_2)) AS c2 "
            "WHERE users.id BETWEEN c1.z AND c2.z", checkparams={
                'y_1': 45, 'x_1': 17, 'y_2': 12, 'x_2': 5})
Example #12
    def list_notifications(self, tenant_id, offset, limit):

        rows = []

        with self._db_engine.connect() as conn:
            nm = self.nm

            select_nm_query = (select([nm])
                               .where(nm.c.tenant_id == bindparam('b_tenant_id')))

            parms = {'b_tenant_id': tenant_id}

            if offset:
                select_nm_query = (select_nm_query
                                   .where(nm.c.id > bindparam('b_offset')))

                parms['b_offset'] = offset.encode('utf8')

            select_nm_query = (select_nm_query
                               .order_by(nm.c.id)
                               .limit(bindparam('b_limit')))

            parms['b_limit'] = limit + 1

            rows = conn.execute(select_nm_query, parms).fetchall()

        return [dict(row) for row in rows]
Example #13
def delete_exercise(user_id, exercise_id):
    """
    Submit a request to have an exercise deleted.
    :param exercise_id: ID of the exercise we're requesting to have deleted
    :return: Nothing.
    """
    conn = eng.connect()

    exercise_parm = bindparam("exercise_id", type_=Integer)
    user_parm = bindparam("user_id", type_=String)

    query = select([exercise_table.c.id]).where(
        and_(exercise_table.c.id == exercise_parm, exercise_table.c.user_id == user_parm)
    )

    is_valid_user = conn.execute(query, exercise_id=exercise_id, user_id=user_id).fetchone()

    if is_valid_user:
        with conn.begin() as trans:
            query = attempt_table.delete().where(attempt_table.c.exercise_id == exercise_parm)
            conn.execute(query, exercise_id=exercise_id)

            query = resource_by_exercise_table.delete().where(resource_by_exercise_table.c.exercise_id == exercise_parm)
            conn.execute(query, exercise_id=exercise_id)

            query = exercise_table.delete().where(exercise_table.c.id == exercise_parm)
            conn.execute(query, exercise_id=exercise_id)
            trans.commit()

        msg = "Executed deleteion query on exercise: {} belonging to user: {}".format(exercise_id, user_id)
    else:
        msg = "User: {} not the owner of exercise: {}".format(user_id, exercise_id)

    conn.close()
    return msg
Example #14
def get_resources_for_exercise(exercise_id, user_id):
    """
    Get all resources connected to the given exercise
    :param exercise_id: ID of the exercise in question
    :param user_id: Owning user
    :return: A list of the appropriate resources.
    """

    conn = eng.connect()
    user_id_parm = bindparam("user_id")
    exercise_parm = bindparam("exercise_id")

    query = (
        select([resource_table.c.id, resource_table.c.caption, resource_table.c.url, resource_table.c.user_id])
        .select_from(resource_table.join(resource_by_exercise_table))
        .where(
            and_(resource_table.c.user_id == user_id_parm, resource_by_exercise_table.c.exercise_id == exercise_parm)
        )
    )

    result = conn.execute(query, user_id=user_id, exercise_id=exercise_id)
    resources = [
        dict(resource_id=resource_id, user_id=user_id, caption=caption, url=url)
        for resource_id, caption, url, user_id in result.fetchall()
    ]
    conn.close()

    return resources
Example #15
def add_resource(caption, url, user_id, exercise_id=None):
    """
    Add a clickable resource to the data store
    :param caption: The text to show up for the user to click on.
    :param url: Where clicking the text takes you.
    :param user_id: Who owns this link.
    :param exercise_id: The exercise that this resource refers to.
    :return: Nothing.
    """

    if len(caption) > CHARACTER_LIMIT or len(url) > CHARACTER_LIMIT:
        msg = "Either new caption or new url exceeded char limit of {} chars".format(CHARACTER_LIMIT)
        raise Exception(msg)

    caption_parm = bindparam("caption", type_=String)
    url_parm = bindparam("url", type_=String)
    user_parm = bindparam("user_id", type_=String)
    exercise_parm = bindparam("exercise_id", type_=Integer)

    conn = eng.connect()

    with conn.begin() as trans:
        query = resource_table.insert().values(caption=caption_parm, url=url_parm, user_id=user_parm)
        result = conn.execute(query, caption=caption, url=url, user_id=user_id)
        new_resource_id = result.inserted_primary_key[0]

        query = resource_by_exercise_table.insert().values(exercise_id=exercise_parm, resource_id=new_resource_id)
        conn.execute(query, exercise_id=exercise_id, user_id=user_id)
        trans.commit()

    conn.close()
    msg = ""
    return msg
Example #16
def author(mit_id, conn):
    """
    Returns an author object for insertion into mongo summary collection.

    The format is as follows:
        {"_id": {"name": <name>, "mitid": <mitid>},
         "type": "author",
         "size": <num docs>,
         "downloads": <num downloads>,
         "countries": [
            {"country": <3 ltr code>, "downloads": <num downloads>},...
         ],
         "dates": [
            {"date": <YYYY-MM-DD>, "downloads": <num>},...
         ]}
    """

    requests_to_authors = requests.join(documents)\
                                  .join(documents_authors)\
                                  .join(authors)

    totals = select([
                authors.c.mit_id,
                authors.c.name,
                select([func.count()])
                    .select_from(documents_authors.join(authors))
                    .where(authors.c.mit_id==bindparam('mit_id'))
                    .label('size'),
                select([func.count()])
                    .select_from(requests_to_authors)
                    .where(authors.c.mit_id==bindparam('mit_id'))
                    .label('downloads')
                ])\
             .where(authors.c.mit_id==bindparam('mit_id'))
    countries = select([requests.c.country, func.count().label('downloads')])\
                .select_from(requests_to_authors)\
                .where(authors.c.mit_id==bindparam('mit_id'))\
                .group_by(requests.c.country)
    dates = select([
                func.date_trunc('day', requests.c.datetime).label('date'),
                func.count().label('downloads')])\
            .select_from(requests_to_authors)\
            .where(authors.c.mit_id==bindparam('mit_id'))\
            .group_by(func.date_trunc('day', requests.c.datetime))

    author_obj = {'type': 'author'}
    res = conn.execute(totals, mit_id=mit_id).first()
    author_obj['_id'] = {'name': res['name'], 'mitid': res['mit_id']}
    author_obj['size'] = res['size']
    author_obj['downloads'] = res['downloads']
    res = conn.execute(countries, mit_id=mit_id)
    for row in res:
        author_obj.setdefault('countries', [])\
            .append({'country': row['country'], 'downloads': row['downloads']})
    res = conn.execute(dates, mit_id=mit_id)
    for row in res:
        author_obj.setdefault('dates', [])\
            .append({'date': row['date'].strftime('%Y-%m-%d'),
                     'downloads': row['downloads']})
    return author_obj
Example #17
def migrate_post_photos():
    photo_map = {}
    photo_attrs = [
        photos.c.caption,
        photos.c.filename,
    ]

    for row in conn.execute(
            select([posts, photos], use_labels=True).select_from(
                posts.join(photos))):
        post_id = row[posts.c.id]
        photo_json = {
            attr.name: row[attr] for attr in photo_attrs if row[attr]
        }
        photo_map.setdefault(post_id, []).append(photo_json)

    photo_batch = []
    for post_id, photo_blob in photo_map.items():
        photo_batch.append({
            'post_id': post_id,
            'photos': json.dumps(photo_blob),
        })

    update_photos = posts.update()\
                         .where(posts.c.id == bindparam('post_id'))\
                         .values(photos=bindparam('photos'))

    conn.execute(update_photos, photo_batch)
Example #18
def auto_validate_stored_procGSM_Argos(ptt, ind_id, user, type_, freq, session):
    procStockDict = {
        'argos': '[sp_auto_validate_Argos_GPS]',
        'gsm': '[sp_auto_validate_GSM]'
    }

    if type_ == 'argos' :
        table = ArgosDatasWithIndiv
    elif type_ == 'gsm' :
        table = GsmDatasWithIndiv

    if ind_id is None:
        stmt = (update(table)
                .where(and_(table.c['FK_Individual'] == None,
                            table.c['FK_ptt'] == ptt))
                .where(table.c['checked'] == 0)
                .values(checked=1))
        session.execute(stmt)
        nb_insert = exist = error = 0
    else:
        stmt = text(""" DECLARE @nb_insert int , @exist int , @error int;
        exec """+ dbConfig['data_schema'] + """."""+procStockDict[type_]+""" :ptt , :ind_id , :user ,:freq , @nb_insert OUTPUT, @exist OUTPUT, @error OUTPUT;
        SELECT @nb_insert, @exist, @error; """
        ).bindparams(bindparam('ind_id', ind_id), bindparam('user', user),
                     bindparam('freq', freq), bindparam('ptt', ptt))
        nb_insert, exist, error = session.execute(stmt).fetchone()

    return nb_insert, exist, error
Example #19
    def list_notifications(self, tenant_id, sort_by, offset, limit):

        rows = []

        with self._db_engine.connect() as conn:
            nm = self.nm

            select_nm_query = (select([nm])
                               .where(nm.c.tenant_id == bindparam('b_tenant_id')))

            parms = {'b_tenant_id': tenant_id}

            if sort_by is not None:
                order_columns = [literal_column(col) for col in sort_by]
                if 'id' not in sort_by:
                    order_columns.append(nm.c.id)
            else:
                order_columns = [nm.c.id]

            select_nm_query = select_nm_query.order_by(*order_columns)

            # ordering was already applied above via order_columns
            select_nm_query = (select_nm_query
                               .limit(bindparam('b_limit')))

            parms['b_limit'] = limit + 1

            if offset:
                select_nm_query = select_nm_query.offset(bindparam('b_offset'))
                parms['b_offset'] = offset

            rows = conn.execute(select_nm_query, parms).fetchall()

        return [dict(row) for row in rows]
Example #20
    def _add_missing_mau(self, table_prefix, days_ago):
        # Locate beginning table
        tables = self._get_filtered_tables(table_prefix, days_ago+30)
        first_date = self._date_from_tablename(tables[0].name)

        grouped_monthly_unique = select([
            monthly_rollup.c.date,
            func.sum(monthly_rollup.c.count).label("count")
        ]).\
            where(monthly_rollup.c.date >= first_date).\
            group_by(monthly_rollup.c.date).\
            order_by(monthly_rollup.c.date)

        # Average them over the 6 days prior, including today, for MAUs
        maus = select([
            grouped_monthly_unique.c.date,
            func.avg(grouped_monthly_unique.c.count).over(
                order_by=text("date ROWS 6 PRECEDING")
            ).label("mau")
        ]).\
            order_by(grouped_monthly_unique.c.date)
        results = self._conn.execute(maus).fetchall()

        if not results:
            return

        # Update appropriate rows
        stmt = daily_stats.update().\
            where(daily_stats.c.date == bindparam("update_date")).\
            values(mau=bindparam("mau"))
        self._conn.execute(stmt, [
            {"update_date": x.date, "mau": x.mau}
            for x in results[-days_ago:]
        ])
Example #21
File: sqla_test.py, Project: jjyyyin/luigi
    def copy(self, conn, ins_rows, table_bound):
        ins = (
            table_bound.update()
            .where(table_bound.c.property == sqlalchemy.bindparam("_property"))
            .values({table_bound.c.item: sqlalchemy.bindparam("_item")})
        )
        conn.execute(ins, ins_rows)
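The leading underscores in `_property` and `_item` are deliberate: in an executemany-style UPDATE, a bindparam whose name matches a column referenced in `values()` would collide with the bind SQLAlchemy generates for the SET clause, so the WHERE-clause parameters get distinct names. Each dict in `ins_rows` must then carry those keys, e.g. (values hypothetical):

    ins_rows = [{'_property': 'color', '_item': 'blue'},
                {'_property': 'size', '_item': 'large'}]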
Example #22
File: nodes.py, Project: lonvia/osgende
    def __init__(self, meta, name, osmtables, subset=None, change=None,
                 column_geom='geom', geom_change=None):
        super().__init__(meta, name, osmtables.node, subset=subset,
                         change=change)
        # need a geometry column
        if isinstance(column_geom, Column):
            self.column_geom = column_geom
            srid = column_geom.type.srid
        else:
            srid = meta.info.get('srid', self.src.data.c.geom.type.srid)
            self.column_geom = Column(column_geom, Geometry('POINT', srid=srid))
        self.data.append_column(self.column_geom)

        # add an additional transform to the insert statement if the srid changes
        params = {}
        for c in self.data.c:
            if c == self.column_geom and self.src.data.c.geom.type.srid != srid:
                geomparam = bindparam(c.name, type_=self.column_geom.type)
                params[c.name] = ST_Transform(geomparam, self.column_geom.type.srid)
            else:
                params[c.name] = bindparam(c.name)
        self.stm_insert = self.stm_insert.values(params)

        # the table to remember geometry changes
        self.geom_change = geom_change
Example #23
    def test_missing_bind_posn(self):
        assert_raises_message(
            exc.ArgumentError,
            "This text\(\) construct doesn't define a bound parameter named 'bar'",
            text(":foo").bindparams,
            bindparam('foo', value=5), bindparam('bar', value=7)
        )
Example #24
    def test_select_where(self):
        stmt = (
            select([self.tables.foo])
            .where(self.tables.foo.c.data == bindparam("data"))
            .where(self.tables.foo.c.x == bindparam("x"))
        )
        self._assert_raises(stmt, {"data": "data"})
Example #25
File: database.py, Project: Nordeus/pushkin
def update_canonicals(canonicals):
    '''
    Update canonical data for android devices.
    '''
    global ENGINE
    binding = [{"p_{}".format(k): v for k, v in canonical.items()} for canonical in canonicals]
    device_table = model.metadata.tables['device']
    stmt = update(device_table).\
        values(device_token_new=bindparam('p_new_token')).\
        where(and_(device_table.c.login_id == bindparam('p_login_id'),
                   func.coalesce(device_table.c.device_token_new, device_table.c.device_token) == bindparam('p_old_token')))
    ENGINE.execute(stmt, binding)

    with session_scope() as session:
        query = text('SELECT keep_max_users_per_device( \
                     (:platform_id)::int2, :device_token, (:max_users_per_device)::int2)')
        for canonical in canonicals:
            session.execute(query,
                            {'platform_id': constants.PLATFORM_ANDROID,
                             'device_token': canonical['new_token'],
                             'max_users_per_device': config.max_users_per_device
                            })
            session.execute(query,
                            {'platform_id': constants.PLATFORM_ANDROID_TABLET,
                             'device_token': canonical['new_token'],
                             'max_users_per_device': config.max_users_per_device
                            })
        session.commit()
Example #26
    def test_updatemany(self):
        # MySQL-Python 1.2.2 breaks functions in execute_many :(
        if (testing.against('mysql+mysqldb') and
                testing.db.dialect.dbapi.version_info[:3] == (1, 2, 2)):
            return

        t.insert().execute({}, {}, {})

        t.update(t.c.col1 == sa.bindparam('pkval')).execute(
            {'pkval': 51, 'col7': None, 'col8': None, 'boolcol1': False})

        t.update(t.c.col1 == sa.bindparam('pkval')).execute(
            {'pkval': 51},
            {'pkval': 52},
            {'pkval': 53})

        l = t.select().execute()
        ctexec = currenttime.scalar()
        today = datetime.date.today()
        eq_(l.fetchall(),
            [(51, 'im the update', f2, ts, ts, ctexec, False, False,
              13, today, 'py', 'hi'),
             (52, 'im the update', f2, ts, ts, ctexec, True, False,
              13, today, 'py', 'hi'),
             (53, 'im the update', f2, ts, ts, ctexec, True, False,
              13, today, 'py', 'hi')])
Example #27
    def test_expanding_in_special_chars(self):
        testing.db.execute(
            users.insert(),
            [
                dict(user_id=7, user_name='jack'),
                dict(user_id=8, user_name='fred'),
            ]
        )

        with testing.db.connect() as conn:
            stmt = select([users]).where(
                users.c.user_name.in_(bindparam('u35', expanding=True))
            ).where(
                users.c.user_id == bindparam("u46")
            ).order_by(users.c.user_id)

            eq_(
                conn.execute(
                    stmt, {"u35": ['jack', 'fred'], "u46": 7}).fetchall(),
                [(7, 'jack')]
            )

            stmt = select([users]).where(
                users.c.user_name.in_(bindparam('u.35', expanding=True))
            ).where(
                users.c.user_id == bindparam("u.46")
            ).order_by(users.c.user_id)

            eq_(
                conn.execute(
                    stmt, {"u.35": ['jack', 'fred'], "u.46": 7}).fetchall(),
                [(7, 'jack')]
            )
Example #28
def test_dbapi_raw(n):
    """The DBAPI's API inserting rows in bulk."""

    conn = engine.pool._creator()
    cursor = conn.cursor()
    compiled = Customer.__table__.insert().values(
        name=bindparam('name'),
        description=bindparam('description')).\
        compile(dialect=engine.dialect)

    if compiled.positional:
        args = (
            ('customer name %d' % i, 'customer description %d' % i)
            for i in range(n))
    else:
        args = (
            dict(
                name='customer name %d' % i,
                description='customer description %d' % i
            )
            for i in range(n)
        )

    cursor.executemany(
        str(compiled),
        list(args)
    )
    conn.commit()
    conn.close()
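`compiled.positional` mirrors the driver's DBAPI paramstyle: `qmark` and `format` drivers take positional tuples, while `named` and `pyformat` drivers take dicts, which is why both argument shapes are prepared. A quick way to see which applies:

    # the expected shape follows the driver's declared paramstyle
    assert engine.dialect.paramstyle in ('qmark', 'format', 'numeric', 'named', 'pyformat')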
Example #29
File: test_select.py, Project: cloudera/hue
    def test_bound_limit_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).
            limit(bindparam("l")).offset(bindparam("o")),
            [(2, 2, 3), (3, 3, 4)],
            params={"l": 2, "o": 1}
        )
Example #30
def lastseenuid(account_id, session, folder_id):
    q = bakery(lambda session: session.query(func.max(ImapUid.msg_uid)))
    q += lambda q: q.filter(
        ImapUid.account_id == bindparam('account_id'),
        ImapUid.folder_id == bindparam('folder_id'))
    res = q(session).params(account_id=account_id,
                            folder_id=folder_id).one()[0]
    return res or 0
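Because the bakery caches on the identity of the lambdas, the statement is compiled once and only the bound `account_id`/`folder_id` values change between calls; a hypothetical invocation:

    uid = lastseenuid(account_id=1, session=session, folder_id=2)  # placeholder ids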
Example #31
    def get_alarms(self, tenant_id, query_parms=None, offset=None, limit=None):
        if not query_parms:
            query_parms = {}

        with self._db_engine.connect() as conn:
            parms = {}
            ad = self.ad
            am = self.am
            mdd = self.mdd
            md = self.md
            a = self.a

            query = (self.base_subquery_list.where(
                ad.c.tenant_id == bindparam('b_tenant_id')))

            parms['b_tenant_id'] = tenant_id

            if 'alarm_definition_id' in query_parms:
                query = query.where(
                    ad.c.id == bindparam('b_alarm_definition_id'))
                parms['b_alarm_definition_id'] = query_parms[
                    'alarm_definition_id']

            if 'metric_name' in query_parms:
                query = query.where(a.c.id.in_(self.get_a_am_query))
                parms['b_md_name'] = query_parms['metric_name'].encode('utf8') if six.PY2 else \
                    query_parms['metric_name']

            if 'severity' in query_parms:
                severities = query_parms['severity'].split('|')
                query = query.where(
                    or_(ad.c.severity == bindparam('b_severity' + str(i))
                        for i in range(len(severities))))
                for i, s in enumerate(severities):
                    parms['b_severity' +
                          str(i)] = s if six.PY3 else s.encode('utf-8')

            if 'state' in query_parms:
                query = query.where(a.c.state == bindparam('b_state'))
                parms['b_state'] = query_parms['state']

            if 'lifecycle_state' in query_parms:
                query = (query.where(
                    a.c.lifecycle_state == bindparam('b_lifecycle_state')))
                parms['b_lifecycle_state'] = query_parms['lifecycle_state'] \
                    if six.PY3 else query_parms['lifecycle_state'].encode('utf8')

            if 'link' in query_parms:
                query = query.where(a.c.link == bindparam('b_link'))
                parms['b_link'] = query_parms['link'] if six.PY3 \
                    else query_parms['link'].encode('utf8')

            if 'state_updated_start_time' in query_parms:
                query = (query.where(
                    a.c.state_updated_at >= bindparam('b_state_updated_at')))

                date_str = query_parms['state_updated_start_time'] if six.PY3 \
                    else query_parms['state_updated_start_time'].encode('utf8')
                date_param = datetime.strptime(date_str,
                                               '%Y-%m-%dT%H:%M:%S.%fZ')
                parms['b_state_updated_at'] = date_param

            if 'metric_dimensions' in query_parms:
                sub_query = select([a.c.id])
                sub_query_from = (a.join(am, am.c.alarm_id == a.c.id).join(
                    mdd, mdd.c.id == am.c.metric_definition_dimensions_id))

                sub_query_md_base = select([md.c.dimension_set_id
                                            ]).select_from(md)

                for i, metric_dimension in enumerate(
                        query_parms['metric_dimensions'].items()):

                    md_name = "b_md_name_{}".format(i)

                    values_cond = None
                    values_cond_flag = False

                    if metric_dimension and metric_dimension[1]:
                        if '|' in metric_dimension[1]:
                            values = metric_dimension[1].encode('utf8').split('|') if six.PY2 else \
                                metric_dimension[1].split('|')
                            sub_values_cond = []
                            for j, value in enumerate(values):
                                sub_md_value = "b_md_value_{}_{}".format(i, j)
                                sub_values_cond.append(
                                    md.c.value == bindparam(sub_md_value))
                                parms[sub_md_value] = value
                            values_cond = or_(*sub_values_cond)
                            values_cond_flag = True
                        else:
                            md_value = "b_md_value_{}".format(i)
                            values_cond = (md.c.value == bindparam(md_value))
                            values_cond_flag = True
                            parms[md_value] = metric_dimension[1]

                    sub_query_md = (sub_query_md_base.where(
                        md.c.name == bindparam(md_name)))
                    if values_cond_flag:
                        sub_query_md = (sub_query_md.where(values_cond))

                    sub_query_md = (sub_query_md.distinct().alias(
                        'md_{}'.format(i)))

                    sub_query_from = (sub_query_from.join(
                        sub_query_md, sub_query_md.c.dimension_set_id ==
                        mdd.c.metric_dimension_set_id))

                    parms[md_name] = metric_dimension[0].encode('utf8') if six.PY2 else \
                        metric_dimension[0]

                    sub_query = (
                        sub_query.select_from(sub_query_from).distinct())
                    query = query.where(a.c.id.in_(sub_query))
            order_columns = []
            if 'sort_by' in query_parms:
                columns_mapper = \
                    {'alarm_id': a.c.id,
                     'alarm_definition_id': ad.c.id,
                     'alarm_definition_name': ad.c.name,
                     'state_updated_timestamp': a.c.state_updated_at,
                     'updated_timestamp': a.c.updated_at,
                     'created_timestamp': a.c.created_at,
                     'severity': models.field_sort(ad.c.severity, list(map(text, ["'LOW'",
                                                                                  "'MEDIUM'",
                                                                                  "'HIGH'",
                                                                                  "'CRITICAL'"]))),
                     'state': models.field_sort(a.c.state, list(map(text, ["'OK'",
                                                                           "'UNDETERMINED'",
                                                                           "'ALARM'"])))}

                order_columns, received_cols = self._remap_columns(
                    query_parms['sort_by'], columns_mapper)

                if not received_cols.get('alarm_id', False):
                    order_columns.append(a.c.id)
            else:
                order_columns = [a.c.id]

            if limit:
                query = query.limit(bindparam('b_limit'))
                parms['b_limit'] = limit + 1

            if offset:
                query = query.offset(bindparam('b_offset'))
                parms['b_offset'] = offset

            query = (query.order_by(*order_columns).alias('alarm_id_list'))

            main_query = (self.base_query.select_from(
                self.base_query_from.join(query,
                                          query.c.id == a.c.id)).distinct())

            main_query = main_query.order_by(*order_columns)

            return [
                dict(row)
                for row in conn.execute(main_query, parms).fetchall()
            ]
Example #32
    def __init__(self):
        super(AlarmsRepository, self).__init__()

        metadata = MetaData()
        self.a_du = models.create_a_model(metadata)
        self.aa = models.create_aa_model(metadata).alias('aa')
        self.sa = models.create_sa_model(metadata).alias('sa')
        self.ad = models.create_ad_model(metadata).alias('ad')
        self.am = models.create_am_model(metadata).alias('am')
        self.md = models.create_md_model(metadata).alias('md')
        self.mdd = models.create_mdd_model(metadata).alias('mdd')
        self.mde = models.create_mde_model(metadata).alias('mde')
        self.sad = models.create_sad_model(metadata).alias('sad')
        self.sadd = models.create_sadd_model(metadata).alias('sadd')
        a = self.a_du
        self.a = a.alias('a')
        a_s = self.a
        sa = self.sa
        ad = self.ad
        am = self.am
        md = self.md
        mdd = self.mdd
        mde = self.mde

        gc_columns = [md.c.name + text("'='") + md.c.value]

        mdg = (select([
            md.c.dimension_set_id,
            models.group_concat(gc_columns).label('dimensions')
        ]).select_from(md).group_by(md.c.dimension_set_id).alias('mdg'))

        self.base_query_from = (a_s.join(
            ad, ad.c.id == a_s.c.alarm_definition_id).join(
                am, am.c.alarm_id == a_s.c.id).join(
                    mdd,
                    mdd.c.id == am.c.metric_definition_dimensions_id).join(
                        mde, mde.c.id == mdd.c.metric_definition_id).outerjoin(
                            mdg, mdg.c.dimension_set_id ==
                            mdd.c.metric_dimension_set_id))

        self.base_query = select([
            a_s.c.id.label('alarm_id'), a_s.c.state,
            a_s.c.state_updated_at.label('state_updated_timestamp'),
            a_s.c.updated_at.label('updated_timestamp'),
            a_s.c.created_at.label('created_timestamp'), a_s.c.lifecycle_state,
            a_s.c.link,
            ad.c.id.label('alarm_definition_id'),
            ad.c.name.label('alarm_definition_name'), ad.c.severity,
            mde.c.name.label('metric_name'),
            mdg.c.dimensions.label('metric_dimensions')
        ])

        self.base_subquery_list = (select([a_s.c.id]).select_from(
            a_s.join(ad, a_s.c.alarm_definition_id == ad.c.id)))

        self.get_ad_query = (select([ad]).select_from(
            ad.join(a, ad.c.id == a.c.alarm_definition_id)).where(
                ad.c.tenant_id == bindparam('b_tenant_id')).where(
                    a.c.id == bindparam('b_id')))

        self.get_am_query = (select([
            a_s.c.id.label('alarm_id'), mde.c.name, mdg.c.dimensions
        ]).select_from(
            a_s.join(am, am.c.alarm_id == a_s.c.id).join(
                mdd, mdd.c.id == am.c.metric_definition_dimensions_id).join(
                    mde, mde.c.id == mdd.c.metric_definition_id).outerjoin(
                        mdg, mdg.c.dimension_set_id ==
                        mdd.c.metric_dimension_set_id)).where(
                            a_s.c.id == bindparam('b_id')).order_by(
                                a_s.c.id).distinct())

        self.get_sa_query = (select([
            sa.c.id.label('sub_alarm_id'), sa.c.alarm_id, sa.c.expression,
            ad.c.id.label('alarm_definition_id')
        ]).select_from(
            sa.join(a_s, a_s.c.id == sa.c.alarm_id).join(
                ad, ad.c.id == a_s.c.alarm_definition_id)).where(
                    ad.c.tenant_id == bindparam('b_tenant_id')).where(
                        a_s.c.id == bindparam('b_id')).distinct())

        self.get_a_query = (select(
            [a_s.c.state, a_s.c.link, a_s.c.lifecycle_state]).select_from(
                a_s.join(ad, ad.c.id == a_s.c.alarm_definition_id)).where(
                    ad.c.tenant_id == bindparam('b_tenant_id')).where(
                        a_s.c.id == bindparam('b_id')))

        self.get_a_ad_query = (select([a_s.c.id]).select_from(
            a_s.join(ad, ad.c.id == a_s.c.alarm_definition_id)).where(
                ad.c.tenant_id == bindparam('b_tenant_id')).where(
                    a_s.c.id == bindparam('b_id')).alias('a_ad'))

        select_tmp = (select([literal_column('id')]).select_from(
            self.get_a_ad_query).distinct().alias('temporarytable'))

        self.delete_alarm_query = (delete(a).where(a.c.id.in_(select_tmp)))

        md_ = (select([
            mde.c.id
        ]).where(mde.c.name == bindparam('b_md_name')).alias('md_'))

        self.get_a_am_query = (select([a_s.c.id]).select_from(
            a_s.join(am, am.c.alarm_id == a_s.c.id).join(
                mdd, mdd.c.id == am.c.metric_definition_dimensions_id).join(
                    md_, md_.c.id == mdd.c.metric_definition_id)))
Example #33
def get_significant_states_with_session(
    hass: HomeAssistant,
    session: Session,
    start_time: datetime,
    end_time: datetime | None = None,
    entity_ids: list[str] | None = None,
    filters: Any = None,
    include_start_time_state: bool = True,
    significant_changes_only: bool = True,
    minimal_response: bool = False,
    no_attributes: bool = False,
) -> MutableMapping[str, Iterable[LazyState | State | dict[str, Any]]]:
    """
    Return states changes during UTC period start_time - end_time.

    entity_ids is an optional iterable of entities to include in the results.

    filters is an optional SQLAlchemy filter which will be applied to the database
    queries unless entity_ids is given, in which case it is ignored.

    Significant states are all states where there is a state change,
    as well as all states from certain domains (for instance
    thermostat so that we get current temperature in our graphs).
    """
    timer_start = time.perf_counter()
    baked_query, join_attributes = bake_query_and_join_attributes(
        hass, no_attributes)

    if entity_ids is not None and len(entity_ids) == 1:
        if (significant_changes_only and split_entity_id(entity_ids[0])[0]
                not in SIGNIFICANT_DOMAINS):
            baked_query += lambda q: q.filter(States.last_changed == States.
                                              last_updated)
    elif significant_changes_only:
        baked_query += lambda q: q.filter(
            or_(
                *[
                    States.entity_id.like(entity_domain)
                    for entity_domain in SIGNIFICANT_DOMAINS_ENTITY_ID_LIKE
                ],
                (States.last_changed == States.last_updated),
            ))

    if entity_ids is not None:
        baked_query += lambda q: q.filter(
            States.entity_id.in_(bindparam("entity_ids", expanding=True)))
    else:
        baked_query += lambda q: q.filter(
            and_(*[
                ~States.entity_id.like(entity_domain)
                for entity_domain in IGNORE_DOMAINS_ENTITY_ID_LIKE
            ]))
        if filters:
            filters.bake(baked_query)

    baked_query += lambda q: q.filter(States.last_updated > bindparam(
        "start_time"))
    if end_time is not None:
        baked_query += lambda q: q.filter(States.last_updated < bindparam(
            "end_time"))

    if join_attributes:
        baked_query += lambda q: q.outerjoin(
            StateAttributes, States.attributes_id == StateAttributes.
            attributes_id)
    baked_query += lambda q: q.order_by(States.entity_id, States.last_updated)

    states = execute(
        baked_query(session).params(start_time=start_time,
                                    end_time=end_time,
                                    entity_ids=entity_ids))

    if _LOGGER.isEnabledFor(logging.DEBUG):
        elapsed = time.perf_counter() - timer_start
        _LOGGER.debug("get_significant_states took %fs", elapsed)

    return _sorted_states_to_dict(
        hass,
        session,
        states,
        start_time,
        entity_ids,
        filters,
        include_start_time_state,
        minimal_response,
        no_attributes,
    )
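The `bindparam("entity_ids", expanding=True)` above is what lets the cached statement take a variable-length list: the IN clause is rendered at execution time from whatever list is passed via `.params(entity_ids=...)`. Illustrative SQL only:

    # entity_ids=['light.kitchen', 'light.porch'] renders roughly as:
    #   WHERE states.entity_id IN (?, ?)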
Example #34
        def __parse_sql_statement(self, text, **params):
            """
            Parse and bind parameters to create a SQL statement.
            adapted from https://github.com/cs50/python-cs50
            """

            class UserDefinedType(sqlalchemy.TypeDecorator):
                """
                Add support for expandable values, a la https://bitbucket.org/zzzeek/sqlalchemy/issues/3953/expanding-parameter.
                """

                impl = sqlalchemy.types.UserDefinedType

                def process_literal_param(self, value, dialect):
                    """Receive a literal parameter value to be rendered inline within a statement."""
                    def process(value):
                        """Render a literal value, escaping as needed."""

                        # bool
                        if isinstance(value, bool):
                            return sqlalchemy.types.Boolean().literal_processor(dialect)(value)

                        # datetime.date
                        elif isinstance(value, datetime.date):
                            return sqlalchemy.types.String().literal_processor(dialect)(value.strftime("%Y-%m-%d"))

                        # datetime.datetime
                        elif isinstance(value, datetime.datetime):
                            return sqlalchemy.types.String().literal_processor(dialect)(value.strftime("%Y-%m-%d %H:%M:%S"))

                        # datetime.time
                        elif isinstance(value, datetime.time):
                            return sqlalchemy.types.String().literal_processor(dialect)(value.strftime("%H:%M:%S"))

                        # float
                        elif isinstance(value, float):
                            return sqlalchemy.types.Float().literal_processor(dialect)(value)

                        # int
                        elif isinstance(value, int):
                            return sqlalchemy.types.Integer().literal_processor(dialect)(value)

                        # long - modified to use int instead of long in py3 so i dont need this
                        # - see https://docs.python.org/3.3/whatsnew/3.0.html#integers
                        # elif sys.version_info.major != 3 and isinstance(value, long):
                        #    return sqlalchemy.types.Integer().literal_processor(dialect)(value)

                        # str
                        elif isinstance(value, str):
                            return sqlalchemy.types.String().literal_processor(dialect)(value)


                        # None
                        elif isinstance(value, sqlalchemy.sql.elements.Null):
                            return sqlalchemy.types.NullType().literal_processor(dialect)(value)

                        # Unsupported value
                        raise RuntimeError("unsupported value")

                    # Process value(s), separating with commas as needed
                    if type(value) is list:
                        return ", ".join([process(v) for v in value])
                    else:
                        return process(value)

            # Allow only one statement at a time
            # SQLite does not support executing many statements
            # https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.execute
            if (len(sqlparse.split(text)) > 1 and
                self.__common_engine.url.get_backend_name() == "sqlite"):
                raise RuntimeError("too many statements at once")

            # Raise exceptions for warnings
            warnings.filterwarnings("error")
            log = re.sub(r"\n\s*", " ", text)

            # Prepare, execute statement
            try:

                # Construct a new TextClause clause
                statement = sqlalchemy.text(text)
                

                # Iterate over parameters
                for key, value in params.items():

                    # Translate None to NULL
                    if value is None:
                        value = sqlalchemy.sql.null()

                    if self.__common_engine.url.get_backend_name() == "sqlite":
                        # for some reason, bool isnt being converted to int
                        if value == True:
                            value = 1
                        elif value == False:
                            value = 0

                    # Bind parameters before statement reaches database, so that bound parameters appear in exceptions
                    # http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.expression.text
                    statement = statement.bindparams(sqlalchemy.bindparam(
                        key, value=value, type_=UserDefinedType()))

                # Stringify bound parameters
                # http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html#how-do-i-render-sql-expressions-as-strings-possibly-with-bound-parameters-inlined
                statement = str(statement.compile(compile_kwargs={"literal_binds": True}))
                log = re.sub(r"\n\s*", " ", sqlparse.format(statement, reindent=True))
                return statement
            except:
                self.logger.debug(termcolor.colored(log, "red"))
                self.logger.debug(termcolor.colored(sys.exc_info()[0], "red"))
                
                raise
Example #35
def fn2(q):
    canary.fn2()
    return q.filter(User.id == bindparam("id"))
Example #36
    def param(self, parameter: str) -> BindParameter:
        if self._is_sqlalchemy:
            return bindparam(parameter)
        raise NotImplementedError()
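A sketch of how such a helper could be used with a `text()` statement; `helper`, the table, and the value are hypothetical:

    stmt = text('SELECT * FROM users WHERE id = :id').bindparams(helper.param('id'))
    rows = connection.execute(stmt, {'id': 42}).fetchall()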
Example #37
def upgrade():
    from depot.manager import DepotManager
    from depot.fields.upload import UploadedFile
    from sqlalchemy import bindparam, Unicode, Column

    from kotti import DBSession, metadata

    files = sa.Table('files', metadata)
    files.c.data.type = sa.LargeBinary()    # this restores to old column type
    dn = DepotManager.get_default()

    _saved = []

    def process(thing):
        id, data, filename, mimetype = thing
        uploaded_file = UploadedFile({'depot_name': dn, 'files': []})
        # noinspection PyProtectedMember
        uploaded_file._thaw()
        uploaded_file.process_content(
            data, filename=filename, content_type=mimetype)
        _saved.append({'nodeid': id, 'data': uploaded_file.encode()})
        log.info(f"Saved data for node id {id}")

    query = DBSession.query(
        files.c.id, files.c.data, files.c.filename, files.c.mimetype
    ).order_by(files.c.id).yield_per(10)

    window_size = 10
    window_idx = 0

    log.info("Starting migration of blob data")

    now = time.time()
    while True:
        start, stop = window_size * window_idx, window_size * (window_idx + 1)
        things = query.slice(start, stop).all()
        if things is None:
            break
        for thing in things:
            process(thing)
        if len(things) < window_size:
            break
        window_idx += 1

    log.info("Files written on disk, saving information to DB")

    op.drop_column('files', 'data')
    op.add_column('files', Column('data', Unicode(4096)))
    files.c.data.type = Unicode(4096)

    update = files.update().where(files.c.id == bindparam('nodeid')).\
        values({files.c.data: bindparam('data')})

    def chunks(l, n):
        for i in range(0, len(l), n):
            yield l[i:i + n]

    for cdata in chunks(_saved, 10):
        DBSession.execute(update, cdata)

    log.info("Blob migration completed in {} seconds".format(
        int(time.time() - now)))
Example #38
    def test_insert(self):
        stmt = self.tables.foo.insert().values(
            x=bindparam('x'), data=bindparam('data'))
        self._assert_raises(stmt, {'data': 'data'})
Example #39
File: queryset.py, Project: mbeacom/ormar
    async def bulk_update(  # noqa:  CCR001
            self,
            objects: List["Model"],
            columns: List[str] = None) -> None:
        """
        Performs bulk update in one database session to speed up the process.

        Allows updating multiple instances at once.

        All `Models` passed need to have primary key column populated.

        You can also select which fields to update by passing `columns` list
        as a list of string names.

        Bulk operations do not send signals.

        :param objects: list of ormar models
        :type objects: List[Model]
        :param columns: list of columns to update
        :type columns: List[str]
        """
        ready_objects = []
        pk_name = self.model_meta.pkname
        if not columns:
            columns = list(self.model.extract_db_own_fields().union(
                self.model.extract_related_names()))

        if pk_name not in columns:
            columns.append(pk_name)

        columns = [self.model.get_column_alias(k) for k in columns]

        for objt in objects:
            new_kwargs = objt.dict()
            if pk_name not in new_kwargs or new_kwargs.get(pk_name) is None:
                raise ModelPersistenceError(
                    "You cannot update unsaved objects. "
                    f"{self.model.__name__} has to have {pk_name} filled.")
            new_kwargs = self.model.substitute_models_with_pks(new_kwargs)
            new_kwargs = self.model.translate_columns_to_aliases(new_kwargs)
            new_kwargs = {
                "new_" + k: v
                for k, v in new_kwargs.items() if k in columns
            }
            ready_objects.append(new_kwargs)

        pk_column = self.model_meta.table.c.get(
            self.model.get_column_alias(pk_name))
        pk_column_name = self.model.get_column_alias(pk_name)
        table_columns = [c.name for c in self.model_meta.table.c]
        expr = self.table.update().where(
            pk_column == bindparam("new_" + pk_column_name))
        expr = expr.values(
            **{
                k: bindparam("new_" + k)
                for k in columns if k != pk_column_name and k in table_columns
            })
        # databases bind params only where query is passed as string
        # otherwise it just passes all data to values and results in unconsumed columns
        expr = str(expr)
        await self.database.execute_many(expr, ready_objects)

        for objt in objects:
            objt.set_save_status(True)
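The `new_` prefix plays the same role as the underscores in Example #21: bind names in the compiled statement must not collide with real column names when `execute_many` matches the row dicts by key. A row dict in `ready_objects` therefore looks like (column names hypothetical):

    {'new_id': 1, 'new_name': 'updated'}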
Example #40
    def get_alarms_count(self,
                         tenant_id,
                         query_parms=None,
                         offset=None,
                         limit=None):
        if not query_parms:
            query_parms = {}

        with self._db_engine.connect() as conn:
            parms = {}
            ad = self.ad
            am = self.am
            mdd = self.mdd
            mde = self.mde
            md = self.md
            a = self.a

            query_from = a.join(ad, ad.c.id == a.c.alarm_definition_id)

            parms['b_tenant_id'] = tenant_id

            group_by_columns = []

            if 'group_by' in query_parms:
                group_by_columns = query_parms['group_by']
                sub_group_by_columns = []
                metric_group_by = {
                    'metric_name', 'dimension_name', 'dimension_value'
                }.intersection(set(query_parms['group_by']))
                if metric_group_by:
                    sub_query_columns = [am.c.alarm_id]
                    if 'metric_name' in metric_group_by:
                        sub_group_by_columns.append(
                            mde.c.name.label('metric_name'))
                    if 'dimension_name' in metric_group_by:
                        sub_group_by_columns.append(
                            md.c.name.label('dimension_name'))
                    if 'dimension_value' in metric_group_by:
                        sub_group_by_columns.append(
                            md.c.value.label('dimension_value'))

                    sub_query_columns.extend(sub_group_by_columns)

                    sub_query_from = (mde.join(
                        mdd, mde.c.id == mdd.c.metric_definition_id).join(
                            md, mdd.c.metric_dimension_set_id ==
                            md.c.dimension_set_id).join(
                                am, am.c.metric_definition_dimensions_id ==
                                mdd.c.id))

                    sub_query = (select(sub_query_columns).select_from(
                        sub_query_from).distinct().alias('metrics'))

                    query_from = query_from.join(
                        sub_query, sub_query.c.alarm_id == a.c.id)

            query_columns = [func.count().label('count')]
            query_columns.extend(group_by_columns)

            query = (select(query_columns).select_from(query_from).where(
                ad.c.tenant_id == bindparam('b_tenant_id')))

            if 'alarm_definition_id' in query_parms:
                parms['b_alarm_definition_id'] = query_parms[
                    'alarm_definition_id']
                query = query.where(
                    ad.c.id == bindparam('b_alarm_definition_id'))

            if 'state' in query_parms:
                parms['b_state'] = query_parms['state'] if six.PY3 else \
                    query_parms['state'].encode('utf8')
                query = query.where(a.c.state == bindparam('b_state'))

            if 'severity' in query_parms:
                severities = query_parms['severity'].split('|')
                query = query.where(
                    or_(ad.c.severity == bindparam('b_severity' + str(i))
                        for i in range(len(severities))))
                for i, s in enumerate(severities):
                    parms['b_severity' +
                          str(i)] = s if six.PY3 else s.encode('utf8')

            if 'lifecycle_state' in query_parms:
                parms['b_lifecycle_state'] = query_parms['lifecycle_state'] if six.PY3 else \
                    query_parms['lifecycle_state'].encode('utf8')
                query = query.where(
                    a.c.lifecycle_state == bindparam('b_lifecycle_state'))

            if 'link' in query_parms:
                parms['b_link'] = query_parms['link'] if six.PY3 else \
                    query_parms['link'].encode('utf8')
                query = query.where(a.c.link == bindparam('b_link'))

            if 'state_updated_start_time' in query_parms:
                date_str = query_parms['state_updated_start_time'] if six.PY3 \
                    else query_parms['state_updated_start_time'].encode('utf8')
                date_param = datetime.strptime(date_str,
                                               '%Y-%m-%dT%H:%M:%S.%fZ')
                parms['b_state_updated_at'] = date_param
                query = query.where(
                    a.c.state_updated_at >= bindparam('b_state_updated_at'))

            if 'metric_name' in query_parms:
                query = query.where(a.c.id.in_(self.get_a_am_query))
                parms['b_md_name'] = query_parms['metric_name'] if six.PY3 else \
                    query_parms['metric_name'].encode('utf8')

            if 'metric_dimensions' in query_parms:
                sub_query = select([a.c.id])
                sub_query_from = (a.join(am, am.c.alarm_id == a.c.id).join(
                    mdd, mdd.c.id == am.c.metric_definition_dimensions_id))

                sub_query_md_base = select([md.c.dimension_set_id
                                            ]).select_from(md)

                for i, metric_dimension in enumerate(
                        query_parms['metric_dimensions'].items()):
                    dimension_value = metric_dimension[1] if six.PY3 else \
                        metric_dimension[1].encode('utf8')

                    if '|' in dimension_value:
                        dimension_value = tuple(dimension_value.split('|'))

                    md_name = "b_md_name_{}".format(i)
                    md_value = "b_md_value_{}".format(i)

                    sub_query_md = (sub_query_md_base.where(
                        md.c.name == bindparam(md_name)))

                    if isinstance(dimension_value, tuple):
                        sub_query_md = (sub_query_md.where(
                            md.c.value.op('IN')(bindparam(md_value))))
                    else:
                        sub_query_md = (sub_query_md.where(
                            md.c.value == bindparam(md_value)))

                    sub_query_md = (sub_query_md.distinct().alias(
                        'md_{}'.format(i)))

                    sub_query_from = (sub_query_from.join(
                        sub_query_md, sub_query_md.c.dimension_set_id ==
                        mdd.c.metric_dimension_set_id))

                    parms[md_name] = metric_dimension[0] if six.PY3 else \
                        metric_dimension[0].encode('utf8')
                    parms[md_value] = dimension_value

                    sub_query = (
                        sub_query.select_from(sub_query_from).distinct())
                    query = query.where(a.c.id.in_(sub_query))

            if group_by_columns:
                query = (query.order_by(*group_by_columns).group_by(
                    *group_by_columns))

            if limit:
                query = query.limit(bindparam('b_limit'))
                parms['b_limit'] = limit + 1

            if offset:
                query = query.offset(bindparam('b_offset'))
                parms['b_offset'] = offset

            query = query.distinct()
            return [dict(row) for row in conn.execute(query, parms).fetchall()]
Example #41
0
def messages_or_drafts(namespace_id, drafts, subject, from_addr, to_addr,
                       cc_addr, bcc_addr, any_email, thread_public_id,
                       started_before, started_after, last_message_before,
                       last_message_after, received_before, received_after,
                       filename, in_, unread, starred, limit, offset, view,
                       db_session):
    # Warning: complexities ahead. This function sets up the query that gets
    # results for the /messages API. It loads from several tables, supports a
    # variety of views and filters, and is performance-critical for the API. As
    # such, it is not super simple.
    #
    # We bake the generated query to avoid paying query compilation overhead on
    # every request. This requires some attention: every parameter that can
    # vary between calls *must* be inserted via bindparam(), or else the first
    # value passed will be baked into the query and reused on each request.
    # Subqueries (on contact tables) can't be properly baked, so we have to
    # call query.spoil() on those code paths.

    param_dict = {
        'namespace_id': namespace_id,
        'drafts': drafts,
        'subject': subject,
        'from_addr': from_addr,
        'to_addr': to_addr,
        'cc_addr': cc_addr,
        'bcc_addr': bcc_addr,
        'any_email': any_email,
        'thread_public_id': thread_public_id,
        'received_before': received_before,
        'received_after': received_after,
        'started_before': started_before,
        'started_after': started_after,
        'last_message_before': last_message_before,
        'last_message_after': last_message_after,
        'filename': filename,
        'in_': in_,
        'unread': unread,
        'starred': starred,
        'limit': limit,
        'offset': offset
    }

    if view == 'count':
        target = func.count(Message.id)
    elif view == 'ids':
        target = Message.public_id
    else:
        target = Message
    query = bakery(lambda s: s.query(target))
    query += lambda q: q.join(Thread)
    query += lambda q: q.filter(
        Message.namespace_id == bindparam('namespace_id'), Message.is_draft ==
        bindparam('drafts'))

    if subject is not None:
        query += lambda q: q.filter(Message.subject == bindparam('subject'))

    if unread is not None:
        query += lambda q: q.filter(Message.is_read != bindparam('unread'))

    if starred is not None:
        query += lambda q: q.filter(Message.is_starred == bindparam('starred'))

    if thread_public_id is not None:
        query += lambda q: q.filter(Thread.public_id == bindparam(
            'thread_public_id'))

    # TODO: deprecate thread-oriented date filters on message endpoints.
    if started_before is not None:
        query += lambda q: q.filter(
            Thread.subjectdate < bindparam('started_before'), Thread.
            namespace_id == bindparam('namespace_id'))

    if started_after is not None:
        query += lambda q: q.filter(
            Thread.subjectdate > bindparam('started_after'), Thread.
            namespace_id == bindparam('namespace_id'))

    if last_message_before is not None:
        query += lambda q: q.filter(
            Thread.recentdate < bindparam('last_message_before'), Thread.
            namespace_id == bindparam('namespace_id'))

    if last_message_after is not None:
        query += lambda q: q.filter(
            Thread.recentdate > bindparam('last_message_after'), Thread.
            namespace_id == bindparam('namespace_id'))

    if received_before is not None:
        query += lambda q: q.filter(Message.received_date <= bindparam(
            'received_before'))

    if received_after is not None:
        query += lambda q: q.filter(Message.received_date > bindparam(
            'received_after'))

    if to_addr is not None:
        query.spoil()
        to_query = db_session.query(MessageContactAssociation.message_id). \
            join(Contact).filter(
                MessageContactAssociation.field == 'to_addr',
                Contact.email_address == to_addr,
                Contact.namespace_id == bindparam('namespace_id')).subquery()
        query += lambda q: q.filter(Message.id.in_(to_query))

    if from_addr is not None:
        query.spoil()
        from_query = db_session.query(MessageContactAssociation.message_id). \
            join(Contact).filter(
                MessageContactAssociation.field == 'from_addr',
                Contact.email_address == from_addr,
                Contact.namespace_id == bindparam('namespace_id')).subquery()
        query += lambda q: q.filter(Message.id.in_(from_query))

    if cc_addr is not None:
        query.spoil()
        cc_query = db_session.query(MessageContactAssociation.message_id). \
            join(Contact).filter(
                MessageContactAssociation.field == 'cc_addr',
                Contact.email_address == cc_addr,
                Contact.namespace_id == bindparam('namespace_id')).subquery()
        query += lambda q: q.filter(Message.id.in_(cc_query))

    if bcc_addr is not None:
        query.spoil()
        bcc_query = db_session.query(MessageContactAssociation.message_id). \
            join(Contact).filter(
                MessageContactAssociation.field == 'bcc_addr',
                Contact.email_address == bcc_addr,
                Contact.namespace_id == bindparam('namespace_id')).subquery()
        query += lambda q: q.filter(Message.id.in_(bcc_query))

    if any_email is not None:
        query.spoil()
        any_email_query = db_session.query(
            MessageContactAssociation.message_id).join(Contact). \
            filter(Contact.email_address == any_email,
                   Contact.namespace_id == bindparam('namespace_id')). \
            subquery()
        query += lambda q: q.filter(Message.id.in_(any_email_query))

    if filename is not None:
        query += lambda q: q.join(Part).join(Block). \
            filter(Block.filename == bindparam('filename'),
                   Block.namespace_id == bindparam('namespace_id'))

    if in_ is not None:
        query.spoil()
        category_filters = [
            Category.name == bindparam('in_'),
            Category.display_name == bindparam('in_')
        ]
        try:
            valid_public_id(in_)
            category_filters.append(Category.public_id == bindparam('in_id'))
            # Type conversion and bindparams interact poorly -- you can't do
            # e.g.
            # query.filter(or_(Category.name == bindparam('in_'),
            #                  Category.public_id == bindparam('in_')))
            # because the binary conversion defined by Category.public_id will
            # be applied to the bound value prior to its insertion in the
            # query. So we define another bindparam for the public_id:
            param_dict['in_id'] = in_
        except InputError:
            pass
        query += lambda q: q.join(MessageCategory).join(Category). \
            filter(Category.namespace_id == namespace_id,
                   or_(*category_filters))

    if view == 'count':
        res = query(db_session).params(**param_dict).one()[0]
        return {"count": res}

    query += lambda q: q.order_by(desc(Message.received_date))
    query += lambda q: q.limit(bindparam('limit'))
    if offset:
        query += lambda q: q.offset(bindparam('offset'))

    if view == 'ids':
        res = query(db_session).params(**param_dict).all()
        return [x[0] for x in res]

    # Eager-load related attributes to make constructing API representations
    # faster.
    query += lambda q: q.options(
        contains_eager(Message.thread),
        subqueryload(Message.messagecategories).joinedload('category'),
        subqueryload(Message.parts).joinedload(Part.block),
        subqueryload(Message.events))

    prepared = query(db_session).params(**param_dict)
    return prepared.all()
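
The comment at the top of this function is the key subtlety of baked queries: the cache is keyed by where the lambdas are defined, so any value captured by closure is compiled into the cached query once and silently reused. A minimal sketch of the failure mode and the fix, assuming a mapped User class like the one in the earlier examples (illustrative):

from sqlalchemy import bindparam
from sqlalchemy.ext import baked

bakery = baked.bakery()

def get_by_name_wrong(session, name):
    # BUG: `name` is captured by the closure, so the first value passed
    # is baked into the cached query and reused on every later call
    bq = bakery(lambda s: s.query(User).filter(User.name == name))
    return bq(session).all()

def get_by_name_right(session, name):
    # correct: the cached query holds a placeholder, and the varying
    # value is supplied at execution time via .params()
    bq = bakery(lambda s: s.query(User).filter(
        User.name == bindparam('uname')))
    return bq(session).params(uname=name).all()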
Example #42
0
def _get_significant_states(
    hass,
    session,
    start_time,
    end_time=None,
    entity_ids=None,
    filters=None,
    include_start_time_state=True,
    significant_changes_only=True,
    minimal_response=False,
):
    """
    Return states changes during UTC period start_time - end_time.

    Significant states are all states where there is a state change,
    as well as all states from certain domains (for instance
    thermostat so that we get current temperature in our graphs).
    """
    timer_start = time.perf_counter()

    baked_query = hass.data[HISTORY_BAKERY](
        lambda session: session.query(*QUERY_STATES)
    )

    if significant_changes_only:
        baked_query += lambda q: q.filter(
            (
                States.domain.in_(SIGNIFICANT_DOMAINS)
                | (States.last_changed == States.last_updated)
            )
            & (States.last_updated > bindparam("start_time"))
        )
    else:
        baked_query += lambda q: q.filter(States.last_updated > bindparam("start_time"))

    if entity_ids is not None:
        baked_query += lambda q: q.filter(
            States.entity_id.in_(bindparam("entity_ids", expanding=True))
        )
    else:
        baked_query += lambda q: q.filter(~States.domain.in_(IGNORE_DOMAINS))
        if filters:
            filters.bake(baked_query)

    if end_time is not None:
        baked_query += lambda q: q.filter(States.last_updated < bindparam("end_time"))

    baked_query += lambda q: q.order_by(States.entity_id, States.last_updated)

    states = execute(
        baked_query(session).params(
            start_time=start_time, end_time=end_time, entity_ids=entity_ids
        )
    )

    if _LOGGER.isEnabledFor(logging.DEBUG):
        elapsed = time.perf_counter() - timer_start
        _LOGGER.debug("get_significant_states took %fs", elapsed)

    return _sorted_states_to_dict(
        hass,
        session,
        states,
        start_time,
        entity_ids,
        filters,
        include_start_time_state,
        minimal_response,
    )
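
The entity_ids filter above relies on an expanding bindparam, which defers rendering of the IN clause until execution time so that a list of any length can be passed as one parameter. A minimal Core sketch of the same technique, assuming a trivial states table (illustrative):

from sqlalchemy import bindparam, column, select, table

states = table("states", column("entity_id"), column("state"))

stmt = select([states.c.state]).where(
    states.c.entity_id.in_(bindparam("entity_ids", expanding=True))
)

# the list is supplied per execution and may differ in length each call:
# conn.execute(stmt, {"entity_ids": ["light.kitchen", "sensor.cpu"]})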
Example #43
0
def check_password(environ, user, password):
    """

    :param environ:  Dictionary that contains the apache environment variables.
                     There's a lot of overlap of what you get from this dictionary
                     and http://php.net/manual/en/reserved.variables.server.php
    :param user:     String containing the username passed to the apache authentication box
    :param password: String containing the password passed to the apache authentication box
    :return:         Boolean for whether user was properly authenticated (True) or not (False)
    """
    # The REQUEST_URI will contain segments after the usual
    # /<VCS>/<SEMESTER>/<COURSE>/<G_ID>/<USER_ID> that relate to Git and
    # whether it's pushing, pulling, cloning, etc.

    params = list(
        filter(lambda x: len(x) > 0, environ['REQUEST_URI'].split("/")))
    vcs = params[0]

    vcs_paths = []
    if vcs == 'git':
        # info/refs?service=git-upload-pack
        vcs_paths = [
            'info', 'git-upload-pack', 'refs?service=git-upload-pack',
            'refs?service=git-receive-pack', 'git-receive-pack'
        ]

    params = list(filter(lambda x: x not in vcs_paths, params))
    if len(params) == 5:
        semester, course, gradeable = params[1:4]

        # check if this is a team or individual gradeable
        course_db = "submitty_{}_{}".format(semester, course)
        if os.path.isdir(DATABASE_HOST):
            course_conn_string = "postgresql://{}:{}@/{}?host={}".format(
                DATABASE_USER, DATABASE_PASS, course_db, DATABASE_HOST)
        else:
            course_conn_string = "postgresql://{}:{}@{}/{}".format(
                DATABASE_USER, DATABASE_PASS, DATABASE_HOST, course_db)

        course_engine = create_engine(course_conn_string)
        course_connection = course_engine.connect()
        course_metadata = MetaData(bind=course_engine)

        eg_table = Table('electronic_gradeable',
                         course_metadata,
                         autoload=True)
        select = eg_table.select().where(
            eg_table.c.g_id == bindparam('gradeable_id'))
        eg = course_connection.execute(select,
                                       gradeable_id=gradeable).fetchone()

        if eg is None:
            is_team = False
        else:
            is_team = eg.eg_team_assignment

        if is_team:
            user_id = None
            team_id = params[4]
        else:
            user_id = params[4]
            team_id = None
    else:
        return None

    engine = connection = metadata = None
    authenticated = False

    if AUTHENTICATION_METHOD == 'PamAuthentication':
        authenticated = check_pam(user, password)
        # print(authenticated)
    elif AUTHENTICATION_METHOD == 'DatabaseAuthentication':
        engine, connection, metadata = open_database()
        authenticated = check_database(user, password, connection, metadata)

    if authenticated is not True or user == user_id:
        close_database(engine, connection)
        close_database(course_engine, course_connection)
        return authenticated

    if is_team:
        teams_table = Table('teams', course_metadata, autoload=True)
        select = teams_table.select().where(
            teams_table.c.team_id == bindparam('team_id')).where(
                teams_table.c.user_id == bindparam('user_id'))
        team_user = course_connection.execute(select,
                                              team_id=team_id,
                                              user_id=user).fetchone()
        if team_user is not None:
            close_database(engine, connection)
            close_database(course_engine, course_connection)
            return authenticated

    if engine is None:
        engine, connection, metadata = open_database()

    users_table = Table('courses_users', metadata, autoload=True)
    select = users_table.select().where(users_table.c.user_id == bindparam('user_id'))\
        .where(users_table.c.semester == bindparam('semester')).where(users_table.c.course == bindparam('course'))
    course_user = connection.execute(select,
                                     user_id=user,
                                     semester=semester,
                                     course=course).fetchone()
    if course_user is None:
        authenticated = None
    else:
        if course_user['user_group'] <= 2:
            authenticated = True
        else:
            authenticated = False

    close_database(engine, connection)
    close_database(course_engine, course_connection)

    return authenticated
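
The lookups above bind values at execution time by passing keyword arguments to Connection.execute() against a select carrying named bindparam() placeholders. A self-contained sketch of that pattern (the table and values are illustrative):

from sqlalchemy import Column, MetaData, String, Table, bindparam, create_engine

engine = create_engine('sqlite://')
metadata = MetaData()
users = Table('users', metadata, Column('user_id', String, primary_key=True))
metadata.create_all(engine)

conn = engine.connect()
conn.execute(users.insert(), [{'user_id': 'alice'}, {'user_id': 'bob'}])

# the named placeholder is filled from the execute() keyword argument
select_stmt = users.select().where(users.c.user_id == bindparam('uid'))
row = conn.execute(select_stmt, uid='alice').fetchone()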
Example #44
0
def visit_unload_from_select(element, compiler, **kw):
    """Returns the actual sql query for the UnloadFromSelect class."""

    template = """
       UNLOAD (:select) TO :unload_location
       CREDENTIALS :credentials
       {manifest}
       {header}
       {format}
       {delimiter}
       {encrypted}
       {fixed_width}
       {gzip}
       {add_quotes}
       {null}
       {escape}
       {allow_overwrite}
       {parallel}
       {region}
       {max_file_size}
    """
    el = element

    if el.format is None:
        format_ = ''
    elif el.format == Format.csv:
        format_ = 'FORMAT AS {}'.format(el.format.value)
        if el.delimiter is not None or el.fixed_width is not None:
            raise ValueError(
                'CSV format cannot be used with delimiter or fixed_width')
    elif el.format == Format.parquet:
        format_ = 'FORMAT AS {}'.format(el.format.value)
        if any((
            el.delimiter, el.fixed_width, el.add_quotes, el.escape, el.null,
            el.header, el.gzip
        )):
            raise ValueError(
                "Parquet format can't be used with `delimiter`, `fixed_width`,"
                ' `add_quotes`, `escape`, `null`, `header`, or `gzip`.'
            )
    else:
        raise ValueError(
            'Only CSV and Parquet formats are currently supported.'
        )

    qs = template.format(
        manifest='MANIFEST' if el.manifest else '',
        header='HEADER' if el.header else '',
        format=format_,
        delimiter=(
            'DELIMITER AS :delimiter' if el.delimiter is not None else ''
        ),
        encrypted='ENCRYPTED' if el.encrypted else '',
        fixed_width='FIXEDWIDTH AS :fixed_width' if el.fixed_width else '',
        gzip='GZIP' if el.gzip else '',
        add_quotes='ADDQUOTES' if el.add_quotes else '',
        escape='ESCAPE' if el.escape else '',
        null='NULL AS :null_as' if el.null is not None else '',
        allow_overwrite='ALLOWOVERWRITE' if el.allow_overwrite else '',
        parallel='PARALLEL OFF' if not el.parallel else '',
        region='REGION :region' if el.region is not None else '',
        max_file_size=(
            'MAXFILESIZE :max_file_size MB'
            if el.max_file_size is not None else ''
        ),
    )

    query = sa.text(qs)

    if el.delimiter is not None:
        query = query.bindparams(sa.bindparam(
            'delimiter', value=element.delimiter, type_=sa.String,
        ))

    if el.fixed_width:
        query = query.bindparams(sa.bindparam(
            'fixed_width',
            value=_process_fixed_width(el.fixed_width),
            type_=sa.String,
        ))

    if el.null is not None:
        query = query.bindparams(sa.bindparam(
            'null_as', value=el.null, type_=sa.String
        ))

    if el.region is not None:
        query = query.bindparams(sa.bindparam(
            'region', value=el.region, type_=sa.String
        ))

    if el.max_file_size is not None:
        max_file_size_mib = float(el.max_file_size) / 1024 / 1024
        query = query.bindparams(sa.bindparam(
            'max_file_size', value=max_file_size_mib, type_=sa.Float
        ))

    return compiler.process(
        query.bindparams(
            sa.bindparam('credentials', value=el.credentials, type_=sa.String),
            sa.bindparam(
                'unload_location', value=el.unload_location, type_=sa.String,
            ),
            sa.bindparam(
                'select',
                value=compiler.process(
                    el.select,
                    literal_binds=True,
                ),
                type_=sa.String,
            ),
        ),
        **kw
    )
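
The compiler above assembles the statement with sa.text() and attaches typed values through .bindparams(). A minimal sketch of the same technique outside the Redshift dialect (statement and values are illustrative):

import sqlalchemy as sa

stmt = sa.text("SELECT * FROM events WHERE name = :name AND ts > :ts")
stmt = stmt.bindparams(
    sa.bindparam('name', value='deploy', type_=sa.String),
    sa.bindparam('ts', value=1700000000, type_=sa.Integer),
)

# literal_binds renders the bound values inline, much as the UNLOAD
# compiler does when it processes the inner SELECT
print(stmt.compile(compile_kwargs={'literal_binds': True}))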
Example #45
0
    def test_custom_bind(self):
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )

        mapper(
            User,
            users,
            properties=dict(
                addresses=relationship(
                    mapper(Address, addresses),
                    lazy="select",
                    primaryjoin=and_(
                        users.c.id == addresses.c.user_id,
                        users.c.name == bindparam("name"),
                    ),
                )
            ),
        )

        canary = mock.Mock()

        class MyOption(MapperOption):
            propagate_to_loaders = True

            def __init__(self, crit):
                self.crit = crit

            def process_query_conditionally(self, query):
                """process query during a lazyload"""
                canary()
                params = dict(query.load_options._params)
                query.load_options += {"_params": params}
                query.load_options._params.update(dict(name=self.crit))

        s = Session()
        ed = s.query(User).options(MyOption("ed")).filter_by(name="ed").one()
        eq_(
            ed.addresses,
            [
                Address(id=2, user_id=8),
                Address(id=3, user_id=8),
                Address(id=4, user_id=8),
            ],
        )
        eq_(canary.mock_calls, [mock.call()])

        fred = (
            s.query(User).options(MyOption("ed")).filter_by(name="fred").one()
        )
        eq_(fred.addresses, [])  # fred is missing
        eq_(canary.mock_calls, [mock.call(), mock.call()])

        # the lazy query was not cached; the option is re-applied to the
        # Fred object due to populate_existing()
        fred = (
            s.query(User)
            .populate_existing()
            .options(MyOption("fred"))
            .filter_by(name="fred")
            .one()
        )
        eq_(fred.addresses, [Address(id=5, user_id=9)])  # fred is there

        eq_(canary.mock_calls, [mock.call(), mock.call(), mock.call()])
Example #46
0
    def setUpClass(cls):
        from sqlalchemy import engine_from_config

        engine = engine_from_config({'url': 'sqlite://'}, prefix='')

        qry = open('monasca_api/tests/sqlite_alarm.sql', 'r').read()
        sconn = engine.raw_connection()
        c = sconn.cursor()
        c.executescript(qry)
        sconn.commit()
        c.close()
        cls.engine = engine

        def _fake_engine_from_config(*args, **kw):
            return cls.engine

        cls.fixture = fixtures.MonkeyPatch('sqlalchemy.create_engine',
                                           _fake_engine_from_config)
        cls.fixture.setUp()

        metadata = MetaData()

        cls.aa = models.create_aa_model(metadata)
        cls._delete_aa_query = delete(cls.aa)
        cls._insert_aa_query = (insert(cls.aa).values(
            alarm_definition_id=bindparam('alarm_definition_id'),
            alarm_state=bindparam('alarm_state'),
            action_id=bindparam('action_id')))

        cls.ad = models.create_ad_model(metadata)
        cls._delete_ad_query = delete(cls.ad)
        cls._insert_ad_query = (insert(cls.ad).values(
            id=bindparam('id'),
            tenant_id=bindparam('tenant_id'),
            name=bindparam('name'),
            severity=bindparam('severity'),
            expression=bindparam('expression'),
            match_by=bindparam('match_by'),
            actions_enabled=bindparam('actions_enabled'),
            created_at=bindparam('created_at'),
            updated_at=bindparam('updated_at'),
            deleted_at=bindparam('deleted_at')))
        cls.sad = models.create_sad_model(metadata)
        cls._delete_sad_query = delete(cls.sad)
        cls._insert_sad_query = (insert(cls.sad).values(
            id=bindparam('id'),
            alarm_definition_id=bindparam('alarm_definition_id'),
            function=bindparam('function'),
            metric_name=bindparam('metric_name'),
            operator=bindparam('operator'),
            threshold=bindparam('threshold'),
            period=bindparam('period'),
            periods=bindparam('periods'),
            created_at=bindparam('created_at'),
            updated_at=bindparam('updated_at')))

        cls.sadd = models.create_sadd_model(metadata)
        cls._delete_sadd_query = delete(cls.sadd)
        cls._insert_sadd_query = (insert(cls.sadd).values(
            sub_alarm_definition_id=bindparam('sub_alarm_definition_id'),
            dimension_name=bindparam('dimension_name'),
            value=bindparam('value')))

        cls.nm = models.create_nm_model(metadata)
        cls._delete_nm_query = delete(cls.nm)
        cls._insert_nm_query = (insert(cls.nm).values(
            id=bindparam('id'),
            tenant_id=bindparam('tenant_id'),
            name=bindparam('name'),
            type=bindparam('type'),
            address=bindparam('address'),
            created_at=bindparam('created_at'),
            updated_at=bindparam('updated_at')))
Example #47
0
def main(inicio, fin, ci):
    """
    En el archivo bandejas.cfg se guardan los parametros de configuracion para la base de datos. El procedimiento toma el archivo de desde el mismo directorio donde se encuentra. En el archivo config.cfg se guardan parametros de configuracion.
    \nEjecucion
    El procedimiento se ejecuta de la siguiente forma:
    (ejemplo)\n
    $python bj.py --inicio='2018-05-01' --fin='2018-05-14'

        - Novedades en el periodo [inicio, fin) , incluyendo inicio y no incluye la fecha de fin.Las novedades se refiere a las altas de designacion, ceses de designacion, anulaciones y pasajes a suplencias
        - Para todas las personas (PerId) que tuvieron novedades en el periodo indicado, se toman los datos de toda la historia de altas, ceses, con tope el 01/03 del año correspondiente a la fecha de inicio que es pasada como parametro.
    """

    with open('bandejas.cfg', 'r') as ymlfile:
        cdb = yaml.load(ymlfile)

    with open('config.cfg', 'r') as ymlfile:
        cfg = yaml.load(ymlfile)

    engine = create_engine('mysql+pymysql://' + cdb['personal']['usr'] + ':' +
                           cdb['personal']['password'] + '@' +
                           cdb['personal']['host'] + '/' +
                           cdb['personal']['bd'])
    engine_bandeja_in = create_engine('mysql+pymysql://' +
                                      cdb['bandeja_in']['usr'] + ':' +
                                      cdb['bandeja_in']['password'] + '@' +
                                      cdb['bandeja_in']['host'] + '/' +
                                      cdb['bandeja_in']['bd'])
    engine_bandeja_out = create_engine('mysql+pymysql://' +
                                       cdb['bandeja_out']['usr'] + ':' +
                                       cdb['bandeja_out']['password'] + '@' +
                                       cdb['bandeja_out']['host'] + '/' +
                                       cdb['bandeja_out']['bd'])

    puestos_funcion = cfg[
        'puestos_funcion']  # the positions considered direct teaching
    parametros = {}
    parametros['p1d'] = dt.date(int(inicio.split('-')[0]),
                                int(inicio.split('-')[1]),
                                int(inicio.split('-')[2]))
    parametros['p2d'] = dt.date(int(fin.split('-')[0]), int(fin.split('-')[1]),
                                int(fin.split('-')[2]))

    # do not let through appointments that start at or after this cap (the month after the one given as fin)
    parametros['tope'] = dt.date(
        int(fin.split('-')[0]) + (1 if (fin.split('-')[1] == '12') else 0),
        1 if (fin.split('-')[1] == '12') else int(fin.split('-')[1]) + 1, 1)
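
    # worked example of the cap (illustrative): fin='2018-05-14' gives
    # tope = date(2018, 6, 1); fin='2018-12-20' gives tope = date(2019, 1, 1)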

    # the substitution causes of interest
    suplcausales = cfg['suplcausales']

    parametros['inicioLectivo'] = dt.datetime(
        int(inicio.split('-')[0]) - (1 if inicio.split('-')[1] < '03' else 0),
        3, 1)
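    # worked example (illustrative): inicio='2018-05-01' gives datetime(2018, 3, 1);
    # inicio='2019-02-10' gives datetime(2018, 3, 1), since dates before March fall back a year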

    # load metadata for the Personal model
    metadata = sa.MetaData()
    relaciones_laborales = sa.Table('RELACIONES_LABORALES',
                                    metadata,
                                    autoload=True,
                                    autoload_with=engine)
    anulaciones = sa.Table('ANULACIONES',
                           metadata,
                           autoload=True,
                           autoload_with=engine)
    funciones_relacion_laboral = sa.Table('FUNCIONES_RELACION_LABORAL',
                                          metadata,
                                          autoload=True,
                                          autoload_with=engine)
    funciones_asignadas = sa.Table('FUNCIONES_ASIGNADAS',
                                   metadata,
                                   autoload=True,
                                   autoload_with=engine)
    sillas = sa.Table('SILLAS', metadata, autoload=True, autoload_with=engine)
    cargas_horarias = sa.Table('CARGAS_HORARIAS',
                               metadata,
                               autoload=True,
                               autoload_with=engine)
    silla_grupo_materia = sa.Table('SILLAGRUPOMATERIA',
                                   metadata,
                                   autoload=True,
                                   autoload_with=engine)
    puestos = sa.Table('PUESTOS',
                       metadata,
                       autoload=True,
                       autoload_with=engine)
    denominaciones_cargo = sa.Table('DENOMINACIONES_CARGOS',
                                    metadata,
                                    autoload=True,
                                    autoload_with=engine)
    suplencias = sa.Table('SUPLENCIAS',
                          metadata,
                          autoload=True,
                          autoload_with=engine)
    funciones_agrup_lin = sa.Table('FUNCION_AGRUP_LIN',
                                   metadata,
                                   autoload=True,
                                   autoload_with=engine)

    # load metadata for Personas
    personas = sa.Table('PERSONAS',
                        metadata,
                        schema="Personas",
                        autoload=True,
                        autoload_with=engine)
    personas_documentos = sa.Table('PERSONASDOCUMENTOS',
                                   metadata,
                                   schema="Personas",
                                   autoload=True,
                                   autoload_with=engine)

    # load the subject data from Estudiantil
    asignaturas_materias = sa.Table('ASIGNATURAS_MATERIAS',
                                    metadata,
                                    schema="Estudiantil",
                                    autoload=True,
                                    autoload_with=engine)

    # load the subjects from Estudiantil
    query_asignaturas_materias = sa.select([asignaturas_materias])
    df_asignaturas_materias = pd.read_sql_query(query_asignaturas_materias,
                                                engine,
                                                params=parametros)

    # load the dependency data from the siap database
    tabla_institucional = sa.Table('tabla_institucional',
                                   metadata,
                                   autoload=True,
                                   autoload_with=engine_bandeja_in)
    query_tabla_institucional = sa.select([
        tabla_institucional.c.DEP_AS400.label('dependid'),
        tabla_institucional.c.DEP_DBC.label('dependidSiap')
    ]).select_from(tabla_institucional)
    df_tabla_institucional = pd.read_sql_query(query_tabla_institucional,
                                               engine_bandeja_in,
                                               params=parametros)

    # load the functions that identify support hours, POB, POP, workshop instructors, and code 68
    query_funciones_cargo = sa.select([funciones_agrup_lin])
    df_funciones_cargo = pd.read_sql_query(query_funciones_cargo,
                                           engine,
                                           params=parametros)
    df_funciones_hap = df_funciones_cargo.loc[
        df_funciones_cargo.Funcion_Agrup_Cab_Id == 1, 'FuncionId']
    df_funciones_POB = df_funciones_cargo.loc[
        df_funciones_cargo.Funcion_Agrup_Cab_Id == 8, 'FuncionId']
    df_funciones_POP = df_funciones_cargo.loc[
        df_funciones_cargo.Funcion_Agrup_Cab_Id == 7, 'FuncionId']
    df_funciones_68 = df_funciones_cargo.loc[
        df_funciones_cargo.Funcion_Agrup_Cab_Id == 5, 'FuncionId']
    df_funciones_talleristas = df_funciones_cargo.loc[
        df_funciones_cargo.Funcion_Agrup_Cab_Id == 9, 'FuncionId']
    df_coordinadores_especiales = df_funciones_cargo.loc[
        df_funciones_cargo.Funcion_Agrup_Cab_Id == 10, 'FuncionId']

    # novedades: changes in the period
    query_novedades = sa. \
        select([relaciones_laborales.c.PersonalPerId, relaciones_laborales.c.RelLabId]). \
        select_from(relaciones_laborales.join(puestos)). \
        where( \
              (puestos.c.PuestoFuncionId.in_(puestos_funcion)) & \
              # appointed RL

              (relaciones_laborales.c.PersonalPerId != None) & \
              ( \
               # it starts within the tray (bandeja) period

               ( \
                (relaciones_laborales.c.RelLabFchIniActividades >= sa.bindparam('p1d')) & \
                (relaciones_laborales.c.RelLabFchIniActividades < sa.bindparam('p2d')) \
               ) | \
               # or it ends within the tray period

               ( \
                (relaciones_laborales.c.RelLabCeseFchReal >= sa.bindparam('p1d')) & \
                (relaciones_laborales.c.RelLabCeseFchReal < sa.bindparam('p2d')) \
               ) | \
               # or the start was changed late

               ( \
                (relaciones_laborales.c.RelLabFchIniActividades < sa.bindparam('p1d')) & \
                (relaciones_laborales.c.RelLabDesignFchAlta >= sa.bindparam('p1d')) & \
                (relaciones_laborales.c.RelLabDesignFchAlta < sa.bindparam('p2d')) \
               ) | \
               # or the termination was changed late

               ( \
                (relaciones_laborales.c.RelLabCeseFchReal < sa.bindparam('p1d')) & \
                (relaciones_laborales.c.RelLabCeseFchAlta >= sa.bindparam('p1d')) & \
                (relaciones_laborales.c.RelLabCeseFchAlta < sa.bindparam('p2d'))
               ) \
              ) \
             )
    df_novedades = pd.read_sql_query(query_novedades,
                                     engine,
                                     params=parametros)

    # load the period's annulments
    query_anulaciones_periodo = sa. \
        select([relaciones_laborales.c.PersonalPerId,relaciones_laborales.c.RelLabId, anulaciones.c.AnulacionFchAlta]). \
        select_from(anulaciones.join(relaciones_laborales, cast(anulaciones.c.AnulacionValorPkTabla,Integer)==relaciones_laborales.c.RelLabId).join(puestos)). \
        where( \
              (anulaciones.c.AnulacionFchAlta >= sa.bindparam('p1d')) & \
              (anulaciones.c.AnulacionFchAlta < sa.bindparam('p2d')) & \
              (anulaciones.c.AnulacionTipoNombre=='DESIGNACION') & \
              (puestos.c.PuestoFuncionId.in_(puestos_funcion)) \
             )
    df_anulaciones_periodo = pd.read_sql(query_anulaciones_periodo,
                                         engine,
                                         params=parametros)

    rlt = aliased(relaciones_laborales)  # RLs of the titulares (incumbents)
    rls = aliased(relaciones_laborales)  # RLs of the substitutes

    # perids that had changes or events in the tray period (or the one given as a parameter)
    if ci is not None:  # if a ci was passed as a parameter, only that ci's changes are of interest
        query_perid = sa.select([personas_documentos.c.PerId
                                 ]).select_from(personas_documentos).where(
                                     (personas_documentos.c.PaisCod == 'UY')
                                     & (personas_documentos.c.DocCod == 'CI')
                                     & (personas_documentos.c.PerDocId == ci))
        set_perids_novedades = pd.read_sql_query(
            query_perid, engine, params=parametros)['PerId'].unique().tolist()
    else:
        # load the period's substitutions
        query_suplencias = sa. \
            select([rlt.c.PersonalPerId,suplencias.c.RelLabId,func.GREATEST(cast(suplencias.c.SuplFchAlta,Date),rlt.c.RelLabFchIniActividades).label('SuplFchAlta'),suplencias.c.SuplCausId,rlt.c.RelLabFchIniActividades,rlt.c.RelLabCeseFchReal,rls.c.RelLabAnulada.label('RelLabAnuladaS'),rls.c.RelLabFchIniActividades.label('RelLabFchIniActividadesS'),rls.c.RelLabCeseFchReal.label('RelLabCeseFchRealS')]). \
            select_from(rlt.join(puestos).join(suplencias, suplencias.c.RelLabId==rlt.c.RelLabId).join(rls, rls.c.RelLabId==suplencias.c.SuplRelLabId)). \
            where((puestos.c.PuestoFuncionId.in_(puestos_funcion)) & \
                  (suplencias.c.SuplCausId.in_(suplcausales)) & \
                  (rlt.c.RelLabAnulada==0) & \
                  ((rlt.c.RelLabFchIniActividades < rlt.c.RelLabCeseFchReal) | (rlt.c.RelLabCeseFchReal==None)) & \
                  # the rls could be annulled, in which case the change is flagged in RelLabCeseFchAlta

                  ( \
                   # the substitution start falls within the tray period:

                   ((func.GREATEST(cast(suplencias.c.SuplFchAlta,Date),rlt.c.RelLabFchIniActividades) < sa.bindparam('p2d')) & \
                    (func.GREATEST(cast(suplencias.c.SuplFchAlta,Date),rlt.c.RelLabFchIniActividades) >= sa.bindparam('p1d')) \
                   ) | \
                   # or the substitution start was modified within the tray period:

                   ((cast(suplencias.c.Suplencias_FchUltAct,Date) < sa.bindparam('p2d')) & \
                    (cast(suplencias.c.Suplencias_FchUltAct,Date) >= sa.bindparam('p1d')) \
                   ) | \
                   # or the substitution end falls within the tray period:

                   (((rls.c.RelLabCeseFchReal < sa.bindparam('p2d')) | (rls.c.RelLabCeseFchReal==None)) & \
                    (rls.c.RelLabCeseFchReal >= sa.bindparam('p1d')) \
                   ) | \
                   # or the substitution end was modified or annulled within the tray period:

                   ((rls.c.RelLabCeseFchAlta < sa.bindparam('p2d')) & \
                    (rls.c.RelLabCeseFchAlta >= sa.bindparam('p1d')) \
                   ) \
                  ) \
                 )
        df_suplencias = pd.read_sql_query(query_suplencias,
                                          engine,
                                          params=parametros)

        set_perids_novedades = df_novedades['PersonalPerId'].append(
            df_anulaciones_periodo['PersonalPerId']).append(
                df_suplencias['PersonalPerId']).unique().tolist()

    if len(set_perids_novedades) == 0:  # if there are no IDs (cédulas) to process
        return

    ## Take the history of the perids with changes
    # basic history join
    j3 = rlt.join(puestos).join(funciones_relacion_laboral).join(
        funciones_asignadas).join(sillas).join(
            silla_grupo_materia,
            sillas.c.SillaId == silla_grupo_materia.c.SillaId,
            isouter=True).join(
                asignaturas_materias,
                sillas.c.MateriaId == asignaturas_materias.c.MateriaId,
                isouter=True)

    # substitutions join
    jsupl = suplencias.join(rls,
                            ((rls.c.RelLabId == suplencias.c.SuplRelLabId) &
                             (rls.c.RelLabAnulada == 0) &
                             (suplencias.c.SuplCausId.in_(suplcausales))))
    # clone of the substitutions join to find the next one
    supl_siguiente = aliased(suplencias)  # substitution consecutive to the current one
    rls_siguiente = aliased(relaciones_laborales)
    jsupl_siguiente = supl_siguiente.join(
        rls_siguiente,
        ((rls_siguiente.c.RelLabId == supl_siguiente.c.SuplRelLabId) &
         (rls_siguiente.c.RelLabAnulada == 0) &
         (supl_siguiente.c.SuplCausId.in_(suplcausales))))
    # clone of the substitutions join to ensure there is no intermediate one between the current and the next
    supl_intermedia = aliased(suplencias)  # substitution consecutive to the current one
    rls_intermedia = aliased(relaciones_laborales)
    jsupl_intermedia = supl_intermedia.join(
        rls_intermedia,
        ((rls_intermedia.c.RelLabId == supl_intermedia.c.SuplRelLabId) &
         (rls_intermedia.c.RelLabAnulada == 0) &
         (supl_intermedia.c.SuplCausId.in_(suplcausales))))

    # basic history of the perids with changes; does not include RLs moved down to substitution
    query_historia_rl = sa. \
        select([rlt.c.PersonalPerId, puestos.c.PuestoFuncionId,rlt.c.RelLabId,rlt.c.RelLabDesignCaracter,rlt.c.RelLabCicloPago,rlt.c.RelLabFchIniActividades, rlt.c.RelLabCeseFchReal, rlt.c.CauBajCod,silla_grupo_materia.c.GrupoMateriaId,sillas.c.TurnoId, sillas.c.SillaDependId,funciones_relacion_laboral.c.FuncRelLabCantHrs,sillas.c.FuncionId,rlt.c.RelLabAnulada,puestos.c.PuestoAsignId,asignaturas_materias.c.AsignId]). \
        select_from( \
            j3. \
            join(jsupl, ((rlt.c.RelLabId==suplencias.c.RelLabId)), isouter=True) \
        ). \
        where((rlt.c.RelLabFchIniActividades >= sa.bindparam('inicioLectivo')) & \
              (rlt.c.PersonalPerId.in_(set_perids_novedades)) & \
              (puestos.c.PuestoFuncionId.in_(puestos_funcion)) & \
              (suplencias.c.RelLabId==None) \
             )
    df_historia_rl = pd.read_sql_query(query_historia_rl,
                                       engine,
                                       params=parametros)
    df_historia_rl.loc[:, 'Origen'] = ['df_historia_rl']

    # Change the subject number for Coordinaciones
    df_historia_rl.loc[df_historia_rl['AsignId'] == 90,
                       ['AsignId', 'RelLabDesignCaracter']] = [75, 'I']
    # Change the subject number for AAM
    df_historia_rl.loc[df_historia_rl['AsignId'] == 98, 'AsignId'] = 77

    # SUPLENCIAS (substitutions)
    # Each move down to substitution implies (walking them in date order) that we must:
    #  (1) add a record from the end of the substitution up to the original end (step 2 may later change its termination)
    #  (2) terminate the RL in force on the substitution start date
    #  (3) if the move-down cause applies, create a record (start) for the paid substitution period

    # (1) starts injected into the tray for the period after each leave
    query_alta_luego_de_suplencia = sa. \
        select([rlt.c.PersonalPerId,puestos.c.PuestoFuncionId,rlt.c.RelLabId,rlt.c.RelLabDesignCaracter,rlt.c.RelLabCicloPago,func.GREATEST(rlt.c.RelLabFchIniActividades,func.ADDDATE(rls.c.RelLabCeseFchReal,1)).label('RelLabFchIniActividades'),func.IF(supl_siguiente.c.SuplId==None,rlt.c.RelLabCeseFchReal,cast(supl_siguiente.c.SuplFchAlta,Date)).label('RelLabCeseFchReal'),func.IF(supl_siguiente.c.SuplId==None,rlt.c.CauBajCod,'50').label('CauBajCod'),silla_grupo_materia.c.GrupoMateriaId,sillas.c.TurnoId,sillas.c.SillaDependId,funciones_relacion_laboral.c.FuncRelLabCantHrs,sillas.c.FuncionId,rlt.c.RelLabAnulada,puestos.c.PuestoAsignId,asignaturas_materias.c.AsignId]). \
        select_from( \
            jsupl. \
            join(j3, ((rlt.c.RelLabId==suplencias.c.RelLabId) & (rlt.c.RelLabAnulada==0))). \
            join(jsupl_siguiente,
                 ((supl_siguiente.c.RelLabId==rlt.c.RelLabId) & (supl_siguiente.c.SuplId!=suplencias.c.SuplId) & (supl_siguiente.c.SuplFchAlta>=suplencias.c.SuplFchAlta)), \
                 isouter=True). \
            join(jsupl_intermedia, \
                 ((supl_intermedia.c.RelLabId==rlt.c.RelLabId) & (supl_intermedia.c.SuplId!=suplencias.c.SuplId) & (supl_intermedia.c.SuplFchAlta>=suplencias.c.SuplFchAlta) & (supl_intermedia.c.SuplId!=supl_siguiente.c.SuplId) & (supl_intermedia.c.SuplFchAlta<=supl_siguiente.c.SuplFchAlta)), \
                 isouter=True) \
        ). \
        where( \
            (rlt.c.RelLabFchIniActividades >= sa.bindparam('inicioLectivo')) & \
            (rlt.c.PersonalPerId.in_(set_perids_novedades)) & \
            (puestos.c.PuestoFuncionId.in_(puestos_funcion)) & \
            (rls.c.RelLabCeseFchReal!=None)  & \
            (supl_intermedia.c.SuplId==None) & \
            ((supl_siguiente.c.SuplId==None) | ((rls.c.RelLabCeseFchReal!=None) & (cast(supl_siguiente.c.SuplFchAlta,Date) > rls.c.RelLabCeseFchReal))) & \
            (func.ADDDATE(rls.c.RelLabCeseFchReal,1) < func.IF(supl_siguiente.c.SuplId==None,rlt.c.RelLabCeseFchReal,cast(supl_siguiente.c.SuplFchAlta,Date))) \
        )
    df_alta_luego_de_suplencia = pd.read_sql_query(
        query_alta_luego_de_suplencia, engine, params=parametros)
    df_alta_luego_de_suplencia.loc[:,
                                   'Origen'] = ['df_alta_luego_de_suplencia']

    # (2) start injected for the period before the first leave
    query_primera_suplencia = sa. \
        select([rlt.c.PersonalPerId,puestos.c.PuestoFuncionId,rlt.c.RelLabId,rlt.c.RelLabDesignCaracter,rlt.c.RelLabCicloPago,rlt.c.RelLabFchIniActividades,cast(suplencias.c.SuplFchAlta,Date).label('RelLabCeseFchReal'),literal_column('50').label('CauBajCod'),silla_grupo_materia.c.GrupoMateriaId,sillas.c.TurnoId,sillas.c.SillaDependId,funciones_relacion_laboral.c.FuncRelLabCantHrs,sillas.c.FuncionId,rlt.c.RelLabAnulada,puestos.c.PuestoAsignId, asignaturas_materias.c.AsignId]). \
        select_from(
            jsupl. \
            join(j3, ((rlt.c.RelLabId==suplencias.c.RelLabId) & (rlt.c.RelLabAnulada==0))). \
            join(jsupl_intermedia, \
                 ((supl_intermedia.c.RelLabId==rlt.c.RelLabId) & (supl_intermedia.c.SuplId!=suplencias.c.SuplId) & (supl_intermedia.c.SuplFchAlta<=suplencias.c.SuplFchAlta)),
                 isouter=True) \
        ). \
        where( \
            (rlt.c.RelLabFchIniActividades >= sa.bindparam('inicioLectivo')) & \
            (rlt.c.PersonalPerId.in_(set_perids_novedades)) & \
            (puestos.c.PuestoFuncionId.in_(puestos_funcion)) & \
            (supl_intermedia.c.SuplId==None) & \
            (rlt.c.RelLabFchIniActividades < cast(suplencias.c.SuplFchAlta,Date)) \
        )
    df_primera_suplencia = pd.read_sql_query(query_primera_suplencia,
                                             engine,
                                             params=parametros)
    df_primera_suplencia.loc[:, 'Origen'] = ['df_primera_suplencia']

    # (3) starts injected into the tray for the leave period when it is a Junta Médica (medical board) or Pase en Comisión (secondment)
    query_alta_suplencia_paga = sa. \
        select([rlt.c.PersonalPerId,puestos.c.PuestoFuncionId,rlt.c.RelLabId,rlt.c.RelLabDesignCaracter,rlt.c.RelLabCicloPago,func.GREATEST(rlt.c.RelLabFchIniActividades,func.ADDDATE(cast(suplencias.c.SuplFchAlta,Date),1)).label('RelLabFchIniActividades'),func.IFNULL(rls.c.RelLabCeseFchReal,rlt.c.RelLabFchFinPrevista).label('RelLabCeseFchReal'),literal_column('50').label('CauBajCod'),silla_grupo_materia.c.GrupoMateriaId,sillas.c.TurnoId,sillas.c.SillaDependId,funciones_relacion_laboral.c.FuncRelLabCantHrs,sillas.c.FuncionId,rlt.c.RelLabAnulada,puestos.c.PuestoAsignId,asignaturas_materias.c.AsignId,suplencias.c.SuplCausId]). \
        select_from(
            jsupl.
            join(j3, ((rlt.c.RelLabId==suplencias.c.RelLabId) & (rlt.c.RelLabAnulada==0))) \
        ). \
        where( \
            (rlt.c.RelLabFchIniActividades >= sa.bindparam('inicioLectivo')) & \
            (rlt.c.PersonalPerId.in_(set_perids_novedades)) & \
            (puestos.c.PuestoFuncionId.in_(puestos_funcion)) & \
            (suplencias.c.SuplCausId.in_([16, 17, 162])) & \
            (func.GREATEST(rlt.c.RelLabFchIniActividades,func.ADDDATE(cast(suplencias.c.SuplFchAlta,Date),1)) <= func.IFNULL(rls.c.RelLabCeseFchReal,rlt.c.RelLabFchFinPrevista)) \
        )
    df_alta_suplencia_paga = pd.read_sql_query(query_alta_suplencia_paga,
                                               engine,
                                               params=parametros)
    df_alta_suplencia_paga.loc[:, 'Origen'] = ['df_alta_suplencia_paga']

    # Juntas Médicas go with subject 162:
    df_alta_suplencia_paga.loc[df_alta_suplencia_paga['SuplCausId'] == 162,
                               ['AsignId', 'CauBajCod']] = [162, 66]
    # Pases en comisión WITHIN ANEP go with dependency 8902
    df_alta_suplencia_paga.loc[df_alta_suplencia_paga['SuplCausId'] == 16,
                               ['SillaDependId', 'CauBajCod']] = [8902, 66]
    # Pases en comisión OUTSIDE SECUN go with dependency 8901
    df_alta_suplencia_paga.loc[df_alta_suplencia_paga['SuplCausId'] == 17,
                               ['SillaDependId', 'CauBajCod']] = [8901, 66]

    del df_alta_suplencia_paga['SuplCausId']

    df_historia_completa = pd.concat([
        df_historia_rl, df_primera_suplencia, df_alta_luego_de_suplencia,
        df_alta_suplencia_paga
    ],
                                     axis=0)
    df_historia_completa = df_historia_completa.rename(
        columns={
            'RelLabFchIniActividades': 'falta',
            'RelLabCeseFchReal': 'fcese',
            'SillaDependId': 'dependid'
        })

    df_historia_completa = df_historia_completa.reset_index(drop=True)
    df_historia_completa.merge(df_anulaciones_periodo,
                               on='RelLabId',
                               how='left')
    df_anulaciones_a_eliminar = df_anulaciones_periodo[
        df_anulaciones_periodo['RelLabId'].isin(df_novedades['RelLabId'])]
    # Remove the annulments from the history
    df_historia_completa = df_historia_completa[
        df_historia_completa['RelLabId'].isin(
            df_anulaciones_a_eliminar['RelLabId']) == False]

    # fetch the people's data
    query_personas = sa.select([
        personas.c.PerId.label('PersonalPerId'), personas_documentos.c.PerDocId
    ]).select_from(personas.join(personas_documentos)).where(
        (personas_documentos.c.PaisCod == 'UY')
        & (personas_documentos.c.DocCod == 'CI')
        & (personas.c.PerId.in_(set_perids_novedades)))
    df_personas = pd.read_sql_query(query_personas, engine, params=parametros)
    df_historia_completa = df_historia_completa.merge(df_personas,
                                                      on='PersonalPerId',
                                                      how='left')

    # add subject 151 to everyone who does not have one
    df_historia_completa.loc[(
        (df_historia_completa['AsignId'].isnull()) &
        (df_historia_completa['PuestoAsignId'].notnull())),
                             'AsignId'] = df_historia_completa['PuestoAsignId']
    df_historia_completa.loc[(df_historia_completa['AsignId'].isnull()),
                             'AsignId'] = cfg['asignid_otros']

    df_historia_completa = df_historia_completa.loc[:, [
        'PerDocId', 'dependid', 'AsignId', 'RelLabCicloPago',
        'RelLabDesignCaracter', 'FuncRelLabCantHrs', 'falta', 'fcese',
        'CauBajCod', 'GrupoMateriaId', 'FuncionId', 'RelLabAnulada',
        'PersonalPerId', 'RelLabId'
    ]]

    # hard-coded attributes
    df_historia_completa['PerDocTpo'] = 'DO'
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_hap.tolist()),
        'RelLabDesignCaracter'] = cfg['caracter_horas_apoyo']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_hap.tolist()),
        'AsignId'] = cfg['asignid_horas_apoyo']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_POB.tolist()),
        'RelLabDesignCaracter'] = cfg['caracter_pob']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_POB.tolist()),
        'AsignId'] = cfg['asignid_pob']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_POP.tolist()),
        'RelLabDesignCaracter'] = cfg['caracter_pop']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_POP.tolist()),
        'AsignId'] = cfg['asignid_pop']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_68.tolist()),
        'RelLabDesignCaracter'] = cfg['caracter_68']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_68.tolist()),
        'AsignId'] = cfg['asignid_68']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_talleristas.tolist(
        )), 'RelLabDesignCaracter'] = cfg['caracter_talleristas']
    df_historia_completa.loc[df_historia_completa['FuncionId'].
                             isin(df_funciones_talleristas.tolist()),
                             'AsignId'] = cfg['asignid_talleristas']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_coordinadores_especiales.
                                               tolist()),
        'RelLabDesignCaracter'] = cfg['caracter_especiales']
    df_historia_completa.loc[df_historia_completa['FuncionId'].
                             isin(df_coordinadores_especiales.tolist()),
                             'AsignId'] = cfg['asignid_especiales']
    df_historia_completa.loc[(df_historia_completa['AsignId'] == 75) &
                             (df_historia_completa['fcese'].notnull()),
                             'CauBajCod'] = cfg['causal_coordinacion']
    df_historia_completa.loc[(df_historia_completa['RelLabAnulada'] == 1),
                             'CauBajCod'] = cfg['causal_anulacion']
    df_historia_completa['PerDocPaisCod'] = 'UY'
    df_historia_completa['HorClaCurTpo'] = ''
    df_historia_completa['HorClaCur'] = ''
    df_historia_completa['HorClaArea'] = ''
    df_historia_completa['HorClaAnio'] = 0
    df_historia_completa['HorClaHorTope'] = 0
    df_historia_completa['HorClaObs'] = ''
    df_historia_completa['HorClaNumInt'] = 0
    df_historia_completa['HorClaParPreCod'] = 0
    df_historia_completa['HorClaCompPor'] = 0
    df_historia_completa['HorClaLote'] = 0
    df_historia_completa['HorClaAudUsu'] = 0
    df_historia_completa['HorClaMod'] = 0
    df_historia_completa['HorClaEmpCod'] = 1
    df_historia_completa['HorClaCarNum'] = 0
    df_historia_completa['DesFchCarga'] = date.today()
    df_historia_completa['Resultado'] = 'PE'
    df_historia_completa['Mensaje'] = ''
    df_historia_completa['HorClaFchLib'] = df_historia_completa['fcese']
    df_historia_completa.loc[(df_historia_completa['CauBajCod'].isnull()),
                             'CauBajCod'] = 0

    del df_historia_completa['FuncionId']
    del df_historia_completa['PersonalPerId']

    # Map the dependency to its SIAP code
    df_historia_completa = df_historia_completa.merge(df_tabla_institucional)
    del df_historia_completa[
        'dependid']  # drop dependid, since dependidSiap is used from here on

    # drop rows where fcese < falta
    df_historia_completa = df_historia_completa.loc[
        (df_historia_completa['fcese'] >= df_historia_completa['falta']) |
        (df_historia_completa['fcese'].isnull())]

    # drop rows where falta >= tope
    df_historia_completa = df_historia_completa.loc[
        df_historia_completa['falta'] < parametros['tope']]

    # drop rows with zero hours
    df_historia_completa = df_historia_completa.loc[
        df_historia_completa['FuncRelLabCantHrs'] > 0]

    if ci is not None:  # if a ci was passed as a parameter, keep only that person's history
        df_historia_completa = df_historia_completa.loc[
            df_historia_completa['PerDocId'] == ci]

    # Rename the columns to the field names of the siap ihorasclase table
    df_historia_completa = df_historia_completa.rename(
        columns={
            'PerDocId': 'PerDocNum',
            'RelLabDesignCaracter': 'HorClaCar',
            'RelLabCicloPago': 'HorClaCic',
            'falta': 'HorClaFchPos',
            'fcese': 'HorClaFchCese',
            'CauBajCod': 'HorClaCauBajCod',
            'GrupoMateriaId': 'HorClaGrupo',
            'dependidSiap': 'HorClaInsCod',
            'FuncRelLabCantHrs': 'HorClaHor',
            'AsignId': 'HorClaAsiCod',
            'RelLabAnulada': 'HorClaBajLog'
        })

    df_historia_completa.to_sql(name='ihorasclase',
                                con=engine_bandeja_out,
                                if_exists='append',
                                index=False)
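
A minimal standalone sketch of the parameterized read/write pattern used above (the engine, table and column names here are illustrative, not the production objects):

import pandas as pd
import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
meta = sa.MetaData()
people = sa.Table('people', meta,
                  sa.Column('id', sa.Integer, primary_key=True),
                  sa.Column('name', sa.String))
meta.create_all(engine)

# bound parameters are supplied at execution time through `params`,
# exactly as pd.read_sql_query(query, engine, params=parametros) above
query = sa.select([people]).where(people.c.id < sa.bindparam('tope'))
df = pd.read_sql_query(query, engine, params={'tope': 100})

# append the frame into an output table, as done with ihorasclase
df.to_sql(name='people_out', con=engine, if_exists='append', index=False)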
Example #48
0
File: statistics.py Project: tsotsos/core
def compile_hourly_statistics(instance: Recorder, session: scoped_session,
                              start: datetime) -> None:
    """Compile hourly statistics.

    This will summarize 5-minute statistics for one hour:
    - average, min and max are computed by a database query
    - sum is taken from the last 5-minute entry during the hour
    """
    start_time = start.replace(minute=0)
    end_time = start_time + timedelta(hours=1)

    # Compute last hour's average, min, max
    summary: dict[str, StatisticData] = {}
    baked_query = instance.hass.data[STATISTICS_SHORT_TERM_BAKERY](
        lambda session: session.query(*QUERY_STATISTICS_SUMMARY_MEAN))

    baked_query += lambda q: q.filter(StatisticsShortTerm.start >= bindparam(
        "start_time"))
    baked_query += lambda q: q.filter(StatisticsShortTerm.start < bindparam(
        "end_time"))
    baked_query += lambda q: q.group_by(StatisticsShortTerm.metadata_id)
    baked_query += lambda q: q.order_by(StatisticsShortTerm.metadata_id)

    stats = execute(
        baked_query(session).params(start_time=start_time, end_time=end_time))

    if stats:
        for stat in stats:
            metadata_id, _mean, _min, _max = stat
            summary[metadata_id] = {
                "start": start_time,
                "mean": _mean,
                "min": _min,
                "max": _max,
            }

    # Get last hour's last sum
    if instance._db_supports_row_number:  # pylint: disable=[protected-access]
        subquery = (session.query(*QUERY_STATISTICS_SUMMARY_SUM).filter(
            StatisticsShortTerm.start >= bindparam("start_time")).filter(
                StatisticsShortTerm.start < bindparam("end_time")).subquery())
        query = (session.query(subquery).filter(
            subquery.c.rownum == 1).order_by(subquery.c.metadata_id))
        stats = execute(query.params(start_time=start_time, end_time=end_time))

        if stats:
            for stat in stats:
                metadata_id, start, last_reset, state, _sum, _ = stat
                if metadata_id in summary:
                    summary[metadata_id].update({
                        "last_reset":
                        process_timestamp(last_reset),
                        "state":
                        state,
                        "sum":
                        _sum,
                    })
                else:
                    summary[metadata_id] = {
                        "start": start_time,
                        "last_reset": process_timestamp(last_reset),
                        "state": state,
                        "sum": _sum,
                    }
    else:
        baked_query = instance.hass.data[STATISTICS_SHORT_TERM_BAKERY](
            lambda session: session.query(*QUERY_STATISTICS_SUMMARY_SUM_LEGACY
                                          ))

        baked_query += lambda q: q.filter(StatisticsShortTerm.start >=
                                          bindparam("start_time"))
        baked_query += lambda q: q.filter(StatisticsShortTerm.start <
                                          bindparam("end_time"))
        baked_query += lambda q: q.order_by(StatisticsShortTerm.metadata_id,
                                            StatisticsShortTerm.start.desc())

        stats = execute(
            baked_query(session).params(start_time=start_time,
                                        end_time=end_time))

        if stats:
            for metadata_id, group in groupby(
                    stats, lambda stat: stat["metadata_id"]):  # type: ignore
                (
                    metadata_id,
                    last_reset,
                    state,
                    _sum,
                ) = next(group)
                if metadata_id in summary:
                    summary[metadata_id].update({
                        "start":
                        start_time,
                        "last_reset":
                        process_timestamp(last_reset),
                        "state":
                        state,
                        "sum":
                        _sum,
                    })
                else:
                    summary[metadata_id] = {
                        "start": start_time,
                        "last_reset": process_timestamp(last_reset),
                        "state": state,
                        "sum": _sum,
                    }

    # Insert compiled hourly statistics in the database
    for metadata_id, stat in summary.items():
        session.add(Statistics.from_stats(metadata_id, stat))
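
The legacy fallback above depends on the rows arriving ordered by (metadata_id, start DESC), so that the first element of each groupby group is the most recent row. A standalone sketch of that idiom with made-up rows:

from itertools import groupby

# rows sorted by (metadata_id, start descending); the first row in each
# group is therefore the latest 5-minute entry for that metadata_id
rows = [
    ('sensor.a', '2021-06-01 12:55', 10.0),
    ('sensor.a', '2021-06-01 12:50', 9.0),
    ('sensor.b', '2021-06-01 12:55', 3.0),
]
latest_sum = {}
for metadata_id, group in groupby(rows, lambda row: row[0]):
    _, _start, _sum = next(group)  # first entry of the group = most recent
    latest_sum[metadata_id] = _sum

assert latest_sum == {'sensor.a': 10.0, 'sensor.b': 3.0}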
Example #49
0
def l2(q):
    return q.filter(User.name == bindparam("name"))
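
This fragment is a named filter step for a baked query; unlike an inline lambda, it can be reused by name. A sketch of how it might be appended and executed (bakery, User and session are assumed from the surrounding test fixtures):

bq = bakery(lambda s: s.query(User))
bq += l2  # same effect as: bq += lambda q: q.filter(User.name == bindparam("name"))
result = bq(session).params(name='fred').all()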
Example #50
0
    def test_reaction(self):

        # dummy query to ensure that GUC params are initialized
        _ = engine.execute(select([func.rdkit_version()])).fetchall()

        stmt = pg_settings.update()
        stmt = stmt.where(pg_settings.c.name == bindparam('param'))
        stmt = stmt.values(setting=bindparam('value'))

        engine.execute(stmt, [
            {
                'param': 'rdkit.ignore_reaction_agents',
                'value': False
            },
            {
                'param': 'rdkit.agent_FP_bit_ratio',
                'value': 0.2
            },
            {
                'param': 'rdkit.difference_FP_weight_agents',
                'value': 1
            },
            {
                'param': 'rdkit.difference_FP_weight_nonagents',
                'value': 10
            },
            {
                'param': 'rdkit.move_unmmapped_reactants_to_agents',
                'value': True
            },
            {
                'param': 'rdkit.threshold_unmapped_reactant_atoms',
                'value': 0.2
            },
            {
                'param': 'rdkit.init_reaction',
                'value': True
            },
        ])

        rs = engine.execute(
            select([func.reaction_from_smiles('c1ccccc1>>c1cccnc1')]))
        self.assertEqual(rs.fetchall()[0][0], 'c1ccccc1>>c1ccncc1')

        rs = engine.execute(
            select([func.reaction_from_smiles('c1ccccc1>CC(=O)O>c1cccnc1')]))
        self.assertEqual(rs.fetchall()[0][0], 'c1ccccc1>CC(=O)O>c1ccncc1')

        rs = engine.execute(
            select([
                func.reaction_from_smarts(
                    '[c1:1][c:2][c:3][c:4]c[c1:5]>CC(=O)O>[c1:1][c:2][c:3][c:4]n[c1:5]'
                )
            ]))
        self.assertEqual(
            rs.fetchall()[0][0],
            'c([cH:4][cH:3][cH:2][cH2:1])[cH2:5]>CC(=O)O>n([cH:4][cH:3][cH:2][cH2:1])[cH2:5]'
        )

        rs = engine.execute(
            select([
                func.reaction_from_smarts(
                    'C(F)(F)F.[c1:1][c:2][c:3][c:4]c[c1:5]>CC(=O)O>[c1:1][c:2][c:3][c:4]n[c1:5]'
                )
            ]))
        self.assertEqual(
            rs.fetchall()[0][0],
            'c([cH:4][cH:3][cH:2][cH2:1])[cH2:5]>CC(=O)O.FC(F)F>n([cH:4][cH:3][cH:2][cH2:1])[cH2:5]'
        )

        rs = engine.execute(
            select([func.reaction_from_smarts('c1ccc[n,c]c1>>c1nccnc1')]))
        self.assertEqual(rs.fetchall()[0][0], '*1ccccc1>>c1cnccn1')

        rs = engine.execute(
            select([
                func.reaction_to_smiles(
                    func.reaction_from_smiles('c1ccccc1>>c1cccnc1'))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 'c1ccccc1>>c1ccncc1')

        rs = engine.execute(
            select([
                func.reaction_from_ctab('''$RXN

      RDKit

  1  1
$MOL

     RDKit

  6  6  0  0  0  0  0  0  0  0999 V2000
    0.0000    0.0000    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
    0.0000    0.0000    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
    0.0000    0.0000    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
    0.0000    0.0000    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
    0.0000    0.0000    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
    0.0000    0.0000    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
  1  2  4  0
  2  3  4  0
  3  4  4  0
  4  5  4  0
  5  6  4  0
  6  1  4  0
M  END
$MOL

     RDKit

  6  6  0  0  0  0  0  0  0  0999 V2000
    0.0000    0.0000    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
    0.0000    0.0000    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
    0.0000    0.0000    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
    0.0000    0.0000    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
    0.0000    0.0000    0.0000 N   0  0  0  0  0  0  0  0  0  0  0  0
    0.0000    0.0000    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
  1  2  4  0
  2  3  4  0
  3  4  4  0
  4  5  4  0
  5  6  4  0
  6  1  4  0
M  END
''')
            ]))
        self.assertEqual(rs.fetchall()[0][0], 'c1ccccc1>>c1ccncc1')

        rs = engine.execute(
            select([
                func.reaction_numreactants(
                    func.reaction_from_smiles('[Cl].c1ccccc1>>c1cccnc1.[OH2]'))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 2)

        rs = engine.execute(
            select([
                func.reaction_numproducts(
                    func.reaction_from_smiles('[Cl].c1ccccc1>>c1cccnc1.[OH2]'))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 2)

        rs = engine.execute(
            select([
                func.reaction_numagents(
                    func.reaction_from_smiles(
                        '[Cl].c1ccccc1>CC(=O)O.[Na+]>c1cccnc1.[OH2]'))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 2)

        rs = engine.execute(
            select([
                func.reaction_numagents(
                    func.reaction_from_smarts(
                        'C(F)(F)F.[c1:1][c:2][c:3][c:4]c[c1:5]>CC(=O)O>[c1:1][c:2][c:3][c:4]n[c1:5]'
                    ))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 2)

        engine.execute(stmt, [
            {
                'param': 'rdkit.move_unmmapped_reactants_to_agents',
                'value': False
            },
        ])

        rs = engine.execute(
            select([
                func.reaction_numagents(
                    func.reaction_from_smarts(
                        'C(F)(F)F.[c1:1][c:2][c:3][c:4]c[c1:5]>CC(=O)O>[c1:1][c:2][c:3][c:4]n[c1:5]'
                    ))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 1)

        engine.execute(stmt, [
            {
                'param': 'rdkit.move_unmmapped_reactants_to_agents',
                'value': True
            },
            {
                'param': 'rdkit.threshold_unmapped_reactant_atoms',
                'value': 0.9
            },
        ])

        rs = engine.execute(
            select([
                func.reaction_numagents(
                    func.reaction_from_smarts(
                        'C(F)(F)F.[c1:1][c:2][c:3][c:4]c[c1:5]>CC(=O)O>[c1:1][c:2][c:3][c:4]n[c1:5]'
                    ))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 3)

        engine.execute(stmt, [
            {
                'param': 'rdkit.threshold_unmapped_reactant_atoms',
                'value': 0.2
            },
        ])

        rs = engine.execute(
            select([
                func.reaction('c1ccccc1>>c1cccnc1') == func.reaction(
                    'c1ccccc1>>c1cccnc1')
            ]))
        self.assertEqual(rs.fetchall()[0][0], True)

        rs = engine.execute(
            select([
                func.reaction('c1ccccc1>>c1cccnc1') == func.reaction(
                    'c1ccccc1>>c1cncnc1')
            ]))
        self.assertEqual(rs.fetchall()[0][0], False)
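
The GUC configuration at the top of this test is just an executemany UPDATE against pg_settings; a minimal sketch of that statement construction, with the system view declared by hand (the real test presumably reflects or predeclares it):

import sqlalchemy as sa

# updating the pg_settings system view changes the GUC for the session
pg_settings = sa.Table(
    'pg_settings', sa.MetaData(),
    sa.Column('name', sa.Text),
    sa.Column('setting', sa.Text),
)

stmt = (pg_settings.update()
        .where(pg_settings.c.name == sa.bindparam('param'))
        .values(setting=sa.bindparam('value')))
# engine.execute(stmt, [{'param': ..., 'value': ...}, ...]) then applies
# every dictionary in the list in a single executemany call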
Example #51
0
    def test_reaction(self):

        self.assertTrue(os.path.exists(data_filepath))

        with open(data_filepath, 'rt') as f:
            sample_data = [line.split() for line in f]

        # dummy query to ensure that GUC params are initialized
        _ = engine.execute(select([func.rdkit_version()])).fetchall()

        stmt = pg_settings.update()
        stmt = stmt.where(pg_settings.c.name == bindparam('param'))
        stmt = stmt.values(setting=bindparam('value'))

        engine.execute(stmt, [
            {
                'param': 'rdkit.ignore_reaction_agents',
                'value': False
            },
            {
                'param': 'rdkit.agent_FP_bit_ratio',
                'value': 0.2
            },
            {
                'param': 'rdkit.difference_FP_weight_agents',
                'value': 1
            },
            {
                'param': 'rdkit.difference_FP_weight_nonagents',
                'value': 10
            },
            {
                'param': 'rdkit.move_unmmapped_reactants_to_agents',
                'value': True
            },
            {
                'param': 'rdkit.threshold_unmapped_reactant_atoms',
                'value': 0.2
            },
            {
                'param': 'rdkit.init_reaction',
                'value': True
            },
        ])

        engine.execute(
            reactions.insert(),
            [{
                'rxn': rsmiles,
            } for _, rsmiles in sample_data],
        )

        rs = engine.execute(select([func.count()]).select_from(reactions))
        sz = rs.fetchall()[0][0]
        self.assertEqual(sz, len(sample_data))

        engine.execute(stmt, [
            {
                'param': 'rdkit.move_unmmapped_reactants_to_agents',
                'value': False
            },
        ])

        engine.execute(
            reactions_unchanged.insert(),
            [{
                'rxn': rsmiles,
            } for _, rsmiles in sample_data],
        )

        engine.execute(stmt, [
            {
                'param': 'rdkit.move_unmmapped_reactants_to_agents',
                'value': True
            },
        ])

        rs = engine.execute(
            select([func.count()]).select_from(reactions_unchanged))
        sz = rs.fetchall()[0][0]
        self.assertEqual(sz, len(sample_data))

        rs = engine.execute(
            select([func.sum(func.reaction_numreactants(reactions.c.rxn))]))
        sum_reactants = rs.fetchall()[0][0]
        self.assertEqual(sum_reactants, 1898)

        rs = engine.execute(
            select([
                func.sum(func.reaction_numreactants(reactions_unchanged.c.rxn))
            ]))
        sum_reactants = rs.fetchall()[0][0]
        self.assertEqual(sum_reactants, 3517)

        rs = engine.execute(
            select([func.sum(func.reaction_numproducts(reactions.c.rxn))]))
        sum_products = rs.fetchall()[0][0]
        self.assertEqual(sum_products, 1157)

        rs = engine.execute(
            select([
                func.sum(func.reaction_numproducts(reactions_unchanged.c.rxn))
            ]))
        sum_products = rs.fetchall()[0][0]
        self.assertEqual(sum_products, 1157)

        rs = engine.execute(
            select([func.sum(func.reaction_numagents(reactions.c.rxn))]))
        sum_agents = rs.fetchall()[0][0]
        self.assertEqual(sum_agents, 2528)

        rs = engine.execute(
            select([
                func.sum(func.reaction_numagents(reactions_unchanged.c.rxn))
            ]))
        sum_agents = rs.fetchall()[0][0]
        self.assertEqual(sum_agents, 909)

        rs = engine.execute(
            select([func.count()]).select_from(reactions).where(
                reactions.c.rxn.hassubstruct('c1ccccc1>>c1ccncc1')))
        self.assertEqual(rs.fetchall()[0][0], 47)

        rs = engine.execute(
            select([func.count()]).select_from(reactions).where(
                reactions.c.rxn.hassubstruct('c1cnccc1>>c1ccccc1')))
        self.assertEqual(rs.fetchall()[0][0], 50)

        rs = engine.execute(
            select([func.count()]).select_from(reactions).where(
                reactions.c.rxn.hassubstruct('c1ccccc1>>')))
        self.assertEqual(rs.fetchall()[0][0], 667)

        rs = engine.execute(
            select([func.count()]).select_from(reactions).where(
                reactions.c.rxn.hassubstruct('c1cnccc1>>')))
        self.assertEqual(rs.fetchall()[0][0], 83)

        rs = engine.execute(
            select([func.count()]).select_from(reactions).where(
                reactions.c.rxn.hassubstruct('>>c1ccncc1')))
        self.assertEqual(rs.fetchall()[0][0], 79)

        rs = engine.execute(
            select([func.count()]).select_from(reactions).where(
                reactions.c.rxn.hassubstruct('>>c1ccccc1')))
        self.assertEqual(rs.fetchall()[0][0], 650)

        rs = engine.execute(
            select([func.count()]).select_from(reactions).where(
                reactions.c.rxn.hassubstructfp('c1ccccc1>>c1ccncc1')))
        self.assertEqual(rs.fetchall()[0][0], 47)

        rs = engine.execute(
            select([func.count()]).select_from(reactions).where(
                reactions.c.rxn.hassubstructfp('c1cnccc1>>c1ccccc1')))
        self.assertEqual(rs.fetchall()[0][0], 50)

        rs = engine.execute(
            select([func.count()]).select_from(reactions).where(
                reactions.c.rxn.hassubstructfp('c1ccccc1>>')))
        self.assertEqual(rs.fetchall()[0][0], 667)

        rs = engine.execute(
            select([func.count()]).select_from(reactions).where(
                reactions.c.rxn.hassubstructfp('c1cnccc1>>')))
        self.assertEqual(rs.fetchall()[0][0], 83)

        rs = engine.execute(
            select([func.count()]).select_from(reactions).where(
                reactions.c.rxn.hassubstructfp('>>c1ccncc1')))
        self.assertEqual(rs.fetchall()[0][0], 79)

        rs = engine.execute(
            select([func.count()]).select_from(reactions).where(
                reactions.c.rxn.hassubstructfp('>>c1ccccc1')))
        self.assertEqual(rs.fetchall()[0][0], 650)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_difference_fp('c1ccccc1>>c1ccncc1', 1),
                    func.reaction_difference_fp('c1ccccc1>>c1ccncc1', 1))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 1.0)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_difference_fp('c1ccccc1>>c1ccncc1', 1),
                    func.reaction_difference_fp('c1ncccc1>>c1ncncc1', 1))
            ]))
        self.assertAlmostEqual(rs.fetchall()[0][0], 0.636363636363636)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_difference_fp(
                        'c1ccccc1>CC(=O)O.[Na+]>c1ccncc1', 1),
                    func.reaction_difference_fp(
                        'c1ccccc1>CC(=O)O.[Na+]>c1ccncc1', 1))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 1.0)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_difference_fp(
                        'c1ccccc1>CC(=O)O.[Na+]>c1ccncc1', 1),
                    func.reaction_difference_fp('c1ncccc1>[Na+]>c1ncncc1', 1))
            ]))
        self.assertAlmostEqual(rs.fetchall()[0][0], 0.603448275862069)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_difference_fp('c1ccccc1>>c1ccncc1', 2),
                    func.reaction_difference_fp('c1ccccc1>>c1ccncc1', 2))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 1.0)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_difference_fp('c1ccccc1>>c1ccncc1', 2),
                    func.reaction_difference_fp('c1ncccc1>>c1ncncc1', 2))
            ]))
        self.assertAlmostEqual(rs.fetchall()[0][0], 0.2)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_difference_fp(
                        'c1ccccc1>CC(=O)O.[Na+]>c1ccncc1', 2),
                    func.reaction_difference_fp(
                        'c1ccccc1>CC(=O)O.[Na+]>c1ccncc1', 2))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 1.0)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_difference_fp(
                        'c1ccccc1>CC(=O)O.[Na+]>c1ccncc1', 2),
                    func.reaction_difference_fp('c1ncccc1>[Na+]>c1ncncc1', 2))
            ]))
        self.assertAlmostEqual(rs.fetchall()[0][0], 0.2)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_difference_fp('c1ccccc1>>c1ccncc1', 3),
                    func.reaction_difference_fp('c1ccccc1>>c1ccncc1', 3))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 1.0)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_difference_fp('c1ccccc1>>c1ccncc1', 3),
                    func.reaction_difference_fp('c1ncccc1>>c1ncncc1', 3))
            ]))
        self.assertAlmostEqual(rs.fetchall()[0][0], 0.454545454545455)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_difference_fp(
                        'c1ccccc1>CC(=O)O.[Na+]>c1ccncc1', 3),
                    func.reaction_difference_fp(
                        'c1ccccc1>CC(=O)O.[Na+]>c1ccncc1', 3))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 1.0)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_difference_fp(
                        'c1ccccc1>CC(=O)O.[Na+]>c1ccncc1', 3),
                    func.reaction_difference_fp('c1ncccc1>[Na+]>c1ncncc1', 3))
            ]))
        self.assertAlmostEqual(rs.fetchall()[0][0], 0.444933920704846)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_structural_bfp('c1ccccc1>>c1ccncc1', 1),
                    func.reaction_structural_bfp('c1ccccc1>>c1ccncc1', 1))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 1.0)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_structural_bfp('c1ccccc1>>c1ccncc1', 1),
                    func.reaction_structural_bfp('c1ncccc1>>c1ncncc1', 1))
            ]))
        self.assertAlmostEqual(rs.fetchall()[0][0], 0.620689655172414)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_structural_bfp(
                        'c1ccccc1>CC(=O)O.[Na+]>c1ccncc1', 1),
                    func.reaction_structural_bfp(
                        'c1ccccc1>CC(=O)O.[Na+]>c1ccncc1', 1))
            ]))
        self.assertEqual(rs.fetchall()[0][0], 1.0)

        rs = engine.execute(
            select([
                func.tanimoto_sml(
                    func.reaction_structural_bfp(
                        'c1ccccc1>CC(=O)O.[Na+]>c1ccncc1', 1),
                    func.reaction_structural_bfp('c1ncccc1>[Na+]>c1ncncc1', 1))
            ]))
        self.assertAlmostEqual(rs.fetchall()[0][0], 0.514285714285714)
Example #52
0
def visit_unload_from_select(element, compiler, **kw):
    """Returns the actual sql query for the UnloadFromSelect class."""

    template = """
       UNLOAD (:select) TO :unload_location
       CREDENTIALS :credentials
       {manifest}
       {header}
       {delimiter}
       {encrypted}
       {fixed_width}
       {gzip}
       {add_quotes}
       {null}
       {escape}
       {allow_overwrite}
       {parallel}
    """
    el = element

    qs = template.format(
        manifest='MANIFEST' if el.manifest else '',
        header='HEADER' if el.header else '',
        delimiter=('DELIMITER AS :delimiter'
                   if el.delimiter is not None else ''),
        encrypted='ENCRYPTED' if el.encrypted else '',
        fixed_width='FIXEDWIDTH AS :fixed_width' if el.fixed_width else '',
        gzip='GZIP' if el.gzip else '',
        add_quotes='ADDQUOTES' if el.add_quotes else '',
        escape='ESCAPE' if el.escape else '',
        null='NULL AS :null_as' if el.null is not None else '',
        allow_overwrite='ALLOWOVERWRITE' if el.allow_overwrite else '',
        parallel='PARALLEL OFF' if not el.parallel else '',
    )

    query = sa.text(qs)

    if el.delimiter is not None:
        query = query.bindparams(
            sa.bindparam(
                'delimiter',
                value=element.delimiter,
                type_=sa.String,
            ))

    if el.fixed_width:
        query = query.bindparams(
            sa.bindparam(
                'fixed_width',
                value=_process_fixed_width(el.fixed_width),
                type_=sa.String,
            ))

    if el.null is not None:
        query = query.bindparams(
            sa.bindparam('null_as', value=el.null, type_=sa.String))

    return compiler.process(
        query.bindparams(
            sa.bindparam('credentials', value=el.credentials, type_=sa.String),
            sa.bindparam(
                'unload_location',
                value=el.unload_location,
                type_=sa.String,
            ),
            sa.bindparam(
                'select',
                value=compiler.process(
                    el.select,
                    literal_binds=True,
                ),
                type_=sa.String,
            ),
        ), **kw)
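
A visitor like this is normally attached to its clause element through SQLAlchemy's compiler extension; a hedged sketch of the registration (the UnloadFromSelect element is stubbed here, the real class lives in sqlalchemy-redshift):

from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import ClauseElement, Executable

class UnloadFromSelect(Executable, ClauseElement):
    # stub: the real element also carries manifest, header, delimiter, ...
    def __init__(self, select, unload_location, credentials):
        self.select = select
        self.unload_location = unload_location
        self.credentials = credentials

compiles(UnloadFromSelect)(visit_unload_from_select)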
Example #53
0
    def test_select_columns(self):
        stmt = select([bindparam('data'), bindparam('x')])
        self._assert_raises(
            stmt, {'data': 'data'}
        )
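
A standalone sketch of the failure `_assert_raises` presumably checks for: executing a statement while one of its bind parameters has no value (the broad SQLAlchemyError catch is deliberate, since the concrete wrapper class has varied across versions):

from sqlalchemy import bindparam, create_engine, exc, select

engine = create_engine('sqlite://')
stmt = select([bindparam('data'), bindparam('x')])
try:
    engine.execute(stmt, {'data': 'data'})  # 'x' is never supplied
except exc.SQLAlchemyError as err:
    assert "bind parameter 'x'" in str(err)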
Example #54
0
    def test_updatemany(self, connection):
        t = self.tables.default_test

        connection.execute(t.insert(), [{}, {}, {}])

        connection.execute(
            t.update().where(t.c.col1 == sa.bindparam("pkval")),
            {"pkval": 51, "col7": None, "col8": None, "boolcol1": False},
        )

        connection.execute(
            t.update().where(t.c.col1 == sa.bindparam("pkval")),
            [{"pkval": 51}, {"pkval": 52}, {"pkval": 53}],
        )

        ctexec = connection.scalar(self.currenttime)
        today = datetime.date.today()
        result = connection.execute(t.select().order_by(t.c.col1))
        eq_(
            list(result),
            [
                (
                    51,
                    "im the update",
                    self.f2,
                    self.ts,
                    self.ts,
                    ctexec,
                    False,
                    False,
                    13,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    52,
                    "im the update",
                    self.f2,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    13,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    53,
                    "im the update",
                    self.f2,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    13,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
            ],
        )
Example #55
0
    def test_insert_w_newlines(self, connection):
        from psycopg2 import extras

        t = self.tables.data

        ins = (t.insert().inline().values(
            id=bindparam("id"),
            x=select(literal_column("5")).select_from(
                self.tables.data).scalar_subquery(),
            y=bindparam("y"),
            z=bindparam("z"),
        ))
        # compiled SQL has a newline in it
        eq_(
            str(ins.compile(testing.db)),
            "INSERT INTO data (id, x, y, z) VALUES (%(id)s, "
            "(SELECT 5 \nFROM data), %(y)s, %(z)s)",
        )
        meth = extras.execute_values
        with mock.patch.object(extras, "execute_values",
                               side_effect=meth) as mock_exec:

            connection.execute(
                ins,
                [
                    {
                        "id": 1,
                        "y": "y1",
                        "z": 1
                    },
                    {
                        "id": 2,
                        "y": "y2",
                        "z": 2
                    },
                    {
                        "id": 3,
                        "y": "y3",
                        "z": 3
                    },
                ],
            )

        eq_(
            mock_exec.mock_calls,
            [
                mock.call(
                    mock.ANY,
                    "INSERT INTO data (id, x, y, z) VALUES %s",
                    (
                        {
                            "id": 1,
                            "y": "y1",
                            "z": 1
                        },
                        {
                            "id": 2,
                            "y": "y2",
                            "z": 2
                        },
                        {
                            "id": 3,
                            "y": "y3",
                            "z": 3
                        },
                    ),
                    template="(%(id)s, (SELECT 5 \nFROM data), %(y)s, %(z)s)",
                    fetch=False,
                    page_size=connection.dialect.executemany_values_page_size,
                )
            ],
        )
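
The mock above pins down the exact arguments SQLAlchemy forwards to psycopg2's fast path; for reference, a direct standalone use of that helper (the DSN is illustrative):

import psycopg2
from psycopg2 import extras

conn = psycopg2.connect('dbname=test')  # illustrative DSN
with conn.cursor() as cur:
    # one round trip inserts all rows, paged by page_size
    extras.execute_values(
        cur,
        'INSERT INTO data (id, y, z) VALUES %s',
        [(1, 'y1', 1), (2, 'y2', 2), (3, 'y3', 3)],
        template='(%s, %s, %s)',
        page_size=100,
    )
conn.commit()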
Example #56
0
    def test_select_where(self):
        stmt = select([self.tables.foo]). \
            where(self.tables.foo.c.data == bindparam('data')). \
            where(self.tables.foo.c.x == bindparam('x'))
        self._assert_raises(stmt, {'data': 'data'})
Example #57
0
    def test_insert_modified_by_event(self, connection):
        from psycopg2 import extras

        t = self.tables.data

        ins = (t.insert().inline().values(
            id=bindparam("id"),
            x=select(literal_column("5")).select_from(
                self.tables.data).scalar_subquery(),
            y=bindparam("y"),
            z=bindparam("z"),
        ))
        # compiled SQL has a newline in it
        eq_(
            str(ins.compile(testing.db)),
            "INSERT INTO data (id, x, y, z) VALUES (%(id)s, "
            "(SELECT 5 \nFROM data), %(y)s, %(z)s)",
        )
        meth = extras.execute_batch
        with mock.patch.object(
                extras, "execute_values") as mock_values, mock.patch.object(
                    extras, "execute_batch", side_effect=meth) as mock_batch:

            # create an event hook that will change the statement to
            # something else, meaning the dialect has to detect that
            # insert_single_values_expr is no longer useful
            @event.listens_for(connection,
                               "before_cursor_execute",
                               retval=True)
            def before_cursor_execute(conn, cursor, statement, parameters,
                                      context, executemany):
                statement = ("INSERT INTO data (id, y, z) VALUES "
                             "(%(id)s, %(y)s, %(z)s)")
                return statement, parameters

            connection.execute(
                ins,
                [
                    {
                        "id": 1,
                        "y": "y1",
                        "z": 1
                    },
                    {
                        "id": 2,
                        "y": "y2",
                        "z": 2
                    },
                    {
                        "id": 3,
                        "y": "y3",
                        "z": 3
                    },
                ],
            )

        eq_(mock_values.mock_calls, [])

        if connection.dialect.executemany_mode & EXECUTEMANY_BATCH:
            eq_(
                mock_batch.mock_calls,
                [
                    mock.call(
                        mock.ANY,
                        "INSERT INTO data (id, y, z) VALUES "
                        "(%(id)s, %(y)s, %(z)s)",
                        (
                            {
                                "id": 1,
                                "y": "y1",
                                "z": 1
                            },
                            {
                                "id": 2,
                                "y": "y2",
                                "z": 2
                            },
                            {
                                "id": 3,
                                "y": "y3",
                                "z": 3
                            },
                        ),
                    )
                ],
            )
        else:
            eq_(mock_batch.mock_calls, [])
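
The statement rewrite in this test relies on SQLAlchemy's cursor-level execution event; a minimal sketch of the same hook registered on an ordinary engine:

from sqlalchemy import create_engine, event

engine = create_engine('sqlite://')

@event.listens_for(engine, 'before_cursor_execute', retval=True)
def rewrite_sql(conn, cursor, statement, parameters, context, executemany):
    # with retval=True, the hook must return the (statement, parameters)
    # pair, which may be modified before it reaches the DBAPI cursor
    return '-- traced\n' + statement, parameters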
Example #58
0
def test_core_just_statement_construct_plus_cache_key(n):
    for i in range(n):
        stmt = future_select(
            Customer.__table__).where(Customer.id == bindparam("id"))

        stmt._generate_cache_key()
Example #59
0
def visit_copy_command(element, compiler, **kw):
    """
    Returns the actual sql query for the CopyCommand class.
    """
    qs = """COPY {table}{columns} FROM :data_location
        WITH CREDENTIALS AS :credentials
        {format}
        {parameters}"""
    parameters = []
    bindparams = [
        sa.bindparam(
            'data_location',
            value=element.data_location,
            type_=sa.String,
        ),
        sa.bindparam(
            'credentials',
            value=element.credentials,
            type_=sa.String,
        ),
    ]

    if element.format == Format.csv:
        format_ = 'FORMAT AS CSV'
        if element.quote is not None:
            format_ += ' QUOTE AS :quote_character'
            bindparams.append(
                sa.bindparam(
                    'quote_character',
                    value=element.quote,
                    type_=sa.String,
                ))
    elif element.format == Format.json:
        format_ = 'FORMAT AS JSON AS :json_option'
        bindparams.append(
            sa.bindparam(
                'json_option',
                value=element.path_file,
                type_=sa.String,
            ))
    elif element.format == Format.avro:
        format_ = 'FORMAT AS AVRO AS :avro_option'
        bindparams.append(
            sa.bindparam(
                'avro_option',
                value=element.path_file,
                type_=sa.String,
            ))
    elif element.format == Format.orc:
        format_ = 'FORMAT AS ORC'
    elif element.format == Format.parquet:
        format_ = 'FORMAT AS PARQUET'
    elif element.format == Format.fixed_width and element.fixed_width is None:
        raise sa_exc.CompileError(
            "'fixed_width' argument required for format 'FIXEDWIDTH'.")
    else:
        format_ = ''

    if element.delimiter is not None:
        parameters.append('DELIMITER AS :delimiter_char')
        bindparams.append(
            sa.bindparam(
                'delimiter_char',
                value=element.delimiter,
                type_=sa.String,
            ))

    if element.fixed_width is not None:
        parameters.append('FIXEDWIDTH AS :fixedwidth_spec')
        bindparams.append(
            sa.bindparam(
                'fixedwidth_spec',
                value=_process_fixed_width(element.fixed_width),
                type_=sa.String,
            ))

    if element.compression is not None:
        parameters.append(Compression(element.compression).value)

    if element.manifest:
        parameters.append('MANIFEST')

    if element.accept_any_date:
        parameters.append('ACCEPTANYDATE')

    if element.accept_inv_chars is not None:
        parameters.append('ACCEPTINVCHARS AS :replacement_char')
        bindparams.append(
            sa.bindparam('replacement_char',
                         value=element.accept_inv_chars,
                         type_=sa.String))

    if element.blanks_as_null:
        parameters.append('BLANKSASNULL')

    if element.date_format is not None:
        parameters.append('DATEFORMAT AS :dateformat_string')
        bindparams.append(
            sa.bindparam(
                'dateformat_string',
                value=element.date_format,
                type_=sa.String,
            ))

    if element.empty_as_null:
        parameters.append('EMPTYASNULL')

    if element.encoding is not None:
        parameters.append('ENCODING AS ' + Encoding(element.encoding).value)

    if element.escape:
        parameters.append('ESCAPE')

    if element.explicit_ids:
        parameters.append('EXPLICIT_IDS')

    if element.fill_record:
        parameters.append('FILLRECORD')

    if element.ignore_blank_lines:
        parameters.append('IGNOREBLANKLINES')

    if element.ignore_header is not None:
        parameters.append('IGNOREHEADER AS :number_rows')
        bindparams.append(
            sa.bindparam(
                'number_rows',
                value=element.ignore_header,
                type_=sa.Integer,
            ))

    if element.dangerous_null_delimiter is not None:
        parameters.append("NULL AS '%s'" % element.dangerous_null_delimiter)

    if element.remove_quotes:
        parameters.append('REMOVEQUOTES')

    if element.roundec:
        parameters.append('ROUNDEC')

    if element.time_format is not None:
        parameters.append('TIMEFORMAT AS :timeformat_string')
        bindparams.append(
            sa.bindparam(
                'timeformat_string',
                value=element.time_format,
                type_=sa.String,
            ))

    if element.trim_blanks:
        parameters.append('TRIMBLANKS')

    if element.truncate_columns:
        parameters.append('TRUNCATECOLUMNS')

    if element.comp_rows:
        parameters.append('COMPROWS :numrows')
        bindparams.append(
            sa.bindparam(
                'numrows',
                value=element.comp_rows,
                type_=sa.Integer,
            ))

    if element.comp_update:
        parameters.append('COMPUPDATE ON')
    elif element.comp_update is not None:
        parameters.append('COMPUPDATE OFF')

    if element.max_error is not None:
        parameters.append('MAXERROR AS :error_count')
        bindparams.append(
            sa.bindparam(
                'error_count',
                value=element.max_error,
                type_=sa.Integer,
            ))

    if element.no_load:
        parameters.append('NOLOAD')

    if element.stat_update:
        parameters.append('STATUPDATE ON')
    elif element.stat_update is not None:
        parameters.append('STATUPDATE OFF')

    columns = ' (%s)' % ', '.join(
        compiler.preparer.format_column(column)
        for column in element.columns) if element.columns else ''

    qs = qs.format(table=compiler.preparer.format_table(element.table),
                   columns=columns,
                   format=format_,
                   parameters='\n'.join(parameters))

    return compiler.process(sa.text(qs).bindparams(*bindparams), **kw)
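
The return line above stitches the collected bind parameters onto a textual statement; a tiny standalone illustration of the text().bindparams() pattern it relies on (values are illustrative):

import sqlalchemy as sa

stmt = sa.text('COPY t FROM :data_location WITH CREDENTIALS AS :credentials')
stmt = stmt.bindparams(
    sa.bindparam('data_location', value='s3://bucket/key', type_=sa.String),
    sa.bindparam('credentials', value='aws_iam_role=...', type_=sa.String),
)
# compiler.process(stmt) would then render the COPY with safely bound values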
Example #60
0
    def test_compiled_insert_execute(self):
        users.insert().compile().execute(user_id=7, user_name='jack')
        s = select([users], users.c.user_id == bindparam('id')).compile()
        c = testing.db.connect()
        assert c.execute(s, id=7).fetchall()[0]['user_id'] == 7
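
This last snippet uses the legacy "implicit execution" style (compiled statements executing themselves via bound metadata), which was removed in SQLAlchemy 2.0; a hedged sketch of a modern equivalent, assuming the same users table and a configured engine:

from sqlalchemy import bindparam, select

with engine.connect() as conn:
    conn.execute(users.insert(), {'user_id': 7, 'user_name': 'jack'})
    s = select(users).where(users.c.user_id == bindparam('id'))
    row = conn.execute(s, {'id': 7}).first()
    assert row.user_id == 7
    # under 2.0-style engines, call conn.commit() to persist the insert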