Example #1
 def testQuestionBranchInsert(self):
     survey_id = connection.execute(survey_table.select().where(
         survey_table.c.survey_title == 'test_title')).first().survey_id
     to_question = get_questions_no_credentials(
         connection, survey_id).fetchall()[-1]
     q_where = question_table.select().where(
         cast(cast(question_table.c.logic['allow_other'], Text),
              Boolean))
     from_question = connection.execute(q_where).fetchall()[0]
     choice = get_choices(
         connection, from_question.question_id).fetchall()[0]
     from_tcn = from_question.type_constraint_name
     branch_dict = {'question_choice_id': choice.question_choice_id,
                    'from_question_id': from_question.question_id,
                    'from_type_constraint': from_tcn,
                    'from_sequence_number':
                        from_question.sequence_number,
                    'from_allow_multiple':
                        from_question.allow_multiple,
                    'from_survey_id': survey_id,
                    'to_question_id': to_question.question_id,
                    'to_type_constraint':
                        to_question.type_constraint_name,
                    'to_sequence_number':
                        to_question.sequence_number,
                    'to_allow_multiple': to_question.allow_multiple,
                    'to_survey_id': survey_id}
     branch_exec = connection.execute(question_branch_insert(**branch_dict))
     inserted_id = branch_exec.inserted_primary_key[0]
     the_branch = connection.execute(question_branch_table.select().where(
         question_branch_table.c.question_branch_id ==
         inserted_id)).first()
     self.assertEqual(the_branch.to_question_id,
                      to_question.question_id)
Example #2
    def incr(self, category, ids, time=0, delta=1):
        self.delete_if_expired(category, ids)

        expiration = expiration_from_time(time)

        prof = self.profile_start('incr', category)

        engine = self.engine_by_category(category, "master")

        rp = engine.update(sa.and_(engine.c.category == category,
                                   engine.c.ids == ids,
                                   engine.c.kind == 'num'),
                           values={
                               engine.c.value: sa.cast(
                                   sa.cast(engine.c.value, sa.Integer) + delta,
                                   sa.String),
                               engine.c.expiration: expiration,
                           }).execute()

        self.profile_stop(prof)

        if rp.rowcount == 1:
            return self.get(category, ids, force_write_table=True)
        elif rp.rowcount == 0:
            existing_value = self.get(category, ids, force_write_table=True)
            if existing_value is None:
                raise ValueError("[%s][%s] can't be incr()ed -- it's not set" %
                                 (category, ids))
            else:
                raise ValueError("[%s][%s] has non-integer value %r" %
                                 (category, ids, existing_value))
        else:
            raise ValueError("Somehow %d rows got updated" % rp.rowcount)
Example #3
def user_vote_change_links(period='1 day'):
    rel = Vote.rel(Account, Link)
    type = tdb.rel_types_id[rel._type_id]
    # rt = rel table
    # dt = data table
    rt, account_tt, link_tt, dt = type.rel_table

    aliases = tdb.alias_generator()
    author_dt = dt.alias(aliases.next())

    link_dt = tdb.types_id[Link._type_id].data_table[0].alias(aliases.next())

    # Create an SQL CASE statement for the subreddit vote multiplier
    cases = []
    for subreddit in subreddits_with_custom_karma_multiplier():
        cases.append((sa.cast(link_dt.c.value, sa.Integer) == subreddit._id,
                      subreddit.post_karma_multiplier))
    cases.append((True, g.post_karma_multiplier))  # the default article multiplier

    date = utils.timeago(period)

    s = sa.select([author_dt.c.value, sa.func.sum(sa.cast(rt.c.name, sa.Integer) * sa.case(cases))],
                  sa.and_(rt.c.date >= date,
                          author_dt.c.thing_id == rt.c.rel_id,
                          author_dt.c.key == 'author_id',
                          link_tt.c.thing_id == rt.c.thing2_id,
                          link_tt.c.date >= date,
                          link_dt.c.key == 'sr_id',
                          link_dt.c.thing_id == rt.c.thing2_id),
                  group_by = author_dt.c.value)

    rows = s.execute().fetchall()
    return [(int(r.value), r.sum) for r in rows]
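Note: sa.case pairs each cast comparison with a multiplier and falls back to a default. A compact sketch of that CASE expression (table and values invented):

import sqlalchemy as sa

meta = sa.MetaData()
link_data = sa.Table('link_data', meta, sa.Column('value', sa.String))

cases = [(sa.cast(link_data.c.value, sa.Integer) == 7, 2.0)]
expr = sa.case(cases, else_=1.0)  # default multiplier when nothing matches
print(expr)
# roughly: CASE WHEN CAST(link_data.value AS INTEGER) = :param_1
#          THEN :param_2 ELSE :param_3 END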
Example #4
File: rank.py Project: xxguo/leopard
def weekList():
    if not redis.llen('rank:week'):
        rows = db_session.query(
            User.id,
            User.username,
            label('number', func.count(Investment.amount)),
            label('total_amount', func.sum(Investment.amount))
        ).filter(
            Investment.user_id == User.id,
            cast(Investment.added_at, Date) <= datetime.datetime.today(),
            cast(Investment.added_at, Date) >= datetime.datetime.today() -
            datetime.timedelta(weeks=1)
        ).group_by(User.id).order_by(
            func.sum(Investment.amount).desc()
        ).limit(15).all()

        rank_list = []

        for i in rows:
            i = dict(zip(i.keys(), i))
            data = {
                'id': i['id'],
                'username': i['username'],
                'total_amount': float(i['total_amount']),
                'number': i['number']
            }
            rank_list.append(data)
            redis.rpush('rank:week', json.dumps(data))
        redis.expire('rank:week', 3600)
    else:
        rank_list = [json.loads(i.decode())
                     for i in redis.lrange('rank:week', 0, -1)]

    return rank_list
Example #5
    def add_bucket_field_to_query(self, q, q_entities, field, field_entity):
        # Get min, max if not provided.
        field_min = 0
        field_max = 0

        if not field.has_key("min") or not field.has_key("max"):
            field_min, field_max = self.get_field_min_max(field)

            # Override calculated min/max if values were provided.
        if field.has_key("min"):
            field_min = field.get("min")
        if field.has_key("max"):
            field_max = field.get("max")

        num_buckets = field.get("num_buckets", 10)

        # Get bucket width.
        bucket_width = (field_max - field_min) / num_buckets

        # Get bucket field entities.
        # Bit of a trick here: we use field_max - bucket_width because normally last bucket gets all values >= field_max.
        # Here we use one less bucket, and then filter.  This essentially makes the last bucket include values <= field_max.
        bucket_entity = func.width_bucket(field_entity, field_min, field_max - bucket_width, num_buckets - 1)
        q = q.filter(field_entity <= field_max)
        bucket_label_entity = (
            cast(field_min + (bucket_entity - 1) * bucket_width, String)
            + " to "
            + cast(field_min + bucket_entity * bucket_width, String)
        )
        bucket_label_entity = bucket_label_entity.label(field["label"])

        q_entities.add(bucket_label_entity)
        return q, bucket_label_entity
Example #6
    def collect(self):
        # Strictly, we should include all possible label combinations, each with a count of 0

        emf_purchases = GaugeMetricFamily('emf_purchases', "Tickets purchased",
                                          labels=['product', 'state'])
        emf_payments = GaugeMetricFamily('emf_payments', "Payments received",
                                         labels=['provider', 'state'])
        emf_attendees = GaugeMetricFamily('emf_attendees', "Attendees",
                                          labels=['checked_in', 'badged_up'])
        emf_proposals = GaugeMetricFamily('emf_proposals', "CfP Submissions",
                                          labels=['type', 'state'])

        gauge_groups(emf_purchases, Purchase.query.join(Product),
                     Product.name, Purchase.state)
        gauge_groups(emf_payments, Payment.query,
                     Payment.provider, Payment.state)
        gauge_groups(emf_attendees, AdmissionTicket.query,
                     cast(AdmissionTicket.checked_in, String), cast(AdmissionTicket.badge_issued, String))
        gauge_groups(emf_proposals, Proposal.query,
                     Proposal.type, Proposal.state)

        return [
            emf_purchases,
            emf_payments,
            emf_attendees,
            emf_proposals,
        ]
Example #7
File: filters.py Project: level12/webgrid
    def apply(self, query):
        if self.op == self.default_op and self.value1 is None:
            return query

        if self.op in (ops.between, ops.not_between):
            left = min(self.value1, self.value2)
            right = max(self.value1, self.value2)
            cond = self.sa_col.between(sa.cast(left, sa.Time), sa.cast(right, sa.Time))
            if self.op == ops.not_between:
                cond = ~cond
            return query.filter(cond)

        # Casting this because some SQLAlchemy dialects (MSSQL) convert the value to datetime
        # before binding.
        val = sa.cast(self.value1, sa.Time)

        if self.op == ops.eq:
            query = query.filter(self.sa_col == val)
        elif self.op == ops.not_eq:
            query = query.filter(self.sa_col != val)
        elif self.op == ops.less_than_equal:
            query = query.filter(self.sa_col <= val)
        elif self.op == ops.greater_than_equal:
            query = query.filter(self.sa_col >= val)
        else:
            query = super(TimeFilter, self).apply(query)
        return query
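Note: the point of sa.cast(self.value1, sa.Time) above is to pin the bound parameter's type so the dialect compares it as TIME. A sketch of the same cast on a literal (table assumed):

import datetime
import sqlalchemy as sa

meta = sa.MetaData()
shifts = sa.Table('shifts', meta, sa.Column('starts_at', sa.Time))

val = sa.cast(datetime.time(9, 30), sa.Time)
print(sa.select([shifts]).where(shifts.c.starts_at >= val))
# SELECT ... WHERE shifts.starts_at >= CAST(:param_1 AS TIME)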
Example #8
File: __init__.py Project: xxguo/leopard
    def _investments(self):
        today_investments = Investment.query.filter(
            cast(Investment.added_at, Date) == date.today(),
            Investment.status.in_(
                (get_enum('INVESTMENT_PENDING'),
                 get_enum('INVESTMENT_SUCCESSED'))
            )
        ).order_by("added_at desc").limit(10)

        history_investments = db_session.query(
            func.date_trunc('day', Investment.added_at),
            func.sum(Investment.amount)).group_by(
            func.date_trunc('day', Investment.added_at)
        ).order_by(func.date_trunc('day', Investment.added_at)).all()

        total_investments = db_session.query(
            func.sum(Investment.amount)).scalar()

        today_invest_amount = db_session.query(
            func.sum(Investment.amount)).filter(
            cast(Investment.added_at, Date) == date.today(),
            Investment.status.in_(
                (get_enum('INVESTMENT_PENDING'),
                 get_enum('INVESTMENT_SUCCESSED'))
            )
        ).scalar()
        if not today_invest_amount:
            today_invest_amount = 0

        app.jinja_env.globals['today_invest_amount'] = today_invest_amount
        app.jinja_env.globals['today_investments'] = today_investments
        app.jinja_env.globals['total_investments'] = total_investments
        app.jinja_env.globals['history_investments'] = history_investments
Example #9
File: __init__.py Project: xxguo/leopard
    def _repayments(self):
        today_repayments = Plan.query.filter(
            cast(Plan.plan_time, Date) == date.today(),
            Plan.status == get_enum('PLAN_PENDING')
        ).order_by('plan_time desc').limit(10)

        today_repay_amount = db_session.query(
            func.sum(Plan.amount)
        ).filter(
            cast(Plan.plan_time, Date) == date.today(),
            Plan.status == get_enum('PLAN_PENDING')
        ).scalar()

        if not today_repay_amount:
            today_repay_amount = 0

        total_repay_amount = db_session.query(
            func.sum(Plan.amount)
        ).filter(Plan.status == get_enum('PLAN_PENDING')).scalar()

        if not total_repay_amount:
            total_repay_amount = 0

        app.jinja_env.globals['today_repay_amount'] = today_repay_amount
        app.jinja_env.globals['total_repay_amount'] = total_repay_amount
        app.jinja_env.globals['today_repayments'] = today_repayments
Example #10
    def filter_single_by_time(cls, type, objects, year=None, week_number=None, day=None):

        assert (week_number and year) or day
        start_date, end_date = None, None

        if year and week_number:
            start_date, end_date = get_start_and_end_date_from_week_and_year(
                year,
                week_number
            )
        if day:
            start_date = day
            end_date = day

        objects = objects.filter(
            or_(
                tuple_(
                    cast(type.start_time, Date), cast(type.end_time, Date)
                ).op('overlaps')(
                    tuple_(
                        start_date, end_date
                    )
                ),
                or_(
                    # First range ends on the start date of the second
                    cast(type.end_time, Date) == start_date,
                    # Second range ends on the start date of the first
                    end_date == cast(type.start_time, Date)
                )
            )
        )

        return objects
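Note: tuple_(...).op('overlaps') renders PostgreSQL's (start, end) OVERLAPS (start, end) operator, and the Date casts keep both sides comparable. A self-contained sketch (table assumed):

import datetime
from sqlalchemy import Column, Date, DateTime, MetaData, Table, cast, tuple_
from sqlalchemy.dialects import postgresql

meta = MetaData()
ev = Table('event', meta,
           Column('start_time', DateTime),
           Column('end_time', DateTime))

start, end = datetime.date(2020, 1, 6), datetime.date(2020, 1, 12)
cond = tuple_(cast(ev.c.start_time, Date),
              cast(ev.c.end_time, Date)).op('overlaps')(tuple_(start, end))
print(cond.compile(dialect=postgresql.dialect()))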
Example #11
def retrieve_simulation_latest_job(uid, job_type=JOB_TYPE_COMPUTING):
    """Returns the latest job of the given type for a simulation.

    :param str uid: Simulation UID.
    :param str job_type: Type of job.

    :returns: Job details, or None if no matching job exists.
    :rtype: tuple

    """
    j = types.Job
    s = types.Simulation
    qry = session.raw_query(
        s.id,                                           #0
        j.typeof,                                       #1
        j.execution_state,                              #2
        cast(j.is_compute_end, Integer),                #3
        cast(j.is_error, Integer),                      #4
        as_datetime_string(j.execution_start_date),     #5
        as_datetime_string(j.execution_end_date)        #6
        )

    qry = qry.join(j, s.uid == j.simulation_uid)
    qry = qry.order_by(j.execution_start_date.desc())

    qry = qry.filter(j.execution_start_date != None)
    qry = qry.filter(j.execution_state != None)
    qry = qry.filter(j.typeof == job_type)
    qry = qry.filter(s.uid == uid)

    return qry.first()
Example #12
 def _log_deposit_action(deposit, user, cashbox: str, final_amount: float, action: str):
     # Without these casts the strings end up typed as 'unknown' in Postgres,
     # where the function lookup then fails due to an incorrect type signature.
     username = cast(user.username, String)
     action_string = cast(action, String)
     deposit_name = cast(deposit.name, String)
     proc = procs.exam_deposit_action(cash_box_ids[cashbox], action_string, final_amount, username, deposit_name)
     sqla.session.execute(proc)
Example #13
def _get_event_with_enterqueue(session, start, end, match, event):
    start = start.strftime(_STR_TIME_FMT)
    end = end.strftime(_STR_TIME_FMT)

    enter_queues = (session
                    .query(QueueLog.callid,
                           cast(QueueLog.time, TIMESTAMP).label('time'))
                    .filter(and_(QueueLog.event == 'ENTERQUEUE',
                                 between(QueueLog.time, start, end))))

    enter_map = {}
    for enter_queue in enter_queues.all():
        enter_map[enter_queue.callid] = enter_queue.time

    if enter_map:
        res = (session
               .query(QueueLog.event,
                      QueueLog.queuename,
                      cast(QueueLog.time, TIMESTAMP).label('time'),
                      QueueLog.callid,
                      QueueLog.data3)
               .filter(and_(QueueLog.event == match,
                            QueueLog.callid.in_(enter_map))))

        for r in res.all():
            yield {
                'callid': r.callid,
                'queue_name': r.queuename,
                'time': enter_map[r.callid],
                'event': event,
                'talktime': 0,
                'waittime': int(r.data3) if r.data3 else 0
            }
Example #14
    def get_requested_slots(self, resource, except_applications, start_date, end_date):
        query = current_app.db_session.query(RepeatingSlotRequest)

        objects = query.filter(
            RepeatingSlotRequest.application.has(resource_id=resource.id)
        )

        objects = objects.filter(
            RepeatingSlotRequest.application.has(status="Pending")
        )

        if except_applications:
            objects = objects.filter(
                ~RepeatingSlotRequest.application_id.in_(except_applications)
            )

        objects = objects.filter(
            or_(
                tuple_(
                    RepeatingSlotRequest.start_date, RepeatingSlotRequest.end_date
                ).op('overlaps')(
                    tuple_(
                        cast(start_date, Date), cast(end_date, Date)
                    )
                ),
                or_(
                    # First range ends on the start date of the second
                    RepeatingSlotRequest.end_date == cast(start_date, Date),
                    # Second range ends on the start date of the first
                    cast(end_date, Date) == RepeatingSlotRequest.start_date
                )
            )
        )

        return objects.all()
Example #15
    def get_period_schedule(meetings, period, limit_min=None, limit_max=None):
        if period:
            if limit_max:
                meetings = meetings.filter(
                        cast(schedule.Agenda.endtime, Time) < limit_max)
            else:
                meetings = meetings.filter(
                        cast(schedule.Agenda.endtime, Time) < period.end)
            if limit_min:
                meetings = meetings.filter(
                        cast(schedule.Agenda.starttime, Time) > limit_min)
            else:
                meetings = meetings.filter(
                        cast(schedule.Agenda.starttime, Time) > period.begin)
        meetings = meetings.order_by(schedule.Agenda.starttime).all()

        try:
            first_patient_schedule_time = _get_first_meeting_time(meetings[0])
            last_patient_schedule_time = _get_last_meeting_time(meetings[-1])
        except IndexError:
            if not period:
                return ((None, None), None, (None, None))
            return ((None, period.begin), None, (None, period.end))
        if not period:
            return ((first_patient_schedule_time, None), meetings,
                    (last_patient_schedule_time, None))
        return (_get_first_scheduled_time(period, first_patient_schedule_time),
                meetings,
                _get_last_scheduled_time(period, last_patient_schedule_time))
Example #16
 def _set_orderby_desc(self, query, model, limit,
                       last_id, offset, descending, orderby):
     """Return an updated query with the proper orderby and desc."""
     if orderby == 'fav_user_ids':
         n_favs = func.coalesce(func.array_length(model.fav_user_ids, 1), 0).label('n_favs')
         query = query.add_column(n_favs)
     if orderby in ['created', 'updated', 'finish_time']:
         if descending:
             query = query.order_by(
                 desc(cast(getattr(model, orderby), TIMESTAMP)))
         else:
             query = query.order_by(cast(getattr(model, orderby), TIMESTAMP))
     else:
         if orderby != 'fav_user_ids':
             if descending:
                 query = query.order_by(desc(getattr(model, orderby)))
             else:
                 query = query.order_by(getattr(model, orderby))
         else:
             if descending:
                 query = query.order_by(desc("n_favs"))
             else:
                 query = query.order_by("n_favs")
     if last_id:
         query = query.limit(limit)
     else:
         query = query.limit(limit).offset(offset)
     return query
Example #17
    def get_repeating(self, resource, except_applications, start_date, end_date, statuses):
        query = current_app.db_session.query(RepeatingSlot)
        objects = query.filter(Application.resource == resource,
                               RepeatingSlot.application_id == Application.id,
                               Application.status.in_(statuses))

        if except_applications:
            objects = objects.filter(
                ~RepeatingSlot.application_id.in_(except_applications)
            )

        objects = objects.filter(
            or_(
                tuple_(
                    RepeatingSlot.start_date, RepeatingSlot.end_date
                ).op('overlaps')(
                    tuple_(
                        cast(start_date, Date), cast(end_date, Date)
                    )
                ),
                or_(
                    # First range ends on the start date of the second
                    RepeatingSlot.end_date == cast(start_date, Date),
                    # Second range ends on the start date of the first
                    cast(end_date, Date) == RepeatingSlot.start_date
                )
            )
        )

        return objects.all()
Example #18
def weekData(request):
    """Return an array of location counts per day within the last seven days."""
    today = datetime.date.today()
    # Initialize the JSON object
    data = {
        'label': [str(today - datetime.timedelta(days=i)) for i in range(1, 8)],
        'nbArgos': [0] * 7,
        'nbGPS': [0] * 7
    }

    # Argos data
    argos_query = select(
        [cast(Argos.date, Date).label('date'), func.count(Argos.id).label('nb')]
    ).where(Argos.date >= today - datetime.timedelta(days=7)
    ).group_by(cast(Argos.date, Date))
    for date, nb in DBSession.execute(argos_query).fetchall():
        try:
            i = data['label'].index(str(date))
            data['nbArgos'][i] = nb
        except ValueError:
            pass

    # GPS data
    gps_query = select(
        [cast(Gps.date, Date).label('date'), func.count(Gps.id).label('nb')]
    ).where(Gps.date >= today - datetime.timedelta(days=7)
    ).group_by(cast(Gps.date, Date))
    for date, nb in DBSession.execute(gps_query).fetchall():
        try:
            i = data['label'].index(str(date))
            data['nbGPS'][i] = nb
        except ValueError:
            pass

    return data
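Note: casting the timestamp to Date both in the select list and in group_by is what folds rows into one count per calendar day. A sketch with an assumed table:

import datetime
from sqlalchemy import (Column, Date, DateTime, Integer, MetaData, Table,
                        cast, func, select)

meta = MetaData()
fixes = Table('fixes', meta,
              Column('id', Integer, primary_key=True),
              Column('date', DateTime))

week_ago = datetime.date.today() - datetime.timedelta(days=7)
per_day = (select([cast(fixes.c.date, Date).label('day'),
                   func.count(fixes.c.id).label('nb')])
           .where(fixes.c.date >= week_ago)
           .group_by(cast(fixes.c.date, Date)))
print(per_day)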
Example #19
    def test_eval_none_flag_orm(self):
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import Session

        Base = declarative_base()

        class Data(Base):
            __table__ = self.tables.data_table

        s = Session(testing.db)

        d1 = Data(name='d1', data=None, nulldata=None)
        s.add(d1)
        s.commit()

        s.bulk_insert_mappings(
            Data, [{"name": "d2", "data": None, "nulldata": None}]
        )
        eq_(
            s.query(
                cast(self.tables.data_table.c.data, String(convert_unicode="force")),
                cast(self.tables.data_table.c.nulldata, String)
            ).filter(self.tables.data_table.c.name == 'd1').first(),
            ("null", None)
        )
        eq_(
            s.query(
                cast(self.tables.data_table.c.data, String(convert_unicode="force")),
                cast(self.tables.data_table.c.nulldata, String)
            ).filter(self.tables.data_table.c.name == 'd2').first(),
            ("null", None)
        )
Example #20
File: site.py Project: simlmx/meteomap
def search_route():
    query = request.args.get('q')
    per_page = request.args.get('per_page', 10, type=int)
    page = request.args.get('page', 0, type=int)
    if query is None:
        return json.dumps({'results':[]})
    result = {}
    with session_scope() as session:
        results = session.query(
            City.name,
            City.country,
            func.ST_Y(cast(City.location, Geometry())),
            func.ST_X(cast(City.location, Geometry())),
            City.id
            ) \
            .filter(unaccent(City.name).ilike(unaccent('%' + query + '%'))) \
            .limit(per_page + 1) \
            .offset(page * per_page) \
            .all()

        more = len(results) == per_page + 1
        results = results[:per_page]
        result = json.dumps({
            'results':
            [{'id': c[4], 'text': '{}, {}'.format(c[0], c[1]), 'coords': (c[2], c[3])}
             for c in results],
            'more': more})
    return result
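Note: City.location is presumably a PostGIS geography column; ST_X/ST_Y want a geometry, hence the casts. A sketch assuming GeoAlchemy2:

from geoalchemy2 import Geography, Geometry
from sqlalchemy import Column, Integer, MetaData, Table, cast, func, select

meta = MetaData()
city = Table('city', meta,
             Column('id', Integer, primary_key=True),
             Column('location', Geography(geometry_type='POINT', srid=4326)))

q = select([func.ST_Y(cast(city.c.location, Geometry)),   # latitude
            func.ST_X(cast(city.c.location, Geometry))])  # longitude
print(q)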
Example #21
def views_filter(request):
    try:
        # name_vue = request.matchdict['name']
        # table = Base.metadata.tables[name_vue]
        criteria = request.json_body.get('criteria', {})
        viewName = criteria['viewName']
        table = Base.metadata.tables[viewName]

        result = {'type': 'FeatureCollection', 'features': []}

        # Label the casts (not the raw columns) and group by the raw columns.
        query = select([cast(table.c['LAT'], Float).label('lat'),
                        cast(table.c['LON'], Float).label('lon'),
                        func.count(table.c.values()[0])]
                       ).group_by(table.c['LAT'], table.c['LON'])

        for fltr in criteria['filters']:
            column = fltr['Column']
            query = query.where(
                eval_binary_expr(table.c[column], fltr['Operator'], fltr['Value']))

        for lat, lon, nb in DBSession.execute(query).fetchall():
            result['features'].append(
                {'type': 'Feature',
                 'properties': {'count': nb},
                 'geometry': {'type': 'Point', 'coordinates': [lon, lat]}})
        return result
    except Exception as e:
        print(e)
Example #22
def _add_ordering(sql_query, table, column_type, column_name, order):
    # Special case for this column, which sorts contigs correctly:
    if column_name == 'contig':
        get_contig_num = cast(
            text(r"SUBSTRING({} FROM '\d+')".format(table.c.contig)),
            type_=Integer)
        starts_with_chr = (text(r"SUBSTRING({} FROM '^chr(\d+)')"
                                .format(table.c.contig)) != literal(''))
        starts_with_number = (text(r"SUBSTRING({} FROM '^\d+')"
                                   .format(table.c.contig)) != literal(''))
        # 10000 used here to mean "should be at the end of all the numbers",
        # assuming we never hit a chromosome number >= 10000.
        contig_num_col = case(
            [(starts_with_chr, get_contig_num),
             (starts_with_number, get_contig_num)],
            else_=literal(10000)
        )
        contig_len_col = func.length(table.c.contig)
        contig_col = table.c.contig
        if order == 'desc':
            contig_len_col = desc(contig_len_col)
            contig_col = desc(contig_col)
        return sql_query.order_by(contig_num_col, contig_len_col, contig_col)
    sqla_type = vcf_type_to_sqla_type(column_type)
    column = cast(table.c[column_name], type_=sqla_type)
    column = {'asc': asc(column), 'desc': desc(column)}.get(order)
    return sql_query.order_by(column)
Example #23
    def update_scores(self, model, chunksize=10000):

        # update node scores
        waypoint_nodes = (
            self.session.query(
                Node,
                ST_X(cast(Node.loc, Geometry)),
                ST_Y(cast(Node.loc, Geometry)))
                .filter(Node.num_ways != 0)
                .order_by(func.random()))   # random order

        # process nodes in chunks for memory efficiency.
        # note: normalization of scores is done per chunk, which should be a
        # reasonable approximation to global normalization when the chunks are
        # large since the query specifies random ordering
        for chunk in _grouper(chunksize, waypoint_nodes):
            nodes, x, y = zip(*chunk)
            X = np.vstack((x, y)).T
            scores = model.score_samples(X)
            for node, score in zip(nodes, scores):
                node.score = score

        # update cumulative scores
        sq = (
            self.session.query(
                Waypoint.id.label('id'),
                func.sum(Node.score).over(
                    partition_by=Waypoint.way_id,
                    order_by=Waypoint.idx).label('cscore'))
                .join(Node)
                .subquery())

        (self.session.query(Waypoint)
             .filter(Waypoint.id == sq.c.id)
             .update({Waypoint.cscore: sq.c.cscore}))
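Note: the cumulative score above comes from a window function rather than a cast, and the pattern is worth isolating. A sketch of the running sum (tables assumed):

from sqlalchemy import Column, Float, Integer, MetaData, Table, func, select

meta = MetaData()
wp = Table('waypoint', meta,
           Column('id', Integer, primary_key=True),
           Column('way_id', Integer),
           Column('idx', Integer),
           Column('score', Float))

cscore = (func.sum(wp.c.score)
          .over(partition_by=wp.c.way_id, order_by=wp.c.idx)
          .label('cscore'))
print(select([wp.c.id, cscore]))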
Example #24
File: structures.py Project: alishir/tcr
def query_recursive_tree():
    structure_tree = (
        DBSession.query(
            Structure.id,
            Structure.name,
            Structure.parent_id,
            cast(1, Integer()).label('depth'),
            array([cast(Structure.name, Text)]).label('name_path'),
            array([Structure.id]).label('path'),
        )
        .filter(Structure.condition_root_level())
        .cte(name='structure_tree', recursive=True)
    )
    st = aliased(structure_tree, name='st')
    s = aliased(Structure, name='s')
    structure_tree = structure_tree.union_all(
        DBSession.query(
            s.id, s.name, s.parent_id,
            (st.c.depth + 1).label('depth'),
            func.array_append(
                st.c.name_path, cast(s.name, Text)
            ).label('name_path'),
            func.array_append(st.c.path, s.id).label('path'),
        )
        .filter(s.parent_id == st.c.id)
    )
    return DBSession.query(structure_tree)
Example #25
def getactivitystatistic():
	"""get activity statistic information"""
	try:
		token = request.json['token']
		activityid = request.json['activityid']
		activity = getactivitybyid(activityid)
		u = getuserinformation(token)
		if u != None and activity != None:
			state = 'successful'
			reason = ''

			registeredTotal = activity.users.count()
			registeredToday = activity.users.filter(cast(models.attentactivity.timestamp, Date) == date.today()).count()
			likedTotal = activity.likeusers.count()
			likedToday = activity.likeusers.filter(cast(models.likeactivity.timestamp, Date) == date.today()).count()
			
			result = {
					  'activity':activity.title,
					  'registeredTotal':registeredTotal,
					  'registeredToday':registeredToday,
					  'likedTotal':likedTotal, 
					  'likedToday':likedToday, 
					 }

		else:
			state = 'fail'
			reason = 'invalid access'
			result = ''

	except Exception,e:
		print e
		state = 'fail'
		reason = 'exception'
		result = ''
Example #26
    def get_stats(cls, date_from=None, date_to=None, limit=100):
        """
        Return overall stats/summary or stats for a given date range if given

        - date_to is inclusive
            - if just date_from is given, only records for that date are returned
        """

        query = db.query(cast(Visit.timestamp, Date),
                         func.count(Visit.id))

        if date_from:
            date_from, date_to = process_date_range(date_from, date_to)
            query = query.filter(Visit.timestamp >= date_from,
                                 Visit.timestamp < date_to)

        query = query.group_by(cast(Visit.timestamp, Date))
        query = query.order_by(cast(Visit.timestamp, Date))

        if limit:
            query = query.limit(limit)

        stats = query.all()

        return stats
Example #27
    def applySearchParam(self, query, searchParam):
        if searchParam.CustomerId:
            query = query.filter(Order.CustomerId == searchParam.CustomerId)
        if searchParam.CustomerName:
            query = query.filter(CustomerContactDetails.FirstName.like("%%%s" % searchParam.CustomerName))
        if searchParam.IpAddress:
            query = query.filter(Order.IpAddress == searchParam.IpAddress)

        if searchParam.FromDate and not searchParam.ToDate:
            query = query.filter(cast(Order.OrderDate, Date) >= searchParam.FromDate)
        if not searchParam.FromDate and searchParam.ToDate:
            query = query.filter(cast(Order.OrderDate, Date) <= searchParam.ToDate)
        if searchParam.FromDate and searchParam.ToDate:
            query = query.filter(
                cast(Order.OrderDate, Date) >= searchParam.FromDate, cast(Order.OrderDate, Date) <= searchParam.ToDate
            )

        if searchParam.MinAmount and not searchParam.MaxAmount:
            query = query.filter(Order.OrderAmount >= searchParam.MinAmount)
        if not searchParam.MinAmount and searchParam.MaxAmount:
            query = query.filter(Order.OrderAmount <= searchParam.MaxAmount)
        if searchParam.MinAmount and searchParam.MaxAmount:
            query = query.filter(Order.OrderAmount >= searchParam.MinAmount, Order.OrderAmount <= searchParam.MaxAmount)

        if searchParam.InvoiceStatus == "opened":
            query = query.filter(or_((Order.OrderAmount - Order.PaidAmount) > 0.5, Order.OrderAmount == 0))
        elif searchParam.InvoiceStatus == "closed":
            query = query.filter(Order.OrderAmount != 0, (Order.PaidAmount - Order.OrderAmount) > 0.001)
        elif searchParam.InvoiceStatus == "overdue":
            query = query.filter((Order.OrderAmount - Order.PaidAmount) > 0.5, Order.DueDate < func.now())
        return query
Example #28
def core_individuals_stations(request):
    """ Get the stations of an identified individual. Parameter is : id (int)"""
    try:
        id = int(request.params["id"])
        # Query
        query = (
            select(
                [
                    cast(V_Individuals_LatLonDate.c.lat, Float),
                    cast(V_Individuals_LatLonDate.c.lon, Float),
                    V_Individuals_LatLonDate.c.date,
                ]
            )
            .where(V_Individuals_LatLonDate.c.ind_id == id)
            .order_by(desc(V_Individuals_LatLonDate.c.date))
        )
        # Create list of features from query result
        epoch = datetime.utcfromtimestamp(0)
        features = [
            {
                "type": "Feature",
                "properties": {"date": (date - epoch).total_seconds()},
                "geometry": {"type": "Point", "coordinates": [lon, lat]},
            }
            for lat, lon, date in reversed(DBSession.execute(query).fetchall())
        ]

        result = {"type": "FeatureCollection", "features": features}
        return result
    except Exception:
        return []
Example #29
    def result(self):
        subQueryUser = select([User.name], Session.doctor_id == User.id).label('doctorName')
        subQueryService = select([Service.name], Session.service_id == Service.id).label('serviceName')
        subQueryPatient = select([Patient.name], Session.patient_id == Patient.id).label('patientName')

        q = db.session.query( Session.order_id, Session.patient_id, Session.service_id, Session.doctor_id, 
            subQueryUser, subQueryService, subQueryPatient,
            cast(Session.startTime, TIME).label("startTime"), 
            cast(Session.endTime, TIME).label("endTime"),
            Session.sessionDate, Session.state, Session.notes, 
        ).filter(Session.sessionDate.between(self.startDate, self.endDate))

        if self.state is not None:
            q = q.filter(Session.state == self.state)
        
        if self.service_id is not None:
            q = q.filter(Session.service_id == self.service_id)

        if self.doctor_id is not None:
            q = q.filter(Session.doctor_id == self.doctor_id)

        if self.patient_id is not None:
            q = q.filter(Session.patient_id == self.patient_id)

        dbResult = q.all()
        result = [dict(zip(row.keys(), row)) for row in dbResult ]
        return {"details": result, "totalNumberOfSessions": len(result)}
Example #30
    def incr(self, category, ids, time=0, delta=1):
        self.delete_if_expired(category, ids)

        expiration = expiration_from_time(time)

        rp = self.table.update(sa.and_(self.table.c.category == category,
                                       self.table.c.ids == ids,
                                       self.table.c.kind == 'num'),
                               values={
                                   self.table.c.value: sa.cast(
                                       sa.cast(self.table.c.value, sa.Integer)
                                       + delta,
                                       sa.String),
                                   self.table.c.expiration: expiration,
                               }).execute()
        if rp.rowcount == 1:
            return self.get(category, ids)
        elif rp.rowcount == 0:
            existing_value = self.get(category, ids)
            if existing_value is None:
                raise ValueError("[%s][%s] can't be incr()ed -- it's not set" %
                                 (category, ids))
            else:
                raise ValueError("[%s][%s] has non-integer value %r" %
                                 (category, ids, existing_value))
        else:
            raise ValueError("Somehow %d rows got updated" % rp.rowcount)
Example #31
File: compiler.py Project: tvial/ibis
def _millisecond(t, expr):
    # extract('millisecond') returns the milliseconds including the seconds
    # component, so take the result mod 1000
    sa_arg, = map(t.translate, expr.op().args)
    return sa.cast(sa.extract('millisecond', sa_arg), sa.SMALLINT) % 1000
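Note: on Postgres, extract('millisecond') includes the seconds component (12.345 s gives 12345), so the mod 1000 recovers just the millisecond part. A sketch with an assumed table:

import sqlalchemy as sa

meta = sa.MetaData()
t = sa.Table('t', meta, sa.Column('ts', sa.DateTime))

millis = sa.cast(sa.extract('millisecond', t.c.ts), sa.SMALLINT) % 1000
print(millis)
# roughly: CAST(EXTRACT(millisecond FROM t.ts) AS SMALLINT) % :param_1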
Example #32
 def column_expression(self, col):
     return cast(col, Raster)
Example #33
 def bind_expression(self, bindvalue):
     return cast(bindvalue, Raster)
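Note: the two one-liners above are the standard SQLAlchemy hooks for wrapping values on the way into and out of a custom column type. A sketch of where they live, with Raster as a stand-in for a PostGIS type:

from sqlalchemy import cast
from sqlalchemy.types import UserDefinedType

class Raster(UserDefinedType):
    def get_col_spec(self):
        return 'raster'

    def bind_expression(self, bindvalue):
        # wraps values bound into INSERT/UPDATE/WHERE clauses
        return cast(bindvalue, Raster)

    def column_expression(self, col):
        # wraps the column when it appears in a SELECT list
        return cast(col, Raster)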
Example #34
	ORDER BY ords."ShippedDate"
"""

from sqlalchemy.sql.expression import literal, func
from sqlalchemy import cast, Numeric, extract
from core_lib import meta
from core_lib import utils

from test_area import model

print("all good")

od = model.order_details
orders = model.orders
summation = func.sum(od.c.UnitPrice * od.c.Quantity * (1 - od.c.Discount))
rounding = func.round(cast(summation, Numeric(10, 2)), 2)
# Group per order. (SQL's positional "GROUP BY 1" cannot be spelled as
# literal(1): SQLAlchemy renders that as a bound parameter.)
subtotal_cte = (meta.session.query(od.c.OrderID,
                                   rounding.label("total"))
                .group_by(od.c.OrderID)).cte("subtotal")
query = (meta.session.query(
    orders.c.ShippedDate,
    orders.c.OrderID,
    subtotal_cte.c.total,
    extract("year", orders.c.ShippedDate).label("year"),
).join(subtotal_cte, orders.c.OrderID == subtotal_cte.c.OrderID).filter(
    orders.c.ShippedDate >= "1996-12-24").filter(
        orders.c.ShippedDate <= "1997-09-30").filter(
            orders.c.ShippedDate.isnot(None)).order_by(
                orders.c.ShippedDate).limit(10))
utils.print_table(query)
Example #35
def _eq_filter(field: 'Column', value: 'Any') -> 'Any':
    column_type = field.type
    if isinstance(column_type, postgresql.ARRAY):
        value = cast(value, column_type)

    return field == value
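Note: comparing a Postgres ARRAY column against a plain Python list binds correctly once the list is cast to the column's own type, which is all _eq_filter does. A sketch:

from sqlalchemy import Column, MetaData, String, Table, cast
from sqlalchemy.dialects import postgresql

meta = MetaData()
t = Table('t', meta, Column('tags', postgresql.ARRAY(String)))

expr = t.c.tags == cast(['a', 'b'], t.c.tags.type)
print(expr.compile(dialect=postgresql.dialect()))
# t.tags = CAST(%(param_1)s AS VARCHAR[])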
Example #36
class FilterSet(graphene.InputObjectType):
    """Filter set for connection field."""

    _custom_filters = set()
    _filter_aliases = '_filter_aliases'
    model = None

    EQ = 'eq'
    NE = 'ne'
    LIKE = 'like'
    ILIKE = 'ilike'
    IS_NULL = 'is_null'
    IN = 'in'
    NOT_IN = 'not_in'
    LT = 'lt'
    LTE = 'lte'
    GT = 'gt'
    GTE = 'gte'
    RANGE = 'range'
    CONTAINS = 'contains'
    CONTAINED_BY = 'contained_by'
    OVERLAP = 'overlap'

    AND = 'and'
    OR = 'or'
    NOT = 'not'

    GRAPHQL_EXPRESSION_NAMES = {
        EQ: '',
        NE: NE,
        LIKE: LIKE,
        ILIKE: ILIKE,
        IS_NULL: IS_NULL,
        IN: IN,
        NOT_IN: NOT_IN,
        LT: LT,
        LTE: LTE,
        GT: GT,
        GTE: GTE,
        RANGE: RANGE,
        CONTAINS: CONTAINS,
        CONTAINED_BY: CONTAINED_BY,
        OVERLAP: OVERLAP,
        AND: AND,
        OR: OR,
        NOT: NOT,
    }

    ALLOWED_FILTERS = {
        types.Boolean: [EQ, NE],
        types.Date: [EQ, LT, LTE, GT, GTE, NE, IN, NOT_IN, RANGE],
        types.Time: [EQ, LT, LTE, GT, GTE, NE, IN, NOT_IN, RANGE],
        types.DateTime: [EQ, LT, LTE, GT, GTE, NE, IN, NOT_IN, RANGE],
        types.String: [EQ, NE, LIKE, ILIKE, IN, NOT_IN],
        TSVectorType: [EQ, NE, LIKE, ILIKE, IN, NOT_IN],
        types.Integer: [EQ, LT, LTE, GT, GTE, NE, IN, NOT_IN, RANGE],
        types.Numeric: [EQ, LT, LTE, GT, GTE, NE, IN, NOT_IN, RANGE],
        postgresql.UUID: [EQ, NE, IN, NOT_IN],
        postgresql.INET: [EQ, NE, IN, NOT_IN],
        postgresql.CIDR: [EQ, NE, IN, NOT_IN],
        postgresql.JSON: [EQ, NE, IN, NOT_IN],
        postgresql.HSTORE: [EQ, NE, IN, NOT_IN],
        postgresql.ARRAY: [
            EQ,
            NE,
            IN,
            NOT_IN,
            LT,
            LTE,
            GT,
            GTE,
            CONTAINS,
            CONTAINED_BY,
            OVERLAP,
        ],
    }

    ALL = [...]

    FILTER_FUNCTIONS = {
        EQ: lambda field, v: _eq_filter(field, v),
        NE: lambda field, v: not_(_eq_filter(field, v)),
        LIKE: lambda field, v: field.like(v),
        ILIKE: lambda field, v: field.ilike(v),
        IS_NULL: lambda field, v: field.is_(None) if v else field.isnot(None),
        IN: lambda field, v: field.in_(v),
        NOT_IN: lambda field, v: field.notin_(v),
        LT: lambda field, v: field < v,
        LTE: lambda field, v: field <= v,
        GT: lambda field, v: field > v,
        GTE: lambda field, v: field >= v,
        RANGE: lambda field, v: field.between(v[RANGE_BEGIN], v[RANGE_END]),
        CONTAINS: lambda field, v: field.contains(cast(v, field.type)),
        CONTAINED_BY: lambda field, v: field.contained_by(cast(v, field.type)),
        OVERLAP: lambda field, v: field.overlap(cast(v, field.type)),
    }

    FILTER_OBJECT_TYPES = {
        AND: lambda type_, _, doc: graphene.List(graphene.NonNull(type_)),
        OR: lambda type_, _, doc: graphene.List(graphene.NonNull(type_)),
        NOT: lambda type_, _, doc: type_,
        IS_NULL: lambda *x: graphene.Boolean(description=x[2]),
        RANGE: _range_filter_type,
        IN: _in_filter_type,
        NOT_IN: _in_filter_type,
    }

    DESCRIPTIONS = {
        EQ:
        'Exact match.',
        NE:
        'Not match.',
        LIKE:
        'Case-sensitive containment test.',
        ILIKE:
        'Case-insensitive containment test.',
        IS_NULL:
        'Takes either `true` or `false`.',
        IN:
        'In a given list.',
        NOT_IN:
        'Not in a given list.',
        LT:
        'Less than.',
        LTE:
        'Less than or equal to.',
        GT:
        'Greater than.',
        GTE:
        'Greater than or equal to.',
        RANGE:
        'Selects values within a given range.',
        CONTAINS: ('Elements are a superset of the elements '
                   'of the argument array expression.'),
        CONTAINED_BY: ('Elements are a proper subset of the elements '
                       'of the argument array expression.'),
        OVERLAP:
        ('Array has elements in common with an argument array expression.'),
        AND:
        'Conjunction of filters joined by ``AND``.',
        OR:
        'Conjunction of filters joined by ``OR``.',
        NOT:
        'Negation of filters.',
    }

    class Meta:
        abstract = True

    @classmethod
    def __init_subclass_with_meta__(cls,
                                    model=None,
                                    fields=None,
                                    _meta=None,
                                    **options):
        if model is None and fields:
            raise AttributeError('Model not specified')

        if not _meta:
            _meta = FilterSetOptions(cls)

        cls.model = model
        _meta.model = model

        extra_expressions = {}
        extra_allowed_filters = {}
        for klass in reversed(cls.__mro__):
            with contextlib.suppress(AttributeError):
                for key, expr in klass.EXTRA_EXPRESSIONS.items():
                    extra_expressions[key] = expr

            with contextlib.suppress(AttributeError):
                for key, exprs in klass.EXTRA_ALLOWED_FILTERS.items():
                    extra_allowed_filters[key] = exprs

        if extra_expressions or extra_allowed_filters:
            cls._register_extra(extra_expressions, extra_allowed_filters)

        filters_fields = {}
        if model is not None:
            # Add default filter objects.
            filters_fields = cls._generate_default_filters(model, fields)

        for op in [cls.AND, cls.OR, cls.NOT]:
            doc = cls.DESCRIPTIONS.get(op)
            graphql_name = cls.GRAPHQL_EXPRESSION_NAMES[op]
            filters_fields[graphql_name] = graphene.InputField(
                cls.FILTER_OBJECT_TYPES[op](cls, False, doc), description=doc)

        if not _meta.fields:
            _meta.fields = {}

        _meta.fields.update(filters_fields)

        default_filter_keys = set(filters_fields.keys())

        # Add custom filter objects.
        super().__init_subclass_with_meta__(_meta=_meta, **options)

        # Save set of custom filter names.
        cls._custom_filters = set()
        meta_fields = set(_meta.fields.keys())
        if meta_fields:
            cls._custom_filters = meta_fields.difference(default_filter_keys)

    @classmethod
    def _register_extra(cls, extra_expressions: dict,
                        extra_allowed_filters: dict):
        """
        Register new expressions and allowed filters.

        Args:
            extra_expressions: New expressions.
            extra_allowed_filters: New allowed filters.

        """
        cls.GRAPHQL_EXPRESSION_NAMES = deepcopy(cls.GRAPHQL_EXPRESSION_NAMES)
        cls.ALLOWED_FILTERS = deepcopy(cls.ALLOWED_FILTERS)
        cls.ALLOWED_FILTERS.update(extra_allowed_filters)
        cls.FILTER_FUNCTIONS = deepcopy(cls.FILTER_FUNCTIONS)
        cls.FILTER_OBJECT_TYPES = deepcopy(cls.FILTER_OBJECT_TYPES)
        cls.DESCRIPTIONS = deepcopy(cls.DESCRIPTIONS)

        for key, data in extra_expressions.items():
            graphql_name = data['graphql_name']
            for_types = data.get('for_types', [])
            filter_ = data['filter']
            object_type = data.get('input_type')
            description = data.get('description')

            cls.GRAPHQL_EXPRESSION_NAMES.update({key: graphql_name})

            for sqla_type in for_types:
                try:
                    all_expr = cls.ALLOWED_FILTERS[sqla_type]
                except KeyError:
                    all_expr = []

                if key not in all_expr:
                    all_expr.append(key)
                cls.ALLOWED_FILTERS[sqla_type] = all_expr
                cls.FILTER_FUNCTIONS[key] = filter_

                if object_type is not None:
                    cls.FILTER_OBJECT_TYPES[key] = object_type

                cls.DESCRIPTIONS[key] = description

    @classmethod
    def aliased(
        cls,
        query,
        element,
        alias=None,
        name=None,
        flat=False,
        adapt_on_names=False,
    ):
        """
        Get an alias of the given element.

        Notes:
            Other arguments are the same as sqlalchemy.orm.aliased.

        Args:
            query: SQLAlchemy query (Deprecated: Graphene resolve info).

        Returns:
            Alias.

        """
        if isinstance(query, Query):
            filter_aliases = cls._aliases_from_query(query)
        else:
            example = cls._build_example_for_deprecation_warning(
                element, alias, name, flat, adapt_on_names)
            warnings.warn(
                'Graphene resolve info is deprecated, use SQLAlchemy query. ' +
                example,
                DeprecationWarning,
                stacklevel=2,
            )
            filter_aliases = cls._aliases_from_info(query)

        key = element, name

        try:
            return filter_aliases[key]
        except KeyError:
            alias = aliased(element, alias, name, flat, adapt_on_names)

            if not isinstance(query, Query):
                filter_aliases[key] = alias

            return alias

    @classmethod
    def _build_example_for_deprecation_warning(cls, element, alias, name, flat,
                                               adapt_on_names) -> str:
        """
        Build message for deprecation warning.

        Returns:
            Example code.

        """
        example = 'Example: cls.aliased(query, Model)'
        with contextlib.suppress(Exception):
            args = {
                'alias': alias,
                'name': name,
                'flat': flat,
                'adapt_on_names': adapt_on_names,
            }
            args_list = []
            for k, v in args.items():
                if not v:
                    continue

                if isinstance(v, str):
                    v = '"{}"'.format(v)
                args_list.append(k + '=' + v)

            example = 'Hint: cls.aliased(query, {}, {})'.format(
                element.__name__, ', '.join(args_list))

        return example

    @classmethod
    def _aliases_from_info(
            cls, info: graphene.ResolveInfo) -> 'Dict[str, _MapperEntity]':
        """
        Get cached aliases from graphene ResolveInfo object.

        Notes:
            Deprecated.

        Args:
            info: Graphene ResolveInfo object.

        Returns:
            Dictionary of model aliases.

        """
        context = info.context

        if isinstance(context, dict):
            filter_aliases = context[cls._filter_aliases]
        elif '__dict__' in context.__dir__():
            filter_aliases = getattr(context, cls._filter_aliases)
        else:
            raise RuntimeError(
                'Not supported with info.context type {}'.format(
                    type(context)))

        return filter_aliases

    @classmethod
    def _aliases_from_query(cls, query: Query) -> 'Dict[str, _MapperEntity]':
        """
        Get aliases from SQLAlchemy query.

        Args:
            query: SQLAlchemy query.

        Returns:
            Dictionary of model aliases.

        """
        aliases = {(mapper._target, mapper.name): mapper.entity
                   for mapper in query._join_entities}

        return aliases

    @classmethod
    def _generate_default_filters(
            cls, model,
            field_filters: 'Dict[str, Union[Iterable[str], Any]]') -> dict:
        """
        Generate GraphQL fields from SQLAlchemy model columns.

        Args:
            model: SQLAlchemy model.
            field_filters: Filters for fields.

        Returns:
            GraphQL fields dictionary:
            field name (key) - field instance (value).

        """
        graphql_filters = {}
        filters_map = cls.ALLOWED_FILTERS
        model_fields = cls._get_model_fields_data(model, field_filters.keys())

        for field_name, field_object in model_fields.items():
            column_type = field_object['type']

            expressions = field_filters[field_name]
            if expressions == cls.ALL:
                if column_type is None:
                    raise ValueError(
                        'Unsupported field type for automatic filter binding')

                type_class = column_type.__class__
                try:
                    expressions = filters_map[type_class].copy()
                except KeyError:
                    for type_, exprs in filters_map.items():
                        if issubclass(type_class, type_):
                            expressions = exprs.copy()
                            break
                    else:
                        raise KeyError('Unsupported column type. '
                                       'Hint: use EXTRA_ALLOWED_FILTERS.')

                if field_object['nullable']:
                    expressions.append(cls.IS_NULL)

            field_type = cls._get_gql_type_from_sqla_type(
                column_type, field_object['column'])

            fields = cls._generate_filter_fields(expressions, field_name,
                                                 field_type,
                                                 field_object['nullable'])
            for name, field in fields.items():
                graphql_filters[name] = get_field_as(field,
                                                     graphene.InputField)

        return graphql_filters

    @classmethod
    def _get_gql_type_from_sqla_type(
        cls, column_type, sqla_column
    ) -> 'Union[Type[graphene.ObjectType], Type[GenericScalar]]':
        """
        Get GraphQL type from SQLAlchemy column.

        Args:
            column_type: SQLAlchemy column type.
            sqla_column: SQLAlchemy column or hybrid attribute.

        Returns:
            GraphQL type.

        """
        if column_type is None:
            return GenericScalar
        else:
            return convert_sqlalchemy_type(column_type, sqla_column)

    @classmethod
    def _get_model_fields_data(cls, model, only_fields: 'Iterable[str]'):
        """
        Get model columns.

        Args:
            model: SQLAlchemy model.
            only_fields: Filter of fields.

        Returns:
            Fields info.

        """
        model_fields = {}

        inspected = inspection.inspect(model)
        for descr in inspected.all_orm_descriptors:
            if isinstance(descr, hybrid_property):
                attr = descr
                name = attr.__name__
                if name not in only_fields:
                    continue

                model_fields[name] = {
                    'column': attr,
                    'type': None,
                    'nullable': True,
                }

            elif isinstance(descr, InstrumentedAttribute):
                attr = descr.property
                name = attr.key
                if name not in only_fields:
                    continue

                column = attr.columns[0]
                model_fields[name] = {
                    'column': column,
                    'type': column.type,
                    'nullable': column.nullable,
                }

        return model_fields

    @staticmethod
    def _is_graphene_enum(obj: 'Any') -> bool:
        """
        Return whether 'obj' is an enum.

        Args:
            obj: lambda or graphene.Field

        Returns:
            boolean

        """
        if gqls_version < (2, 2, 0):
            # https://github.com/graphql-python/graphene-sqlalchemy/blob/v2.1.2/graphene_sqlalchemy/converter.py#L147
            return isinstance(
                obj, graphene.Field) and isinstance(  # pragma: no cover
                    obj._type, graphene.types.enum.EnumMeta)
        elif gqls_version == (2, 2, 0):
            # https://github.com/graphql-python/graphene-sqlalchemy/blob/db3e9f4c3baad3e62c113d4a9ddd2e3983d324f2/graphene_sqlalchemy/converter.py#L150
            return isinstance(obj, graphene.Field) and callable(
                obj._type)  # pragma: no cover
        else:
            # https://github.com/graphql-python/graphene-sqlalchemy/blob/17d535efba03070cbc505d915673e0f24d9ca60c/graphene_sqlalchemy/converter.py#L216
            return callable(obj) and obj.__name__ == '<lambda>'

    @staticmethod
    def _get_enum_from_field(
        enum: 'Union[Callable, graphene.Field]', ) -> graphene.Enum:
        """
        Get graphene enum.

        Args:
            enum: lambda or graphene.Field

        Returns:
            Graphene enum.

        """
        if gqls_version < (2, 2, 0):
            # AssertionError: Found different types
            # with the same name in the schema: ...
            raise AssertionError(  # pragma: no cover
                'Enum is not supported. '
                'Requires graphene-sqlalchemy 2.2.0 or higher.')
        elif gqls_version == (2, 2, 0):
            # https://github.com/graphql-python/graphene-sqlalchemy/compare/2.1.2...2.2.0#diff-9202780f6bf4790a0d960de553c086f1L155
            return enum._type()()  # pragma: no cover
        else:
            # https://github.com/graphql-python/graphene-sqlalchemy/compare/2.2.0...2.2.1#diff-9202780f6bf4790a0d960de553c086f1L150
            return enum()()

    @classmethod
    def _generate_filter_fields(
        cls,
        expressions: 'List[str]',
        field_name: str,
        field_type: 'Type[graphene.ObjectType]',
        nullable: bool,
    ) -> 'Dict[str, graphene.ObjectType]':
        """
        Generate all available filters for model column.

        Args:
            expressions: Allowed expressions. Example: ['eq', 'lt', 'gt'].
            field_name: Model column name.
            field_type: GraphQL field type.
            nullable: Whether the field can be null.

        Returns:
            GraphQL fields dictionary.

        """
        filters = {}

        for op in expressions:
            key = field_name
            graphql_name = cls.GRAPHQL_EXPRESSION_NAMES[op]
            if graphql_name:
                key += DELIMITER + graphql_name

            doc = cls.DESCRIPTIONS.get(op)
            try:
                filter_field = cls.FILTER_OBJECT_TYPES[op](field_type,
                                                           nullable, doc)
            except KeyError:
                if isinstance(field_type, graphene.List):
                    filter_field = field_type
                elif cls._is_graphene_enum(field_type):
                    filter_field = cls._get_enum_from_field(field_type)
                else:
                    field_type = _get_class(field_type)
                    filter_field = field_type(description=doc)

            filters[key] = filter_field

        return filters

    @classmethod
    def filter(cls, info: ResolveInfo, query: Query,
               filters: 'FilterType') -> Query:
        """
        Return a new query instance with the args ANDed to the existing set.

        Args:
            info: GraphQL execution info.
            query: SQLAlchemy query.
            filters: Filters dictionary.

        Returns:
            Filtered query instance.

        """
        context = info.context

        if isinstance(context, dict):
            context[cls._filter_aliases] = {}
        elif '__dict__' in context.__dir__():
            setattr(context, cls._filter_aliases, {})
        else:
            msg = ('Graphene-SQLAlchemy-Filter: '
                   'info.context has an unsupported type {}. '
                   'Now cls.aliased(info, ...) is not supported. '
                   'Allowed types: dict and object with __dict__ attribute.'
                   ).format(type(context))
            warnings.warn(msg, RuntimeWarning)

        query, sqla_filters = cls._translate_many_filter(info, query, filters)
        if sqla_filters is not None:
            query = query.filter(*sqla_filters)

        return query

    @classmethod
    @lru_cache(maxsize=500)
    def _split_graphql_field(cls, graphql_field: str) -> 'Tuple[str, str]':
        """
        Get model field name and expression.

        Args:
            graphql_field: Field name.

        Returns:
            Model field name and expression name.

        """
        empty_expr = None

        expression_to_name = sorted(cls.GRAPHQL_EXPRESSION_NAMES.items(),
                                    key=lambda x: -len(x[1]))

        for expression, name in expression_to_name:
            if name == '':
                empty_expr = expression
                continue

            key = DELIMITER + name
            if graphql_field.endswith(key):
                return graphql_field[:-len(key)], expression

        if empty_expr is not None:
            return graphql_field, empty_expr

        raise KeyError('Operator not found "{}"'.format(graphql_field))

    @classmethod
    def _translate_filter(cls, info: ResolveInfo, query: Query, key: str,
                          value: 'Any') -> 'Tuple[Query, Any]':
        """
        Translate GraphQL to SQLAlchemy filters.

        Args:
            info: GraphQL resolve info.
            query: SQLAlchemy query.
            key: Filter key: model field, 'or', 'and', 'not', custom filter.
            value: Filter value.

        Returns:
            SQLAlchemy clause.

        """
        if key in cls._custom_filters:
            filter_name = key + '_filter'
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', SAWarning)
                clause = getattr(cls, filter_name)(info, query, value)
                if isinstance(clause, tuple):
                    query, clause = clause

            return query, clause

        if key == cls.GRAPHQL_EXPRESSION_NAMES[cls.AND]:
            return cls._translate_many_filter(info, query, value, and_)

        if key == cls.GRAPHQL_EXPRESSION_NAMES[cls.OR]:
            return cls._translate_many_filter(info, query, value, or_)

        if key == cls.GRAPHQL_EXPRESSION_NAMES[cls.NOT]:
            return cls._translate_many_filter(info, query, value,
                                              lambda *x: not_(and_(*x)))

        field, expression = cls._split_graphql_field(key)
        filter_function = cls.FILTER_FUNCTIONS[expression]

        try:
            model_field = getattr(cls.model, field)
        except AttributeError:
            raise KeyError('Field not found: ' + field)

        model_field_type = model_field.type
        if isinstance(model_field_type, sqltypes.Enum):
            value = model_field_type.enum_class(value)

        clause = filter_function(model_field, value)
        return query, clause

    @classmethod
    def _translate_many_filter(
        cls,
        info: ResolveInfo,
        query: Query,
        filters: 'Union[List[FilterType], FilterType]',
        join_by: 'Callable' = None,
    ) -> 'Tuple[Query, Any]':
        """
        Translate several filters.

        Args:
            info: GraphQL resolve info.
            query: SQLAlchemy query.
            filters: GraphQL filters.
            join_by: Join translated filters.

        Returns:
            SQLAlchemy clause.

        """
        result = []

        # Filters from 'and', 'or', 'not'.
        if isinstance(filters, list):
            for f in filters:
                query, local_filters = cls._translate_many_filter(
                    info, query, f, and_)
                if local_filters is not None:
                    result.append(local_filters)

        else:
            for k, v in filters.items():
                query, r = cls._translate_filter(info, query, k, v)
                if r is not None:
                    result.append(r)

        if not result:
            return query, None

        if join_by is None:
            return query, result

        return query, join_by(*result)
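
For orientation (a hedged sketch, not part of the example above): this class matches the public FilterSet API of graphene-sqlalchemy-filter, where a concrete filter set names a model plus the expressions allowed per column, and filter() is applied inside a resolver. The User model and field names below are hypothetical.

from graphene_sqlalchemy_filter import FilterSet

class UserFilter(FilterSet):
    class Meta:
        model = User  # hypothetical SQLAlchemy model
        fields = {
            'username': ['eq', 'ne', 'in', 'ilike'],
            'is_active': [...],  # Ellipsis = all available expressions
        }

# Inside a resolver, filter keys combine field name and expression suffix:
# query = UserFilter.filter(info, query, {'username_ilike': '%bob%'})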
Example #37
0
File: compiler.py Project: tvial/ibis
def _reduce_tokens(tokens, arg):
    # current list of tokens
    curtokens = []

    # reduced list of tokens that accounts for blacklisted values
    reduced = []

    non_special_tokens = (frozenset(_strftime_to_postgresql_rules) -
                          _strftime_blacklist)

    # TODO: how much of a hack is this?
    for token in tokens:

        # we are a non-special token %A, %d, etc.
        if token in non_special_tokens:
            curtokens.append(_strftime_to_postgresql_rules[token])

        # we have a string like DD, to escape this we
        # surround it with double quotes
        elif token in _lexicon_values:
            curtokens.append('"%s"' % token)

        # we have a token that needs special treatment
        elif token in _strftime_blacklist:
            if token == '%w':
                value = sa.extract('dow', arg)  # 0 based day of week
            elif token == '%U':
                value = sa.cast(sa.func.to_char(arg, 'WW'), sa.SMALLINT) - 1
            elif token == '%c' or token == '%x' or token == '%X':
                # re scan and tokenize this pattern
                try:
                    new_pattern = _strftime_to_postgresql_rules[token]
                except KeyError:
                    raise ValueError(
                        'locale specific date formats (%%c, %%x, %%X) are '
                        'not yet implemented for %s' % platform.system())

                new_tokens, _ = _scanner.scan(new_pattern)
                value = reduce(sa.sql.ColumnElement.concat,
                               _reduce_tokens(new_tokens, arg))
            elif token == '%e':
                # pad with spaces instead of zeros
                value = sa.func.replace(sa.func.to_char(arg, 'DD'), '0', ' ')

            reduced += [
                sa.func.to_char(arg, ''.join(curtokens)),
                sa.cast(value, sa.TEXT)
            ]

            # empty current token list in case there are more tokens
            del curtokens[:]

        # uninteresting text
        else:
            curtokens.append(token)
    else:
        # append result to `reduced` if we had more tokens or if we have no
        # blacklisted tokens
        if curtokens:
            reduced.append(sa.func.to_char(arg, ''.join(curtokens)))
    return reduced
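
A minimal sketch of how the reduced pieces are meant to be consumed (the rule tables and scanner below are simplified stand-ins, not ibis's real module-level ones):

import re
from functools import reduce
import sqlalchemy as sa

# Simplified stand-in rule tables (hypothetical values).
_strftime_to_postgresql_rules = {'%Y': 'YYYY', '%m': 'MM', '%d': 'DD'}
_strftime_blacklist = frozenset()
_lexicon_values = frozenset()
_scanner = re.Scanner([(r'%.', lambda s, tok: tok),
                       (r'[^%]+', lambda s, tok: tok)])

def strftime_to_sql(arg, pattern):
    tokens, _ = _scanner.scan(pattern)
    # Concatenate the to_char()/CAST fragments into one string expression.
    return reduce(sa.sql.ColumnElement.concat, _reduce_tokens(tokens, arg))

# strftime_to_sql(sa.column('created_at'), '%Y-%m-%d')
# compiles to: to_char(created_at, 'YYYY-MM-DD')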
Example #38
0
 def test_crit_string_path(self):
     col = self.tables.data_table.c['data']
     self._test_index_criteria(
         cast(col[("nested", "elem2", "elem3", "elem4")],
              String) == '"elem5"', "r5")
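
For context, a hedged sketch of the fixture such a test presumes (table and column names assumed): tuple indexing on a JSON column renders a chained path lookup, and the cast to String lets it be compared against a JSON-encoded literal.

import sqlalchemy as sa
from sqlalchemy import cast, String

metadata = sa.MetaData()
data_table = sa.Table('data_table', metadata,
                      sa.Column('name', sa.String(30)),
                      sa.Column('data', sa.JSON))

stmt = sa.select([data_table]).where(
    cast(data_table.c.data[('nested', 'elem2', 'elem3', 'elem4')],
         String) == '"elem5"')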
Example #39
0
File: service.py Project: yiluzhu/lemur
def render(args):
    query = database.session_query(PendingCertificate)
    time_range = args.pop("time_range")
    destination_id = args.pop("destination_id")
    notification_id = args.pop("notification_id", None)
    show = args.pop("show")
    # owner = args.pop('owner')
    # creator = args.pop('creator')  # TODO we should enable filtering by owner

    filt = args.pop("filter")

    if filt:
        terms = filt.split(";")

        if "issuer" in terms:
            # we can't rely on issuer being correct in the cert directly so we combine queries
            sub_query = (database.session_query(Authority.id).filter(
                Authority.name.ilike("%{0}%".format(terms[1]))).subquery())

            query = query.filter(
                or_(
                    PendingCertificate.issuer.ilike("%{0}%".format(terms[1])),
                    PendingCertificate.authority_id.in_(sub_query),
                ))

        elif "destination" in terms:
            query = query.filter(
                PendingCertificate.destinations.any(
                    Destination.id == terms[1]))
        elif "notify" in filt:
            query = query.filter(
                PendingCertificate.notify == truthiness(terms[1]))
        elif "active" in filt:
            query = query.filter(
                PendingCertificate.active == truthiness(terms[1]))
        elif "cn" in terms:
            query = query.filter(
                or_(
                    PendingCertificate.cn.ilike("%{0}%".format(terms[1])),
                    PendingCertificate.domains.any(
                        Domain.name.ilike("%{0}%".format(terms[1]))),
                ))
        elif "id" in terms:
            query = query.filter(
                PendingCertificate.id == cast(terms[1], Integer))
        else:
            query = database.filter(query, PendingCertificate, terms)

    if show:
        sub_query = (database.session_query(
            Role.name).filter(Role.user_id == args["user"].id).subquery())
        query = query.filter(
            or_(
                PendingCertificate.user_id == args["user"].id,
                PendingCertificate.owner.in_(sub_query),
            ))

    if destination_id:
        query = query.filter(
            PendingCertificate.destinations.any(
                Destination.id == destination_id))

    if notification_id:
        query = query.filter(
            PendingCertificate.notifications.any(
                Notification.id == notification_id))

    if time_range:
        to = arrow.now().shift(weeks=+time_range).format("YYYY-MM-DD")
        now = arrow.now().format("YYYY-MM-DD")
        query = query.filter(PendingCertificate.not_after <= to).filter(
            PendingCertificate.not_after >= now)

    # Only show unresolved certificates in the UI
    query = query.filter(PendingCertificate.resolved.is_(False))
    return database.sort_and_page(query, PendingCertificate, args)
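
A note on the filter format consumed above (inferred from filt.split(';')): the filter argument is a single 'name;value' string. A hypothetical invocation:

# args = {'filter': 'cn;example.com', 'time_range': None, 'destination_id': None,
#         'notification_id': None, 'show': False, 'user': None}
# render(args)  # 'cn;example.com' -> PendingCertificate.cn ILIKE '%example.com%'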
Example #40
0
File: compiler.py Project: tvial/ibis
def _second(t, expr):
    # extracting the second gives us the fractional part as well, so smash that
    # with a cast to SMALLINT
    sa_arg, = map(t.translate, expr.op().args)
    return sa.cast(sa.extract('second', sa_arg), sa.SMALLINT)
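
As a quick check (assuming the PostgreSQL dialect), the expression built here compiles to a plain EXTRACT wrapped in a CAST:

import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

expr = sa.cast(sa.extract('second', sa.column('ts')), sa.SMALLINT)
print(expr.compile(dialect=postgresql.dialect()))
# CAST(EXTRACT(second FROM ts) AS SMALLINT)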
Example #41
0
    users.c.organization_id,
    organizations.c.organization_name,
    users.c.country_code,
    users.c.country_subdivision_code,
    users.c.github_email.label("email"),
    users.c.is_gpu_enabled,
    ranked_bots.c.bot_id,
    ranked_bots.c.games_played.label("num_games"),
    ranked_bots.c.version_number.label("num_submissions"),
    ranked_bots.c.mu,
    ranked_bots.c.sigma,
    ranked_bots.c.score,
    ranked_bots.c.language,
    ranked_bots.c.update_time,
    # Perform a no-op operation so we can label the column easily
    sqlalchemy.cast(sqlalchemy.sql.text("ranked_bots.bot_rank"),
                    sqlalchemy.Integer).label("rank"),
    ranked_bots.c.compile_status,
]).select_from(
    ranked_bots.join(
        users,
        ranked_bots.c.user_id == users.c.id,
    ).join(organizations,
           organizations.c.id == users.c.organization_id,
           isouter=True)).alias("ranked_bots_users")


# Users, ranked by their best bot
def ranked_users_query(alias="ranked_users"):
    ranked_bots = ranked_bots_query("rurank")
    return sqlalchemy.sql.select([
        users.c.id.label("user_id"),
Example #42
0
 def test_crit_mixed_path(self):
     col = self.tables.data_table.c['data']
     self._test_index_criteria(
         cast(col[("key3", 1, "six")], String) == '"seven"', "r3")
Example #43
0
 def __ne__(self, other):
     return self._case() != cast(other, String)
Example #44
0
def main(inicio, fin, ci):
    """
    En el archivo bandejas.cfg se guardan los parametros de configuracion para la base de datos. El procedimiento toma el archivo de desde el mismo directorio donde se encuentra. En el archivo config.cfg se guardan parametros de configuracion.
    \nEjecucion
    El procedimiento se ejecuta de la siguiente forma:
    (ejemplo)\n
    $python bj.py --inicio='2018-05-01' --fin='2018-05-14'

        - Novedades en el periodo [inicio, fin) , incluyendo inicio y no incluye la fecha de fin.Las novedades se refiere a las altas de designacion, ceses de designacion, anulaciones y pasajes a suplencias
        - Para todas las personas (PerId) que tuvieron novedades en el periodo indicado, se toman los datos de toda la historia de altas, ceses, con tope el 01/03 del año correspondiente a la fecha de inicio que es pasada como parametro.
    """

    with open('bandejas.cfg', 'r') as ymlfile:
        cdb = yaml.load(ymlfile)

    with open('config.cfg', 'r') as ymlfile:
        cfg = yaml.load(ymlfile)

    engine = create_engine('mysql+pymysql://' + cdb['personal']['usr'] + ':' +
                           cdb['personal']['password'] + '@' +
                           cdb['personal']['host'] + '/' +
                           cdb['personal']['bd'])
    engine_bandeja_in = create_engine('mysql+pymysql://' +
                                      cdb['bandeja_in']['usr'] + ':' +
                                      cdb['bandeja_in']['password'] + '@' +
                                      cdb['bandeja_in']['host'] + '/' +
                                      cdb['bandeja_in']['bd'])
    engine_bandeja_out = create_engine('mysql+pymysql://' +
                                       cdb['bandeja_out']['usr'] + ':' +
                                       cdb['bandeja_out']['password'] + '@' +
                                       cdb['bandeja_out']['host'] + '/' +
                                       cdb['bandeja_out']['bd'])

    puestos_funcion = cfg[
        'puestos_funcion']  # the positions considered direct teaching
    parametros = {}
    parametros['p1d'] = dt.date(int(inicio.split('-')[0]),
                                int(inicio.split('-')[1]),
                                int(inicio.split('-')[2]))
    parametros['p2d'] = dt.date(int(fin.split('-')[0]), int(fin.split('-')[1]),
                                int(fin.split('-')[2]))

    # designations starting on or after this cutoff (the month after the given end date) are not let through
    parametros['tope'] = dt.date(
        int(fin.split('-')[0]) + (1 if (fin.split('-')[1] == '12') else 0),
        1 if (fin.split('-')[1] == '12') else int(fin.split('-')[1]) + 1, 1)
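    # e.g. fin='2018-05-14' -> tope = date(2018, 6, 1); fin='2018-12-20' -> tope = date(2019, 1, 1)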

    # the substitution causes of interest
    suplcausales = cfg['suplcausales']

    parametros['inicioLectivo'] = dt.datetime(
        int(inicio.split('-')[0]) - (1 if inicio.split('-')[1] < '03' else 0),
        3, 1)
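    # e.g. inicio='2018-05-01' -> datetime(2018, 3, 1); inicio='2019-02-15' -> datetime(2018, 3, 1)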

    # load metadata for the Personal model
    metadata = sa.MetaData()
    relaciones_laborales = sa.Table('RELACIONES_LABORALES',
                                    metadata,
                                    autoload=True,
                                    autoload_with=engine)
    anulaciones = sa.Table('ANULACIONES',
                           metadata,
                           autoload=True,
                           autoload_with=engine)
    funciones_relacion_laboral = sa.Table('FUNCIONES_RELACION_LABORAL',
                                          metadata,
                                          autoload=True,
                                          autoload_with=engine)
    funciones_asignadas = sa.Table('FUNCIONES_ASIGNADAS',
                                   metadata,
                                   autoload=True,
                                   autoload_with=engine)
    sillas = sa.Table('SILLAS', metadata, autoload=True, autoload_with=engine)
    cargas_horarias = sa.Table('CARGAS_HORARIAS',
                               metadata,
                               autoload=True,
                               autoload_with=engine)
    silla_grupo_materia = sa.Table('SILLAGRUPOMATERIA',
                                   metadata,
                                   autoload=True,
                                   autoload_with=engine)
    puestos = sa.Table('PUESTOS',
                       metadata,
                       autoload=True,
                       autoload_with=engine)
    denominaciones_cargo = sa.Table('DENOMINACIONES_CARGOS',
                                    metadata,
                                    autoload=True,
                                    autoload_with=engine)
    suplencias = sa.Table('SUPLENCIAS',
                          metadata,
                          autoload=True,
                          autoload_with=engine)
    funciones_agrup_lin = sa.Table('FUNCION_AGRUP_LIN',
                                   metadata,
                                   autoload=True,
                                   autoload_with=engine)

    # load Personas metadata
    personas = sa.Table('PERSONAS',
                        metadata,
                        schema="Personas",
                        autoload=True,
                        autoload_with=engine)
    personas_documentos = sa.Table('PERSONASDOCUMENTOS',
                                   metadata,
                                   schema="Personas",
                                   autoload=True,
                                   autoload_with=engine)

    # load subject data from Estudiantil
    asignaturas_materias = sa.Table('ASIGNATURAS_MATERIAS',
                                    metadata,
                                    schema="Estudiantil",
                                    autoload=True,
                                    autoload_with=engine)

    # load the Estudiantil subjects
    query_asignaturas_materias = sa.select([asignaturas_materias])
    df_asignaturas_materias = pd.read_sql_query(query_asignaturas_materias,
                                                engine,
                                                params=parametros)

    # load data from the SIAP database for the dependencies
    tabla_institucional = sa.Table('tabla_institucional',
                                   metadata,
                                   autoload=True,
                                   autoload_with=engine_bandeja_in)
    query_tabla_institucional = sa.select([
        tabla_institucional.c.DEP_AS400.label('dependid'),
        tabla_institucional.c.DEP_DBC.label('dependidSiap')
    ]).select_from(tabla_institucional)
    df_tabla_institucional = pd.read_sql_query(query_tabla_institucional,
                                               engine_bandeja_in,
                                               params=parametros)

    # load the function groups that identify support hours, POB, POP, talleristas, code 68
    query_funciones_cargo = sa.select([funciones_agrup_lin])
    df_funciones_cargo = pd.read_sql_query(query_funciones_cargo,
                                           engine,
                                           params=parametros)
    df_funciones_hap = df_funciones_cargo.loc[
        df_funciones_cargo.Funcion_Agrup_Cab_Id == 1, 'FuncionId']
    df_funciones_POB = df_funciones_cargo.loc[
        df_funciones_cargo.Funcion_Agrup_Cab_Id == 8, 'FuncionId']
    df_funciones_POP = df_funciones_cargo.loc[
        df_funciones_cargo.Funcion_Agrup_Cab_Id == 7, 'FuncionId']
    df_funciones_68 = df_funciones_cargo.loc[
        df_funciones_cargo.Funcion_Agrup_Cab_Id == 5, 'FuncionId']
    df_funciones_talleristas = df_funciones_cargo.loc[
        df_funciones_cargo.Funcion_Agrup_Cab_Id == 9, 'FuncionId']
    df_coordinadores_especiales = df_funciones_cargo.loc[
        df_funciones_cargo.Funcion_Agrup_Cab_Id == 10, 'FuncionId']

    # changes (novedades)
    query_novedades = sa. \
        select([relaciones_laborales.c.PersonalPerId, relaciones_laborales.c.RelLabId]). \
        select_from(relaciones_laborales.join(puestos)). \
        where( \
              (puestos.c.PuestoFuncionId.in_(puestos_funcion)) & \
              # designated RL

              (relaciones_laborales.c.PersonalPerId <> None) & \
              ( \
               # starts within the bandeja period

               ( \
                (relaciones_laborales.c.RelLabFchIniActividades >= sa.bindparam('p1d')) & \
                (relaciones_laborales.c.RelLabFchIniActividades < sa.bindparam('p2d')) \
               ) | \
               # or ends within the bandeja period

               ( \
                (relaciones_laborales.c.RelLabCeseFchReal >= sa.bindparam('p1d')) & \
                (relaciones_laborales.c.RelLabCeseFchReal < sa.bindparam('p2d')) \
               ) | \
               # or the start was recorded late

               ( \
                (relaciones_laborales.c.RelLabFchIniActividades < sa.bindparam('p1d')) & \
                (relaciones_laborales.c.RelLabDesignFchAlta >= sa.bindparam('p1d')) & \
                (relaciones_laborales.c.RelLabDesignFchAlta < sa.bindparam('p2d')) \
               ) | \
               # or the termination was recorded late

               ( \
                (relaciones_laborales.c.RelLabCeseFchReal < sa.bindparam('p1d')) & \
                (relaciones_laborales.c.RelLabCeseFchAlta >= sa.bindparam('p1d')) & \
                (relaciones_laborales.c.RelLabCeseFchAlta < sa.bindparam('p2d'))
               ) \
              ) \
             )
    df_novedades = pd.read_sql_query(query_novedades,
                                     engine,
                                     params=parametros)

    # load the period's annulments
    query_anulaciones_periodo = sa. \
        select([relaciones_laborales.c.PersonalPerId,relaciones_laborales.c.RelLabId, anulaciones.c.AnulacionFchAlta]). \
        select_from(anulaciones.join(relaciones_laborales, cast(anulaciones.c.AnulacionValorPkTabla,Integer)==relaciones_laborales.c.RelLabId).join(puestos)). \
        where( \
              (anulaciones.c.AnulacionFchAlta >= sa.bindparam('p1d')) & \
              (anulaciones.c.AnulacionFchAlta < sa.bindparam('p2d')) & \
              (anulaciones.c.AnulacionTipoNombre=='DESIGNACION') & \
              (puestos.c.PuestoFuncionId.in_(puestos_funcion)) \
             )
    df_anulaciones_periodo = pd.read_sql(query_anulaciones_periodo,
                                         engine,
                                         params=parametros)

    rlt = aliased(relaciones_laborales)  # incumbents' RLs
    rls = aliased(relaciones_laborales)  # substitutes' RLs

    # perids that had changes or have events in the bandeja period (or the one passed as a parameter)
    if ci is not None:  # if a ci was passed as a parameter, only that ci's changes matter
        query_perid = sa.select([personas_documentos.c.PerId
                                 ]).select_from(personas_documentos).where(
                                     (personas_documentos.c.PaisCod == 'UY')
                                     & (personas_documentos.c.DocCod == 'CI')
                                     & (personas_documentos.c.PerDocId == ci))
        set_perids_novedades = pd.read_sql_query(
            query_perid, engine, params=parametros)['PerId'].unique().tolist()
    else:
        # load the period's substitutions
        query_suplencias = sa. \
            select([rlt.c.PersonalPerId,suplencias.c.RelLabId,func.GREATEST(cast(suplencias.c.SuplFchAlta,Date),rlt.c.RelLabFchIniActividades).label('SuplFchAlta'),suplencias.c.SuplCausId,rlt.c.RelLabFchIniActividades,rlt.c.RelLabCeseFchReal,rls.c.RelLabAnulada.label('RelLabAnuladaS'),rls.c.RelLabFchIniActividades.label('RelLabFchIniActividadesS'),rls.c.RelLabCeseFchReal.label('RelLabCeseFchRealS')]). \
            select_from(rlt.join(puestos).join(suplencias, suplencias.c.RelLabId==rlt.c.RelLabId).join(rls, rls.c.RelLabId==suplencias.c.SuplRelLabId)). \
            where((puestos.c.PuestoFuncionId.in_(puestos_funcion)) & \
                  (suplencias.c.SuplCausId.in_(suplcausales)) & \
                  (rlt.c.RelLabAnulada==0) & \
                  ((rlt.c.RelLabFchIniActividades < rlt.c.RelLabCeseFchReal) | (rlt.c.RelLabCeseFchReal==None)) & \
                  # the rls could be annulled, in which case the change is flagged in RelLabCeseFchAlta

                  ( \
                   # the substitution start falls within the bandeja period:

                   ((func.GREATEST(cast(suplencias.c.SuplFchAlta,Date),rlt.c.RelLabFchIniActividades) < sa.bindparam('p2d')) & \
                    (func.GREATEST(cast(suplencias.c.SuplFchAlta,Date),rlt.c.RelLabFchIniActividades) >= sa.bindparam('p1d')) \
                   ) | \
                   # or the substitution start was modified within the bandeja period:

                   ((cast(suplencias.c.Suplencias_FchUltAct,Date) < sa.bindparam('p2d')) & \
                    (cast(suplencias.c.Suplencias_FchUltAct,Date) >= sa.bindparam('p1d')) \
                   ) | \
                   # or the substitution end falls within the bandeja period:

                   (((rls.c.RelLabCeseFchReal < sa.bindparam('p2d')) | (rls.c.RelLabCeseFchReal==None)) & \
                    (rls.c.RelLabCeseFchReal >= sa.bindparam('p1d')) \
                   ) | \
                   # or the substitution end was modified or annulled within the bandeja period:

                   ((rls.c.RelLabCeseFchAlta < sa.bindparam('p2d')) & \
                    (rls.c.RelLabCeseFchAlta >= sa.bindparam('p1d')) \
                   ) \
                  ) \
                 )
        df_suplencias = pd.read_sql_query(query_suplencias,
                                          engine,
                                          params=parametros)

        set_perids_novedades = df_novedades['PersonalPerId'].append(
            df_anulaciones_periodo['PersonalPerId']).append(
                df_suplencias['PersonalPerId']).unique().tolist()

    if len(set_perids_novedades) == 0:  # no cédulas (IDs) to process
        return

    ## Take the history of the perids with changes
    # basic history join
    j3 = rlt.join(puestos).join(funciones_relacion_laboral).join(
        funciones_asignadas).join(sillas).join(
            silla_grupo_materia,
            sillas.c.SillaId == silla_grupo_materia.c.SillaId,
            isouter=True).join(
                asignaturas_materias,
                sillas.c.MateriaId == asignaturas_materias.c.MateriaId,
                isouter=True)

    # substitutions join
    jsupl = suplencias.join(rls,
                            ((rls.c.RelLabId == suplencias.c.SuplRelLabId) &
                             (rls.c.RelLabAnulada == 0) &
                             (suplencias.c.SuplCausId.in_(suplcausales))))
    # clone of the substitutions join, used to find the next substitution
    supl_siguiente = aliased(suplencias)  # substitution consecutive to the current one
    rls_siguiente = aliased(relaciones_laborales)
    jsupl_siguiente = supl_siguiente.join(
        rls_siguiente,
        ((rls_siguiente.c.RelLabId == supl_siguiente.c.SuplRelLabId) &
         (rls_siguiente.c.RelLabAnulada == 0) &
         (supl_siguiente.c.SuplCausId.in_(suplcausales))))
    # clone of the substitutions join, used to ensure there is no substitution between the current and the next
    supl_intermedia = aliased(suplencias)  # substitution between the current and the next
    rls_intermedia = aliased(relaciones_laborales)
    jsupl_intermedia = supl_intermedia.join(
        rls_intermedia,
        ((rls_intermedia.c.RelLabId == supl_intermedia.c.SuplRelLabId) &
         (rls_intermedia.c.RelLabAnulada == 0) &
         (supl_intermedia.c.SuplCausId.in_(suplcausales))))

    # basic history of the perids with changes; excludes RLs moved down to substitution
    query_historia_rl = sa. \
        select([rlt.c.PersonalPerId, puestos.c.PuestoFuncionId,rlt.c.RelLabId,rlt.c.RelLabDesignCaracter,rlt.c.RelLabCicloPago,rlt.c.RelLabFchIniActividades, rlt.c.RelLabCeseFchReal, rlt.c.CauBajCod,silla_grupo_materia.c.GrupoMateriaId,sillas.c.TurnoId, sillas.c.SillaDependId,funciones_relacion_laboral.c.FuncRelLabCantHrs,sillas.c.FuncionId,rlt.c.RelLabAnulada,puestos.c.PuestoAsignId,asignaturas_materias.c.AsignId]). \
        select_from( \
            j3. \
            join(jsupl, ((rlt.c.RelLabId==suplencias.c.RelLabId)), isouter=True) \
        ). \
        where((rlt.c.RelLabFchIniActividades >= sa.bindparam('inicioLectivo')) & \
              (rlt.c.PersonalPerId.in_(set_perids_novedades)) & \
              (puestos.c.PuestoFuncionId.in_(puestos_funcion)) & \
              (suplencias.c.RelLabId==None) \
             )
    df_historia_rl = pd.read_sql_query(query_historia_rl,
                                       engine,
                                       params=parametros)
    df_historia_rl.loc[:, 'Origen'] = ['df_historia_rl']

    # Change the subject number for Coordinations
    df_historia_rl.loc[df_historia_rl['AsignId'] == 90,
                       ['AsignId', 'RelLabDesignCaracter']] = [75, 'I']
    # Change the subject number for AAM
    df_historia_rl.loc[df_historia_rl['AsignId'] == 98, 'AsignId'] = 77

    # SUPLENCIAS (substitutions)
    # Each move down to substitution requires (walking them in date order):
    #  (1) adding a record from the end of the substitution to the original end (step 2 may later change its termination)
    #  (2) terminating the RL in force on the substitution start date
    #  (3) if the termination cause calls for it, creating a record (start) for the paid substitution period

    # (1) starts injected into the bandeja for the period after each leave
    query_alta_luego_de_suplencia = sa. \
        select([rlt.c.PersonalPerId,puestos.c.PuestoFuncionId,rlt.c.RelLabId,rlt.c.RelLabDesignCaracter,rlt.c.RelLabCicloPago,func.GREATEST(rlt.c.RelLabFchIniActividades,func.ADDDATE(rls.c.RelLabCeseFchReal,1)).label('RelLabFchIniActividades'),func.IF(supl_siguiente.c.SuplId==None,rlt.c.RelLabCeseFchReal,cast(supl_siguiente.c.SuplFchAlta,Date)).label('RelLabCeseFchReal'),func.IF(supl_siguiente.c.SuplId==None,rlt.c.CauBajCod,'50').label('CauBajCod'),silla_grupo_materia.c.GrupoMateriaId,sillas.c.TurnoId,sillas.c.SillaDependId,funciones_relacion_laboral.c.FuncRelLabCantHrs,sillas.c.FuncionId,rlt.c.RelLabAnulada,puestos.c.PuestoAsignId,asignaturas_materias.c.AsignId]). \
        select_from( \
            jsupl. \
            join(j3, ((rlt.c.RelLabId==suplencias.c.RelLabId) & (rlt.c.RelLabAnulada==0))). \
            join(jsupl_siguiente,
                 ((supl_siguiente.c.RelLabId==rlt.c.RelLabId) & (supl_siguiente.c.SuplId<>suplencias.c.SuplId) & (supl_siguiente.c.SuplFchAlta>=suplencias.c.SuplFchAlta)), \
                 isouter=True). \
            join(jsupl_intermedia, \
                 ((supl_intermedia.c.RelLabId==rlt.c.RelLabId) & (supl_intermedia.c.SuplId<>suplencias.c.SuplId) & (supl_intermedia.c.SuplFchAlta>=suplencias.c.SuplFchAlta) & (supl_intermedia.c.SuplId<>supl_siguiente.c.SuplId) & (supl_intermedia.c.SuplFchAlta<=supl_siguiente.c.SuplFchAlta)), \
                 isouter=True) \
        ). \
        where( \
            (rlt.c.RelLabFchIniActividades >= sa.bindparam('inicioLectivo')) & \
            (rlt.c.PersonalPerId.in_(set_perids_novedades)) & \
            (puestos.c.PuestoFuncionId.in_(puestos_funcion)) & \
            (rls.c.RelLabCeseFchReal<>None)  & \
            (supl_intermedia.c.SuplId==None) & \
            ((supl_siguiente.c.SuplId==None) | ((rls.c.RelLabCeseFchReal<>None) & (cast(supl_siguiente.c.SuplFchAlta,Date) > rls.c.RelLabCeseFchReal))) & \
            (func.ADDDATE(rls.c.RelLabCeseFchReal,1) < func.IF(supl_siguiente.c.SuplId==None,rlt.c.RelLabCeseFchReal,cast(supl_siguiente.c.SuplFchAlta,Date))) \
        )
    df_alta_luego_de_suplencia = pd.read_sql_query(
        query_alta_luego_de_suplencia, engine, params=parametros)
    df_alta_luego_de_suplencia.loc[:,
                                   'Origen'] = ['df_alta_luego_de_suplencia']

    # (2) start injected for the period before the first leave
    query_primera_suplencia = sa. \
        select([rlt.c.PersonalPerId,puestos.c.PuestoFuncionId,rlt.c.RelLabId,rlt.c.RelLabDesignCaracter,rlt.c.RelLabCicloPago,rlt.c.RelLabFchIniActividades,cast(suplencias.c.SuplFchAlta,Date).label('RelLabCeseFchReal'),literal_column('50').label('CauBajCod'),silla_grupo_materia.c.GrupoMateriaId,sillas.c.TurnoId,sillas.c.SillaDependId,funciones_relacion_laboral.c.FuncRelLabCantHrs,sillas.c.FuncionId,rlt.c.RelLabAnulada,puestos.c.PuestoAsignId, asignaturas_materias.c.AsignId]). \
        select_from(
            jsupl. \
            join(j3, ((rlt.c.RelLabId==suplencias.c.RelLabId) & (rlt.c.RelLabAnulada==0))). \
            join(jsupl_intermedia, \
                 ((supl_intermedia.c.RelLabId==rlt.c.RelLabId) & (supl_intermedia.c.SuplId<>suplencias.c.SuplId) & (supl_intermedia.c.SuplFchAlta<=suplencias.c.SuplFchAlta)),
                 isouter=True) \
        ). \
        where( \
            (rlt.c.RelLabFchIniActividades >= sa.bindparam('inicioLectivo')) & \
            (rlt.c.PersonalPerId.in_(set_perids_novedades)) & \
            (puestos.c.PuestoFuncionId.in_(puestos_funcion)) & \
            (supl_intermedia.c.SuplId==None) & \
            (rlt.c.RelLabFchIniActividades < cast(suplencias.c.SuplFchAlta,Date)) \
        )
    df_primera_suplencia = pd.read_sql_query(query_primera_suplencia,
                                             engine,
                                             params=parametros)
    df_primera_suplencia.loc[:, 'Origen'] = ['df_primera_suplencia']

    # (3) starts injected into the bandeja for the leave period when it is a Medical Board (Junta Médica) or Secondment (Pase en Comisión)
    query_alta_suplencia_paga = sa. \
        select([rlt.c.PersonalPerId,puestos.c.PuestoFuncionId,rlt.c.RelLabId,rlt.c.RelLabDesignCaracter,rlt.c.RelLabCicloPago,func.GREATEST(rlt.c.RelLabFchIniActividades,func.ADDDATE(cast(suplencias.c.SuplFchAlta,Date),1)).label('RelLabFchIniActividades'),func.IFNULL(rls.c.RelLabCeseFchReal,rlt.c.RelLabFchFinPrevista).label('RelLabCeseFchReal'),literal_column('50').label('CauBajCod'),silla_grupo_materia.c.GrupoMateriaId,sillas.c.TurnoId,sillas.c.SillaDependId,funciones_relacion_laboral.c.FuncRelLabCantHrs,sillas.c.FuncionId,rlt.c.RelLabAnulada,puestos.c.PuestoAsignId,asignaturas_materias.c.AsignId,suplencias.c.SuplCausId]). \
        select_from(
            jsupl.
            join(j3, ((rlt.c.RelLabId==suplencias.c.RelLabId) & (rlt.c.RelLabAnulada==0))) \
        ). \
        where( \
            (rlt.c.RelLabFchIniActividades >= sa.bindparam('inicioLectivo')) & \
            (rlt.c.PersonalPerId.in_(set_perids_novedades)) & \
            (puestos.c.PuestoFuncionId.in_(puestos_funcion)) & \
            (suplencias.c.SuplCausId.in_([16, 17, 162])) & \
            (func.GREATEST(rlt.c.RelLabFchIniActividades,func.ADDDATE(cast(suplencias.c.SuplFchAlta,Date),1)) <= func.IFNULL(rls.c.RelLabCeseFchReal,rlt.c.RelLabFchFinPrevista)) \
        )
    df_alta_suplencia_paga = pd.read_sql_query(query_alta_suplencia_paga,
                                               engine,
                                               params=parametros)
    df_alta_suplencia_paga.loc[:, 'Origen'] = ['df_alta_suplencia_paga']

    # Medical Boards go with subject 162:
    df_alta_suplencia_paga.loc[df_alta_suplencia_paga['SuplCausId'] == 162,
                               ['AsignId', 'CauBajCod']] = [162, 66]
    # Secondments WITHIN ANEP (DENTRO ANEP) go with dependency 8902
    df_alta_suplencia_paga.loc[df_alta_suplencia_paga['SuplCausId'] == 16,
                               ['SillaDependId', 'CauBajCod']] = [8902, 66]
    # Secondments OUTSIDE SECUN (FUERA SECUN) go with dependency 8901
    df_alta_suplencia_paga.loc[df_alta_suplencia_paga['SuplCausId'] == 17,
                               ['SillaDependId', 'CauBajCod']] = [8901, 66]

    del df_alta_suplencia_paga['SuplCausId']

    df_historia_completa = pd.concat([
        df_historia_rl, df_primera_suplencia, df_alta_luego_de_suplencia,
        df_alta_suplencia_paga
    ],
                                     axis=0)
    df_historia_completa = df_historia_completa.rename(
        columns={
            'RelLabFchIniActividades': 'falta',
            'RelLabCeseFchReal': 'fcese',
            'SillaDependId': 'dependid'
        })

    df_historia_completa = df_historia_completa.reset_index(drop=True)
    df_historia_completa.merge(df_anulaciones_periodo,
                               on='RelLabId',
                               how='left')
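    # NB: the merge result above is not assigned, so it does not modify df_historia_completa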
    df_anulaciones_a_eliminar = df_anulaciones_periodo[
        df_anulaciones_periodo['RelLabId'].isin(df_novedades['RelLabId'])]
    # Remove the annulled records from the history
    df_historia_completa = df_historia_completa[
        df_historia_completa['RelLabId'].isin(
            df_anulaciones_a_eliminar['RelLabId']) == False]

    # fetch the people's data
    query_personas = sa.select([
        personas.c.PerId.label('PersonalPerId'), personas_documentos.c.PerDocId
    ]).select_from(personas.join(personas_documentos)).where(
        (personas_documentos.c.PaisCod == 'UY')
        & (personas_documentos.c.DocCod == 'CI')
        & (personas.c.PerId.in_(set_perids_novedades)))
    df_personas = pd.read_sql_query(query_personas, engine, params=parametros)
    df_historia_completa = df_historia_completa.merge(df_personas,
                                                      on='PersonalPerId',
                                                      how='left')

    # add subject 151 to everyone who does not have one
    df_historia_completa.loc[(
        (df_historia_completa['AsignId'].isnull()) &
        (df_historia_completa['PuestoAsignId'].notnull())),
                             'AsignId'] = df_historia_completa['PuestoAsignId']
    df_historia_completa.loc[(df_historia_completa['AsignId'].isnull()),
                             'AsignId'] = cfg['asignid_otros']

    df_historia_completa = df_historia_completa.loc[:, [
        'PerDocId', 'dependid', 'AsignId', 'RelLabCicloPago',
        'RelLabDesignCaracter', 'FuncRelLabCantHrs', 'falta', 'fcese',
        'CauBajCod', 'GrupoMateriaId', 'FuncionId', 'RelLabAnulada',
        'PersonalPerId', 'RelLabId'
    ]]

    # hardcoded attributes
    df_historia_completa['PerDocTpo'] = 'DO'
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_hap.tolist()),
        'RelLabDesignCaracter'] = cfg['caracter_horas_apoyo']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_hap.tolist()),
        'AsignId'] = cfg['asignid_horas_apoyo']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_POB.tolist()),
        'RelLabDesignCaracter'] = cfg['caracter_pob']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_POB.tolist()),
        'AsignId'] = cfg['asignid_pob']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_POP.tolist()),
        'RelLabDesignCaracter'] = cfg['caracter_pop']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_POP.tolist()),
        'AsignId'] = cfg['asignid_pop']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_68.tolist()),
        'RelLabDesignCaracter'] = cfg['caracter_68']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_68.tolist()),
        'AsignId'] = cfg['asignid_68']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_funciones_talleristas.tolist(
        )), 'RelLabDesignCaracter'] = cfg['caracter_talleristas']
    df_historia_completa.loc[df_historia_completa['FuncionId'].
                             isin(df_funciones_talleristas.tolist()),
                             'AsignId'] = cfg['asignid_talleristas']
    df_historia_completa.loc[
        df_historia_completa['FuncionId'].isin(df_coordinadores_especiales.
                                               tolist()),
        'RelLabDesignCaracter'] = cfg['caracter_especiales']
    df_historia_completa.loc[df_historia_completa['FuncionId'].
                             isin(df_coordinadores_especiales.tolist()),
                             'AsignId'] = cfg['asignid_especiales']
    df_historia_completa.loc[(df_historia_completa['AsignId'] == 75) &
                             (df_historia_completa['fcese'].notnull()),
                             'CauBajCod'] = cfg['causal_coordinacion']
    df_historia_completa.loc[(df_historia_completa['RelLabAnulada'] == 1),
                             'CauBajCod'] = cfg['causal_anulacion']
    df_historia_completa['PerDocPaisCod'] = 'UY'
    df_historia_completa['HorClaCurTpo'] = ''
    df_historia_completa['HorClaCur'] = ''
    df_historia_completa['HorClaArea'] = ''
    df_historia_completa['HorClaAnio'] = 0
    df_historia_completa['HorClaHorTope'] = 0
    df_historia_completa['HorClaObs'] = ''
    df_historia_completa['HorClaNumInt'] = 0
    df_historia_completa['HorClaParPreCod'] = 0
    df_historia_completa['HorClaCompPor'] = 0
    df_historia_completa['HorClaLote'] = 0
    df_historia_completa['HorClaAudUsu'] = 0
    df_historia_completa['HorClaMod'] = 0
    df_historia_completa['HorClaEmpCod'] = 1
    df_historia_completa['HorClaCarNum'] = 0
    df_historia_completa['DesFchCarga'] = date.today()
    df_historia_completa['Resultado'] = 'PE'
    df_historia_completa['Mensaje'] = ''
    df_historia_completa['HorClaFchLib'] = df_historia_completa['fcese']
    df_historia_completa.loc[(df_historia_completa['CauBajCod'].isnull()),
                             'CauBajCod'] = 0

    del df_historia_completa['FuncionId']
    del df_historia_completa['PersonalPerId']

    # Transform the dependency to SIAP
    df_historia_completa = df_historia_completa.merge(df_tabla_institucional)
    del df_historia_completa[
        'dependid']  # drop dependid; dependidSiap will be used instead

    # drop rows where fcese < falta
    df_historia_completa = df_historia_completa.loc[
        (df_historia_completa['fcese'] >= df_historia_completa['falta']) |
        (df_historia_completa['fcese'].isnull())]

    # drop rows where falta >= tope
    df_historia_completa = df_historia_completa.loc[
        df_historia_completa['falta'] < parametros['tope']]

    if ci is not None:  # if a ci was passed as a parameter, keep only that ci's history
        df_historia_completa = df_historia_completa.loc[
            df_historia_completa['PerDocId'] == ci]

    # Rename the fields to match SIAP's ihorasclase table
    df_historia_completa = df_historia_completa.rename(
        columns={
            'PerDocId': 'PerDocNum',
            'RelLabDesignCaracter': 'HorClaCar',
            'RelLabCicloPago': 'HorClaCic',
            'falta': 'HorClaFchPos',
            'fcese': 'HorClaFchCese',
            'CauBajCod': 'HorClaCauBajCod',
            'GrupoMateriaId': 'HorClaGrupo',
            'dependidSiap': 'HorClaInsCod',
            'FuncRelLabCantHrs': 'HorClaHor',
            'AsignId': 'HorClaAsiCod',
            'RelLabAnulada': 'HorClaBajLog'
        })

    df_historia_completa.to_sql(name='ihorasclase',
                                con=engine_bandeja_out,
                                if_exists='append',
                                index=False)
Example #45
0
def create_obsquery_from_form(form):
    print('\n\nCREATING OBSQUERY FROM FORM\n\n')
    query = db.session.query(Obs)
    joinlist = {}
    warnings = []
    print(dir(form))
    # Handle date stuff:
    if form.singleday.data and form.date_obs.data:
        day = form.date_obs.data.date()
        day_end = day + datetime.timedelta(days=1)
        query = query.filter(Obs.date_obs >= day).filter(
            Obs.date_obs < day_end)
        if form.date_end.data:
            warnings += [
                'Cannot specify both end date and singleday; end date has been ignored.'
            ]

    elif form.date_obs.data:
        query = query.filter(Obs.date_obs >= form.date_obs.data)
    elif form.date_end.data:
        query = query.filter(Obs.date_end <= form.date_end.data)
        if form.singleday.data:
            warnings += [
                'Cannot specify both end date and singleday; end date has been ignored.'
            ]

    # Handle time of day.
    if form.starttime.data:
        starttime = form.starttime.data
        if form.utc.data != 'utc':
            starttime = datetime.datetime.combine(datetime.date.today(), starttime) +\
                datetime.timedelta(hours=-10)
            starttime = starttime.time()
        query = query.filter(cast(Obs.date_obs, Time) >= starttime)
    if form.endtime.data:
        endtime = form.endtime.data
        if form.utc.data != 'utc':
            endtime = datetime.datetime.combine(datetime.date.today(), endtime) +\
                datetime.timedelta(hours=-10)
            endtime = endtime.time()
        query = query.filter(cast(Obs.date_obs, Time) <= endtime)

    # Handle start/end frequency
    if form.frequency_start.data:
        query = query.filter(Acsis.restfreq >= form.frequency_start.data)
        if 'Acsis' not in joinlist:
            joinlist['Acsis'] = (Acsis, Obs.obsid == Acsis.obsid)
    if form.frequency_end.data:
        query = query.filter(Acsis.restfreq <= form.frequency_end.data)
        if 'Acsis' not in joinlist:
            joinlist['Acsis'] = (Acsis, Obs.obsid == Acsis.obsid)

    for key in ('obsid', 'project', 'object'):
        values = form.data[key]
        the_attr = getattr(Obs, key)
        if values is not None and values != '':
            query = query.filter(the_attr == values)

    # Handle instrument: complex; need to handle POL-2?
    print('instrument data!', form.instrument.data)
    if form.instrument.data and form.instrument.data != []:
        print('In instrument handling!')
        values = form.data['instrument'].copy()
        theattr = Obs.instrume
        if values and len(values) > 0:
            print('Values were selected!')
            conditions = []
            if 'SCUBA2' in values:
                values.remove('SCUBA2')
                conditions += [
                    and_(Obs.instrume == 'SCUBA-2',
                         or_(Obs.inbeam == None, Obs.inbeam.notlike('%pol%')))
                ]
            if 'POL2' in values:
                values.remove('POL2')
                conditions += [
                    and_(Obs.instrume == 'SCUBA-2', Obs.inbeam.like('%pol%'))
                ]
            conditions += [Obs.instrume.in_(values)]
            query = query.filter(or_(*conditions))
            print(conditions)

    if form.ompstatus.data and form.ompstatus.data != []:
        values = [int(i) for i in form.ompstatus.data]
        query = query.join(Obs.latest_ompcomment).filter(
            omp.obslog.commentstatus.in_(values))
    for key in ('sw_mode', 'obs_type'):
        values = form.data[key]
        the_attr = getattr(Obs, key)
        if key == 'ompstatus' and values:
            values = [int(i) for i in values]
        if values and len(values) > 0:
            query = query.filter(the_attr.in_(values))

    if form.scanmode.data:
        values = form.scanmode.data
        if values is not None and values != []:
            value_lists = [scanpattern_lookup.get(v, [v]) for v in values]
            value_lists_flat = [i for v in value_lists for i in v]
            query = query.filter(Obs.scanmode.in_(value_lists_flat))
    if form.molecule.data:
        values = form.molecule.data
        if values is not None and values != []:
            or_list = []
            for v in values:
                mole, transit = v.split(';')
                transit = transit.replace('-', '%-%')
                or_list.append(
                    and_(Acsis.molecule == mole, Acsis.transiti.like(transit)))
            query = query.filter(or_(*or_list))
            if 'Acsis' not in joinlist:
                joinlist['Acsis'] = (Acsis, Obs.obsid == Acsis.obsid)

    if form.bwmode.data and form.bwmode.data != []:
        or_list = None
        values = form.bwmode.data
        values_notother = [i for i in values if i != 'other']
        if 'other' in values:
            or_list = Acsis.bwmode.notin_(
                [i[0] for i in het_bandwidth if i[0] != 'other'])
        if 'Acsis' not in joinlist:
            joinlist['Acsis'] = (Acsis, Obs.obsid == Acsis.obsid)
        if or_list is not None and values_notother != []:
            query = query.filter(
                or_(Acsis.bwmode.in_(values_notother), or_list))
        elif or_list is not None:
            query = query.filter(or_(or_list))
        else:
            query = query.filter(Acsis.bwmode.in_(values_notother))
    if form.semester.data and form.semester.data != '':
        query = query.filter(Project.semester == form.semester.data.upper())
        if 'Project' not in joinlist:
            joinlist['Project'] = (Project, Obs.project == Project.projectid)

    if form.state.data:
        query = query.filter(Project.state == True)
        if 'Project' not in joinlist:
            joinlist['Project'] = (Project, Obs.project == Project.projectid)
    if form.fop.data and form.fop.data != '':
        query = query.filter(ProjUser.userid == form.fop.data.upper())
        query = query.filter(ProjUser.capacity == 'Support')
        if 'ProjUser' not in joinlist:
            joinlist['ProjUser'] = (ProjUser,
                                    Obs.project == ProjUser.projectid)

    if form.country.data and form.country.data != '':
        query = query.filter(ProjQueue.country == form.country.data.upper())
        if 'ProjQueue' not in joinlist:
            joinlist['ProjQueue'] = (ProjQueue,
                                     Obs.project == ProjQueue.projectid)

    print('joinlist!', joinlist)
    for j in joinlist:
        query = query.join(*joinlist[j])
    return query
Example #46
0
    def warriors(self, name):
        try:
            c.warrior = db.get_by_name_query(tables.ConquestWarrior,
                                             name).one()
        except NoResultFound:
            return self._not_found()

        c.prev_warrior, c.next_warrior = self._prev_next_id(
            c.warrior, tables.ConquestWarrior, 'id')

        c.rank_count = len(c.warrior.ranks)

        c.perfect_links = (c.warrior.ranks[-1].max_links.filter_by(
            max_link=100).join(tables.PokemonSpecies).order_by(
                tables.PokemonSpecies.conquest_order).all())

        ### Stats
        # Percentiles!  Percentiles are hard.
        stats = tables.ConquestWarriorRankStatMap
        all_stats = sqla.orm.aliased(tables.ConquestWarriorRankStatMap)

        # We need this to be a float so the percentile equation can divide by it
        stat_count = sqla.cast(sqla.func.count(all_stats.base_stat),
                               sqla.types.FLOAT)

        # Grab all of a rank's stats, and also get percentiles
        stat_q = (db.pokedex_session.query(
            stats.warrior_stat_id, stats.base_stat).join(
                all_stats,
                stats.warrior_stat_id == all_stats.warrior_stat_id).group_by(
                    stats.warrior_rank_id, stats.warrior_stat_id, stats.
                    base_stat).order_by(stats.warrior_stat_id).add_columns(
                        sqla.func.sum(
                            sqla.cast(stats.base_stat > all_stats.base_stat,
                                      sqla.types.INT)) / stat_count +
                        sqla.func.sum(
                            sqla.cast(stats.base_stat == all_stats.base_stat,
                                      sqla.types.INT)) / stat_count / 2))

        # XXX There's probably a better way to query all the names
        stat_names = [
            stat.name
            for stat in db.pokedex_session.query(tables.ConquestWarriorStat).
            order_by(tables.ConquestWarriorStat.id).all()
        ]

        # Go through the query for each rank
        c.stats = []
        for rank in c.warrior.ranks:
            c.stats.append([])
            info = stat_q.filter(stats.warrior_rank_id == rank.id).all()

            # We need a bit more info than what the query directly provides
            for stat, value, percentile in info:
                percentile = float(percentile)
                c.stats[-1].append(
                    (stat_names[stat - 1], value, percentile,
                     bar_color(percentile, 0.9), bar_color(percentile, 0.8)))

        ### Max links
        default_link = 70 if c.warrior.archetype else 90

        c.link_form = LinkThresholdForm(request.params, link=default_link)
        c.link_form.validate()

        link_pokemon = (db.pokedex_session.query(
            tables.ConquestMaxLink.pokemon_species_id).filter(
                tables.ConquestMaxLink.warrior_rank_id ==
                c.warrior.ranks[-1].id).filter(
                    tables.ConquestMaxLink.max_link >= c.link_form.link.data))

        max_links = []
        for rank in c.warrior.ranks:
            max_links.append(
                rank.max_links.filter(
                    tables.ConquestMaxLink.pokemon_species_id.in_(link_pokemon)
                ).join(tables.PokemonSpecies).order_by(
                    tables.PokemonSpecies.conquest_order).options(
                        sqla.orm.joinedload('pokemon'),
                        sqla.orm.subqueryload('pokemon.conquest_abilities'),
                        sqla.orm.subqueryload('pokemon.conquest_stats'),
                    ).all())

        c.max_links = izip(*max_links)

        return render('/pokedex/conquest/warrior.mako')
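
The percentile arithmetic in stat_q above is the standard mid-rank formula: (count of stats below + half the count of stats equal) / total. A toy check in plain Python (a sketch mirroring the SQL, not code from this project):

def percentile(value, population):
    below = sum(1 for v in population if v < value)
    equal = sum(1 for v in population if v == value)
    n = float(len(population))
    return below / n + equal / n / 2

# percentile(3, [1, 2, 3, 4]) == 0.5 + 0.125 == 0.625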
Example #47
0
    def create_sqla_query(self,
                          session=None,
                          additional_model_attributes=[],
                          additional_filters={},
                          json_column=None):
        """
        Returns a database query that fully describes the filters.

        The syntax used to describe a filter for a key depends on whether the key has previously been coerced to a model attribute (i.e. the key
        is a table column).

        :param session: The database session.
        :param additional_model_attributes: Additional model attributes to retrieve.
        :param additional_filters: Additional filters to be applied to all clauses.
        :param json_column: Column to be checked if filter key has not been coerced to a model attribute. Only valid if engine instantiated with strict_coerce=False.
        :returns: A database query.
        :raises: FilterEngineGenericError
        """
        all_model_attributes = set(self.mandatory_model_attributes +
                                   additional_model_attributes)

        # Add additional filters, applied as AND clauses to each OR group.
        for or_group in self._filters:
            for _filter in additional_filters:
                or_group.append(list(_filter))

        or_expressions = []
        for or_group in self._filters:
            and_expressions = []
            for and_group in or_group:
                key, oper, value = and_group
                if isinstance(key,
                              sqlalchemy.orm.attributes.InstrumentedAttribute
                              ):  # -> this key filters on a table column.
                    if isinstance(value, str) and any(
                        [char in value for char in ['*', '%']]):  # wildcards
                        if value in ('*', '%'):
                            # match wildcard exactly == no filtering on key
                            continue
                        else:  # partial match with wildcard == like || notlike
                            pattern = value.replace('*', '%') \
                                           .replace('_', '\\_')
                            if oper == operator.eq:
                                expression = key.like(pattern, escape='\\')
                            elif oper == operator.ne:
                                expression = key.notlike(pattern, escape='\\')
                    else:
                        expression = oper(key, value)
                    if oper == operator.ne:  # set .ne operator to include NULLs.
                        expression = or_(expression, key.is_(None))
                elif json_column:  # -> this key filters on the content of a json column
                    if session.bind.dialect.name == 'oracle':
                        # Compiling for Oracle yields a UnsupportedCompilationError so fall back to support for equality only.
                        # TODO
                        if oper != operator.eq:
                            raise exception.FilterEngineGenericError(
                                "Oracle implementation does not support this operator."
                            )
                        if isinstance(value, str) and any(
                            [char in value for char in ['*', '%']]):
                            raise exception.FilterEngineGenericError(
                                "Oracle implementation does not support wildcards."
                            )
                        expression = text(
                            "json_exists(meta,'$?(@.{} == \"{}\")')".format(
                                key, value))
                    else:
                        if isinstance(value, str) and any(
                            [char in value
                             for char in ['*', '%']]):  # wildcards
                            if value in ('*', '%'):
                                # match wildcard exactly == no filtering on key
                                continue
                            else:  # partial match with wildcard == like || notlike
                                pattern = value.replace('*', '%') \
                                               .replace('_', '\\_')
                                if oper == operator.eq:
                                    expression = json_column[key] \
                                        .as_string() \
                                        .like(pattern, escape='\\')
                                elif oper == operator.ne:
                                    expression = json_column[key] \
                                        .as_string() \
                                        .notlike(pattern, escape='\\')
                        else:
                            # Infer what type key should be cast to from typecasting the value in the expression.
                            try:
                                if isinstance(
                                        value, int
                                ):  # this could be bool or int (as bool subclass of int)
                                    if isinstance(value, bool):
                                        expression = oper(
                                            json_column[key].as_boolean(),
                                            value)
                                    else:
                                        expression = oper(
                                            json_column[key].as_float(), value
                                        )  # cast as float, not integer, to avoid potentially losing precision in key
                                elif isinstance(value, float):
                                    expression = oper(
                                        json_column[key].as_float(), value)
                                elif isinstance(value, datetime):
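                                    # JSON values have no datetime accessor;
                                    # cast through Text so the DateTime cast
                                    # can parse the stored string (assumes an
                                    # ISO-8601 datetime in the JSON).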
                                    expression = oper(
                                        cast(
                                            cast(json_column[key],
                                                 sqlalchemy.types.Text),
                                            sqlalchemy.types.DateTime), value)
                                else:
                                    expression = oper(
                                        json_column[key].as_string(), value)
                            except Exception as e:
                                raise exception.FilterEngineGenericError(e)
                            if oper == operator.ne:  # set .ne operator to include NULLs.
                                expression = or_(expression,
                                                 json_column[key].is_(None))
                else:
                    raise exception.FilterEngineGenericError(
                        "Requested filter on key without model attribute, but [json_column] not set."
                    )

                and_expressions.append(expression)
            or_expressions.append(and_(*and_expressions))
        return session.query(*all_model_attributes).filter(
            or_(*or_expressions))
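
The OR-of-ANDs grouping at the end is the core pattern here. A minimal, self-contained sketch of the same idea (the `files` table, its columns, and the filter triples are hypothetical, not part of the engine):

import operator

import sqlalchemy as sa

metadata = sa.MetaData()
files = sa.Table('files', metadata,
                 sa.Column('name', sa.String),
                 sa.Column('size', sa.Integer))

# Outer list = OR groups; each inner list is ANDed together, mirroring
# the structure create_sqla_query() walks above.
filters = [
    [(files.c.name, operator.eq, 'raw'), (files.c.size, operator.gt, 1024)],
    [(files.c.size, operator.eq, 0)],
]

or_expressions = [
    sa.and_(*[oper(key, value) for key, oper, value in and_group])
    for and_group in filters
]
stmt = sa.select([files]).where(sa.or_(*or_expressions))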
Example #48
0
    def __eq__(self, other):
        return self._case() == cast(other, String)
Example #49
0
from collections import namedtuple

import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import JSON, JSONB
from sqlalchemy_utils import get_hybrid_properties
from sqlalchemy_utils.functions import cast_if, get_mapper
from sqlalchemy_utils.functions.orm import get_all_descriptors
from sqlalchemy_utils.relationships import (path_to_relationships,
                                            select_correlated_expression)

from .exc import (IdPropertyNotFound, InvalidField, UnknownField,
                  UnknownFieldKey, UnknownModel)
from .hybrids import CompositeId
from .utils import (adapt, chain_if, get_attrs, get_descriptor_columns,
                    get_selectable, s, subpaths)

Parameters = namedtuple('Parameters',
                        ['fields', 'include', 'sort', 'offset', 'limit'])

json_array = sa.cast(postgresql.array([], type_=JSON), postgresql.ARRAY(JSON))
jsonb_array = sa.cast(postgresql.array([], type_=JSONB),
                      postgresql.ARRAY(JSONB))
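# The casts pin the element type: PostgreSQL cannot infer the type of an
# empty ARRAY[] literal. (Assumption: these constants serve as typed
# empty-array fallbacks when building JSON documents elsewhere in the module.)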

RESERVED_KEYWORDS = (
    'id',
    'type',
)


class ResourceRegistry(object):
    def __init__(self, model_mapping):
        self.by_type = model_mapping
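        # Inverse mapping: resolve the resource type string from a model class.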
        self.by_model_class = dict(
            (value, key) for key, value in model_mapping.items())
Example #50
0
def generate_schedule(arch):
    """ the schedule pages are very different than others index pages """
    log.info('Building the schedule index page for ' + arch + '...')
    title = 'Packages currently scheduled on ' + arch + ' for testing for build reproducibility'

    # 'AND h.name=s.name AND h.suite=s.suite AND h.architecture=s.architecture'
    # in this query and the query below is needed due to not using package_id
    # in the stats_build table, which should be fixed...
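    # Correlated scalar subquery: the average duration of past builds of the
    # same source package (build_duration cast to Integer; COALESCE yields 0
    # when no past builds exist).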
    averagesql = select([
        func.coalesce(func.avg(cast(stats_build.c.build_duration, Integer)), 0)
    ]).where(
        and_(
            stats_build.c.status.in_(('reproducible', 'unreproducible')),
            stats_build.c.name == sources.c.name,
            stats_build.c.suite == sources.c.suite,
            stats_build.c.architecture == sources.c.architecture,
        )).as_scalar()

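    # LEFT JOIN (isouter=True) against results so packages with no previous
    # build still appear in the schedule.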
    query = select([
        schedule.c.date_scheduled, sources.c.suite, sources.c.architecture,
        sources.c.name, results.c.status, results.c.build_duration, averagesql
    ]).select_from(sources.join(schedule).join(results, isouter=True)).where(
        and_(
            schedule.c.date_build_started.is_(None),
            sources.c.architecture == bindparam('arch'),
        )).order_by(schedule.c.date_scheduled)

    text = Template(
        '$tot packages are currently scheduled for testing on $arch:')
    html = ''
    rows = query_db(query.params({'arch': arch}))
    html += build_leading_text_section({'text': text}, rows, defaultsuite,
                                       arch)
    html += generate_live_status_table(arch)
    html += '<p><table class="scheduled">\n' + tab
    html += '<tr><th class="center">#</th><th class="center">scheduled at</th><th class="center">suite</th>'
    html += '<th class="center">arch</th><th class="center">source package</th><th class="center">previous build status</th><th class="center">previous build duration</th><th class="center">average build duration</th></tr>\n'
    for row in rows:
        # 0: date_scheduled, 1: suite, 2: arch, 3: pkg name 4: previous status 5: previous build duration 6. avg build duration
        pkg = row[3]
        duration = convert_into_hms_string(row[5])
        avg_duration = convert_into_hms_string(row[6])
        html += tab + '<tr><td>&nbsp;</td><td>' + str(row[0]) + '</td>'
        html += '<td>' + row[1] + '</td><td>' + row[2] + '</td><td><code>'
        html += Package(pkg).html_link(row[1], row[2])
        html += '</code></td><td>' + convert_into_status_html(
            str(row[4])
        ) + '</td><td>' + duration + '</td><td>' + avg_duration + '</td></tr>\n'
    html += '</table></p>\n'
    destfile = DISTRO_BASE + '/index_' + arch + '_scheduled.html'
    desturl = DISTRO_URL + '/index_' + arch + '_scheduled.html'
    suite_arch_nav_template = DISTRO_URI + '/index_{{arch}}_scheduled.html'
    left_nav_html = create_main_navigation(
        arch=arch,
        no_suite=True,
        displayed_page='scheduled',
        suite_arch_nav_template=suite_arch_nav_template)
    write_html_page(title=title,
                    body=html,
                    destfile=destfile,
                    style_note=True,
                    refresh_every=60,
                    left_nav_html=left_nav_html)
    log.info("Page generated at " + desturl)
Example #51
0
    def test_unsupported_casts(self, type_, expected):

        t = sql.table("t", sql.column("col"))
        with expect_warnings(
                "Datatype .* does not support CAST on MySQL/MariaDb;"):
            self.assert_compile(cast(t.c.col, type_), expected)
Example #52
0
    def build_included_json_object(self, alias, fields):
        return sa.cast(
            sa.func.json_build_object(
                *self.build_single_included_fields(alias, fields)),
            JSONB).label('included')
Example #53
0
    def test_cast_literal_bind(self):
        expr = cast(column("foo", Integer) + 5, Integer())

        self.assert_compile(expr,
                            "CAST(foo + 5 AS SIGNED INTEGER)",
                            literal_binds=True)
Example #54
0
    def test_cast_grouped_expression_non_castable(self):
        with expect_warnings(
                "Datatype FLOAT does not support CAST on MySQL/MariaDb;"):
            self.assert_compile(cast(sql.column("x") + sql.column("y"), Float),
                                "(x + y)")
Example #55
0
def alter_column(conn, table, column_name, func, schema=None):
    """
    Run the given callable against the given column of the given table in the
    activity table's JSONB data columns. This function is useful when you want
    to reflect type changes in your schema to the activity table.

    In the following example we change the data type of User's age column from
    string to integer.


    ::

        from alembic import op
        from postgresql_audit import alter_column


        def upgrade():
            op.alter_column(
                'user',
                'age',
                type_=sa.Integer
            )

            alter_column(
                op,
                'user',
                'age',
                lambda value, activity_table: sa.cast(value, sa.Integer)
            )


    :param conn:
        An object that is able to execute SQL (either SQLAlchemy Connection,
        Engine or Alembic Operations object)
    :param table:
        The table to run the column changes against
    :param column_name:
        Name of the column to run the callable against
    :param func:
        A callable to run against the specific column in the activity table's
        JSONB data columns. The callable should take two parameters: the JSONB
        value corresponding to the given column_name, and the activity table
        object.
    :param schema:
        Optional name of schema to use.
    """
    activity_table = get_activity_table(schema=schema)
    query = (
        activity_table
        .update()
        .values(
            old_data=(
                activity_table.c.old_data +
                sa.cast(sa.func.json_build_object(
                    column_name,
                    func(
                        activity_table.c.old_data[column_name],
                        activity_table
                    )
                ), JSONB)
            ),
            changed_data=(
                activity_table.c.changed_data +
                sa.cast(sa.func.json_build_object(
                    column_name,
                    func(
                        activity_table.c.changed_data[column_name],
                        activity_table
                    )
                ), JSONB)
            )
        )
        .where(activity_table.c.table_name == table)
    )
    return conn.execute(query)
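
For completeness, a corresponding downgrade would apply the inverse cast. A sketch under the same assumptions as the docstring example above:

def downgrade():
    op.alter_column(
        'user',
        'age',
        type_=sa.String
    )

    # Cast the activity table's jsonb value back to text.
    alter_column(
        op,
        'user',
        'age',
        lambda value, activity_table: sa.cast(value, sa.Text)
    )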
Example #56
0
    def test_unsupported_cast_literal_bind(self):
        expr = cast(column("foo", Integer) + 5, Float)

        with expect_warnings(
                "Datatype FLOAT does not support CAST on MySQL/MariaDb;"):
            self.assert_compile(expr, "(foo + 5)", literal_binds=True)
Example #57
0
    def test_clause(self):
        stmt = select(cast("INT_1", type_=self.MyInteger)).scalar_subquery()
        self._run_test(default=stmt)
Example #58
0
    def test_cast(self, type_, expected):
        t = sql.table("t", sql.column("col"))
        self.assert_compile(cast(t.c.col, type_), expected)
Example #59
0
    def get_events(self, event_filter, limit=None):
        """Return an iterable of model.Event objects.

        :param event_filter: EventFilter instance
        """
        if limit == 0:
            return []
        session = self._engine_facade.get_session()
        with session.begin():
            session.connection(
                execution_options={'isolation_level': self.isolation_level})

            # Build up the join conditions
            event_join_conditions = [
                models.EventType.id == models.Event.event_type_id
            ]

            if event_filter.event_type:
                event_join_conditions.append(
                    models.EventType.desc == event_filter.event_type)

            # Build up the where conditions
            event_filter_conditions = []
            if event_filter.message_id:
                event_filter_conditions.append(
                    models.Event.message_id == event_filter.message_id)
            if event_filter.start_timestamp:
                event_filter_conditions.append(
                    models.Event.generated >= event_filter.start_timestamp)
            if event_filter.end_timestamp:
                event_filter_conditions.append(
                    models.Event.generated <= event_filter.end_timestamp)

            trait_subq = None
            # Build trait filter
            if event_filter.traits_filter:
                filters = list(event_filter.traits_filter)
                trait_filter = filters.pop()
                key = trait_filter.pop('key')
                op = trait_filter.pop('op', 'eq')
                trait_type, value = list(trait_filter.items())[0]
                trait_subq, t_model = _build_trait_query(
                    session, trait_type, key, value, op)
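                # AND the remaining trait filters together by nesting each as
                # a correlated EXISTS against the first trait query.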
                for trait_filter in filters:
                    key = trait_filter.pop('key')
                    op = trait_filter.pop('op', 'eq')
                    trait_type, value = list(trait_filter.items())[0]
                    q, model = _build_trait_query(session, trait_type, key,
                                                  value, op)
                    trait_subq = trait_subq.filter(
                        q.filter(model.event_id == t_model.event_id).exists())
                trait_subq = trait_subq.subquery()

            query = (session.query(models.Event.id).join(
                models.EventType, sa.and_(*event_join_conditions)))
            if trait_subq is not None:
                query = query.join(trait_subq,
                                   trait_subq.c.ev_id == models.Event.id)
            if event_filter_conditions:
                query = query.filter(sa.and_(*event_filter_conditions))

            query = query.order_by(models.Event.generated).limit(limit)
            event_list = {}
            # get a list of all events that match filters
            for (id_, generated, message_id, desc, raw) in query.add_columns(
                    models.Event.generated, models.Event.message_id,
                    models.EventType.desc, models.Event.raw).all():
                event_list[id_] = api_models.Event(message_id, desc, generated,
                                                   [], raw)
            # Query all traits related to events.
            # NOTE (gordc): cast is done because pgsql defaults to TEXT when
            #               handling unknown values such as null.
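            #               Each branch of the union selects
            #               (event_id, key, datetime, int, float, text),
            #               NULL-padding the unused slots so the four trait
            #               queries stay union-compatible.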
            trait_q = (session.query(
                models.TraitDatetime.event_id, models.TraitDatetime.key,
                models.TraitDatetime.value, sa.cast(sa.null(), sa.Integer),
                sa.cast(sa.null(), sa.Float(53)),
                sa.cast(sa.null(), sa.String(255))).filter(sa.exists().where(
                    models.TraitDatetime.event_id == query.subquery().c.id))
                       ).union_all(
                           session.query(models.TraitInt.event_id,
                                         models.TraitInt.key, sa.null(),
                                         models.TraitInt.value, sa.null(),
                                         sa.null()).filter(sa.exists().where(
                                             models.TraitInt.event_id ==
                                             query.subquery().c.id)),
                           session.query(models.TraitFloat.event_id,
                                         models.TraitFloat.key, sa.null(),
                                         sa.null(), models.TraitFloat.value,
                                         sa.null()).filter(sa.exists().where(
                                             models.TraitFloat.event_id ==
                                             query.subquery().c.id)),
                           session.query(models.TraitText.event_id,
                                         models.TraitText.key, sa.null(),
                                         sa.null(), sa.null(),
                                         models.TraitText.value).filter(
                                             sa.exists().where(
                                                 models.TraitText.event_id ==
                                                 query.subquery().c.id)))

            for id_, key, t_date, t_int, t_float, t_text in (trait_q.order_by(
                    models.TraitDatetime.key)).all():
                if t_int is not None:
                    dtype = api_models.Trait.INT_TYPE
                    val = t_int
                elif t_float is not None:
                    dtype = api_models.Trait.FLOAT_TYPE
                    val = t_float
                elif t_date is not None:
                    dtype = api_models.Trait.DATETIME_TYPE
                    val = t_date
                else:
                    dtype = api_models.Trait.TEXT_TYPE
                    val = t_text

                try:
                    trait_model = api_models.Trait(key, dtype, val)
                    event_list[id_].append_trait(trait_model)
                except KeyError:
                    LOG.warning('Trait key: %(key)s, val: %(val)s, for event: '
                                '%(event)s not valid.' % {
                                    'key': key,
                                    'val': val,
                                    'event': id_
                                })

            return event_list.values()
Example #60
0
def add_column(conn, table, column_name, default_value=None, schema=None):
    """
    Adds the given column to the `activity` table's JSONB data columns.

    In the following example we reflect the changes made to our schema to the
    activity table.

    ::

        import sqlalchemy as sa
        from alembic import op
        from postgresql_audit import add_column


        def upgrade():
            op.add_column('article', sa.Column('created_at', sa.DateTime()))
            add_column(op, 'article', 'created_at')


    :param conn:
        An object that is able to execute SQL (either SQLAlchemy Connection,
        Engine or Alembic Operations object)
    :param table:
        The table to add the column to
    :param column_name:
        Name of the column to add
    :param default_value:
        The default value of the column
    :param schema:
        Optional name of schema to use.
    """
    activity_table = get_activity_table(schema=schema)
    data = {column_name: default_value}
    query = (
        activity_table
        .update()
        .values(
            old_data=sa.case(
                [
                    (
                        sa.cast(activity_table.c.old_data, sa.Text) != '{}',
                        activity_table.c.old_data + data
                    ),
                ],
                else_=sa.cast({}, JSONB)
            ),
            changed_data=sa.case(
                [
                    (
                        sa.and_(
                            sa.cast(
                                activity_table.c.changed_data,
                                sa.Text
                            ) != '{}',
                            activity_table.c.verb != 'update'
                        ),
                        activity_table.c.changed_data + data
                    )
                ],
                else_=activity_table.c.changed_data
            ),
        )
        .where(activity_table.c.table_name == table)
    )
    return conn.execute(query)
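
A matching downgrade would drop the key again. A sketch, assuming the symmetric `remove_column` helper that postgresql_audit provides alongside `add_column`:

from postgresql_audit import remove_column


def downgrade():
    op.drop_column('article', 'created_at')
    # Strip the key from the activity table's jsonb data columns as well.
    remove_column(op, 'article', 'created_at')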