Example #1
0
def _add_ordering(sql_query, table, column_type, column_name, order):
    """Append an ORDER BY clause for *column_name* to *sql_query*.

    The 'contig' column gets special treatment so contigs sort naturally
    (numeric part first, then string length, then text — e.g. chr2 before
    chr11); every other column is cast to its SQLAlchemy type and sorted
    asc/desc.

    :param order: 'asc' or 'desc'.
    :return: the query with the ordering applied.
    """
    # Special case for this column, which sorts contigs correctly:
    if column_name == 'contig':
        # Raw strings here: the originals relied on '\d' not being a
        # recognized escape, which is a SyntaxWarning on modern Python and
        # an error from 3.12 on.  The SQL text is unchanged.
        get_contig_num = cast(
            text(r"SUBSTRING({} FROM '\d+')".format(table.c.contig)),
            type_=Integer)
        starts_with_chr = (text(r"SUBSTRING({} FROM '^chr(\d+)')"
                                .format(table.c.contig)) != literal(''))
        starts_with_number = (text(r"SUBSTRING({} FROM '^\d+')"
                                   .format(table.c.contig)) != literal(''))
        # 10000 used here to mean "should be at the end of all the numbers",
        # assuming we never hit a chromosome number >= 10000.
        contig_num_col = case(
            [(starts_with_chr, get_contig_num),
             (starts_with_number, get_contig_num)],
            else_=literal(10000)
        )
        contig_len_col = func.length(table.c.contig)
        contig_col = table.c.contig
        if order == 'desc':
            contig_len_col = desc(contig_len_col)
            contig_col = desc(contig_col)
        return sql_query.order_by(contig_num_col, contig_len_col, contig_col)
    sqla_type = vcf_type_to_sqla_type(column_type)
    column = cast(table.c[column_name], type_=sqla_type)
    # .get() yields None for an unknown order value, which clears ordering —
    # assumed intentional; confirm callers only ever pass 'asc'/'desc'.
    column = {'asc': asc(column), 'desc': desc(column)}.get(order)
    return sql_query.order_by(column)
    def test_ne_operator(self):
        """Verify ``literal != literal`` compiles to the SQL ``<>`` operator."""
        expr = literal(5) != literal(10)
        self.assert_compile(
            expr,
            '%(param_1)s <> %(param_2)s',
            checkparams={'param_1': 5, 'param_2': 10},
        )
  def _generate_stmt(self, id) :
    """Build the CTE mapping glyph "generics" to stencil rows and the
    matching INSERT into ``glyph_stencil``.

    :param id: row id (or id expression) restricting which glyphs are
        processed; compared via ``safe_eq_comp``.
    """
    font_name = self.font_name
    font_size = self.font_size
    unicode = self.unicode
    glyph_box = self.glyph_box
    glyph_stencil = self.glyph_stencil
    alignment_directive = self.alignment_directive
    writer = self.writer
    extra_eq = self.extra_eq

    # One stencil row per glyph: (x, y) comes from the glyph box shifted by
    # the alignment directive when one is joined in (outer join, so glyphs
    # without a directive fall back to 0 via the CASE else_).
    generics_to_stencils = select([
      font_name.c.id.label('id'),
      literal(writer).label('writer'),
      literal(0).label('sub_id'),
      font_name.c.val.label('font_name'),
      font_size.c.val.label('font_size'),
      unicode.c.val.label('unicode'),
      case([(alignment_directive.c.x != None, glyph_box.c.x + (alignment_directive.c.x * glyph_box.c.width))], else_=0).label('x'),
      case([(alignment_directive.c.y != None, glyph_box.c.y + glyph_box.c.height - (alignment_directive.c.y * glyph_box.c.height))], else_=0).label('y'),
    ]).select_from(font_name.outerjoin(alignment_directive, onclause = alignment_directive.c.id == font_name.c.id)).\
        where(safe_eq_comp(font_name.c.id, id)).\
        where(and_(glyph_box.c.name == font_name.c.val,
                  glyph_box.c.unicode == unicode.c.val,
                  font_name.c.id == font_size.c.id,
                  font_name.c.id == unicode.c.id,
                  *extra_eq
                  )).\
    cte(name='generics_to_stencils')

    self.register_stmt(generics_to_stencils)

    self.insert = simple_insert(glyph_stencil, generics_to_stencils)
  def _generate_stmt(self, id) :
    """Build the CTE producing bar-line line stencils and the matching
    INSERT into ``line_stencil``.

    :param id: row id (or id expression) restricting which elements are
        processed; compared via ``safe_eq_comp``.
    """
    name = self.name
    bar_thickness = self.bar_thickness
    staff_symbol = self.staff_symbol
    staff_space = self.staff_space
    n_lines = self.n_lines
    line_stencil = self.line_stencil

    # A bar line is a vertical segment from (0, 0) up to the staff height:
    # staff_space * (n_lines - 1).
    bar_lines_to_stencils = select([
      name.c.id.label('id'),
      literal('bar_line_to_stencil').label('writer'),
      literal(0).label('sub_id'),
      literal(0.0).label('x0'),
      literal(0.0).label('y0'),
      literal(0.0).label('x1'),
      (staff_space.c.val * (n_lines.c.val - 1)).label('y1'),
      bar_thickness.c.val.label('thickness'),
    ]).where(safe_eq_comp(name.c.id, id)).\
    where(and_(name.c.val == 'bar_line',
                  name.c.id == bar_thickness.c.id,
                  name.c.id == staff_symbol.c.id,
                  # NOTE(review): staff_symbol.c.val (not .id) is compared to
                  # the staff_space/n_lines ids — presumably val stores a
                  # staff-symbol reference id; confirm against the schema.
                  staff_symbol.c.val == staff_space.c.id,
                  staff_symbol.c.val == n_lines.c.id,
                  )).\
    cte(name='bar_lines_to_stencils')

    self.register_stmt(bar_lines_to_stencils)

    self.insert = simple_insert(line_stencil, bar_lines_to_stencils)
Example #5
0
 def construct_search(self, field_name):
     """Map a lookup-prefix convention to the matching literal operator.

     '^' means prefix match, '=' means exact match, anything else means
     substring containment.
     """
     prefix = field_name[:1]
     if prefix == '^':
         return literal(field_name[1:]).startswith
     if prefix == '=':
         return literal(field_name[1:]).op('=')
     return literal(field_name).contains
 def limit_clause(self, select, **kw):
     """Render a MySQL-style LIMIT clause for *select*.

     Fixes two defects in the original: the local no longer shadows the
     builtin/SQL helper ``text``, and a None ``_limit`` no longer falls
     into the two-argument branch (which rendered a bound NULL as the
     limit); with no limit set, no clause is emitted.
     """
     clause = ""
     if select._limit is not None and select._offset is None:
         clause += "\n LIMIT 0, {}".format(select._limit)
     elif select._limit is not None and select._offset is not None:
         clause += "\n LIMIT %s, %s" % (
             self.process(sql.literal(select._offset)),
             self.process(sql.literal(select._limit)))
     return clause
Example #7
0
 def limit_clause(self, select):
     """Render a MySQL-style LIMIT / LIMIT offset, count clause for *select*."""
     limit = select._limit
     offset = select._offset
     if not limit:
         # No (truthy) limit: emit nothing, even if an offset was given.
         return ''
     if offset:
         rendered_offset = self.process(expression.literal(offset))
         rendered_limit = self.process(expression.literal(limit))
         return ' \nLIMIT %s, %s' % (rendered_offset, rendered_limit)
     return ' \nLIMIT %s' % self.process(expression.literal(limit))
Example #8
0
def listing(request, *args, **kwargs):
    """Render the report list view.

    Builds a UNION of (report id, status) pairs for every status selected
    in the filter form, joins it back to ``Report``, ranks the rows by
    last change, and renders a paginated ``reports/list.html``.
    """
    db = pyfaf.storage.getDatabase()
    # Merge request parameters with URL kwargs to feed the filter form.
    params = dict(request.REQUEST)
    params.update(kwargs)
    form = ReportFilterForm(db, params)

    # Status name -> query filter; "new" reports have no problem assigned.
    filters = { 'new'       : (lambda q: q.filter(Report.problem_id==None)),
                'processed' : (lambda q: q.filter(Report.problem_id!=None)) }

    states = None
    for s in form.get_status_selection():
        # if 's' isn't in filters exceptions is thrown
        # it is intended behaviour - someone has to take care about it
        subquery = filters[s](db.session.query(
                                Report.id.label('id'),
                                literal(s.upper()).label('status')))
        states = states.union_all(subquery) if states else subquery

    # if list of statuses is empty the states variable is None
    # it means that no reports are to be selected
    # hope that there will never be a Report with id equal to -1
    if not states:
        states = (db.session.query(literal(-1).label('id'),
                                   literal('').label('status')))

    states = states.subquery()

    opsysrelease_id = form.os_release_id
    # 'rank' is a placeholder column; it is filled in below after fetching.
    reports = (db.session.query(Report.id, literal(0).label('rank'),
            states.c.status, Report.first_occurence.label('created'),
            Report.last_occurence.label('last_change'),
            OpSysComponent.name.label('component'), Report.type)
        .join(ReportOpSysRelease)
        .join(OpSysComponent)
        .filter(states.c.id==Report.id)
        .filter((ReportOpSysRelease.opsysrelease_id==opsysrelease_id) |
            (opsysrelease_id==-1))
        .order_by(desc('last_change')))

    component_ids = form.get_component_selection()
    if component_ids:
        reports = reports.filter(Report.component_id.in_(component_ids))

    reports = reports.all()

    # Assign a 1-based rank following the last_change ordering.
    i = 1
    for rep in reports:
        rep.rank = i
        i += 1

    reports = paginate(reports, request)
    forward = {'reports' : reports,
               'form'  : form}

    return render_to_response('reports/list.html',
        forward, context_instance=RequestContext(request))
  def _generate_stmt(self, id) :
    """Build a recursive CTE emitting one line stencil per ledger line and
    the matching INSERT into ``line_stencil``.

    The anchor row draws the ledger line just outside the staff; the
    recursive part stamps further copies, stepping one staff space per
    ``sub_id`` increment, until ``abs(ledger_line.val)`` lines exist.
    """
    #print "@@ON ID", id
    ## ugggh for y_position
    ledger_line = self.ledger_line
    n_lines = self.n_lines
    staff_space = self.staff_space
    staff_symbol = self.staff_symbol
    rhythmic_head_width = self.rhythmic_head_width
    y_position = self.y_position
    line_stencil = self.line_stencil

    # Anchor: a horizontal segment.  The CASE picks the staff's top edge
    # (val < 0) or just below the bottom; y is relative to y_position.
    ledger_line_to_line_stencil = select([
      ledger_line.c.id.label('id'),
      literal('ledger_line_to_line_stencil').label('writer'),
      literal(0).label('sub_id'),
      literal(-0.6).label('x0'),
      (case([(ledger_line.c.val < 0, staff_space.c.val * n_lines.c.val)], else_ = - staff_space.c.val) - y_position.c.val).label('y0'),
      (rhythmic_head_width.c.val + 1.0).label('x1'),
      (case([(ledger_line.c.val < 0, staff_space.c.val * n_lines.c.val)], else_ = - staff_space.c.val) - y_position.c.val).label('y1'),
      literal(0.13).label('thickness')
    ]).\
    where(safe_eq_comp(ledger_line.c.id, id)).\
    where(func.abs(ledger_line.c.val) > 0).\
    where(n_lines.c.id == staff_symbol.c.val).\
    where(staff_space.c.id == staff_symbol.c.val).\
    where(y_position.c.id == ledger_line.c.id).\
    where(staff_symbol.c.id == ledger_line.c.id).\
    where(rhythmic_head_width.c.id == staff_symbol.c.id).\
    cte(name="ledger_line_to_line_stencil", recursive = True)

    #where(safe_eq_comp(ledger_line.c.id, id))

    self.register_stmt(ledger_line_to_line_stencil)

    ledger_line_to_line_stencil_prev = ledger_line_to_line_stencil.alias(name="ledger_line_to_line_stencil_prev")

    # Recursive part: shift y by one staff space per iteration, signed by
    # the direction of the ledger lines (val / abs(val) is +/-1).
    ledger_line_to_line_stencil = ledger_line_to_line_stencil.union_all(
     select([
       ledger_line_to_line_stencil_prev.c.id,
       literal('ledger_line_to_line_stencil'),
       ledger_line_to_line_stencil_prev.c.sub_id + 1,
       ledger_line_to_line_stencil_prev.c.x0,
       ledger_line_to_line_stencil_prev.c.y0 + (staff_space.c.val * - 1.0 * ledger_line.c.val / func.abs(ledger_line.c.val)),
       ledger_line_to_line_stencil_prev.c.x1,
       ledger_line_to_line_stencil_prev.c.y1 + (staff_space.c.val * -1.0 * ledger_line.c.val / func.abs(ledger_line.c.val)),
       ledger_line_to_line_stencil_prev.c.thickness
     ]).\
     where(staff_space.c.id == staff_symbol.c.val).\
     where(staff_symbol.c.id == ledger_line_to_line_stencil_prev.c.id).\
     where(ledger_line_to_line_stencil_prev.c.id == ledger_line.c.id).\
     where(ledger_line_to_line_stencil_prev.c.sub_id < func.abs(ledger_line.c.val) - 1)
    )

    self.register_stmt(ledger_line_to_line_stencil)

    self.insert = simple_insert(line_stencil, ledger_line_to_line_stencil)
Example #10
0
    def batch_get_all_groups(self, spec_filters, group_filters, user_group_filters):
        '''
        Get a list of groups by querying the group table and/or the user_group table.
        Take the union of the two results.  This method performs the general query:
        - q0: use spec_filters on the public group
        - q1: use spec_filters and group_filters on group
        - q2: use spec_filters and user_group_filters on user_group
        return union(q0, q1, q2)
        '''
        fetch_cols = [cl_group.c.uuid, cl_group.c.name, cl_group.c.owner_id]
        # q0/q1 rows have no user_group entry, so synthesize user_id/is_admin.
        fetch_cols0 = fetch_cols + [cl_group.c.owner_id.label('user_id'), literal(False).label('is_admin')]
        fetch_cols1 = fetch_cols + [cl_group.c.owner_id.label('user_id'), literal(True).label('is_admin')]
        fetch_cols2 = fetch_cols + [cl_user_group.c.user_id, cl_user_group.c.is_admin]

        q0 = None
        q1 = None
        q2 = None

        if spec_filters:
            spec_clause = self.make_kwargs_clause(cl_group, spec_filters)
            q0 = select(fetch_cols0).where(spec_clause)
            q1 = select(fetch_cols1).where(spec_clause)
            q2 = select(fetch_cols2).where(spec_clause).where(cl_group.c.uuid == cl_user_group.c.group_uuid)

        # q0 (the public group) is always included.  (The original wrapped
        # this in a dead `if True:` block.)
        if q0 is None:
            q0 = select(fetch_cols0)
        q0 = q0.where(cl_group.c.uuid == self.public_group_uuid)

        if group_filters:
            group_clause = self.make_kwargs_clause(cl_group, group_filters)
            if q1 is None:
                q1 = select(fetch_cols1)
            q1 = q1.where(group_clause)
        if user_group_filters:
            user_group_clause = self.make_kwargs_clause(cl_user_group, user_group_filters)
            if q2 is None:
                q2 = select(fetch_cols2).where(cl_group.c.uuid == cl_user_group.c.group_uuid)
            q2 = q2.where(user_group_clause)

        # Union of whichever sub-queries were actually built.
        q0 = union(*[q for q in (q0, q1, q2) if q is not None])

        with self.engine.begin() as connection:
            rows = connection.execute(q0).fetchall()
            if not rows:
                return []
            for i, row in enumerate(rows):
                row = dict(row)
                # TODO: remove these conversions once database schema is changed from int to str
                if isinstance(row['user_id'], int): row['user_id'] = str(row['user_id'])
                if isinstance(row['owner_id'], int): row['owner_id'] = str(row['owner_id'])
                rows[i] = row
            # De-duplicate by uuid; later rows win (same as the original
            # dict comprehension).
            values = {row['uuid']: dict(row) for row in rows}
            # .values() works on both Python 2 and 3; the original used
            # .itervalues(), which raises AttributeError on Python 3.
            return list(values.values())
Example #11
0
    def get(self):
        """Write a JSON list of geocoded addresses for orgs and upcoming
        events, cached per visibility level.

        Non-moderators (or moderators without deep visibility) only see
        public orgs/events.
        """
        # pylint: disable=singleton-comparison
        # Cannot use `is` in SQLAlchemy filters

        # Cache key depends on visibility level.
        key = "address:%s" % ["public", "all"][self.deep_visible()]

        value = self.cache.get(key)
        if value:
            self.write(value)
            return

        # Base query: addresses with at least one usable coordinate
        # (manual values act as fallbacks via coalesce).
        address_list = self.orm.query(
            Address.address_id,
            func.coalesce(Address.latitude, Address.manual_latitude),
            func.coalesce(Address.longitude, Address.manual_longitude),
        ).filter(func.coalesce(
            Address.latitude, Address.manual_latitude,
            Address.longitude, Address.manual_longitude
        ) != None)

        # Tag each row with its entity type ("org" / "event") via a literal
        # column so both halves of the union have the same shape.
        org_list = address_list \
            .join((org_address,
                   Address.address_id == org_address.c.address_id)) \
            .join((Org, Org.org_id == org_address.c.org_id)) \
            .add_columns(Org.org_id, Org.name, literal("org"))

        event_list = address_list \
            .join((event_address,
                   Address.address_id == event_address.c.address_id)) \
            .join((Event, Event.event_id == event_address.c.event_id)) \
            .add_columns(Event.event_id, Event.name, literal("event"))

        # Only events starting today or later.
        today = datetime.datetime.now().date()
        event_list = event_list.filter(Event.start_date >= today)

        if not (self.moderator and self.deep_visible()):
            org_list = org_list.filter(Org.public == True)
            event_list = event_list.filter(Event.public == True)

        address_list = org_list.union(event_list)

        obj_list = []
        for result in address_list.all():
            obj_list.append(dict(list(zip([
                "address_id", "latitude", "longitude",
                "entity_id", "name", "entity"
            ], result))))

        value = self.dump_json(obj_list)
        self.cache.set(key, value)

        self.write(value)
Example #12
0
 def limit_clause(self, select):
     """Render a MySQL-style LIMIT clause; OFFSET alone is not expressible."""
     limit, offset = select._limit, select._offset
     if limit is not None and offset is None:
         return "\n LIMIT " + self.process(sql.literal(limit))
     if limit is not None and offset is not None:
         return "\n LIMIT %s, %s" % (
             self.process(sql.literal(offset)),
             self.process(sql.literal(limit)))
     if offset is not None:
         # MySQL has no standalone OFFSET syntax.
         raise exc.CompileError(
             "Cannot compile LIMIT clause, SELECT couldn't have only OFFSET"
             " clause without LIMIT")
     return ""
Example #13
0
  def _generate_stmt(self, id) :
    """Build a recursive CTE emitting one glyph stencil per augmentation
    dot and the matching INSERT into ``glyph_stencil``.

    The anchor places the first dot right of the note head, using the
    element's own padding when present, otherwise the default padding row
    (id == -1); the recursive part spaces the remaining dots evenly by
    ``dot_width / dots``.
    """
    font_name = self.font_name
    font_size = self.font_size
    dots = self.dots
    dot_width = self.dot_width
    rhythmic_head_width = self.rhythmic_head_width
    rhythmic_event_to_dot_padding = self.rhythmic_event_to_dot_padding
    glyph_stencil = self.glyph_stencil

    # Alias holding the fallback padding row (selected via id == -1 below).
    rhythmic_event_to_dot_padding_a = rhythmic_event_to_dot_padding.alias(name="rhythmic_event_to_dot_padding_default")

    # "U+E1E7" — presumably the SMuFL augmentation-dot glyph; confirm
    # against the font mapping.
    dots_to_stencil = select([
      dots.c.id.label('id'),
      literal('dots_to_stencil').label('writer'),
      literal(0).label('sub_id'),
      font_name.c.val.label('font_name'),
      font_size.c.val.label('font_size'),
      literal("U+E1E7").label('unicode'),
      (rhythmic_head_width.c.val + case([(rhythmic_event_to_dot_padding.c.val != None, rhythmic_event_to_dot_padding.c.val)], else_ = rhythmic_event_to_dot_padding_a.c.val)).label('x'),
      literal(0).label('y')
    ]).select_from(dots.outerjoin(rhythmic_event_to_dot_padding, onclause = rhythmic_event_to_dot_padding.c.id == dots.c.id)).\
    where(safe_eq_comp(dots.c.id, id)).\
    where(dots.c.id == font_name.c.id).\
    where(dots.c.id == font_size.c.id).\
    where(dots.c.id == rhythmic_head_width.c.id).\
    where(dots.c.val > 0).\
    where(rhythmic_event_to_dot_padding_a.c.id == -1).\
    cte(name='dots_to_stencil', recursive = True)

    self.register_stmt(dots_to_stencil)

    dots_to_stencil_left = dots_to_stencil.alias(name="dots_to_stencil_left")

    # Recursive part: one more dot per step until sub_id + 1 reaches dots.val.
    dots_to_stencil = dots_to_stencil.union_all(
      select([
       dots_to_stencil_left.c.id,
       dots_to_stencil_left.c.writer,
       dots_to_stencil_left.c.sub_id + 1,
       dots_to_stencil_left.c.font_name,
       dots_to_stencil_left.c.font_size,
       dots_to_stencil_left.c.unicode,
       (dots_to_stencil_left.c.x + (1.0 * dot_width.c.val / dots.c.val)),
       literal(0).label('y')
     ]).\
     where(dots.c.id == dot_width.c.id).\
     where(dots.c.id == dots_to_stencil_left.c.id).\
     where(dots.c.val > dots_to_stencil_left.c.sub_id + 1)
    )

    self.insert = simple_insert(glyph_stencil, dots_to_stencil)
Example #14
0
    def query_current_year(self, session):
        """Populate per-day registration counts for the current event year.

        Counts paid attendees per registration day (excluding dealers) and
        fills ``self.registrations_per_day`` so that the last element is
        the day before c.ESCHATON, then updates the cumulative sum.
        """
        self.event_name = c.EVENT_NAME_AND_YEAR

        # TODO: we're hacking the timezone info out of ESCHATON (final day of event). probably not the right thing to do
        self.end_date = c.DATES['ESCHATON'].replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None)

        # Registration timestamp truncated to midnight; hoisted because the
        # same expression was repeated four times in the original query.
        reg_day = func.date_trunc(literal('day'), Attendee.registered)

        # return registrations where people actually paid money
        # exclude: dealers
        reg_per_day = session.query(reg_day, func.count(reg_day)) \
            .outerjoin(Attendee.group) \
            .filter(
                (
                    (Attendee.group_id != None) &
                    (Attendee.paid == c.PAID_BY_GROUP) &  # if they're paid by group
                    (Group.tables == 0) &                 # make sure they aren't dealers
                    (Group.amount_paid > 0)               # make sure they've paid something
                ) | (                                     # OR
                    (Attendee.paid == c.HAS_PAID)         # if they're an attendee, make sure they're fully paid
                )
            ) \
            .group_by(reg_day) \
            .order_by(reg_day) \
            .all()

        # now, convert the query's data into the format we need.
        # SQL will skip days without registrations
        # we need all self.num_days_to_report days to have data, even if it's zero

        # create num_days_to_report elements (zero-filled) in the final array
        self.registrations_per_day = self.num_days_to_report * [0]

        for reg_data in reg_per_day:
            day, reg_count = reg_data

            # Index of this day within the reporting window ending at end_date.
            day_offset = self.num_days_to_report - (self.end_date - day).days
            day_index = day_offset - 1

            if day_index < 0 or day_index >= self.num_days_to_report:
                log.info('ignoring some analytics data because it\'s not in range of the year before c.ESCHATON. either c.ESCHATON is set incorrectly or you have registrations starting 1 year before ESCHATON, or occuring after ESCHATON. day_index=' + str(day_index))
                continue

            self.registrations_per_day[day_index] = reg_count

        self.compute_cumulative_sum_from_registrations_per_day()
Example #15
0
    def auto_movie(self, q):
        """Autocomplete movie titles: every complete word must appear in the
        title, the last (partial) word must start some word, ranked by
        trigram similarity to the raw query.
        """
        orig_q = q
        words = q.lstrip().lower().split(' ')
        full_words, last = words[:-1], words[-1]
        if 'the' in full_words:
            full_words.remove('the')

        # Prepend a space so every word boundary (including the first word)
        # can be matched with ' <word>'.
        target = literal(' ').op('||')(Movie.title)

        filters = [target.ilike('%% %s %%' % word) for word in full_words]
        filters.append(target.ilike('%% %s%%' % last))
        clause = and_(*filters) if len(filters) > 1 else filters[0]

        res = self.session.query(Movie.id, Movie.title, Rating.rating)\
                  .outerjoin((Rating, and_(Rating.movie_id == Movie.id,
                                           Rating.user == self.user)))\
                  .filter(clause)\
                  .order_by(func.similarity(func.lower(target), orig_q).desc())\
                  .limit(7).all()
        self.return_json(res)
 def wrapped(*args):
     """Coerce plain Python values to SQL literal clauses before calling *fn*."""
     coerced = [
         arg if isinstance(arg, expression.ClauseElement)
         else expression.literal(arg)
         for arg in args
     ]
     return fn(*coerced)
Example #17
0
    def is_blocked(cls, uri):
        """Return True if the given URI is blocked.

        Each stored ``cls.uri`` value is treated as a SQL LIKE pattern that
        *uri* is matched against.
        """
        # bool() on the result list replaces the original if/else that
        # returned literal True/False; same query, same truth value.
        return bool(cls.query.filter(expression.literal(uri).like(cls.uri)).all())
Example #18
0
 def __init__(self, name, expr=literal(1), distinct=False):
     """Build a COUNT (or COUNT DISTINCT, when *distinct* is true) measure."""
     agg = aggregates.count_distinct if distinct else aggregates.count
     super(CountMeasure, self).__init__(name, expr, agg=agg)
Example #19
0
    def aggregate(self, query, field, rel_table, aggregator_name):
        """Build a correlated scalar subquery that aggregates the formatted
        related records of *field* into a single string.

        Returns a literal placeholder string when the named aggregator is
        not defined in the schema configuration.
        """
        logger.info('aggregating field %s on %s using %s', field, rel_table, aggregator_name)
        specify_model = datamodel.get_table(field.relatedModelName, strict=True)
        aggregatorNode = self.getAggregatorDef(specify_model, aggregator_name)
        if aggregatorNode is None:
            logger.warn("aggregator is not defined")
            return literal("<Aggregator not defined.>")
        logger.debug("using aggregator: %s", ElementTree.tostring(aggregatorNode))
        # Aggregator attributes are all optional in the definition XML.
        formatter_name = aggregatorNode.attrib.get('format', None)
        separator = aggregatorNode.attrib.get('separator', ',')
        order_by = aggregatorNode.attrib.get('orderfieldname', '')

        orm_table = getattr(models, field.relatedModelName)
        order_by = [getattr(orm_table, order_by)] if order_by != '' else []

        # Column on the related table that points back at rel_table.
        join_column = list(inspect(getattr(orm_table, field.otherSideName)).property.local_columns)[0]
        subquery = QueryConstruct(
            collection=query.collection,
            objectformatter=self,
            query=orm.Query([]).select_from(orm_table) \
                             .filter(join_column == getattr(rel_table, rel_table._id)) \
                             .correlate(rel_table)
        )
        subquery, formatted = self.objformat(subquery, orm_table, formatter_name)
        # NOTE(review): group_concat/blank_nulls semantics inferred from
        # their names (join with separator; map NULL to blank) — confirm in
        # their definitions.
        aggregated = blank_nulls(group_concat(formatted, separator, *order_by))
        return subquery.query.add_column(aggregated).as_scalar()
Example #20
0
def get_word_data(word):
    """For a given word (expressed as a string) return various information
    about it : its name, the publication who should ignore it, and if
    it is to be ignored by all publications."""
    # Note : There should always be ONE word as a result.
    # Sure, a word can be proper and common at the same time,
    # e.g., mobile (the adjective) and Mobile (the town).
    # However, proper nouns are to be recorded with a capital
    # first letter, whereas common nouns are lowered before
    # being inserted. (See the get_stats function of the analyze module).
    # literal(1) == 1 is an always-true ON clause: every Publication row is
    # paired with the word (a cross join), then Forbidden is outer-joined
    # so missing rows mean "not forbidden".
    res = db_session.query(Word,
                           Forbidden.word_id,
                           Forbidden.publication_id,
                           Publication.id,
                           Publication.name,
                           Forbidden.publication_id == Publication.id).\
            join(Publication, literal(1) == 1).\
            outerjoin(Forbidden, Forbidden.word_id == Word.id).\
            filter(Word.word == word).all()
    if not res:
        raise NonExistingDataException("Word " + word + " does not exist.")
    else:
        result = {}
        # Forbidden for all publications: a Forbidden row exists (word_id
        # not NULL) with no specific publication attached.
        result['forbidden_all'] = res[0][1] is not None and res[0][2] is None
        result['word'] = res[0][0]
        result['publications'] = [{'id' : r[3],
                                   'forbidden' : r[5],
                                   'name' : r[4]} for r in res]
        return result
def get_idea_events(event_filter=None):
    """Return a query over idea events: workflow state changes united with
    comments, newest first.

    :param event_filter: optional iterable of event labels to keep.
    :return: SQLAlchemy query sorted by descending date.
    """
    # get a mixture of Comments & comments sorted by submission_date
    q1 = session.query(WFCommentData.submission_date.label('date'),
                       UserData.uid.label('user_uid'),
                       StateData.label.label('event'),
                       IdeaData.id.label('idea_id'))
    q1 = q1.join(WFCommentData.to_state,
                 WFCommentData.created_by,
                 WFCommentData.idea_wf,
                 IdeaWFContextData.idea)

    # Comments carry a constant 'COMMENT' event label so both halves of
    # the union share the same column shape.
    q2 = session.query(CommentData.submission_date.label('date'),
                       UserData.uid.label('user_uid'),
                       literal(u'COMMENT').label('event'),
                       IdeaData.id.label('idea_id'))
    q2 = q2.join(CommentData.created_by, CommentData.idea,
                 IdeaData.wf_context, IdeaWFContextData.state)
    # mask comments for ideas that are not in a published state
    q2 = q2.filter(StateData.label.in_(get_workflow().get_published_states()))

    q = q1.union(q2)

    # mask ideas that are currently in the dsig_basket_state
    q = q.filter(~IdeaData.id.in_(get_dsig_basket_state_ideas()))

    if event_filter:
        q = q.filter(column('event').in_(event_filter))
    q = q.order_by(desc('date'))
    return q
    def mostFrequentVariableAndValue(self, variableNameList):
        """Return ``{variableName: value}`` for the most frequently observed
        (variable, value) pair across *variableNameList*, or None when no
        matching data exists.

        :type variableNameList: list(str)
        :raises EmptyVariableNameListError: if *variableNameList* is empty.
        """
        subQueryList = []

        if not variableNameList:
            raise EmptyVariableNameListError()

        with closing(self._sessionMaker()) as session:
            # For each variable, retrieve all possible values and their occurrence count.
            for variableName in variableNameList:
                variableNameColumn = literal(variableName).label(self._VARIABLE_NAME_KEY)
                variableValueColumn = getattr(SQLModsecurityAuditEntryMessage, variableName).label(self._VARIABLE_VALUE_KEY)
                variableValueCountColumn = count().label(self._VARIABLE_VALUE_COUNT_KEY)

                # Subquery of each variable.
                subQuery = self._makeQuery(session, [variableNameColumn, variableValueColumn, variableValueCountColumn])
                subQuery = subQuery.group_by(self._VARIABLE_NAME_KEY, self._VARIABLE_VALUE_KEY)
                subQueryList.append(subQuery)

            # Merge all subqueries, sort by reverse count and keep the top
            # row.  (The original applied the same order_by/limit twice —
            # the second call was redundant and has been removed.)
            query = union(*subQueryList).order_by(desc(self._VARIABLE_VALUE_COUNT_KEY)).limit(1)

            # ... then pick the first (only) row.
            item = session.execute(query).fetchone()

            if item is not None:
                return {str(item.variableName): item.variableValue}
            return None
Example #23
0
def recordset(collection, user, user_agent, recordset_info):
    """Create a record set from the records matched by a query.

    Builds a RecordSet row, then bulk-inserts the ids of every matching
    record via an INSERT ... FROM SELECT. Returns the new record set id.
    """
    spquery = recordset_info['fromquery']
    tableid = spquery['contexttableid']

    with models.session_context() as session:
        rs = models.RecordSet()
        rs.timestampCreated = datetime.now()
        rs.version = 0
        rs.collectionMemberId = collection.id
        rs.dbTableId = tableid
        rs.name = recordset_info['name']
        if 'remarks' in recordset_info:
            rs.remarks = recordset_info['remarks']
        rs.type = 0
        rs.createdByAgentID = user_agent.id
        rs.SpecifyUserID = user.id
        session.add(rs)
        session.flush()  # obtain the generated primary key
        new_rs_id = rs.recordSetId

        model = models.models_by_tableid[tableid]
        id_field = getattr(model, model._id)
        field_specs = field_specs_from_json(spquery['fields'])

        base_query, __ = build_query(session, collection, user, tableid, field_specs)
        # Pair each distinct matching record id with the new record set id.
        select_query = base_query.with_entities(id_field, literal(new_rs_id)).distinct()
        RSI = models.RecordSetItem
        session.execute(
            insert(RSI).from_select((RSI.recordId, RSI.RecordSetID), select_query))

    return new_rs_id
Exemple #24
0
    def list_worksheets(self, owner_id=None):
        '''
        Return a list of row dicts, one per worksheet. These dicts do NOT contain
        ALL worksheet items; this method is meant to make it easy for a user to see
        the currently existing worksheets. Included worksheet items are those that
        define metadata that one will likely want to see in a list view (e.g. title).
        '''
        # Columns shared by both query shapes; permission normally comes
        # from the group-permission table.
        cols_to_select = [cl_worksheet.c.id,
                          cl_worksheet.c.uuid,
                          cl_worksheet.c.name,
                          cl_worksheet.c.owner_id,
                          cl_group_object_permission.c.permission]
        if owner_id is None:
            # query for public worksheets
            stmt = select(cols_to_select).\
                where(cl_worksheet.c.uuid == cl_group_object_permission.c.object_uuid).\
                where(cl_group_object_permission.c.group_uuid == self.public_group_uuid)
        else:
            # query for worksheets owned by owner_id
            cols1 = cols_to_select[:4]
            # Owners implicitly get full permission on their own worksheets.
            cols1.extend([literal(GROUP_OBJECT_PERMISSION_ALL).label('permission')])
            stmt1 = select(cols1).where(cl_worksheet.c.owner_id == owner_id)
            # query for worksheets visible to owner_id or co-owned by owner_id
            stmt2_groups = select([cl_user_group.c.group_uuid]).\
                where(cl_user_group.c.user_id == owner_id)
            stmt2 = select(cols_to_select).\
                where(cl_worksheet.c.uuid == cl_group_object_permission.c.object_uuid).\
                where(or_(
                    cl_group_object_permission.c.group_uuid.in_(stmt2_groups),
                    cl_group_object_permission.c.group_uuid == self.public_group_uuid)).\
                where(cl_worksheet.c.owner_id != owner_id)
            stmt = union(stmt1, stmt2)

        with self.engine.begin() as connection:
            rows = connection.execute(stmt).fetchall()
            if not rows:
                return []
            uuids = set(row.uuid for row in rows)
            # Fetch only the list-view metadata items (title/description)
            # for the worksheets found above.
            item_rows = connection.execute(
                cl_worksheet_item.select().\
                where(cl_worksheet_item.c.worksheet_uuid.in_(uuids)).\
                where(or_(
                    cl_worksheet_item.c.type == 'title',
                    cl_worksheet_item.c.type == 'description'))
            ).fetchall()

        # Attach each item to its worksheet dict; worksheets sorted by id.
        row_dicts = [dict(row) for row in sorted(rows, key=lambda item: item['id'])]
        uuid_index_map = {}
        for i in range(0, len(row_dicts)):
            row_dict = row_dicts[i]
            row_dict.update({'items': []})
            uuid_index_map[row_dict['uuid']] = i
        for item_row in item_rows:
            idx = uuid_index_map.get(item_row.worksheet_uuid, -1)
            if idx < 0:
                # An item must always belong to a fetched worksheet.
                raise IntegrityError('Got item %s without worksheet' % (item_row,))
            row_dicts[idx]['items'].append(dict(item_row))

        return row_dicts
 def where_clause_fn(id) :
   """Build an EXISTS predicate linking *id* through the recursive anchors CTE."""
   anchors = select([literal(id).label('id')]).cte(name="anchors", recursive=True)
   previous = anchors.alias(name='stmt_prev')
   # Recursive step: follow anchor.val back to ids produced so far.
   anchors = anchors.union_all(
     select([anchor.c.id]).where(anchor.c.val == previous.c.id)
   )
   return exists(select([anchors.c.id]).where(anchored_table.c.id == anchors.c.id))
Exemple #26
0
    def unreads_exist(cls, user):
        """Return True if the given user has any active unreads, else False."""
        unread_query = (Session.query(cls)
                        .filter_by(account_id=user._id, active=True))
        # EXISTS probe: a literal(True) row comes back only when the
        # filtered query matches at least one row.
        found = Session.query(literal(True)).filter(unread_query.exists()).scalar()
        return found is not None
Exemple #27
0
    def objformat(self, query, orm_table, formatter_name, join_cache=None):
        """Apply the named data-object formatter definition to *orm_table*.

        Returns (query, expr) where *expr* is a SQL expression producing
        the formatted string (coalesced to ''), and *query* may have
        gained the joins required by the formatter's field paths.
        """
        logger.info('formatting %s using %s', orm_table, formatter_name)
        specify_model = datamodel.get_table(inspect(orm_table).class_.__name__, strict=True)
        formatterNode = self.getFormatterDef(specify_model, formatter_name)
        if formatterNode is None:
            # No formatter configured for this table: emit a placeholder.
            logger.warn("no dataobjformatter for %s", specify_model)
            return query, literal("<Formatter not defined.>")
        logger.debug("using dataobjformatter: %s", ElementTree.tostring(formatterNode))

        def case_value_convert(value): return value

        switchNode = formatterNode.find('switch')
        single = switchNode.attrib.get('single', 'true') == 'true'
        if not single:
            # Multiple cases switch on a control field; boolean control
            # fields need their 'true'/'false' case strings converted.
            sp_control_field = specify_model.get_field(switchNode.attrib['field'])
            if sp_control_field.type == 'java.lang.Boolean':
                def case_value_convert(value): return value == 'true'

        def make_expr(query, fieldNode):
            # Build the SQL expression for one <field> node, adding joins
            # for its dotted path; relationship fields recurse.
            path = fieldNode.text.split('.')
            query, table, model, specify_field = build_join(query, specify_model, orm_table, path, join_cache)
            if specify_field.is_relationship:
                formatter_name = fieldNode.attrib.get('formatter', None)
                query, expr = self.objformat(query, table, formatter_name, join_cache)
            else:
                expr = self._fieldformat(specify_field, getattr(table, specify_field.name))

            if 'format' in fieldNode.attrib:
                expr = self.pseudo_sprintf(fieldNode.attrib['format'], expr)

            if 'sep' in fieldNode.attrib:
                expr = concat(fieldNode.attrib['sep'], expr)

            return query, coalesce(expr, '')

        def make_case(query, caseNode):
            # Concatenate all <field> expressions of one <fields> case.
            field_exprs = []
            for node in caseNode.findall('field'):
                query, expr = make_expr(query, node)
                field_exprs.append(expr)

            expr = concat(*field_exprs) if len(field_exprs) > 1 else field_exprs[0]
            return query, case_value_convert(caseNode.attrib.get('value', None)), expr

        cases = []
        for caseNode in switchNode.findall('fields'):
            query, value, expr = make_case(query, caseNode)
            cases.append((value, expr))

        if single:
            value, expr = cases[0]
        else:
            # SQL CASE over the control field selects among the case exprs.
            control_field = getattr(orm_table, switchNode.attrib['field'])
            expr = case(cases, control_field)

        return query, coalesce(expr, '')
Exemple #28
0
    def list_worksheets(self, user_id=None):
        '''
        Return a list of row dicts, one per worksheet. These dicts do NOT contain
        ALL worksheet items; this method is meant to make it easy for a user to see
        their existing worksheets.
        '''
        cols_to_select = [cl_worksheet.c.id,
                          cl_worksheet.c.uuid,
                          cl_worksheet.c.name,
                          cl_worksheet.c.owner_id,
                          cl_group_object_permission.c.permission]
        cols1 = cols_to_select[:4]
        # Owned worksheets implicitly carry full permission.
        cols1.extend([literal(GROUP_OBJECT_PERMISSION_ALL).label('permission')])
        if user_id == self.root_user_id:
            # query all worksheets
            stmt = select(cols1)
        elif user_id is None:
            # query for public worksheets (only used by the webserver when user is not logged in)
            stmt = select(cols_to_select).\
                where(cl_worksheet.c.uuid == cl_group_object_permission.c.object_uuid).\
                where(cl_group_object_permission.c.group_uuid == self.public_group_uuid)
        else:
            # 1) Worksheets owned by owner_id
            stmt1 = select(cols1).where(cl_worksheet.c.owner_id == user_id)

            # 2) Worksheets visible to owner_id or co-owned by owner_id
            stmt2_groups = select([cl_user_group.c.group_uuid]).\
                where(cl_user_group.c.user_id == user_id)
            # List worksheets where one of our groups has permission.
            stmt2 = select(cols_to_select).\
                where(cl_worksheet.c.uuid == cl_group_object_permission.c.object_uuid).\
                where(or_(
                    cl_group_object_permission.c.group_uuid.in_(stmt2_groups),
                    cl_group_object_permission.c.group_uuid == self.public_group_uuid)).\
                where(cl_worksheet.c.owner_id != user_id)  # Avoid duplicates

            stmt = union(stmt1, stmt2)

        with self.engine.begin() as connection:
            rows = connection.execute(stmt).fetchall()
            if not rows:
                return []

        # Get permissions of the worksheets
        worksheet_uuids = [row.uuid for row in rows]
        uuid_group_permissions = dict(zip(worksheet_uuids, self.batch_get_group_permissions(worksheet_uuids)))

        # Put the permissions into the worksheets, sorted by id.
        row_dicts = []
        for row in sorted(rows, key=lambda item: item['id']):
            row = dict(row)
            row['group_permissions'] = uuid_group_permissions[row['uuid']]
            row_dicts.append(row)

        return row_dicts
Exemple #29
0
 def apply_wildcard(cls, session, domain_name):
     """Find the most specific DomainProfile whose stored pattern matches.

     Stored names may begin with '*'; that is rewritten to the SQL LIKE
     wildcard '%' and compared against *domain_name*. Returns the first
     match ordered by name length (most specific), or None.
     """
     qry = session.query(DomainProfile)
     # Convert stored * to LIKE operator %.
     # FIX: use raw strings — '\*' is an invalid escape sequence that
     # raises a DeprecationWarning/SyntaxWarning on modern Python.
     replace_percent = func.regexp_replace(DomainProfile.name,
                                           r'^\*', '%')
     # Since the pattern is stored in the database,
     # we swap the usual order of comparison
     qry = qry.filter(literal(domain_name).ilike(replace_percent))
     # Order by length, so that we return the most-specific name
     qry = qry.order_by(func.length(DomainProfile.name))
     return qry.first()
Exemple #30
0
 def where_clause(self, expression, values=None):
     """Compile *expression* into a SQLAlchemy clause element.

     Already-compiled clause elements pass through unchanged and None
     yields None; anything else is compiled, walked with *values*, and
     wrapped in a literal if the walk produced a plain Python value.
     """
     if isinstance(expression, expr.ClauseElement):
         return expression
     if expression is None:
         return None
     bindings = values or {}
     clause = self._walk(bindings, _compile(expression))
     if isinstance(clause, expr.ClauseElement):
         return clause
     return expr.literal(clause)
Exemple #31
0
    def write_dag(cls,
                  dag: DAG,
                  min_update_interval: Optional[int] = None,
                  session: Session = None) -> bool:
        """Serializes a DAG and writes it into database.
        If the record already exists, it checks if the Serialized DAG changed or not. If it is
        changed, it updates the record, ignores otherwise.

        :param dag: a DAG to be written into database
        :param min_update_interval: minimal interval in seconds to update serialized DAG
        :param session: ORM Session

        :returns: Boolean indicating if the DAG was written to the DB
        """
        # Checks if (Current Time - Time when the DAG was written to DB) < min_update_interval
        # If Yes, does nothing
        # If No or the DAG does not exists, updates / writes Serialized DAG to DB
        if min_update_interval is not None:
            # EXISTS-style probe: the literal(True) row only comes back
            # when a recent-enough serialized row exists.
            if (session.query(literal(True)).filter(
                    and_(
                        cls.dag_id == dag.dag_id,
                        (timezone.utcnow() -
                         timedelta(seconds=min_update_interval)) <
                        cls.last_updated,
                    )).first() is not None):
                # TODO: .first() is not None can be changed to .scalar() once we update to sqlalchemy 1.4+
                # as the associated sqlalchemy bug for MySQL was fixed
                # related issue : https://github.com/sqlalchemy/sqlalchemy/issues/5481
                return False

        log.debug("Checking if DAG (%s) changed", dag.dag_id)
        new_serialized_dag = cls(dag)
        # Compare content hashes so unchanged DAGs skip the write.
        serialized_dag_hash_from_db = session.query(
            cls.dag_hash).filter(cls.dag_id == dag.dag_id).scalar()

        if serialized_dag_hash_from_db == new_serialized_dag.dag_hash:
            log.debug(
                "Serialized DAG (%s) is unchanged. Skipping writing to DB",
                dag.dag_id)
            return False

        log.debug("Writing Serialized DAG: %s to the DB", dag.dag_id)
        # merge() performs an INSERT-or-UPDATE on the primary key.
        session.merge(new_serialized_dag)
        log.debug("DAG: %s written to the DB", dag.dag_id)
        return True
Exemple #32
0
def legacy_select_events_context_id(start_day: dt, end_day: dt,
                                    context_id: str) -> Select:
    """Generate a legacy events context id select that also joins states."""
    # This can be removed once we no longer have event_ids in the states table
    stmt = select(
        *EVENT_COLUMNS,
        literal(value=None, type_=sqlalchemy.String).label("shared_data"),
        *STATE_COLUMNS,
        NOT_CONTEXT_ONLY,
    )
    stmt = stmt.outerjoin(States, (Events.event_id == States.event_id))
    stmt = stmt.where((States.last_updated == States.last_changed)
                      | States.last_changed.is_(None))
    stmt = stmt.where(_not_continuous_entity_matcher())
    stmt = stmt.outerjoin(
        StateAttributes,
        (States.attributes_id == StateAttributes.attributes_id))
    stmt = stmt.where((Events.time_fired > start_day)
                      & (Events.time_fired < end_day))
    return stmt.where(Events.context_id == context_id)
    def mostFrequentVariableAndValue(self, variableNameList):
        """Return the single most frequent (variable, value) pair.

        :type variableNameList: list(str)
        :returns: dict mapping the variable name to its most frequent
            value, or None when no rows matched
        :raises EmptyVariableNameListError: if variableNameList is empty
        """
        subQueryList = []

        if len(variableNameList) == 0:
            raise EmptyVariableNameListError()

        with closing(self._sessionMaker()) as session:
            # For each variable, retrieve all possible values and their occurrence count.
            for variableName in variableNameList:
                variableNameColumn = literal(variableName).label(
                    self._VARIABLE_NAME_KEY)
                variableValueColumn = getattr(SQLModsecurityAuditEntryMessage,
                                              variableName).label(
                                                  self._VARIABLE_VALUE_KEY)
                variableValueCountColumn = count().label(
                    self._VARIABLE_VALUE_COUNT_KEY)

                # Subquery of each variable.
                subQuery = self._makeQuery(session, [
                    variableNameColumn, variableValueColumn,
                    variableValueCountColumn
                ])
                subQuery = subQuery.group_by(self._VARIABLE_NAME_KEY,
                                             self._VARIABLE_VALUE_KEY)
                subQueryList.append(subQuery)

            # Merge all subqueries and keep the row with the highest count.
            # (A second, redundant order_by/limit call was removed here.)
            query = union(*subQueryList).order_by(
                desc(self._VARIABLE_VALUE_COUNT_KEY)).limit(1)

            # ... then pick the first one.
            item = session.execute(query).fetchone()

            if item is not None:
                return {str(item.variableName): item.variableValue}
            else:
                return None
    def expect_column_values_to_not_match_regex_list(self,
                                                     column,
                                                     regex_list,
                                                     mostly=None,
                                                     result_format=None,
                                                     include_config=True,
                                                     catch_exceptions=None,
                                                     meta=None):
        """Build a conjunction asserting *column* matches none of *regex_list*."""
        # The dialect supplies its negative-regex operator, if it has one.
        regex_fn = self._get_dialect_regex_fn(positive=False)
        if regex_fn is None:
            logger.warning("Regex is not supported for dialect %s" %
                           str(self.engine.dialect))
            raise NotImplementedError

        conditions = []
        for regex in regex_list:
            conditions.append(
                BinaryExpression(sa.column(column), literal(regex),
                                 custom_op(regex_fn)))
        return sa.and_(*conditions)
Exemple #35
0
def make_recordset(request):
    """Create a RecordSet from the query posted in the request body.

    Expects JSON with 'fromquery' (an spquery carrying 'contexttableid'
    and 'fields') and 'name'; bulk-inserts the matching record ids and
    redirects to the new record set resource.
    """
    try:
        recordset_info = json.load(request)
    except ValueError as e:
        return HttpResponseBadRequest(e)

    spquery = recordset_info['fromquery']
    tableid = spquery['contexttableid']

    with models.session_context() as session:
        recordset = models.RecordSet()
        recordset.timestampCreated = datetime.now()
        recordset.version = 0
        recordset.collectionMemberId = request.specify_collection.id
        recordset.dbTableId = tableid
        recordset.name = recordset_info['name']
        if 'remarks' in recordset_info:
            recordset.remarks = recordset_info['remarks']
        recordset.type = 0
        recordset.createdByAgentID = request.specify_user_agent.id
        recordset.SpecifyUserID = request.specify_user.id
        session.add(recordset)
        # Flush to obtain the generated primary key.
        session.flush()
        new_rs_id = recordset.recordSetId

        model = models.models_by_tableid[tableid]
        id_field = getattr(model, model._id)

        # Rebuild the query's field specs in display ('position') order.
        field_specs = [
            QueryField.from_spqueryfield(EphemeralField.from_json(data))
            for data in sorted(spquery['fields'],
                               key=lambda field: field['position'])
        ]

        query, __ = build_query(session, request.specify_collection,
                                request.specify_user, tableid, field_specs)
        # INSERT ... SELECT of distinct (record id, record set id) pairs.
        query = query.with_entities(id_field, literal(new_rs_id)).distinct()
        RSI = models.RecordSetItem
        ins = insert(RSI).from_select((RSI.recordId, RSI.RecordSetID), query)
        session.execute(ins)

    return HttpResponseRedirect(uri_for_model('recordset', new_rs_id))
Exemple #36
0
    def get_average_age(self,
                        gender: str) -> Tuple[List[Tuple[Any]], List[str]]:
        """Average age per gender plus an aggregate 'both' row, filtered
        down to the requested gender. Reads the Person and DayOfBirth
        tables."""
        session = self.__get_session()

        per_gender = session.query(
            Person.gender.label('gender'),
            func.avg(DayOfBirth.age).label('age')) \
            .select_from(Person) \
            .join(DayOfBirth) \
            .group_by(Person.gender)

        # Aggregate row covering every person, labelled 'both'.
        overall = session.query(
            literal('both').label('gender'),
            func.avg(DayOfBirth.age).label('age')) \
            .select_from(Person) \
            .join(DayOfBirth)

        combined = per_gender.union_all(overall).subquery()

        rounded = session.query(
            combined.c.gender.label('Gender'),
            func.round(combined.c.age, 2).label('Average_age')) \
            .select_from(combined) \
            .filter(combined.c.gender == gender)
        return self.__get_query_result(rounded)
Exemple #37
0
    def _insert_trash_entities(self, target, status_id):
        """Bulk-insert TrashContent rows for all AVAILABLE children of *target*.

        Executes a single INSERT ... SELECT: every child content whose
        status is AVAILABLE gets a TrashContent row stamped with the
        current time.
        """
        insert_columns = (
            TrashContent.content_id,
            TrashContent.created_at,
        )
        select_columns = (
            Content.id,
            # Timestamp computed once in Python, shared by every row.
            literal(datetime.utcnow()).label("created_at"),
        )
        # NOTE(review): the filter hard-codes Content.status.AVAILABLE while
        # a status_id parameter is also passed in but never used — confirm
        # whether the parameter is intentionally ignored.
        children_query = self._children_query(
            target, select_columns
        ).filter_by(
            status_id=Content.status.AVAILABLE
        )

        self._session.execute(
            insert(TrashContent).from_select(
                insert_columns, children_query
            )
        )
Exemple #38
0
def participant_autocomplete(request):
    """Autocomplete discussion participants by name/username.

    Short keywords (< 6 chars) use a fuzzy ILIKE plus Jaro-Winkler
    re-ranking; longer keywords use full-text search. Returns at most
    `limit` results as {'results': [{'id', 'text'}, ...]}.
    """
    ctx = request.context
    keyword = request.GET.get('q')
    if not keyword:
        raise HTTPBadRequest("please specify search terms (q)")
    limit = request.GET.get('limit', 20)
    try:
        limit = int(limit)
    # FIX: was a bare `except:` which also swallowed SystemExit /
    # KeyboardInterrupt; only conversion failures should 400.
    except (TypeError, ValueError):
        raise HTTPBadRequest("limit must be an integer")
    if limit > 100:
        raise HTTPBadRequest("be reasonable")
    query = AgentProfile.default_db.query(
            AgentProfile.id, AgentProfile.name, User.username
        ).outerjoin(User).filter((User.verified == True) | (User.id == None))
    discussion = ctx.get_instance_of_class(Discussion)
    if discussion:
        query = query.filter(AgentProfile.id.in_(
            discussion.get_participants_query(True, True).subquery()))

    if len(keyword) < 6:
        # Fuzzy match: interleave '%' between characters, then re-rank by
        # Jaro-Winkler similarity against both name and username.
        query = query.add_column(literal(0))
        matchstr = '%'.join(keyword)
        matchstr = '%'.join(('', matchstr, ''))
        agents = query.filter(AgentProfile.name.ilike(matchstr) |
                             User.username.ilike(matchstr)
            ).limit(limit * 5).all()
        agents.sort(key=lambda u: max(
            jaro_winkler(u[1], keyword),
            jaro_winkler(u[2], keyword) if u[2] else 0
            ), reverse=True)
        num = min(len(agents), limit)
        agents = agents[:num]
    else:
        matchstr = keyword
        query, rank = add_simple_text_search(
            query, [AgentProfile.name], keyword.split())
        agents = query.order_by(rank.desc()).limit(limit).all()
    # NOTE(review): rows are unpacked as 4-tuples; this assumes the
    # text-search branch also yields a rank column — confirm against
    # add_simple_text_search.
    return {'results': [{
        'id': AgentProfile.uri_generic(id),
        'text': name} for (id, name, username, rank) in agents]}
Exemple #39
0
                def decorated(*fargs, **fkwargs):
                    """Per-request auth wrapper: API-key (uuid+site) or HTTP basic auth."""
                    # First try key-based auth: a 'uuid' query arg plus the
                    # 'site' route arg must match a Keys row.
                    from pystil.db import Keys
                    uuid = request.args.get('uuid', False)
                    site = request.view_args.get('site', False)

                    if uuid and site:
                        # The stored Keys.host may be a suffix of the site.
                        if (Keys.query.filter((literal(site).like('%' +
                                                                  Keys.host))
                                              & (Keys.key == uuid)).first()):
                            return fun(*fargs, **fkwargs)
                        else:
                            current_app.logger.warn(
                                "Bad uuid attempt on %s uuid %s" %
                                (site, uuid))
                            abort(403)

                    # Fall back to HTTP basic auth.
                    auth = request.authorization
                    if (not auth
                            or not check_auth(auth.username, auth.password)):
                        return authenticate()
                    return fun(*fargs, **fkwargs)
Exemple #40
0
def url_ok(url, acl):
    """Check whether *url* is allowed under ACL *acl*.

    Looks up the first URL mask matching the url (in ACL order) and
    returns its whitelist flag; with no mask match, falls back to
    whether the ACL has a whitelist entry at all.
    """
    session = Session()

    match = session.query(UrlList.whitelist).\
        join(AclContents, AclContents.urlListId == UrlList.id).\
        join(UrlMask, UrlMask.urlListId == UrlList.id).\
        filter(AclContents.aclId == acl, literal(url).like('%' + UrlMask.name + '%')).\
        order_by(AclContents.orderNumber).first()

    if match is None:
        fallback = session.query(UrlList.whitelist).\
            join(AclContents, AclContents.urlListId == UrlList.id).\
            filter(AclContents.aclId == acl, UrlList.whitelist == 1).first()
        Session.remove()
        # No whitelist entry at all means the url passes.
        return fallback is None

    Session.remove()
    for whitelist in match:
        return whitelist
Exemple #41
0
def find_sccp_general_settings(session):
    """Return SCCP general settings as option_name/option_value dicts.

    Appends the voicemail consult extension ('vmexten') when a matching
    extenfeatures/vmusermsg Extension row exists.
    """
    rows = session.query(SCCPGeneralSettings).all()

    voicemail_consult_exten = (session.query(
        literal('vmexten').label('option_name'),
        Extension.exten.label('option_value')).filter(
            and_(Extension.type == 'extenfeatures',
                 Extension.typeval == 'vmusermsg')).first())

    res = []
    for row in rows:
        tmp = {}
        tmp['option_name'] = row.option_name
        tmp['option_value'] = row.option_value
        res.append(tmp)

    # FIX: .first() returns None when no vmusermsg extension exists;
    # previously that raised AttributeError here.
    if voicemail_consult_exten is not None:
        res.append({
            'option_name': voicemail_consult_exten.option_name,
            'option_value': voicemail_consult_exten.option_value
        })

    return res
Exemple #42
0
def exists_in_table(session: Session, table_: Table, *criteria: Any) -> bool:
    """
    Efficiently detect whether matching records exist in *table_*;
    often faster than ``COUNT(*)``.

    Builds ``EXISTS (SELECT * FROM tablename WHERE ...)`` from the
    optional criteria, wrapped per-dialect (SQL Server requires
    ``SELECT 1 WHERE EXISTS ...``; others use ``SELECT EXISTS ...``).

    Args:
        session: SQLAlchemy :class:`Session`, :class:`Engine`, or
            :class:`Connection` object
        table_: SQLAlchemy :class:`Table` object
        criteria: optional SQLAlchemy "where" criteria

    Returns:
        a boolean

    Prototypical use:

    .. code-block:: python

        return exists_in_table(session,
                               table,
                               column(fieldname1) == value2,
                               column(fieldname2) == value2)
    """
    clause = exists().select_from(table_)
    for criterion in criteria:
        clause = clause.where(criterion)

    if session.get_bind().dialect.name == SqlaDialectName.MSSQL:
        # SELECT 1 WHERE EXISTS (SELECT * FROM tablename WHERE ...)
        stmt = select([literal(True)]).where(clause)
    else:
        # SELECT EXISTS (SELECT * FROM tablename WHERE ...)
        stmt = select([clause])

    return bool(session.execute(stmt).scalar())
Exemple #43
0
    def put_data(self,
                 key: bytes,
                 value: bytes,
                 is_result: bool = False) -> None:
        """Upsert (queue, key) -> value using the dialect's native upsert.

        PostgreSQL and MySQL use their ON CONFLICT / ON DUPLICATE KEY
        forms; other backends fall back to check-then-insert/update
        inside a single transaction.
        """
        key = ensure_bytes(key)
        if self.engine.name == "postgresql":
            from sqlalchemy.dialects.postgresql import insert as pg_insert

            insert_stmt = pg_insert(KV).values(queue=self.name,
                                               key=key,
                                               value=value)
            self.engine.execute(
                insert_stmt.on_conflict_do_update(
                    index_elements=[KV.c.queue, KV.c.key],
                    set_={"value": insert_stmt.excluded.value},
                ))
        elif self.engine.name == "mysql":
            from sqlalchemy.dialects.mysql import insert as mysql_insert

            insert_stmt = mysql_insert(KV).values(queue=self.name,
                                                  key=key,
                                                  value=value)
            # FIX: MySQL's insert construct has no on_conflict_do_update();
            # the correct API is on_duplicate_key_update().
            self.engine.execute(
                insert_stmt.on_duplicate_key_update(
                    value=insert_stmt.inserted.value))
        else:
            with self.engine.begin() as conn:
                exists = conn.execute(
                    self.kvs(select,
                             literal(True)).where(KV.c.key == key)).scalar()
                if exists is None:
                    query = KV.insert().values(queue=self.name,
                                               key=key,
                                               value=value)
                else:
                    query = self.kvs(update).where(KV.c.key == key).values(
                        value=value)
                conn.execute(query)
Exemple #44
0
def _path_path_property(path):
    """
    Build a scalar subquery that yields the '/'-joined full path string
    of *path* by walking parent links with a recursive CTE.

    Equivalent SQL:

    WITH RECURSIVE closure(depth, basename, parent) AS (
        SELECT 0 as depth, path.basename as basename, path.parent_id as parent
        from path
        where path.id = 874758
        union all
        select depth + 1 as depth, path.basename as basename, path.parent_id as parent
        from path join closure on closure.parent = path.id
    )   
    select group_concat(basename, '/') from (
        select basename from closure order by depth desc
    )
    """

    cte_path = aliased(Path)

    # Anchor: the row itself at depth 0.
    cte = (select([
        cte_path.id.label('target_id'),
        cte_path.parent_id.label('parent_id'),
        cte_path.basename.label('basename'),
        literal(0).label('depth'),
    ]).cte(name='path_cte', recursive=True))
    # Recursive step: walk up to each parent, incrementing depth.
    cte = (cte.union_all(
        select([
            cte.c.target_id,
            cte_path.parent_id.label('parent_id'),
            cte_path.basename.label('basename'),
            (cte.c.depth + 1).label('depth'),
        ]).select_from(cte.join(cte_path, cte.c.parent_id == cte_path.id))))

    # Order deepest ancestor first, correlated to the outer path row,
    # then aggregate the basenames with '/'.
    sub = (select([
        cte.c.basename
    ]).where(cte.c.target_id == path.id).correlate(path).order_by(
        cte.c.depth.desc()).alias(name='basenames'))
    q = select([string_agg(sub.c.basename, '/')])

    return q
Exemple #45
0
    def save(self):
        """Persist the extracted data unless an identical payload exists.

        Dedupes on the MD5 hex digest of the raw payload; silently no-ops
        when data/raw are missing or the hash is already stored.
        """
        data = self.get_data()
        if not data:
            return

        raw = self.get_raw_data()
        if not raw:
            return

        session = Session()
        # FIX: ensure the session is closed even on the dedupe
        # early-return path (it previously leaked).
        try:
            key = md5(raw.encode()).hexdigest()

            # Skip if an identical payload (same hash) was already saved.
            already_saved = session.query(literal(True)).\
             filter(DJBRExtractedData.hash == key).first()
            if already_saved:
                return

            fullname = ' '.join([data['nombre'], data['apellido']])
            djbr = DJBRExtractedData(fullname, data, key)
            session.add(djbr)
            session.commit()
        finally:
            session.close()
Exemple #46
0
def _make_path_closure():
    """
    Helper function to construct the path closure CTE.

    Produces rows (child_id, parent_id, depth) pairing every path with
    each of its ancestors, including the reflexive pair at depth 0.
    """
    from sqlalchemy.orm import aliased

    # Anchor: every path paired with itself at depth 0.
    recursive = (select([
        Path.id.label("child_id"),
        Path.id.label("parent_id"),
        literal(0).label('depth'),
    ]).cte(name='closure', recursive=True))

    path_alias = aliased(Path)
    r_alias = aliased(recursive, 'r')

    # Recursive step: extend each pair to the children of the current
    # child, keeping the original ancestor and incrementing depth.
    recursive = recursive.union_all(
        select([
            path_alias.id.label('child_id'),
            r_alias.c.parent_id,
            (r_alias.c.depth + 1).label('depth'),
        ]).where(path_alias.parent_id == r_alias.c.child_id))

    return recursive.alias(name='path_closure')
Exemple #47
0
def bool_from_exists_clause(session: Session, exists_clause: Exists) -> bool:
    """
    Convert an SQLAlchemy ``EXISTS`` clause to a boolean answer, coping
    with dialect inconsistencies.

    SQL Server requires ``SELECT 1 WHERE EXISTS (SELECT 1 FROM ...)``
    (giving 1, or no rows), which is invalid in MySQL because it lacks a
    FROM clause; MySQL and most others require ``SELECT EXISTS (...)``
    (giving 1 or 0), which is invalid syntax for SQL Server.

    See:

    - https://bitbucket.org/zzzeek/sqlalchemy/issues/3212/misleading-documentation-for-queryexists
    - http://docs.sqlalchemy.org/en/latest/orm/query.html#sqlalchemy.orm.query.Query.exists
    """  # noqa
    dialect_name = session.get_bind().dialect.name
    if dialect_name == SqlaDialectName.MSSQL:
        # SQL Server form: SELECT 1 WHERE EXISTS (...)
        raw = session.query(literal(True)).filter(exists_clause).scalar()
    else:
        # MySQL (etc.) form: SELECT EXISTS (...)
        raw = session.query(exists_clause).scalar()
    # Normalize 1/0/None to a genuine bool.
    return bool(raw)
Exemple #48
0
    def get_clusters_per_point_query(self, session, gridpoints, riskdate):
        """Build a query that aggregates, for each grid point, the cases
        falling within the temporal window ending at ``riskdate`` and
        within the configured spatial domain of that point."""
        window_days = self.dycast_parameters.temporal_domain
        end_date = riskdate
        start_date = riskdate - datetime.timedelta(days=window_days)

        points_query = self.get_points_query_from_gridpoints(gridpoints)
        point_geom = points_query.c.point.geom

        # Each matching case is serialized to JSON and collected into an
        # array per grid point.
        case_json = func.json_build_object(
            "case_id",
            Case.id,
            "report_date",
            Case.report_date,
            "location",
            func.ST_AsText(Case.location)
        )

        return session.query(
            func.array_agg(case_json).label('case_array'),
            point_geom.label('point')) \
            .join(points_query, literal(True)) \
            .filter(Case.report_date >= start_date,
                    Case.report_date <= end_date,
                    func.ST_DWithin(Case.location,
                                    point_geom,
                                    self.dycast_parameters.spatial_domain)) \
            .group_by(point_geom)
Exemple #49
0
    Events.context_parent_id.label("context_parent_id"),
)

# Columns selected for state rows; the icon is extracted from either the
# shared-attributes JSON or the old per-state attributes JSON.
STATE_COLUMNS = (
    States.state_id.label("state_id"),
    States.state.label("state"),
    States.entity_id.label("entity_id"),
    SHARED_ATTRS_JSON["icon"].as_string().label("icon"),
    OLD_FORMAT_ATTRS_JSON["icon"].as_string().label("old_format_icon"),
)

# Same shape as STATE_COLUMNS, but both icon columns are selected as NULL,
# skipping the JSON extraction entirely.
STATE_CONTEXT_ONLY_COLUMNS = (
    States.state_id.label("state_id"),
    States.state.label("state"),
    States.entity_id.label("entity_id"),
    literal(value=None, type_=sqlalchemy.String).label("icon"),
    literal(value=None, type_=sqlalchemy.String).label("old_format_icon"),
)

EVENT_COLUMNS_FOR_STATE_SELECT = [
    literal(value=None, type_=sqlalchemy.Text).label("event_id"),
    # We use PSUEDO_EVENT_STATE_CHANGED aka None for
    # state_changed events since it takes up less
    # space in the response and every row has to be
    # marked with the event_type
    literal(value=PSUEDO_EVENT_STATE_CHANGED, type_=sqlalchemy.String).label(
        "event_type"
    ),
    literal(value=None, type_=sqlalchemy.Text).label("event_data"),
    States.last_updated.label("time_fired"),
    States.context_id.label("context_id"),
 def date_trunc_day(*args, **kwargs):
     """Truncate a date/time expression to day precision.

     SQLite has no ``date_trunc``, so fall back to ``date()`` there;
     otherwise emit ``date_trunc('day', ...)``.
     """
     if c.SQLALCHEMY_URL.startswith('sqlite'):
         return func.date(*args, **kwargs)
     return func.date_trunc(literal('day'), *args, **kwargs)
 def not_iregexp(self, other):
     """Negated case-insensitive regular-expression match (``!~*``)."""
     pattern = literal(other)
     return RegexMatchExpression(self.expr, pattern, custom_op('!~*'))
 def select_data(self, task: int):
     """Run one of 14 demonstration queries, selected by ``task`` number.

     Returns a list of rows/values for most tasks, a Query object for
     task 7, and raw fetched rows for tasks 13-14. An unknown task number
     falls through and returns None.
     """
     if task == 1:
         # Items with a non-empty description (filtered in Python).
         # Idiom fix: repr(item) instead of item.__repr__().
         return [repr(item) for item in self.session.query(Item) if item.description]
     elif task == 2:
         # Distinct spheres of departments with more than 200 staff.
         return list({department.sphere for department in self.session.query(Department)
                      if department.staff_amount > 200})
     elif task == 3:
         # Shop addresses starting with 'i'/'I'; shops without an address skipped.
         return [shop.address for shop in filter(lambda x: x.address, self.session.query(Shop))
                 if shop.address.startswith(('i', 'I'))]
     elif task == 4:
         # Names of items belonging to "Furniture" departments.
         return [item.name for item in self.session.query(Item).filter(
             and_(
                 Item.department_id == Department.id,
                 Department.sphere == "Furniture"
             )
         )]
     elif task == 5:
         # Shops having items with a (non-NULL) description.
         return [shop.name for shop in self.session.query(Shop).filter(
             and_(
                 Shop.id == Department.shop_id,
                 Item.department_id == Department.id, Item.description.isnot(None))
         )]
     elif task == 6:
         # (item, sphere, shop) triples via implicit join conditions.
         return [(item.name, department.sphere, shop.name) for item, department, shop
                 in self.session.query(Item, Department, Shop).filter(
             and_(
                 Item.department_id == Department.id,
                 Department.shop_id == Shop.id
             )
         )]
     elif task == 7:
         # Pagination: skip 2 rows, take 2, ordered by name.
         return self.session.query(Item).order_by(Item.name).offset(2).limit(2)
     elif task == 8:
         # Inner join.
         return [(item.name, department.sphere) for item, department
                 in self.session.query(Item, Department).join(Department)]
     elif task == 9:
         # Left outer join (department may be None).
         return [(item.name, department.sphere if department else None) for item, department
                 in self.session.query(Item, Department).outerjoin(Department)]
     elif task == 10:
         # Left outer join the other way (item may be None).
         return [(item.name if item else None, department.sphere) for department, item
                 in self.session.query(Department, Item).outerjoin(Item)]
     elif task == 11:
         # Full outer join (either side may be None).
         return [(item.name if item else None, department.sphere if department else None) for department, item
                 in self.session.query(Department, Item).join(Item, full=True)]
     elif task == 12:
         # Cross join: join condition is the constant TRUE.
         return [(item.name, department.sphere) for department, item
                 in self.session.query(Department, Item).join(Item, literal(True))]
     elif task == 13:
         # Per-shop aggregates, restricted to shops with more than one item.
         return self.session.execute(
             select([
                 Shop.name,
                 func.count(Item.name),
                 func.sum(Item.price),
                 func.max(Item.price),
                 func.min(Item.price),
                 func.avg(Item.price)
             ]).select_from(join(Item, join(Department, Shop))).group_by(Shop.id).having(func.count(Item.id) > 1)
         ).fetchall()
     elif task == 14:
         # Item names aggregated into an array per shop.
         return self.session.execute(
             select([Shop.name, func.array_agg(Item.name)]).select_from(
                 join(Item, join(Department, Shop))
             ).group_by(Shop.id)
         ).fetchall()
Exemple #53
0
    "humidifier",
    "input_datetime",
    "thermostat",
    "water_heater",
}

# Columns common to every state query.
BASE_STATES = [
    States.entity_id,
    States.state,
    States.last_changed,
    States.last_updated,
]
# As BASE_STATES, but last_changed is selected as a NULL literal.
BASE_STATES_NO_LAST_CHANGED = [
    States.entity_id,
    States.state,
    literal(value=None, type_=Text).label("last_changed"),
    States.last_updated,
]
# State queries that replace the attribute columns with NULL literals,
# skipping attribute retrieval entirely.
QUERY_STATE_NO_ATTR = [
    *BASE_STATES,
    literal(value=None, type_=Text).label("attributes"),
    literal(value=None, type_=Text).label("shared_attrs"),
]
QUERY_STATE_NO_ATTR_NO_LAST_CHANGED = [
    *BASE_STATES_NO_LAST_CHANGED,
    literal(value=None, type_=Text).label("attributes"),
    literal(value=None, type_=Text).label("shared_attrs"),
]
# Remove QUERY_STATES_PRE_SCHEMA_25
# and the migration_in_progress check
# once schema 26 is created
Exemple #54
0
    def update_idnum_index_for_upload(
            cls,
            session: SqlASession,
            indexed_at_utc: Pendulum,
            tablechanges: UploadTableChanges) -> None:
        """
        Updates the index for a device's upload.

        - Deletes index entries for records that are on the way out.
        - Creates index entries for records that are on the way in.
        - Should be called after both the Patient and PatientIdNum tables are
          committed; see special ordering in
          :func:`camcops_server.cc_modules.client_api.commit_all`.

        Args:
            session:
                an SQLAlchemy Session
            indexed_at_utc:
                current time in UTC
            tablechanges:
                a :class:`camcops_server.cc_modules.cc_client_api_core.UploadTableChanges`
                object describing the changes to a table
        """  # noqa
        # Work at the Core (Table) level for bulk DELETE/INSERT efficiency.
        # noinspection PyUnresolvedReferences
        indextable = PatientIdNumIndexEntry.__table__  # type: Table
        indexcols = indextable.columns
        # noinspection PyUnresolvedReferences
        idnumtable = PatientIdNum.__table__  # type: Table
        idnumcols = idnumtable.columns
        # noinspection PyUnresolvedReferences
        patienttable = Patient.__table__  # type: Table
        patientcols = patientcols = patienttable.columns

        # Delete the old
        removal_pks = tablechanges.idnum_delete_index_pks
        if removal_pks:
            log.debug("Deleting old ID number indexes: server PKs {}",
                      removal_pks)
            session.execute(
                indextable.delete()
                .where(indextable.c.idnum_pk.in_(removal_pks))
            )

        # Create the new
        addition_pks = tablechanges.idnum_add_index_pks
        if addition_pks:
            log.debug("Adding ID number indexes: server PKs {}", addition_pks)
            # Single INSERT ... FROM SELECT: the index rows are assembled
            # server-side by joining PatientIdNum to its current Patient.
            # noinspection PyPep8,PyProtectedMember
            session.execute(
                indextable.insert().from_select(
                    # Target:
                    [indexcols.idnum_pk,
                     indexcols.indexed_at_utc,
                     indexcols.patient_pk,
                     indexcols.which_idnum,
                     indexcols.idnum_value],
                    # Source:
                    (
                        select([idnumcols._pk,
                                literal(indexed_at_utc),
                                patientcols._pk,
                                idnumcols.which_idnum,
                                idnumcols.idnum_value])
                        .select_from(
                            join(
                                idnumtable,
                                patienttable,
                                # Same device/era pair identifies matching
                                # client-side records.
                                and_(
                                    idnumcols._device_id == patientcols._device_id,  # noqa
                                    idnumcols._era == patientcols._era,
                                    idnumcols.patient_id == patientcols.id,
                                )
                            )
                        )
                        .where(idnumcols._pk.in_(addition_pks))
                        # "== True" is intentional: SQLAlchemy needs the
                        # operator to build the SQL comparison expression.
                        .where(patientcols._current == True)
                    )
                )
            )
Exemple #55
0
def get_diagnosis_inc_exc_report_query(req: CamcopsRequest,
                                       diagnosis_class: Type[DiagnosisBase],
                                       item_class: Type[DiagnosisItemBase],
                                       item_fk_fieldname: str, system: str,
                                       which_idnum: int,
                                       inclusion_dx: List[str],
                                       exclusion_dx: List[str],
                                       age_minimum_y: int,
                                       age_maximum_y: int) -> SelectBase:
    """
    As for get_diagnosis_report_query, but this makes some modifications to
    do inclusion and exclusion criteria.

    - We need a linking number to perform exclusion criteria.
    - Therefore, we use a single ID number, which must not be NULL.

    Args:
        req: the current request (ID number descriptions; user permissions)
        diagnosis_class: ORM class of the diagnosis set table
        item_class: ORM class of the individual diagnosis item table
        item_fk_fieldname: name of the attribute on ``item_class`` that is
            the foreign key to ``diagnosis_class``
        system: constant string emitted as the "system" column
        which_idnum: the single ID number type used for linking and display
        inclusion_dx: diagnosis codes (SQL LIKE patterns) to include
        exclusion_dx: diagnosis codes (SQL LIKE patterns) to exclude
        age_minimum_y: minimum patient age in years, or None for no limit
        age_maximum_y: maximum patient age in years, or None for no limit

    Returns:
        an SQLAlchemy SELECT statement
    """
    # The basics:
    desc = req.get_id_desc(which_idnum) or "BAD_IDNUM"
    select_fields = [
        Patient.surname.label("surname"),
        Patient.forename.label("forename"),
        Patient.dob.label("dob"),
        Patient.sex.label("sex"),
        PatientIdNum.idnum_value.label(desc),
        diagnosis_class.when_created.label("when_created"),
        literal(system).label("system"),
        item_class.code.label("code"),
        item_class.description.label("description"),
    ]
    # Patient -> diagnosis set -> diagnosis item -> ID number, all joined on
    # the client (device/era) identity; "== True" comparisons are intentional
    # SQLAlchemy expression syntax.
    # noinspection PyUnresolvedReferences
    select_from = (
        Patient.__table__.join(
            diagnosis_class.__table__,
            and_(
                diagnosis_class.patient_id == Patient.id,
                diagnosis_class._device_id == Patient._device_id,
                diagnosis_class._era == Patient._era,
                diagnosis_class._current == True,
            )).join(
                item_class.__table__,
                and_(
                    getattr(item_class,
                            item_fk_fieldname) == diagnosis_class.id,
                    item_class._device_id == diagnosis_class._device_id,
                    item_class._era == diagnosis_class._era,
                    item_class._current == True,
                )).join(
                    PatientIdNum.__table__,
                    and_(
                        PatientIdNum.patient_id == Patient.id,
                        PatientIdNum._device_id == Patient._device_id,
                        PatientIdNum._era == Patient._era,
                        PatientIdNum._current == True,
                        PatientIdNum.which_idnum == which_idnum,
                        PatientIdNum.idnum_value.isnot(None),  # NOT NULL
                    )))  # nopep8
    wheres = [
        Patient._current == True,
    ]  # nopep8
    if not req.user.superuser:
        # Restrict to accessible groups
        group_ids = req.user.ids_of_groups_user_may_report_on
        wheres.append(diagnosis_class._group_id.in_(group_ids))
    else:
        group_ids = []  # type: List[int]  # to stop type-checker moaning below

    # Age limits are simple, as the same patient has the same age for
    # all diagnosis rows.
    today = req.today
    if age_maximum_y is not None:
        # Example: max age is 40; earliest (oldest) DOB is therefore 41
        # years ago plus one day (e.g. if it's 15 June 2010, then earliest
        # DOB is 16 June 1969; a person born then will be 41 tomorrow).
        earliest_dob = pendulum_date_to_datetime_date(
            today.subtract(years=age_maximum_y + 1).add(days=1))
        wheres.append(Patient.dob >= earliest_dob)
    if age_minimum_y is not None:
        # Example: min age is 20; latest (youngest) DOB is therefore 20
        # years ago (e.g. if it's 15 June 2010, latest DOB is 15 June 1990;
        # if you're born after that, you're not 20 yet).
        latest_dob = pendulum_date_to_datetime_date(
            today.subtract(years=age_minimum_y))
        wheres.append(Patient.dob <= latest_dob)

    # Diagnosis criteria are a little bit more complex.
    #
    # We can reasonably do inclusion criteria as "show the diagnoses
    # matching the inclusion criteria" (not the more complex "show all
    # diagnoses for patients having at least one inclusion diagnosis",
    # which is likely to be too verbose for patient finding).
    inclusion_criteria = []  # type: List[ColumnElement]
    for idx in inclusion_dx:
        inclusion_criteria.append(item_class.code.like(idx))
    wheres.append(or_(*inclusion_criteria))

    # Exclusion criteria are the trickier: we need to be able to link
    # multiple diagnoses for the same patient, so we need to use a linking
    # ID number.
    if exclusion_dx:
        # Build an aliased copy of the whole join chain for the NOT EXISTS
        # subquery, linked to the outer query via the ID number value.
        # noinspection PyUnresolvedReferences
        edx_items = item_class.__table__.alias("edx_items")
        # noinspection PyUnresolvedReferences
        edx_sets = diagnosis_class.__table__.alias("edx_sets")
        # noinspection PyUnresolvedReferences
        edx_patient = Patient.__table__.alias("edx_patient")
        # noinspection PyUnresolvedReferences
        edx_idnum = PatientIdNum.__table__.alias("edx_idnum")
        edx_joined = (
            edx_items.join(
                edx_sets,
                and_(
                    getattr(edx_items.c,
                            item_fk_fieldname) == edx_sets.c.id,  # noqa
                    edx_items.c._device_id == edx_sets.c._device_id,
                    edx_items.c._era == edx_sets.c._era,
                    edx_items.c._current == True,
                )).join(
                    edx_patient,
                    and_(
                        edx_sets.c.patient_id == edx_patient.c.id,
                        edx_sets.c._device_id == edx_patient.c._device_id,
                        edx_sets.c._era == edx_patient.c._era,
                        edx_sets.c._current == True,
                    )).join(
                        edx_idnum,
                        and_(
                            edx_idnum.c.patient_id == edx_patient.c.id,
                            edx_idnum.c._device_id == edx_patient.c._device_id,
                            edx_idnum.c._era == edx_patient.c._era,
                            edx_idnum.c._current == True,
                            edx_idnum.c.which_idnum == which_idnum,
                        )))
        exclusion_criteria = []  # type: List[ColumnElement]
        for edx in exclusion_dx:
            exclusion_criteria.append(edx_items.c.code.like(edx))
        edx_wheres = [
            edx_items.c._current == True,
            edx_idnum.c.idnum_value == PatientIdNum.idnum_value,
            or_(*exclusion_criteria)
        ]  # nopep8
        # Note the join above between the main and the EXISTS clauses.
        # We don't use an alias for the main copy of the PatientIdNum table,
        # and we do for the EXISTS version. This is fine; e.g.
        # https://msdn.microsoft.com/en-us/library/ethytz2x.aspx example:
        #   SELECT boss.name, employee.name
        #   FROM employee
        #   INNER JOIN employee boss ON employee.manager_id = boss.emp_id;
        if not req.user.superuser:
            # Restrict to accessible groups
            # group_ids already defined from above
            edx_wheres.append(edx_sets.c._group_id.in_(group_ids))
            # ... bugfix 2018-06-19: "wheres" -> "edx_wheres"
        exclusion_select = (select(["*"]).select_from(edx_joined).where(
            and_(*edx_wheres)))
        wheres.append(not_(exists(exclusion_select)))

    query = select(select_fields).select_from(select_from).where(and_(*wheres))
    return query
Exemple #56
0
def get_diagnosis_report_query(req: CamcopsRequest,
                               diagnosis_class: Type[DiagnosisBase],
                               item_class: Type[DiagnosisItemBase],
                               item_fk_fieldname: str,
                               system: str) -> SelectBase:
    """
    Build a SELECT reporting all current diagnoses, one row per diagnosis
    item, with patient details and one column per configured ID number.

    Args:
        req: the current request (supplies ID number definitions and the
            user's reporting permissions)
        diagnosis_class: ORM class of the diagnosis set table
        item_class: ORM class of the individual diagnosis item table
        item_fk_fieldname: name of the attribute on ``item_class`` that is
            the foreign key to ``diagnosis_class``
        system: constant string emitted as the "system" column

    Returns:
        an SQLAlchemy SELECT statement
    """
    # SELECT surname, forename, dob, sex, ...
    select_fields = [
        Patient.surname.label("surname"),
        Patient.forename.label("forename"),
        Patient.dob.label("dob"),
        Patient.sex.label("sex"),
    ]
    from_clause = (
        # FROM patient
        Patient.__table__
        # INNER JOIN dxset ON (dxtable.patient_id == patient.id AND ...)
        .join(
            diagnosis_class.__table__,
            and_(diagnosis_class.patient_id == Patient.id,
                 diagnosis_class._device_id == Patient._device_id,
                 diagnosis_class._era == Patient._era))
        # INNER JOIN dxrow ON (dxrow.fk_dxset = dxset.pk AND ...)
        .join(
            item_class.__table__,
            and_(
                getattr(item_class, item_fk_fieldname) == diagnosis_class.id,
                item_class._device_id == diagnosis_class._device_id,
                item_class._era == diagnosis_class._era)))
    for iddef in req.idnum_definitions:
        n = iddef.which_idnum
        desc = iddef.short_description
        aliased_table = PatientIdNum.__table__.alias("i{}".format(n))
        # ... [also] SELECT i1.idnum_value AS 'NHS' (etc.)
        select_fields.append(aliased_table.c.idnum_value.label(desc))
        # ... [from] OUTER JOIN patientidnum AS i1 ON (...)
        from_clause = from_clause.outerjoin(
            aliased_table,
            and_(
                aliased_table.c.patient_id == Patient.id,
                aliased_table.c._device_id == Patient._device_id,
                aliased_table.c._era == Patient._era,
                # Note: the following are part of the JOIN, not the WHERE:
                # (or failure to match a row will wipe out the Patient from the
                # OUTER JOIN):
                aliased_table.c._current == True,
                aliased_table.c.which_idnum == n,
            ))  # nopep8
    select_fields += [
        diagnosis_class.when_created.label("when_created"),
        literal(system).label("system"),
        item_class.code.label("code"),
        item_class.description.label("description"),
    ]
    # WHERE...
    wheres = [
        Patient._current == True,
        diagnosis_class._current == True,
        item_class._current == True,
    ]  # nopep8
    if not req.user.superuser:
        # Restrict to accessible groups
        group_ids = req.user.ids_of_groups_user_may_report_on
        wheres.append(diagnosis_class._group_id.in_(group_ids))
        # Helpfully, SQLAlchemy will render this as "... AND 1 != 1" if we
        # pass an empty list to in_().
    query = select(select_fields).select_from(from_clause).where(and_(*wheres))
    return query
Exemple #57
0
 def is_blocked(cls, session, uri):
     """Return True if the given URI is blocked."""
     # cls.uri holds the LIKE pattern; match the literal URI against it.
     pattern_hit = expression.literal(uri).like(cls.uri)
     blocked_count = session.query(cls).filter(pattern_hit).count()
     return blocked_count > 0
 def get_close_time_only(self, cases_in_cluster_query):
     """Count pairs of cluster cases whose report dates are close in time."""
     other_cases = cases_in_cluster_query.subquery()
     # Self-join the cluster against itself (ON TRUE); Case.id < other id
     # counts each unordered pair exactly once.
     pair_query = cases_in_cluster_query.join(other_cases, literal(True)) \
         .filter(func.abs(Case.report_date - other_cases.c.report_date) <= self.dycast_parameters.close_in_time,
                 Case.id < other_cases.c.id)
     return database_service.get_count_for_query(pair_query)
Exemple #59
0
    latest_sales = (meta.session.query(model.Sale).join(
        latest_sales_sq,
        and_(
            model.Sale.id == latest_sales_sq.c.id,
            latest_sales_sq.c.row_number == 1,
        )).order_by(model.Sale.id))

    print("window functions to find the latest sale at each store")
    print_table(latest_sales)
    print("")

    sales_by_store = (meta.session.query(
        model.Sale.store_id.label("store_id"),
        func.sum(model.Sale.amount).label("amount"),
    ).group_by(literal(1))).cte("stores")

    sales_report = (meta.session.query(
        func.left(sales_by_store.c.store_id, 4).label("region"),
        func.sum(sales_by_store.c.amount).over(partition_by=func.left(
            sales_by_store.c.store_id, 4)).label("sales_for_region"),
        func.left(sales_by_store.c.store_id, 7).label("state"),
        func.sum(sales_by_store.c.amount).over(partition_by=func.left(
            sales_by_store.c.store_id, 7)).label("sales_for_state"),
        sales_by_store.c.store_id.label("store"),
        sales_by_store.c.amount.label("sales_for_store"),
    ).order_by(
        literal(2).desc(),
        literal(4).desc(),
        literal(6).desc(),
    ))
Exemple #60
0
if __name__ == "__main__":

    # some sort of generic record object with tags. imagine these to be tickets
    # in an issue tracking system
    meta.session.add_all([
        model.Record(tags=["defect", "minor_change", "backend"]),
        model.Record(tags=["defect", "major_change", "backend"]),
        model.Record(tags=["enhancement", "ui/ux", "frontend"]),
    ])
    meta.session.flush()

    # filter the result with &&, the overlaps operator, which says "if either
    # of these two arrays have any elements in common, include them in the
    # output". works like checking against a non-empty set intersection.
    records = (meta.session.query(model.Record).filter(
        model.Record.tags.op("&&")(["defect", "backend"])))

    print("filtering with &&")
    print_table(records)
    print("")

    # unnest takes the array and expands it into rows. this allows you to, for
    # example, calculate statistics on tags, find unique tags, etc.
    counts = (meta.session.query(
        func.unnest(model.Record.tags).label("tag"),
        func.count().label("count"),
    ).group_by(literal(1)).order_by(literal(2).desc(), literal(1)))

    print("unnest and counting tags")
    print_table(counts)