Code example #1
File: database_wrapper.py Project: mattjw/cf4sq
 def get_friendship_from_database( self, userA, userB, crawl_id=None ):
     """
     Input
         userA, userB: user objects.
         crawl_id: (optional) if included, only looks for the friendship 
             in the particular run. otherwise, all copies of the friendship
             are returned.
     
     Retrieves the specified friendship from the database. There may be
     multiple matches because the same friendship may have been mined
     and stored on multiple occasions.
     
     Output  List of friendship objects. If none found, list length is 0.
     """
     userA_id = userA.id
     userB_id = userB.id
     if crawl_id is not None:
         results = self.session.query( Friendship ).filter( and_(
             Friendship.userA_id==userA_id,
             Friendship.userB_id==userB_id,
             Friendship.crawl_id==crawl_id ) ).all()
     else:
         results = self.session.query( Friendship ).filter( and_(
             Friendship.userA_id==userA_id,
             Friendship.userB_id==userB_id ) ).all()
     return results 
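A note on the pattern above: SQLAlchemy's Query.filter() joins multiple criteria with AND, so the explicit and_() wrapper is optional in that position, though it is still needed inside or_() or anywhere a single boolean expression is required. A minimal sketch, assuming a session object and the Friendship model from the example (the literal ids are illustrative):

from sqlalchemy import and_

# Both queries emit the same WHERE clause: the two criteria ANDed together.
q1 = session.query(Friendship).filter(and_(Friendship.userA_id == 1,
                                           Friendship.userB_id == 2))
q2 = session.query(Friendship).filter(Friendship.userA_id == 1,
                                      Friendship.userB_id == 2)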
Code example #2
File: util.py Project: assembl/assembl
def users_with_permission(discussion_id, permission, id_only=True):
    from ..models import Discussion
    # assume all ids valid
    db = Discussion.default_db
    user_ids = db.query(User.id).join(
        LocalUserRole, Role, DiscussionPermission, Permission).filter(and_(
            Permission.name == permission,
            LocalUserRole.requested == False,  # noqa: E712
            LocalUserRole.discussion_id == discussion_id,
            DiscussionPermission.discussion_id == discussion_id)
        ).union(
            db.query(User.id).join(
                UserRole, Role, DiscussionPermission, Permission).filter(
                and_(
                    Permission.name == permission,
                    DiscussionPermission.discussion_id == discussion_id))
        ).union(
            db.query(User.id).join(
                UserRole, Role).filter(
                and_(
                    Role.name == R_SYSADMIN,
                    DiscussionPermission.discussion_id == discussion_id))
        ).distinct()
    if id_only:
        return [AgentProfile.uri_generic(id) for (id, ) in user_ids]
    else:
        return db.query(AgentProfile).filter(AgentProfile.id.in_(user_ids)).all()
Code example #3
File: search.py Project: mitemitreski/myflicks
    def auto_movie(self, q):
        orig_q = q
        q = q.lstrip().lower().split(' ')
        full_words, q = q[:-1], q[-1]
        if 'the' in full_words:
            full_words.remove('the')

        target = literal(' ').op('||')(Movie.title)

        filters = []
        for word in full_words:
            filters.append(target.ilike('%% %s %%' % word))

        filters.append(target.ilike('%% %s%%' % q))
        if len(filters) > 1:
            filters = and_(*filters)
        else:
            filters = filters[0]


        res = self.session.query(Movie.id, Movie.title, Rating.rating)\
                  .outerjoin((Rating, and_(Rating.movie_id == Movie.id,
                                           Rating.user == self.user)))\
                  .filter(filters)\
                  .order_by(func.similarity(func.lower(target), orig_q).desc())\
                  .limit(7).all()
        self.return_json(res)
Code example #4
def get_linked_xreports(document, lang):
    condition_as_child = and_(
        Association.child_document_id == Xreport.document_id,
        Association.parent_document_id == document.document_id
    )
    condition_as_parent = and_(
        Association.child_document_id == document.document_id,
        Association.parent_document_id == Xreport.document_id
    )

    if document.type in [WAYPOINT_TYPE, USERPROFILE_TYPE,
                         ARTICLE_TYPE, IMAGE_TYPE]:
        condition = condition_as_parent
    elif document.type in [ROUTE_TYPE, OUTING_TYPE]:
        condition = condition_as_child

    xreport_ids = get_first_column(
        DBSession.query(Xreport.document_id).
        filter(Xreport.redirects_to.is_(None)).
        join(
            Association, condition).
        group_by(Xreport.document_id).
        all())

    return get_documents_for_ids(
        xreport_ids, lang, xreport_documents_config).get('documents')
Code example #5
File: onFit.py Project: LamCiuLoeng/gap
    def index( self, **kw ):
        values = {
                  'division_id' : kw.get( 'division_id', None ) or None,
                  'category_id' : kw.get( 'category_id', None ) or None
                  }
        cds = [
               OnclFit.active == 0 , OnclDivision.active == 0 , OnclCategory.active == 0,
               OnclFit.category_id == OnclCategory.id, OnclCategory.division_id == OnclDivision.id,
               ]

        if values['division_id']:
            cds.append( OnclDivision.id == values['division_id'] )
            cats = DBSession.query( OnclCategory ).filter( and_( OnclCategory.active == 0,
                                                      OnclCategory.division_id == values['division_id'] ) ).order_by( OnclCategory.name )
        else:
            cats = []

        if values['category_id']:
            cds.append( OnclCategory.id == values['category_id'] )

        result = DBSession.query( OnclDivision, OnclCategory, OnclFit ).filter( and_( *cds ) ).order_by( OnclDivision.name,
                                                                                                          OnclCategory.name,
                                                                                                          OnclFit.name
                                                                                                          )
        divs = DBSession.query( OnclDivision ).filter( and_( OnclDivision.active == 0 ) ).order_by( OnclDivision.name )
        return {'result' : result, 'divs' : divs , 'cats' : cats, 'values' : values }
Code example #6
def get_linked_articles(document, lang):
    condition_as_child = and_(
        Association.child_document_id == Article.document_id,
        Association.parent_document_id == document.document_id
    )
    condition_as_parent = and_(
        Association.child_document_id == document.document_id,
        Association.parent_document_id == Article.document_id
    )

    if document.type == IMAGE_TYPE:
        condition = condition_as_parent
    elif document.type in [WAYPOINT_TYPE,
                           OUTING_TYPE,
                           ROUTE_TYPE,
                           BOOK_TYPE,
                           XREPORT_TYPE,
                           USERPROFILE_TYPE]:
        condition = condition_as_child

    elif document.type == ARTICLE_TYPE:
        condition = or_(condition_as_child, condition_as_parent)

    article_ids = get_first_column(
        DBSession.query(Article.document_id).
        filter(Article.redirects_to.is_(None)).
        join(
            Association, condition).
        group_by(Article.document_id).
        all())

    return get_documents_for_ids(
        article_ids, lang, article_documents_config).get('documents')
Code example #7
def get_linked_routes(document, lang):
    condition_as_child = and_(
        Association.child_document_id == Route.document_id,
        Association.parent_document_id == document.document_id)
    condition_as_parent = and_(
        Association.child_document_id == document.document_id,
        Association.parent_document_id == Route.document_id)

    if document.type == WAYPOINT_TYPE:
        condition = condition_as_child
    elif document.type in [OUTING_TYPE, IMAGE_TYPE, ARTICLE_TYPE,
                           XREPORT_TYPE]:
        condition = condition_as_parent
    else:
        condition = or_(condition_as_child, condition_as_parent)

    route_ids = get_first_column(
        DBSession.query(Route.document_id).
        filter(Route.redirects_to.is_(None)).
        join(Association, condition).
        group_by(Route.document_id).
        all())

    return get_documents_for_ids(
        route_ids, lang, route_documents_config).get('documents')
Code example #8
File: user.py Project: ianmackinnon/mango
def get_user_pending_org_address(orm, user, org_id):
    Address_v_all = aliased(Address_v)
    Address_v_new = aliased(Address_v)

    query = orm.query(Address_v_all) \
        .outerjoin((
            Address,
            Address.address_id == Address_v_all.address_id
        )) \
        .join((
            org_address_v,
            and_(
                org_address_v.c.address_id == Address_v_all.address_id,
                org_address_v.c.org_id == org_id,
                org_address_v.c.existence == 1,
            )
        )) \
        .filter(
            Address_v_all.moderation_user_id == user.user_id,
            ~exists().where(and_(
                Address_v_new.address_id == Address_v_all.address_id,
                Address_v_new.a_time > Address_v_all.a_time,
            )),
            or_(
                Address.a_time == None,
                Address_v_all.a_time > Address.a_time,
            )
        ) \
        .order_by(Address_v_all.a_time.desc())

    return query.all()
Code example #9
File: changes.py Project: andrewcooke/ypod
 def to_remove(self):
     # first, remove albums deleted from disk
     while True:
         album = (
             self._session.query(Album)
             .filter(
                 and_(
                     Album.disk_generation != self._disk_gen,
                     Album.ipod_generation != self._ipod_gen + 1,
                     Album.loaded == True,
                 )
             )
             .order_by(Album.disk_generation.asc())
             .first()
         )
         if not album:
             break
         album.loaded = False
         self._session.commit()
         yield album
     # next, remove any albums not loaded in this load
     while True:
         album = (
             self._session.query(Album)
             .filter(and_(Album.ipod_generation != self._ipod_gen + 1, Album.loaded == True))
             .order_by(Album.disk_generation.asc())
             .first()
         )
         if not album:
             break
         album.loaded = False
         self._session.commit()
         yield album
Code example #10
 def constructQuery(self, context):
     session= Session()
     trusted=removeSecurityProxy(context)
     user_id = getattr(trusted, self.value_field, None)
     if user_id:
         query = session.query(domain.User 
                ).filter(domain.User.user_id == 
                     user_id).order_by(domain.User.last_name,
                         domain.User.first_name,
                         domain.User.middle_name)
         return query
     else:
         sitting = trusted.__parent__
         group_id = sitting.group_id
         group_sitting_id = sitting.group_sitting_id
         all_member_ids = sql.select([schema.user_group_memberships.c.user_id], 
                 sql.and_(
                     schema.user_group_memberships.c.group_id == group_id,
                     schema.user_group_memberships.c.active_p == True))
         attended_ids = sql.select([schema.group_sitting_attendance.c.member_id],
                  schema.group_sitting_attendance.c.group_sitting_id == group_sitting_id)
         query = session.query(domain.User).filter(
             sql.and_(domain.User.user_id.in_(all_member_ids),
                 ~ domain.User.user_id.in_(attended_ids))).order_by(
                         domain.User.last_name,
                         domain.User.first_name,
                         domain.User.middle_name)
         return query
Code example #11
File: user.py Project: ianmackinnon/mango
def get_user_pending_event_contact(orm, user, event_id):
    Contact_v_all = aliased(Contact_v)
    Contact_v_new = aliased(Contact_v)

    query = orm.query(Contact_v_all) \
        .outerjoin((
            Contact,
            Contact.contact_id == Contact_v_all.contact_id
            )) \
        .join((
            event_contact_v,
            and_(
                event_contact_v.c.contact_id == Contact_v_all.contact_id,
                event_contact_v.c.event_id == event_id,
                event_contact_v.c.existence == 1,
                )
            )) \
        .filter(
            Contact_v_all.moderation_user_id == user.user_id,
            ~exists().where(and_(
                Contact_v_new.contact_id == Contact_v_all.contact_id,
                Contact_v_new.a_time > Contact_v_all.a_time,
            )),
            or_(
                Contact.a_time == None,
                Contact_v_all.a_time > Contact.a_time,
            )
        ) \
        .order_by(Contact_v_all.a_time.desc())

    return query.all()
Code example #12
File: tasks.py Project: GliderGeek/skylines
def find_meetings(flight_id):
    logger.info("Searching for near flights of flight %d" % flight_id)

    flight = Flight.get(flight_id)

    # Update FlightPathChunks of current flight
    FlightPathChunks.update_flight_path(flight)

    other_flights = FlightPathChunks.get_near_flights(flight)

    # delete all previous detected points between src and dst
    for key in other_flights:
        FlightMeetings.query() \
            .filter(or_(and_(FlightMeetings.source == flight, FlightMeetings.destination_id == key),
                        and_(FlightMeetings.destination == flight, FlightMeetings.source_id == key))) \
            .delete()

    # Insert new meetings into table
    for flight_id, meetings in other_flights.iteritems():
        other_flight = Flight.get(flight_id)

        for meeting in meetings:
            FlightMeetings.add_meeting(flight, other_flight, meeting['times'][0], meeting['times'][-1])

    db.session.commit()
Code example #13
    def configureListeners(self, farmNo, loadBalancerNo) :
        table = self.conn.getTable("LOAD_BALANCER_LISTENER")
        listeners = self.conn.select(table.select(table.c.LOAD_BALANCER_NO==loadBalancerNo))

        # Start or stop each listener as needed
        for listener in listeners :
            status = self.getStatusString(listener["STATUS"])
            if isBit(listener["ENABLED"]):
                if status == self.STOPPED :
                    # An enabled listener that is currently stopped should be started
                    self.startListener(farmNo, loadBalancerNo, listener["LOAD_BALANCER_PORT"])
                elif status == self.RUNNING:
                    # For an enabled listener that is already running, just clear the flag without further processing
                    if isBit(listener["CONFIGURE"]):
                        listener["CONFIGURE"] = "0"
                        sql = table.update(and_(table.c.LOAD_BALANCER_NO ==listener["LOAD_BALANCER_NO"], table.c.LOAD_BALANCER_PORT == listener["LOAD_BALANCER_PORT"]), values=listener)
                        self.conn.execute(sql)

            else :
                if (status == self.RUNNING or status == self.WARNING) :
                    # A disabled listener that is running or in a warning state should be stopped
                    self.stopListener(farmNo, loadBalancerNo, listener["LOAD_BALANCER_PORT"])
                elif (status == self.STOPPED) :
                    # For a disabled listener that is already stopped, just clear the flag without further processing
                    if isBit(listener["CONFIGURE"]):
                        listener["CONFIGURE"] = "0"
                        sql = table.update(and_(table.c.LOAD_BALANCER_NO ==loadBalancerNo, table.c.LOAD_BALANCER_PORT == listener["LOAD_BALANCER_PORT"]), values=listener)
                        self.conn.execute(sql)
Code example #14
File: viewlets.py Project: kohsah/bungeni-portal
 def update(self):
     user_id = self.context.user_id
     parliament_id = self.context.group_id
     session = Session()
     # add cosigned items
     signed_pi_ids = [sgn.item_id for sgn in
         session.query(domain.Signatory).filter(
             sql.and_(domain.Signatory.user_id == user_id,
                 domain.Signatory.status.in_(
                         get_states("signatory", tagged=["public"])
                     ),
             )
         ).all()
     ]
     if len(signed_pi_ids) > 0:
         self.query = self.query.union(
             session.query(domain.ParliamentaryItem).filter(
                 sql.and_(
                     domain.ParliamentaryItem.parliament_id == parliament_id,
                     domain.ParliamentaryItem.status.in_(self.states),
                     domain.ParliamentaryItem.parliamentary_item_id.in_(
                         signed_pi_ids
                     )
                 )
             )
         )
     self.query = self.query.order_by(
         domain.ParliamentaryItem.parliamentary_item_id.desc()
     )
Code example #15
File: association.py Project: arnaud-morvan/v6_api
def _get_load_associations_query(document, doc_types_to_load):
    query_parents = DBSession. \
        query(
            Association.parent_document_id.label('id'),
            Document.type.label('t'),
            literal_column('1').label('p')). \
        join(
            Document,
            and_(
                Association.child_document_id == document.document_id,
                Association.parent_document_id == Document.document_id,
                Document.type.in_(doc_types_to_load))). \
        subquery()
    query_children = DBSession. \
        query(
            Association.child_document_id.label('id'),
            Document.type.label('t'),
            literal_column('0').label('p')). \
        join(
            Document,
            and_(
                Association.child_document_id == Document.document_id,
                Association.parent_document_id == document.document_id,
                Document.type.in_(doc_types_to_load))). \
        subquery()

    return DBSession \
        .query('id', 't', 'p') \
        .select_from(union(query_parents.select(), query_children.select()))
Code example #16
File: erpfee.py Project: LamCiuLoeng/budget
    def index(self, **kw):
        companies = DBSession.query(Company, Currency).filter(and_(Company.active == 0,
                                                                  Currency.active == 0,
                                                                  Company.currency_id == Currency.id,
                                                                  )).order_by(Company.name)

        subline = DBSession.query(Subline).filter(and_(Subline.active == 0)).order_by(Subline.label)
        saletype = DBSession.query(SaleType).filter(and_(SaleType.active == 0)).order_by(SaleType.label)

        result = {
                'companies' : companies,
                'subline'   : subline,
                'saletype'  : saletype,
                }

        if has_permission('FIN_VIEW_ALL'):  # if FIN team
            teams = DBSession.query(LogicTeam).filter(and_(LogicTeam.active == 0, LogicTeam.for_sale == 0)).order_by(LogicTeam.order).all()
            result['is_fin'] = True
        else:
            # get the user's belonging team
            result['is_fin'] = False
            teams = []
            try:
                mp = DBSession.query(Permission).filter(Permission.permission_name == 'MANAGER_VIEW').one()
                for g in request.identity["user"].groups:
                    if mp in g.permissions and g.logicteams:
                        teams.extend(g.logicteams)
            except:
                traceback.print_exc()
                pass
        result['teams'] = teams
        return result
Code example #17
File: queue_log_dao.py Project: jaunis/xivo-dao
def _get_event_with_enterqueue(session, start, end, match, event):
    start = start.strftime(_STR_TIME_FMT)
    end = end.strftime(_STR_TIME_FMT)

    enter_queues = (session
                    .query(QueueLog.callid,
                           cast(QueueLog.time, TIMESTAMP).label('time'))
                    .filter(and_(QueueLog.event == 'ENTERQUEUE',
                                 between(QueueLog.time, start, end))))

    enter_map = {}
    for enter_queue in enter_queues.all():
        enter_map[enter_queue.callid] = enter_queue.time

    if enter_map:
        res = (session
               .query(QueueLog.event,
                      QueueLog.queuename,
                      cast(QueueLog.time, TIMESTAMP).label('time'),
                      QueueLog.callid,
                      QueueLog.data3)
               .filter(and_(QueueLog.event == match,
                            QueueLog.callid.in_(enter_map))))

        for r in res.all():
            yield {
                'callid': r.callid,
                'queue_name': r.queuename,
                'time': enter_map[r.callid],
                'event': event,
                'talktime': 0,
                'waittime': int(r.data3) if r.data3 else 0
            }
Code example #18
File: model_utils.py Project: blueshed/blueshed-py
def overlaps(cls, from_date, to_date):
    return or_(and_(cls.from_date <= to_date, #starts
                    cls.from_date >= from_date),
               and_(cls.to_date <= to_date, #ends
                    cls.to_date >= from_date),
               and_(cls.from_date <= from_date, #spans
                    cls.to_date >= to_date))
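The helper above builds a reusable date-overlap predicate: two ranges overlap when one starts inside the other, ends inside the other, or spans it entirely. A hedged usage sketch follows; the Booking model, its from_date/to_date columns, and the session are illustrative assumptions, not part of the original project:

from datetime import date

# Hypothetical usage: find bookings that overlap January 2024.
# Booking is assumed to be a mapped class with from_date / to_date columns.
clause = overlaps(Booking, date(2024, 1, 1), date(2024, 1, 31))
january_bookings = session.query(Booking).filter(clause).all()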
Code example #19
 def update(self):
     user_id = self.context.user_id
     parliament_id = self.context.group_id
     wf = capi.get_type_info("signatory").workflow
     session = Session()
     # add cosigned items
     signed_pi_ids = [sgn.head_id for sgn in
         session.query(domain.Signatory).filter(
             sql.and_(domain.Signatory.user_id == user_id,
                 domain.Signatory.status.in_(
                     wf.get_state_ids(tagged=["public"])
                 ),
             )
         ).all()
     ]
     if len(signed_pi_ids) > 0:
         self.query = self.query.union(
             session.query(domain.Doc).filter(
                 sql.and_(
                     domain.Doc.parliament_id == parliament_id,
                     domain.Doc.status.in_(self.states),
                     domain.Doc.doc_id.in_(
                         signed_pi_ids
                     )
                 )
             )
         )
     self.query = self.query.order_by(
         domain.Doc.doc_id.desc()
     )
Code example #20
    def del_relation(category, tag_id_list):
        for tag_id in tag_id_list:
            if category.current_level == 1:
                # Does the relation already exist?
                relation = TagCategoryRelation.query.filter(
                    and_(TagCategoryRelation.category1_id == category.id, TagCategoryRelation.category2_id == 0,
                         TagCategoryRelation.category3_id == 0, TagCategoryRelation.tag_id == tag_id)).first()
                if relation:
                    relation.num_entries = TagCategoryRelation.__table__.c.num_entries - 1

            if category.current_level == 2:
                relation = TagCategoryRelation.query.filter(
                    and_(TagCategoryRelation.category1_id == category.path[0].id,
                         TagCategoryRelation.category2_id == category.id, TagCategoryRelation.category3_id == 0,
                         TagCategoryRelation.tag_id == tag_id)).first()
                if relation:
                    relation.num_entries = TagCategoryRelation.__table__.c.num_entries - 1

            if category.current_level == 3:
                relation = TagCategoryRelation.query.filter(
                    and_(TagCategoryRelation.category1_id == category.path[0].id,
                         TagCategoryRelation.category2_id == category.path[1].id,
                         TagCategoryRelation.category3_id == category.id, TagCategoryRelation.tag_id == tag_id)).first()
                if relation:
                    relation.num_entries = TagCategoryRelation.__table__.c.num_entries - 1

        db.session.commit()
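A note on the decrement idiom above: assigning a SQL expression to a mapped attribute makes the ORM render that expression in the UPDATE issued at flush time, so the counter is decremented on the server in a single atomic statement rather than being read and rewritten in Python. A minimal sketch of the same idiom, using a hypothetical Counter model (not part of the original project):

# Hypothetical model; demonstrates the server-side decrement used above.
counter = db.session.query(Counter).get(1)
counter.hits = Counter.__table__.c.hits - 1  # rendered as SET hits = hits - 1
db.session.commit()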
Code example #21
File: reader_configs.py Project: jcrudy/oreader
 def create_expression(self):
     if self.last_result is None:
         try:
             expr = select(self.table.columns).order_by(*[safe_collate(self.table.columns[nm], None) for nm in self.sort_key])
             if self.starter is not None:
                 expr = expr.where(and_(self.starter, self.filter))
             else:
                 expr = expr.where(self.filter)
         except:
             raise
     else:
         try:
             where_clause = vector_greater_than([self.table.columns[nm] for nm in self.sort_key], \
                                               [self.last_result[n] for n in self.sort_index])
             expr = (select(self.table.columns).order_by(*[safe_collate(self.table.columns[nm], None) for nm in self.sort_key]) \
                    .where(and_(where_clause, self.filter)))
         except:
             raise
     
     if self.limit_per is not None:
         expr = expr.limit(self.limit_per)
         
     if self.stream:
         expr = expr.execution_options(stream_results=True)
     
     return expr
Code example #22
File: parser.py Project: bradleyjones/gertty
 def p_is_term(p):
     '''is_term : OP_IS string'''
     #TODO: implement starred, watched, owner, reviewer, draft
     username = p.parser.username
     if p[2] == 'reviewed':
         filters = []
         filters.append(gertty.db.approval_table.c.change_key == gertty.db.change_table.c.key)
         filters.append(gertty.db.approval_table.c.value != 0)
         s = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
         p[0] = gertty.db.change_table.c.key.in_(s)
     elif p[2] == 'open':
         p[0] = gertty.db.change_table.c.status.notin_(['MERGED', 'ABANDONED'])
     elif p[2] == 'closed':
         p[0] = gertty.db.change_table.c.status.in_(['MERGED', 'ABANDONED'])
     elif p[2] == 'submitted':
         p[0] = gertty.db.change_table.c.status == 'SUBMITTED'
     elif p[2] == 'merged':
         p[0] = gertty.db.change_table.c.status == 'MERGED'
     elif p[2] == 'abandoned':
         p[0] = gertty.db.change_table.c.status == 'ABANDONED'
     elif p[2] == 'owner':
         p[0] = and_(gertty.db.change_table.c.account_key == gertty.db.account_table.c.key,
                     gertty.db.account_table.c.username == username)
     elif p[2] == 'reviewer':
         filters = []
         filters.append(gertty.db.approval_table.c.change_key == gertty.db.change_table.c.key)
         filters.append(gertty.db.approval_table.c.account_key == gertty.db.account_table.c.key)
         filters.append(gertty.db.account_table.c.username == username)
         s = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
         p[0] = gertty.db.change_table.c.key.in_(s)
     else:
         raise gertty.search.SearchSyntaxError('Syntax error: is:%s is not supported' % p[2])
Code example #23
File: users.py Project: nous-consulting/ututi
    def all_watched_subjects(self):
        from ututi.model import Subject
        subjects_table = meta.metadata.tables['subjects']
        umst = meta.metadata.tables['user_monitored_subjects']
        directly_watched_subjects = meta.Session.query(Subject)\
            .join((umst,
                   and_(umst.c.subject_id==subjects_table.c.id,
                        umst.c.subject_id==subjects_table.c.id)))\
            .filter(and_(umst.c.user_id == self.id,
                         umst.c.ignored == False))

        user_ignored_subjects = meta.Session.query(Subject)\
            .join((umst,
                   and_(umst.c.subject_id==subjects_table.c.id,
                        umst.c.subject_id==subjects_table.c.id)))\
            .filter(and_(umst.c.user_id == self.id,
                         umst.c.ignored == True))

        gwst = meta.metadata.tables['group_watched_subjects']
        gmt = meta.metadata.tables['group_members']
        gt = meta.metadata.tables['groups']
        group_watched_subjects = meta.Session.query(Subject)\
            .join((gwst,
                   and_(gwst.c.subject_id==subjects_table.c.id,
                        gwst.c.subject_id==subjects_table.c.id)))\
            .join((gmt, gmt.c.group_id == gwst.c.group_id))\
            .join((gt, gmt.c.group_id == gt.c.id))\
            .filter(gmt.c.user_id == self.id)
        return directly_watched_subjects.union(
            group_watched_subjects.except_(user_ignored_subjects))\
            .order_by(Subject.title.asc())\
            .all()
Code example #24
def find_sccp_speeddial_settings(session):
    query = (session.query(FuncKeyMapping.position.label('fknum'),
                           FuncKeyMapping.label.label('label'),
                           cast(FuncKeyMapping.blf, Integer).label('supervision'),
                           FuncKeyDestCustom.exten.label('exten'),
                           UserFeatures.id.label('user_id'),
                           SCCPDevice.device.label('device'))
             .join(UserFeatures,
                   FuncKeyMapping.template_id == UserFeatures.func_key_private_template_id)
             .join(FuncKeyDestCustom,
                   FuncKeyDestCustom.func_key_id == FuncKeyMapping.func_key_id)
             .join(UserLine,
                   and_(
                       UserLine.user_id == UserFeatures.id,
                       UserLine.main_user == True))
             .join(LineFeatures,
                   UserLine.line_id == LineFeatures.id)
             .join(SCCPLine,
                   and_(
                       LineFeatures.protocol == 'sccp',
                       LineFeatures.protocolid == SCCPLine.id))
             .join(SCCPDevice,
                   SCCPLine.name == SCCPDevice.line)
             .filter(LineFeatures.commented == 0))

    keys = [{'exten': row.exten,
             'fknum': row.fknum,
             'label': row.label,
             'supervision': row.supervision,
             'user_id': row.user_id,
             'device': row.device}
            for row in query]

    return keys
Code example #25
File: excel_helper.py Project: LamCiuLoeng/budget
def parseExcel(file_path):
    pythoncom.CoInitialize()
    excelObj = DispatchEx('Excel.Application')
    excelObj.Visible = False
    workBook = excelObj.Workbooks.open(file_path)
    excelSheet = workBook.Sheets(1)

    items = DBSession.query(FeeItem).filter(and_(FeeItem.active == 0, FeeItem.import_excel_row != None)).order_by(FeeItem.order)
    teams = DBSession.query(LogicTeam).filter(and_(LogicTeam.active == 0, LogicTeam.import_excel_column != None)).order_by(LogicTeam.order)

    data = []
    y_header = []
    def _f(v):
        try:
            return int(v)
        except:
            return None

    for item in items:
        y_header.append(item.label)
        data.append([(team.id, item.id, _f(excelSheet.Cells(item.import_excel_row , team.import_excel_column).Value))  for team in teams])

    result = {
              'x_header' : [team.label for team in teams],
              'y_header' : y_header,
              'data' : data
              }
    workBook.Close()

    return result
Code example #26
File: venue.py Project: BenoitTalbot/bungeni-portal
def check_venue_bookings( start, end, venue, sitting=None ):
    """
    Return all sittings (excluding the given sitting, if any) that a venue
    is booked for in the period.
    """
    assert( type(start) == datetime.datetime )
    assert( type(end) == datetime.datetime ) 
    session = Session()
    b_filter = sql.and_(sql.or_( 
                    sql.between(schema.sittings.c.start_date, start, end), 
                    sql.between(schema.sittings.c.end_date, start, end),
                    sql.between(start, schema.sittings.c.start_date, 
                                schema.sittings.c.end_date),
                    sql.between(end, schema.sittings.c.start_date, 
                                schema.sittings.c.end_date)
                        ),
                    schema.sittings.c.venue_id == venue.venue_id)
    if sitting:
        if sitting.sitting_id:
            b_filter = sql.and_(b_filter,
                        schema.sittings.c.sitting_id != sitting.sitting_id)
    query = session.query(BookedVenue).filter(b_filter)
    venues = query.all()
    #session.close()
    return venues
Code example #27
File: venue.py Project: BenoitTalbot/bungeni-portal
def get_available_venues( start, end, sitting=None ):
    """get all venues that are not booked for a sitting
    (but sitting if given)
    in the given time period 
    SQL:
    SELECT * 
    FROM venues 
    WHERE venues.venue_id NOT IN (SELECT sitting.venue_id 
        FROM sitting 
        WHERE (sitting.start_date BETWEEN '2000-01-01' AND '2000-01-02' 
            OR sitting.end_date BETWEEN '2000-01-01'  AND '2000-01-02'
            OR '2000-01-01'  BETWEEN sitting.start_date AND sitting.end_date 
            OR '2000-01-02'  BETWEEN sitting.start_date AND sitting.end_date) 
        AND sitting.venue_id IS NOT NULL)
    """
    session = Session()
    query = session.query(domain.Venue)
    b_filter = sql.and_(
        sql.or_( 
            sql.between(schema.sitting.c.start_date, start, end), 
            sql.between(schema.sitting.c.end_date, start, end),
            sql.between(start, schema.sitting.c.start_date, 
                schema.sitting.c.end_date),
            sql.between(end, schema.sitting.c.start_date, 
                schema.sitting.c.end_date)
        ),
        schema.sitting.c.venue_id != None)
    if sitting:
        if sitting.sitting_id:
            b_filter = sql.and_(b_filter,
                schema.sitting.c.sitting_id != sitting.sitting_id)
    query = query.filter(sql.not_(schema.venue.c.venue_id.in_(
                sql.select( [schema.sitting.c.venue_id] ).where(b_filter) )))
    venues = query.all()
    return venues
Code example #28
File: sync.py Project: c2corg/v6_api
def get_changed_outings_ro_uo(session, last_update):
    """ Returns the outings when associations between outing and route, or
    between outing and user have been created/removed.

    E.g. when an association between outing O1 and route R1 is created,
    outing O1 has to be updated so that all waypoints associated to R1 are
    listed under `associated_waypoints_ids`, and so that R1 is listed under
    `associated_routes_ids`.
    """
    return session. \
        query(
            AssociationLog.child_document_id.label('outing_id'),
            literal(OUTING_TYPE).label('type')). \
        filter(or_(
            and_(
                AssociationLog.parent_document_type == ROUTE_TYPE,
                AssociationLog.child_document_type == OUTING_TYPE
            ),
            and_(
                AssociationLog.parent_document_type == USERPROFILE_TYPE,
                AssociationLog.child_document_type == OUTING_TYPE
            )
        )). \
        filter(AssociationLog.written_at >= last_update). \
        group_by('outing_id', 'type'). \
        all()
Code example #29
File: user.py Project: ianmackinnon/mango
def get_user_pending_contact_event(orm, user, contact_id):
    Event_v_all = aliased(Event_v)
    Event_v_new = aliased(Event_v)

    query = orm.query(Event_v_all) \
        .outerjoin((
            Event,
            Event.event_id == Event_v_all.event_id
            )) \
        .join((
            event_contact_v,
            and_(
                event_contact_v.c.event_id == Event_v_all.event_id,
                event_contact_v.c.contact_id == contact_id,
                event_contact_v.c.existence == 1,
                )
            )) \
        .filter(
            Event_v_all.moderation_user_id == user.user_id,
            ~exists().where(and_(
                Event_v_new.event_id == Event_v_all.event_id,
                Event_v_new.a_time > Event_v_all.a_time,
            )),
            or_(
                Event.a_time == None,
                Event_v_all.a_time > Event.a_time,
            )
        ) \
        .order_by(Event_v_all.a_time.desc())

    return query.all()
Code example #30
 def constructQuery(self, context):
     session= Session()
     trusted=removeSecurityProxy(context)
     parliament_id = self._get_parliament_id(context)
     trusted = removeSecurityProxy(context)
     assigned_committee_ids = []
     if IGroupGroupItemAssignment.providedBy(trusted):
         committee_id = getattr(trusted, "group_id", None)
         if committee_id:
             query = session.query(domain.Committee).filter(
                 sql.and_(
                     domain.Committee.parent_group_id == parliament_id,
                     domain.Committee.group_id == committee_id
                 )
             )
             return query
         else:
             assigned_committee_ids = \
                 [ comm.group_id for comm in trusted.__parent__.values() ]
     else:
         assigned_committee_ids = \
             [ comm.group_id for comm in trusted.values() ]
     query = session.query(domain.Committee).filter(
         sql.and_(
             domain.Committee.status == 'active',
             domain.Committee.parent_group_id == parliament_id,
             sql.not_(
                 domain.Committee.group_id.in_(assigned_committee_ids)
             )
         )
     )
     return query
Code example #31
            if contributor in unresolved_mysteries:
                print " Deleting now-resolved mystery."
                now_resolved = unresolved_mysteries[contributor]
                resolved_mysteries.add(now_resolved)
                del unresolved_mysteries[contributor]
                _db.delete(now_resolved)

_db = production_session()
contribution2 = aliased(Contribution)


# Find Editions where one Contributor is listed both in an 'Unknown' role
# and some other role. Also find editions where one Contributor is listed
# twice in author roles.
unknown_role_or_duplicate_author_role = or_(
    and_(Contribution.role==Contributor.UNKNOWN_ROLE,
         contribution2.role != Contributor.UNKNOWN_ROLE),
    and_(
        Contribution.role.in_(Contributor.AUTHOR_ROLES),
        contribution2.role.in_(Contributor.AUTHOR_ROLES),
    )
)

qu = _db.query(Edition).join(Edition.contributions).join(
    contribution2, contribution2.edition_id==Edition.id).filter(
        contribution2.id != Contribution.id).filter(
            contribution2.contributor_id==Contribution.contributor_id
        ).filter(
            unknown_role_or_duplicate_author_role
        )

print "Fixing %s Editions." % qu.count()
Code example #32
File: openeye.py Project: tlb-lab/credoscript
    def fetch_all_by_sim_oe(self, smiles, *expr, **kwargs):
        """
        Returns all Chemical Components that match the given SMILES string with at
        least the given similarity threshold using chemical fingerprints.

        Parameters
        ----------
        smiles : str
            The query molecule in SMILES format.
        threshold : float, default=0.5
            The similarity threshold that will be used for searching.
        metric : {'tanimoto','dice','manhattan','cosine','euclidean'}, default='tanimoto'
            The similarity metric used for searching and ordering.
        fp : {'circular','maccs166','path','tree'}
            OpenEye fingerprint type to be used for similarity searching.
        *expr : BinaryExpressions, optional
            SQLAlchemy BinaryExpressions that will be used to filter the query.

        Queried Entities
        ----------------
        ChemComp, ChemCompOEFP

        Returns
        -------
        hits : list
            List of tuples in the form (ChemComp, similarity)

        Examples
        --------

        Requires
        --------
        .. important:: OpenEye cartridge.
        """
        session = Session()

        threshold = kwargs.get('threshold')
        metric = kwargs.get('metric', 'tanimoto')
        fp = kwargs.get('fp', 'circular')
        limit = kwargs.get('limit', 100)

        # set the similarity threshold for the selected metric
        if threshold:
            statement = text(
                "SELECT openeye.set_oefp_similarity_limit(:threshold, :metric)"
            )
            session.execute(
                statement.params(threshold=threshold, metric=metric))

        if fp == 'circular':
            query = func.openeye.make_circular_fp(smiles)
            target = ChemCompOEFP.circular_fp

        elif fp == 'maccs166':
            query = func.openeye.make_maccs166_fp(smiles)
            target = ChemCompOEFP.maccs166_fp

        elif fp == 'path':
            query = func.openeye.make_path_fp(smiles)
            target = ChemCompOEFP.path_fp

        elif fp == 'tree':
            query = func.openeye.make_tree_fp(smiles)
            target = ChemCompOEFP.tree_fp

        else:
            raise ValueError(
                "cannot create fingerprint: type {0} does not exist.".format(
                    fp))

        # compile similarity metric and the corresponding GIST index / KNN-GIST
        if metric == 'tanimoto':
            similarity = func.openeye.tanimoto(query, target)
            index = func.openeye.tanimoto_is_above_limit(target, query)
            orderby = target.op('OPERATOR(openeye.<%%>)')(query)  # escape %

        elif metric == 'dice':
            similarity = func.openeye.dice(query, target)
            index = func.openeye.dice_is_above_limit(target, query)
            orderby = target.op('OPERATOR(openeye.<#>)')(query)

        elif metric == 'manhattan':
            similarity = func.openeye.manhattan(query, target)
            index = func.openeye.manhattan_is_above_limit(target, query)
            orderby = target.op('OPERATOR(openeye.<~>)')(query)

        elif metric == 'cosine':
            similarity = func.openeye.cosine(query, target)
            index = func.openeye.cosine_is_above_limit(target, query)
            orderby = target.op('OPERATOR(openeye.<@>)')(query)

        elif metric == 'euclidean':
            similarity = func.openeye.euclidean(query, target)
            index = func.openeye.euclidean_is_above_limit(target, query)
            orderby = target.op('OPERATOR(openeye.<->)')(query)

        else:
            raise ValueError(
                "{} is not a valid similarity metric.".format(metric))

        query = ChemComp.query.add_column(similarity)
        query = query.join('OEFP').filter(and_(index, *expr))
        query = query.order_by(orderby)

        return query
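The docstring above leaves its Examples section empty, so here is a hedged call sketch; the adapter instance is an assumption (any object exposing this method), and the SMILES string and parameter values are purely illustrative:

# Hypothetical usage sketch; 'adapter' is assumed, not defined in the source.
query = adapter.fetch_all_by_sim_oe('c1ccccc1O', threshold=0.7,
                                    fp='circular', metric='tanimoto')
rows = query.limit(10).all()  # list of (ChemComp, similarity) tuples, best first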
Code example #33
 def getFilter(date):
     return sql.or_(
         sql.between(date, schema.groups.c.start_date,
                     schema.groups.c.end_date),
         sql.and_(schema.groups.c.start_date <= date,
                  schema.groups.c.end_date == None))
Code example #34
def getAllUser():
    global sess
    try:
        user_info = sess.query(User).all()
        # print "----------- user_info ------------"
        # print user_info
        user_list = []
        for user in user_info:
            user_dict = {}
            user_value = helper.serialize(user)
            user_dict = user_value
            user_group_info = sess.query(User_Relation.level,User_Group.name,User_Group.id).outerjoin(User_Group,(User_Relation.id_group == User_Group.id)).filter(and_(User_Relation.id_user == user_value['id'],User_Relation.inuse == True )).first()
            user_dict['group_name'] = user_group_info.name
            user_dict['group_id'] = user_group_info.id
            user_dict['user_level'] = user_group_info.level
            user_list.append(user_dict)
            # print "----------------------------------------------- user_list -------------------"
            # print user_dict

        print "----------------------------------------------- end_list -------------------"
        print user_list
        # print user_list
        return json_response({"user": user_list}, 200)
    except Exception as e:
        print 'error api report user: ', e
        return json_response({"message": e.message},400)
Code example #35
    def update_moc_cell(self,
                        moc_id,
                        moc_object,
                        block_size=1000,
                        block_pause=1):
        """
        Update the moc_cell database table.

        The entries for the MOC identified by moc_id are updated so that
        they match the entries of the MOC provided as moc_object.

        This method works in an order-by-order manner and attempts
        to determine the most efficient update method for each order.
        If the number of original cells is greater than the number of
        unchanged cells, then all existing entries are deleted and the new
        cells inserted.  Otherwise removed cells are deleted individually
        and newly added cells inserted.

        For debugging purposes, this method returns a dictionary indicating
        the action taken for each order.
        """

        debug_info = {}

        moc_existing = self._get_moc_from_cell(moc_id)

        for order in range(0, max(moc_existing.order, moc_object.order) + 1):
            # MOC object gives us a frozenset object of cells for the order.
            # We want to work in terms of these sets in order to have the
            # database updated to exactly the same representation.  If we
            # tried taking the intersection of MOCs directly, for example,
            # then the MOC object may alter the representation, such as by
            # splitting cells.
            existing = moc_existing[order]
            replacement = moc_object[order]

            bulk_delete = False
            delete = None
            insert = None

            # Determine what to do: simpler cases first.
            if not replacement:
                if not existing:
                    # No cells in either MOC: nothing to do.
                    pass

                else:
                    # No cells in the replacement moc: delete existing cells.
                    debug_info[order] = 'delete'
                    bulk_delete = True

            elif not existing:
                # No existing cells present: just insert everything.
                debug_info[order] = 'insert'
                insert = replacement

            else:
                # There are existing and replacement cells: determine best
                # strategy.
                intersection = existing.intersection(replacement)

                if len(existing) > len(intersection):
                    # Bulk deletion seems most efficient.
                    debug_info[order] = 'bulk'
                    bulk_delete = True
                    insert = replacement
                else:
                    # Compute sets of individual deletions and replacements.
                    delete = existing.difference(intersection)
                    insert = replacement.difference(intersection)

                    if (not delete) and (not insert):
                        # No cells changed: nothing to do.
                        debug_info[order] = 'unchanged'
                        continue

                    debug_info[order] = 'individual'

            # Now go ahead and perform update actions.
            if bulk_delete:
                with self._transaction() as conn:
                    conn.execute(moc_cell.delete().where(
                        and_(moc_cell.c.moc_id == moc_id,
                             moc_cell.c.order == order)))

            if delete is not None:
                for delete_block in list_in_blocks(delete, block_size):
                    sleep(block_pause)
                    with self._transaction() as conn:
                        for cell in delete_block:
                            conn.execute(moc_cell.delete().where(
                                and_(moc_cell.c.moc_id == moc_id,
                                     moc_cell.c.order == order,
                                     moc_cell.c.cell == cell)))

            if insert is not None:
                for insert_block in list_in_blocks(insert, block_size):
                    sleep(block_pause)
                    with self._transaction() as conn:
                        for cell in insert_block:
                            conn.execute(moc_cell.insert().values({
                                moc_cell.c.moc_id:
                                moc_id,
                                moc_cell.c.order:
                                order,
                                moc_cell.c.cell:
                                cell,
                            }))

        return debug_info
Code example #36
    def aggregate(self, measures=['amount'], drilldowns=[], cuts=[],
                  page=1, pagesize=10000, order=[]):
        """ Query the dataset for a subset of cells based on cuts and
        drilldowns. It returns a structure with a list of drilldown items
        and a summary about the slice cut by the query.

        ``measures``
            The numeric units to be aggregated over, defaults to
            [``amount``]. (type: `list`)
        ``drilldowns``
            Dimensions to drill down to. (type: `list`)
        ``cuts``
            Specification what to cut from the cube. This is a
            `list` of `two-tuples` where the first item is the dimension
            and the second item is the value to cut from. It is turned into
            a query where multiple cuts for the same dimension are combined
            into an *OR* query and then the queries for the different
            dimensions are combined to an *AND* query.
        ``page``
            Page the drilldown result and return page number *page*.
            type: `int`
        ``pagesize``
            Page the drilldown result into page of size *pagesize*.
            type: `int`
        ``order``
            Sort the result based on the dimension *sort_dimension*.
            This may be `None` (*default*) or a `list` of two-`tuples`
            where the first element is the *dimension* and the second
            element is the order (`False` for ascending, `True` for
            descending).
            Type: `list` of two-`tuples`.

        Raises:

        :exc:`ValueError`
            If a cube is not yet computed. Call :meth:`compute` to compute
            the cube.
        :exc:`KeyError`
            If a drilldown, cut or order dimension is not part of this
            cube or the order dimensions are not a subset of the drilldown
            dimensions.

        Returns: A `dict` containing the drilldown and the summary:

          {"drilldown": [
              {"num_entries": 5545,
               "amount": 41087379002.0,
               "cofog1": {"description": "",
                          "label": "Economic affairs"}},
              ... ]
           "summary": {"amount": 7353306450299.0,
                       "num_entries": 133612}}

        """

        # Get the joins (aka alias) and the dataset
        joins = alias = self.alias
        dataset = self

        # Aggregation fields are all of the measures, so we create individual
        # summary fields with the sum function of SQLAlchemy
        fields = [func.sum(alias.c[m]).label(m) for m in measures]
        # We append an aggregation field that counts the number of entries
        fields.append(func.count(alias.c.id).label("entries"))
        # Create a copy of the statistics fields (for later)
        stats_fields = list(fields)

        # Create label map for time columns (year and month) for lookup
        # since they are found under the time attribute
        labels = {
            'year': dataset['time']['year'].column_alias.label('year'),
            'month': dataset['time']['yearmonth'].column_alias.label('month'),
        }

        # Get the dimensions we're interested in. These would be the drilldowns
        # and the cuts. For compound dimensions we are only interested in the
        # most significant one (e.g. for from.name we're interested in from)
        dimensions = drilldowns + [k for k, v in cuts]
        dimensions = [d.split('.')[0] for d in dimensions]

        # Loop over the dimensions as a set (to avoid multiple occurrences)
        for dimension in set(dimensions):
            # If the dimension is year or month we're interested in 'time'
            if dimension in labels:
                dimension = 'time'
            # If the dimension table isn't in the automatic joins we add it
            if dimension not in [c.table.name for c in joins.columns]:
                joins = dataset[dimension].join(joins)

        # Drilldowns are performed using group_by SQL functions
        group_by = []
        for key in drilldowns:
            # If drilldown is in labels we append its mapped column to fields
            if key in labels:
                column = labels[key]
                group_by.append(column)
                fields.append(column)
            else:
                # Get the column from the dataset
                column = dataset.key(key)
                # If the drilldown is a compound dimension or the columns table
                # is in the joins we're already fetching the column so we just
                # append it to fields and the group_by
                if '.' in key or column.table == alias:
                    fields.append(column)
                    group_by.append(column)
                else:
                    # If not we add the column table to the fields and add all
                    # of that tables columns to the group_by
                    fields.append(column.table)
                    for col in column.table.columns:
                        group_by.append(col)

        # Cuts are managed using AND statements and we use a dict with set as
        # the default value to create the filters (cut on various values)
        conditions = and_()
        filters = defaultdict(set)

        for key, value in cuts:
            # If the key is in labels (year or month) we get the mapped column
            # else we get the column from the dataset
            if key in labels:
                column = labels[key]
            else:
                column = dataset.key(key)
            # We add the value to the set for that particular column
            filters[column].add(value)

        # Loop over the columns in the filter and add that to the conditions
        # For every value in the set we create an OR statement so we get e.g.
        # year=2007 AND (from.who == 'me' OR from.who == 'you')
        for attr, values in filters.items():
            conditions.append(or_(*[attr == v for v in values]))

        # Ordering can be set by a parameter or ordered by measures by default
        order_by = []
        # If no order is defined we default to order of the measures in the
        # order they occur (furthest to the left is most significant)
        if order is None or not len(order):
            order = [(m, True) for m in measures]

        # We loop through the order list to add the columns themselves
        for key, direction in order:
            # If it's a part of the measures we have to order by the
            # aggregated values (the sum of the measure)
            if key in measures:
                column = func.sum(alias.c[key]).label(key)
            # If it's in the labels we have to get the mapped column
            elif key in labels:
                column = labels[key]
            # ...if not we just get the column from the dataset
            else:
                column = dataset.key(key)
            # We append the column and set the direction (True == descending)
            order_by.append(column.desc() if direction else column.asc())

        # query 1: get overall sums.
        # Here we use the stats_field we saved earlier
        query = select(stats_fields, conditions, joins)
        rp = dataset.bind.execute(query)
        # Execute the query and turn them to a list so we can pop the
        # entry count and then zip the measurements and the totals together
        stats = list(rp.fetchone())
        num_entries = stats.pop()
        total = zip(measures, stats)

        # query 2: get total count of drilldowns
        if len(group_by):
            # Select 1 for each group in the group_by and count them
            query = select(['1'], conditions, joins, group_by=group_by)
            query = select([func.count('1')], '1=1', query.alias('q'))
            rp = dataset.bind.execute(query)
            num_drilldowns, = rp.fetchone()
        else:
            # If there are no drilldowns we still have to do one
            num_drilldowns = 1

        # The drilldown result list
        drilldown = []
        # The offset in the db, based on the page and pagesize (we have to
        # modify it since page counts start from 1 but we count from 0)
        offset = int((page - 1) * pagesize)

        # query 3: get the actual data
        query = select(fields, conditions, joins, order_by=order_by,
                       group_by=group_by, use_labels=True,
                       limit=pagesize, offset=offset)
        rp = dataset.bind.execute(query)

        while True:
            # Get each row in the db result and append it, decoded, to the
            # drilldown result. The decoded version is a json representation
            row = rp.fetchone()
            if row is None:
                break
            result = decode_row(row, dataset)
            drilldown.append(result)

        # Create the summary based on the stats_fields and other things
        # First we add the total for each measurement in the root of the
        # summary (watch out!) and then we add various other, self-explanatory
        # statistics such as page, number of entries. The currency value is
        # strange since it's redundant for multiple measures but is left as is
        # for backwards compatibility
        summary = {key: value for (key, value) in total}
        summary.update({
            'num_entries': num_entries,
            'currency': {m: dataset.currency for m in measures},
            'num_drilldowns': num_drilldowns,
            'page': page,
            'pages': int(math.ceil(num_drilldowns / float(pagesize))),
            'pagesize': pagesize
        })

        return {'drilldown': drilldown, 'summary': summary}
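
The cut handling above builds one OR group per column and ANDs the groups together. Below is a minimal, self-contained sketch of the same pattern with plain SQLAlchemy Core; the `entry` table, its columns and the cut values are illustrative assumptions, not taken from the original dataset.

from collections import defaultdict
from sqlalchemy import MetaData, Table, Column, Integer, String, and_, or_, select

metadata = MetaData()
# Illustrative table; the real code operates on a dynamically loaded dataset.
entry = Table('entry', metadata,
              Column('year', Integer),
              Column('who', String),
              Column('amount', Integer))

# cuts as (key, value) pairs, e.g. parsed from "year:2007|who:me|who:you"
cuts = [('year', 2007), ('who', 'me'), ('who', 'you')]

filters = defaultdict(set)
for key, value in cuts:
    filters[entry.c[key]].add(value)

# One OR group per column, AND-ed across columns:
#   year = 2007 AND (who = 'me' OR who = 'you')
conditions = [or_(*[column == v for v in values])
              for column, values in filters.items()]
query = select([entry]).where(and_(*conditions))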
Code example #37
File: asterisk_conf_dao.py Project: jaunis/xivo-dao
def find_sip_trunk_settings(session):
    rows = session.query(UserSIP).filter(
        and_(UserSIP.category == 'trunk', UserSIP.commented == 0)).all()

    return [row.todict() for row in rows]
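
A possible usage sketch for this xivo-dao finder; the engine URL, session setup and the dict keys printed below are assumptions for illustration, only the call pattern comes from the snippet.

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine('mysql://user:password@localhost/asterisk')  # placeholder DSN
Session = sessionmaker(bind=engine)
session = Session()

# Each row is returned as a plain dict via row.todict()
for trunk in find_sip_trunk_settings(session):
    print(trunk.get('name'), trunk.get('host'))  # assumed keys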
Code example #38
    def statement(cls, bag, criteria):
        """ Create a statement from criteria
        :type bag: mongosql.bag.ModelPropertyBags
        :rtype: sqlalchemy.sql.elements.BooleanClauseList
        """
        # Assuming a dict of { column: value } and { column: { $op: value } }

        conditions = []
        for col_name, criteria in criteria.items():
            # Boolean expressions?
            if col_name in {'$or', '$and', '$nor'}:
                assert isinstance(
                    criteria, (list, tuple)
                ), 'Criteria: {} argument must be a list'.format(col_name)
                if len(criteria) == 0:
                    continue  # skip empty

                criteria = map(lambda s: cls.statement(bag, s),
                               criteria)  # now a list of expressions
                if col_name == '$or':
                    cc = or_(*criteria)
                elif col_name == '$and':
                    cc = and_(*criteria)
                elif col_name == '$nor':
                    cc = or_(*criteria)
                    conditions.append(
                        ~cc.self_group() if len(criteria) > 1 else ~cc)
                    continue
                else:
                    raise KeyError('Unknown operator ' + col_name)

                conditions.append(cc.self_group() if len(criteria) > 1 else cc)
                continue
            elif col_name == '$not':
                assert isinstance(
                    criteria, dict), 'Criteria: $not argument must be a dict'
                criteria = cls.statement(bag, criteria)
                conditions.append(not_(criteria))
                continue

            # Prepare
            col = bag.columns[col_name]
            col_array = bag.columns.is_column_array(col_name)
            col_json = bag.columns.is_column_json(col_name)

            # Fake equality
            if not isinstance(criteria, dict):
                criteria = {
                    '$eq': criteria
                }  # fake the missing equality operator for simplicity

            # Iterate over operators
            for op, value in criteria.items():
                value_array = isinstance(value, (list, tuple))

                # Coerce operand
                if col_array and value_array:
                    value = cast(pg.array(value), pg.ARRAY(col.type.item_type))
                if col_json:
                    coerce_type = col.type.coerce_compared_value(
                        '=', value)  # HACKY: use sqlalchemy type coercion
                    col = cast(col.astext, coerce_type)

                # Operators
                if op == '$eq':
                    if col_array:
                        if value_array:
                            conditions.append(col == value)  # Array equality
                        else:
                            conditions.append(
                                col.any(value)
                            )  # ANY(array) = value, for scalar values
                    else:
                        conditions.append(
                            col == value)  # array == value, for both scalars
                elif op == '$ne':
                    if col_array:
                        if value_array:
                            conditions.append(col != value)  # Array inequality
                        else:
                            conditions.append(
                                col.all(value, operators.ne)
                            )  # ALL(array) != value, for scalar values
                    else:
                        conditions.append(
                            col != value)  # scalar != scalar
                elif op == '$lt':
                    conditions.append(col < value)
                elif op == '$lte':
                    conditions.append(col <= value)
                elif op == '$gte':
                    conditions.append(col >= value)
                elif op == '$gt':
                    conditions.append(col > value)
                elif op == '$in':
                    assert value_array, 'Criteria: $in argument must be a list'
                    if col_array:
                        conditions.append(
                            col.overlap(value))  # field && ARRAY[values]
                    else:
                        conditions.append(col.in_(value))  # field IN(values)
                elif op == '$nin':
                    assert value_array, 'Criteria: $nin argument must be a list'
                    if col_array:
                        conditions.append(~col.overlap(value)
                                          )  # NOT( field && ARRAY[values] )
                    else:
                        conditions.append(
                            col.notin_(value))  # field NOT IN(values)
                elif op == '$exists':
                    if value:
                        conditions.append(col != None)  # IS NOT NULL
                    else:
                        conditions.append(col == None)  # IS NULL
                elif op == '$all':
                    assert col_array, 'Criteria: $all can only be applied to an array column'
                    assert value_array, 'Criteria: $all argument must be a list'
                    conditions.append(col.contains(value))
                elif op == '$size':
                    assert col_array, 'Criteria: $size can only be applied to an array column'
                    if value == 0:
                        conditions.append(func.array_length(
                            col, 1) == None)  # ARRAY_LENGTH(field, 1) IS NULL
                    else:
                        conditions.append(func.array_length(
                            col,
                            1) == value)  # ARRAY_LENGTH(field, 1) == value
                else:
                    raise AssertionError(
                        'Criteria: unsupported operator "{}"'.format(op))
        if conditions:
            cc = and_(*conditions)
            return cc.self_group() if len(conditions) > 1 else cc
        else:
            return True
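
To make the operator mapping concrete, here is a small illustrative criteria dict in MongoDB style, with comments showing roughly the SQL the branches above would produce. The column names `age` and `tags` are hypothetical, and `bag` is assumed to describe a model with an integer column and a PostgreSQL array column.

criteria = {
    'age': {'$gte': 18, '$lt': 65},              # age >= 18 AND age < 65
    'tags': {'$all': ['news', 'tech']},          # tags @> ARRAY['news', 'tech']
    '$or': [{'age': 21}, {'age': {'$in': [30, 40]}}],
}

# clause = SomeFilterClass.statement(bag, criteria)   # the classmethod shown above
# Roughly renders as:
#   age >= 18 AND age < 65
#   AND tags @> ARRAY['news', 'tech']
#   AND (age = 21 OR age IN (30, 40))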
Code example #39
File: asterisk_conf_dao.py Project: jaunis/xivo-dao
def find_queue_general_settings(session):
    rows = session.query(StaticQueue).filter(
        and_(StaticQueue.commented == 0,
             StaticQueue.category == 'general')).all()

    return [row.todict() for row in rows]
Code example #40
File: asterisk_conf_dao.py Project: jaunis/xivo-dao
def find_iax_trunk_settings(session):
    rows = session.query(UserIAX).filter(
        and_(UserIAX.commented == 0, UserIAX.category == 'trunk')).all()

    return [row.todict() for row in rows]
Code example #41
File: sync.py Project: millerf/v6_api
def get_changed_routes_and_outings_ww(session, last_update):
    """ Returns the routes and outings when associations between waypoint
    and waypoint have been created/removed.
    E.g. when an association between waypoint W1 and W2 is created,
    all routes associated to W2, all routes associated to the direct
    children of W2 and all outings associated to these routes have to be
    updated.

    For example given the following associations:
    W1 -> W2, W2 -> W3, W3 -> R1
    Route R1 has the following `associated_waypoint_ids`: W3, W2, W1

    When association W1 -> W2 is deleted, all routes linked to W2 and all
    routes linked to the direct waypoint children of W2 (in this case W3) have
    to be updated.
    After the update, `associated_waypoint_ids` of R1 is: W3, W2
    """
    select_changed_waypoints = session. \
        query(AssociationLog.child_document_id.label('waypoint_id')). \
        filter(and_(
            AssociationLog.parent_document_type == WAYPOINT_TYPE,
            AssociationLog.child_document_type == WAYPOINT_TYPE
        )). \
        filter(AssociationLog.written_at >= last_update). \
        cte('changed_waypoints')
    select_changed_waypoint_children = session. \
        query(Association.child_document_id.label('waypoint_id')). \
        select_from(select_changed_waypoints). \
        join(
            Association,
            and_(
                Association.parent_document_id ==
                select_changed_waypoints.c.waypoint_id,
                Association.child_document_type == WAYPOINT_TYPE
            )). \
        cte('changed_waypoint_children')

    select_all_changed_waypoints = union(
        select_changed_waypoints.select(),
        select_changed_waypoint_children.select()). \
        cte('all_changed_waypoints')

    select_changed_routes = session. \
        query(
            Association.child_document_id.label('route_id')
            ). \
        select_from(select_all_changed_waypoints). \
        join(
            Association,
            and_(
                Association.parent_document_id ==
                select_all_changed_waypoints.c.waypoint_id,
                Association.child_document_type == ROUTE_TYPE
            )). \
        group_by(Association.child_document_id). \
        cte('changed_routes')

    select_changed_outings = session. \
        query(
            Association.child_document_id.label('outing_id')). \
        select_from(select_changed_routes). \
        join(
            Association,
            and_(
                Association.parent_document_id ==
                select_changed_routes.c.route_id,
                Association.child_document_type == OUTING_TYPE
            )). \
        group_by(Association.child_document_id). \
        cte('changed_outings')

    select_changed_routes_and_outings = union(
        session.query(
            select_changed_routes.c.route_id.label('document_id'),
            literal(ROUTE_TYPE).label('type')
        ).select_from(select_changed_routes),
        session.query(
            select_changed_outings.c.outing_id.label('document_id'),
            literal(OUTING_TYPE).label('type')
        ).select_from(select_changed_outings)). \
        cte('changed_routes_and_outings')

    return session. \
        query(
            select_changed_routes_and_outings.c.document_id,
            select_changed_routes_and_outings.c.type). \
        select_from(select_changed_routes_and_outings). \
        all()
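
A hedged usage sketch: `session` and `last_update` (the datetime of the previous sync run) are assumed to be provided by the surrounding sync job.

changed = get_changed_routes_and_outings_ww(session, last_update)
for document_id, doc_type in changed:
    # e.g. queue these documents for re-indexing in the search engine
    print(document_id, doc_type)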
Code example #42
File: asterisk_conf_dao.py Project: jaunis/xivo-dao
def find_general_features_settings(session):
    rows = session.query(Features).filter(
        and_(Features.commented == 0, Features.category == 'general')).all()

    return [row.todict() for row in rows]
Code example #43
    def validate_records(self, session, cohort):
        """
        Fetches the wiki_user(s) already added for self.cohort_id and validates
        their raw_id_or_name field against their stated project as either a user_id
        or user_name.  Once done, sets the valid state and deletes any duplicates.
        Then, it finishes filling in the data model by inserting corresponding
        records into the cohort_wiki_users table.

        This is meant to execute asynchronously on celery

        Parameters
            session : an active wikimetrics db session to use
            cohort  : the cohort to validate; must belong to session
        """
        # reset the cohort validation status so it can't be used for reports
        cohort.validated = False
        session.execute(WikiUserStore.__table__.update().values(
            valid=None).where(WikiUserStore.validating_cohort == cohort.id))
        session.execute(CohortWikiUserStore.__table__.delete().where(
            CohortWikiUserStore.cohort_id == cohort.id))
        session.commit()

        wikiusers = session.query(WikiUserStore) \
            .filter(WikiUserStore.validating_cohort == cohort.id) \
            .all()

        deduplicated = deduplicate_by_key(
            wikiusers, lambda r:
            (r.raw_id_or_name, normalize_project(r.project) or r.project))

        wikiusers_by_project = {}
        for wu in deduplicated:
            normalized_project = normalize_project(wu.project)
            if normalized_project is None:
                wu.reason_invalid = 'invalid project: {0}'.format(wu.project)
                wu.valid = False
                continue

            wu.project = normalized_project
            if wu.project not in wikiusers_by_project:
                wikiusers_by_project[wu.project] = []
            wikiusers_by_project[wu.project].append(wu)

            # validate bunches of records to update the UI but not kill performance
            if len(wikiusers_by_project[wu.project]) > 999:
                validate_users(wikiusers_by_project[wu.project], wu.project,
                               self.validate_as_user_ids)
                session.commit()
                wikiusers_by_project[wu.project] = []

        # validate anything that wasn't big enough for a batch
        for project, wikiusers in wikiusers_by_project.iteritems():
            if len(wikiusers) > 0:
                validate_users(wikiusers, project, self.validate_as_user_ids)
        session.commit()

        session.execute(CohortWikiUserStore.__table__.insert(),
                        [{
                            'cohort_id': cohort.id,
                            'wiki_user_id': wu.id,
                        } for wu in deduplicated])

        # clean up any duplicate wiki_user records
        session.execute(WikiUserStore.__table__.delete().where(
            and_(WikiUserStore.validating_cohort == cohort.id,
                 WikiUserStore.id.notin_([wu.id for wu in deduplicated]))))
        cohort.validated = True
        session.commit()
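
`deduplicate_by_key` and `normalize_project` are imported helpers that are not shown here. The following is a minimal sketch of what a key-based deduplication helper could look like; it is an assumption for illustration, not necessarily the wikimetrics implementation.

def deduplicate_by_key(records, key):
    """Keep the first record seen for each key(record), preserving order."""
    seen = set()
    unique = []
    for record in records:
        k = key(record)
        if k not in seen:
            seen.add(k)
            unique.append(record)
    return unique

# e.g. deduplicate_by_key(wikiusers,
#                         lambda r: (r.raw_id_or_name, r.project))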
Code example #44
File: asterisk_conf_dao.py Project: jaunis/xivo-dao
def find_exten_xivofeatures_setting(session):
    rows = (session.query(Extension).filter(
        and_(Extension.context == 'xivo-features',
             Extension.commented == 0)).order_by('exten').all())

    return [row.todict() for row in rows]
Code example #45
File: models.py Project: rdarley/MaxBot
    def combine_filter(self, filter_value):
        return expression.and_(filter_value[0])
Code example #46
    def restrict_to_ready_deliverable_works(
        cls, query, work_model, edition_model=None, collection_ids=None,
        show_suppressed=False, allow_holds=True,
    ):
        """Restrict a query to show only presentation-ready works present in
        an appropriate collection which the default client can
        fulfill.

        Note that this assumes the query has an active join against
        LicensePool.

        :param query: The query to restrict.

        :param work_model: Either Work or MaterializedWorkWithGenre

        :param edition_model: Either Edition or MaterializedWorkWithGenre

        :param show_suppressed: Include titles that have nothing but
        suppressed LicensePools.

        :param collection_ids: Only include titles in the given
        collections.

        :param allow_holds: If false, pools with no available copies
        will be hidden.
        """
        edition_model = edition_model or work_model

        # Only find presentation-ready works.
        #
        # Such works are automatically filtered out of
        # the materialized view, but we need to filter them out of Work.
        if work_model == Work:
            query = query.filter(
                work_model.presentation_ready == True,
            )

        # Only find books that have some kind of DeliveryMechanism.
        LPDM = LicensePoolDeliveryMechanism
        exists_clause = exists().where(
            and_(LicensePool.data_source_id==LPDM.data_source_id,
                LicensePool.identifier_id==LPDM.identifier_id)
        )
        query = query.filter(exists_clause)

        # Some sources of audiobooks may be excluded because the
        # server can't fulfill them or the expected client can't play
        # them.
        _db = query.session
        excluded = ConfigurationSetting.excluded_audio_data_sources(_db)
        if excluded:
            audio_excluded_ids = [
                DataSource.lookup(_db, x).id for x in excluded
            ]
            query = query.filter(
                or_(edition_model.medium != EditionConstants.AUDIO_MEDIUM,
                    ~LicensePool.data_source_id.in_(audio_excluded_ids))
            )

        # Only find books with unsuppressed LicensePools.
        if not show_suppressed:
            query = query.filter(LicensePool.suppressed==False)

        # Only find books with available licenses.
        query = query.filter(
                or_(LicensePool.licenses_owned > 0, LicensePool.open_access)
        )

        # Only find books in an appropriate collection.
        if collection_ids is not None:
            query = query.filter(
                LicensePool.collection_id.in_(collection_ids)
            )

        # If we don't allow holds, hide any books with no available copies.
        if not allow_holds:
            query = query.filter(
                or_(LicensePool.licenses_available > 0, LicensePool.open_access)
            )
        return query
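
A hedged usage sketch: `_db`, `collection`, the `Work` relationship used in the join and the owning class name are all assumptions; only the method signature is taken from the snippet above.

# Start from a query that already joins the work model to LicensePool,
# as the docstring requires, then narrow it down.
query = _db.query(Work).join(Work.license_pools)            # assumed relationship name
query = SomeLaneClass.restrict_to_ready_deliverable_works(  # hypothetical owner class
    query,
    work_model=Work,
    collection_ids=[collection.id],
    allow_holds=False,
)
ready_works = query.all()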
Code example #47
  def updateFileStatus(self, fileStatusDict, ftsGUID=None):
    """Update the file ftsStatus and error
        The update is only done if the file is not in a final state
        (To avoid bringing back to life a file by consuming MQ a posteriori)



        TODO: maybe it should first query the status and filter the rows we want to update!

       :param fileStatusDict : { fileID : { status , error, ftsGUID } }
       :param ftsGUID: If specified, only update the rows where the ftsGUID matches this value.
                       This avoids two jobs that handle the same file one after another stepping on each other's toes.
                       Note that for the moment it is an optional parameter, but it may turn mandatory soon.

    """

    # This is inefficient as we update every file, even if it did not change, and we commit every time.
    # It would probably be best to update only the files that changed.
    # However, committing every time is the recommendation of MySQL
    # (https://dev.mysql.com/doc/refman/5.7/en/innodb-deadlocks-handling.html)

    for fileID, valueDict in fileStatusDict.iteritems():

      session = self.dbSession()
      try:

        updateDict = {FTS3File.status: valueDict['status']}

        # We only update error if it is specified
        if 'error' in valueDict:
          newError = valueDict['error']
          # Replace empty string with None
          if not newError:
            newError = None
          updateDict[FTS3File.error] = newError

        # We only update ftsGUID if it is specified
        if 'ftsGUID' in valueDict:
          newFtsGUID = valueDict['ftsGUID']
          # Replace empty string with None
          if not newFtsGUID:
            newFtsGUID = None
          updateDict[FTS3File.ftsGUID] = newFtsGUID

        # We only update the lines matching:
        # * the good fileID
        # * the status is not Final

        whereConditions = [FTS3File.fileID == fileID,
                           ~ FTS3File.status.in_(FTS3File.FINAL_STATES)]

        # If an ftsGUID is specified, add it to the `where` condition
        if ftsGUID:
          whereConditions.append(FTS3File.ftsGUID == ftsGUID)

        updateQuery = update(FTS3File)\
            .where(and_(*whereConditions)
                   )\
            .values(updateDict)

        session.execute(updateQuery)

        session.commit()

      except SQLAlchemyError as e:
        session.rollback()
        self.log.exception("updateFileFtsStatus: unexpected exception", lException=e)
        return S_ERROR("updateFileFtsStatus: unexpected exception %s" % e)
      finally:
        session.close()

    return S_OK()
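
A short usage sketch for the dictionary shape `updateFileStatus` expects; the file ids, statuses, GUID and instance name below are made up for illustration.

fileStatusDict = {
    1001: {'status': 'Finished'},
    1002: {'status': 'Failed', 'error': 'Transfer timed out'},
}
res = ftsdb.updateFileStatus(fileStatusDict, ftsGUID='0a1b2c3d')  # ftsdb: an instance of this DB class
if not res['OK']:
    print(res['Message'])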
Code example #48
File: __init__.py Project: onefinestay/taal
    def suggest_translation(self, translatable, from_language, to_language):
        """Suggest a translation for a translatable, into `to_language`, given
        an existing translation in `from_language` based on other occurrences
        of the same context being translated into a matching value for other
        message ids

        given the following translations table
            +----------------------------+------------+----------+----------+
            | context                    | message_id | language | value    |
            +----------------------------+------------+----------+----------+
            | taal:sa_field:model.column | [1]        | en       | Value    |
            | taal:sa_field:model.column | [1]        | fr       | Valeur   |
            | taal:sa_field:model.column | [2]        | en       | Value    |
            +----------------------------+------------+----------+----------+

        suggest_translation(
            TranslatableString(
                context="taal:sa_field:model.column",
                message_id=2),
            from_language='en',
            to_language='fr'
        )

        returns 'Valeur'

        If multiple suggestions are possible, the most frequently occurring one
        is returned
        """
        session = self.session
        model = self.model

        from_value = session.query(model.value).filter(
            model.context == translatable.context,
            model.message_id == translatable.message_id,
            model.language == from_language,
        ).scalar()

        if from_value is None:
            return None

        from_alias = aliased(model, name="from_language")
        to_alias = aliased(model, name="to_language")
        query = session.query(to_alias.value).outerjoin(
            from_alias,
            and_(
                from_alias.context == to_alias.context,
                from_alias.message_id == to_alias.message_id,
            )
        ).filter(
            from_alias.context == translatable.context,
            from_alias.language == from_language,
            from_alias.value == from_value,
            to_alias.language == to_language,
            to_alias.value != NULL,
        ).group_by(
            to_alias.value
        ).order_by(
            desc(func.count())
        )

        return query.limit(1).scalar()
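
A brief usage sketch mirroring the docstring's example table; the manager construction and the exact `TranslatableString` signature are assumed.

suggestion = manager.suggest_translation(
    TranslatableString(context="taal:sa_field:model.column", message_id="[2]"),
    from_language='en',
    to_language='fr',
)
# suggestion == 'Valeur', given the translations shown in the docstring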
Code example #49
    def __call__(self, user_ids, session):
        """
        Parameters:
            user_ids    : list of mediawiki user ids to restrict computation to
            session     : sqlalchemy session open on a mediawiki database

        Returns:
            {
                user id: 1 if they're a rolling surviving new active editor, 0 otherwise
                for all cohort users, or all users that have edits in the time period
            }
        """
        number_of_edits = int(self.number_of_edits.data)
        rolling_days = int(self.rolling_days.data)
        end_date = self.end_date.data
        mid_date = end_date - timedelta(days=rolling_days)
        start_date = end_date - timedelta(days=rolling_days * 2)

        newly_registered = session.query(Logging.log_user) \
            .filter(Logging.log_type == 'newusers') \
            .filter(Logging.log_action == 'create') \
            .filter(between(Logging.log_timestamp, start_date, mid_date))

        # subquery to select only the users registered between start and mid date
        filtered_new = self.filter(newly_registered,
                                   user_ids,
                                   column=Logging.log_user).subquery()

        rev_user = label('user_id', Revision.rev_user)
        ar_user = label('user_id', Archive.ar_user)
        # count edits between start and mid date, for Revision
        rev_count_one = _get_count('count_one',
                                   Revision.rev_timestamp <= mid_date)
        # count edits between start and mid date, for Archive
        ar_count_one = _get_count('count_one',
                                  Archive.ar_timestamp <= mid_date)
        # count edits between mid and end date, for Revision
        rev_count_two = _get_count('count_two',
                                   Revision.rev_timestamp > mid_date)
        # count edits between mid and end date, for Archive
        ar_count_two = _get_count('count_two', Archive.ar_timestamp > mid_date)

        # get both counts by user for Revision
        revisions = session.query(rev_user, rev_count_one, rev_count_two)\
            .filter(between(Revision.rev_timestamp, start_date, end_date))\
            .filter(Revision.rev_user.in_(filtered_new))\
            .group_by(Revision.rev_user)

        # get both counts by user for Archive
        archived = session.query(ar_user, ar_count_one, ar_count_two)\
            .filter(between(Archive.ar_timestamp, start_date, end_date))\
            .filter(Archive.ar_user.in_(filtered_new))\
            .group_by(Archive.ar_user)

        bot_user_ids = session.query(MediawikiUserGroups.ug_user)\
            .filter(MediawikiUserGroups.ug_group == 'bot')\
            .subquery()

        # For each user, with both counts from both tables,
        #   sum the count_one values together, check it's >= number_of_edits
        #   sum the count_two values together, check it's >= number_of_edits
        new_edits = revisions.union_all(archived).subquery()
        new_edits_by_user = session.query(new_edits.c.user_id)\
            .filter(new_edits.c.user_id.notin_(bot_user_ids))\
            .group_by(new_edits.c.user_id)\
            .having(and_(
                func.SUM(new_edits.c.count_one) >= number_of_edits,
                func.SUM(new_edits.c.count_two) >= number_of_edits,
            ))

        metric_results = {r[0]: {self.id: 1} for r in new_edits_by_user.all()}

        if user_ids is None:
            return metric_results
        else:
            return {
                uid: metric_results.get(uid, self.default_result)
                for uid in user_ids
            }
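
`_get_count` is referenced above but not shown. Below is a plausible sketch, assuming it builds a labelled conditional count (a SUM over a CASE expression) for the matching rows; this is an illustration, not necessarily the original helper.

from sqlalchemy import case, func
from sqlalchemy.sql.expression import label

def _get_count(name, condition):
    # SUM(CASE WHEN condition THEN 1 ELSE 0 END) AS <name>
    return label(name, func.sum(case([(condition, 1)], else_=0)))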
Code example #50
File: meals.py Project: madjar/re-eat
    def _meals_in_range(self):
        return (Session.query(Meal).options(joinedload('recipes')).filter(
            and_(self.fro <= Meal.date, Meal.date < self.to)).all())
Code example #51
        result = []
        lim = tg.config.get(constants.TaskPaneLimit)
        ago = datetime.utcnow() + timedelta(days=-long(lim))

        limit = 200
        try:
            limit = int(tg.config.get(constants.task_panel_row_limit, "200"))
        except Exception, e:
            print "Exception: ", e

        LOGGER.debug("get_tasks query start : " + to_str(datetime.utcnow()))
        tasks=DBSession.query(Task.task_id, Task.name, Task.user_name, Task.entity_name, Task.cancellable, \
                                TaskResult.timestamp, TaskResult.endtime, TaskResult.status, TaskResult.results, \
                                Task.entity_type, Task.short_desc).\
             join(TaskResult).\
             filter(or_(and_(Task.repeating == True,TaskResult.visible == True),\
             and_(Task.repeating == False,Task.entity_id != None))).\
             filter(Task.submitted_on >= ago).\
             order_by(TaskResult.timestamp.desc()).limit(limit).all()
        LOGGER.debug("get_tasks query end   : " + to_str(datetime.utcnow()))
        result = self.format_task_result_details(tasks)
        return result

    def get_task_details(self, task_ids):
        result = []
        LOGGER.debug("get_task_details query start : " +
                     to_str(datetime.utcnow()))
        task=DBSession.query(Task).filter(Task.task_id.in_(task_ids)).\
                                options(eagerload("result")).all()
        LOGGER.debug("get_task_details query end   : " +
                     to_str(datetime.utcnow()))
Code example #52
    def test_view_other_comps(self, mock_get_stream, mock_get_inv, db):

        components_count = {}  # group_id -> num expected components
        # where group_id is the tuple (event_id, channel.location)
        for sess in db.session.query(Segment):
            group_id = (sess.event_id, sess.channel.location)
            if group_id not in components_count:
                # we should have created views also for the other components. To get
                # other components, use the segment channel and event id
                other_comps_count = db.session.query(Segment).join(Segment.channel).\
                    filter(and_(Segment.event_id == sess.event_id,
                                Channel.location == sess.channel.location)).count()

                components_count[group_id] = other_comps_count

        def assert_(plotlist, segment, preprocessed, is_invalidated=False):
            '''does some assertion
            :preprocessed: if the plotlist refers to a preprocessed segment
            :is_invalidated: if the plotlist has been invalidated. Valid only
            if preprocessed=False (otherwise plotlist should be None)
            This is used for checking that plotlist.data['sn_windows'] is None
            '''
            # raw:
            # gap: stream: ok, sn_spectra exc, sn_windows: none
            # err: stream exc, sn_spectra: exc, sn_windows: none
            # other: stream ok, sn_spectra:ok, sn_windows: non-none
            # preprocessed
            iserr = 'err' in segment.channel.location
            hasgaps = 'gap' in segment.channel.location
            isinverr = isinstance(plotlist.data['stream'], Exception) and \
                "inventory" in str(plotlist.data['stream'])
            if preprocessed:
                if iserr or hasgaps or isinverr:
                    assert len("".join(plotlist[0].warnings))
                    assert isinstance(plotlist.data['stream'], Exception)
                    # if stream has an exception, as we use the stream for the sn_windows, assert
                    # the exception is the same:
                    assert plotlist.data['sn_windows'] == plotlist.data[
                        'stream']
                    if plotlist[1] is not None:
                        assert len("".join(plotlist[1].warnings))
                else:
                    assert not len("".join(plotlist[0].warnings))
                    assert isinstance(plotlist.data['stream'], Stream)
                    # assert sn_windows are correct:
                    sn_wdw = plotlist.data['sn_windows']
                    assert len(sn_wdw) == 2
                    assert all(
                        isinstance(_, UTCDateTime)
                        for _ in list(sn_wdw[0]) + list(sn_wdw[1]))
                    if plotlist[1] is not None:
                        assert not len("".join(plotlist[1].warnings))
            else:
                # test sn_windows first:
                if is_invalidated:
                    assert plotlist.data[
                        'sn_windows'] is None  # reset from invalidation
                    assert all(p is None for p in plotlist)
                    return
                elif iserr:
                    assert isinstance(plotlist.data['sn_windows'], Exception)
                    # assert also that it is the same exception raised from stream:
                    assert plotlist.data['stream'] == plotlist.data[
                        'sn_windows']
                elif 'gap' in segment.channel.location:
                    # sn_windows should raise, as we have more than one trace:
                    assert isinstance(plotlist.data['sn_windows'], Exception)
                    assert "gap" in str(plotlist.data['sn_windows'])
                else:  # good segment
                    # if the stream is unprocessed, and it was successfully loaded, assert
                    # sn_windows are correct:
                    sn_wdw = plotlist.data['sn_windows']
                    assert len(sn_wdw) == 2
                    assert all(
                        isinstance(_, UTCDateTime)
                        for _ in list(sn_wdw[0]) + list(sn_wdw[1]))
                # test other stuff:
                if iserr:
                    assert len("".join(plotlist[0].warnings))
                    assert isinstance(plotlist.data['stream'], Exception)
                    if plotlist[1] is not None:
                        assert len("".join(plotlist[1].warnings))
                else:
                    assert isinstance(plotlist.data['stream'], Stream)
                    if "gap_unmerged" in segment.channel.location:
                        # assert that trace labels (d[-1]) are displayed with their seed_id.
                        # To prove that,
                        # assert that we didn't name each trace as "chunk1", "chunk2" etcetera:
                        assert all("chunk" not in d[-1]
                                   for d in plotlist[0].data)
                    elif hasgaps:
                        assert "gaps/overlaps" in "".join(plotlist[0].warnings)
                        # assert that we display all traces with "chunk1", "chunk2" etcetera:
                        assert all("chunk" in d[-1] for d in plotlist[0].data)
                    else:
                        assert not len("".join(plotlist[0].warnings))
                    if plotlist[1] is not None:
                        if hasgaps:
                            assert len("".join(plotlist[1].warnings))
                        else:
                            assert not len("".join(plotlist[1].warnings))

        for sess in db.session.query(Segment):

            pmg = PlotManager(self.pymodule, self.config)

            expected_components_count = components_count[(
                sess.event_id, sess.channel.location)]

            mock_get_stream.reset_mock()
            mock_get_inv.reset_mock()
            mock_get_stream.side_effect = original_get_stream
            mock_get_inv.side_effect = original_get_inventory
            allcomponents = True
            preprocessed = False
            idxs = [0]
            # s_id_was_in_views = sess.id in pmg._plots
            plots = pmg.get_plots(db.session, sess.id, idxs, preprocessed,
                                  allcomponents)
            # assert returned plot has the correct number of time/line-series:
            # note that plots[0] might be generated from a stream with gaps
            assert len(plots[0].data) == self.traceslen(
                pmg, sess.id, preprocessed, allcomponents)
            # assert the returned values match the input:
            assert len(plots) == len(idxs)
            assert not self.plotslen(
                pmg, preprocessed=True)  # assert no filtering calculated
            # assert we did not calculate other components (all_components=False)
            assert self.computedplotslen(pmg, sess.id, preprocessed, allcomponents=False) == \
                len(idxs)
            assert self.computedplotslen(pmg, sess.id, preprocessed, allcomponents) == \
                expected_components_count
            # assert SegmentWrapper function calls:
            assert not mock_get_inv.called  # preprocess=False
            assert mock_get_stream.call_count == expected_components_count
            # assert we did not calculate any useless stream:
            assert_(pmg[sess.id][0], sess, preprocessed=False)
            assert pmg[sess.id][1] is None

            # from here on, try to calculate the plots for 3 types: main plot (index 0)
            # index of cumulative, and index of spectra
            CUMUL_INDEX, SN_INDEX, DERIVCUM2_INDEX = [None] * 3  # pylint: disable=invalid-name
            for i, p in enumerate(pmg.userdefined_plots):
                if p['name'] == 'cumulative':
                    CUMUL_INDEX = p['index']
                elif p['name'] == 'sn_spectra':
                    SN_INDEX = p['index']
                elif p['name'] == 'derivcum2':
                    DERIVCUM2_INDEX = p['index']

            if CUMUL_INDEX is None or SN_INDEX is None or DERIVCUM2_INDEX is None:
                raise Exception(
                    'either the test function names have to be changed, or '
                    'the processing file needs to implement "cumulative" and '
                    '"sn_spectra" and "derivcum2"')
            idxs = [0, SN_INDEX, CUMUL_INDEX, DERIVCUM2_INDEX]

            mock_get_stream.reset_mock()
            mock_get_inv.reset_mock()
            allcomponents = True
            preprocessed = False
            # s_id_was_in_views = sess.id in pmg._plots
            plots = pmg.get_plots(db.session, sess.id, idxs, preprocessed,
                                  allcomponents)
            # assert the returned values match the input:
            assert len(plots) == len(idxs)
            assert not self.plotslen(
                pmg, preprocessed=True)  # assert no filtering calculated
            # assert we did not calculate other components (all_components=False)
            assert self.computedplotslen(pmg, sess.id, preprocessed, allcomponents=False) == \
                len(idxs)
            # if we calculate all components, we should have expected components count PLUS
            # all plots which are not the main plot (index 0):
            assert self.computedplotslen(pmg, sess.id, preprocessed, allcomponents) == \
                expected_components_count + sum(_ != 0 for _ in idxs)
            # assert SegmentWrapper function calls:
            assert not mock_get_inv.called  # preprocess=False
            assert not mock_get_stream.called  # already computed
            # assert we did not calculate any useless stream:
            assert_(pmg[sess.id][0], sess, preprocessed=False)
            assert pmg[sess.id][1] is None

            mock_get_stream.reset_mock()
            mock_get_inv.reset_mock()
            allcomponents = False
            preprocessed = True
            # s_id_was_in_views = sess.id in pmg._plots
            plots = pmg.get_plots(db.session, sess.id, idxs, preprocessed,
                                  allcomponents)
            # assert the returned values match the input:
            assert len(plots) == len(idxs)
            assert self.plotslen(
                pmg, preprocessed=True)  # preprocessed (filtered) plots were calculated
            # assert we did not calculate other components (all_components=False)
            assert self.computedplotslen(pmg, sess.id, preprocessed, allcomponents=False) == \
                len(idxs)
            assert self.computedplotslen(pmg, sess.id, preprocessed,
                                         allcomponents) == len(idxs)
            # assert SegmentWrapper function calls:
            if 'err' not in sess.channel.location and 'gap' not in sess.channel.location:
                assert mock_get_inv.called  # preprocess=False
            else:
                assert not mock_get_inv.called
            assert not mock_get_stream.called  # already computed
            # assert we did not calculate any useless stream:
            assert_(pmg[sess.id][0], sess, preprocessed=False)
            assert_(pmg[sess.id][1], sess, preprocessed=True)

            mock_get_stream.reset_mock()
            mock_get_inv.reset_mock()
            allcomponents = True
            preprocessed = True
            # s_id_was_in_views = sess.id in pmg._plots
            plots = pmg.get_plots(db.session, sess.id, idxs, preprocessed,
                                  allcomponents)
            # assert the returned values match the input:
            assert len(plots) == len(idxs)
            assert self.plotslen(
                pmg, preprocessed=True)  # preprocessed (filtered) plots were calculated
            # assert we did not calculate other components (all_components=False)
            assert self.computedplotslen(pmg, sess.id, preprocessed, allcomponents=False) == \
                len(idxs)
            # regardless whether allcomponents is true or false, we compute only the main plot
            assert self.computedplotslen(pmg, sess.id, preprocessed,
                                         allcomponents) == len(idxs)
            # assert SegmentWrapper function calls:
            assert not mock_get_inv.called  # already called
            assert not mock_get_stream.called  # already computed
            # assert all titles are properly set, with the given prefix
            seedid = sess.seed_id
            assert all(p is None or p.title.startswith(seedid)
                       for p in pmg[sess.id][0])
            assert all(p is None or p.title.startswith(seedid)
                       for p in pmg[sess.id][1])
            # check plot titles and warnings:
            stream = pmg[sess.id][0].data['stream']
            preprocessed_stream = pmg[sess.id][1].data['stream']
            if 'err' in sess.channel.location:
                assert isinstance(stream, Exception) and \
                    isinstance(preprocessed_stream, Exception) and \
                    'MiniSeed error' in str(preprocessed_stream)
                for i in idxs:
                    plot, pplot = pmg[sess.id][0][i], pmg[sess.id][1][i]
                    assert len(plot.data) == 1  # only one (fake) trace
                    assert plot.warnings
                    assert len(pplot.data) == 1  # only one (fake) trace
                    assert pplot.warnings

            elif 'gap' in sess.channel.location:
                assert isinstance(stream, Stream) and \
                    isinstance(preprocessed_stream, Exception) and \
                    'gaps/overlaps' in str(preprocessed_stream)
                for i in idxs:
                    plot, pplot = pmg[sess.id][0][i], pmg[sess.id][1][i]
                    # if idx=1, plot has 1 series (due to error in gaps/overlaps) otherwise
                    # matches stream traces count:
                    assert len(plot.data) == (1 if i == 1 else len(stream))
                    if i != 0:  # we are iterating over the spectra plots
                        assert "gaps/overlaps" in plot.warnings[0]
                        assert "gaps/overlaps" in pplot.warnings[0]
                    elif i == 0:  # we are iterating over the streams plots
                        if 'gap_unmerged' in sess.channel.location:
                            # assert that we display all traces with their seed_id. To prove that,
                            # assert that we didn't name each trace as "chunk1", "chunk2" etcetera:
                            assert all("chunk" not in d[-1] for d in plot.data)
                        else:
                            assert 'gaps/overlaps' in pplot.warnings[0]
                            # assert that we display all traces with "chunk1", "chunk2" etcetera:
                            assert all("chunk" in d[-1] for d in plot.data)
                    assert len(pplot.data) == 1  # only one (fake) trace
                    assert pplot.warnings and 'gaps/overlaps' in pplot.warnings[
                        0]  # gaps/overlaps
            else:
                assert isinstance(stream, Stream) and \
                    isinstance(preprocessed_stream, Exception) and \
                    'Station inventory (xml) error: no data' in str(preprocessed_stream)
                for i in idxs:
                    plot, pplot = pmg[sess.id][0][i], pmg[sess.id][1][i]
                    # if idx=SN_INDEX, plot has 2 series (noise/signal) otherwise matches
                    # stream traces count:
                    assert len(plot.data) == (2 if i == SN_INDEX else len(stream))
                    assert not plot.warnings  # no gaps/overlaps warnings
                    assert len(pplot.data) == 1  # only one (fake) trace
                    assert pplot.warnings and 'inventory' in pplot.warnings[
                        0]  # inventory error warning

            # assert we did not calculate any useless stream:
            assert_(pmg[sess.id][0], sess, preprocessed=False)
            assert_(pmg[sess.id][1], sess, preprocessed=True)

            # so we manually set the inventory on the db, discarding it afterwards:
            sess.station.inventory_xml = self.inventory_bytes
            db.session.commit()
            assert sess.station.inventory_xml
            # re-initialize a new PlotManager to assure everything is re-calculated
            # this also sets all cache to None, including pmg.inv_cache:
            pmg = PlotManager(self.pymodule, self.config)

            # calculate plots
            pmg.get_plots(db.session,
                          sess.id,
                          idxs,
                          preprocessed=False,
                          all_components_in_segment_plot=True)
            pmg.get_plots(db.session,
                          sess.id,
                          idxs,
                          preprocessed=True,
                          all_components_in_segment_plot=True)
            # and store their values for later comparison
            sn_plot_unprocessed = pmg[sess.id][0][SN_INDEX].data
            sn_plot_preprocessed = pmg[sess.id][1][SN_INDEX].data
            # shift back the arrival time. 1 second is still within the stream time bounds for
            # the 'ok' stream:
            sn_windows = dict(pmg.config['sn_windows'])
            sn_windows['arrival_time_shift'] -= 1
            pmg.update_config(sn_windows=sn_windows)
            # assert we restored streams that have to be invalidated, and we kept those not to
            # invalidate:
            assert_(pmg[sess.id][0],
                    sess,
                    preprocessed=False,
                    is_invalidated=True)
            assert pmg[sess.id][1] is None
            # and run again the get_plots: with preprocess=False
            plots = pmg.get_plots(db.session,
                                  sess.id,
                                  idxs,
                                  preprocessed=False,
                                  all_components_in_segment_plot=True)
            assert_(pmg[sess.id][0], sess, preprocessed=False)
            assert pmg[sess.id][1] is None
            sn_plot_unprocessed_new = pmg[sess.id][0][SN_INDEX].data
            # we changed the arrival time and both the signal and noise depend on the cumulative,
            # thus changing the arrival time does change the signal window s_stream.
            # Conversely, n_stream should change BUT only for the 'ok' stream (no 'gap' or 'err'
            # in sess.channel.location) as for the other we explicitly set a miniseed starttime,
            # endtime BEFORE the event time which should result in noise stream all padded with
            # zeros regardless of the arrival time shift
            if len(sn_plot_unprocessed) == 1:
                # there was an error in sn ratio (e.g., gaps, overlaps in source stream):
                assert len(sn_plot_unprocessed_new) == 1
            else:
                # both signal and noise plots are different. Check it:
                sig_array_new, sig_array_old = \
                    sn_plot_unprocessed_new[0][2], sn_plot_unprocessed[0][2]
                noi_array_new, noi_array_old = \
                    sn_plot_unprocessed_new[1][2], sn_plot_unprocessed[1][2]

                assert len(sig_array_new) != len(sig_array_old)
                assert len(noi_array_new) != len(noi_array_old) or \
                    not np.allclose(noi_array_new, noi_array_old, equal_nan=True)

            # now run again with preprocessed=True.
            plots = pmg.get_plots(db.session,
                                  sess.id,
                                  idxs,
                                  preprocessed=True,
                                  all_components_in_segment_plot=True)
            sn_plot_preprocessed_new = pmg[sess.id][1][SN_INDEX].data
            # assert the s_stream differs from the previous, as we changed the signal/noise
            # arrival time shift this must hold only for the 'ok' stream (no 'gap' or 'err'
            # in sess.channel.location) as for the other we explicitly set a miniseed starttime,
            # endtime BEFORE the event time (thus by shifting BACK the arrival time we should
            # not see changes in the sess/n stream windows)
            if len(sn_plot_preprocessed) == 1:
                # there was an error in sn ratio (e.g., gaps, overlaps in source stream):
                assert len(sn_plot_preprocessed_new) == 1
            else:
                # both signal and noise plots are different. Check it:
                sig_array_new, sig_array_old = \
                    sn_plot_unprocessed_new[0][2], sn_plot_unprocessed[0][2]
                noi_array_new, noi_array_old = \
                    sn_plot_unprocessed_new[1][2], sn_plot_unprocessed[1][2]

                assert len(sig_array_new) != len(sig_array_old)
                assert len(noi_array_new) != len(noi_array_old) or \
                    not np.allclose(noi_array_new, noi_array_old, equal_nan=True)

            assert_(pmg[sess.id][1], sess, preprocessed=True)
            # re-set the inventory_xml to None:
            sess.station.inventory_xml = None
            db.session.commit()
            assert not sess.station.inventory_xml
Code example #53
def schedule_queued_recipes(*args):
    session.begin()
    try:
        # This query returns a queued host recipe and the guest which has
        # the most recent distro tree. It is to be used as a derived table.
        latest_guest_distro = select([machine_guest_map.c.machine_recipe_id.label('host_id'),
            func.max(DistroTree.date_created).label('latest_distro_date')],
            from_obj=[machine_guest_map.join(GuestRecipe.__table__,
                    machine_guest_map.c.guest_recipe_id==GuestRecipe.__table__.c.id). \
                join(Recipe.__table__).join(DistroTree.__table__)],
            whereclause=Recipe.status=='Queued',
            group_by=machine_guest_map.c.machine_recipe_id).alias()

        hosts_lab_controller_distro_map = aliased(LabControllerDistroTree)
        hosts_distro_tree = aliased(DistroTree)
        guest_recipe = aliased(Recipe)
        guests_distro_tree = aliased(DistroTree)
        guests_lab_controller = aliased(LabController)

        # This query will return queued recipes that are eligible to be scheduled.
        # They are determined to be eligible if:
        # * They are clean
        # * There are systems available (see the filter criteria) in lab controllers where
        #   the recipe's distro tree is available.
        # * If it is a host recipe, the most recently created distro of all
        #   the guest recipe's distros is available in at least one of the same
        #   lab controllers as that of the host's distro tree.
        #
        # Also note that we do not try to handle the situation where the guest and host never
        # have a common labcontroller. In that situation the host and guest would stay queued
        # until that situation was rectified.
        recipes = MachineRecipe.query\
            .join(Recipe.recipeset, RecipeSet.job)\
            .filter(Job.dirty_version == Job.clean_version)\
            .outerjoin((guest_recipe, MachineRecipe.guests))\
            .outerjoin((guests_distro_tree, guest_recipe.distro_tree_id == guests_distro_tree.id))\
            .outerjoin((latest_guest_distro,
                and_(latest_guest_distro.c.host_id == MachineRecipe.id,
                    latest_guest_distro.c.latest_distro_date == \
                    guests_distro_tree.date_created)))\
            .outerjoin(guests_distro_tree.lab_controller_assocs, guests_lab_controller)\
            .join(Recipe.systems)\
            .join((hosts_distro_tree, hosts_distro_tree.id == MachineRecipe.distro_tree_id))\
            .join((hosts_lab_controller_distro_map, hosts_distro_tree.lab_controller_assocs),
                (LabController, and_(
                    hosts_lab_controller_distro_map.lab_controller_id == LabController.id,
                    System.lab_controller_id == LabController.id)))\
            .filter(
                and_(Recipe.status == TaskStatus.queued,
                    System.user == None,
                    LabController.disabled == False,
                    or_(
                        RecipeSet.lab_controller == None,
                        RecipeSet.lab_controller_id == System.lab_controller_id,
                       ),
                    or_(
                        System.loan_id == None,
                        System.loan_id == Job.owner_id,
                       ),
                    or_(
                        # We either have no guest
                        guest_recipe.id == None,
                        # Or we have a guest of which the latest
                        # is in a common lab controller.
                        and_(guests_lab_controller.id == LabController.id,
                            latest_guest_distro.c.latest_distro_date != None
                            ),
                        ) # or
                    ) # and
                  )
        # Get out of here if we have no recipes
        if not recipes.count():
            return False
        # This should be the guest recipe with the latest distro.
        # We return it in this query, to save us from re-running the
        # derived table query in schedule_queued_recipe()
        recipes = recipes.add_column(guest_recipe.id)
        # Effective priority is given in the following order:
        # * Multi host recipes with already scheduled siblings
        # * Priority level (i.e Normal, High etc)
        # * RecipeSet id
        # * Recipe id
        recipes = recipes.order_by(RecipeSet.lab_controller == None). \
            order_by(RecipeSet.priority.desc()). \
            order_by(RecipeSet.id). \
            order_by(MachineRecipe.id)
        # Don't do a GROUP BY before here, it is not needed.
        recipes = recipes.group_by(MachineRecipe.id)
        log.debug("Entering schedule_queued_recipes")
        for recipe_id, guest_recipe_id in recipes.values(
                MachineRecipe.id, guest_recipe.id):
            session.begin(nested=True)
            try:
                schedule_queued_recipe(recipe_id, guest_recipe_id)
                session.commit()
            except (StaleSystemUserException, InsufficientSystemPermissions,
                    StaleTaskStatusException), e:
                # Either
                # System user has changed before
                # system allocation
                # or
                # System permissions have changed before
                # system allocation
                # or
                # Something has moved our status on from queued
                # already.
                log.warn(str(e))
                session.rollback()
            except Exception, e:
                log.exception('Error in schedule_queued_recipe(%s)', recipe_id)
                session.rollback()
                session.begin(nested=True)
                try:
                    recipe = MachineRecipe.by_id(recipe_id)
                    recipe.recipeset.abort(
                        "Aborted in schedule_queued_recipe: %s" % e)
                    session.commit()
                except Exception, e:
                    log.exception(
                        "Error during error handling in schedule_queued_recipe: %s"
                        % e)
                    session.rollback()
Code example #54
def schedule_queued_recipe(recipe_id, guest_recipe_id=None):
    guest_recipe = aliased(Recipe)
    guest_distros_map = aliased(LabControllerDistroTree)
    guest_labcontroller = aliased(LabController)
    # This query will return all the systems that a recipe is
    # able to run on. A system is deemed eligible if:
    # * If the recipe's distro tree is available to the system's lab controller
    # * The system is available (see the filter criteria).
    # * If it's a host recipe, then the system needs to be on a lab controller
    #   that can access the distro tree of both the host recipe,
    #   and the guest recipe.
    systems = System.query.join(System.queued_recipes) \
        .outerjoin(System.cpu) \
        .join(Recipe.recipeset, RecipeSet.job) \
        .join(System.lab_controller, LabController._distro_trees)\
        .join((DistroTree,
            and_(LabControllerDistroTree.distro_tree_id ==
                DistroTree.id, Recipe.distro_tree_id == DistroTree.id)))\
        .outerjoin((machine_guest_map,
            Recipe.id == machine_guest_map.c.machine_recipe_id))\
        .outerjoin((guest_recipe,
            machine_guest_map.c.guest_recipe_id == guest_recipe.id ))\
        .outerjoin((guest_distros_map,
            guest_recipe.distro_tree_id == guest_distros_map.distro_tree_id))\
        .outerjoin((guest_labcontroller,
            guest_distros_map.lab_controller_id == guest_labcontroller.id))\
        .filter(Recipe.id == recipe_id) \
        .filter(or_(guest_recipe.id == guest_recipe_id,
            guest_recipe.id == None))\
        .filter(and_(System.user == None,
                or_(guest_distros_map.id == None,
                    and_(guest_distros_map.id != None,
                        guest_labcontroller.id == LabController.id,
                        ),
                   ),
                LabController.disabled == False,
                or_(System.loan_id == None,
                    System.loan_id == Job.owner_id,
                   ),
                    ), # and
                )

    # We reapply this filter here in case a peer recipe has locked the recipe
    # set in to a particular lab controller earlier in this scheduling pass
    recipe = MachineRecipe.by_id(recipe_id)
    if recipe.recipeset.lab_controller:
        systems = systems.filter(
            System.lab_controller == recipe.recipeset.lab_controller)

    # Something earlier in this pass meant we can't schedule this recipe
    # right now after all. We'll try again next pass.
    if not systems.count():
        return

    # Order systems by owner, then Group, finally shared for everyone.
    # FIXME Make this configurable, so that a user can specify their scheduling
    # preference from the job. (Ordering is implemented; pool support is still
    # needed.)
    # <recipe>
    #  <autopick order='sequence|random'>
    #   <pool>owner</pool>
    #   <pool>groups</pool>
    #   <pool>public</pool>
    #  </autopick>
    # </recipe>
    user = recipe.recipeset.job.owner
    if True:  #FIXME if pools are defined add them here in the order requested.
        systems = System.scheduler_ordering(user, query=systems)
    if recipe.autopick_random:
        system = systems[random.randrange(0, systems.count())]
    else:
        system = systems.first()

    log.debug("System : %s is available for Recipe %s" % (system, recipe.id))
    # Check to see if user still has proper permissions to use the system.
    # Remember the mapping of available systems could have happened hours or even
    # days ago and groups or loans could have been put in place since.
    if not recipe.candidate_systems().filter(System.id == system.id).first():
        log.debug("System : %s recipe: %s no longer has access. removing" %
                  (system, recipe.id))
        recipe.systems.remove(system)
        return

    recipe.resource = SystemResource(system=system)
    # Reserving the system may fail here if someone stole it out from
    # underneath us, but that is fine...
    recipe.resource.allocate()
    recipe.schedule()
    recipe.createRepo()
    recipe.recipeset.lab_controller = system.lab_controller
    recipe.systems = []
    # Create the watchdog without an Expire time.
    log.debug("Created watchdog for recipe id: %s and system: %s" %
              (recipe.id, system))
    recipe.watchdog = Watchdog()
    log.info("recipe ID %s moved from Queued to Scheduled" % recipe.id)

    for guestrecipe in recipe.guests:
        guestrecipe.resource = GuestResource()
        guestrecipe.resource.allocate()
        guestrecipe.schedule()
        guestrecipe.createRepo()
        guestrecipe.watchdog = Watchdog()
        log.info('recipe ID %s guest %s moved from Queued to Scheduled',
                 recipe.id, guestrecipe.id)
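
A minimal sketch (not Beaker code) of the "either there is no guest, or the guest shares the lab controller" filter used above: an outer join combined with or_() and and_(). It uses lightweight Core table()/column() constructs so it compiles without a database; SQLAlchemy 1.4+ is assumed and the table and column names are illustrative. In the ORM query above, aliased(Recipe) and aliased(LabController) play the role of the second table here.

from sqlalchemy import and_, or_, select
from sqlalchemy.sql import column, table

system = table('system', column('id'), column('lab_controller_id'))
guest = table('guest_recipe',
              column('id'), column('system_id'), column('lab_controller_id'))

stmt = (
    select(system.c.id)
    .select_from(system.outerjoin(guest, guest.c.system_id == system.c.id))
    .where(
        or_(
            guest.c.id == None,  # no guest row joined at all
            and_(guest.c.id != None,
                 guest.c.lab_controller_id == system.c.lab_controller_id),
        )
    )
)
print(stmt)  # shows the LEFT OUTER JOIN plus the OR'd conditions
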
Code example #55
0
    @classmethod
    def fetch(cls, _db, worklist, facets, pagination, refresher_method,
              max_age=None, raw=False, **response_kwargs
    ):
        """Retrieve a cached feed from the database if possible.

        Generate it from scratch and store it in the database if
        necessary.

        Return it in the most useful form to the caller.

        :param _db: A database connection.
        :param worklist: The WorkList associated with this feed.
        :param facets: A Facets object that distinguishes this feed from
            others (for instance, by its sort order).
        :param pagination: A Pagination object that explains which
            page of a larger feed is being cached.
        :param refresher_method: A function to call if it turns out
            the contents of the feed need to be regenerated. This
            function must take no arguments and return an object that
            implements __unicode__. (A Unicode string or an OPDSFeed is fine.)
        :param max_age: If a cached feed is older than this, it will
            be considered stale and regenerated. This may be either a
            number of seconds or a timedelta. If no value is
            specified, a default value will be calculated based on
            WorkList and Facets configuration. Setting this value to
            zero will force a refresh.
        :param raw: If this is False (the default), a Response ready to be
            converted into a Flask Response object will be returned. If this
            is True, the CachedFeed object itself will be returned. In most
            non-test situations the default is better.

        :return: A Response or CachedFeed containing up-to-date content.
        """

        # Gather the information necessary to uniquely identify this
        # page of this feed.
        keys = cls._prepare_keys(_db, worklist, facets, pagination)

        # Calculate the maximum cache age, converting from timedelta
        # to seconds if necessary.
        max_age = cls.max_cache_age(worklist, keys.feed_type, facets, max_age)

        # These arguments will probably be passed into get_one, and
        # will be passed into get_one_or_create in the event of a cache
        # miss.

        # TODO: this constraint_clause might not be necessary anymore.
        # ISTR it was an attempt to avoid race conditions, and we do a
        # better job of that now.
        constraint_clause = and_(cls.content!=None, cls.timestamp!=None)
        kwargs = dict(
            on_multiple='interchangeable',
            constraint=constraint_clause,
            type=keys.feed_type,
            library=keys.library,
            work=keys.work,
            lane_id=keys.lane_id,
            unique_key=keys.unique_key,
            facets=keys.facets_key,
            pagination=keys.pagination_key
        )
        feed_data = None
        if (max_age is cls.IGNORE_CACHE or isinstance(max_age, int) and max_age <= 0):
            # Don't even bother checking for a CachedFeed: we're
            # just going to replace it.
            feed_obj = None
        else:
            feed_obj = get_one(_db, cls, **kwargs)

        should_refresh = cls._should_refresh(feed_obj, max_age)
        if should_refresh:
            # This is a cache miss. Either feed_obj is None or
            # it's no good. We need to generate a new feed.
            feed_data = unicode(refresher_method())
            generation_time = datetime.datetime.utcnow()

            if max_age is not cls.IGNORE_CACHE:
                # Having gone through all the trouble of generating
                # the feed, we want to cache it in the database.

                # Since it can take a while to generate a feed, and we know
                # that the feed in the database is stale, it's possible that
                # another thread _also_ noticed that feed was stale, and
                # generated a similar feed while we were working.
                #
                # To avoid a database error, fetch the feed _again_ from the
                # database rather than assuming we have the up-to-date
                # object.
                feed_obj, is_new = get_one_or_create(_db, cls, **kwargs)
                if feed_obj.timestamp is None or feed_obj.timestamp < generation_time:
                    # Either there was no contention for this object, or there
                    # was contention but our feed is more up-to-date than
                    # the other thread(s). Our feed takes priority.
                    feed_obj.content = feed_data
                    feed_obj.timestamp = generation_time
        elif feed_obj:
            feed_data = feed_obj.content

        if raw and feed_obj:
            return feed_obj

        # We have the information necessary to create a useful
        # response-type object.
        #
        # Set some defaults in case the caller didn't pass them in.
        if isinstance(max_age, int):
            response_kwargs.setdefault('max_age', max_age)

        if max_age == cls.IGNORE_CACHE:
            # If we were asked to ignore our internal cache, we should
            # also tell the client not to store this document in _its_
            # internal cache.
            response_kwargs['max_age'] = 0

        return OPDSFeedResponse(
            response=feed_data,
            **response_kwargs
        )
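
A minimal sketch, separate from the library code above, of the staleness logic that fetch() implements: treat the cached entry as stale when it has no content or timestamp or is older than max_age (normalising a timedelta to seconds), regenerate it, and only overwrite the stored copy when nothing newer has appeared in the meantime. The CacheEntry class and the refresher callable are hypothetical stand-ins.

import datetime

class CacheEntry:
    def __init__(self):
        self.content = None
        self.timestamp = None

def fetch_cached(entry, refresher, max_age):
    """Return cached content, regenerating it if older than max_age seconds."""
    now = datetime.datetime.utcnow()
    if isinstance(max_age, datetime.timedelta):
        max_age = max_age.total_seconds()          # normalise timedelta -> seconds
    stale = (
        entry.content is None
        or entry.timestamp is None
        or (now - entry.timestamp).total_seconds() > max_age
    )
    if stale:
        content = refresher()                      # expensive regeneration
        generated_at = datetime.datetime.utcnow()
        # Only overwrite if nobody stored something newer in the meantime.
        if entry.timestamp is None or entry.timestamp < generated_at:
            entry.content = content
            entry.timestamp = generated_at
    return entry.content

entry = CacheEntry()
print(fetch_cached(entry, lambda: "<feed/>", max_age=60))
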
Code example #56
0
File: cc_taskfilter.py Project: tarun911/camcops
    def filter_query_by_patient(self, q: Query, via_index: bool) -> Query:
        """
        Restricts a query that has *already been joined* to the
        :class:`camcops_server.cc_modules.cc_patient.Patient` class, according
        to the patient filtering criteria.

        Args:
            q: the starting SQLAlchemy ORM Query
            via_index:
                If ``True``, the query relates to a
                :class:`camcops_server.cc_modules.cc_taskindex.TaskIndexEntry`
                and we should restrict it according to the
                :class:`camcops_server.cc_modules.cc_taskindex.PatientIdNumIndexEntry`
                class. If ``False``, the query relates to a
                :class:`camcops_server.cc_modules.cc_taskindex.Task` and we
                should restrict according to
                :class:`camcops_server.cc_modules.cc_patientidnum.PatientIdNum`.

        Returns:
            a revised Query

        """
        if self.surname:
            q = q.filter(func.upper(Patient.surname) == self.surname.upper())
        if self.forename:
            q = q.filter(func.upper(Patient.forename) == self.forename.upper())
        if self.dob is not None:
            q = q.filter(Patient.dob == self.dob)
        if self.sex:
            q = q.filter(func.upper(Patient.sex) == self.sex.upper())

        if self.idnum_criteria:
            id_filter_parts = []  # type: List[ColumnElement]
            if via_index:
                q = q.join(PatientIdNumIndexEntry)
                # "Specify possible ID number values"
                for iddef in self.idnum_criteria:
                    id_filter_parts.append(
                        and_(
                            PatientIdNumIndexEntry.which_idnum ==
                            iddef.which_idnum,  # noqa
                            PatientIdNumIndexEntry.idnum_value ==
                            iddef.idnum_value))
                # Use OR (disjunction) of the specified values:
                q = q.filter(or_(*id_filter_parts))
                # "Must have a value for a given ID number type"
                if self.must_have_idnum_type:
                    # noinspection PyComparisonWithNone,PyPep8
                    q = q.filter(
                        and_(
                            PatientIdNumIndexEntry.which_idnum ==
                            self.must_have_idnum_type,  # noqa
                            PatientIdNumIndexEntry.idnum_value != None))
            else:
                # q = q.join(PatientIdNum) # fails
                q = q.join(Patient.idnums)
                # "Specify possible ID number values"
                for iddef in self.idnum_criteria:
                    id_filter_parts.append(
                        and_(PatientIdNum.which_idnum == iddef.which_idnum,
                             PatientIdNum.idnum_value == iddef.idnum_value))
                # Use OR (disjunction) of the specified values:
                q = q.filter(or_(*id_filter_parts))
                # "Must have a value for a given ID number type"
                if self.must_have_idnum_type:
                    # noinspection PyComparisonWithNone,PyPep8
                    q = q.filter(
                        and_(
                            PatientIdNum.which_idnum ==
                            self.must_have_idnum_type,
                            PatientIdNum.idnum_value != None))

        return q
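
A minimal sketch, separate from the CamCOPS code above, of the "OR of (which_idnum, idnum_value) AND pairs" filter built in the method above. It uses Core table()/column() constructs so it compiles without a database; the table and column names are illustrative.

from sqlalchemy import and_, or_, select
from sqlalchemy.sql import column, table

idnum = table('patient_idnum', column('which_idnum'), column('idnum_value'))

criteria = [(1, 12345), (2, 67890)]        # (which_idnum, idnum_value) pairs
parts = [
    and_(idnum.c.which_idnum == which, idnum.c.idnum_value == value)
    for which, value in criteria
]
stmt = select(idnum.c.which_idnum).where(or_(*parts))
print(stmt)   # WHERE (... = ... AND ... = ...) OR (... = ... AND ... = ...)
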
Code example #57
0
File: handler.py Project: ww102111/galaxy
 def __monitor_step( self ):
     """
     Called repeatedly by `monitor` to process waiting jobs. Gets any new
     jobs (either from the database or from its own queue), then iterates
     over all new and waiting jobs to check the state of the jobs each
     depends on. If the job has dependencies that have not finished, it
     goes to the waiting queue. If the job has dependencies with errors,
     it is marked as having errors and removed from the queue. If the job
     belongs to an inactive user it is ignored.
     Otherwise, the job is dispatched.
     """
     # Pull all new jobs from the queue at once
     jobs_to_check = []
     resubmit_jobs = []
     if self.track_jobs_in_database:
         # Clear the session so we get fresh states for job and all datasets
         self.sa_session.expunge_all()
         # Fetch all new jobs
         hda_not_ready = self.sa_session.query(model.Job.id).enable_eagerloads(False) \
             .join(model.JobToInputDatasetAssociation) \
             .join(model.HistoryDatasetAssociation) \
             .join(model.Dataset) \
             .filter(and_( (model.Job.state == model.Job.states.NEW ),
                           or_( ( model.HistoryDatasetAssociation._state == model.HistoryDatasetAssociation.states.FAILED_METADATA ),
                                ( model.HistoryDatasetAssociation.deleted == true() ),
                                ( model.Dataset.state != model.Dataset.states.OK ),
                                ( model.Dataset.deleted == true() ) ) ) ).subquery()
         ldda_not_ready = self.sa_session.query(model.Job.id).enable_eagerloads(False) \
             .join(model.JobToInputLibraryDatasetAssociation) \
             .join(model.LibraryDatasetDatasetAssociation) \
             .join(model.Dataset) \
             .filter(and_((model.Job.state == model.Job.states.NEW),
                     or_((model.LibraryDatasetDatasetAssociation._state != null()),
                         (model.LibraryDatasetDatasetAssociation.deleted == true()),
                         (model.Dataset.state != model.Dataset.states.OK),
                         (model.Dataset.deleted == true())))).subquery()
         if self.app.config.user_activation_on:
             jobs_to_check = self.sa_session.query(model.Job).enable_eagerloads(False) \
                 .outerjoin( model.User ) \
                 .filter(and_((model.Job.state == model.Job.states.NEW),
                              or_((model.Job.user_id == null()), (model.User.active == true())),
                              (model.Job.handler == self.app.config.server_name),
                              ~model.Job.table.c.id.in_(hda_not_ready),
                              ~model.Job.table.c.id.in_(ldda_not_ready))) \
                 .order_by(model.Job.id).all()
         else:
             jobs_to_check = self.sa_session.query(model.Job).enable_eagerloads(False) \
                 .filter(and_((model.Job.state == model.Job.states.NEW),
                              (model.Job.handler == self.app.config.server_name),
                              ~model.Job.table.c.id.in_(hda_not_ready),
                              ~model.Job.table.c.id.in_(ldda_not_ready))) \
                 .order_by(model.Job.id).all()
         # Fetch all "resubmit" jobs
         resubmit_jobs = self.sa_session.query(model.Job).enable_eagerloads(False) \
             .filter(and_((model.Job.state == model.Job.states.RESUBMITTED),
                          (model.Job.handler == self.app.config.server_name))) \
             .order_by(model.Job.id).all()
     else:
         # Get job objects and append to watch queue for any which were
         # previously waiting
         for job_id in self.waiting_jobs:
             jobs_to_check.append( self.sa_session.query( model.Job ).get( job_id ) )
         try:
             while 1:
                 message = self.queue.get_nowait()
                 if message is self.STOP_SIGNAL:
                     return
                 # Unpack the message
                 job_id, tool_id = message
                 # Get the job object and append to watch queue
                 jobs_to_check.append( self.sa_session.query( model.Job ).get( job_id ) )
         except Empty:
             pass
     # Ensure that we get new job counts on each iteration
     self.__clear_job_count()
     # Check resubmit jobs first so that limits of new jobs will still be enforced
     for job in resubmit_jobs:
         log.debug( '(%s) Job was resubmitted and is being dispatched immediately', job.id )
         # Reassemble resubmit job destination from persisted value
         jw = self.job_wrapper( job )
         jw.job_runner_mapper.cached_job_destination = JobDestination( id=job.destination_id, runner=job.job_runner_name, params=job.destination_params )
         self.increase_running_job_count(job.user_id, jw.job_destination.id)
         self.dispatcher.put( jw )
     # Iterate over new and waiting jobs and look for any that are
     # ready to run
     new_waiting_jobs = []
     for job in jobs_to_check:
         try:
             # Check the job's dependencies, requeue if they're not done.
             # Some of these states will only happen when using the in-memory job queue
             job_state = self.__check_job_state( job )
             if job_state == JOB_WAIT:
                 new_waiting_jobs.append( job.id )
             elif job_state == JOB_INPUT_ERROR:
                 log.info( "(%d) Job unable to run: one or more inputs in error state" % job.id )
             elif job_state == JOB_INPUT_DELETED:
                 log.info( "(%d) Job unable to run: one or more inputs deleted" % job.id )
             elif job_state == JOB_READY:
                 self.dispatcher.put( self.job_wrappers.pop( job.id ) )
                 log.info( "(%d) Job dispatched" % job.id )
             elif job_state == JOB_DELETED:
                 log.info( "(%d) Job deleted by user while still queued" % job.id )
             elif job_state == JOB_ADMIN_DELETED:
                 log.info( "(%d) Job deleted by admin while still queued" % job.id )
             elif job_state == JOB_USER_OVER_QUOTA:
                 log.info( "(%d) User (%s) is over quota: job paused" % ( job.id, job.user_id ) )
                 job.set_state( model.Job.states.PAUSED )
                 for dataset_assoc in job.output_datasets + job.output_library_datasets:
                     dataset_assoc.dataset.dataset.state = model.Dataset.states.PAUSED
                     dataset_assoc.dataset.info = "Execution of this dataset's job is paused because you were over your disk quota at the time it was ready to run"
                     self.sa_session.add( dataset_assoc.dataset.dataset )
                 self.sa_session.add( job )
             elif job_state == JOB_ERROR:
                 log.error( "(%d) Error checking job readiness" % job.id )
             else:
                 log.error( "(%d) Job in unknown state '%s'" % ( job.id, job_state ) )
                 new_waiting_jobs.append( job.id )
         except Exception:
             log.exception( "failure running job %d" % job.id )
     # Update the waiting list
     if not self.track_jobs_in_database:
         self.waiting_jobs = new_waiting_jobs
     # Remove cached wrappers for any jobs that are no longer being tracked
     for id in self.job_wrappers.keys():
         if id not in new_waiting_jobs:
             del self.job_wrappers[id]
     # Flush, if we updated the state
     self.sa_session.flush()
     # Done with the session
     self.sa_session.remove()
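
A minimal sketch, separate from the Galaxy code above, of the "exclude ids returned by a not-ready subquery" filter: select the ids to exclude and negate an in_() against that SELECT. Core table()/column() constructs are used so it compiles without a database; names are illustrative. The original builds an explicit .subquery() first, while passing the SELECT directly, as below, renders the same IN (SELECT ...) clause.

from sqlalchemy import and_, select
from sqlalchemy.sql import column, table

job = table('job', column('id'), column('state'), column('handler'))
job_input = table('job_input', column('job_id'), column('ready'))

not_ready = select(job_input.c.job_id).where(job_input.c.ready == False)  # noqa: E712

stmt = select(job.c.id).where(
    and_(job.c.state == 'new',
         job.c.handler == 'handler0',
         ~job.c.id.in_(not_ready))    # NOT IN (SELECT job_id FROM job_input WHERE ...)
)
print(stmt)
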
Code example #58
0
File: kobo.py Project: yahkun/calibre-web
def HandleSyncRequest():
    sync_token = SyncToken.SyncToken.from_headers(request.headers)
    log.info("Kobo library sync request received.")
    if not current_app.wsgi_app.is_proxied:
        log.debug(
            'Kobo: Received unproxied request, changed request port to server port'
        )

    # TODO: Limit the number of books returned per sync call, and rely on the sync-continuation header
    # instead so that the device triggers another sync.

    new_books_last_modified = sync_token.books_last_modified
    new_books_last_created = sync_token.books_last_created
    new_reading_state_last_modified = sync_token.reading_state_last_modified
    sync_results = []

    # We reload the book database so that the user gets a fresh view of the library
    # in case of external changes (e.g: adding a book through Calibre).
    db.reconnect_db(config)

    archived_books = (ub.session.query(ub.ArchivedBook).filter(
        ub.ArchivedBook.user_id == int(current_user.id)).all())

    # We join-in books that have had their Archived bit recently modified in order to either:
    #   * Restore them to the user's device.
    #   * Delete them from the user's device.
    # (Ideally we would use a join for this logic, however cross-database joins don't look trivial in SQLAlchemy.)
    recently_restored_or_archived_books = []
    archived_book_ids = {}
    new_archived_last_modified = datetime.datetime.min
    for archived_book in archived_books:
        if archived_book.last_modified > sync_token.archive_last_modified:
            recently_restored_or_archived_books.append(archived_book.book_id)
        if archived_book.is_archived:
            archived_book_ids[archived_book.book_id] = True
        new_archived_last_modified = max(new_archived_last_modified,
                                         archived_book.last_modified)

    # sqlite gives unexpected results when performing the last_modified comparison without the datetime cast.
    # It looks like it's treating the db.Books.last_modified field as a string and may fail
    # the comparison because of the +00:00 suffix.
    changed_entries = (db.session.query(db.Books).join(db.Data).filter(
        or_(
            func.datetime(db.Books.last_modified) >
            sync_token.books_last_modified,
            db.Books.id.in_(recently_restored_or_archived_books))).filter(
                db.Data.format.in_(KOBO_FORMATS)).all())

    reading_states_in_new_entitlements = []
    for book in changed_entries:
        kobo_reading_state = get_or_create_reading_state(book.id)
        entitlement = {
            "BookEntitlement":
            create_book_entitlement(book,
                                    archived=(book.id in archived_book_ids)),
            "BookMetadata":
            get_metadata(book),
        }

        if kobo_reading_state.last_modified > sync_token.reading_state_last_modified:
            entitlement["ReadingState"] = get_kobo_reading_state_response(
                book, kobo_reading_state)
            new_reading_state_last_modified = max(
                new_reading_state_last_modified,
                kobo_reading_state.last_modified)
            reading_states_in_new_entitlements.append(book.id)

        if book.timestamp > sync_token.books_last_created:
            sync_results.append({"NewEntitlement": entitlement})
        else:
            sync_results.append({"ChangedEntitlement": entitlement})

        new_books_last_modified = max(book.last_modified,
                                      new_books_last_modified)
        new_books_last_created = max(book.timestamp, new_books_last_created)

    changed_reading_states = (ub.session.query(ub.KoboReadingState).filter(
        and_(
            func.datetime(ub.KoboReadingState.last_modified) >
            sync_token.reading_state_last_modified,
            ub.KoboReadingState.user_id == current_user.id,
            ub.KoboReadingState.book_id.notin_(
                reading_states_in_new_entitlements))))
    for kobo_reading_state in changed_reading_states.all():
        book = db.session.query(db.Books).filter(
            db.Books.id == kobo_reading_state.book_id).one_or_none()
        if book:
            sync_results.append({
                "ChangedReadingState": {
                    "ReadingState":
                    get_kobo_reading_state_response(book, kobo_reading_state)
                }
            })
            new_reading_state_last_modified = max(
                new_reading_state_last_modified,
                kobo_reading_state.last_modified)

    sync_shelves(sync_token, sync_results)

    sync_token.books_last_created = new_books_last_created
    sync_token.books_last_modified = new_books_last_modified
    sync_token.archive_last_modified = new_archived_last_modified
    sync_token.reading_state_last_modified = new_reading_state_last_modified

    return generate_sync_response(sync_token, sync_results)
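
A minimal sketch, separate from the calibre-web code above, of the func.datetime() comparison used twice above: wrapping the stored column in SQLite's datetime() makes the comparison run on datetimes rather than on raw strings with a trailing "+00:00" offset. Table and column names are illustrative and the statement compiles without a database.

import datetime
from sqlalchemy import func, select
from sqlalchemy.sql import column, table

books = table('books', column('id'), column('last_modified'))

since = datetime.datetime(2024, 1, 1)
stmt = select(books.c.id).where(
    # Cast the stored value so the comparison is done on datetimes,
    # not on raw timestamp strings.
    func.datetime(books.c.last_modified) > since
)
print(stmt)
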
Code example #59
0
File: asterisk_conf_dao.py Project: jaunis/xivo-dao
def find_meetme_rooms_settings(session):
    rows = session.query(StaticMeetme).filter(
        and_(StaticMeetme.commented == 0,
             StaticMeetme.category == 'rooms')).all()

    return [row.todict() for row in rows]
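
A small aside, not xivo-dao code: a two-clause and_() like the one above is equivalent to passing both criteria to filter()/where(), which ANDs them implicitly. A sketch with Core constructs (illustrative names, no database needed):

from sqlalchemy import and_, select
from sqlalchemy.sql import column, table

static_meetme = table('staticmeetme', column('commented'), column('category'))

explicit = select(static_meetme).where(
    and_(static_meetme.c.commented == 0, static_meetme.c.category == 'rooms'))
implicit = select(static_meetme).where(
    static_meetme.c.commented == 0, static_meetme.c.category == 'rooms')

# Both render the same "WHERE ... AND ..." clause.
print(explicit)
print(implicit)
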
Code example #60
0
def init_model(engine):
    """
    This function initializes the ontology model for the given engine.
    This must be called before using the model. 

    Tables are defined first and then relations mapped to them.

    """
    meta.Session.configure(bind=engine)
    meta.engine = engine

    #define entity tables
    global entity_table
    entity_table = Table('entity', meta.metadata,
        Column('ID', Integer, primary_key=True),
        Column('label', Unicode),
        Column('searchstring', Unicode),
        autoload=True, autoload_with=engine)
    
    global searchpatterns_table
    searchpatterns_table = Table('searchpatterns', meta.metadata,
        Column('searchpattern', Unicode, primary_key=True),
        Column('target_id', ForeignKey('entity.ID'), primary_key=True),
        autoload=True, autoload_with=engine)

    #define idea tables
    global idea_table
    idea_table = Table('idea', meta.metadata,
        Column('ID', ForeignKey('entity.ID'), primary_key=True),
        autoload=True, autoload_with=engine)

    global idea_link_to_table
    idea_link_to_table = Table('idea_link_to', meta.metadata,
        Column('source_id', ForeignKey('idea.ID')),
        Column('target_id', ForeignKey('idea.ID')),
        autoload=True, autoload_with=engine)

    global idea_instance_of_table
    idea_instance_of_table = Table('idea_instance_of', meta.metadata,
        Column('instance_id', ForeignKey('idea.ID')),
        Column('class_id', ForeignKey('idea.ID')),
        autoload=True, autoload_with=engine)

    global idea_graph_edges_table
    idea_graph_edges_table = Table('idea_graph_edges', meta.metadata,
        Column('ID', Integer, primary_key=True),
        Column('ante_id', ForeignKey('idea.ID')),
        Column('cons_id', ForeignKey('idea.ID')),
        autoload=True, autoload_with=engine)
    
    global idea_thinker_graph_edges_table
    idea_thinker_graph_edges_table = Table('idea_thinker_graph_edges', meta.metadata,
        Column('ID', Integer, primary_key=True),
        Column('ante_id', ForeignKey('entity.ID')),
        Column('cons_id', ForeignKey('entity.ID')),
        autoload=True, autoload_with=engine)
    
    global thinker_graph_edges_table
    thinker_graph_edges_table = Table('thinker_graph_edges', meta.metadata,
        Column('ante_id', ForeignKey('thinker.ID'), primary_key=True),
        Column('cons_id', ForeignKey('thinker.ID'), primary_key=True),
        autoload=True, autoload_with=engine)

    global ontotree_table
    ontotree_table = Table('ontotree', meta.metadata,
        Column('ID', ForeignKey('entity.ID'), primary_key=True),
        Column('concept_id', ForeignKey('idea.ID')),
        Column('area_id', ForeignKey('idea.ID')),
        Column('parent_concept_id', ForeignKey('idea.ID')),
        Column('parent_id', ForeignKey('ontotree.ID')), 
        autoload=True, autoload_with=engine)

    # note - when moving to new schema, will need to change this table's ORM
    global idea_evaluation_table
    idea_evaluation_table = Table('idea_evaluation', meta.metadata,
        Column('ID', Integer, primary_key=True),
        Column('cons_id', ForeignKey('idea.ID')),
        Column('ante_id', ForeignKey('idea.ID')),
        Column('uid', ForeignKey('inpho_user.ID')),
        autoload=True, useexisting=True, autoload_with=engine)
    
    global anon_evaluation_table
    anon_evaluation_table = Table('anon_eval', meta.metadata,
        Column('ip', String, primary_key=True),
        Column('cons_id', ForeignKey('idea.ID'), primary_key=True),
        Column('ante_id', ForeignKey('idea.ID'), primary_key=True),
        autoload=True, useexisting=True, autoload_with=engine)
    

    #define thinker tables
    global thinker_table
    thinker_table = Table('thinker', meta.metadata,
        Column('ID', ForeignKey('entity.ID'), primary_key=True),
        autoload=True, autoload_with=engine, useexisting=True)
    
    global nationality_table
    nationality_table = Table('nationality', meta.metadata,
        Column('ID', Integer, primary_key=True),
        autoload=True, autoload_with=engine)
    
    global thinker_has_nationality_table
    thinker_has_nationality_table = Table('thinker_has_nationality', meta.metadata,
        Column('thinker_id', ForeignKey('thinker.ID')),
        Column('value', ForeignKey('nationality.ID')),
        autoload=True, autoload_with=engine)

    global profession_table
    profession_table = Table('profession', meta.metadata,
        Column('id', Integer, primary_key=True),
        autoload=True, autoload_with=engine)
    
    global thinker_has_profession_table
    thinker_has_profession_table = Table('thinker_has_profession', meta.metadata,
        Column('thinker_id', ForeignKey('thinker.ID')),
        Column('value', ForeignKey('profession.id')),
        autoload=True, autoload_with=engine)

    global alias_table
    alias_table = Table('alias', meta.metadata,
        Column('thinker_id', ForeignKey('entity.ID'), primary_key=True),
        Column('value', Integer, primary_key=True),
        autoload=True, autoload_with=engine)

    global thinker_has_influenced_evaluation_table
    thinker_has_influenced_evaluation_table = Table('thinker_has_influenced_evaluation', meta.metadata,
        Column('thinker1_id', ForeignKey('thinker.ID'), primary_key=True),
        Column('thinker2_id', ForeignKey('thinker.ID'), primary_key=True),
        Column('uid', ForeignKey('inpho_user.ID'), primary_key=True),
        autoload=True, autoload_with=engine)

    global thinker_teacher_of_evaluation_table
    thinker_teacher_of_evaluation_table = Table('thinker_teacher_of_evaluation', meta.metadata,
        Column('thinker1_id', ForeignKey('thinker.ID'), primary_key=True),
        Column('thinker2_id', ForeignKey('thinker.ID'), primary_key=True),
        Column('uid', ForeignKey('inpho_user.ID'), primary_key=True),
        autoload=True, autoload_with=engine)

    # Journal tables
    global journal_table
    journal_table = Table('journal', meta.metadata,
        Column('ID', ForeignKey('entity.ID'), primary_key=True),
        autoload=True, autoload_with=engine)

    global journal_abbr_table
    journal_abbr_table = Table('journal_abbr', meta.metadata,
        Column('id', Integer, primary_key=True),
        Column('journal_id', ForeignKey('journal.ID')),
        autoload=True, autoload_with=engine)

    global journal_search_query_table
    journal_search_query_table = Table('journal_search_query', meta.metadata,
        Column('id', Integer, primary_key=True),
        Column('journal_id', ForeignKey('journal.ID')),
        Column('value', String),
        autoload=True, autoload_with=engine)

    # Group tables
    global school_of_thought_table
    school_of_thought_table = Table('school_of_thought', meta.metadata,
        Column('ID', ForeignKey('entity.ID'), primary_key=True),
        autoload=True, autoload_with=engine)
    
    # Work tables
    global work_table
    work_table = Table('work', meta.metadata,
        Column('ID', ForeignKey('entity.ID'), primary_key=True),
        autoload=True, autoload_with=engine)

    # User tables
    global user_table
    user_table = Table('inpho_user', meta.metadata,
        Column('ID', Integer, primary_key=True),
        Column('first_area_concept_id', ForeignKey('idea.ID')),
        Column('second_area_concept_id', ForeignKey('idea.ID')),
        Column("group_uid", ForeignKey("groups.uid")),
        autoload=True, autoload_with=engine)

    global sep_area_table
    sep_area_table = Table('sep_areas', meta.metadata,
        Column('id', Integer, primary_key=True),
        Column('concept_id', ForeignKey('idea.ID')),
        autoload=True, autoload_with=engine)
    
    global sepentries_table
    sepentries_table = Table('sepentries', meta.metadata,
        Column('sep_dir', String, unique=True, nullable=False, primary_key=True),
        Column('title', String, unique=True, nullable=False),
        Column('published', Boolean, nullable=False),
        Column('status', String, nullable=False),
        autoload=True, autoload_with=engine
    )
    
    global fuzzymatches_table
    fuzzymatches_table = Table('fuzzymatches', meta.metadata,
        Column('sep_dir', ForeignKey('sepentries.sep_dir'), primary_key=True),
        Column('entityID', ForeignKey('entity.ID'), primary_key=True),
        Column('strength', Numeric),
        Column('edits', Numeric),
        autoload=True, autoload_with=engine)
    
    global groups_table
    groups_table = Table(
            "groups",
            meta.metadata,
            Column("uid", Integer, primary_key=True),
            Column("name", String, unique=True,    nullable=False),
        )
    global roles_table
    roles_table = Table(
            "roles",
            meta.metadata,
            Column("uid", Integer, primary_key=True),
            Column("name", String, unique=True,    nullable=False),
        )
    
    global users_roles_table
    users_roles_table = Table(                # many:many relation table
            "users_roles",
            meta.metadata,
            Column("user_uid", ForeignKey("inpho_user.ID")),
            Column("role_uid", ForeignKey("roles.uid")),
        )

    #define idea relations
    mapper(Entity, entity_table, polymorphic_on=entity_table.c.typeID, polymorphic_identity=0, properties={
        'alias':relation(Alias), 
        #'spatterns':relation(Searchpattern),
            'spatterns':relation(Searchpattern, 
            cascade="all,delete-orphan"),
            })
    
    mapper(Idea, idea_table, inherits=Entity, polymorphic_on=entity_table.c.typeID, polymorphic_identity=1, properties={
        'links':relation(Idea, secondary=idea_link_to_table,
            primaryjoin=(idea_table.c.ID == idea_link_to_table.c.source_id),
            secondaryjoin=(idea_table.c.ID == idea_link_to_table.c.target_id),
            order_by=idea_table.c.entropy.asc(),
            backref=backref('links_to', order_by=idea_table.c.entropy.desc()),
            cascade="all, delete"
            ),
        'instances':relation(Idea, secondary=idea_instance_of_table,
            primaryjoin=(idea_table.c.ID == idea_instance_of_table.c.class_id),
            secondaryjoin=(idea_table.c.ID == idea_instance_of_table.c.instance_id),
            order_by=idea_table.c.entropy.desc(),
            backref=backref('instance_of', order_by=idea_table.c.entropy.desc()),
            cascade="all, delete"
            ),
        'nodes':relation(Node, secondary=ontotree_table, viewonly=True,
            primaryjoin=(idea_table.c.ID == ontotree_table.c.concept_id),
            secondaryjoin=(ontotree_table.c.concept_id == idea_table.c.ID),
            order_by=idea_table.c.entropy.desc(),
            #backref=backref('idea'),
            cascade="all, delete"
            ),
        'evaluations':relation(Node, secondary=idea_evaluation_table,
            primaryjoin=(idea_table.c.ID == idea_evaluation_table.c.ante_id),
            secondaryjoin=(idea_evaluation_table.c.ante_id == idea_table.c.ID),
            order_by=idea_evaluation_table.c.relatedness.desc(),
            cascade="all, delete"
            ),
        'hyponyms':dynamic_loader(Idea, secondary=idea_graph_edges_table,
            primaryjoin=and_(idea_table.c.ID == idea_graph_edges_table.c.cons_id, 
                             idea_graph_edges_table.c.weight > 0),
            secondaryjoin=(idea_graph_edges_table.c.ante_id == idea_table.c.ID),
            order_by=idea_graph_edges_table.c.weight.desc(),
            ),
        'occurrences':dynamic_loader(Idea, secondary=idea_graph_edges_table,
            primaryjoin=and_(idea_table.c.ID == idea_graph_edges_table.c.cons_id, 
                             idea_graph_edges_table.c.occurs_in > 0),
            secondaryjoin=(idea_graph_edges_table.c.ante_id == idea_table.c.ID),
            order_by=idea_graph_edges_table.c.occurs_in.desc(),
            ),
        'thinker_occurrences':dynamic_loader(Thinker, secondary=idea_thinker_graph_edges_table,
            primaryjoin=and_(entity_table.c.ID == idea_thinker_graph_edges_table.c.cons_id, 
                             idea_thinker_graph_edges_table.c.occurs_in > 0),
            secondaryjoin=(idea_thinker_graph_edges_table.c.ante_id == entity_table.c.ID),
            order_by=idea_thinker_graph_edges_table.c.occurs_in.desc(),
            ),
        'related':dynamic_loader(Idea, secondary=idea_graph_edges_table,
            primaryjoin=(idea_table.c.ID == idea_graph_edges_table.c.ante_id),
            secondaryjoin=(idea_graph_edges_table.c.cons_id == idea_table.c.ID),
            order_by=idea_graph_edges_table.c.jweight.desc(),
            ),
        'evaluated':dynamic_loader(Idea, secondary=idea_evaluation_table,
            primaryjoin=and_(idea_table.c.ID == idea_evaluation_table.c.ante_id,
                             idea_evaluation_table.c.relatedness >= 3),
            secondaryjoin=(idea_evaluation_table.c.cons_id == idea_table.c.ID),
            order_by=idea_evaluation_table.c.relatedness.desc(),
            ),
        'related_thinkers':dynamic_loader(Thinker, secondary=idea_thinker_graph_edges_table,
            primaryjoin=and_(entity_table.c.ID ==idea_thinker_graph_edges_table.c.ante_id,
                             entity_table.c.typeID == 3),
            secondaryjoin=(idea_thinker_graph_edges_table.c.cons_id == entity_table.c.ID),
            order_by=idea_thinker_graph_edges_table.c.jweight.desc(),
            ),
        'it_in_edges':dynamic_loader(IdeaThinkerGraphEdge, secondary=idea_thinker_graph_edges_table,
            primaryjoin=(entity_table.c.ID==idea_thinker_graph_edges_table.c.cons_id),
            secondaryjoin=(idea_thinker_graph_edges_table.c.cons_id == entity_table.c.ID),
            order_by=idea_thinker_graph_edges_table.c.jweight.desc(),
            ),
        'it_out_edges':dynamic_loader(IdeaThinkerGraphEdge, secondary=idea_thinker_graph_edges_table,
            primaryjoin=(entity_table.c.ID==idea_thinker_graph_edges_table.c.ante_id),
            secondaryjoin=(idea_thinker_graph_edges_table.c.ante_id == entity_table.c.ID),
            order_by=idea_thinker_graph_edges_table.c.jweight.desc(),
            ),
        'ii_in_edges':dynamic_loader(IdeaGraphEdge, secondary=idea_graph_edges_table,
            primaryjoin=(idea_table.c.ID==idea_graph_edges_table.c.cons_id),
            secondaryjoin=(idea_graph_edges_table.c.cons_id == idea_table.c.ID),
            order_by=idea_graph_edges_table.c.jweight.desc(),
            ),
        'ii_out_edges':dynamic_loader(IdeaGraphEdge, secondary=idea_graph_edges_table,
            primaryjoin=(idea_table.c.ID==idea_graph_edges_table.c.ante_id),
            secondaryjoin=(idea_graph_edges_table.c.ante_id == idea_table.c.ID),
            order_by=idea_graph_edges_table.c.jweight.desc(),
            ),
    })
    mapper(Node, ontotree_table, 
        inherits=Entity, polymorphic_identity=2, polymorphic_on=entity_table.c.typeID,
        properties={
        'children':relation(Node, lazy='joined', 
            primaryjoin=ontotree_table.c.ID==ontotree_table.c.parent_id,
            backref=backref('parent', remote_side=[ontotree_table.c.ID])), 
        'idea':relation(Idea, uselist=False, secondary=idea_table, lazy=False,
            primaryjoin=(idea_table.c.ID == ontotree_table.c.concept_id),
            secondaryjoin=(ontotree_table.c.concept_id == idea_table.c.ID),
            foreign_keys=[idea_table.c.ID]
            ), #uselist=False allows for 1:1 relation
    })
    mapper(Instance, idea_instance_of_table, properties={
        'class_idea':relation(Idea, uselist=False, secondary=idea_table,
            primaryjoin=(idea_table.c.ID == idea_instance_of_table.c.class_id),
            secondaryjoin=(idea_instance_of_table.c.class_id == idea_table.c.ID)
            ), #uselist=False allows for 1:1 relation
        'idea':relation(Idea, uselist=False, secondary=idea_table,
            primaryjoin=(idea_table.c.ID == idea_instance_of_table.c.instance_id),
            secondaryjoin=(idea_instance_of_table.c.instance_id == idea_table.c.ID)
            ), #uselist=False allows for 1:1 relation
    })
    mapper(IdeaEvaluation, idea_evaluation_table, properties={
        'ante':relation(Idea, uselist=False, lazy=False, secondary=idea_table,
            primaryjoin=(idea_evaluation_table.c.ante_id == idea_table.c.ID),
            secondaryjoin=(idea_table.c.ID == idea_evaluation_table.c.ante_id)
            ), #uselist=False allows for 1:1 relation
        'cons':relation(Idea, uselist=False, lazy=False, secondary=idea_table,
            primaryjoin=(idea_evaluation_table.c.cons_id == idea_table.c.ID),
            secondaryjoin=(idea_table.c.ID == idea_evaluation_table.c.cons_id)
            ), #uselist=False allows for 1:1 relation
        'user':relation(User, uselist=False, lazy=False, secondary=user_table,
            primaryjoin=(idea_evaluation_table.c.uid == user_table.c.ID),
            secondaryjoin=(user_table.c.ID == idea_evaluation_table.c.uid)
            )
    })
    mapper(AnonIdeaEvaluation, anon_evaluation_table, properties={
        'ante':relation(Idea, uselist=False, lazy=False, secondary=idea_table,
            primaryjoin=(anon_evaluation_table.c.ante_id == idea_table.c.ID),
            secondaryjoin=(idea_table.c.ID == anon_evaluation_table.c.ante_id)
            ), #uselist=False allows for 1:1 relation
        'cons':relation(Idea, uselist=False, lazy=False, secondary=idea_table,
            primaryjoin=(anon_evaluation_table.c.cons_id == idea_table.c.ID),
            secondaryjoin=(idea_table.c.ID == anon_evaluation_table.c.cons_id)
            ) #uselist=False allows for 1:1 relation
    })
    mapper(IdeaGraphEdge, idea_graph_edges_table, properties={
        'ante':relation(Idea, uselist=False, lazy=False, secondary=idea_table,
            primaryjoin=(idea_graph_edges_table.c.ante_id == idea_table.c.ID),
            secondaryjoin=(idea_table.c.ID == idea_graph_edges_table.c.ante_id)
            ), #uselist=False allows for 1:1 relation
        'cons':relation(Idea, uselist=False, lazy=False, secondary=idea_table,
            primaryjoin=(idea_graph_edges_table.c.cons_id == idea_table.c.ID),
            secondaryjoin=(idea_table.c.ID == idea_graph_edges_table.c.cons_id)
            ), #uselist=False allows for 1:1 relation


    })
    mapper(IdeaThinkerGraphEdge, idea_thinker_graph_edges_table, properties={
        'ante':relation(Entity, uselist=False, lazy=False, secondary=entity_table,
            primaryjoin=(idea_thinker_graph_edges_table.c.ante_id == entity_table.c.ID),
            secondaryjoin=(entity_table.c.ID == idea_thinker_graph_edges_table.c.ante_id)
            ), #uselist=False allows for 1:1 relation
        'cons':relation(Entity, uselist=False, lazy=False, secondary=entity_table,
            primaryjoin=(idea_thinker_graph_edges_table.c.cons_id == entity_table.c.ID),
            secondaryjoin=(entity_table.c.ID == idea_thinker_graph_edges_table.c.cons_id)
            ), #uselist=False allows for 1:1 relation


    })
    mapper(ThinkerGraphEdge, thinker_graph_edges_table, properties={
        'ante':relation(Thinker, uselist=False, lazy=False, secondary=thinker_table,
            primaryjoin=(thinker_graph_edges_table.c.ante_id == thinker_table.c.ID),
            secondaryjoin=(thinker_table.c.ID == thinker_graph_edges_table.c.ante_id)
            ), #uselist=False allows for 1:1 relation
        'cons':relation(Thinker, uselist=False, lazy=False, secondary=thinker_table,
            primaryjoin=(thinker_graph_edges_table.c.cons_id == thinker_table.c.ID),
            secondaryjoin=(thinker_table.c.ID == thinker_graph_edges_table.c.cons_id)
            ), #uselist=False allows for 1:1 relation


    })
    '''
    , properties={
    TODO : fix IdeaEvaluation Mapper
    'ante':relation(Idea, uselist=False, lazy=False, secondary=idea_table,
        primaryjoin=(idea_table.c.ID == idea_evaluation_table.c.ante_id),
        secondaryjoin=(idea_evaluation_table.c.ante_id == idea_table.c.ID)
        ), #uselist=False allows for 1:1 relation
    'cons':relation(Idea, uselist=False, lazy=False, secondary=idea_table,
        primaryjoin=(idea_table.c.ID == idea_evaluation_table.c.cons_id),
        secondaryjoin=(idea_evaluation_table.c.cons_id == idea_table.c.ID)
        ), #uselist=False allows for 1:1 relation
    'user':relation(User, uselist=False, lazy=False, secondary=user_table,
        primaryjoin=(user_table.c.ID == idea_evaluation_table.c.uid),
        secondaryjoin=(idea_evaluation_table.c.uid == user_table.c.ID)
        ), #uselist=False allows for 1:1 relation
    }
    '''

    #define thinker mappings
    mapper(Thinker, thinker_table,
        inherits=Entity, polymorphic_identity=3, polymorphic_on=entity_table.c.typeID,
        properties={
        'nationalities':relation(Nationality, secondary=thinker_has_nationality_table),
        'professions':relation(Profession, secondary=thinker_has_profession_table),
        'influenced':relation(Thinker, secondary=thinker_has_influenced_evaluation_table,
            primaryjoin=(thinker_table.c.ID == thinker_has_influenced_evaluation_table.c.thinker1_id),
            secondaryjoin=(thinker_table.c.ID == thinker_has_influenced_evaluation_table.c.thinker2_id),
            cascade="all, delete",
            backref='influenced_by'),
        'students':relation(Thinker, secondary=thinker_teacher_of_evaluation_table,
            primaryjoin=(thinker_table.c.ID == thinker_teacher_of_evaluation_table.c.thinker1_id),
            secondaryjoin=(thinker_table.c.ID == thinker_teacher_of_evaluation_table.c.thinker2_id),
            cascade="all, delete",
            backref='teachers'),
        'occurrences':dynamic_loader(Thinker, secondary=thinker_graph_edges_table,
            primaryjoin=and_(thinker_table.c.ID == thinker_graph_edges_table.c.cons_id, 
                             thinker_graph_edges_table.c.occurs_in > 0),
            secondaryjoin=(thinker_graph_edges_table.c.ante_id == thinker_table.c.ID),
            order_by=thinker_graph_edges_table.c.occurs_in.desc(),
            ),
        'idea_occurrences':dynamic_loader(Idea, secondary=idea_thinker_graph_edges_table,
            primaryjoin=and_(entity_table.c.ID == idea_thinker_graph_edges_table.c.cons_id, 
                             idea_thinker_graph_edges_table.c.occurs_in > 0),
            secondaryjoin=(idea_thinker_graph_edges_table.c.ante_id == entity_table.c.ID),
            order_by=idea_thinker_graph_edges_table.c.occurs_in.desc(),
            ),
        'hyponyms':dynamic_loader(Thinker, secondary=thinker_graph_edges_table,
            primaryjoin=and_(thinker_table.c.ID == thinker_graph_edges_table.c.cons_id, 
                             thinker_graph_edges_table.c.weight > 0),
            secondaryjoin=(thinker_graph_edges_table.c.ante_id == thinker_table.c.ID),
            order_by=thinker_graph_edges_table.c.weight.desc(),
            ),
        'related':dynamic_loader(Thinker, secondary=thinker_graph_edges_table,
            primaryjoin=(thinker_table.c.ID == thinker_graph_edges_table.c.ante_id),
            secondaryjoin=(thinker_graph_edges_table.c.cons_id == thinker_table.c.ID),
            order_by=thinker_graph_edges_table.c.jweight.desc(),
            ),
        'related_ideas':dynamic_loader(Idea, secondary=idea_thinker_graph_edges_table,
            primaryjoin=and_(entity_table.c.ID ==idea_thinker_graph_edges_table.c.ante_id,
                             entity_table.c.typeID == 1),
            secondaryjoin=(idea_thinker_graph_edges_table.c.cons_id == entity_table.c.ID),
            order_by=idea_thinker_graph_edges_table.c.jweight.desc(),
            ),
        'it_in_edges':dynamic_loader(IdeaThinkerGraphEdge, secondary=idea_thinker_graph_edges_table,
            primaryjoin=(entity_table.c.ID==idea_thinker_graph_edges_table.c.cons_id),
            secondaryjoin=(idea_thinker_graph_edges_table.c.cons_id == entity_table.c.ID),
            order_by=idea_thinker_graph_edges_table.c.jweight.desc(),
            ),
        'it_out_edges':dynamic_loader(IdeaThinkerGraphEdge, secondary=idea_thinker_graph_edges_table,
            primaryjoin=(entity_table.c.ID==idea_thinker_graph_edges_table.c.ante_id),
            secondaryjoin=(idea_thinker_graph_edges_table.c.ante_id == entity_table.c.ID),
            order_by=idea_thinker_graph_edges_table.c.jweight.desc(),
            ),
        'tt_in_edges':dynamic_loader(ThinkerGraphEdge, secondary=thinker_graph_edges_table,
            primaryjoin=(thinker_table.c.ID==thinker_graph_edges_table.c.cons_id),
            secondaryjoin=(thinker_graph_edges_table.c.cons_id == thinker_table.c.ID),
            order_by=thinker_graph_edges_table.c.jweight.desc(),
            ),
        'tt_out_edges':dynamic_loader(ThinkerGraphEdge, secondary=thinker_graph_edges_table,
            primaryjoin=(thinker_table.c.ID==thinker_graph_edges_table.c.ante_id),
            secondaryjoin=(thinker_graph_edges_table.c.ante_id == thinker_table.c.ID),
            order_by=thinker_graph_edges_table.c.jweight.desc(),
            ),
    })
    """    'birth':composite(SplitDate, thinker_table.c.birth_year,
                                     thinker_table.c.birth_month,
                                     thinker_table.c.birth_day),
        'death':composite(SplitDate, thinker_table.c.death_year,
                                     thinker_table.c.death_month,
                                     thinker_table.c.death_day)"""

    mapper(Nationality, nationality_table)
    mapper(Profession, profession_table)
    mapper(Alias, alias_table)
    # TODO : Fix Thinker relation ORMs
    mapper(ThinkerTeacherEvaluation, thinker_teacher_of_evaluation_table, properties={
        # Disabled: written as a triple-quoted string these entries would
        # concatenate with the 'user' key below instead of being ignored,
        # so they are commented out properly here.
        # 'thinker1':relation(Thinker, uselist=False, secondary=thinker_table,
        #     primaryjoin=(thinker_table.c.ID == thinker_teacher_of_evaluation_table.c.thinker1_id),
        #     secondaryjoin=(thinker_table.c.ID == thinker_teacher_of_evaluation_table.c.thinker1_id),
        # ),
        # 'thinker2':relation(Thinker, uselist=False, secondary=thinker_table,
        #     primaryjoin=(thinker_table.c.ID == thinker_teacher_of_evaluation_table.c.thinker2_id),
        #     secondaryjoin=(thinker_table.c.ID == thinker_teacher_of_evaluation_table.c.thinker2_id),
        # ),
        'user':relation(User),
        'ante_id':thinker_teacher_of_evaluation_table.c.thinker1_id,
        'cons_id':thinker_teacher_of_evaluation_table.c.thinker2_id
        
    })
    mapper(ThinkerInfluencedEvaluation, thinker_has_influenced_evaluation_table, properties={
        # Disabled: converted from a triple-quoted string to real comments so
        # the literal does not concatenate with the 'user' key below.
        # 'thinker1':relation(Thinker, uselist=False, secondary=thinker_table,
        #     primaryjoin=(thinker_table.c.ID == thinker_has_influenced_evaluation_table.c.thinker1_id),
        #     secondaryjoin=(thinker_table.c.ID == thinker_has_influenced_evaluation_table.c.thinker1_id),
        # ),
        # 'thinker2':relation(Thinker, uselist=False, secondary=thinker_table,
        #     primaryjoin=(thinker_table.c.ID == thinker_has_influenced_evaluation_table.c.thinker2_id),
        #     secondaryjoin=(thinker_table.c.ID == thinker_has_influenced_evaluation_table.c.thinker2_id),
        # ),
        'user':relation(User),
        'ante_id':thinker_has_influenced_evaluation_table.c.thinker1_id,
        'cons_id':thinker_has_influenced_evaluation_table.c.thinker2_id
    })
    

    # define Journal mappings
    mapper(Journal, journal_table,
        inherits=Entity, polymorphic_identity=4, polymorphic_on=entity_table.c.typeID,
        properties={
        'abbreviations':relation(Abbr),
        'query':relation(SearchQuery),
    })
    mapper(Abbr, journal_abbr_table)
    mapper(SearchQuery, journal_search_query_table)
    mapper(Searchpattern, searchpatterns_table)
    
    #Define works mappings
    mapper(Work, work_table, inherits=Entity, polymorphic_identity=5, polymorphic_on=entity_table.c.typeID)
    
    #Define SchoolOfThought mappings
    mapper(SchoolOfThought, school_of_thought_table, inherits=Entity, polymorphic_identity=6, polymorphic_on=entity_table.c.typeID)

    # define User mappings
    mapper(User, user_table, properties={
        'first_area':relation(Idea, uselist=False, secondary=idea_table,
            primaryjoin=(idea_table.c.ID == user_table.c.first_area_concept_id),
            secondaryjoin=(idea_table.c.ID == user_table.c.first_area_concept_id)
            ), #uselist=False allows for 1:1 relation
        'second_area':relation(Idea, uselist=False, secondary=idea_table,
            primaryjoin=(idea_table.c.ID == user_table.c.second_area_concept_id),
            secondaryjoin=(idea_table.c.ID == user_table.c.second_area_concept_id)
            ), #uselist=False allows for 1:1 relation
        "_roles": relation(Role, secondary=users_roles_table, lazy='immediate'),
        "group": relation(Group),
        "password": user_table.c.passwd
    })
    
    mapper(SEPArea, sep_area_table,properties={
        "idea": relation(Idea, uselist=False)
    })
    mapper(Group, groups_table,properties={
        "users": relation(User)
    })
    mapper(Role, roles_table,properties={
        "users": relation(User, secondary=users_roles_table)
    })
    
    mapper(SEPEntry, sepentries_table, properties={
            'fmatches':relation(Fuzzymatch,
            cascade="all, delete-orphan"),
    
    })
    
    mapper(Fuzzymatch, fuzzymatches_table)
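
A minimal declarative sketch, separate from the legacy classical mapper() calls above, of the self-referential many-to-many 'links' relationship that idea_link_to models, with explicit primaryjoin/secondaryjoin. SQLAlchemy 1.4+ is assumed and the model and column names are illustrative.

from sqlalchemy import Column, ForeignKey, Integer, String, Table, create_engine
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()

idea_link_to = Table(
    'idea_link_to', Base.metadata,
    Column('source_id', ForeignKey('idea.id'), primary_key=True),
    Column('target_id', ForeignKey('idea.id'), primary_key=True),
)

class Idea(Base):
    __tablename__ = 'idea'
    id = Column(Integer, primary_key=True)
    label = Column(String)
    # Outgoing links; the backref exposes the incoming direction.
    links = relationship(
        'Idea',
        secondary=idea_link_to,
        primaryjoin=id == idea_link_to.c.source_id,
        secondaryjoin=id == idea_link_to.c.target_id,
        backref='links_to',
    )

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    mind, brain = Idea(label='mind'), Idea(label='brain')
    mind.links.append(brain)
    session.add(mind)
    session.commit()
    print([i.label for i in mind.links], [i.label for i in brain.links_to])
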