Example #1
    def get(self, course_id):
        course = Courses.query.get_or_404(course_id)
        require(READ, course)
        # Get all questions for this course, default order is most recent first
        post = Posts(courses_id=course_id)
        question = PostsForQuestions(post=post)
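        # contains_eager('post') below relies on the explicit join(Posts);
        # the joinedload/undefer options fetch the rest in the same query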
        base_query = PostsForQuestions.query. \
            options(joinedload("criteria").joinedload("criterion")). \
            options(joinedload("selfevaltype")). \
            options(undefer_group('counts')). \
            join(Posts). \
            options(contains_eager('post').joinedload("user").joinedload('usertypeforsystem')). \
            options(contains_eager('post').joinedload("files")). \
            filter(Posts.courses_id == course_id). \
            order_by(desc(Posts.created))
        if allow(MANAGE, question):
            questions = base_query.all()
        else:
            now = datetime.datetime.utcnow()
            questions = base_query. \
                filter(or_(PostsForQuestions.answer_start.is_(None), now >= PostsForQuestions.answer_start)).\
                all()

        restrict_users = not allow(MANAGE, question)

        on_question_list_get.send(
            self,
            event_name=on_question_list_get.name,
            user=current_user,
            course_id=course_id)

        return {
            "questions": marshal(questions, dataformat.get_posts_for_questions(restrict_users, include_answers=False))
        }
Example #2
def _list():
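    # The inner join on "event" feeds contains_eager("event"); flight is
    # outer-joined so notifications without a flight survive the or_() filter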
    query = (
        Notification.query(recipient_id=request.user_id)
        .join("event")
        .options(contains_eager("event"))
        .options(subqueryload("event.actor"))
        .outerjoin(Event.flight)
        .options(contains_eager("event.flight"))
        .filter(or_(Event.flight == None, Flight.is_rankable()))
        .order_by(Event.time.desc())
    )

    query = _filter_query(query, request.args)

    page = request.args.get("page", type=int, default=1)
    per_page = request.args.get("per_page", type=int, default=50)

    query = query.limit(per_page)
    query = query.offset((page - 1) * per_page)

    def get_event(notification):
        event = notification.event
        event.unread = notification.time_read is None
        return event

    events = list(convert_event(get_event(notification)) for notification in query)

    return jsonify(events=events)
Example #3
def export(request):
    """Handle exporting a user's bookmarks to file"""
    rdict = request.matchdict
    username = rdict.get("username")

    if request.user is not None:
        current_user = request.user.username
    else:
        current_user = None

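    # contains_eager populates Bmark.tags and Bmark.hashed straight from the
    # two joins instead of lazy-loading them per bookmark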
    bmark_list = (
        Bmark.query.join(Bmark.tags)
        .options(contains_eager(Bmark.tags))
        .join(Bmark.hashed)
        .options(contains_eager(Bmark.hashed))
        .filter(Bmark.username == username)
        .all()
    )

    BmarkLog.export(username, current_user)

    request.response_content_type = "text/html"

    headers = [("Content-Disposition", 'attachment; filename="bookie_export.html"')]
    setattr(request, "response_headerlist", headers)

    return {"bmark_list": bmark_list}
Example #4
def index():
    if 'application/json' not in request.headers.get('Accept', ''):
        return render_template('ember-page.jinja', active_page='notifications')

    query = Notification.query(recipient=g.current_user) \
        .join('event') \
        .options(contains_eager('event')) \
        .options(subqueryload('event.actor')) \
        .outerjoin(Event.flight) \
        .options(contains_eager('event.flight')) \
        .filter(or_(Event.flight == None, Flight.is_rankable())) \
        .order_by(Event.time.desc())

    query = _filter_query(query, request.args)

    page = request.args.get('page', type=int, default=1)
    per_page = request.args.get('per_page', type=int, default=50)

    query = query.limit(per_page)
    query = query.offset((page - 1) * per_page)

    def get_event(notification):
        event = notification.event
        event.unread = (notification.time_read is None)
        return event

    events = map(get_event, query)

    return jsonify(events=(map(convert_event, events)))
Example #5
    def render(self, session, **arguments):
        q = session.query(Switch)

        q = q.options(subqueryload('location'),
                      subqueryload('interfaces'),
                      joinedload('interfaces.assignments'),
                      joinedload('interfaces.assignments.dns_records'),
                      joinedload('interfaces.assignments.network'),
                      subqueryload('observed_macs'),
                      undefer('observed_macs.creation_date'),
                      subqueryload('observed_vlans'),
                      undefer('observed_vlans.creation_date'),
                      joinedload('observed_vlans.network'),
                      subqueryload('model'),
                      # Switches don't have machine specs, but the formatter
                      # checks for their existence anyway
                      joinedload('model.machine_specs'))

        # Prefer the primary name for ordering
        q = q.outerjoin(DnsRecord, (Fqdn, DnsRecord.fqdn_id == Fqdn.id),
                        DnsDomain)
        q = q.options(contains_eager('primary_name'),
                      contains_eager('primary_name.fqdn'),
                      contains_eager('primary_name.fqdn.dns_domain'))
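        # End the eager-join path before applying the final ordering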
        q = q.reset_joinpoint()
        q = q.order_by(Fqdn.name, DnsDomain.name, Switch.label)

        return q.all()
Example #6
def export(request):
    """Handle exporting a user's bookmarks to file"""
    rdict = request.matchdict
    username = rdict.get('username')

    if request.user is not None:
        current_user = request.user.username
    else:
        current_user = None

    bmark_list = Bmark.query.join(Bmark.tags).\
        options(
            contains_eager(Bmark.tags)
        ).\
        join(Bmark.hashed).\
        options(
            contains_eager(Bmark.hashed)
        ).\
        filter(Bmark.username == username).all()

    BmarkLog.export(username, current_user)

    request.response_content_type = 'text/html'

    headers = [('Content-Disposition',
                'attachment; filename="bookie_export.html"')]
    setattr(request, 'response_headerlist', headers)

    return {
        'bmark_list': bmark_list,
    }
Example #7
def add_addresses(session):
    """ Add an AddressAssignment record for every PrimaryNameAssociation """
    q = session.query(PrimaryNameAssociation)
    q = q.join(System, DnsDomain)
    q = q.filter(System.ip != None)
    q = q.filter(~exists().where(AddressAssignment.ip == System.ip))
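    # dns_record and its domain come pre-loaded from the join above;
    # subqueryload_all fetches the interface collections separately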
    q = q.options(contains_eager('dns_record'))
    q = q.options(contains_eager('dns_record.dns_domain'))
    q = q.options(subqueryload_all('hardware_entity.interfaces.vlans.assignments'))
    q = q.options(subqueryload_all('hardware_entity.interfaces._vlan_ids'))

    count = 0
    pnas = q.all()
    for pna in pnas:
        hw = pna.hardware_entity
        if len(hw.interfaces) != 1:
            print "{0} has an unexpected number of interfaces, skipping: " \
                    "{1}".format(hw, len(hw.interfaces))
            continue
        iface = hw.interfaces[0]
        if len(iface.vlans[0].addresses):
            print "{0} already has addresses, skipping".format(iface)
            continue
        #print "Adding AddressAssignment record for {0:l}".format(hw)
        iface.vlans[0].addresses.append(pna.dns_record.ip)
        count += 1

    session.flush()
    print "Added %d AddressAssignment records" % count
Example #8
def add_interfaces(session):
    """ Add a default interface for all HW that has an IP """
    q = session.query(HardwareEntity)
    q = q.filter(~exists().where(Interface.hardware_entity_id == HardwareEntity.id))
    q = q.outerjoin(PrimaryNameAssociation, System, DnsDomain)
    q = q.options(contains_eager('_primary_name_asc'))
    q = q.options(contains_eager('_primary_name_asc.dns_record'))
    q = q.options(contains_eager('_primary_name_asc.dns_record.dns_domain'))
    q = q.filter(System.ip != None)

    hws = q.all()
    count = 0
    for hw in hws:
        if hw.hardware_type == "machine":
            interface = "eth0"
            itype = "public"
        elif hw.hardware_type == "switch":
            interface = "xge"
            itype = "oa"
        else:
            interface = "oa"
            itype = "oa"

        #print "Adding default interface for {0:l}".format(hw)

        dbinterface = Interface(hardware_entity=hw, name=interface,
                                interface_type=itype,
                                comments="Created automatically by upgrade script")
        session.add(dbinterface)
        count += 1

    session.flush()
    print "Added %d interfaces" % count
Example #9
 def GET(self, page = 1 ):
   d = LoadInfo()
   pageSize = 5
   page = int(page)
   if page < 1: page = 1
   startIndex = pageSize*(page-1)
   session = Session()
   try:
     d['itemlist'] =  session.query(Item).\
       join(Item.itemcat).\
       filter(Item.pubdate < datetime.datetime.utcnow() ).\
       filter(Item.ispass == True).\
       options(contains_eager(Item.itemcat)).\
       order_by(Item.pubdate.desc())\
       [startIndex:startIndex+pageSize]
     #total
     d['recodecount'] =  session.query(Item).\
       join(Item.itemcat).\
       filter(Item.pubdate < datetime.datetime.utcnow() ).\
       filter(Item.ispass == True).\
       options(contains_eager(Item.itemcat)).\
       count()
     d['pagecount'] = int(math.ceil(d['recodecount'] / float(pageSize)))
     if d['pagecount']==0: d['pagecount']=1
     d['page'] = page
     d['itemcat'] = session.query(ItemCat).all()
   finally:
     session.close()
   # don't show stats when serving from localhost
   if web.ctx.env.get('HTTP_HOST','').startswith('localhost'):
     d['debug'] = True
   return render.index(**d)
Example #10
    def _poll(self, limit, max_age):
        if not self.link:
            self.link = url(
                controller='forum', action='threads', forum_id=self.forum_id)

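        # first_post is joined explicitly so it can be ordered on below;
        # contains_eager(..., alias=...) maps the joined columns back in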
        thread_q = meta.Session.query(forum_model.Thread) \
            .filter_by(forum_id=self.forum_id) \
            .join((forum_model.Post, forum_model.Thread.first_post)) \
            .options(
                contains_eager(forum_model.Thread.first_post, alias=forum_model.Post),
                contains_eager(forum_model.Thread.first_post, forum_model.Post.thread, alias=forum_model.Thread),
                joinedload(forum_model.Thread.first_post, forum_model.Post.author),
            )

        if max_age:
            thread_q = thread_q.filter(forum_model.Post.posted_time >= max_age)

        threads = thread_q \
            .order_by(forum_model.Post.posted_time.desc()) \
            [:limit]

        updates = []
        for thread in threads:
            update = FrontPageThread(
                source = self,
                time = thread.first_post.posted_time,
                post = thread.first_post,
            )
            updates.append(update)

        return updates
Example #11
def LoadInfo():
  d = {}
  d['localtime'] = localtime
  d['site_url'] = config.site_url
  d['site_generator'] = config.site_generator
  d['site_name'] = config.site_name
  d['site_subname'] = config.site_subname
  d['site_title'] = config.site_name + ' - ' + config.site_subname
  d['site_keywords'] = config.site_keywords
  d['site_description'] = config.site_description
  d['myhtml'] = myhtml

  #get latest items
  session = Session()
  try:
    d['catlist'] = session.query(ItemCat).\
              order_by(ItemCat.orders.asc()).\
              all()
    d['newitemlist'] =  session.query(Item).\
          join(Item.itemcat).\
          filter(Item.pubdate < datetime.datetime.utcnow() ).\
          filter(Item.ispass == True).\
          options(contains_eager(Item.itemcat)).\
          order_by(Item.pubdate.desc())\
          [:15]
    d['newcommentlist'] =  session.query(ItemComment).\
          join(ItemComment.item).\
          filter(Item.pubdate < datetime.datetime.utcnow() ).\
          filter(ItemComment.ispass == True).\
          options(contains_eager(ItemComment.item)).\
          order_by(ItemComment.adddate.desc())\
          [:15]
  finally:
    session.close()
  return d
Example #12
 def GET(self, page = 1 ):
   d = LoadInfo()
   pageSize = 20
   page = int(page)
   if page < 1: page = 1
   startIndex = pageSize*(page-1)
   session = Session()
   try:
     d['itemlist'] =  session.query(Item).\
       join(Item.itemcat).\
       options(contains_eager(Item.itemcat)).\
       order_by(Item.pubdate.desc())\
       [startIndex:startIndex+pageSize]
     # total count
     d['recodecount'] =  session.query(Item).\
       join(Item.itemcat).\
       options(contains_eager(Item.itemcat)).\
       count()
     d['pagecount'] = int(math.ceil(d['recodecount'] / float(pageSize)))
     if d['pagecount']==0: d['pagecount']=1
     d['page'] = page
     d['itemcat'] = session.query(ItemCat).all()
   finally:
     session.close()
   return render.admin_itemlist(**d)
Example #13
def list():
    query = Notification.query(recipient_id=request.user_id) \
        .join('event') \
        .options(contains_eager('event')) \
        .options(subqueryload('event.actor')) \
        .outerjoin(Event.flight) \
        .options(contains_eager('event.flight')) \
        .filter(or_(Event.flight == None, Flight.is_rankable())) \
        .order_by(Event.time.desc())

    query = _filter_query(query, request.args)

    page = request.args.get('page', type=int, default=1)
    per_page = request.args.get('per_page', type=int, default=50)

    query = query.limit(per_page)
    query = query.offset((page - 1) * per_page)

    def get_event(notification):
        event = notification.event
        event.unread = (notification.time_read is None)
        return event

    events = map(get_event, query)

    return jsonify(events=(map(convert_event, events)))
Example #14
    def search(self, phrase, content=False):
        """Perform the search on the index"""
        # we need to adjust the phrase to be a set of OR per word
        phrase = " OR ".join(phrase.split())

        results = set()

        desc = Bmark.query.filter(Bmark.description.match(phrase))

        tag_str = Bmark.query.filter(Bmark.tag_str.match(phrase))

        ext = Bmark.query.filter(Bmark.extended.match(phrase))

        results.update(set([bmark for bmark in desc.union(tag_str, ext).order_by(Bmark.stored).all()]))

        readable_res = []
        if content:
            content = Readable.query.filter(Readable.content.match(phrase))

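            # Alias the joined tables so contains_eager can map the
            # outer-joined rows onto Readable.hashed and Hashed.bmark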
            hashed = aliased(Hashed)
            qry = content.outerjoin((hashed, Readable.hashed)).options(contains_eager(Readable.hashed, alias=hashed))

            bmarks = aliased(Bmark)
            qry = qry.outerjoin((bmarks, hashed.bmark)).options(
                contains_eager(Readable.hashed, hashed.bmark, alias=bmarks)
            )

            res = qry.order_by(bmarks.stored).all()
            for read in res:
                readable_res.append(read.hashed.bmark[0])

        results.update(set(readable_res))
        return sorted(list(results), key=lambda res: res.stored, reverse=True)
Example #15
    def test_outerouterjoin_eager(self):
        self.assertEqual(
            str(self.db.query(Foo).outerjoin_eager('bars')),
            str(self.db.query(Foo).outerjoin('bars').options(orm.contains_eager('bars'))),
            'it should outerjoin eager on single string entity'
        )

        self.assertEqual(
            str(self.db.query(Foo).outerjoin_eager(Foo.bars)),
            str(self.db.query(Foo).outerjoin(Foo.bars).options(orm.contains_eager(Foo.bars))),
            'it should outerjoin eager on single model entity'
        )

        self.assertEqual(
            str(self.db.query(Foo).outerjoin_eager('bars', 'bazs')),
            str(
                self.db.query(Foo).outerjoin('bars', 'bazs').options(
                    orm.contains_eager('bars').contains_eager('bazs')
                )
            ),
            'it should outerjoin eager on multiple string entities'
        )

        self.assertEqual(
            str(self.db.query(Foo).outerjoin_eager(Foo.bars, Bar.bazs)),
            str(
                self.db.query(Foo).outerjoin(Foo.bars, Bar.bazs).options(
                    orm.contains_eager(Foo.bars).contains_eager(Bar.bazs)
                )
            ),
            'it should outerjoin eager on multiple model entities'
        )
Example #16
 def query(self):
     query = self.request.dbsession.query(
         distinct(Estimation.id),
         Estimation,
     )
     query = query.outerjoin(Task.company)
     query = query.outerjoin(Task.customer)
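     # load_only trims the eagerly loaded customer/company rows down to
     # the columns this listing actually needs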
     query = query.options(
         contains_eager(Task.customer).load_only(
             Customer.id,
             Customer.label,
         )
     )
     query = query.options(
         contains_eager(Task.company).load_only(
             Company.id,
             Company.name
         )
     )
     query = query.options(
         load_only(
             "name",
             "internal_number",
             "status",
             "signed_status",
             "geninv",
             "date",
             "description",
             "ht",
             "tva",
             "ttc",
         )
     )
     return query
Example #17
def type_browse(context, request):
    types = (
        session.query(t.Type)
            .join(t.Type.names_local)
            # Force inner join here to strip out e.g. Shadow, which has no
            # efficacy
            .join(t.Type.damage_efficacies)
            .order_by(t.Type.names_table.name)
            .options(
                contains_eager(t.Type.names_local),
                contains_eager(t.Type.damage_efficacies),
            )
            .all()
    )

    efficacy_map = {}
    for attacking_type in types:
        submap = efficacy_map[attacking_type] = {}
        for efficacy in attacking_type.damage_efficacies:
            submap[efficacy.target_type] = efficacy.damage_factor


    template_ns = dict(
        types=types,
        efficacy_map=efficacy_map,
    )

    return template_ns
Example #18
    def test_outerouterjoin_eager_with_alias(self):
        bar_alias = orm.aliased(Bar)
        baz_alias = orm.aliased(Baz)

        self.assertEqual(
            str(self.db.query(Foo).outerjoin_eager('bars', alias=bar_alias)),
            str((self.db.query(Foo)
                 .outerjoin(bar_alias, 'bars')
                 .options(orm.contains_eager('bars', alias=bar_alias)))),
            'it should outerjoin eager on alias and string entity'
        )

        self.assertEqual(
            str(self.db.query(Foo).outerjoin_eager(Foo.bars, alias=bar_alias)),
            str((self.db.query(Foo)
                 .outerjoin(bar_alias, Foo.bars)
                 .options(orm.contains_eager(Foo.bars, alias=bar_alias)))),
            'it should outerjoin eager on alias and model entity'
        )

        self.assertEqual(
            str(self.db.query(Foo).outerjoin_eager('bars',
                                                   'bazs',
                                                   alias={'bars': bar_alias,
                                                          'bazs': baz_alias})),
            str((self.db.query(Foo)
                 .outerjoin((bar_alias, 'bars'), (baz_alias, 'bazs'))
                 .options((orm.contains_eager('bars', alias=bar_alias)
                           .contains_eager('bazs', alias=baz_alias))))),
            'it should join eager on multiple aliases'
        )
Example #19
    def render(self, session, **arguments):
        q = session.query(Chassis)

        q = q.options(subqueryload('model'),
                      joinedload('model.machine_specs'),
                      subqueryload('location'),
                      joinedload('slots'),
                      subqueryload('slots.machine'),

                      # A rare case when we don't need primary name/host
                      lazyload('slots.machine.primary_name'),
                      lazyload('slots.machine.host'),

                      subqueryload('interfaces'),
                      joinedload('interfaces.assignments'),
                      joinedload('interfaces.assignments.network'),
                      joinedload('interfaces.assignments.dns_records'))

        # Prefer the primary name for ordering
        q = q.outerjoin(DnsRecord, (Fqdn, DnsRecord.fqdn_id == Fqdn.id),
                        DnsDomain)
        q = q.options(contains_eager('primary_name'),
                      contains_eager('primary_name.fqdn'),
                      contains_eager('primary_name.fqdn.dns_domain'))
        q = q.order_by(Fqdn.name, DnsDomain.name, Chassis.label)
        return q.all()
Example #20
    def duplicate_run_model(self, model_id, user):
        """
        Duplicate the run model and all its parameters to a new run model
        :param model_id: model run id to duplicate
        :param user: the user duplicating the model
        :return: nothing
        """

        id_for_user_upload_driving_dataset = self._dataset_service.get_id_for_user_upload_driving_dataset()
        self.delete_model_run_being_created(user)

        with self.transaction_scope() as session:
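            # The outer joins let contains_eager fill parameter_values and
            # land_cover_actions even for runs that have none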
            model_run_to_duplicate = session\
                .query(ModelRun)\
                .join(ModelRunStatus)\
                .outerjoin(ParameterValue)\
                .outerjoin(LandCoverAction) \
                .filter(ModelRun.id == model_id) \
                .filter(or_(
                    ModelRun.user_id == user.id,
                    ModelRunStatus.name == constants.MODEL_RUN_STATUS_PUBLISHED,
                    ModelRunStatus.name == constants.MODEL_RUN_STATUS_PUBLIC,)) \
                .options(contains_eager(ModelRun.parameter_values)) \
                .options(contains_eager(ModelRun.land_cover_actions)) \
                .one()

            new_model_run_name = model_run_to_duplicate.name
            if self._is_duplicate_name(model_run_to_duplicate.name, session, user):
                new_model_run_name = "{} (Copy)".format(model_run_to_duplicate.name)
                copy_id = 1
                while self._is_duplicate_name(new_model_run_name, session, user):
                    copy_id += 1
                    new_model_run_name = "{} (Copy {})".format(model_run_to_duplicate.name, copy_id)

            new_model_run = ModelRun()

            new_model_run.duplicate_from(model_run_to_duplicate)
            new_model_run.name = new_model_run_name
            new_model_run.user = user
            new_model_run.change_status(session, constants.MODEL_RUN_STATUS_CREATED)
            for parameter_value in model_run_to_duplicate.parameter_values:
                new_parameter = ParameterValue()
                new_parameter.duplicate_from(parameter_value)
                new_parameter.model_run = new_model_run

            for land_cover_action in model_run_to_duplicate.land_cover_actions:
                new_land_cover_action = LandCoverAction()
                new_land_cover_action.duplicate_from(land_cover_action)
                new_land_cover_action.model_run = new_model_run

            session.add(new_model_run)

        if model_run_to_duplicate.driving_dataset_id == id_for_user_upload_driving_dataset:
            try:
                self._job_runner_client.duplicate_uploaded_driving_data(model_run_to_duplicate.id, new_model_run.id)
            except ServiceException:
                self.delete_model_run_being_created(user)
                raise ServiceException("Could not duplicate the model run because "
                                       "the user uploaded data can not be duplicated")
Example #21
def get(video_id):
    """Get a video identified by `video_id`.

    **Example request:**

    .. sourcecode:: http

       GET /videos/1/ HTTP/1.1
       Accept: application/json

    **Example response:**

    .. sourcecode:: http

       HTTP/1.1 200 OK
       Content-Type: application/json

       {
        "name": "Joe Schmoe",
        "first_name": "Joe",
        "second_name": "Schmoe",
        "image_url": "abc"
       }

    :statuscode 200: success
    :statuscode 404: video does not exist
    """
    columns = ['Video.name', 'Video.image_url',
        'Video.video_sources', 'VideoSource.name', 'VideoSource.url', 
        'VideoSource.source_id',
        'Video.products', 'Product.model', 'Product.uri', 'Product.brand',
        'Product.product_images', 'ProductImage.url',
        'Product.product_style', 'RefProductStyle.name']
    try:
        message = 'success'
        data = Video.query.outerjoin(Video.video_sources, Video.products,
            Product.product_style, Product.product_images,
            ).options(
                contains_eager(Video.video_sources),
                contains_eager(Video.products),
            ).filter(Video.id==video_id
            ).order_by(RefProductStyle.name).limit(100).all()[0]
    except IndexError as error:
        message = "'%s' record does not exist." % video_id
        return jsonify(message=message, success=False, error=404), 404
    except Exception as error:
        message = '%s: %s' % (error.__class__.__name__, error)
        return jsonify(message=message, success=False, error=500), 500

    if data is None:
        message = "'%s' record does not exist." % video_id
        return jsonify(message=message, success=False, error=404), 404
    else:
        ## need to use the JSONEncoder class for datetime objects
        response = make_response(json.dumps(dict(data=data, message=message,
            success=True), cls=json_encoder(False, columns)))
        response.headers['Content-Type'] = 'application/json'
        response.headers['mimetype'] = 'application/json'
        return response
Example #22
 def query(self):
     query = DBSESSION().query(Task)
     query = query.with_polymorphic([Invoice, CancelInvoice])
     query = query.outerjoin(Invoice.payments)
     query = query.outerjoin(Task.customer)
     query = query.options(contains_eager(Invoice.payments).load_only(Payment.id, Payment.date, Payment.mode))
     query = query.options(contains_eager(Task.customer).load_only(Customer.name, Customer.code, Customer.id))
     return query
Example #23
def get_blockings_with_rooms(start_date, end_date):
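    # The two joins below feed contains_eager('blocking') and contains_eager('room')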
    return (BlockedRoom.query
            .filter(BlockedRoom.state == BlockedRoomState.accepted,
                    Blocking.start_date <= end_date,
                    Blocking.end_date >= start_date)
            .join(BlockedRoom.blocking)
            .join(BlockedRoom.room)
            .options(contains_eager('blocking'), contains_eager('room'))
            .all())
Example #24
    def get(self, project_id, test_hash):
        project = Project.get(project_id)
        if not project:
            return '', 404

        # use the most recent test run to find basic details
        test = TestCase.query.filter(
            TestCase.project_id == project_id,
            TestCase.name_sha == test_hash,
        ).order_by(TestCase.date_created.desc()).limit(1).first()
        if not test:
            return '', 404

        # restrict the join to the last 1000 jobs otherwise this can get
        # significantly expensive as we have to seek quite a ways
        job_sq = Job.query.filter(
            Job.status == Status.finished,
            Job.project_id == project_id,
        ).order_by(Job.date_created.desc()).limit(1000).subquery()

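        # alias=job_sq maps rows from the limited job subquery onto TestCase.job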
        recent_runs = list(TestCase.query.options(
            contains_eager('job', alias=job_sq),
            contains_eager('job.source'),
            joinedload('job', 'build'),
        ).join(
            job_sq, TestCase.job_id == job_sq.c.id,
        ).join(
            Source, job_sq.c.source_id == Source.id,
        ).filter(
            Source.patch_id == None,  # NOQA
            Source.revision_sha != None,  # NOQA
            TestCase.name_sha == test.name_sha,
        ).order_by(job_sq.c.date_created.desc())[:25])

        first_build = Build.query.join(
            Job, Job.build_id == Build.id,
        ).join(
            TestCase, TestCase.job_id == Job.id,
        ).filter(
            TestCase.project_id == project_id,
            TestCase.name_sha == test_hash,
        ).order_by(TestCase.date_created.asc()).limit(1).first()

        extended_serializers = {
            TestCase: TestCaseWithJobSerializer(),
            Job: JobWithBuildSerializer(),
        }

        context = self.serialize(test, {
            TestCase: GeneralizedTestCase(),
        })
        context.update({
            'results': self.serialize(recent_runs, extended_serializers),
            'firstBuild': first_build,
        })

        return self.respond(context, serialize=False)
Example #25
    def get(self, project_id):
        project = Project.get(project_id)
        if project is None:
            return '', 404

        plans = Plan.query.options(
            subqueryload_all(Plan.steps),
        ).filter(
            Plan.projects.contains(project),
        )

        last_build = Build.query.options(
            joinedload('author'),
            contains_eager('source')
        ).join(
            Source, Build.source_id == Source.id,
        ).filter(
            Source.patch_id == None,  # NOQA
            Build.project == project,
            Build.status == Status.finished,
        ).order_by(
            Build.date_created.desc(),
        ).first()
        if not last_build or last_build.result == Result.passed:
            last_passing_build = last_build
        else:
            last_passing_build = Build.query.options(
                joinedload('author'),
                contains_eager('source')
            ).join(
                Source, Build.source_id == Source.id,
            ).filter(
                Source.patch_id == None,  # NOQA
                Build.project == project,
                Build.result == Result.passed,
                Build.status == Status.finished,
            ).order_by(
                Build.date_created.desc(),
            ).first()

        options = dict(
            (o.name, o.value) for o in ProjectOption.query.filter(
                ProjectOption.project_id == project.id,
            )
        )
        for key, value in OPTION_DEFAULTS.iteritems():
            options.setdefault(key, value)

        data = self.serialize(project)
        data['lastBuild'] = last_build
        data['lastPassingBuild'] = last_passing_build
        data['repository'] = project.repository
        data['plans'] = list(plans)
        data['options'] = options
        data['stats'] = self._get_stats(project)

        return self.respond(data)
Example #26
 def fully_loaded_by_language_tag_and_valsi_word(self):
     return self.options(
         contains_eager(Definition.language),
         contains_eager(Definition.valsi),
         joinedload(Definition.user),
         subqueryload(Definition.keyword_mappings)
         .joinedload(keyword_mappings.KeywordMapping.word)
         .joinedload(words.Word.language, words.Word.user),
         subqueryload(Definition.examples).joinedload(examples.Example.user),
     )
Example #27
 def render(self, session, **arguments):
     q = session.query(Host)
     q = q.join(Machine, DnsRecord, (Fqdn, DnsRecord.fqdn_id == Fqdn.id),
                DnsDomain)
     q = q.options(contains_eager('machine'))
     q = q.options(contains_eager('machine.primary_name'))
     q = q.options(contains_eager('machine.primary_name.fqdn'))
     q = q.options(contains_eager('machine.primary_name.fqdn.dns_domain'))
     q = q.order_by(Fqdn.name, DnsDomain.name)
     return SimpleHostList(q.all())
Example #28
 def render(self, session, **arguments):
     q = session.query(Host)
     q = q.join(HardwareEntity, DnsRecord, (Fqdn, DnsRecord.fqdn_id == Fqdn.id),
                DnsDomain)
     q = q.options(contains_eager('hardware_entity'),
                   contains_eager('hardware_entity.primary_name'),
                   contains_eager('hardware_entity.primary_name.fqdn'),
                   contains_eager('hardware_entity.primary_name.fqdn.dns_domain'))
     q = q.order_by(Fqdn.name, DnsDomain.name)
     return StringAttributeList(q.all(), "fqdn")
Example #29
	def revzone_ipv6(self, rz):
		from netprofile_networks.models import Network
		from netprofile_ipaddresses.models import IPv6Address
		return DBSession().query(IPv6Address).join(IPv6Address.host).join(IPv6Address.network).options(
			contains_eager(IPv6Address.host),
			contains_eager(IPv6Address.network)
		).filter(
			(Binary16ToDecimal(Network.ipv6_address) + IPv6Address.offset) >= int(rz.ipv6_network.network),
			(Binary16ToDecimal(Network.ipv6_address) + IPv6Address.offset) <= int(rz.ipv6_network.broadcast)
		)
Example #30
    def by_tag(tag, limit=50, page=0):
        """Get a recent set of bookmarks"""
        qry = Bmark.query.join(Bmark.tags).options(contains_eager(Bmark.tags)).filter(Tag.name == tag)

        offset = limit * page
        qry = qry.order_by(Bmark.stored.desc()).limit(limit).offset(offset).from_self()

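        # Re-join tags after from_self() so the limited result set still has
        # Bmark.tags populated eagerly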
        qry = qry.outerjoin(Bmark.tags).options(contains_eager(Bmark.tags))

        return qry.all()
Example #31
def get_family(id: int):
    app.logger.debug("Retrieving user id..")
    if app.config.get("LOGIN_DISABLED") or current_user.is_admin:
        user_id = request.args.get("user")
        app.logger.debug("User is admin with ID '%s'", user_id)
    else:
        user_id = current_user.user_id
        app.logger.debug("User is regular with ID '%s'", user_id)

    if user_id:
        app.logger.debug("Processing query - restricted based on user id.")
        family = (
            models.Family.query.filter_by(family_id=id)
            .options(
                contains_eager(models.Family.participants)
                .contains_eager(models.Participant.tissue_samples)
                .contains_eager(models.TissueSample.datasets)
            )
            .join(models.Participant)
            .join(models.TissueSample)
            .join(models.Dataset)
            .join(
                models.groups_datasets_table,
                models.Dataset.dataset_id
                == models.groups_datasets_table.columns.dataset_id,
            )
            .join(
                models.users_groups_table,
                models.groups_datasets_table.columns.group_id
                == models.users_groups_table.columns.group_id,
            )
            .filter(models.users_groups_table.columns.user_id == user_id)
            .one_or_none()
        )
    else:
        app.logger.debug("Processing query - unrestricted based on user id.")
        family = (
            models.Family.query.filter_by(family_id=id)
            .options(
                joinedload(models.Family.participants)
                .joinedload(models.Participant.tissue_samples)
                .joinedload(models.TissueSample.datasets),
                joinedload(models.Family.created_by),
                joinedload(models.Family.updated_by),
            )
            .one_or_none()
        )

    if not family:
        app.logger.debug("Query did not return any records.")
        abort(404)

    app.logger.debug("Query successful, returning JSON...")
    return jsonify(
        [
            {
                **asdict(family),
                "updated_by": family.updated_by.username,
                "created_by": family.created_by.username,
                "participants": [
                    {
                        **asdict(participant),
                        "institution": participant.institution.institution
                        if participant.institution
                        else None,
                        "updated_by": participant.updated_by.username,
                        "created_by": participant.created_by.username,
                        "tissue_samples": [
                            {
                                **asdict(tissue_sample),
                                "datasets": tissue_sample.datasets,
                                "updated_by": tissue_sample.updated_by.username,
                                "created_by": tissue_sample.created_by.username,
                            }
                            for tissue_sample in participant.tissue_samples
                        ],
                    }
                    for participant in family.participants
                ],
            }
        ]
    )
Example #32
def list_families():

    app.logger.debug("Parsing query parameters..")

    starts_with = request.args.get("starts_with", default="", type=str)
    starts_with = f"{starts_with}%"
    max_rows = request.args.get("max_rows", default=100)
    order_by_col = request.args.get("order", default="family_id", type=str)

    app.logger.debug(
        "Query parameters: \n\tstarts_with %s \n\tmax_rows %s, \n\torder_by_col %s",
        starts_with,
        max_rows,
        order_by_col,
    )

    app.logger.debug("Validating max rows..")
    try:
        int(max_rows)
    except:
        app.logger.error("Max Rows: %s is not a valid integer")
        abort(400, description="Max rows must be a valid integer")

    columns = models.Family.__table__.columns.keys()

    app.logger.debug("Validating 'order_by' column..")
    if order_by_col not in columns:
        app.logger.error(
            "Column name %s is not valid, it must be one of %s", order_by_col, columns
        )
        abort(400, description=f"Column name for ordering must be one of {columns}")
    column = getattr(models.Family, order_by_col)

    app.logger.debug("Query parameters successfully parsed.")

    app.logger.debug("Retrieving user id..")
    if app.config.get("LOGIN_DISABLED") or current_user.is_admin:
        user_id = request.args.get("user")
        app.logger.debug("User is admin with ID '%s'", user_id)
    else:
        user_id = current_user.user_id
        app.logger.debug("User is regular with ID '%s'", user_id)
    if user_id:
        app.logger.debug("Processing query - restricted based on user id.")
        families = (
            models.Family.query.options(contains_eager(models.Family.participants))
            .filter(models.Family.family_codename.like(starts_with))
            .join(models.Participant)
            .join(models.TissueSample)
            .join(models.Dataset)
            .join(
                models.groups_datasets_table,
                models.Dataset.dataset_id
                == models.groups_datasets_table.columns.dataset_id,
            )
            .join(
                models.users_groups_table,
                models.groups_datasets_table.columns.group_id
                == models.users_groups_table.columns.group_id,
            )
            .filter(models.users_groups_table.columns.user_id == user_id)
            .order_by(column)
            .limit(max_rows)
        )
    else:
        app.logger.debug("Processing query - unrestricted based on user id.")
        families = (
            models.Family.query.options(
                joinedload(models.Family.participants),
                joinedload(models.Family.created_by),
                joinedload(models.Family.updated_by),
            )
            .filter(models.Family.family_codename.like(starts_with))
            .order_by(column)
            .limit(max_rows)
        )
    app.logger.debug("Query successful, returning JSON...")
    return jsonify(
        [
            {
                **asdict(family),
                "participants": [
                    {
                        **asdict(participant),
                        "institution": participant.institution.institution
                        if participant.institution
                        else None,
                    }
                    for participant in family.participants
                ],
                "updated_by": family.updated_by.username,
                "created_by": family.created_by.username,
            }
            for family in families
        ]
    )
Example #33
def smart_query(query, filters=None, sort_attrs=None, schema=None):
    """
    Does magic Django-ish joins like post___user___name__startswith='Bob'
     (see https://goo.gl/jAgCyM)
    Does filtering, sorting and eager loading at the same time.
    And if, say, filters and sorting need the same join, it will be done
     only once. That's why everything is combined in a single method.

    :param query: sqlalchemy.orm.query.Query
    :param filters: dict
    :param sort_attrs: List[basestring]
    :param schema: dict
    """
    if not filters:
        filters = {}
    if not sort_attrs:
        sort_attrs = []
    if not schema:
        schema = {}

    # noinspection PyProtectedMember
    root_cls = query._joinpoint_zero().class_  # for example, User or Post
    attrs = list(filters.keys()) + \
        list(map(lambda s: s.lstrip(DESC_PREFIX), sort_attrs))
    aliases = OrderedDict({})
    _parse_path_and_make_aliases(root_cls, '', attrs, aliases)

    loaded_paths = []
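    # Outer-join every aliased relationship and mark it contains_eager, so
    # filtering, sorting and eager loading all share the same joins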
    for path, al in aliases.items():
        relationship_path = path.replace(RELATION_SPLITTER, '.')
        query = query.outerjoin(al[0], al[1]) \
            .options(contains_eager(relationship_path, alias=al[0]))
        loaded_paths.append(relationship_path)

    for attr, value in filters.items():
        if RELATION_SPLITTER in attr:
            parts = attr.rsplit(RELATION_SPLITTER, 1)
            entity, attr_name = aliases[parts[0]][0], parts[1]
        else:
            entity, attr_name = root_cls, attr
        try:
            query = query.filter(*entity.filter_expr(**{attr_name: value}))
        except KeyError as e:
            raise KeyError("Incorrect filter path `{}`: {}".format(attr, e))

    for attr in sort_attrs:
        if RELATION_SPLITTER in attr:
            prefix = ''
            if attr.startswith(DESC_PREFIX):
                prefix = DESC_PREFIX
                attr = attr.lstrip(DESC_PREFIX)
            parts = attr.rsplit(RELATION_SPLITTER, 1)
            entity, attr_name = aliases[parts[0]][0], prefix + parts[1]
        else:
            entity, attr_name = root_cls, attr
        try:
            query = query.order_by(*entity.order_expr(attr_name))
        except KeyError as e:
            raise KeyError("Incorrect order path `{}`: {}".format(attr, e))

    if schema:
        flat_schema = _flatten_schema(schema)
        not_loaded_part = {
            path: v
            for path, v in flat_schema.items() if path not in loaded_paths
        }
        query = query.options(*_eager_expr_from_flat_schema(not_loaded_part))

    return query
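
A minimal usage sketch for smart_query, assuming sqlalchemy_mixins-style Post/User models (illustrative names) with a Post.user relationship, filter_expr/order_expr on the mapped classes, RELATION_SPLITTER == '___' and DESC_PREFIX == '-':

# Post/User are illustrative models, not defined above
q = smart_query(
    session.query(Post),
    filters={'user___name__startswith': 'Bob'},  # outer-joins users, filters on name
    sort_attrs=['-user___name'],                 # '-' prefix sorts descending
)
posts = q.all()  # each post.user is already populated via contains_eager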
Example #34
class _PolymorphicTestBase:
    __dialect__ = "default"

    def test_any_one(self):
        sess = fixture_session()
        any_ = Company.employees.of_type(Engineer).any(
            Engineer.primary_language == "cobol")
        eq_(sess.query(Company).filter(any_).one(), self.c2)

    def test_any_two(self):
        sess = fixture_session()
        calias = aliased(Company)
        any_ = calias.employees.of_type(Engineer).any(
            Engineer.primary_language == "cobol")
        eq_(sess.query(calias).filter(any_).one(), self.c2)

    def test_any_three(self):
        sess = fixture_session()
        any_ = Company.employees.of_type(Boss).any(Boss.golf_swing == "fore")
        eq_(sess.query(Company).filter(any_).one(), self.c1)

    def test_any_four(self):
        sess = fixture_session()
        any_ = Company.employees.of_type(Manager).any(
            Manager.manager_name == "pointy")
        eq_(sess.query(Company).filter(any_).one(), self.c1)

    def test_any_five(self):
        sess = fixture_session()
        any_ = Company.employees.of_type(Engineer).any(
            and_(Engineer.primary_language == "cobol"))
        eq_(sess.query(Company).filter(any_).one(), self.c2)

    def test_join_to_subclass_one(self):
        sess = fixture_session()
        eq_(
            sess.query(Company).join(
                Company.employees.of_type(Engineer)).filter(
                    Engineer.primary_language == "java").all(),
            [self.c1],
        )

    def test_join_to_subclass_two(self):
        sess = fixture_session()
        eq_(
            sess.query(Company).join(Company.employees.of_type(Engineer)).join(
                Engineer.machines).filter(
                    Machine.name.ilike("%thinkpad%")).all(),
            [self.c1],
        )

    def test_join_to_subclass_three(self):
        sess = fixture_session()
        eq_(
            sess.query(Company, Engineer).join(
                Company.employees.of_type(Engineer)).filter(
                    Engineer.primary_language == "java").count(),
            1,
        )

    def test_join_to_subclass_four(self):
        sess = fixture_session()
        # test [ticket:2093]
        eq_(
            sess.query(Company.company_id, Engineer).join(
                Company.employees.of_type(Engineer)).filter(
                    Engineer.primary_language == "java").count(),
            1,
        )

    def test_join_to_subclass_five(self):
        sess = fixture_session()
        eq_(
            sess.query(Company).join(
                Company.employees.of_type(Engineer)).filter(
                    Engineer.primary_language == "java").count(),
            1,
        )

    def test_with_polymorphic_join_compile_one(self):
        sess = fixture_session()

        self.assert_compile(
            sess.query(Company).join(
                Company.employees.of_type(
                    with_polymorphic(Person, [Engineer, Manager],
                                     aliased=True,
                                     flat=True))),
            "SELECT companies.company_id AS companies_company_id, "
            "companies.name AS companies_name FROM companies "
            "JOIN %s" % (self._polymorphic_join_target([Engineer, Manager])),
        )

    def test_with_polymorphic_join_exec_contains_eager_one(self):
        sess = fixture_session()

        def go():
            wp = with_polymorphic(Person, [Engineer, Manager],
                                  aliased=True,
                                  flat=True)
            eq_(
                sess.query(Company).join(
                    Company.employees.of_type(wp)).order_by(
                        Company.company_id, wp.person_id).options(
                            contains_eager(
                                Company.employees.of_type(wp))).all(),
                [self.c1, self.c2],
            )

        self.assert_sql_count(testing.db, go, 1)

    @testing.combinations(
        # this form is not expected to work in all cases, ultimately
        # the "alias" parameter should be deprecated entirely
        # lambda Company, wp: contains_eager(Company.employees, alias=wp),
        lambda Company, wp: contains_eager(Company.employees.of_type(wp)),
        lambda Company, wp: contains_eager(Company.employees.of_type(wp),
                                           alias=wp),
    )
    def test_with_polymorphic_join_exec_contains_eager_two(
            self, contains_eager_option):
        sess = fixture_session()

        wp = with_polymorphic(Person, [Engineer, Manager], aliased=True)
        contains_eager_option = testing.resolve_lambda(contains_eager_option,
                                                       Company=Company,
                                                       wp=wp)
        q = (sess.query(Company).join(Company.employees.of_type(wp)).order_by(
            Company.company_id, wp.person_id).options(contains_eager_option))

        def go():
            eq_(q.all(), [self.c1, self.c2])

        self.assert_sql_count(testing.db, go, 1)

        self.assert_compile(
            q,
            self._test_with_polymorphic_join_exec_contains_eager_two_result(),
        )

    def test_with_polymorphic_any(self):
        sess = fixture_session()
        wp = with_polymorphic(Person, [Engineer], aliased=True)
        eq_(
            sess.query(Company.company_id).filter(
                Company.employees.of_type(wp).any(
                    wp.Engineer.primary_language == "java")).all(),
            [(1, )],
        )

    def test_subqueryload_implicit_withpoly(self):
        sess = fixture_session()

        def go():
            eq_(
                sess.query(Company).filter_by(company_id=1).options(
                    subqueryload(Company.employees.of_type(Engineer))).all(),
                [self._company_with_emps_fixture()[0]],
            )

        self.assert_sql_count(testing.db, go, 4)

    def test_joinedload_implicit_withpoly(self):
        sess = fixture_session()

        def go():
            eq_(
                sess.query(Company).filter_by(company_id=1).options(
                    joinedload(Company.employees.of_type(Engineer))).all(),
                [self._company_with_emps_fixture()[0]],
            )

        self.assert_sql_count(testing.db, go, 3)

    def test_subqueryload_explicit_withpoly(self):
        sess = fixture_session()

        def go():
            target = with_polymorphic(Person, Engineer)
            eq_(
                sess.query(Company).filter_by(company_id=1).options(
                    subqueryload(Company.employees.of_type(target))).all(),
                [self._company_with_emps_fixture()[0]],
            )

        self.assert_sql_count(testing.db, go, 4)

    def test_joinedload_explicit_withpoly(self):
        sess = fixture_session()

        def go():
            target = with_polymorphic(Person, Engineer, flat=True)
            eq_(
                sess.query(Company).filter_by(company_id=1).options(
                    joinedload(Company.employees.of_type(target))).all(),
                [self._company_with_emps_fixture()[0]],
            )

        self.assert_sql_count(testing.db, go, 3)

    def test_joinedload_stacked_of_type(self):
        sess = fixture_session()

        def go():
            eq_(
                sess.query(Company).filter_by(company_id=1).options(
                    joinedload(Company.employees.of_type(Manager)),
                    joinedload(Company.employees.of_type(Engineer)),
                ).all(),
                [self._company_with_emps_fixture()[0]],
            )

        self.assert_sql_count(testing.db, go, 2)
Example #35
def series(series_slug, year=None, all=None):
    if series_slug in BACKCOMPAT_SERIES_ALIASES:
        url = url_for('series',
                      series_slug=BACKCOMPAT_SERIES_ALIASES[series_slug])
        return redirect(url)

    today = datetime.date.today()

    # If there are no years with events, put the current year there at least
    all_years = years_with_events(series_slug) or [today.year]
    first_year, last_year = min(all_years), max(all_years)

    if last_year == today.year and len(all_years) > 1:
        # The current year is displayed on the 'New' page (year=None)
        all_years.remove(last_year)
        last_year = max(all_years)

    if year is not None:
        if year > last_year:
            year = None
        elif year < first_year:
            year = first_year

    if all is not None:
        paginate_prev = {'year': first_year}
        paginate_next = {'all': 'all'}
    elif year is None:
        paginate_prev = {'year': None}
        paginate_next = {'year': last_year}
    else:
        if year >= last_year:
            paginate_prev = {'year': None}
        else:
            paginate_prev = {'year': all_years[all_years.index(year) + 1]}

        if year <= first_year:
            paginate_next = {'all': 'all'}
        else:
            paginate_next = {'year': all_years[all_years.index(year) - 1]}

    query = db.session.query(tables.Series)
    query = query.filter(tables.Series.slug == series_slug)
    query = query.join(tables.Series.events)
    query = query.options(contains_eager(tables.Series.events))
    query = query.options(joinedload(tables.Series.events, 'talks'))
    query = query.options(joinedload(tables.Series.events, 'venue'))
    query = query.options(
        joinedload(tables.Series.events, 'talks', 'talk_speakers'))
    query = query.options(
        subqueryload(tables.Series.events, 'talks', 'talk_speakers',
                     'speaker'))
    query = query.options(subqueryload(tables.Series.events, 'talks', 'links'))
    query = query.order_by(tables.Event.date.desc())

    if not all:
        if year is None:
            # The 'New' page displays the current year as well as the last one
            query = query.filter(
                tables.Event.date >= datetime.date(today.year - 1, 1, 1))
        else:
            query = query.filter(
                tables.Event.date >= datetime.date(year, 1, 1))
            query = query.filter(
                tables.Event.date < datetime.date(year + 1, 1, 1))

    try:
        series = query.one()
        has_events = True
    except NoResultFound:
        has_events = False

        # The series has no events during the selected timeframe so at least
        # load general information on the series so we can properly display
        # the heading.
        query = db.session.query(tables.Series)
        query = query.filter(tables.Series.slug == series_slug)
        try:
            series = query.one()
        except NoResultFound:
            abort(404)

    organizer_info = json.loads(series.organizer_info)
    return render_template('series.html',
                           series=series,
                           today=today,
                           year=year,
                           organizer_info=organizer_info,
                           all=all,
                           first_year=first_year,
                           last_year=last_year,
                           all_years=all_years,
                           paginate_prev=paginate_prev,
                           paginate_next=paginate_next,
                           has_events=has_events)
Example #36
def summary(type: str):
    """
    GET /api/summary/participants\?panel=ENSG00000138131
    GET /api/summary/variants\?panel=ENSG00000138131

    The same sqlalchemy query is used for both endpoints as the participant-wise report is the precursor to the variant-wise report.

    The JSON response for participants is de-normalized such that each object is a participant and a variant,
    whereas for the variant JSON response, each object is a variant, the annotations, and an array of genotypes for the involved participants.

    Similarly, the csv output for the participants is de-normalized such that each row is a participant's variant. If the requested genes span similar coordinates, duplicate variants will be returned for each gene.
    The variant csv output is a summary - each row is a unique variant with various columns collapsed and ';' delimited indicating for example, all participants that had such a variant.

    """

    if type not in ["variants", "participants"]:
        abort(404)

    if app.config.get("LOGIN_DISABLED") or current_user.is_admin:
        user_id = request.args.get("user")
        app.logger.debug("User is admin with ID '%s'", user_id)
    else:
        user_id = current_user.user_id
        app.logger.debug("User is regular with ID '%s'", user_id)

    ensgs = parse_gene_panel()

    # app.logger.debug(ensgs)

    # returns a tuple (Genes, Variants)
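    # The contains_eager chain mirrors the explicit joins below, so the whole
    # genotype -> analysis -> dataset -> tissue_sample -> participant -> family
    # path is populated by this single query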
    query = (
        db.session.query(models.Gene, models.Variant)
        .options(
            contains_eager(models.Variant.genotype)
            .contains_eager(models.Genotype.analysis)
            .contains_eager(models.Analysis.datasets)
            .contains_eager(models.Dataset.tissue_sample)
            .contains_eager(models.TissueSample.participant)
            .contains_eager(models.Participant.family)
        )
        .join(
            models.Variant,
            and_(
                models.Gene.chromosome == models.Variant.chromosome,
                models.Gene.start <= models.Variant.position,
                models.Variant.position <= models.Gene.end,
            ),
        )
        .join(models.Variant.genotype)
        .join(models.Genotype.analysis, models.Genotype.dataset)
        .join(models.Dataset.tissue_sample)
        .join(models.TissueSample.participant)
        .join(models.Participant.family)
        .filter(models.Gene.ensembl_id.in_(ensgs))
    )

    if user_id:
        app.logger.debug("Processing query - restricted based on user id.")
        query = (
            query.join(
                models.groups_datasets_table,
                models.Dataset.dataset_id
                == models.groups_datasets_table.columns.dataset_id,
            )
            .join(
                models.users_groups_table,
                models.groups_datasets_table.columns.group_id
                == models.users_groups_table.columns.group_id,
            )
            .filter(models.users_groups_table.columns.user_id == user_id)
        )
    else:
        app.logger.debug("Processing query - unrestricted based on user id.")

    # defaults to json unless otherwise specified
    app.logger.info(request.accept_mimetypes)

    if expects_json(request):
        app.logger.info("Defaulting to json response")

        if type == "variants":

            return jsonify(
                [
                    {
                        **asdict(tup[0]),  # gene
                        **asdict(tup[1]),  # variants
                        "genotype": [
                            {
                                **asdict(genotype),
                                "participant_codename": genotype.dataset.tissue_sample.participant.participant_codename,
                            }
                            for genotype in tup[1].genotype
                        ],
                    }
                    for tup in query.all()
                ]
            )

        elif type == "participants":
            try:
                sql_df = pd.read_sql(query.statement, query.session.bind)
            except Exception:
                app.logger.error(
                    "Unexpected error resulting from sqlalchemy query", exc_info=True
                )
                abort(500, "Unexpected error")

            ptp_dict = sql_df.loc[:, ~sql_df.columns.duplicated()][
                relevant_cols
            ].to_dict(orient="records")
            return jsonify(ptp_dict)

    elif expects_csv(request):
        app.logger.info("text/csv Accept header requested")
        try:
            sql_df = pd.read_sql(query.statement, query.session.bind)
        except Exception:
            app.logger.error(
                "Unexpected error resulting from sqlalchemy query", exc_info=True
            )
            abort(500, "Unexpected error")

        agg_df = get_report_df(sql_df, type=type)
        csv_data = agg_df.to_csv(encoding="utf-8", index=False)

        response = Response(csv_data, mimetype="text/csv")
        response.headers.set(
            "Content-Disposition",
            "attachment",
            filename="{}_wise_report.csv".format(type[:-1]),
        )
        return response
    else:
        app.logger.error(
            "Only 'text/csv' and 'application/json' HTTP accept headers supported"
        )
        abort(
            406, "Only 'text/csv' and 'application/json' HTTP accept headers supported"
        )
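
# A minimal sketch of the expects_json()/expects_csv() helpers called above,
# which are not shown in the snippet. Only the names come from the example;
# the bodies below are an assumption based on plain Flask/Werkzeug content
# negotiation.
def expects_json(request):
    # with "Accept: */*" best_match() returns the first candidate, which
    # matches the "defaults to json" comment above
    best = request.accept_mimetypes.best_match(["application/json", "text/csv"])
    return best == "application/json"


def expects_csv(request):
    best = request.accept_mimetypes.best_match(["application/json", "text/csv"])
    return best == "text/csv"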
Example #37
0
def _patch_query(q):
    current_user = User.get(request.user_id) if request.user_id else None

    return q.join(Flight.igc_file) \
        .options(contains_eager(Flight.igc_file)) \
        .filter(Flight.is_viewable(current_user))
Example #38
0
def cve_index():
    """
    Display the list of CVEs, with pagination.
    Also accepts the following filtering query parameters:
    - q - search query for the description fields; a full CVE id
      redirects straight to that CVE's page
    - priority
    - package
    - component
    - version - may be given multiple times
    - status - may be given multiple times, paired position-wise with version
    - limit - default 20
    - offset - default 0
    """
    # Query parameters
    query = flask.request.args.get("q", "").strip()
    priority = flask.request.args.get("priority")
    package = flask.request.args.get("package")
    limit = flask.request.args.get("limit", default=20, type=int)
    offset = flask.request.args.get("offset", default=0, type=int)
    component = flask.request.args.get("component")
    versions = flask.request.args.getlist("version")
    statuses = flask.request.args.getlist("status")

    is_cve_id = re.match(r"^CVE-\d{4}-\d{4,7}$", query.upper())

    if is_cve_id and db_session.query(CVE).get(query.upper()):
        return flask.redirect(f"/security/{query.lower()}")

    all_releases = (
        db_session.query(Release)
        .order_by(desc(Release.release_date))
        .filter(Release.codename != "upstream")
        .all()
    )

    releases_query = db_session.query(Release).order_by(Release.release_date)

    if versions and not any(a in ["", "current"] for a in versions):
        releases_query = releases_query.filter(Release.codename.in_(versions))
    else:
        releases_query = releases_query.filter(
            or_(
                Release.support_expires > datetime.now(),
                Release.esm_expires > datetime.now(),
            )
        ).filter(Release.codename != "upstream")

    releases = releases_query.all()

    should_filter_by_version_and_status = (
        versions and statuses and len(versions) == len(statuses)
    )

    clean_versions = []
    clean_statuses = []
    if should_filter_by_version_and_status:
        raw_all_statuses = db_session.execute(
            "SELECT unnest(enum_range(NULL::statuses));"
        ).fetchall()
        all_statuses = ["".join(s) for s in raw_all_statuses]

        clean_versions = [
            (
                [version]
                if version not in ["", "current"]
                else [r.codename for r in releases]
            )
            for version in versions
        ]

        clean_statuses = [
            ([status] if status != "" else all_statuses) for status in statuses
        ]

    # query cves by filters
    cves_query = db_session.query(
        CVE, func.count("*").over().label("total")
    ).filter(CVE.status == "active")

    if priority:
        cves_query = cves_query.filter(CVE.priority == priority)

    if query:
        cves_query = cves_query.filter(
            or_(
                CVE.description.ilike(f"%{query}%"),
                CVE.ubuntu_description.ilike(f"%{query}%"),
            )
        )

    parameters = []
    if package:
        parameters.append(Status.package_name == package)

    if component:
        parameters.append(Status.component == component)

    if should_filter_by_version_and_status:
        conditions = []
        for key, version in enumerate(clean_versions):
            conditions.append(
                and_(
                    Status.release_codename.in_(version),
                    Status.status.in_(clean_statuses[key]),
                )
            )

        parameters.append(or_(*conditions))

        conditions = []
        for key, version in enumerate(clean_versions):
            sub_conditions = [
                Status.release_codename.in_(version),
                Status.status.in_(clean_statuses[key]),
                CVE.id == Status.cve_id,
            ]

            if package:
                sub_conditions.append(Status.package_name == package)

            if component:
                sub_conditions.append(Status.component == component)

            condition = Package.statuses.any(and_(*sub_conditions))

            conditions.append(condition)

        parameters.append(Status.package.has(and_(*conditions)))
    else:
        parameters.append(Status.status.in_(Status.active_statuses))

    if parameters:
        cves_query = cves_query.filter(
            CVE.statuses.any(and_(*parameters))
        )

    cves_query = (
        cves_query.group_by(CVE.id)
        .order_by(
            case(
                [(CVE.published.is_(None), 1)],
                else_=0,
            ),
            desc(CVE.published),
        )
        .limit(limit)
        .offset(offset)
        .from_self()
        .join(CVE.statuses)
        .options(contains_eager(CVE.statuses))
    )

    raw_cves = cves_query.all()

    # Pagination
    total_results = raw_cves[0][1] if raw_cves else 0

    cves = []
    for raw_cve in raw_cves:
        packages = raw_cve[0].packages

        # filter by package name
        if package:
            packages = {
                package_name: package_statuses
                for package_name, package_statuses in packages.items()
                if package_name == package
            }

        # filter by component
        if component:
            packages = {
                package_name: package_statuses
                for package_name, package_statuses in packages.items()
                if any(
                    status.component == component
                    for status in package_statuses.values()
                )
            }

        if should_filter_by_version_and_status:
            packages = {
                package_name: package_statuses
                for package_name, package_statuses in packages.items()
                if all(
                    any(
                        package_status.release_codename in version
                        and package_status.status in clean_statuses[key]
                        for package_status in package_statuses.values()
                    )
                    for key, version in enumerate(clean_versions)
                )
            }

        # do not return cve if it has no packages left
        if not packages:
            continue

        cve = {
            "id": raw_cve[0].id,
            "priority": raw_cve[0].priority,
            "packages": packages,
        }

        cves.append(cve)

    return flask.render_template(
        "security/cve/index.html",
        releases=releases,
        all_releases=all_releases,
        cves=cves,
        total_results=total_results,
        total_pages=ceil(total_results / limit),
        offset=offset,
        limit=limit,
        priority=priority,
        query=query,
        package=package,
        component=component,
        versions=versions,
        statuses=statuses,
    )
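
# A condensed sketch (the helper name is hypothetical) of the pagination
# pattern used above: func.count("*").over() attaches a window-function
# column carrying the total number of matching rows, so a single round trip
# returns both the requested page and the overall count.
from sqlalchemy import func


def cve_page(limit=20, offset=0):
    rows = (
        db_session.query(CVE, func.count("*").over().label("total"))
        .filter(CVE.status == "active")
        .order_by(CVE.id)
        .limit(limit)
        .offset(offset)
        .all()
    )
    total = rows[0][1] if rows else 0  # the same total is attached to every row
    return [cve for cve, _total in rows], total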
Example #39
0
    def find(limit=50,
             order_by=None,
             page=0,
             tags=None,
             username=None,
             with_content=False,
             with_tags=True):
        """Search for specific sets of bookmarks"""
        qry = Bmark.query
        offset = limit * page

        if with_content:
            qry = qry.outerjoin(Bmark.readable).\
                options(contains_eager(Bmark.readable))

        if username:
            qry = qry.filter(Bmark.username == username)

        if order_by is None:
            order_by = Bmark.stored.desc()

        if not tags:
            qry = qry.order_by(order_by).\
                limit(limit).\
                offset(offset).\
                from_self()

        if tags:
            qry = qry.join(Bmark.tags).\
                options(contains_eager(Bmark.tags))

            if isinstance(tags, str):
                qry = qry.filter(Tag.name == tags)
                qry = qry.order_by(order_by).\
                    limit(limit).\
                    offset(offset).\
                    from_self()
            else:
                bids_we_want = select(
                    [bmarks_tags.c.bmark_id.label('good_bmark_id')],
                    from_obj=[
                        bmarks_tags.join(
                            'tags',
                            and_(Tag.name.in_(tags),
                            bmarks_tags.c.tag_id == Tag.tid)
                        ).
                        join('bmarks', Bmark.bid == bmarks_tags.c.bmark_id)
                    ]).\
                    group_by(bmarks_tags.c.bmark_id, Bmark.stored).\
                    having(
                        func.count(bmarks_tags.c.tag_id) >= len(tags)
                    ).order_by(Bmark.stored.desc())

                qry = qry.join(
                    (bids_we_want.limit(limit).offset(offset).alias('bids'),
                     Bmark.bid == bids_we_want.c.good_bmark_id))

        # now outer join with the tags again so that we have the
        # full list of tags for each bmark we filtered down to
        if with_tags:
            qry = qry.outerjoin(Bmark.tags).\
                options(contains_eager(Bmark.tags))

        # join to hashed so we always have the url
        # if we have with_content, this is already done
        qry = qry.options(joinedload('hashed'))

        return qry.all()
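
# Hypothetical calls illustrating the find() API above (the owning class is
# not shown in the snippet, so Bmark.find is assumed here; tag values and
# username are made up):
#
# a page of one user's bookmarks carrying a single tag
recent = Bmark.find(limit=10, username='bob', tags='python')
# bookmarks carrying *all* of the given tags; the intersection is enforced
# by the HAVING count(tag_id) >= len(tags) subquery above
both = Bmark.find(tags=['python', 'flask'], with_content=True)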
Example #40
0
    def get(self, course_id, question_id):
        """
        Return a list of answers for a question based on search criteria. The
        list of answers is paginated. If there are any answers from an instructor
        or TA, their answers will appear at the top of the list.

        :param course_id: course id
        :param question_id: question id
        :return: list of answers
        """
        Courses.exists_or_404(course_id)
        question = PostsForQuestions.query.get_or_404(question_id)
        require(READ, question)
        restrict_users = not allow(MANAGE, question)

        params = answer_list_parser.parse_args()

        if restrict_users and not question.after_judging:
            # only the student's own answer should be returned
            params['author'] = current_user.id

        # this query could be further optimized by reducing the selected columns
        query = PostsForAnswers.query. \
            with_entities(PostsForAnswers). \
            options(contains_eager('post').joinedload('files')). \
            options(contains_eager('post').joinedload('user')). \
            options(joinedload('scores')). \
            options(undefer_group('counts')). \
            join(Posts). \
            filter(PostsForAnswers.questions_id == question.id)

        user_ids = []
        if params['author']:
            query = query.filter(Posts.users_id == params['author'])
            user_ids.append(params['author'])
        elif params['group']:
            # get all user ids in the group
            users = Users.query. \
                with_entities(Users.id). \
                join(GroupsAndUsers). \
                filter(GroupsAndUsers.groups_id == params['group']). \
                all()
            user_ids = [x[0] for x in users]

        if params['ids']:
            query = query.filter(PostsForAnswers.id.in_(params['ids'].split(',')))

        # place instructor and TA's answer at the top of the list
        inst_subquery = PostsForAnswers.query.with_entities(PostsForAnswers.id.label('inst_answer')). \
            join(Posts). \
            join(CoursesAndUsers, Posts.users_id == CoursesAndUsers.users_id).filter_by(courses_id=course_id). \
            join(CoursesAndUsers.usertypeforcourse).filter_by(name=UserTypesForCourse.TYPE_INSTRUCTOR)
        ta_subquery = PostsForAnswers.query.with_entities(PostsForAnswers.id.label('ta_answer')). \
            join(Posts). \
            join(CoursesAndUsers, Posts.users_id == CoursesAndUsers.users_id).filter_by(courses_id=course_id). \
            join(CoursesAndUsers.usertypeforcourse).filter_by(name=UserTypesForCourse.TYPE_TA)
        query = query.order_by(PostsForAnswers.id.in_(inst_subquery).desc(), PostsForAnswers.id.in_(ta_subquery).desc())

        if params['orderBy'] and len(user_ids) != 1:
            # order answers by a single criterion's score with pagination, since a question may have multiple criteria
            # left join on Scores with an or_ condition for criteriaandquestions_id IS NULL to include
            # answers that don't have any scores yet
            query = query.outerjoin(Scores).filter(
                or_(Scores.criteriaandquestions_id == params['orderBy'], Scores.criteriaandquestions_id.is_(None)))
            query = query.order_by(Scores.score.desc(), Posts.created.desc())
        else:
            query = query.order_by(Posts.created.desc())

        if user_ids:
            query = query.filter(Posts.users_id.in_(user_ids))

        page = query.paginate(params['page'], params['perPage'])

        on_answer_list_get.send(
            self,
            event_name=on_answer_list_get.name,
            user=current_user,
            course_id=course_id,
            data={'question_id': question_id})

        return {"objects": marshal(page.items, dataformat.get_posts_for_answers(restrict_users)), "page": page.page,
                "pages": page.pages, "total": page.total, "per_page": page.per_page}
Example #41
0
    def __queryItemsByParseTree(self, query_tree, limit, page, order_by):
        order_by_1 = ""
        order_by_2 = ""
        for col, direction in order_by:
            if order_by_1:
                order_by_1 += ", "
            if order_by_2:
                order_by_2 += ", "
            order_by_2 += col + " " + direction + " "
            if col == "title":
                order_by_1 += col + " " + direction + " "
        if order_by_1:
            order_by_1 = " ORDER BY " + order_by_1
        if order_by_2:
            order_by_2 = " ORDER BY " + order_by_2

        sub_sql = query_tree.interpret()

        if page < 1:
            raise ValueError("Page number cannot be negative or zero.")

        if limit < 0:
            raise ValueError("Limit cannot be negative number.")

        limit_offset = ""
        if limit > 0:
            offset = (page - 1) * limit
            limit_offset += "LIMIT {0} OFFSET {1}".format(limit, offset)

        sql = '''
        select sub.*,
        ''' + db.Item_Tag._sql_from() + ''',
        ''' + db.Tag._sql_from() + ''',
        ''' + db.Thumbnail._sql_from() + ''',
        ''' + db.Item_Field._sql_from() + ''',
        ''' + db.Field._sql_from() + '''
        from (''' + sub_sql + " " + order_by_1 + " " + limit_offset + ''') as sub
        left join items_tags on sub.id = items_tags.item_id
        left join tags on tags.id = items_tags.tag_id
        left join items_fields on sub.id = items_fields.item_id
        left join fields on fields.id = items_fields.field_id
        left join thumbnails on thumbnails.data_ref_id = sub.data_refs_id and
                  thumbnails.size = ''' + str(UserConfig().get(
            "thumbnail_size", consts.THUMBNAIL_DEFAULT_SIZE)) + '''
        where sub.alive
        ''' + order_by_2

        items = []
        try:
            items = self._session.query(db.Item)\
            .options(contains_eager("data_ref"), \
                     contains_eager("data_ref.thumbnails"), \
                     contains_eager("item_tags"), \
                     contains_eager("item_tags.tag"), \
                     contains_eager("item_fields"),\
                     contains_eager("item_fields.field"))\
            .from_statement(sql).all()

            for item in items:
                self._session.expunge(item)

        except ResourceClosedError:
            pass

        return items
Example #42
0
    def __getUntaggedItems(self, limit, page, order_by):
        order_by_1 = ""
        order_by_2 = ""
        for col, direction in order_by:
            if order_by_1:
                order_by_1 += ", "
            if order_by_2:
                order_by_2 += ", "
            order_by_2 += col + " " + direction + " "
            if col == "title":
                order_by_1 += col + " " + direction + " "
        if order_by_1:
            order_by_1 = " ORDER BY " + order_by_1
        if order_by_2:
            order_by_2 = " ORDER BY " + order_by_2

        thumbnail_default_size = UserConfig().get(
            "thumbnail_size", consts.THUMBNAIL_DEFAULT_SIZE)

        if page < 1:
            raise ValueError("Page number cannot be negative or zero.")

        if limit < 0:
            raise ValueError("Limit cannot be negative number.")

        limit_offset = ""
        if limit > 0:
            offset = (page - 1) * limit
            limit_offset += "LIMIT {0} OFFSET {1}".format(limit, offset)

        sql = '''
        select sub.*, ''' + \
        db.Item_Tag._sql_from() + ", " + \
        db.Tag._sql_from() + ", " + \
        db.Item_Field._sql_from() + ", " + \
        db.Field._sql_from() + \
        '''
        from (select i.*, ''' + \
            db.DataRef._sql_from() + ", " + \
            db.Thumbnail._sql_from() + \
            '''
            from items i
            left join items_tags it on i.id = it.item_id
            left join data_refs on i.data_ref_id = data_refs.id
            left join thumbnails on data_refs.id = thumbnails.data_ref_id and thumbnails.size = ''' + \
                str(thumbnail_default_size) + '''
            where
                it.item_id is null
                AND i.alive
            ''' + order_by_1 + " " + limit_offset + '''
        ) as sub
        left join items_tags on sub.id = items_tags.item_id
        left join tags on tags.id = items_tags.tag_id
        left join items_fields on sub.id = items_fields.item_id
        left join fields on fields.id = items_fields.field_id
        ''' + order_by_2

        items = []
        try:
            items = self._session.query(db.Item)\
            .options(contains_eager("data_ref"), \
                     contains_eager("data_ref.thumbnails"), \
                     contains_eager("item_tags"), \
                     contains_eager("item_tags.tag"), \
                     contains_eager("item_fields"),\
                     contains_eager("item_fields.field"))\
            .from_statement(sql).all()
            for item in items:
                self._session.expunge(item)

        except ResourceClosedError:
            pass

        return items
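
# The two methods above share one pattern: run hand-written SQL, then tell
# the ORM via contains_eager() which of the selected columns hydrate which
# relationships. A minimal self-contained version (User/Address are
# illustrative models, not part of this snippet):
from sqlalchemy import text
from sqlalchemy.orm import contains_eager

stmt = text(
    "SELECT users.*, addresses.* "
    "FROM users JOIN addresses ON addresses.user_id = users.id")
users = session.query(User).\
    from_statement(stmt).\
    options(contains_eager(User.addresses)).\
    all()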
Example #43
0
def series(series_slug, year=None, all=None):
    if series_slug in BACKCOMPAT_SERIES_ALIASES:
        url = url_for('series',
                      series_slug=BACKCOMPAT_SERIES_ALIASES[series_slug])
        return redirect(url)

    today = datetime.date.today()

    # List of years to show in the pagination
    # If there are no years with events, put the current year there at least
    all_years = years_with_events(series_slug) or [today.year]
    first_year, last_year = min(all_years), max(all_years)

    if last_year == today.year and len(all_years) > 1:
        # The current year is displayed on the 'New' page (year=None)
        all_years.remove(last_year)
        last_year = max(all_years)

    if year is not None:
        if year > last_year:
            # Instead of showing a future year, redirect to the 'New' page
            return redirect(url_for('series', series_slug=series_slug))
        if year not in all_years:
            # Otherwise, if there are no events in requested year, return 404.
            abort(404)

    if all is not None:
        paginate_prev = {'year': first_year}
        paginate_next = {'all': 'all'}
    elif year is None:
        paginate_prev = {'year': None}
        paginate_next = {'year': last_year}
    else:
        past_years = [y for y in all_years if y < year]
        if past_years:
            paginate_next = {'year': max(past_years)}
        else:
            paginate_next = {'all': 'all'}

        future_years = [y for y in all_years if y > year]
        if future_years:
            paginate_prev = {'year': min(future_years)}
        else:
            paginate_prev = {'year': None}

    query = db.session.query(tables.Series)
    query = query.filter(tables.Series.slug == series_slug)
    query = query.join(tables.Series.events)
    query = query.options(contains_eager(tables.Series.events))
    query = query.options(joinedload(tables.Series.events, 'talks'))
    query = query.options(joinedload(tables.Series.events, 'venue'))
    query = query.options(
        joinedload(tables.Series.events, 'talks', 'talk_speakers'))
    query = query.options(
        subqueryload(tables.Series.events, 'talks', 'talk_speakers',
                     'speaker'))
    query = query.options(subqueryload(tables.Series.events, 'talks', 'links'))
    query = query.order_by(tables.Event.date.desc())

    if not all:
        if year is None:
            # The 'New' page displays the current year as well as the last one
            query = query.filter(
                tables.Event.date >= datetime.date(today.year - 1, 1, 1))
        else:
            query = query.filter(
                tables.Event.date >= datetime.date(year, 1, 1))
            query = query.filter(
                tables.Event.date < datetime.date(year + 1, 1, 1))

    try:
        series = query.one()
        has_events = True
    except NoResultFound:
        has_events = False

        # The series has no events during the selected timeframe so at least
        # load general information on the series so we can properly display
        # the heading.
        query = db.session.query(tables.Series)
        query = query.filter(tables.Series.slug == series_slug)
        try:
            series = query.one()
        except NoResultFound:
            abort(404)

    # Split events between future and past
    # (today's event, if any, is considered future)
    past_events = [e for e in series.events if e.date < today]
    future_events = [e for e in series.events if e.date >= today]

    # Events are ordered closest first;
    #  for future ones this means ascending order
    future_events.reverse()

    featured_event = None
    if year is None:
        # Pop the featured event -- closest future one, or latest past one
        if future_events:
            featured_event = future_events.pop(0)
        elif past_events:
            featured_event = past_events.pop(0)

    organizer_info = json.loads(series.organizer_info)
    return render_template('series.html',
                           series=series,
                           today=today,
                           year=year,
                           future_events=future_events,
                           past_events=past_events,
                           featured_event=featured_event,
                           organizer_info=organizer_info,
                           all=all,
                           first_year=first_year,
                           last_year=last_year,
                           all_years=all_years,
                           paginate_prev=paginate_prev,
                           paginate_next=paginate_next,
                           has_events=has_events)
Example #44
0
def _patch_query(q):
    return q.join(Flight.igc_file) \
            .options(contains_eager(Flight.igc_file)) \
            .filter(Flight.is_viewable(g.current_user))
Example #45
0
def _create_list(date=None,
                 pilot=None,
                 club=None,
                 airport=None,
                 pinned=None,
                 filter=None,
                 default_sorting_column='score',
                 default_sorting_order='desc'):

    pilot_alias = aliased(User, name='pilot')
    owner_alias = aliased(User, name='owner')

    subq = db.session \
        .query(FlightComment.flight_id, func.count('*').label('count')) \
        .group_by(FlightComment.flight_id).subquery()

    current_user = User.get(request.user_id) if request.user_id else None

    flights = db.session.query(Flight, subq.c.count) \
        .filter(Flight.is_listable(current_user)) \
        .join(Flight.igc_file) \
        .options(contains_eager(Flight.igc_file)) \
        .join(owner_alias, IGCFile.owner) \
        .options(contains_eager(Flight.igc_file, IGCFile.owner, alias=owner_alias)) \
        .outerjoin(pilot_alias, Flight.pilot) \
        .options(contains_eager(Flight.pilot, alias=pilot_alias)) \
        .options(joinedload(Flight.co_pilot)) \
        .outerjoin(Flight.club) \
        .options(contains_eager(Flight.club)) \
        .outerjoin(Flight.takeoff_airport) \
        .options(contains_eager(Flight.takeoff_airport)) \
        .outerjoin(Flight.model) \
        .options(contains_eager(Flight.model)) \
        .outerjoin((subq, Flight.comments))

    if date:
        flights = flights.filter(Flight.date_local == date)

    if pilot:
        flights = flights.filter(
            or_(Flight.pilot == pilot, Flight.co_pilot == pilot))
    if club:
        flights = flights.filter(Flight.club == club)

    if airport:
        flights = flights.filter(Flight.takeoff_airport == airport)

    if pinned:
        flights = flights.filter(Flight.id.in_(pinned))

    if filter is not None:
        flights = flights.filter(filter)

    valid_columns = {
        'date': getattr(Flight, 'date_local'),
        'score': getattr(Flight, 'index_score'),
        'pilot': getattr(pilot_alias, 'name'),
        'distance': getattr(Flight, 'olc_classic_distance'),
        'airport': getattr(Airport, 'name'),
        'club': getattr(Club, 'name'),
        'aircraft': getattr(AircraftModel, 'name'),
        'time': getattr(Flight, 'takeoff_time'),
    }

    flights_count = flights.count()

    flights = Sorter.sort(flights,
                          'flights',
                          default_sorting_column,
                          valid_columns=valid_columns,
                          default_order=default_sorting_order)

    flights = flights.order_by(Flight.index_score.desc())

    flights = Pager.paginate(flights,
                             'flights',
                             items_per_page=int(
                                 current_app.config.get(
                                     'SKYLINES_LISTS_DISPLAY_LENGTH', 50)))

    flight_schema = FlightSchema()
    flights_json = []
    for f, num_comments in flights:
        flight = flight_schema.dump(f).data
        flight['private'] = not f.is_rankable()
        flight['numComments'] = num_comments
        flights_json.append(flight)

    json = dict(flights=flights_json, count=flights_count)

    if date:
        json['date'] = date.isoformat()

    if pilot:
        user_schema = UserSchema(only=('id', 'name'))
        json['pilot'] = user_schema.dump(pilot).data

    if club:
        club_schema = ClubSchema(only=('id', 'name'))
        json['club'] = club_schema.dump(club).data

    if airport:
        airport_schema = AirportSchema(only=('id', 'name', 'countryCode'))
        json['airport'] = airport_schema.dump(airport).data

    return jsonify(json)
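
# The aliasing pattern above, condensed: when User is joined twice under
# different roles, each join gets its own alias and contains_eager() is
# pointed at that alias so each eager load reads from the right columns.
# The alias= keyword matches the SQLAlchemy version used here; newer
# releases spell this contains_eager(Flight.pilot.of_type(pilot_alias)).
pilot_alias = aliased(User, name='pilot')
owner_alias = aliased(User, name='owner')

q = db.session.query(Flight) \
    .join(Flight.igc_file) \
    .options(contains_eager(Flight.igc_file)) \
    .join(owner_alias, IGCFile.owner) \
    .options(contains_eager(Flight.igc_file, IGCFile.owner, alias=owner_alias)) \
    .outerjoin(pilot_alias, Flight.pilot) \
    .options(contains_eager(Flight.pilot, alias=pilot_alias))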
Example #46
0
    def get_blocked_rooms(self, *dates, **kwargs):
        states = kwargs.get('states', (BlockedRoom.State.accepted,))
        return (self.blocked_rooms.join(BlockedRoom.blocking).options(
            contains_eager(BlockedRoom.blocking)).filter(
                or_(Blocking.is_active_at(d) for d in dates),
                BlockedRoom.state.in_(states)).all())
Example #47
0
    def filter(self, filter):
        """
        Returns a list of details for distro trees filtered by the given criteria.

        The *filter* argument must be an XML-RPC structure (dict) specifying 
        filter criteria. The following keys are recognised:

            'name'
                Distro name. May include % SQL wildcards, for example 
                ``'%20101121.nightly'``.
            'family'
                Distro family name, for example ``'RedHatEnterpriseLinuxServer5'``. 
                Matches are exact.
            'tags'
                List of distro tags, for example ``['STABLE', 'RELEASED']``. All given 
                tags must be present on the distro for it to match.
            'arch'
                Architecture name, for example ``'x86_64'``.
            'treepath'
                Tree path (on any lab controller). May include % SQL wildcards, for 
                example ``'nfs://nfs.example.com:%'``.
            'labcontroller'
                FQDN of lab controller. Limit to distro trees which are 
                available on this lab controller. May include % SQL wildcards.
            'distro_id'
                Distro id.
                Matches are exact.
            'distro_tree_id'
                Distro Tree id.
                Matches are exact.
            'xml'
                XML filter criteria in the same format allowed inside 
                ``<distroRequires/>`` in a job, for example
                ``<or><distro_tag value="RELEASED"/><distro_tag value="STABLE"/></or>``.
            'limit'
                Integer limit to number of distro trees returned.

        The return value is an array with one element per distro (up to the 
        maximum number of distros given by 'limit'). Each element is an XML-RPC 
        structure (dict) describing a distro tree.

        .. versionadded:: 0.9
        """
        query = DistroTree.query\
                .join(DistroTree.distro, Distro.osversion, OSVersion.osmajor)\
                .join(DistroTree.arch)\
                .options(contains_eager(DistroTree.distro),
                    contains_eager(DistroTree.arch))
        name = filter.get('name', None)
        family = filter.get('family', None)
        tags = filter.get('tags', None) or []
        arch = filter.get('arch', None)
        distro_id = filter.get('distro_id', None)
        distro_tree_id = filter.get('distro_tree_id', None)
        treepath = filter.get('treepath', None)
        labcontroller = filter.get('labcontroller', None)
        xml = filter.get('xml', None)
        limit = filter.get('limit', None)
        for tag in tags:
            query = query.filter(Distro._tags.any(DistroTag.tag == tag))
        if name:
            query = query.filter(Distro.name.like('%s' % name))
        if family:
            query = query.filter(OSMajor.osmajor == '%s' % family)
        if arch:
            if isinstance(arch, list):
                query = query.filter(Arch.arch.in_(arch))
            else:
                query = query.filter(Arch.arch == '%s' % arch)
        if distro_id:
            query = query.filter(Distro.id == int(distro_id))
        if distro_tree_id:
            query = query.filter(DistroTree.id == int(distro_tree_id))
        if treepath:
            query = query.filter(DistroTree.lab_controller_assocs.any(
                    LabControllerDistroTree.url.like('%s' % treepath)))
        elif labcontroller:
            query = query.filter(exists([1],
                    from_obj=[LabControllerDistroTree.__table__.join(LabController.__table__)])
                    .where(LabControllerDistroTree.distro_tree_id == DistroTree.id)
                    .where(LabController.fqdn.like(labcontroller)))
        else:
            # we only want distro trees that are active in at least one lab controller
            query = query.filter(DistroTree.lab_controller_assocs.any())
        if xml:
            query = needpropertyxml.apply_distro_filter('<and>%s</and>' % xml, query)
        query = query.order_by(DistroTree.date_created.desc())
        if limit:
            query = query[:limit]
        return [{'distro_tree_id': dt.id,
                 'distro_id': dt.distro.id,
                 'distro_name': dt.distro.name,
                 'distro_osversion': unicode(dt.distro.osversion),
                 'distro_osmajor' : unicode(dt.distro.osversion.osmajor),
                 'distro_tags': [unicode(tag) for tag in dt.distro.tags],
                 'arch': unicode(dt.arch),
                 'variant': dt.variant,
                 'images' : [(unicode(image.image_type), image.path) for image in dt.images],
                 'kernel_options': dt.kernel_options or u'',
                 'available': [(lca.lab_controller.fqdn, lca.url) for lca in dt.lab_controller_assocs],
                } for dt in query]
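
# Hypothetical XML-RPC usage of the filter() method above, following its
# docstring. The server URL and the 'distrotrees.filter' method path are
# assumptions; the criteria keys come straight from the docstring.
import xmlrpclib  # Python 2, matching this snippet

server = xmlrpclib.ServerProxy('https://beaker.example.com/RPC2')
trees = server.distrotrees.filter({
    'family': 'RedHatEnterpriseLinuxServer5',
    'tags': ['STABLE', 'RELEASED'],
    'arch': 'x86_64',
    'limit': 10,
})
# each element is a dict shaped like the return statement above,
# e.g. trees[0]['distro_name'], trees[0]['available']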
Example #48
0
def _create_list(tab,
                 kw,
                 date=None,
                 pilot=None,
                 club=None,
                 airport=None,
                 pinned=None,
                 filter=None,
                 default_sorting_column='score',
                 default_sorting_order='desc'):
    pilot_alias = aliased(User, name='pilot')
    owner_alias = aliased(User, name='owner')

    subq = db.session \
        .query(FlightComment.flight_id, func.count('*').label('count')) \
        .group_by(FlightComment.flight_id).subquery()

    flights = db.session.query(Flight, subq.c.count) \
        .filter(Flight.is_listable(g.current_user)) \
        .join(Flight.igc_file) \
        .options(contains_eager(Flight.igc_file)) \
        .join(owner_alias, IGCFile.owner) \
        .options(contains_eager(Flight.igc_file, IGCFile.owner, alias=owner_alias)) \
        .outerjoin(pilot_alias, Flight.pilot) \
        .options(contains_eager(Flight.pilot, alias=pilot_alias)) \
        .options(joinedload(Flight.co_pilot)) \
        .outerjoin(Flight.club) \
        .options(contains_eager(Flight.club)) \
        .outerjoin(Flight.takeoff_airport) \
        .options(contains_eager(Flight.takeoff_airport)) \
        .outerjoin(Flight.model) \
        .options(contains_eager(Flight.model)) \
        .outerjoin((subq, Flight.comments))

    if date:
        flights = flights.filter(Flight.date_local == date)

    if pilot:
        flights = flights.filter(
            or_(Flight.pilot == pilot, Flight.co_pilot == pilot))
    if club:
        flights = flights.filter(Flight.club == club)

    if airport:
        flights = flights.filter(Flight.takeoff_airport == airport)

    if pinned:
        flights = flights.filter(Flight.id.in_(pinned))

    if filter is not None:
        flights = flights.filter(filter)

    valid_columns = {
        'date': getattr(Flight, 'date_local'),
        'score': getattr(Flight, 'index_score'),
        'pilot': getattr(pilot_alias, 'name'),
        'distance': getattr(Flight, 'olc_classic_distance'),
        'airport': getattr(Airport, 'name'),
        'club': getattr(Club, 'name'),
        'aircraft': getattr(AircraftModel, 'name'),
        'time': getattr(Flight, 'takeoff_time'),
    }

    flights_count = flights.count()

    flights = Sorter.sort(flights,
                          'flights',
                          default_sorting_column,
                          valid_columns=valid_columns,
                          default_order=default_sorting_order)

    flights = flights.order_by(Flight.index_score.desc())

    flights = Pager.paginate(flights,
                             'flights',
                             items_per_page=int(
                                 current_app.config.get(
                                     'SKYLINES_LISTS_DISPLAY_LENGTH', 50)))

    return render_template('flights/list.jinja',
                           tab=tab,
                           date=date,
                           pilot=pilot,
                           club=club,
                           airport=airport,
                           flights=flights,
                           flights_count=flights_count)
Example #49
0
def ping(testID, sectionID, questionID):
    """
    Perform one ping.
    The time spent on the current section is increased by the Pinger's
    duration value (i.e. the remaining time for the section is reduced).
    (Read how the logic works for sectional jumps vs non sectional jumps in the
    comments below).

    If all the requests succeed, then the pinger gets a new cookie with an
    updated time.
    If not, this operation will not happen, making future requests impossible.

    :param testID:
    :param sectionID:
    :param questionID:
    :return:
    """

    try:
        # for tests with jumps allowed, the section_id field is ignored
        # instead, we use the value passed by the client.

        test_id, section_id, jumps_allowed, _, warn_count = \
            Pinger.validate_ping_time()
    except ValueError:
        return Error("Ping Time Too High", 403, Pinger.PING_TIME_FAIL_CODE)()

    # get TestAttempt -> Test
    # & TestAttempt->SectionAttempts->QuestionAttempts
    # & TestAttempt->SectionAttempts->Sections

    try:
        test_attempt = (
            TestAttempt.query.filter(TestAttempt.test_id == test_id).filter(
                TestAttempt.user_id == current_user.id).filter(
                    TestAttempt.is_complete == False).join(
                        TestAttempt.test).join(
                            TestAttempt.section_attempts).join(
                                SectionAttempt.question_attempts).join(
                                    SectionAttempt.section).
            options(contains_eager(TestAttempt.test)).options(
                contains_eager(TestAttempt.section_attempts).contains_eager(
                    SectionAttempt.question_attempts).load_only(
                        QuestionAttempt.section_attempt_id,
                        QuestionAttempt.time_spent)).options(
                            contains_eager(
                                TestAttempt.section_attempts).contains_eager(
                                    SectionAttempt.section)).one())
    except NoResultFound:
        return Error("Test already complete or invalid data", 403)()

    if jumps_allowed:
        # Section jumps are allowed.
        # in this case, calculate total time spent by summing up each
        # section attempt.
        # Then the time is compared with one of the sections under the test
        # (each section should have the same total_time)
        # if the time spent is > total_time , the test is automatically
        # marked complete

        section_times = map(lambda section_att: section_att.time_spent,
                            test_attempt.section_attempts)

        section_attempt = list(
            filter(
                lambda section_attempt: section_attempt.section_id ==
                sectionID, test_attempt.section_attempts))[0]

        section_attempt.time_spent += Pinger.PING_DURATION

        total_time_spent = sum(section_times)

        total_time = (list(
            map(lambda section_att: section_att.section.total_time,
                test_attempt.section_attempts))[0])

        if total_time_spent >= total_time:
            # fixme this method call does the entire complicated
            # join once more. may be avoided
            TestAttempt.calculate_score_for_test(current_user.id,
                                                 test_id,
                                                 should_persist=True)

    else:
        # Section jumps are not allowed.
        # in this case, calculate total time spent on the section by
        # checking the section from the session
        # if the time spent is > total sectional time , the section is
        # automatically marked complete

        section_attempt = list(
            filter(
                lambda section_attempt: section_attempt.section_id ==
                section_id, test_attempt.section_attempts))[0]

        sorted_section_attempts = sorted(
            test_attempt.section_attempts,
            key=lambda section_attempt: section_attempt.section_id)

        section_attempt.time_spent += Pinger.PING_DURATION

        if section_attempt.time_spent >= section_attempt.section.total_time:
            section_attempt.is_complete = True

            # if the last section was just completed
            # then calculate score and mark the test complete
            if section_attempt.section_id == sorted_section_attempts[
                    -1].section_id:
                TestAttempt.calculate_score_for_test(current_user.id, test_id,
                                                     True)

    # question attempt time is updated from the UI directly
    # this block will be deleted soon.

    # for section_attempt in test_attempt.section_attempts:
    #     for question_attempt in section_attempt.question_attempts:
    #         # print(question_attempt.question_id)
    #         # print(questionID)
    #         if question_attempt.question_id == questionID:
    #             # print("HERE")
    #             question_attempt.time_spent += Pinger.PING_DURATION

    db.session.commit()

    curr_time = datetime.timestamp(datetime.now())
    string = Pinger.push_data_from_data(test_id, section_id, jumps_allowed,
                                        curr_time, warn_count)
    resp = Response()
    resp.headers["ping"] = string
    return resp
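
# Hypothetical client side of the ping protocol described in the docstring.
# The route path and `client` object are assumptions; only the "ping"
# header round-trip is taken from the code above.
resp = client.post('/ping/{}/{}/{}'.format(test_id, section_id, question_id),
                   headers={'ping': last_token})
# every successful ping returns a refreshed token that must be echoed back
# on the next request; a missing or stale token makes future pings fail
last_token = resp.headers['ping']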
Example #50
0
def _authenticate_request_from_cookie(sql_session, contest, timestamp, cookie):
    """Return the current participation based on the cookie.

    If a participation can be extracted, the cookie is refreshed.

    sql_session (Session): the SQLAlchemy database session used to
        execute queries.
    contest (Contest): the contest the user is trying to access.
    timestamp (datetime): the date and the time of the request.
    cookie (bytes|None): the cookie the user's browser provided in the
        request (if any).

    return ((Participation, bytes)|(None, None)): the participation
        extracted from the cookie and the cookie to set/refresh, or
        None in case of errors.

    """
    if cookie is None:
        logger.info("Unsuccessful cookie authentication: no cookie provided")
        return None, None

    # Parse cookie.
    try:
        cookie = json.loads(cookie.decode("utf-8"))
        username = cookie[0]
        password = cookie[1]
        last_update = make_datetime(cookie[2])
    except Exception as e:
        # Cookies are stored securely and thus cannot be tampered with:
        # this is either a programming or a configuration error.
        logger.warning("Invalid cookie (%s): %s", e, cookie)
        return None, None

    def log_failed_attempt(msg, *args):
        logger.info(
            "Unsuccessful cookie authentication as %r, returning from "
            "%s, at %s: " + msg, username, last_update, timestamp, *args)

    # Check if the cookie is expired.
    if timestamp - last_update > timedelta(seconds=config.cookie_duration):
        log_failed_attempt("cookie expired (lasts %d seconds)",
                           config.cookie_duration)
        return None, None

    # Load participation from DB and make sure it exists.
    participation = sql_session.query(Participation) \
        .join(Participation.user) \
        .options(contains_eager(Participation.user)) \
        .filter(Participation.contest == contest) \
        .filter(User.username == username) \
        .first()
    if participation is None:
        log_failed_attempt("user not registered to contest")
        return None, None

    correct_password = get_password(participation)

    # We compare hashed password because it would be too expensive to
    # re-hash the user-provided plaintext password at every request.
    if password != correct_password:
        log_failed_attempt("wrong password")
        return None, None

    logger.info(
        "Successful cookie authentication as user %r, on contest %s, "
        "returning from %s, at %s", username, contest.name, last_update,
        timestamp)

    # We store the hashed password (if hashing is used) so that the
    # expensive bcrypt hashing doesn't need to be done at every request.
    return (participation,
            json.dumps([username, correct_password,
                        make_timestamp(timestamp)]).encode("utf-8"))
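
# Sketch of the cookie body implied above (all names are from this snippet;
# the secure-cookie encryption layer itself lives outside this function):
# the cookie is a JSON triple that gets re-issued on every successful
# request so its timestamp stays fresh.
cookie = json.dumps([
    "alice",                    # username (illustrative)
    correct_password,           # stored/hashed password
    make_timestamp(timestamp),  # time of the last successful request
]).encode("utf-8")
# expiry is then checked exactly as above:
#   timestamp - make_datetime(last_update) > timedelta(seconds=config.cookie_duration)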
Example #51
0
jack = session.query(User). \
        options(joinedload(User.addresses)). \
        filter_by(name='jack').one()
print("jack: ", jack)
print("jack.addresses: ", jack.addresses)

# Explicit Join + Eagerload
print("\nExplicit Join + Eagerload:")
print(
    "Below we illustrate loading an Address row as well as the related User object, filtering on the User named "
    "“jack” and using contains_eager() to apply the “user” columns to the Address.user attribute:"
)
jacks_addresses = session.query(Address).\
                                join(Address.user).\
                                filter(User.name=='jack').\
                                options(contains_eager(Address.user)).\
                                all()
print("jacks_addresses: ", jacks_addresses)
print("jacks_addresses[0].user: "******"\nDeleting:")
print(
    "Let’s try to delete jack and see how that goes. We’ll mark the object as deleted in the session, then we’ll "
    "issue a count query to see that no rows remain::")
session.delete(jack)
print("session.query(User).filter_by(name='jack').count(): ",
      session.query(User).filter_by(name='jack').count())

print("\nSo far, so good. How about Jack’s Address objects ?:")
Example #52
0
def _get_allocations_from_db(context, rp_uuid):
    query = (context.session.query(models.Allocation).join(
        models.Allocation.resource_provider).options(
            contains_eager('resource_provider')).filter(
                models.ResourceProvider.uuid == rp_uuid))
    return query.all()
Example #53
0
def _get_all_by_resource_provider(context, rp_uuid):
    return context.session.query(models.Inventory).\
        join(models.Inventory.resource_provider).\
        options(contains_eager('resource_provider')).\
        filter(models.ResourceProvider.uuid == rp_uuid).all()
Example #54
0
def get_category_timetable(categ_ids, start_dt, end_dt, detail_level='event', tz=utc, from_categ=None, grouped=True):
    """Retrieve time blocks that fall within a specific time interval
       for a given set of categories.

       :param categ_ids: iterable containing list of category IDs
       :param start_dt: start of search interval (``datetime``, expected
                        to be in display timezone)
       :param end_dt: end of search interval (``datetime``, expected
                      to be in display timezone)
       :param detail_level: the level of detail of information
                            (``event|session|contribution``)
       :param tz: the ``timezone`` information should be displayed in
       :param from_categ: ``Category`` that will be taken into account to calculate
                          visibility
       :param grouped: Whether to group results by start date
       :returns: a dictionary containing timetable information in a
                 structured way. See source code for examples.
    """
    day_start = start_dt.astimezone(utc)
    day_end = end_dt.astimezone(utc)
    dates_overlap = lambda t: (t.start_dt >= day_start) & (t.start_dt <= day_end)

    items = defaultdict(lambda: defaultdict(list))

    # first of all, query TimetableEntries/events that fall within
    # specified range of dates (and category set)
    events = _query_events(categ_ids, day_start, day_end)
    if from_categ:
        events = events.filter(Event.is_visible_in(from_categ))
    for eid, tt_start_dt in events:
        if tt_start_dt:
            items[eid][tt_start_dt.astimezone(tz).date()].append(tt_start_dt)
        else:
            items[eid] = None

    # then, retrieve detailed information about the events
    event_ids = set(items)
    query = (Event.find(Event.id.in_(event_ids))
             .options(subqueryload(Event.person_links).joinedload(EventPersonLink.person),
                      joinedload(Event.own_room).noload('owner'),
                      joinedload(Event.own_venue),
                      joinedload(Event.category).undefer('effective_icon_data'),
                      undefer('effective_protection_mode')))
    scheduled_events = defaultdict(list)
    ongoing_events = []
    events = []
    for e in query:
        if grouped:
            local_start_dt = e.start_dt.astimezone(tz).date()
            local_end_dt = e.end_dt.astimezone(tz).date()
            if items[e.id] is None:
                # if there is no TimetableEntry, the event has no timetable entries in that interval
                for day in iterdays(max(start_dt.date(), local_start_dt), min(end_dt.date(), local_end_dt)):
                    # if the event starts on this date, we've got a time slot
                    if day.date() == local_start_dt:
                        scheduled_events[day.date()].append((e.start_dt, e))
                    else:
                        ongoing_events.append(e)
            else:
                for start_d, start_dts in items[e.id].viewitems():
                    scheduled_events[start_d].append((start_dts[0], e))
        else:
            events.append(e)

    # result['events'][date(...)] -> [(datetime(....), Event(...))]
    # result[event_id]['contribs'][date(...)] -> [(TimetableEntry(...), Contribution(...))]
    # result['ongoing_events'] = [Event(...)]
    if grouped:
        result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    else:
        result = defaultdict(lambda: defaultdict(list))

    result.update({
        'events': scheduled_events if grouped else events,
        'ongoing_events': ongoing_events
    })

    # according to detail level, ask for extra information from the DB
    if detail_level != 'event':
        query = _query_blocks(event_ids, dates_overlap, detail_level)
        if grouped:
            for b in query:
                start_date = b.timetable_entry.start_dt.astimezone(tz).date()
                result[b.session.event_id]['blocks'][start_date].append((b.timetable_entry, b))
        else:
            for b in query:
                result[b.session.event_id]['blocks'].append(b)

    if detail_level == 'contribution':
        query = (Contribution.find(Contribution.event_id.in_(event_ids),
                                   dates_overlap(TimetableEntry),
                                   ~Contribution.is_deleted)
                 .options(contains_eager(Contribution.timetable_entry),
                          joinedload(Contribution.person_links))
                 .join(TimetableEntry))
        if grouped:
            for c in query:
                start_date = c.timetable_entry.start_dt.astimezone(tz).date()
                result[c.event_id]['contribs'][start_date].append((c.timetable_entry, c))
        else:
            for c in query:
                result[c.event_id]['contributions'].append(c)

        query = (Break.find(TimetableEntry.event_id.in_(event_ids), dates_overlap(TimetableEntry))
                 .options(contains_eager(Break.timetable_entry))
                 .join(TimetableEntry))
        if grouped:
            for b in query:
                start_date = b.timetable_entry.start_dt.astimezone(tz).date()
                result[b.timetable_entry.event_id]['breaks'][start_date].append((b.timetable_entry, b))
        else:
            for b in query:
                result[b.timetable_entry.event_id]['breaks'].append(b)
    return result
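
# Illustrative call based on the docstring and the result-shape comments
# above (the category id and dates are made up):
from datetime import datetime
from pytz import timezone

display_tz = timezone('Europe/Zurich')
info = get_category_timetable([42],
                              display_tz.localize(datetime(2016, 3, 1)),
                              display_tz.localize(datetime(2016, 3, 31)),
                              detail_level='contribution',
                              tz=display_tz)
# info['events'][date(2016, 3, 5)] -> [(start_dt, Event), ...]
# info[event_id]['contribs'][date(2016, 3, 5)] -> [(TimetableEntry, Contribution), ...]
# info['ongoing_events'] -> [Event, ...]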
Example #55
0
def _create_list(
    date=None,
    pilot=None,
    club=None,
    landscape=None,
    airport=None,
    pinned=None,
    filter=None,
    default_sorting_column="score",
    default_sorting_order="desc",
):

    pilot_alias = aliased(User, name="pilot")
    owner_alias = aliased(User, name="owner")

    subq = (
        db.session.query(FlightComment.flight_id, func.count("*").label("count"))
        .group_by(FlightComment.flight_id)
        .subquery()
    )

    current_user = User.get(request.user_id) if request.user_id else None

    flights = (
        db.session.query(Flight, subq.c.count)
        .filter(Flight.is_listable(current_user))
        .join(Flight.igc_file)
        .options(contains_eager(Flight.igc_file))
        .join(owner_alias, IGCFile.owner)
        .options(contains_eager(Flight.igc_file, IGCFile.owner, alias=owner_alias))
        .outerjoin(pilot_alias, Flight.pilot)
        .options(contains_eager(Flight.pilot, alias=pilot_alias))
        .options(joinedload(Flight.co_pilot))
        .outerjoin(Flight.club)
        .options(contains_eager(Flight.club))
        .outerjoin(Flight.takeoff_airport)
        .options(contains_eager(Flight.takeoff_airport))
        .outerjoin(Flight.model)
        .options(contains_eager(Flight.model))
        .outerjoin((subq, Flight.comments))
    )

    if date:
        flights = flights.filter(Flight.date_local == date)

    if pilot:
        flights = flights.filter(or_(Flight.pilot == pilot, Flight.co_pilot == pilot))

    if club:
        flights = flights.filter(Flight.club == club)

    if landscape:
        flights = flights.filter(Flight.landscape == landscape)

    if airport:
        flights = flights.filter(Flight.takeoff_airport == airport)

    if pinned:
        flights = flights.filter(Flight.id.in_(pinned))

    if filter is not None:
        flights = flights.filter(filter)

    valid_columns = {
        "date": getattr(Flight, "date_local"),
        "score": getattr(Flight, "index_score"),
        "pilot": getattr(pilot_alias, "name"),
        "distance": getattr(Flight, "olc_classic_distance"),
        "landscape": getattr(Flight, "landscape"),
        "airport": getattr(Airport, "name"),
        "club": getattr(Club, "name"),
        "aircraft": getattr(AircraftModel, "name"),
        "time": getattr(Flight, "takeoff_time"),
    }

    flights_count = flights.count()

    flights = Sorter.sort(
        flights,
        "flights",
        default_sorting_column,
        valid_columns=valid_columns,
        default_order=default_sorting_order,
    )

    flights = flights.order_by(Flight.index_score.desc())

    flights = Pager.paginate(
        flights,
        "flights",
        items_per_page=int(current_app.config.get("SKYLINES_LISTS_DISPLAY_LENGTH", 50)),
    )

    flight_schema = FlightSchema()
    flights_json = []
    for f, num_comments in flights:
        flight = flight_schema.dump(f).data
        flight["private"] = not f.is_rankable()
        flight["numComments"] = num_comments
        flights_json.append(flight)

    json = dict(flights=flights_json, count=flights_count)

    if date:
        json["date"] = date.isoformat()

    if pilot:
        user_schema = UserSchema(only=("id", "name"))
        json["pilot"] = user_schema.dump(pilot).data

    if club:
        club_schema = ClubSchema(only=("id", "name"))
        json["club"] = club_schema.dump(club).data

    if airport:
        airport_schema = AirportSchema(only=("id", "name", "countryCode"))
        json["airport"] = airport_schema.dump(airport).data

    return jsonify(json)
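Example #55 joins User twice, once as pilot and once as owner, which only works when each contains_eager option is bound to the matching alias. Below is a minimal sketch of that two-alias pattern with hypothetical stand-in models; note that the alias= keyword shown here is the classic 1.x spelling, and SQLAlchemy 1.4+ prefers contains_eager(Flight.pilot.of_type(pilot_alias)).

from sqlalchemy import create_engine, Column, Integer, ForeignKey, String
from sqlalchemy.orm import (relationship, sessionmaker, aliased,
                            contains_eager)
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

class Flight(Base):
    __tablename__ = 'flights'
    id = Column(Integer, primary_key=True)
    pilot_id = Column(Integer, ForeignKey('users.id'))
    owner_id = Column(Integer, ForeignKey('users.id'))
    pilot = relationship(User, foreign_keys=[pilot_id])
    owner = relationship(User, foreign_keys=[owner_id])

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

pilot_alias = aliased(User, name='pilot')
owner_alias = aliased(User, name='owner')

# Each relationship joins against its own alias of `users`, and
# contains_eager(..., alias=...) maps the aliased columns back onto
# the matching attribute.
flights = (
    session.query(Flight)
    .join(pilot_alias, Flight.pilot)
    .options(contains_eager(Flight.pilot, alias=pilot_alias))
    .join(owner_alias, Flight.owner)
    .options(contains_eager(Flight.owner, alias=owner_alias))
    .all()
)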
Example #56
0
def go():
    eq_(
        sess.query(Parent).join(Parent.children).options(
            contains_eager(Parent.children)).order_by(
                Parent.data, Sub.data).all(), [p1, p2])
Example #57
0
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        args = self.get_parser.parse_args()

        filters = []

        if args.authors:
            filters.append(Build.author_id.in_([a.id for a in args.authors]))
        elif args.authors is not None:
            return []

        if args.source:
            filters.append(Build.target.startswith(args.source))

        # is this from the search bar
        if args.query:
            clauses = []
            # search by revision title
            clauses.append(Build.label.contains(args.query))
            # search by prefix
            clauses.append(Build.target.startswith(args.query))
            # allows users to paste a full commit hash and still
            # find the relevant build(s). Should be fine for mercurial/git,
            # and svn will never have long enough strings
            if len(args.query) > 12:
                clauses.append(Build.target.startswith(args.query[0:12]))
            # if they searched for something that looks like a phabricator
            # identifier, try to find it
            if might_be_diffusion_iden(args.query):
                possible_hash = get_hash_from_diffusion_iden(args.query)
                if possible_hash:
                    # the query should always be at least as long or longer than
                    # our commit identifiers
                    clauses.append(
                        Build.target.startswith(possible_hash[0:12]))
            filters.append(or_(*clauses))

        if args.result:
            filters.append(Build.result == Result[args.result])

        if args.cause:
            filters.append(Build.cause == Cause[args.cause])

        if args.tag:
            # Drop empty tags, which historically mean "no tag restriction".
            # Using a list (not a lazy filter object) keeps the truthiness
            # check below meaningful on Python 3.
            tags = [tag for tag in args.tag if tag]
            if tags:
                filters.append(or_(*[Build.tags.any(t) for t in tags]))

        if args.patches_only:
            filters.append(Source.patch_id != None)  # NOQA
        elif not args.include_patches:
            filters.append(Source.patch_id == None)  # NOQA

        queryset = Build.query.options(
            joinedload('project', innerjoin=True),
            joinedload('author'),
            contains_eager('source').joinedload('revision'),
        ).join(
            Source, Source.id == Build.source_id,
        ).filter(
            Build.project_id == project.id,
            Source.repository_id == project.repository_id,
            *filters
        ).order_by(Build.date_created.desc())

        return self.paginate(queryset)
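The queryset above mixes both eager-loading styles: contains_eager('source') reuses the explicit JOIN that the filters need anyway, while the chained joinedload('revision') lets the ORM add its own join one level deeper. A minimal sketch of that combination, with hypothetical Build/Source/Revision stand-ins:

from sqlalchemy import create_engine, Column, Integer, ForeignKey
from sqlalchemy.orm import (relationship, sessionmaker, joinedload,
                            contains_eager)
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Revision(Base):
    __tablename__ = 'revisions'
    id = Column(Integer, primary_key=True)

class Source(Base):
    __tablename__ = 'sources'
    id = Column(Integer, primary_key=True)
    revision_id = Column(Integer, ForeignKey('revisions.id'))
    revision = relationship(Revision)

class Build(Base):
    __tablename__ = 'builds'
    id = Column(Integer, primary_key=True)
    source_id = Column(Integer, ForeignKey('sources.id'))
    source = relationship(Source)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

# `source` rides along on the explicit JOIN; `revision` is pulled in
# by the extra LEFT OUTER JOIN that joinedload adds for us.
builds = (
    session.query(Build)
    .join(Source, Source.id == Build.source_id)
    .options(contains_eager(Build.source).joinedload(Source.revision))
    .all()
)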
Example #58
0
def go():
    eq_(
        sess.query(Subparent).join(Subparent.children).options(
            contains_eager(Subparent.children)).all(), [p1, p2])
Example #59
0
def select_rubric_items(
        submission_id: int) -> ExtendedJSONResponse[models.Work]:
    """Select the given rubric items for the given submission.

    .. :quickref: Submission; Select multiple rubric items.

    :param submission_id: The submission to select the items for.

    :>json array items: An array of objects, each object representing the item
        you want to select.
    :>jsonarr int row_id: The id of the row the item is in.
    :>jsonarr int item_id: The id of the item you want to select.
    :>jsonarr float multiplier: The multiplier you want to use for this rubric
        item. This value defaults to 1.0, and can only be something other than
        1.0 for rubric rows with ``type`` 'continuous'.

    :returns: Nothing.

    :raises APIException: If a given item does not belong to the assignment
        of the given submission (INVALID_PARAM).
    :raises PermissionException: If the current user cannot grade work
        (INCORRECT_PERMISSION).
    """
    submission = helpers.filter_single_or_404(
        models.Work,
        models.Work.id == submission_id,
        ~models.Work.deleted,
        with_for_update=True,
        with_for_update_of=models.Work,
    )
    assig = submission.assignment

    auth.ensure_permission(CPerm.can_grade_work, assig.course_id)

    content = ensure_json_dict(request.get_json())
    ensure_keys_in_dict(content, [('items', list)])
    json_input = t.cast(t.List[helpers.JSONType], content['items'])

    RowType = t.NamedTuple('RowType', [('row_id', int), ('item_id', int),
                                       ('multiplier', float)])
    sanitized_input: t.List[RowType] = []
    copy_locked_items = helpers.request_arg_true('copy_locked_items')

    if json_input and isinstance(json_input[0], dict):
        for json_row in json_input:
            with helpers.get_from_map_transaction(
                    ensure_json_dict(json_row), ) as [get, opt_get]:
                row_id = get('row_id', int)
                item_id = get('item_id', int)
                multiplier = opt_get('multiplier', (float, int), 1.0)

            if multiplier > 1.0 or multiplier < 0.0:
                raise APIException(
                    'A multiplier has to be between 0.0 and 1.0.',
                    f'The given multiplier of {multiplier} is illegal',
                    APICodes.INVALID_PARAM, 400)

            sanitized_input.append(
                RowType(row_id=row_id, item_id=item_id, multiplier=multiplier))
    else:
        sanitized_input = [
            RowType(row_id=item.rubricrow_id, item_id=item.id, multiplier=1.0)
            for item in helpers.get_in_or_error(
                models.RubricItem,
                models.RubricItem.id,
                t.cast(t.List[int], json_input),
            )
        ]

    if helpers.contains_duplicate(item.row_id for item in sanitized_input):
        duplicates = [
            k for k, v in Counter(item.row_id
                                  for item in sanitized_input).items() if v > 1
        ]
        raise APIException(
            'Duplicate rows in selected items',
            'The rows "{}" had duplicate items'.format(','.join(
                map(str, duplicates))), APICodes.INVALID_PARAM, 400)

    rows = helpers.get_in_or_error(
        models.RubricRow,
        models.RubricRow.id,
        [item.row_id for item in sanitized_input],
        options=[selectinload(models.RubricRow.items)],
        as_map=True,
    )

    if any(row.assignment_id != assig.id for row in rows.values()):
        raise APIException(
            'Selected rubric item is not coupled to the given submission',
            (f'A given item'
             f' of "{", ".join(str(i.item_id) for i in sanitized_input)}"'
             f' does not belong to assignment "{assig.id}"'),
            APICodes.INVALID_PARAM, 400)

    items = [
        rows[item.row_id].make_work_rubric_item(work=submission,
                                                item_id=item.item_id,
                                                multiplier=item.multiplier)
        for item in sanitized_input
    ]

    if copy_locked_items:
        for locked_item in db.session.query(models.WorkRubricItem).join(
                models.RubricItem,
                sql.expression.and_(
                    models.RubricItem.id ==
                    models.WorkRubricItem.rubricitem_id,
                    t.cast(DbColumn[int], models.RubricItem.rubricrow_id).in_(
                        list(assig.locked_rubric_rows.keys())),
                )).filter(models.WorkRubricItem.work == submission).options(
                    contains_eager(models.WorkRubricItem.rubric_item)).all():
            items.append(locked_item)

            row_id = locked_item.rubric_item.rubricrow_id
            if row_id in rows:
                raise APIException(
                    ('You cannot specify locked rows when the option'
                     " 'copy_locked_items' is given."),
                    f'The row {row_id} was given twice',
                    APICodes.INVALID_PARAM,
                    400,
                )
    elif assig.auto_test is not None:
        changed_items = set(items) ^ set(submission.selected_items)
        locked_row_ids = set(assig.locked_rubric_rows)
        if any(item.rubric_item.rubricrow_id in locked_row_ids
               for item in changed_items):
            raise APIException(
                ('This rubric row is connected to an AutoTest category, so'
                 ' you cannot change it.'),
                'An item is connected to one of these rows: "{}"'.format(
                    ', '.join(map(str, assig.locked_rubric_rows))),
                APICodes.INVALID_PARAM, 400)

    submission.select_rubric_items(items, current_user, True)
    db.session.commit()

    return extended_jsonify(submission, use_extended=models.Work)
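The locked-items query in this example highlights a subtlety of contains_eager: because the JOIN is filtered, the eagerly populated attribute reflects exactly the rows that survived the filter. That is safe for a many-to-one like rubric_item, but applied to a collection it would silently truncate it. A minimal sketch of the filtered-join form, with hypothetical models:

from sqlalchemy import create_engine, Column, Integer, ForeignKey
from sqlalchemy.orm import relationship, sessionmaker, contains_eager
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Item(Base):
    __tablename__ = 'items'
    id = Column(Integer, primary_key=True)
    row_id = Column(Integer)

class Selection(Base):
    __tablename__ = 'selections'
    id = Column(Integer, primary_key=True)
    item_id = Column(Integer, ForeignKey('items.id'))
    item = relationship(Item)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

locked_rows = [1, 2]

# Filtering on the JOINed table is safe with contains_eager on a
# many-to-one: each Selection gets exactly the Item row it joined to.
selections = (
    session.query(Selection)
    .join(Item, Item.id == Selection.item_id)
    .filter(Item.row_id.in_(locked_rows))
    .options(contains_eager(Selection.item))
    .all()
)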
Example #60
0
def validate_login(sql_session, contest, timestamp, username, password,
                   ip_address):
    """Authenticate a user logging in, with username and password.

    Given the information the user provided (the username and the
    password) and some context information (contest, to determine which
    users are allowed to log in, how and with which restrictions;
    timestamp for cookie creation; IP address to check against) try to
    authenticate the user and return its participation and the cookie
    to set to help authenticate future visits.

    After finding the participation, IP login and hidden users
    restrictions are checked.

    sql_session (Session): the SQLAlchemy database session used to
        execute queries.
    contest (Contest): the contest the user is trying to access.
    timestamp (datetime): the date and the time of the request.
    username (str): the username the user provided.
    password (str): the password the user provided.
    ip_address (IPv4Address|IPv6Address): the IP address the request
        came from.

    return ((Participation, bytes)|(None, None)): if the user couldn't
        be authenticated then return None, otherwise return the
        participation that they wanted to authenticate as; if a cookie
        has to be set return it as well, otherwise return None.

    """
    def log_failed_attempt(msg, *args):
        logger.info(
            "Unsuccessful login attempt from IP address %s, as user "
            "%r, on contest %s, at %s: " + msg, ip_address, username,
            contest.name, timestamp, *args)

    if not contest.allow_password_authentication:
        log_failed_attempt("password authentication not allowed")
        return None, None

    participation = sql_session.query(Participation) \
        .join(Participation.user) \
        .options(contains_eager(Participation.user)) \
        .filter(Participation.contest == contest)\
        .filter(User.username == username)\
        .first()

    if participation is None:
        log_failed_attempt("user not registered to contest")
        return None, None

    correct_password = get_password(participation)

    try:
        password_valid = validate_password(correct_password, password)
    except ValueError as e:
        # This is either a programming or a configuration error.
        logger.warning(
            "Invalid password stored in database for user %s in contest %s: "
            "%s", participation.user.username, participation.contest.name, e)
        return None, None

    if not password_valid:
        log_failed_attempt("wrong password")
        return None, None

    if contest.ip_restriction and participation.ip is not None \
            and not any(ip_address in network for network in participation.ip):
        log_failed_attempt("unauthorized IP address")
        return None, None

    if contest.block_hidden_participations and participation.hidden:
        log_failed_attempt("participation is hidden and unauthorized")
        return None, None

    logger.info(
        "Successful login attempt from IP address %s, as user %r, on "
        "contest %s, at %s", ip_address, username, contest.name, timestamp)

    # If hashing is used, the cookie stores the hashed password so that
    # the expensive bcrypt call doesn't need to be done at every request.
    return (participation,
            json.dumps([username, correct_password,
                        make_timestamp(timestamp)]).encode("utf-8"))
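This last example is perhaps the cleanest motivation for contains_eager: the query has to join User anyway in order to filter on User.username, so the option simply recycles that join to populate participation.user for the later accesses in the success path. A minimal stand-alone sketch of the same shape, with hypothetical models:

from sqlalchemy import create_engine, Column, Integer, ForeignKey, String
from sqlalchemy.orm import relationship, sessionmaker, contains_eager
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    username = Column(String, unique=True)

class Participation(Base):
    __tablename__ = 'participations'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))
    user = relationship(User)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

# One round trip: the WHERE clause runs against the joined `users`
# table, and the User instance comes back already attached, so a
# later `participation.user.username` access hits no lazy load.
participation = (
    session.query(Participation)
    .join(Participation.user)
    .options(contains_eager(Participation.user))
    .filter(User.username == 'alice')
    .first()
)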