Example #1
def quarterly_review(quarter):
	quarter = str(quarter)

	sortingDictionary = {0: Attendee.id, 1: Attendee.year, 2: Attendee.first_name, 3: Attendee.last_name }
	siftingDictionary = {1: "freshman", 2: "sophomore", 3: "junior", 4: "senior", 5: "other" }

	# get params
	sort = request.args.get('sort', 0, type=int)
	sift = request.args.get('sift', 0, type=int)

	weeks = [0 for i in range(10)]
	if (sift == 0):
		users = db.session.query(Attendee).order_by(sortingDictionary[sort]).options(load_only("id", "first_name", "last_name", "year"))
		userCount = db.session.query(Attendee).order_by(sortingDictionary[sort]).options(load_only("id", "first_name", "last_name", "year")).count()
	else:
		users = db.session.query(Attendee).filter_by(year=siftingDictionary[sift]).order_by(sortingDictionary[sort]).options(load_only("id", "first_name", "last_name", "year"))
		userCount = db.session.query(Attendee).filter_by(year=siftingDictionary[sift]).order_by(sortingDictionary[sort]).options(load_only("id", "first_name", "last_name", "year")).count()

	attendanceArray = [[0 for i in range(10)] for j in range(userCount)]
	sumArray = [0 for i in range(userCount)]
	weekDB = db.session.query(LargeGroup).filter_by(quarter=quarter).options(load_only("id"))

	# set up full quarter week array with db ID's if exists, 0 otherwise
	for week in weekDB:
		try:
			weeks[int(week.weekNumber)-1] = week.id
		except ValueError as e:
			print(str(e))
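Every example on this page leans on sqlalchemy.orm.load_only to restrict the columns a query fetches. A minimal, self-contained sketch of that pattern, reusing the Attendee name from Example #1 (the schema, sample data, and SQLite URL are illustrative, assuming SQLAlchemy 1.4+):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base, load_only

Base = declarative_base()

class Attendee(Base):
    __tablename__ = 'attendee'
    id = Column(Integer, primary_key=True)
    first_name = Column(String)
    last_name = Column(String)
    year = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Attendee(first_name='Ada', last_name='Lovelace', year='senior'))
    session.commit()
    # Only id and first_name are fetched up front; the other columns are
    # deferred and would trigger an extra SELECT if accessed later.
    users = session.query(Attendee).options(load_only(Attendee.id, Attendee.first_name)).all()
    print([(u.id, u.first_name) for u in users])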
Example #2
 def _relevant_to_snapshot(object_name, ids):
   """Filter by relevant object over snapshot"""
   snapshot_qs = models.Snapshot.query.filter(
       models.Snapshot.parent_type == models.Audit.__name__,
       models.Snapshot.child_type == object_name,
       models.Snapshot.child_id.in_(ids),
   ).options(
       load_only(models.Snapshot.id),
   ).distinct(
   ).subquery(
       "snapshot"
   )
   dest_qs = models.Relationship.query.filter(
       models.Relationship.destination_id == snapshot_qs.c.id,
       models.Relationship.destination_type == models.Snapshot.__name__,
       models.Relationship.source_type == object_class.__name__,
   ).options(
       load_only("source_id")
   ).distinct()
   source_qs = models.Relationship.query.filter(
       models.Relationship.source_id == snapshot_qs.c.id,
       models.Relationship.source_type == models.Snapshot.__name__,
       models.Relationship.destination_type == object_class.__name__,
   ).options(
       load_only("destination_id")
   ).distinct()
   ids_qs = dest_qs.union(source_qs).distinct().subquery("ids")
   return object_class.id == ids_qs.c.relationships_source_id
Example #3
def main():
    html_tag_regex = '<[a-zA-Z]+.*>'
    contributions = (Contribution.query
                     .filter(Contribution.description.op('~')(html_tag_regex))
                     .options(load_only('id', 'description'))
                     .all())
    subcontributions = (SubContribution.query
                        .filter(SubContribution.description.op('~')(html_tag_regex))
                        .options(load_only('id', 'description'))
                        .all())
    categories = (Category.query
                  .filter(Category.description.op('~')(html_tag_regex))
                  .options(load_only('id', 'description'))
                  .all())

    def as_dict(objs):
        return {x.id: x.description for x in objs}

    def format_table(model):
        return model.__table__.fullname

    object_descriptions = {
        format_table(Contribution): as_dict(contributions),
        format_table(SubContribution): as_dict(subcontributions),
        format_table(Category): as_dict(categories)
    }

    env = Environment(loader=FileSystemLoader(os.path.dirname(__file__)))

    template = env.get_template('fix_descriptions_template.html')
    print(template.render(object_descriptions=htmlsafe_dumps(object_descriptions)))
Example #4
def get_events_with_abstract_reviewer_convener(user, dt=None):
    """
    Return a dict of event ids and the abstract reviewing related
    roles the user has in that event.

    :param user: A `User`
    :param dt: Only include events taking place on/after that date
    """
    data = defaultdict(set)
    # global reviewer/convener
    mapping = {'global_abstract_reviewer_for_events': 'abstract_reviewer',
               'global_convener_for_events': 'track_convener'}
    for rel, role in mapping.iteritems():
        query = (Event.query.with_parent(user, rel)
                 .filter(Event.ends_after(dt), ~Event.is_deleted)
                 .options(load_only('id')))
        for event in query:
            data[event.id].add(role)
    # track reviewer/convener
    mapping = {'abstract_reviewer_for_tracks': 'abstract_reviewer',
               'convener_for_tracks': 'track_convener'}
    for rel, role in mapping.iteritems():
        query = (Track.query.with_parent(user, rel)
                 .join(Track.event_new)
                 .filter(Event.ends_after(dt), ~Event.is_deleted)
                 .options(load_only('event_id')))
        for track in query:
            data[track.event_id].add(role)
    return data
Example #5
def get_events_with_abstract_persons(user, dt=None):
    """
    Return a dict of event ids and the abstract submission related
    roles the user has in that event.

    :param user: A `User`
    :param dt: Only include events taking place on/after that date
    """
    data = defaultdict(set)
    bad_states = {AbstractState.withdrawn, AbstractState.rejected}
    # submitter
    query = (Abstract.query
             .filter(~Event.is_deleted,
                     ~Abstract.is_deleted,
                     ~Abstract.state.in_(bad_states),
                     Event.ends_after(dt),
                     Abstract.submitter == user)
             .join(Abstract.event_new)
             .options(load_only('event_id')))
    for abstract in query:
        data[abstract.event_id].add('abstract_submitter')
    # person
    abstract_criterion = db.and_(~Abstract.state.in_(bad_states), ~Abstract.is_deleted)
    query = (user.event_persons
             .filter(~Event.is_deleted,
                     Event.ends_after(dt),
                     EventPerson.abstract_links.any(AbstractPersonLink.abstract.has(abstract_criterion)))
             .join(EventPerson.event_new)
             .options(load_only('event_id')))
    for person in query:
        data[person.event_id].add('abstract_person')
    return data
Example #6
def autocomplete():
    search = unicode(request.args.get('q'))
    products = Product.query.options(load_only("title", "id")).filter(Product.title.startswith(search), Product.status == 'publish').limit(5).all()
    products2 = Product.query.options(load_only("title", "id")).filter(Product.title.contains('%' + search + '%'), Product.status == 'publish').limit(5).all()
    p = {}
    q = []
    for product in products:
        # p.extend(['{ title:'+product.title+', image: '+product.image+'}'])
        p = {"label": product.title, "url": "/p/" + str(product.id)}
        q.extend([p])
    for product in products2:
        r = {"label": product.title, "url": "/p/" + str(product.id)}
        q.extend([r])

    seen = set()  # http://stackoverflow.com/questions/9427163/remove-duplicate-dict-in-list-in-python
    l = []
    for d in q:
        t = tuple(d.items())
        if t not in seen:
            seen.add(t)
            l.append(d)

    products = json.dumps(l)
    response = Response(products, mimetype='application/json')
    return response
Example #7
    def post(self, course_id, question_id, criteria_id):
        Courses.exists_or_404(course_id)
        PostsForQuestions.query.options(load_only('id')).get_or_404(question_id)
        Criteria.query.options(load_only('id')).get_or_404(criteria_id)

        question = PostsForQuestions(post=Posts(courses_id=course_id))
        criteria_question = CriteriaAndPostsForQuestions(question=question)
        require(CREATE, criteria_question)

        criteria_question = CriteriaAndPostsForQuestions.query.filter_by(criteria_id=criteria_id). \
            filter_by(questions_id=question_id).first()
        if criteria_question:
            criteria_question.active = True
        else:
            criteria_question = CriteriaAndPostsForQuestions()
            criteria_question.criteria_id = criteria_id
            criteria_question.questions_id = question_id

        db.session.add(criteria_question)

        on_question_criteria_create.send(
            self,
            event_name=on_question_criteria_create.name,
            user=current_user,
            course_id=course_id,
            data={'question_id': question_id, 'criteria_id': criteria_id})

        db.session.commit()

        return {'criterion': marshal(criteria_question, dataformat.get_criteria_and_posts_for_questions())}
Example #8
    def post(self, course_id, question_id, answer_id, comment_id):
        """
        Create an answer comment
        """
        Courses.exists_or_404(course_id)
        PostsForQuestions.query.options(load_only('id')).get_or_404(question_id)
        PostsForAnswers.query.options(load_only('id')).get_or_404(answer_id)
        comment = PostsForComments.query.get_or_404(comment_id)
        require(EDIT, comment)
        params = existing_comment_parser.parse_args()
        # make sure the comment id in the URL matches the submitted id
        if params['id'] != comment.id:
            return {"error": "Comment id does not match URL."}, 400
        # modify comment according to new values, preserve original values if values not passed
        comment.content = params.get("content")
        if not comment.content:
            return {"error": "The comment content is empty!"}, 400
        comment.answer_assoc.type = params.get("type")
        db.session.add(comment)

        on_answer_comment_modified.send(
            self,
            event_name=on_answer_comment_modified.name,
            user=current_user,
            course_id=course_id,
            data=get_model_changes(comment))

        db.session.commit()
        return marshal(comment, dataformat.get_answer_comment())
Example #9
    def turnovers(self):
        """
            Return the realised turnovers
        """
        result = dict(year_total=0)
        for month in range(1, 13):

            invoices = self.request.context.get_invoices(valid=True).options(
                load_only('ht')
            )

            date_condition = and_(
                extract('year', Invoice.date) == self.year,
                extract('month', Invoice.date) == month,
                Invoice.financial_year == self.year,
            )
            if month != 12:
                invoices = invoices.filter(date_condition)
            else:
                # for December, we also want to include invoices edited in January
                # and carried back to the previous accounting year
                reported_condition = and_(
                    Invoice.financial_year == self.year,
                    extract('year', Invoice.date) != self.year,
                )
                invoices = invoices.filter(
                    or_(date_condition, reported_condition)
                )

            invoice_sum = sum([invoice.ht for invoice in invoices])

            cinvoices = self.request.context.get_cancelinvoices(valid=True).options(
                load_only('ht')
            )

            date_condition = and_(
                extract('year', CancelInvoice.date) == self.year,
                extract('month', CancelInvoice.date) == month,
                CancelInvoice.financial_year == self.year,
            )
            if month != 12:
                cinvoices = cinvoices.filter(date_condition)
            else:
                reported_condition = and_(
                    CancelInvoice.financial_year == self.year,
                    extract('year', CancelInvoice.date) != self.year,
                )
                cinvoices = cinvoices.filter(
                    or_(date_condition, reported_condition)
                )

            cinvoice_sum = sum([cinvoice.ht for cinvoice in cinvoices])

            result[month] = invoice_sum + cinvoice_sum
            result['year_total'] += result[month]
        return result
Example #10
File: rdf.py Project: clld/clld
 def render(self, ctx, req):
     if req.params.get('sEcho'):
         # triggered from a datatable, thus potentially filtered and sorted
         items = ctx.get_query(limit=1000)
     else:
         # triggered without any filter parameters
         items = ctx.rdf_index_query(req.db.query(ctx.db_model()).order_by(ctx.db_model().pk))
     if isinstance(ctx.model.name, property):
         items = [(item.id, None) for item in items.options(load_only('id'))]
     else:
         items = [(item.id, item.name)
                  for item in items.options(load_only('id', 'name'))]
     return convert(super(RdfIndex, self).render(items, req), 'xml', self.rdflibname)
Example #11
 def _process(self):
     self.user.settings.set('suggest_categories', True)
     tz = session.tzinfo
     hours, minutes = timedelta_split(tz.utcoffset(datetime.now()))[:2]
     categories = get_related_categories(self.user)
     categories_events = []
     if categories:
         category_ids = {c['categ'].id for c in categories.itervalues()}
         today = now_utc(False).astimezone(tz).date()
         query = (Event.query
                  .filter(~Event.is_deleted,
                          Event.category_chain_overlaps(category_ids),
                          Event.start_dt.astimezone(session.tzinfo) >= today)
                  .options(joinedload('category').load_only('id', 'title'),
                           joinedload('series'),
                           subqueryload('acl_entries'),
                           load_only('id', 'category_id', 'start_dt', 'end_dt', 'title', 'access_key',
                                     'protection_mode', 'series_id', 'series_pos', 'series_count'))
                  .order_by(Event.start_dt, Event.id))
         categories_events = get_n_matching(query, 10, lambda x: x.can_access(self.user))
     from_dt = now_utc(False) - relativedelta(weeks=1, hour=0, minute=0, second=0)
     linked_events = [(event, {'management': bool(roles & self.management_roles),
                               'reviewing': bool(roles & self.reviewer_roles),
                               'attendance': bool(roles & self.attendance_roles)})
                      for event, roles in get_linked_events(self.user, from_dt, 10).iteritems()]
     return WPUser.render_template('dashboard.html', 'dashboard',
                                   offset='{:+03d}:{:02d}'.format(hours, minutes), user=self.user,
                                   categories=categories,
                                   categories_events=categories_events,
                                   suggested_categories=get_suggested_categories(self.user),
                                   linked_events=linked_events)
Example #12
def get_events_with_linked_sessions(user, dt=None):
    """Returns a dict with keys representing event_id and the values containing
    data about the user rights for sessions within the event

    :param user: A `User`
    :param dt: Only include events taking place on/after that date
    """
    query = (user.in_session_acls
             .options(load_only('session_id', 'roles', 'full_access', 'read_access'))
             .options(noload('*'))
             .options(contains_eager(SessionPrincipal.session).load_only('event_id'))
             .join(Session)
             .join(Event, Event.id == Session.event_id)
             .filter(~Session.is_deleted, ~Event.is_deleted, Event.ends_after(dt)))
    data = defaultdict(set)
    for principal in query:
        roles = data[principal.session.event_id]
        if 'coordinate' in principal.roles:
            roles.add('session_coordinator')
        if 'submit' in principal.roles:
            roles.add('session_submission')
        if principal.full_access:
            roles.add('session_manager')
        if principal.read_access:
            roles.add('session_access')
    return data
Example #13
def get_events_with_linked_sessions(user, from_dt=None, to_dt=None):
    """Returns a dict with keys representing event_id and the values containing
    data about the user rights for sessions within the event

    :param user: A `User`
    :param from_dt: The earliest event start time to look for
    :param to_dt: The latest event start time to look for
    """
    query = (user.in_session_acls
             .options(load_only('session_id', 'roles', 'full_access', 'read_access'))
             .options(noload('*'))
             .options(contains_eager(SessionPrincipal.session).load_only('event_id'))
             .join(Session)
             .join(Event, Event.id == Session.event_id)
             .filter(~Session.is_deleted, ~Event.is_deleted, Event.starts_between(from_dt, to_dt)))
    data = defaultdict(set)
    for principal in query:
        roles = data[principal.session.event_id]
        if 'coordinate' in principal.roles:
            roles.add('session_coordinator')
        if 'submit' in principal.roles:
            roles.add('session_submission')
        if principal.full_access:
            roles.add('session_manager')
        if principal.read_access:
            roles.add('session_access')
    return data
Example #14
def monitor_api_key_limits(self):
    result = {}
    try:
        today = util.utcnow().strftime('%Y%m%d')
        keys = self.redis_client.keys('apilimit:*:' + today)
        if keys:
            values = self.redis_client.mget(keys)
            keys = [k.split(':')[1] for k in keys]
        else:
            values = []

        names = {}
        if keys:
            with self.db_session(commit=False) as session:
                query = (ApiKey.querykeys(session, keys)
                               .options(load_only('valid_key', 'shortname')))
                for api_key in query.all():
                    names[api_key.valid_key] = api_key.name

        result = {}
        for k, v in zip(keys, values):
            name = names.get(k, k)
            value = int(v)
            result[name] = value
            self.stats_client.gauge('apilimit.' + name, value)
    except Exception:  # pragma: no cover
        # Log but ignore the exception
        self.raven_client.captureException()
    return result
Example #15
    def test_load_only_path_specific(self):
        User = self.classes.User
        Address = self.classes.Address
        Order = self.classes.Order

        users = self.tables.users
        addresses = self.tables.addresses
        orders = self.tables.orders

        mapper(User, users, properties=util.OrderedDict([
                ("addresses", relationship(Address, lazy="joined")),
                ("orders", relationship(Order, lazy="joined"))
            ]))

        mapper(Address, addresses)
        mapper(Order, orders)

        sess = create_session()

        q = sess.query(User).options(
                load_only("name").defaultload("addresses").load_only("id", "email_address"),
                defaultload("orders").load_only("id")
            )

        # hmmmm joinedload seems to be forcing users.id into here...
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.email_address AS addresses_1_email_address, "
            "orders_1.id AS orders_1_id FROM users "
            "LEFT OUTER JOIN addresses AS addresses_1 "
            "ON users.id = addresses_1.user_id "
            "LEFT OUTER JOIN orders AS orders_1 ON users.id = orders_1.user_id"
        )
Example #16
def conflict_create(order_id):
    """
    Renders conflict create page.
    """
    order = Order.query.get(order_id)
    form = CreateConflict(formdata=request.form)
    form.user_connected.choices = [
        (user.username, user.username) for user in User.query.options(load_only("username")).all()
    ]
    form.user_connected.choices.append(("None", "None"))
    form.user_connected.default = ("None", "None")
    if request.method == "POST":
        conflict = Conflict()
        conflict.did_order_come = request.form.get("did_order_come") == "y"
        conflict.i_know_who = request.form.get("i_know_who") == "y"
        conflict.user_connected = request.form.get("user_connected")
        conflict.order_connected = order.id
        conflict.created_by_user = current_user.username
        db.session.add(conflict)
        db.session.commit()
        if conflict.i_know_who:
            new_conflict = Conflict.query.order_by(Conflict.date_added.desc()).first()
            conflict_url = server_url() + url_for("conflict_resolve", conf_id=new_conflict.id)
            msg = Message("Lunch app new conflict", recipients=[conflict.user_connected])
            msg.body = (
                "You were chosen as the one who ate my lunch! "
                "Please use the link below to respond"
                " \n\n {}".format(conflict_url)
            )
            mail.send(msg)
        flash("Conflict created")
        return redirect("my_orders")
    return render_template("conflict_create.html", form=form)
Example #17
    def delete_all(self, collection_id, parent_id, filters=None,
                   with_deleted=True, id_field=DEFAULT_ID_FIELD,
                   modified_field=DEFAULT_MODIFIED_FIELD,
                   deleted_field=DEFAULT_DELETED_FIELD,
                   auth=None):
        """Delete all objects in this `collection_id` for this `parent_id`.

        :param str collection_id: the collection id.
        :param str parent_id: the collection parent.

        :param filters: Optionally filter the objects to delete.
        :type filters: list of :class:`cliquet.storage.Filter`
        :param bool with_deleted: track deleted records with a tombstone

        :returns: the list of deleted objects, with minimal set of attributes.
        :rtype: list of dict
        """
        qry = Session.query(self.collection).options(load_only('id'))\
                     .filter(and_(self.collection.parent_id == parent_id,
                                  getattr(self.collection, deleted_field) == False))
        for every in (filters or []):
            qry = qry.filter(SQLAFilter(self.collection, every)())
        rows = [{"id": every.id, "parent_id": parent_id, "collection_id": collection_id,
                 modified_field: datetime.datetime.utcnow()} for every in qry.all()]
        Session.bulk_update_mappings(self.collection,
                                     [{"id": every['id'], deleted_field: True,
                                       modified_field: every[modified_field]} for every in rows])
        if with_deleted:
            Session.bulk_insert_mappings(Deleted, rows)
        return rows
Example #18
def get_events_with_submitted_surveys(user, from_dt=None, to_dt=None):
    """Gets the IDs of events where the user submitted a survey.

    :param user: A `User`
    :param from_dt: The earliest event start time to look for
    :param to_dt: The latest event start time to look for
    :return: A set of event ids
    """
    event_date_filter = True
    if from_dt and to_dt:
        event_date_filter = IndexedEvent.start_date.between(from_dt, to_dt)
    elif from_dt:
        event_date_filter = IndexedEvent.start_date >= from_dt
    elif to_dt:
        event_date_filter = IndexedEvent.start_date <= to_dt
    # Survey submissions are not stored in links anymore, so we need to get them directly
    query = (
        user.survey_submissions.options(load_only("survey_id"))
        .options(joinedload(SurveySubmission.survey).load_only("event_id"))
        .join(Survey)
        .join(Event)
        .join(IndexedEvent, IndexedEvent.id == Survey.event_id)
        .filter(~Survey.is_deleted, ~Event.is_deleted)
        .filter(event_date_filter)
    )
    return {submission.survey.event_id for submission in query}
Example #19
 def validate_section_id(self, field):
     session = self.get_session()
     field.data = field.data if field.data > 0 else None
     if field.data is None:
         return
     if field.data not in [x.id for x in session.query(Section).options(load_only("id")).all()]:
         raise ValueError(u'Неверный раздел')  # "Invalid section"
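A hedged aside: instead of loading every Section id into memory for the membership test above, the same check could be expressed as a direct query. A sketch, assuming the same Section model and session as in the example:

def section_exists(session, section_id):
    # Fetch at most one row instead of materializing all Section ids.
    return session.query(Section.id).filter(Section.id == section_id).first() is not None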
Example #20
def query_database(query, raven_client):
    macs = [lookup.mac for lookup in query.wifi]
    if not macs:  # pragma: no cover
        return []

    result = []
    today = util.utcnow().date()
    temp_blocked = today - TEMPORARY_BLOCKLIST_DURATION

    try:
        load_fields = ('lat', 'lon', 'radius')
        shards = defaultdict(list)
        for mac in macs:
            shards[WifiShard.shard_model(mac)].append(mac)

        for shard, shard_macs in shards.items():
            rows = (
                query.session.query(shard)
                             .filter(shard.mac.in_(shard_macs))
                             .filter(shard.lat.isnot(None))
                             .filter(shard.lon.isnot(None))
                             .filter(or_(
                                 shard.block_count.is_(None),
                                 shard.block_count <
                                     PERMANENT_BLOCKLIST_THRESHOLD))
                             .filter(or_(
                                 shard.block_last.is_(None),
                                 shard.block_last < temp_blocked))
                             .options(load_only(*load_fields))
            ).all()
            result.extend(list(rows))
    except Exception:
        raven_client.captureException()
    return result
Example #21
    def __call__(self):
        today = util.utcnow().strftime('%Y%m%d')
        keys = self.redis_client.keys('apilimit:*:' + today)
        if keys:
            values = self.redis_client.mget(keys)
            keys = [k.decode('utf-8').split(':')[1:3] for k in keys]
            api_keys = [key[0] for key in keys]
        else:
            values = []
            api_keys = []

        names = {}
        if api_keys:
            query = (self.session.query(ApiKey)
                                 .filter(ApiKey.valid_key.in_(api_keys))
                                 .options(load_only('shortname')))
            for api_key in query.all():
                names[api_key.valid_key] = api_key.name

        result = defaultdict(dict)
        for key, value in zip(keys, values):
            api_key, path = key
            name = names.get(api_key, api_key)
            value = int(value)
            result[name][path] = value
            self.stats_client.gauge(
                'api.limit', value, tags=['key:' + name, 'path:' + path])
        return result
Example #22
 def bounding_box_query(ne_lat, ne_lng, sw_lat, sw_lng, start_date, end_date,
                        fatal, severe, light, inaccurate, is_thin=False, yield_per=None):
     # example:
     # ne_lat=32.36292402647484&ne_lng=35.08873443603511&sw_lat=32.29257266524761&sw_lng=34.88445739746089
     # >>>  m = Marker.bounding_box_query(32.36, 35.088, 32.292, 34.884)
     # >>> m.count()
     # 250
     accurate = not inaccurate
     markers = Marker.query \
         .filter(Marker.longitude <= ne_lng) \
         .filter(Marker.longitude >= sw_lng) \
         .filter(Marker.latitude <= ne_lat) \
         .filter(Marker.latitude >= sw_lat) \
         .filter(Marker.created >= start_date) \
         .filter(Marker.created < end_date) \
         .order_by(desc(Marker.created))
     if yield_per:
         markers = markers.yield_per(yield_per)
     if accurate:
         markers = markers.filter(Marker.locationAccuracy == 1)
     if not fatal:
         markers = markers.filter(Marker.severity != 1)
     if not severe:
         markers = markers.filter(Marker.severity != 2)
     if not light:
         markers = markers.filter(Marker.severity != 3)
     if is_thin:
         markers = markers.options(load_only("id", "longitude", "latitude"))
     return markers
Example #23
def update_linked_route_titles(waypoint, update_types, user_id):
    """When a waypoint is the main waypoint of a route, the field
    `title_prefix`, which caches the waypoint name, has to be updated.
    This method takes care of updating all routes that the waypoint is
    the "main waypoint" of.
    """
    if UpdateType.LANG not in update_types:
        # if the locales did not change, no need to continue
        return

    linked_routes = DBSession.query(Route). \
        filter(Route.main_waypoint_id == waypoint.document_id). \
        options(joinedload(Route.locales).load_only(
            RouteLocale.lang, RouteLocale.id)). \
        options(load_only(Route.document_id)). \
        all()

    if linked_routes:
        waypoint_locales = waypoint.locales
        waypoint_locales_index = {
            locale.lang: locale for locale in waypoint_locales}

        for route in linked_routes:
            set_route_title_prefix(
                route, waypoint_locales, waypoint_locales_index)
Example #24
def get_problem_info(shortname):
    """Serve the PDF description of a problem"""
    pid = (database.session.query(Problem)
           .options(load_only('pid', 'shortname'))
           .filter(Problem.shortname == shortname)
           .first().pid)
    return serve_info_pdf(str(pid))
Example #25
def get_all_report_hashes(
    db, date_from=None, date_to=None, opsys=None, opsys_releases=None, limit_from=None, limit_to=None
):
    """
    Return ReportHash instances if there is at least one bug in the database for the selected date range
    """
    query = db.session.query(ReportHash).join(Report).options(load_only("hash"))

    if opsys and opsys != "*":
        if opsys == "rhel":
            opsys = "Red Hat Enterprise Linux"

        query = query.join(ReportOpSysRelease).join(OpSysRelease).join(OpSys).filter(OpSys.name == opsys)

        if opsys_releases and opsys_releases != "*":
            query = query.filter(OpSysRelease.version == opsys_releases)

    if date_from and date_from != "*":
        query = query.filter(Report.last_occurrence >= date_from)

    if date_to and date_to != "*":
        query = query.filter(Report.last_occurrence <= date_to)

    if limit_from is not None and limit_to is not None:
        query = query.slice(limit_from, limit_to)

    return query.all()
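A hypothetical call, just to illustrate the filter parameters (the date range, operating system name, and release value are made up; db is the handle the function already expects):

import datetime

hashes = get_all_report_hashes(
    db,
    date_from=datetime.date(2021, 1, 1),
    date_to=datetime.date(2021, 1, 31),
    opsys='Fedora',
    opsys_releases='33',
)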
Example #26
def serialize_category_atom(category, url, user, event_filter):
    """Export the events in a category to Atom

    :param category: The category to export
    :param url: The URL of the feed
    :param user: The user who needs to be able to access the events
    :param event_filter: A SQLalchemy criterion to restrict which
                         events will be returned.  Usually something
                         involving the start/end date of the event.
    """
    query = (Event.query
             .filter(Event.category_chain.contains([int(category.getId())]),
                     ~Event.is_deleted,
                     event_filter)
             .options(load_only('id', 'start_dt', 'title', 'description', 'protection_mode'),
                      subqueryload('acl_entries'))
             .order_by(Event.start_dt))
    events = [e for e in query if e.can_access(user)]

    feed = AtomFeed(feed_url=url, title='Indico Feed [{}]'.format(to_unicode(category.getTitle())))
    for event in events:
        feed.add(title=event.title,
                 summary=unicode(event.description),  # get rid of RichMarkup
                 url=url_for('event.conferenceDisplay', confId=event.id, _external=True),
                 updated=event.start_dt)
    return BytesIO(feed.to_string().encode('utf-8'))
Example #27
def serialize_category_ical(category, user, event_filter):
    """Export the events in a category to iCal

    :param category: The category to export
    :param user: The user who needs to be able to access the events
    :param event_filter: A SQLalchemy criterion to restrict which
                         events will be returned.  Usually something
                         involving the start/end date of the event.
    """
    own_room_strategy = joinedload('own_room')
    own_room_strategy.load_only('building', 'floor', 'number', 'name')
    own_room_strategy.lazyload('owner')
    own_venue_strategy = joinedload('own_venue').load_only('name')
    query = (Event.query
             .filter(Event.category_chain.contains([int(category.getId())]),
                     ~Event.is_deleted,
                     event_filter)
             .options(load_only('id', 'start_dt', 'end_dt', 'title', 'description', 'own_venue_name',
                                'own_room_name', 'protection_mode'),
                      subqueryload('acl_entries'),
                      joinedload('person_links'),
                      own_room_strategy,
                      own_venue_strategy)
             .order_by(Event.start_dt))
    events = [e for e in query if e.can_access(user)]
    cal = ical.Calendar()
    cal.add('version', '2.0')
    cal.add('prodid', '-//CERN//INDICO//EN')

    now = now_utc(False)
    for event in events:
        url = url_for('event.conferenceDisplay', confId=event.id, _external=True)
        location = ('{} ({})'.format(event.room_name, event.venue_name)
                    if event.venue_name and event.room_name
                    else (event.venue_name or event.room_name))
        cal_event = ical.Event()
        cal_event.add('uid', u'indico-event-{}@cern.ch'.format(event.id))
        cal_event.add('dtstamp', now)
        cal_event.add('dtstart', event.start_dt)
        cal_event.add('dtend', event.end_dt)
        cal_event.add('url', url)
        cal_event.add('summary', event.title)
        cal_event.add('location', location)
        description = []
        if event.person_links:
            speakers = [u'{} ({})'.format(x.full_name, x.affiliation) if x.affiliation else x.full_name
                        for x in event.person_links]
            description.append(u'Speakers: {}'.format(u', '.join(speakers)))

        if event.description:
            desc_text = unicode(event.description) or u'<p/>'  # get rid of RichMarkup
            try:
                description.append(unicode(html.fromstring(desc_text).text_content()))
            except ParserError:
                # this happens e.g. if desc_text contains only a html comment
                pass
        description.append(url)
        cal_event.add('description', u'\n'.join(description))
        cal.add_component(cal_event)
    return BytesIO(cal.to_ical())
Example #28
    def _getBody(self, params):
        attached_items = self.event.attached_items
        folders = [folder for folder in attached_items.get('folders', []) if folder.title != 'Internal Page Files']
        files = attached_items.get('files', [])

        lectures = []
        if self.event.series is not None and self.event.series.show_links:
            lectures = (Event.query.with_parent(self.event.series)
                        .filter(Event.id != self.event.id)
                        .options(load_only('series_pos', 'id'))
                        .order_by(Event.series_pos)
                        .all())

        plugin = self.theme.get('plugin')
        tpl_name = self.theme['template']
        tpl = ((plugin.name + tpl_name)
               if (plugin and tpl_name[0] == ':')
               else posixpath.join('events/display', tpl_name))

        rv = render_template(tpl,
                             event=self.event,
                             category=self.event.category.title,
                             timezone=self.event.display_tzinfo,
                             theme_settings=self.theme.get('settings', {}),
                             theme_user_settings=layout_settings.get(self.event, 'timetable_theme_settings'),
                             files=files,
                             folders=folders,
                             lectures=lectures)
        return rv.encode('utf-8')
Example #29
    def update(self, batch=1000):
        queue = self.task.app.data_queues['update_mapstat']
        today = util.utcnow().date()
        positions = queue.dequeue(batch=batch)
        if not positions:
            return 0

        found = set()
        wanted = set()
        for position in positions:
            wanted.add(MapStat.to_hashkey(lat=MapStat.scale(position['lat']),
                                          lon=MapStat.scale(position['lon'])))

        stat_iter = MapStat.iterkeys(
            self.session, list(wanted),
            extra=lambda query: query.options(load_only('lat', 'lon')))

        found = set([stat.hashkey() for stat in stat_iter])

        for key in (wanted - found):
            stmt = MapStat.__table__.insert(
                on_duplicate='id = id').values(
                time=today, lat=key.lat, lon=key.lon)
            self.session.execute(stmt)

        if queue.size() >= batch:
            self.task.apply_async(
                kwargs={'batch': batch},
                countdown=2,
                expires=10)

        return len(positions)
Example #30
def get_upcoming_events():
    """Get the global list of upcoming events"""
    from indico.modules.events import Event
    data = upcoming_events_settings.get_all()
    if not data['max_entries'] or not data['entries']:
        return
    tz = timezone(config.DEFAULT_TIMEZONE)
    now = now_utc(False).astimezone(tz)
    base_query = (Event.query
                  .filter(Event.effective_protection_mode == ProtectionMode.public,
                          ~Event.is_deleted,
                          Event.end_dt.astimezone(tz) > now)
                  .options(load_only('id', 'title', 'start_dt', 'end_dt')))
    queries = []
    cols = {'category': Event.category_id,
            'event': Event.id}
    for entry in data['entries']:
        delta = timedelta(days=entry['days'])
        query = (base_query
                 .filter(cols[entry['type']] == entry['id'])
                 .filter(db.cast(Event.start_dt.astimezone(tz), db.Date) > (now - delta).date())
                 .with_entities(Event, db.literal(entry['weight']).label('weight')))
        queries.append(query)

    query = (queries[0].union(*queries[1:])
             .order_by(db.desc('weight'), Event.start_dt, Event.title)
             .limit(data['max_entries']))
    for row in query:
        event = row[0]
        # we cache the result of the function and is_deleted is used in the repr
        # and having a broken repr on the cached objects would be ugly
        set_committed_value(event, 'is_deleted', False)
        yield event
Example #31
def _query_categ_events(categ, start_dt, end_dt):
    return (Event.query.with_parent(categ).filter(
        Event.happens_between(start_dt, end_dt)).options(
            load_only('id', 'start_dt', 'end_dt')))
Example #32
def getMedical_Case(columns=None):
    u = Medical_Case.query
    if columns:
        u = u.options(orm.load_only(*columns))
    return u
Example #33
    def get_apt_info_df(self, keyword, columns):

        try:
            # http://www.leeladharan.com/sqlalchemy-query-with-or-and-like-common-filters
            from sqlalchemy import or_

            # http://docs.sqlalchemy.org/en/latest/orm/loading_columns.html#load-only-cols
            from sqlalchemy.orm import load_only
            aptqr : list[AptInfo]
            if not columns:
                aptqr = db.session.query(AptInfo).filter(or_(AptInfo.법정동주소.like('%'+keyword+'%'), AptInfo.도로명주소.like('%'+keyword+'%')))
            else:
                aptqr = db.session.query(AptInfo).options(load_only(*columns)).filter(or_(AptInfo.법정동주소.like('%'+keyword+'%'), AptInfo.도로명주소.like('%'+keyword+'%')))

            aptinfo_list_df = pd.read_sql(aptqr.statement, aptqr.session.bind).drop(columns=['index'])


            # This took three hours to figure out; knowing pandas basics
            # really matters. str.split turns out to be very useful here.
            # https://stackoverflow.com/questions/14745022/how-to-split-a-column-into-two-columns
            # I wanted to try the expand option, but couldn't set its column names that way.
            z = aptinfo_list_df['사용승인일'].str.split('-').str[0].apply(func=lambda x: str(2018-int(x)))

            def extyear(series):
                return series['사용승인일'].str.split('-').str[0].apply(func=lambda x: str(2018-int(x)))

            # https://stackoverflow.com/a/49278320/5443084
            from datetime import datetime
            n = datetime.now()

            order = ["연번","아파트이름",	"법정동주소","연차","동수",	"세대수", "복도유형","관리사무소연락처",	"일차",	"요일",	"금액",	"문어발",	"장수",	"비고"]

            # reset index!! wow
            y = aptinfo_list_df \
                .assign(연차=lambda x: extyear(x),일차='',요일='',금액='',문어발='',장수='',비고='')\
                .drop(columns=['사용승인일'])\
                .reindex(columns=order)

            # set type
            y["연차"] = y["연차"].astype('int')
            y["동수"] = y["동수"].astype('int')
            y["세대수"] = y["세대수"].astype('int')

            # filtering by range
            # http://cmdlinetips.com/2018/02/how-to-subset-pandas-dataframe-based-on-values-of-a-column/
            y = y.sort_values(by=['연차'], axis=0)

            # todo supply range selection interface
            # https://stackoverflow.com/questions/31617845/how-to-select-rows-in-a-dataframe-between-two-values-in-python-pandas
            # y = y[(y['연차'] >= 10) & (y['연차'] <= 15)].reset_index(drop=True)
            y = y[y['연차'] >= 16].reset_index(drop=True)

            # add the index as a new column
            # https://stackoverflow.com/a/20461206/5443084
            # y.reindex(columns=order)
            y['연번'] = y.index
            print(y)

            return y
        except Exception as e:
            print('아파트 정보 가져오기 오류')  # "failed to fetch apartment info"
            print(e.args)
            return '<h1>Something is broken.</h1>' + str(e.args)
Example #34
def reindex_pairs(pairs):
    """Reindex selected snapshots.

    Args:
      pairs: A list of parent-child pairs that uniquely represent the snapshot
        objects whose properties should be reindexed.
    """
    if not pairs:
        return
    snapshots = dict()
    options = get_options()
    snapshot_query = models.Snapshot.query.filter(
        tuple_(
            models.Snapshot.parent_type,
            models.Snapshot.parent_id,
            models.Snapshot.child_type,
            models.Snapshot.child_id,
        ).in_({pair.to_4tuple()
               for pair in pairs})).options(
                   orm.subqueryload("revision").load_only(
                       "id",
                       "resource_type",
                       "resource_id",
                       "_content",
                   ),
                   orm.load_only(
                       "id",
                       "parent_type",
                       "parent_id",
                       "child_type",
                       "child_id",
                       "revision_id",
                   ))
    cad_dict = _get_custom_attribute_dict()
    for snapshot in snapshot_query:
        revision = snapshot.revision
        snapshots[snapshot.id] = {
            "id":
            snapshot.id,
            "parent_type":
            snapshot.parent_type,
            "parent_id":
            snapshot.parent_id,
            "child_type":
            snapshot.child_type,
            "child_id":
            snapshot.child_id,
            "revision":
            get_searchable_attributes(CLASS_PROPERTIES[revision.resource_type],
                                      cad_dict[revision.resource_type],
                                      revision.content)
        }
    search_payload = []
    for snapshot in snapshots.values():
        for prop, val in get_properties(snapshot).items():
            search_payload.extend(
                get_record_value(
                    prop, val, {
                        "key": snapshot["id"],
                        "type": "Snapshot",
                        "tags": TAG_TMPL.format(**snapshot),
                        "subproperty": "",
                    }, options))
    delete_records(snapshots.keys())
    insert_records(search_payload)
Example #35
    def set_or_update(self, json_data):
        # Set self.updated, and more importantly, self.run.updated, to avoid
        # deadlocks when lots of jobs are updated at once.
        self.run.updated = self.updated = datetime.utcnow()

        status_map = {
            True: 'pass',
            False: 'fail',
            None: 'unknown',
        }

        old_status = self.status
        old_run_status = self.run.status
        if 'status' in json_data:
            status = json_data.pop('status')
            if status not in self.allowed_statuses:
                raise ValueError("Job status must be one of: %s" %
                                 self.allowed_statuses)
            if self.status not in ('pass', 'fail'):
                self.status = status
        elif 'success' in json_data:
            success = json_data.pop('success')
            self.status = status_map.get(success)
            self.success = success
        elif self.success is None and self.status is None:
            self.status = 'unknown'

        if old_status in (None, 'queued') and self.status == 'running':
            self.started = datetime.utcnow()

        if self.status != old_status:
            # Submit pass/fail/dead stats to statsd
            if self.status in ('pass', 'fail', 'dead'):
                counter = get_statsd_client().get_counter('jobs.status')
                counter.increment(self.status)
            self.run.set_status()

        if old_run_status != 'running' and self.run.status == 'running':
            self.run.started = self.started

        target_nodes_q = self.target_nodes.options(load_only('id', 'name'))
        if len(json_data.get('targets', {})) > len(target_nodes_q.all()):
            # Populate self.target_nodes, creating Node objects if necessary
            targets = json_data['targets']
            for target_key in targets.keys():
                if '@' in target_key:
                    hostname = target_key.split('@')[1]
                else:
                    hostname = target_key
                node_q = Node.query.options(load_only('id', 'name'))\
                    .filter(Node.name == hostname)
                try:
                    node = node_q.one()
                except NoResultFound:
                    node = Node(name=hostname)
                    mtype = json_data.get('machine_type')
                    if mtype:
                        node.machine_type = mtype
                if node not in self.target_nodes:
                    self.target_nodes.append(node)

        for k, v in json_data.items():
            key = k.replace('-', '_')
            if key == 'updated':
                self.set_updated(v)
                continue
            # Correct potentially-incorrect Run.suite/branch values
            # We started putting the suite/branch names in the job config on
            # 5/1/2014
            elif key == 'suite' and self.run.suite != v:
                self.run.suite = v
            elif key == 'branch' and self.run.branch != v:
                self.run.branch = v
            # Correct a potential 'multi' value parsed from the run name to be
            # equal to the actual value given to the runs
            elif key == 'machine_type' and self.run.machine_type != v:
                self.run.machine_type = v
            if key in self.allowed_keys:
                setattr(self, key, v)
Example #36
def uniqueCategory(Category_name):
    categories = session.query(Category).options(load_only(Category.name))
    for category in categories:
        if Category_name.lower() == category.name.lower():
            return False
    return True
Example #37
def transition_attributes(profile=None,
                          group_size=1000,
                          debug=False,
                          delete_table=False):
    """
    Migrate the DbAttribute table into the attributes column of db_dbnode.
    """
    if not is_dbenv_loaded():
        transition_load_db_env(profile=profile)

    class DbAttribute(Base):
        """
        DbAttribute table, use only for migration purposes.
        """
        __tablename__ = ATTR_TABLE_NAME

        id = Column(Integer, primary_key=True)

        key = Column(String(1024), nullable=False)
        datatype = Column(String(10), nullable=False)

        tval = Column(Text, nullable=False)
        fval = Column(Float)
        ival = Column(Integer)
        bval = Column(Boolean)
        dval = Column(DateTime(timezone=True))

        dbnode_id = Column(Integer, ForeignKey('db_dbnode.id'), nullable=False)
        dbnode = relationship('DbNode', backref='old_attrs')

    print("\nStarting migration of attributes")

    inspector = reflection.Inspector.from_engine(sa.get_scoped_session().bind)

    table_names = inspector.get_table_names()
    if NODE_TABLE_NAME not in table_names:
        raise Exception(
            "There is no {} table in the database. Transition"
            "to SQLAlchemy can not be done. Exiting".format(NODE_TABLE_NAME))

    node_table_cols = inspector.get_columns(NODE_TABLE_NAME)
    col_names = [_["name"] for _ in node_table_cols]

    if ATTR_COL_NAME in col_names:
        print(
            "Column named {} found at the {} table of the database. I assume "
            "that the migration of the attributes has already been done and "
            "therefore I proceed with the next migration step.".format(
                ATTR_COL_NAME, NODE_TABLE_NAME))
        return

    session = sa.get_scoped_session()

    with session.begin(subtransactions=True):
        print("Creating columns..")
        session.execute('ALTER TABLE db_dbnode ADD COLUMN attributes '
                        'JSONB DEFAULT \'{}\'')
        from aiida.backends.sqlalchemy.models.node import DbNode
        total_nodes = session.query(func.count(DbNode.id)).scalar()

        total_groups = int(math.ceil(total_nodes / float(group_size)))
        error = False

        for i in xrange(total_groups):
            print("Migrating group {} of {}".format(i, total_groups))

            nodes = DbNode.query.options(
                subqueryload('old_attrs'),
                load_only('id', 'attributes')).order_by(
                    DbNode.id)[i * group_size:(i + 1) * group_size]

            for node in nodes:
                attrs, err_ = attributes_to_dict(
                    sorted(node.old_attrs, key=lambda a: a.key))
                error |= err_

                node.attributes = attrs
                session.add(node)

            # Remove the db_dbnode from sqlalchemy, to allow the GC to do its
            # job.
            session.flush()
            session.expunge_all()

            del nodes
            gc.collect()

        if error:
            cont = query_yes_no(
                "There has been some errors during the "
                "migration. Do you want to continue?", "no")
            if not cont:
                session.rollback()
                sys.exit(-1)
        if delete_table:
            session.execute('DROP TABLE db_dbattribute')
    session.commit()
    print("Migration of attributes finished.")
Example #38
def cleanup_function(**context):

    logging.info("Retrieving max_execution_date from XCom")
    max_date = context["ti"].xcom_pull(task_ids=print_configuration.task_id,
                                       key="max_date")
    max_date = dateutil.parser.parse(max_date)  # stored as iso8601 str in xcom

    airflow_db_model = context["params"].get("airflow_db_model")
    state = context["params"].get("state")
    age_check_column = context["params"].get("age_check_column")
    keep_last = context["params"].get("keep_last")
    keep_last_filters = context["params"].get("keep_last_filters")
    keep_last_group_by = context["params"].get("keep_last_group_by")

    logging.info("Configurations:")
    logging.info("max_date:                 " + str(max_date))
    logging.info("enable_delete:            " + str(ENABLE_DELETE))
    logging.info("session:                  " + str(session))
    logging.info("airflow_db_model:         " + str(airflow_db_model))
    logging.info("state:                    " + str(state))
    logging.info("age_check_column:         " + str(age_check_column))
    logging.info("keep_last:                " + str(keep_last))
    logging.info("keep_last_filters:        " + str(keep_last_filters))
    logging.info("keep_last_group_by:          " + str(keep_last_group_by))

    logging.info("")

    logging.info("Running Cleanup Process...")

    query = session.query(airflow_db_model).options(
        load_only(age_check_column))

    logging.info("INITIAL QUERY : " + str(query))

    if keep_last:

        subquery = session.query(func.max(DagRun.execution_date))
        # workaround for MySQL "table specified twice" issue
        # https://github.com/teamclairvoyant/airflow-maintenance-dags/issues/41
        if keep_last_filters is not None:
            for entry in keep_last_filters:
                subquery = subquery.filter(entry)

            logging.info("SUB QUERY [keep_last_filters]: " + str(subquery))

        if keep_last_group_by is not None:
            subquery = subquery.group_by(keep_last_group_by)
            logging.info("SUB QUERY [keep_last_group_by]: " + str(subquery))

        subquery = subquery.from_self()

        query = query.filter(and_(age_check_column.notin_(subquery)),
                             and_(age_check_column <= max_date))

    else:
        query = query.filter(age_check_column <= max_date, )

    entries_to_delete = query.all()

    logging.info("Query : " + str(query))
    logging.info("Process will be Deleting the following " +
                 str(airflow_db_model.__name__) + "(s):")
    for entry in entries_to_delete:
        logging.info("\tEntry: " + str(entry) + ", Date: " +
                     str(entry.__dict__[str(age_check_column).split(".")[1]]))

    logging.info("Process will be Deleting " + str(len(entries_to_delete)) +
                 " " + str(airflow_db_model.__name__) + "(s)")

    if ENABLE_DELETE:
        logging.info("Performing Delete...")
        #using bulk delete
        query.delete(synchronize_session=False)
        session.commit()
        logging.info("Finished Performing Delete")
    else:
        logging.warn("You're opted to skip deleting the db entries!!!")

    logging.info("Finished Running Cleanup Process")
Example #39
def get_all_bibcodes():
    with app.session_scope() as session:
        for r in session.query(Records).options(load_only('bibcode')).all():
            yield r.bibcode
Example #40
 def get_genres(self, uri):
     genres = session.query(Audio_Params).filter(Audio_Params.uri == uri). \
         options(load_only("jazz", "rb", "rock", "country", "dance", "hh", "classical", "pop", "ed",
                           "speed", "vol", "valence", "instru"))
     return genres
Example #41
def getPhysician(columns=None):
    u = Physician.query
    if columns:
        u = u.options(orm.load_only(*columns))
    return u
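Hypothetical usage of the helper above (the column names are assumptions about the Physician model, not taken from it):

# Load only the id and name columns; other attributes stay deferred.
physicians = getPhysician(columns=['id', 'name']).all()
all_columns = getPhysician().all()  # no columns given: nothing is deferred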
Example #42
    def get_permissions_for_user(cls, user, allow_admin=True):
        """Get the permissions for all rooms for a user.

        In case of multipass-based groups it will try to get a list of
        all groups the user is in, and if that's not possible check the
        permissions one by one for each room (which may result in many
        group membership lookups).

        It is recommended to not call this in any place where performance
        matters and to memoize the result.
        """
        # XXX: When changing the logic in here, make sure to update can_* as well!
        all_rooms_query = (Room.query.filter(~Room.is_deleted).options(
            load_only('id', 'protection_mode',
                      'reservations_need_confirmation', 'is_reservable',
                      'owner_id'),
            joinedload('owner').load_only('id'), joinedload('acl_entries')))
        is_admin = allow_admin and cls.is_user_admin(user)
        if (is_admin and allow_admin) or not user.can_get_all_multipass_groups:
            # check one by one if we can't get a list of all groups the user is in
            return {
                r.id: {
                    'book': r.can_book(user, allow_admin=allow_admin),
                    'prebook': r.can_prebook(user, allow_admin=allow_admin),
                    'override': r.can_override(user, allow_admin=allow_admin),
                    'moderate': r.can_moderate(user, allow_admin=allow_admin),
                    'manage': r.can_manage(user, allow_admin=allow_admin),
                }
                for r in all_rooms_query
            }

        criteria = [
            db.and_(RoomPrincipal.type == PrincipalType.user,
                    RoomPrincipal.user_id == user.id)
        ]
        for group in user.local_groups:
            criteria.append(
                db.and_(RoomPrincipal.type == PrincipalType.local_group,
                        RoomPrincipal.local_group_id == group.id))
        for group in user.iter_all_multipass_groups():
            criteria.append(
                db.and_(
                    RoomPrincipal.type == PrincipalType.multipass_group,
                    RoomPrincipal.multipass_group_provider ==
                    group.provider.name,
                    db.func.lower(RoomPrincipal.multipass_group_name) ==
                    group.name.lower()))

        data = {}
        permissions = {'book', 'prebook', 'override', 'moderate', 'manage'}
        prebooking_required_rooms = set()
        non_reservable_rooms = set()
        for room in all_rooms_query:
            is_owner = user == room.owner
            data[room.id] = {x: False for x in permissions}
            if room.reservations_need_confirmation:
                prebooking_required_rooms.add(room.id)
            if not room.is_reservable:
                non_reservable_rooms.add(room.id)
            if (room.is_reservable and
                (room.is_public or is_owner)) or (is_admin and allow_admin):
                if not room.reservations_need_confirmation or is_owner or (
                        is_admin and allow_admin):
                    data[room.id]['book'] = True
                if room.reservations_need_confirmation:
                    data[room.id]['prebook'] = True
            if is_owner or (is_admin and allow_admin):
                data[room.id]['override'] = True
                data[room.id]['moderate'] = True
                data[room.id]['manage'] = True
        query = (RoomPrincipal.query.join(Room).filter(
            ~Room.is_deleted, db.or_(*criteria)).options(
                load_only('room_id', 'full_access', 'permissions')))
        for principal in query:
            is_reservable = principal.room_id not in non_reservable_rooms
            for permission in permissions:
                if not is_reservable and not (is_admin and allow_admin
                                              ) and permission in ('book',
                                                                   'prebook'):
                    continue
                explicit = permission == 'prebook' and principal.room_id not in prebooking_required_rooms
                check_permission = None if permission == 'manage' else permission
                if principal.has_management_permission(check_permission,
                                                       explicit=explicit):
                    data[principal.room_id][permission] = True
        return data
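
A hedged sketch of consuming the resulting mapping, assuming the method lives on the Room model as the queries above suggest and that a user and a room object are already available; each room id maps to a dict of booleans keyed by permission name:

# Hedged usage sketch; `user` and `room` come from the surrounding application.
permissions = Room.get_permissions_for_user(user)
room_perms = permissions.get(room.id, {})
if room_perms.get('book'):
    print('user may book room', room.id)
elif room_perms.get('prebook'):
    print('user may only pre-book room', room.id)
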
Example #43
0
    def verify_user(
        self, client_unique_id: str, client_database_id: str, client_id: str
    ) -> bool:
        """
        Verify a user if they are in a known group, otherwise nothing is done.
        Groups are revoked/updated if necessary

        :param client_unique_id: The client's UUID
        :param client_database_id: The database ID
        :param client_id: The client's temporary ID during the session
        :return: True if the user has/had a known group and False if the user is new
        """

        def revoked(response: str):
            if account:
                account.invalidate(self.session)

            changes = ts3bot.sync_groups(
                self, client_database_id, account, remove_all=True
            )

            reason = "unknown reason"
            if response == "groups_revoked_missing_key":
                reason = "missing API key"
            elif response == "groups_revoked_invalid_key":
                reason = "invalid API key"

            logging.info(
                "Revoked user's (cldbid:%s) groups (%s) due to %s.",
                client_database_id,
                changes["removed"],
                reason,
            )
            self.send_message(client_id, response)

        # Get all current groups
        server_groups = self.exec_("servergroupsbyclientid", cldbid=client_database_id)

        known_groups: typing.List[int] = (
            [
                _.group_id
                for _ in self.session.query(ts3bot.database.models.WorldGroup).options(
                    load_only(ts3bot.database.models.WorldGroup.group_id)
                )
            ]
            + [
                _.group_id
                for _ in self.session.query(ts3bot.database.models.Guild)
                .filter(ts3bot.database.models.Guild.group_id.isnot(None))
                .options(load_only(ts3bot.database.models.Guild.group_id))
            ]
            + [
                int(Config.get("teamspeak", "generic_world_id")),
                int(Config.get("teamspeak", "generic_guild_id")),
            ]
        )

        # Check if user has any known groups
        has_group = False
        has_skip_group = False
        for server_group in server_groups:
            if int(server_group.get("sgid", -1)) in known_groups:
                has_group = True
            if server_group.get("name") in Config.whitelist_groups:
                has_skip_group = True

        # Skip users without any known groups
        if not has_group:
            return False

        # Skip users in whitelisted groups that should be ignored like
        # guests, music bots, etc
        if has_skip_group:
            return True

        # Grab user's account info
        account = models.Account.get_by_identity(self.session, client_unique_id)

        # User does not exist in DB
        if not account:
            revoked("groups_revoked_missing_key")
            return True

        # User was checked, don't check again
        if ts3bot.timedelta_hours(
            datetime.datetime.today() - account.last_check
        ) < Config.getfloat("verify", "on_join_hours"):
            return True

        logging.debug("Checking %s/%s", account, client_unique_id)

        try:
            account.update(self.session)
            # Sync groups
            ts3bot.sync_groups(self, client_database_id, account)
        except ts3bot.InvalidKeyException:
            revoked("groups_revoked_invalid_key")
        except (
            requests.RequestException,
            ts3bot.RateLimitException,
            ts3bot.ApiErrBadData,
        ):
            logging.exception("Error during API call")

        return True
Example #44
0
    def _ser(cls,
             to_return=None,
             filter_by=None,
             limit=None,
             offset=None,
             query=None,
             skip_nones=False,
             order_by=None,
             session=None,
             expose_all=False,
             params=None):
        """
                Prepare query and fields to fetch obtain (from it)
                The query only fetches necessary fields.
            :param to_return: list of fields to return
            :param filter_by: dict of SQLAlchemy clause to filter by
            :param limit: maximum amount of objects fetched
            :param offset: offset value for the result
            :param query: optional base query
            :param skip_nones: Skip filter_by entries that have a "None" value
            :param order_by: enforce result ordering, multiple via tuple
            :param session: Explict session to use for query
            :param expose_all: Whether to Return not exposed fields
            :param params: Query parameters
            :return: tuple(query, json_to_serialize)
        """
        assert params is None or isinstance(params, dict)
        assert to_return is None or isinstance(to_return, (list, tuple))

        if to_return is None:
            assert isinstance(cls.default_serialization, tuple)
            to_return = list(cls.default_serialization)

        assert len(to_return) == len(
            set(to_return)), [x for x in to_return if to_return.count(x) > 1]

        # expand relationships to default fields
        expanded = []
        for path in to_return:
            expanded += cls.expand(path)
        to_return = expanded

        # remove not exposed columns
        if expose_all is not True:
            to_return = list(filter(cls._is_exposed_column, to_return))

        # todo: should only expire columns that use params
        # remove duplicates and store them so we know what to populate
        json_to_populate = list(set(to_return))
        # obtain all columns that need fetching from db
        to_fetch = list(set(cls._get_query_columns(to_return)))

        if query is None:
            query = cls.query
        if session is not None:
            query = query.with_session(session)
        if params is not None:
            query = query.params(**params)
        # ensure that fresh data is loaded
        query = query.populate_existing()
        if filter_by is not None:
            query = cls.filter(filter_by, query, skip_nones=skip_nones)

        # handle consistent ordering and tuple in all cases
        if order_by is None:
            order_by = cls.id
        if isinstance(order_by, tuple):
            if order_by[-1] != cls.id:
                order_by = order_by + (cls.id, )
        else:
            if order_by != cls.id:
                order_by = order_by, cls.id
            else:
                order_by = (order_by, )
        assert isinstance(order_by, tuple)

        # join columns in order_by where necessary
        data = {'query': query}
        order_by = cls._substitute_clause(data, order_by)
        query = data['query']

        # we only need foreign key and request columns
        # Note: Primary keys are loaded automatically by sqlalchemy
        fks = [col.name for col in cls.__table__.columns if col.foreign_keys]
        eager_cols = [col for col in to_fetch if "." not in col]
        to_load = [getattr(cls, e) for e in list(set(fks + eager_cols))]
        assert all(hasattr(e, 'type') for e in to_load)
        query = query.options(load_only(*to_load))
        # only return one line per result model so we can use limit and offset
        query = query.distinct(cls.id)
        dense_rank = func.dense_rank().over(  # remember the actual order
            order_by=order_by).label("dense_rank")
        query = query.add_columns(dense_rank)
        query = query.from_self(cls)
        query = query.order_by(dense_rank)

        if limit is not None:
            query = query.limit(limit)
        if offset is not None:
            query = query.offset(offset)

        query = cls._eager_load(to_fetch, query)

        # for query debugging use
        # import sqlalchemy.dialects.postgresql as postgresql
        # print(query.statement.compile(dialect=postgresql.dialect()))
        # print("===========")

        return query, json_to_populate
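
A hedged sketch of how a serialization helper like _ser might be driven; the Article model, its fields, and the surrounding mixin machinery are assumptions, not shown above. The first element of the returned tuple is a column-restricted query, the second the list of fields to populate:

# Hedged usage sketch; Article and its fields are hypothetical stand-ins.
query, fields_to_populate = Article._ser(
    to_return=['id', 'title'],
    filter_by={'published': True},
    limit=20,
    offset=0,
)
rows = [{f: getattr(obj, f) for f in fields_to_populate} for obj in query]
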
Example #45
0
def _query_all_rooms_for_acl_check():
    return (Room.query
            .filter(~Room.is_deleted)
            .options(load_only('id', 'protection_mode', 'reservations_need_confirmation'),
                     joinedload('owner').load_only('id'),
                     joinedload('acl_entries')))
Example #46
0
def fetch_corresponding_thread(db_session, namespace_id, message):
    """fetch a thread matching the corresponding message. Returns None if
       there's no matching thread."""
    # handle the case where someone is self-sending an email.
    if not message.from_addr or not message.to_addr:
        return None

    message_from = [t[1] for t in message.from_addr]
    message_to = [t[1] for t in message.to_addr]

    # FIXME: for performance reasons, we make the assumption that a reply
    # to a message always has a similar subject. This is only
    # right 95% of the time.
    clean_subject = cleanup_subject(message.subject)

    # XXX: It is much faster to sort client-side by message date. We therefore
    # use `contains_eager` and `outerjoin` to fetch the messages by thread in
    # no particular order (as opposed to `joinedload`, which would use the
    # order_by on the Message._thread backref).  We also use a limit to avoid
    # scanning too many / large threads.
    threads = (db_session.query(Thread).filter(
        Thread.namespace_id == namespace_id,
        Thread._cleaned_subject == clean_subject,
    ).outerjoin(Message, Thread.messages).order_by(desc(Thread.id)).options(
        load_only("id", "discriminator"),
        contains_eager(Thread.messages).load_only("from_addr", "to_addr",
                                                  "bcc_addr", "cc_addr",
                                                  "received_date"),
    ).limit(MAX_MESSAGES_SCANNED))

    for thread in threads:
        messages = sorted(thread.messages, key=attrgetter("received_date"))
        for match in messages:
            # A lot of people BCC some address when sending mass
            # emails so ignore BCC.
            match_bcc = match.bcc_addr if match.bcc_addr else []
            message_bcc = message.bcc_addr if message.bcc_addr else []

            match_emails = set([
                t[1].lower() for t in match.participants if t not in match_bcc
            ])
            message_emails = set([
                t[1].lower() for t in message.participants
                if t not in message_bcc
            ])

            # A conversation takes place between two or more persons.
            # Are there more than two participants in common in this
            # thread? If yes, it's probably a related thread.
            if len(match_emails & message_emails) >= 2:
                # No need to loop through the rest of the messages
                # in the thread
                if len(messages) >= MAX_THREAD_LENGTH:
                    break
                else:
                    return match.thread

            match_from = [t[1] for t in match.from_addr]
            match_to = [t[1] for t in match.to_addr]

            if (len(message_to) == 1 and message_from == message_to
                    and match_from == match_to and message_to == match_from):
                # Check that we're not over max thread length in this case
                # No need to loop through the rest of the messages
                # in the thread.
                if len(messages) >= MAX_THREAD_LENGTH:
                    break
                else:
                    return match.thread

    return None
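
The core matching heuristic above (two or more shared participants, with BCC recipients excluded) can be isolated in a small helper; a hedged standalone sketch, assuming participants are (name, email) tuples:

# Hedged sketch of the overlap heuristic; standalone, not the sync engine's actual helper.
def participants_overlap(match_participants, message_participants,
                         match_bcc=(), message_bcc=()):
    match_emails = {t[1].lower() for t in match_participants if t not in match_bcc}
    message_emails = {t[1].lower() for t in message_participants if t not in message_bcc}
    # A conversation involves at least two people, so require two shared addresses.
    return len(match_emails & message_emails) >= 2


print(participants_overlap(
    [('Alice', 'alice@example.com'), ('Bob', 'bob@example.com')],
    [('Bob', 'bob@example.com'), ('Alice', 'alice@example.com'),
     ('Carol', 'carol@example.com')],
))  # True
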
Example #47
0
def update_tweet_info(session, tw):
    entities = tw.entities.copy()
    if hasattr(tw, 'extended_entities'):
        for (k, v) in tw.extended_entities.items():
            entities[k] = v

    update_user_info(session, tw.user)
    if hasattr(tw, 'quoted_status'):
        quoted_status = tw.quoted_status
        if type(quoted_status) == dict:
            quoted_status = tweepy.Status.parse(api, quoted_status)
        update_tweet_info(session, quoted_status)
    if hasattr(tw, 'retweeted_status'):
        update_tweet_info(session, tw.retweeted_status)

    tw_db = session.query(models.Tweet)\
        .options(load_only("id"))\
        .filter_by(id=int(tw.id_str))\
        .one_or_none()
    if tw_db is None:
        tw_db = models.Tweet(id=int(tw.id_str))
        session.add(tw_db)
    if tw.coordinates is not None:
        tw_db.coordinates_longitude = tw.coordinates['coordinates'][0]
        tw_db.coordinates_latitude = tw.coordinates['coordinates'][1]
    else:
        tw_db.coordinates_longitude = None
        tw_db.coordinates_latitude = None
    tw_db.created_at = tw.created_at
    if hasattr(tw, 'current_user_retweet'):
        tw_db.current_user_retweet = \
            int_or_None(tw.current_user_retweet['id_str'])
    else:
        tw_db.current_user_retweet = None
    tw_db.favorite_count = tw.favorite_count
    tw_db.favorited = tw.favorited
    tw_db.filter_level = getattr(tw, 'filter_level', None)
    tw_db.in_reply_to_screen_name = tw.in_reply_to_screen_name
    tw_db.in_reply_to_status_id = int_or_None(tw.in_reply_to_status_id_str)
    tw_db.in_reply_to_user_id = int_or_None(tw.in_reply_to_user_id_str)
    tw_db.lang = tw.lang
    if hasattr(tw, 'place') and tw.place is not None:
        place = {}
        for k in [
                'attributes', 'country', 'code', 'country_code', 'full_name',
                'id', 'name', 'place_type', 'url'
        ]:
            if hasattr(tw.place, k):
                place[k] = getattr(tw.place, k)
        place['bounding_box'] = {}
        place['bounding_box']['coordinates'] = \
            tw.place.bounding_box.coordinates
        place['bounding_box']['type'] = \
            tw.place.bounding_box.type
        tw_db.place = json.dumps(place)
    else:
        tw_db.place = None
    tw_db.possibly_sensitive = getattr(tw, 'possibly_sensitive', None)
    tw_db.quoted_status_id = \
        int_or_None(getattr(tw, 'quoted_status_id_str', None))
    if hasattr(tw, 'scopes') and tw.scopes is not None:
        tw_db.scopes = json.dumps(tw.scopes)
    else:
        tw_db.scopes = None
    tw_db.retweet_count = tw.retweet_count
    tw_db.retweeted = tw.retweeted
    if hasattr(tw, 'retweeted_status'):
        tw_db.retweeted_status_id = int_or_None(tw.retweeted_status.id_str)
    else:
        tw_db.retweeted_status_id = None
    tw_db.source = tw.source
    tw_db.source_url = tw.source_url
    tw_db.text = tw.text
    tw_db.truncated = tw.truncated
    tw_db.user_id = int_or_None(tw.user.id_str)
    if hasattr(tw, 'withheld_copyright'):
        tw_db.withheld_copyright = tw.withheld_copyright
    else:
        tw_db.withheld_copyright = None
    if hasattr(tw, 'withheld_in_countries'):
        tw_db.withheld_in_countries = tw.withheld_in_countries
    else:
        tw_db.withheld_in_countries = None
    if hasattr(tw, 'withheld_scope'):
        tw_db.withheld_scope = tw.withheld_scope
    else:
        tw_db.withheld_scope = None
    session.commit()

    if not hasattr(tw, 'retweeted_status'):
        for m in entities.get('media', []):
            update_media_info(session, tw, m)
        for ht in entities.get('hashtags', []):
            tweet_id = int(tw.id_str)
            indices_begin = ht['indices'][0]
            indices_end = ht['indices'][1]
            ht_db = session.query(models.TweetHashtag)\
                .options(load_only("tweet_id", "indices_begin",
                                   "indices_end"))\
                .filter_by(tweet_id=tweet_id,
                           indices_begin=indices_begin,
                           indices_end=indices_end)\
                .one_or_none()
            if ht_db is None:
                ht_db = models.TweetHashtag(tweet_id=int(tw.id_str),
                                            indices_begin=indices_begin,
                                            indices_end=indices_end)
                session.add(ht_db)
            ht_db.text = ht['text']
            session.commit()
        for url in entities.get('urls', []):
            tweet_id = int(tw.id_str)
            indices_begin = url['indices'][0]
            indices_end = url['indices'][1]
            url_db = session.query(models.TweetUrl)\
                .options(load_only("tweet_id", "indices_begin",
                                   "indices_end"))\
                .filter_by(tweet_id=tweet_id,
                           indices_begin=indices_begin,
                           indices_end=indices_end)\
                .one_or_none()
            if url_db is None:
                url_db = models.TweetUrl(tweet_id=int(tw.id_str),
                                         indices_begin=indices_begin,
                                         indices_end=indices_end)
                session.add(url_db)
            url_db.url = url['url']
            url_db.display_url = url['display_url']
            url_db.expanded_url = url['expanded_url']
            session.commit()
        for sym in entities.get('symbols', []):
            tweet_id = int(tw.id_str)
            indices_begin = sym['indices'][0]
            indices_end = sym['indices'][1]
            sym_db = session.query(models.TweetSymbol)\
                .options(load_only("tweet_id", "indices_begin",
                                   "indices_end"))\
                .filter_by(tweet_id=tweet_id,
                           indices_begin=indices_begin,
                           indices_end=indices_end)\
                .one_or_none()
            if sym_db is None:
                sym_db = models.TweetSymbol(tweet_id=int(tw.id_str),
                                            indices_begin=indices_begin,
                                            indices_end=indices_end)
                session.add(sym_db)
            sym_db.text = sym['text']
            session.commit()
        for um in entities.get('user_mentions', []):
            tweet_id = int(tw.id_str)
            indices_begin = um['indices'][0]
            indices_end = um['indices'][1]
            um_db = session.query(models.TweetUserMention)\
                .options(load_only("tweet_id", "indices_begin",
                                   "indices_end"))\
                .filter_by(tweet_id=tweet_id,
                           indices_begin=indices_begin,
                           indices_end=indices_end)\
                .one_or_none()
            if um_db is None:
                um_db = models.TweetUserMention(tweet_id=int(tw.id_str),
                                                indices_begin=indices_begin,
                                                indices_end=indices_end)
                session.add(um_db)
            um_db.user_id = int(um['id_str'])
            um_db.screen_name = um['screen_name']
            um_db.name = um['name']
            session.commit()
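
The repeated pattern above (look up the row by key while loading only the primary key, create it if missing, then update fields and commit) is essentially a get-or-create; a hedged, self-contained sketch with a minimal stand-in model:

# Hedged sketch of the get-or-create pattern used above; Tweet is a stand-in model.
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.orm import declarative_base, load_only, sessionmaker

Base = declarative_base()


class Tweet(Base):  # minimal stand-in for models.Tweet
    __tablename__ = 'tweet'
    id = Column(Integer, primary_key=True)


def get_or_create_tweet(session, tweet_id):
    tweet = (session.query(Tweet)
             .options(load_only(Tweet.id))  # only the PK is needed to decide existence
             .filter_by(id=tweet_id)
             .one_or_none())
    if tweet is None:
        tweet = Tweet(id=tweet_id)
        session.add(tweet)
    return tweet


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
tw = get_or_create_tweet(session, 42)
session.commit()
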
Example #48
0
	def calc_acl_edges_sql(self, session, ad_id):
		#enumerating owners
		query = session.query(JackDawADDACL.owner_sid, JackDawADDACL.sid).filter(~JackDawADDACL.owner_sid.in_(["S-1-3-0", "S-1-5-18"])).filter(JackDawADDACL.ad_id == ad_id)
		for owner_sid, sid in query.all():
			self.add_edge(owner_sid, sid, label='Owner')
		
		# querying generic access
		query = session.query(JackDawADDACL)\
						.filter(JackDawADDACL.ace_type == 'ACCESS_ALLOWED_ACE_TYPE')\
						.filter(~JackDawADDACL.ace_sid.in_(["S-1-3-0", "S-1-5-18"]))\
						.filter(JackDawADDACL.ad_id == ad_id)
		#print('ACCESS_ALLOWED_ACE_TYPE')
		for acl in query.all():
			if acl.ace_mask_generic_all == True:
				self.add_edge(acl.ace_sid, acl.sid, label='GenericALL')
			
			if acl.ace_mask_generic_write == True:
				self.add_edge(acl.ace_sid, acl.sid, label='GenericWrite')
				
			if acl.ace_mask_write_owner == True:
				self.add_edge(acl.ace_sid, acl.sid, label='WriteOwner')
				
			if acl.ace_mask_write_dacl == True:
				self.add_edge(acl.ace_sid, acl.sid, label='WriteDacl')
				
			if acl.object_type in ['user', 'domain'] and acl.ace_mask_control_access == True:
				self.add_edge(acl.ace_sid, acl.sid, label='ExtendedRightALL')
		
		# querying only the necessary fields
		fields = ['ace_mask_generic_all', 'ace_mask_write_dacl', 'ace_mask_write_owner', 'ace_mask_generic_write', 'ace_objecttype', 'object_type', 'ace_mask_write_prop', 'ace_mask_control_access']
		# querying object type access
		query = session.query(JackDawADDACL)\
						.options(load_only(*fields))\
						.filter(JackDawADDACL.ace_type == 'ACCESS_ALLOWED_OBJECT_ACE_TYPE')\
						.filter(JackDawADDACL.ad_id == ad_id)\
						.filter(~JackDawADDACL.ace_sid.in_(["S-1-3-0", "S-1-5-18"]))\
						.filter(~and_(JackDawADDACL.ace_hdr_flag_inherited == True, JackDawADDACL.ace_hdr_flag_inherit_only == True))\
						.filter(or_(JackDawADDACL.ace_hdr_flag_inherited == False,\
									JackDawADDACL.ace_hdr_flag_inherit_only == False,\
									and_(JackDawADDACL.ace_hdr_flag_inherited == True, JackDawADDACL.ace_hdr_flag_inherit_only == True, JackDawADDACL.ace_inheritedobjecttype == JackDawADDACL.object_type_guid)))
								

		#print('ACCESS_ALLOWED_OBJECT_ACE_TYPE')
		for acl in query.all():			
			if any([acl.ace_mask_generic_all, acl.ace_mask_write_dacl, acl.ace_mask_write_owner, acl.ace_mask_generic_write]):
				if acl.ace_objecttype is not None and not ace_applies(acl.ace_objecttype, acl.object_type):
					continue
				
				if acl.ace_mask_generic_all == True:
					self.add_edge(acl.ace_sid, acl.sid, label='GenericALL')
					continue
			
				if acl.ace_mask_generic_write == True:
					self.add_edge(acl.ace_sid, acl.sid, label='GenericWrite')
					
					if acl.object_type != 'domain':
						continue
					
				if acl.ace_mask_write_dacl == True:
					self.add_edge(acl.ace_sid, acl.sid, label='WriteDacl')
					
				if acl.ace_mask_write_owner == True:
					self.add_edge(acl.ace_sid, acl.sid, label='WriteOwner')
					
			if acl.ace_mask_write_prop == True:
				if acl.object_type in ['user','group'] and acl.ace_objecttype is None:
					self.add_edge(acl.ace_sid, acl.sid, label='GenericWrite')
					
				if acl.object_type == 'group' and acl.ace_objecttype == 'bf9679c0-0de6-11d0-a285-00aa003049e2':
					self.add_edge(acl.ace_sid, acl.sid, label='AddMember')
					
				
			
			if acl.ace_mask_control_access == True:
				if acl.object_type in ['user','group'] and acl.ace_objecttype is None:
					self.add_edge(acl.ace_sid, acl.sid, label='ExtendedAll')
				
				if acl.object_type == 'domain' and acl.ace_objecttype == '1131f6ad-9c07-11d1-f79f-00c04fc2dcd2':
					# 'Replicating Directory Changes All'
					self.add_edge(acl.ace_sid, acl.sid, label='GetChangesALL')
						
				if acl.object_type == 'domain' and acl.ace_objecttype == '1131f6aa-9c07-11d1-f79f-00c04fc2dcd2':
					# 'Replicating Directory Changes'
					self.add_edge(acl.ace_sid, acl.sid, label='GetChanges')

				if acl.object_type == 'user' and acl.ace_objecttype == '00299570-246d-11d0-a768-00aa006e0529':
					# 'User-Force-Change-Password'
					self.add_edge(acl.ace_sid, acl.sid, label='User-Force-Change-Password')
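
The add_edge(source_sid, target_sid, label=...) calls above amount to building a labelled directed graph; a hedged illustration of one way such edges could be represented, using networkx (an assumption about the class, and the SIDs below are made-up example values):

# Hedged illustration of labelled SID-to-SID edges; not the class's actual graph backend.
import networkx as nx

graph = nx.MultiDiGraph()
graph.add_edge('S-1-5-21-1004336348-1177238915-682003330-1001',
               'S-1-5-21-1004336348-1177238915-682003330-512',
               label='WriteDacl')
graph.add_edge('S-1-5-21-1004336348-1177238915-682003330-1001',
               'S-1-5-21-1004336348-1177238915-682003330-512',
               label='WriteOwner')
print(graph.number_of_edges())  # 2 parallel labelled edges between the same SIDs
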
Example #49
0
    def get_pokestops(swLat, swLng, neLat, neLng, oSwLat=None, oSwLng=None,
                      oNeLat=None, oNeLng=None, timestamp=0,
                      eventless_stops=True, quests=True, invasions=True,
                      lures=True, geofences=None, exclude_geofences=None):
        columns = [
            'pokestop_id', 'name', 'image', 'latitude', 'longitude',
            'last_updated', 'incident_grunt_type', 'incident_expiration',
            'active_fort_modifier', 'lure_expiration'
        ]

        if quests:
            quest_columns = [
                'GUID', 'quest_timestamp', 'quest_task', 'quest_type',
                'quest_stardust', 'quest_pokemon_id', 'quest_pokemon_form_id',
                'quest_pokemon_costume_id', 'quest_reward_type',
                'quest_item_id', 'quest_item_amount'
            ]
            hours = int(args.quest_reset_time.split(':')[0])
            minutes = int(args.quest_reset_time.split(':')[1])
            reset_time = datetime.today().replace(
                hour=hours, minute=minutes, second=0, microsecond=0
            )
            reset_timestamp = datetime.timestamp(reset_time)
            query = (
                db.session.query(Pokestop, TrsQuest)
                .outerjoin(
                    TrsQuest,
                    and_(
                        Pokestop.pokestop_id == TrsQuest.GUID,
                        TrsQuest.quest_timestamp >= reset_timestamp
                    )
                )
                .options(
                    Load(Pokestop).load_only(*columns),
                    Load(TrsQuest).load_only(*quest_columns)
                )
            )
        else:
            query = Pokestop.query.options(load_only(*columns))

        if not eventless_stops:
            conds = []
            if quests:
                conds.append(TrsQuest.GUID.isnot(None))
            if invasions:
                conds.append(Pokestop.incident_expiration > datetime.utcnow())
            if lures:
                conds.append(Pokestop.lure_expiration > datetime.utcnow())
            query = query.filter(or_(*conds))

        if timestamp > 0:
            # If timestamp is known only send last scanned PokéStops.
            t = datetime.utcfromtimestamp(timestamp / 1000)
            query = query.filter(Pokestop.last_updated > t)

        if swLat and swLng and neLat and neLng:
            query = query.filter(
                Pokestop.latitude >= swLat,
                Pokestop.longitude >= swLng,
                Pokestop.latitude <= neLat,
                Pokestop.longitude <= neLng
            )

        if oSwLat and oSwLng and oNeLat and oNeLng:
            # Exclude PokéStops within old boundaries.
            query = query.filter(
                ~and_(
                    Pokestop.latitude >= oSwLat,
                    Pokestop.longitude >= oSwLng,
                    Pokestop.latitude <= oNeLat,
                    Pokestop.longitude <= oNeLng
                )
            )

        if geofences:
            sql = geofences_to_query(geofences, 'pokestop')
            query = query.filter(text(sql))

        if exclude_geofences:
            sql = geofences_to_query(exclude_geofences, 'pokestop')
            query = query.filter(~text(sql))

        result = query.all()

        now = datetime.utcnow()
        pokestops = []
        for r in result:
            pokestop_orm = r[0] if quests else r
            quest_orm = r[1] if quests else None
            pokestop = orm_to_dict(pokestop_orm)
            if quest_orm is not None:
                pokestop['quest'] = {
                    'scanned_at': quest_orm.quest_timestamp * 1000,
                    'task': quest_orm.quest_task,
                    'reward_type': quest_orm.quest_reward_type,
                    'item_id': quest_orm.quest_item_id,
                    'item_amount': quest_orm.quest_item_amount,
                    'pokemon_id': quest_orm.quest_pokemon_id,
                    'form_id': quest_orm.quest_pokemon_form_id,
                    'costume_id': quest_orm.quest_pokemon_costume_id,
                    'stardust': quest_orm.quest_stardust
                }
            else:
                pokestop['quest'] = None
            if (pokestop['incident_expiration'] is not None
                    and (pokestop['incident_expiration'] < now
                         or not invasions)):
                pokestop['incident_grunt_type'] = None
                pokestop['incident_expiration'] = None
            if (pokestop['lure_expiration'] is not None
                    and (pokestop['lure_expiration'] < now or not lures)):
                pokestop['active_fort_modifier'] = None
                pokestop['lure_expiration'] = None
            pokestops.append(pokestop)

        return pokestops
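
When a query selects more than one entity, load_only has to be scoped per entity with Load, as done above; a hedged minimal sketch with two hypothetical mapped classes:

# Hedged sketch of per-entity column loading; Stop and Quest are stand-in models.
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import Load, declarative_base, sessionmaker

Base = declarative_base()


class Stop(Base):
    __tablename__ = 'stop'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    image = Column(String)


class Quest(Base):
    __tablename__ = 'quest'
    id = Column(Integer, primary_key=True)
    stop_id = Column(Integer, ForeignKey('stop.id'))
    task = Column(String)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

query = (session.query(Stop, Quest)
         .outerjoin(Quest, Quest.stop_id == Stop.id)
         .options(Load(Stop).load_only(Stop.name),     # Stop: id + name only
                  Load(Quest).load_only(Quest.task)))  # Quest: id + task only
rows = query.all()  # list of (Stop, Quest-or-None) tuples
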
Example #50
0
def get_goods_info(db: Session):
    return db.query(models.Goods).options(
        load_only(models.Goods.id, models.Goods.name)).all()
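
A hedged sketch of how a helper like get_goods_info is typically wired into a FastAPI route with a session dependency; the database URL, the get_db dependency, and the reuse of get_goods_info from above are assumptions:

# Hedged sketch; get_goods_info and models.Goods are assumed to be importable from above.
from fastapi import Depends, FastAPI
from sqlalchemy import create_engine
from sqlalchemy.orm import Session, sessionmaker

engine = create_engine('sqlite://')  # stand-in database URL
SessionLocal = sessionmaker(bind=engine)

app = FastAPI()


def get_db():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()


@app.get('/goods')
def list_goods(db: Session = Depends(get_db)):
    goods = get_goods_info(db)  # SELECT restricted to id and name
    return [{'id': g.id, 'name': g.name} for g in goods]
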
Example #51
0
    def validate_fields(self, data):
        """
        This validator is used to only allow users to update the field entry for their user.
        It's not possible to exclude it because without the PK Marshmallow cannot load the right instance
        """
        fields = data.get("fields")
        if fields is None:
            return

        current_team = get_current_team()

        if is_admin():
            team_id = data.get("id")
            if team_id:
                target_team = Teams.query.filter_by(id=data["id"]).first()
            else:
                target_team = current_team

            # We are editing an existing team
            if self.view == "admin" and self.instance:
                target_team = self.instance
                provided_ids = []
                for f in fields:
                    f.pop("id", None)
                    field_id = f.get("field_id")

                    # Check that we have an existing field for this. May be unnecessary b/c the foreign key should enforce it
                    field = TeamFields.query.filter_by(
                        id=field_id).first_or_404()

                    # Get the existing field entry if one exists
                    entry = TeamFieldEntries.query.filter_by(
                        field_id=field.id, team_id=target_team.id).first()
                    if entry:
                        f["id"] = entry.id
                        provided_ids.append(entry.id)

                # Extremely dirty hack to prevent deleting previously provided data.
                # This needs a better soln.
                entries = (TeamFieldEntries.query.options(
                    load_only("id")).filter_by(team_id=target_team.id).all())
                for entry in entries:
                    if entry.id not in provided_ids:
                        fields.append({"id": entry.id})
        else:
            provided_ids = []
            for f in fields:
                # Remove any existing set
                f.pop("id", None)
                field_id = f.get("field_id")
                value = f.get("value")

                # Check that we have an existing field for this. May be unnecessary b/c the foreign key should enforce it
                field = TeamFields.query.filter_by(id=field_id).first_or_404()

                # Get the existing field entry if one exists
                entry = TeamFieldEntries.query.filter_by(
                    field_id=field.id, team_id=current_team.id).first()

                if field.required is True and value.strip() == "":
                    raise ValidationError(f"Field '{field.name}' is required",
                                          field_names=["fields"])

                if field.editable is False and entry is not None:
                    raise ValidationError(
                        f"Field '{field.name}' cannot be editted",
                        field_names=["fields"],
                    )

                if entry:
                    f["id"] = entry.id
                    provided_ids.append(entry.id)

            # Extremely dirty hack to prevent deleting previously provided data.
            # This needs a better soln.
            entries = (TeamFieldEntries.query.options(
                load_only("id")).filter_by(team_id=current_team.id).all())
            for entry in entries:
                if entry.id not in provided_ids:
                    fields.append({"id": entry.id})
Example #52
0
    def apply(self, pending=False, inactive=False):
        entity = self.basequery._entity_zero()
        base_fields = set(map(lambda x: x.key, entity.column_attrs))
        relationships = set(map(lambda x: x.key, entity.relationships))
        keys = columns(self.model, strformat=True)

        self.fields = base_fields.difference(set(self.queryargs.exclusions))
        # Now detect whether we want relationships
        if self.queryargs.include:
            fields = set()
            for item in self.queryargs.include:
                if item not in base_fields:
                    raise HTTPBadRequest(
                        '{} is not a recognised attribute of this resource'.
                        format(item))
                fields.add(item)
            self.fields = fields

        if self.queryargs.rels:
            if type(self.queryargs.rels) in [list, set]:
                self.fields = self.fields.union(
                    set(self.model.relationattrs()).difference(
                        self.queryargs.rels))
            elif self.queryargs.rels:
                self.fields = self.fields.union(relationships)
                self.queryargs.rels = relationships

        updated_rels = set()

        if type(self.queryargs.rels) in [list, set]:
            for item in self.queryargs.rels:
                if self.check_key(item):
                    pass
                updated_rels.add(item)

        self.queryargs.rels = updated_rels

        if not self.queryargs.sortkey or self.queryargs.sortkey == '':
            self.queryargs.sortkey = entity.primary_key[0].name

        if self.queryargs.sortkey in keys:
            column = getattr(self.basequery._entity_zero().attrs,
                             self.queryargs.sortkey)
            if self.queryargs.descending and self.queryargs.sortkey in self.model.keys(
            ):
                self.basequery = self.basequery.order_by(desc(column))
            else:
                self.basequery = self.basequery.order_by(column)

        filters = self.queryargs.filters
        self.basequery, filters = self.check_relationship_filtering(
            self.basequery, filters)

        # filters = tuple(filter(lambda k: '.' not in k, filters.keys()))

        if 'active' in keys:
            if pending:
                self.flags.append('P')
            if inactive:
                self.flags.append('N')
            self.basequery = self.set_active_filter(self.basequery, self.flags)

        try:
            self.basequery = self.basequery.filter_by(**filters)
        except InvalidRequestError as exc:
            raise HTTPBadRequest(str(exc))

        for key, value in self.queryargs.max:
            self.basequery = self.basequery.filter(
                getattr(self.model, key) <= value)

        if self.queryargs.page:
            self.pagedquery = self.basequery.paginate(
                self.queryargs.page, self.queryargs.pagesize or 50, False)
            self.paginated = True
            return self

        self.basequery = self.basequery.group_by(self.model.id)
        self.basequery = self.basequery.limit(self.queryargs.limit)
        self.basequery = self.basequery.options(load_only(*self.fields))
        return self
Example #53
0
def getHospital(columns=None):
    u = Hospital.query
    if columns:
        u = u.options(orm.load_only(*columns))
    return u
Example #54
0
 def unused(cls, days=7):
     age_threshold = datetime.datetime.now() - datetime.timedelta(days=days)
     return (cls.query.filter(
         Query.id.is_(None),
         cls.retrieved_at < age_threshold).outerjoin(Query)).options(
             load_only('id'))
Example #55
0
def serialize_categories_ical(category_ids,
                              user,
                              event_filter=True,
                              event_filter_fn=None,
                              update_query=None):
    """Export the events in a category to iCal

    :param category_ids: Category IDs to export
    :param user: The user who needs to be able to access the events
    :param event_filter: A SQLAlchemy criterion to restrict which
                         events will be returned.  Usually something
                         involving the start/end date of the event.
    :param event_filter_fn: A callable that determines which events to include (after querying)
    :param update_query: A callable that can update the query used to retrieve the events.
                         Must return the updated query object.
    """
    own_room_strategy = joinedload('own_room')
    own_room_strategy.load_only('building', 'floor', 'number', 'verbose_name')
    own_room_strategy.lazyload('owner')
    own_venue_strategy = joinedload('own_venue').load_only('name')
    query = (Event.query.filter(
        Event.category_chain_overlaps(category_ids), ~Event.is_deleted,
        event_filter).options(
            load_only('id', 'category_id', 'start_dt', 'end_dt', 'title',
                      'description', 'own_venue_name', 'own_room_name',
                      'protection_mode', 'access_key'),
            subqueryload('acl_entries'), joinedload('person_links'),
            own_room_strategy, own_venue_strategy).order_by(Event.start_dt))
    if update_query:
        query = update_query(query)
    it = iter(query)
    if event_filter_fn:
        it = ifilter(event_filter_fn, it)
    events = list(it)
    # make sure the parent categories are in sqlalchemy's identity cache.
    # this avoids query spam from `protection_parent` lookups
    _parent_categs = (
        Category._get_chain_query(
            Category.id.in_({e.category_id
                             for e in events}))  # noqa: F841
        .options(load_only('id', 'parent_id', 'protection_mode'),
                 joinedload('acl_entries')).all())
    cal = ical.Calendar()
    cal.add('version', '2.0')
    cal.add('prodid', '-//CERN//INDICO//EN')

    now = now_utc(False)
    for event in events:
        if not event.can_access(user):
            continue
        location = ('{} ({})'.format(event.room_name, event.venue_name)
                    if event.venue_name and event.room_name else
                    (event.venue_name or event.room_name))
        cal_event = ical.Event()
        cal_event.add(
            'uid',
            u'indico-event-{}@{}'.format(event.id,
                                         url_parse(config.BASE_URL).host))
        cal_event.add('dtstamp', now)
        cal_event.add('dtstart', event.start_dt)
        cal_event.add('dtend', event.end_dt)
        cal_event.add('url', event.external_url)
        cal_event.add('summary', event.title)
        cal_event.add('location', location)
        description = []
        if event.person_links:
            speakers = [
                u'{} ({})'.format(x.full_name, x.affiliation)
                if x.affiliation else x.full_name for x in event.person_links
            ]
            description.append(u'Speakers: {}'.format(u', '.join(speakers)))

        if event.description:
            desc_text = unicode(
                event.description) or u'<p/>'  # get rid of RichMarkup
            try:
                description.append(
                    unicode(html.fromstring(desc_text).text_content()))
            except ParserError:
                # this happens e.g. if desc_text contains only a html comment
                pass
        description.append(event.external_url)
        cal_event.add('description', u'\n'.join(description))
        cal.add_component(cal_event)
    return BytesIO(cal.to_ical())
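
The iCal assembly itself follows the standard icalendar pattern (create a Calendar, add VEVENT components, serialize with to_ical); a hedged minimal sketch with made-up event data:

# Hedged minimal sketch of the icalendar usage pattern shown above.
from datetime import datetime, timedelta
from io import BytesIO

import icalendar as ical

cal = ical.Calendar()
cal.add('version', '2.0')
cal.add('prodid', '-//Example//Export//EN')

event = ical.Event()
event.add('uid', 'example-event-1@example.com')
event.add('dtstamp', datetime.utcnow())
event.add('dtstart', datetime(2024, 1, 10, 9, 0))
event.add('dtend', datetime(2024, 1, 10, 9, 0) + timedelta(hours=1))
event.add('summary', 'Example meeting')
cal.add_component(event)

ics_bytes = BytesIO(cal.to_ical())  # ready to return as a file download
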
Example #56
0
def file_upload(request):
    # If we're in read-only mode, let upload clients know
    if request.flags.enabled("read-only"):
        raise _exc_with_message(
            HTTPForbidden, "Read-only mode: Uploads are temporarily disabled"
        )

    # Log an attempt to upload
    metrics = request.find_service(IMetricsService, context=None)
    metrics.increment("warehouse.upload.attempt")

    # Before we do anything, if there isn't an authenticated user with this
    # request, then we'll go ahead and bomb out.
    if request.authenticated_userid is None:
        raise _exc_with_message(
            HTTPForbidden, "Invalid or non-existent authentication information."
        )

    # Ensure that user has a verified, primary email address. This should both
    # reduce the ease of spam account creation and activity, as well as act as
    # a forcing function for https://github.com/pypa/warehouse/issues/3632.
    # TODO: Once https://github.com/pypa/warehouse/issues/3632 has been solved,
    #       we might consider a different condition, possibly looking at
    #       User.is_active instead.
    if not (request.user.primary_email and request.user.primary_email.verified):
        raise _exc_with_message(
            HTTPBadRequest,
            (
                "User {!r} does not have a verified primary email address. "
                "Please add a verified primary email before attempting to "
                "upload to PyPI. See {project_help} for more information."
                "for more information."
            ).format(
                request.user.username,
                project_help=request.help_url(_anchor="verified-email"),
            ),
        ) from None

    # Do some cleanup of the various form fields
    for key in list(request.POST):
        value = request.POST.get(key)
        if isinstance(value, str):
            # distutils "helpfully" substitutes unknown, but "required" values
            # with the string "UNKNOWN". This is basically never what anyone
            # actually wants so we'll just go ahead and delete anything whose
            # value is UNKNOWN.
            if value.strip() == "UNKNOWN":
                del request.POST[key]

            # Escape NUL characters, which psycopg doesn't like
            if "\x00" in value:
                request.POST[key] = value.replace("\x00", "\\x00")

    # We require protocol_version 1, it's the only supported version however
    # passing a different version should raise an error.
    if request.POST.get("protocol_version", "1") != "1":
        raise _exc_with_message(HTTPBadRequest, "Unknown protocol version.")

    # Check if any fields were supplied as a tuple and have become a
    # FieldStorage. The 'content' and 'gpg_signature' fields _should_ be a
    # FieldStorage, however.
    # ref: https://github.com/pypa/warehouse/issues/2185
    # ref: https://github.com/pypa/warehouse/issues/2491
    for field in set(request.POST) - {"content", "gpg_signature"}:
        values = request.POST.getall(field)
        if any(isinstance(value, FieldStorage) for value in values):
            raise _exc_with_message(HTTPBadRequest, f"{field}: Should not be a tuple.")

    # Look up all of the valid classifiers
    all_classifiers = request.db.query(Classifier).all()

    # Validate and process the incoming metadata.
    form = MetadataForm(request.POST)

    # Add a validator for deprecated classifiers
    form.classifiers.validators.append(_no_deprecated_classifiers(request))

    form.classifiers.choices = [(c.classifier, c.classifier) for c in all_classifiers]
    if not form.validate():
        for field_name in _error_message_order:
            if field_name in form.errors:
                break
        else:
            field_name = sorted(form.errors.keys())[0]

        if field_name in form:
            field = form[field_name]
            if field.description and isinstance(field, wtforms.StringField):
                error_message = (
                    "{value!r} is an invalid value for {field}. ".format(
                        value=field.data, field=field.description
                    )
                    + "Error: {} ".format(form.errors[field_name][0])
                    + "See "
                    "https://packaging.python.org/specifications/core-metadata"
                )
            else:
                error_message = "Invalid value for {field}. Error: {msgs[0]}".format(
                    field=field_name, msgs=form.errors[field_name]
                )
        else:
            error_message = "Error: {}".format(form.errors[field_name][0])

        raise _exc_with_message(HTTPBadRequest, error_message)

    # Ensure that we have file data in the request.
    if "content" not in request.POST:
        raise _exc_with_message(HTTPBadRequest, "Upload payload does not have a file.")

    # Look up the project first before doing anything else, this is so we can
    # automatically register it if we need to and can check permissions before
    # going any further.
    try:
        project = (
            request.db.query(Project)
            .filter(
                Project.normalized_name == func.normalize_pep426_name(form.name.data)
            )
            .one()
        )
    except NoResultFound:
        # Check for AdminFlag set by a PyPI Administrator disabling new project
        # registration, reasons for this include Spammers, security
        # vulnerabilities, or just wanting to be lazy and not worry ;)
        if request.flags.enabled("disallow-new-project-registration"):
            raise _exc_with_message(
                HTTPForbidden,
                (
                    "New project registration temporarily disabled. "
                    "See {projecthelp} for details"
                ).format(projecthelp=request.help_url(_anchor="admin-intervention")),
            ) from None

        # Before we create the project, we're going to check our blacklist to
        # see if this project is even allowed to be registered. If it is not,
        # then we're going to deny the request to create this project.
        if request.db.query(
            exists().where(
                BlacklistedProject.name == func.normalize_pep426_name(form.name.data)
            )
        ).scalar():
            raise _exc_with_message(
                HTTPBadRequest,
                (
                    "The name {name!r} isn't allowed. "
                    "See {projecthelp} "
                    "for more information."
                ).format(
                    name=form.name.data,
                    projecthelp=request.help_url(_anchor="project-name"),
                ),
            ) from None

        # Also check for collisions with Python Standard Library modules.
        if packaging.utils.canonicalize_name(form.name.data) in STDLIB_PROHIBITTED:
            raise _exc_with_message(
                HTTPBadRequest,
                (
                    "The name {name!r} isn't allowed (conflict with Python "
                    "Standard Library module name). See "
                    "{projecthelp} for more information."
                ).format(
                    name=form.name.data,
                    projecthelp=request.help_url(_anchor="project-name"),
                ),
            ) from None

        # The project doesn't exist in our database, so first we'll check for
        # projects with a similar name
        squattees = (
            request.db.query(Project)
            .filter(
                func.levenshtein(
                    Project.normalized_name, func.normalize_pep426_name(form.name.data)
                )
                <= 2
            )
            .all()
        )

        # Next we'll create the project
        project = Project(name=form.name.data)
        request.db.add(project)

        # Now that the project exists, add any squats which it is the squatter for
        for squattee in squattees:
            request.db.add(Squat(squatter=project, squattee=squattee))

        # Then we'll add a role setting the current user as the "Owner" of the
        # project.
        request.db.add(Role(user=request.user, project=project, role_name="Owner"))
        # TODO: This should be handled by some sort of database trigger or a
        #       SQLAlchemy hook or the like instead of doing it inline in this
        #       view.
        request.db.add(
            JournalEntry(
                name=project.name,
                action="create",
                submitted_by=request.user,
                submitted_from=request.remote_addr,
            )
        )
        request.db.add(
            JournalEntry(
                name=project.name,
                action="add Owner {}".format(request.user.username),
                submitted_by=request.user,
                submitted_from=request.remote_addr,
            )
        )

    # Check that the user has permission to do things to this project, if this
    # is a new project this will act as a sanity check for the role we just
    # added above.
    if not request.has_permission("upload", project):
        raise _exc_with_message(
            HTTPForbidden,
            (
                "The credential associated with user '{0}' "
                "isn't allowed to upload to project '{1}'. "
                "See {2} for more information."
            ).format(
                request.user.username,
                project.name,
                request.help_url(_anchor="project-name"),
            ),
        )

    # Update name if it differs but is still equivalent. We don't need to check if
    # they are equivalent when normalized because that's already been done when we
    # queried for the project.
    if project.name != form.name.data:
        project.name = form.name.data

    # Render our description so we can save from having to render this data every time
    # we load a project description page.
    rendered = None
    if form.description.data:
        description_content_type = form.description_content_type.data
        if not description_content_type:
            description_content_type = "text/x-rst"

        rendered = readme.render(
            form.description.data, description_content_type, use_fallback=False
        )

        # Uploading should prevent broken rendered descriptions.
        if rendered is None:
            if form.description_content_type.data:
                message = (
                    "The description failed to render "
                    "for '{description_content_type}'."
                ).format(description_content_type=description_content_type)
            else:
                message = (
                    "The description failed to render "
                    "in the default format of reStructuredText."
                )
            raise _exc_with_message(
                HTTPBadRequest,
                "{message} See {projecthelp} for more information.".format(
                    message=message,
                    projecthelp=request.help_url(_anchor="description-content-type"),
                ),
            ) from None

    try:
        canonical_version = packaging.utils.canonicalize_version(form.version.data)
        release = (
            request.db.query(Release)
            .filter(
                (Release.project == project)
                & (Release.canonical_version == canonical_version)
            )
            .one()
        )
    except MultipleResultsFound:
        # There are multiple releases of this project which have the same
        # canonical version that were uploaded before we checked for
        # canonical version equivalence, so return the exact match instead
        release = (
            request.db.query(Release)
            .filter(
                (Release.project == project) & (Release.version == form.version.data)
            )
            .one()
        )
    except NoResultFound:
        release = Release(
            project=project,
            _classifiers=[
                c for c in all_classifiers if c.classifier in form.classifiers.data
            ],
            dependencies=list(
                _construct_dependencies(
                    form,
                    {
                        "requires": DependencyKind.requires,
                        "provides": DependencyKind.provides,
                        "obsoletes": DependencyKind.obsoletes,
                        "requires_dist": DependencyKind.requires_dist,
                        "provides_dist": DependencyKind.provides_dist,
                        "obsoletes_dist": DependencyKind.obsoletes_dist,
                        "requires_external": DependencyKind.requires_external,
                        "project_urls": DependencyKind.project_url,
                    },
                )
            ),
            canonical_version=canonical_version,
            description=Description(
                content_type=form.description_content_type.data,
                raw=form.description.data or "",
                html=rendered or "",
                rendered_by=readme.renderer_version(),
            ),
            **{
                k: getattr(form, k).data
                for k in {
                    # This is a list of all the fields in the form that we
                    # should pull off and insert into our new release.
                    "version",
                    "summary",
                    "license",
                    "author",
                    "author_email",
                    "maintainer",
                    "maintainer_email",
                    "keywords",
                    "platform",
                    "home_page",
                    "download_url",
                    "requires_python",
                }
            },
            uploader=request.user,
            uploaded_via=request.user_agent,
        )
        request.db.add(release)
        # TODO: This should be handled by some sort of database trigger or
        #       a SQLAlchemy hook or the like instead of doing it inline in
        #       this view.
        request.db.add(
            JournalEntry(
                name=release.project.name,
                version=release.version,
                action="new release",
                submitted_by=request.user,
                submitted_from=request.remote_addr,
            )
        )

    # TODO: We need a better solution to this than to just do it inline inside
    #       this method. Ideally the version field would just be sortable, but
    #       at least this should be some sort of hook or trigger.
    releases = (
        request.db.query(Release)
        .filter(Release.project == project)
        .options(orm.load_only(Release._pypi_ordering))
        .all()
    )
    for i, r in enumerate(
        sorted(releases, key=lambda x: packaging.version.parse(x.version))
    ):
        r._pypi_ordering = i

    # Pull the filename out of our POST data.
    filename = request.POST["content"].filename

    # Make sure that the filename does not contain any path separators.
    if "/" in filename or "\\" in filename:
        raise _exc_with_message(
            HTTPBadRequest, "Cannot upload a file with '/' or '\\' in the name."
        )

    # Make sure the filename ends with an allowed extension.
    if _dist_file_regexes[project.allow_legacy_files].search(filename) is None:
        raise _exc_with_message(
            HTTPBadRequest,
            "Invalid file extension: Use .egg, .tar.gz, .whl or .zip "
            "extension. (https://www.python.org/dev/peps/pep-0527)",
        )

    # Make sure that our filename matches the project that it is being uploaded
    # to.
    prefix = pkg_resources.safe_name(project.name).lower()
    if not pkg_resources.safe_name(filename).lower().startswith(prefix):
        raise _exc_with_message(
            HTTPBadRequest,
            "Start filename for {!r} with {!r}.".format(project.name, prefix),
        )

    # Check the content type of what is being uploaded
    if not request.POST["content"].type or request.POST["content"].type.startswith(
        "image/"
    ):
        raise _exc_with_message(HTTPBadRequest, "Invalid distribution file.")

    # Ensure that the package filetype is allowed.
    # TODO: Once PEP 527 is completely implemented we should be able to delete
    #       this and just move it into the form itself.
    if not project.allow_legacy_files and form.filetype.data not in {
        "sdist",
        "bdist_wheel",
        "bdist_egg",
    }:
        raise _exc_with_message(HTTPBadRequest, "Unknown type of file.")

    # The project may have its own upload size limit configured; the effective
    # limit is whichever is larger, that limit or our global maximum file size.
    file_size_limit = max(filter(None, [MAX_FILESIZE, project.upload_limit]))

    with tempfile.TemporaryDirectory() as tmpdir:
        temporary_filename = os.path.join(tmpdir, filename)

        # Buffer the entire file onto disk, checking the hash of the file as we
        # go along.
        with open(temporary_filename, "wb") as fp:
            file_size = 0
            file_hashes = {
                "md5": hashlib.md5(),
                "sha256": hashlib.sha256(),
                "blake2_256": hashlib.blake2b(digest_size=256 // 8),
            }
            for chunk in iter(lambda: request.POST["content"].file.read(8096), b""):
                file_size += len(chunk)
                if file_size > file_size_limit:
                    raise _exc_with_message(
                        HTTPBadRequest,
                        "File too large. "
                        + "Limit for project {name!r} is {limit} MB. ".format(
                            name=project.name, limit=file_size_limit // (1024 * 1024)
                        )
                        + "See "
                        + request.help_url(_anchor="file-size-limit"),
                    )
                fp.write(chunk)
                for hasher in file_hashes.values():
                    hasher.update(chunk)

        # Take our hash functions and compute the final hashes for them now.
        file_hashes = {k: h.hexdigest().lower() for k, h in file_hashes.items()}

        # Actually verify the digests that we've gotten. We're going to use
        # hmac.compare_digest even though we probably don't actually need to
        # because it's better safe than sorry. In the case of multiple digests
        # we expect them all to be given.
        if not all(
            [
                hmac.compare_digest(
                    getattr(form, "{}_digest".format(digest_name)).data.lower(),
                    digest_value,
                )
                for digest_name, digest_value in file_hashes.items()
                if getattr(form, "{}_digest".format(digest_name)).data
            ]
        ):
            raise _exc_with_message(
                HTTPBadRequest,
                "The digest supplied does not match a digest calculated "
                "from the uploaded file.",
            )

        # Check to see if the file that was uploaded exists already or not.
        is_duplicate = _is_duplicate_file(request.db, filename, file_hashes)
        if is_duplicate:
            return Response()
        elif is_duplicate is not None:
            raise _exc_with_message(
                HTTPBadRequest,
                # Note: Changing this error message to something that doesn't
                # start with "File already exists" will break the
                # --skip-existing functionality in twine
                # ref: https://github.com/pypa/warehouse/issues/3482
                # ref: https://github.com/pypa/twine/issues/332
                "File already exists. See "
                + request.help_url(_anchor="file-name-reuse"),
            )

        # Check to see if the file that was uploaded exists in our filename log
        if request.db.query(
            request.db.query(Filename).filter(Filename.filename == filename).exists()
        ).scalar():
            raise _exc_with_message(
                HTTPBadRequest,
                "This filename has already been used, use a "
                "different version. "
                "See " + request.help_url(_anchor="file-name-reuse"),
            )

        # Check to see if uploading this file would create a duplicate sdist
        # for the current release.
        if (
            form.filetype.data == "sdist"
            and request.db.query(
                request.db.query(File)
                .filter((File.release == release) & (File.packagetype == "sdist"))
                .exists()
            ).scalar()
        ):
            raise _exc_with_message(
                HTTPBadRequest, "Only one sdist may be uploaded per release."
            )

        # Check the file to make sure it is a valid distribution file.
        if not _is_valid_dist_file(temporary_filename, form.filetype.data):
            raise _exc_with_message(HTTPBadRequest, "Invalid distribution file.")

        # Check that if it's a binary wheel, it's on a supported platform
        if filename.endswith(".whl"):
            wheel_info = _wheel_file_re.match(filename)
            plats = wheel_info.group("plat").split(".")
            for plat in plats:
                if not _valid_platform_tag(plat):
                    raise _exc_with_message(
                        HTTPBadRequest,
                        "Binary wheel '{filename}' has an unsupported "
                        "platform tag '{plat}'.".format(filename=filename, plat=plat),
                    )

        # Also buffer the entire signature file to disk.
        if "gpg_signature" in request.POST:
            has_signature = True
            with open(os.path.join(tmpdir, filename + ".asc"), "wb") as fp:
                signature_size = 0
                for chunk in iter(
                    lambda: request.POST["gpg_signature"].file.read(8096), b""
                ):
                    signature_size += len(chunk)
                    if signature_size > MAX_SIGSIZE:
                        raise _exc_with_message(HTTPBadRequest, "Signature too large.")
                    fp.write(chunk)

            # Check whether signature is ASCII armored
            with open(os.path.join(tmpdir, filename + ".asc"), "rb") as fp:
                if not fp.read().startswith(b"-----BEGIN PGP SIGNATURE-----"):
                    raise _exc_with_message(
                        HTTPBadRequest, "PGP signature isn't ASCII armored."
                    )
        else:
            has_signature = False

        # TODO: This should be handled by some sort of database trigger or a
        #       SQLAlchemy hook or the like instead of doing it inline in this
        #       view.
        request.db.add(Filename(filename=filename))

        # Store the information about the file in the database.
        file_ = File(
            release=release,
            filename=filename,
            python_version=form.pyversion.data,
            packagetype=form.filetype.data,
            comment_text=form.comment.data,
            size=file_size,
            has_signature=bool(has_signature),
            md5_digest=file_hashes["md5"],
            sha256_digest=file_hashes["sha256"],
            blake2_256_digest=file_hashes["blake2_256"],
            # Figure out what our filepath is going to be, we're going to use a
            # directory structure based on the hash of the file contents. This
            # will ensure that the contents of the file cannot change without
            # it also changing the path that the file is saved to.
            path="/".join(
                [
                    file_hashes[PATH_HASHER][:2],
                    file_hashes[PATH_HASHER][2:4],
                    file_hashes[PATH_HASHER][4:],
                    filename,
                ]
            ),
            uploaded_via=request.user_agent,
        )
        request.db.add(file_)

        # TODO: This should be handled by some sort of database trigger or a
        #       SQLAlchemy hook or the like instead of doing it inline in this
        #       view.
        request.db.add(
            JournalEntry(
                name=release.project.name,
                version=release.version,
                action="add {python_version} file {filename}".format(
                    python_version=file_.python_version, filename=file_.filename
                ),
                submitted_by=request.user,
                submitted_from=request.remote_addr,
            )
        )

        # TODO: We need a better answer about how to make this transactional so
        #       this won't take effect until after a commit has happened, for
        #       now we'll just ignore it and save it before the transaction is
        #       committed.
        storage = request.find_service(IFileStorage)
        storage.store(
            file_.path,
            os.path.join(tmpdir, filename),
            meta={
                "project": file_.release.project.normalized_name,
                "version": file_.release.version,
                "package-type": file_.packagetype,
                "python-version": file_.python_version,
            },
        )
        if has_signature:
            storage.store(
                file_.pgp_path,
                os.path.join(tmpdir, filename + ".asc"),
                meta={
                    "project": file_.release.project.normalized_name,
                    "version": file_.release.version,
                    "package-type": file_.packagetype,
                    "python-version": file_.python_version,
                },
            )

    # Log a successful upload
    metrics.increment("warehouse.upload.ok", tags=[f"filetype:{form.filetype.data}"])

    return Response()
Example #57
def getWorks_On(columns=None):
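    """Return a Works_On query, optionally restricted to the given columns via load_only."""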
    u = Works_On.query
    if columns:
        u = u.options(orm.load_only(*columns))
    return u
Example #58
def get_college_profile():
    if not current_user.is_authenticated:
        college_id = None
    else:
        college_id = current_user.college_id

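    # Fetch the tests mapped to this college, eager-loading the joined Test
    # rows and loading only the CollegeTest columns needed below.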
    college_tests = (CollegeTest.query.join(CollegeTest.test).options(
        contains_eager(CollegeTest.test)).options(
            load_only(CollegeTest.test_id, CollegeTest.is_free,
                      CollegeTest.is_active)).filter(
                          CollegeTest.college_id == college_id)).all()

    test_ids = [college_test.test_id for college_test in college_tests]

    current_user_id = current_user.id if not current_user.is_anonymous else None

    # this has been copied from Tests.py (list tests)

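    # Build the listing of active tests for this college, joining the user's
    # orders and completed attempts, and loading only the columns used in the
    # response payload.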
    tests = (Test.query.filter_by(**request.args).filter(
        Test.id.in_(test_ids)).join(
            CollegeTest,
            and_(
                CollegeTest.test_id == Test.id,
                CollegeTest.college_id == current_user.college_id,
                CollegeTest.is_active == True,
            )).outerjoin(Test.sections).outerjoin(Section.questions).outerjoin(
                OrderTest, OrderTest.test_id == Test.id).outerjoin(
                    Order,
                    and_(Order.id == OrderTest.order_id,
                         Order.user_id == current_user_id)).outerjoin(
                             TestAttempt,
                             and_(TestAttempt.test_id == Test.id,
                                  TestAttempt.is_complete == 1,
                                  TestAttempt.user_id == current_user_id)).
             options(
                 contains_eager(Test.sections).load_only(
                     Section.id, Section.total_time).contains_eager(
                         Section.questions).load_only(Question.id)).options(
                             contains_eager(Test.test_attempts).load_only(
                                 TestAttempt.is_complete)).options(
                                     load_only(Test.id, Test.name,
                                               Test.character, Test.price,
                                               Test.type, Test.logo,
                                               Test.allow_section_jumps)).
             options(contains_eager(Test.orders).load_only(
                 Order.status)).filter(Test.is_active == 1).all())
    fields = [
        'id', 'name', 'character', 'allow_section_jumps', 'price', 'logo',
        'orders', 'sections', {
            'sections': ['id', 'total_time', 'questions', {
                'questions': 'id'
            }]
        }, 'type', 'test_attempts', {
            'orders': ['status']
        }, {
            'test_attempts': ['is_complete']
        }
    ]
    tests = [test.todict(fields) for test in tests]

    # Determine purchase status for each test: it counts as purchased if any
    # of the user's orders for it is paid, or if the college offers it free.
    for test in tests:
        test['is_purchased'] = False  # set key, false by default
        for order in test['orders']:
            test['status'] = order['status']
            if order['status'] == "paid":
                test['is_purchased'] = True

        # if len(test['orders']) > 0 :  # if a paid order exists
        #     test['is_purchased'] = True  # set is purchased to true

        for college_test in college_tests:
            if college_test.test_id == test['id'] and college_test.is_free:
                test['is_purchased'] = True

        del test['orders']

        test['is_complete'] = False  # set key, false by default

        if len(test['test_attempts']):
            test['is_complete'] = True

        del test['test_attempts']

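        # Summarize section/question counts and total time, then drop the
        # nested section data from the payload.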
        question_count = 0
        section_count = 0

        total_time = 0
        for section in test["sections"]:
            section_count += 1
            total_time += section['total_time']

            for question in section["questions"]:
                question_count += 1

        test['section_count'] = section_count
        test['question_count'] = question_count
        test['total_time'] = total_time

        del test['sections']

    return tests
Example #59
def uniqueItem(item_name):
    """Return True if no existing CategoryItem has the given title (case-insensitive)."""
    items = session.query(CategoryItem).options(load_only(CategoryItem.title))
    for item in items:
        if item_name.lower() == item.title.lower():
            return False
    return True
Example #60
def getPatient(columns=None):
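    """Return a Patient query, optionally restricted to the given columns via load_only."""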
    u = Patient.query
    if columns:
        u = u.options(orm.load_only(*columns))
    return u