Beispiel #1
0
def _search(query):
    """Run a search and return a ``(results, facets)`` tuple.

    To support fast paging, the normalized results and facets are
    pickled, zlib-compressed and stored in the cache under the query's
    cache key for ``cache_time`` seconds.
    """
    results = None
    cache_time = 60
    if query.cache:
        key = query.cache_key()
        results = cache.get(key)
        if results:
            # Refresh the entry so the lease is extended while the user
            # pages through results. Django's cache.add() is a no-op when
            # the key already exists (and it must exist, since get() just
            # returned it), so set() is required to actually reset the TTL.
            cache.set(key, results, cache_time)

    if not results:
        results = combined_search_results(query)
        facets = results['facets']
        results = apply_normalizers(results)
        if query.cache:
            # NOTE(review): pickle is acceptable only because the cache is
            # an internal, trusted store - never unpickle untrusted data.
            dumped = zlib.compress(pickle.dumps((results, facets)))
            logger.debug("cached search results %s", len(dumped))
            cache.set(key, dumped, cache_time)

    else:
        # Cache hit: ``results`` currently holds the compressed pickle
        # payload written above; unpack it into the real objects.
        results, facets = pickle.loads(zlib.decompress(results))

    # @todo - sorting should be done in the backend as it can optimize if
    # the query is restricted to one model. has implications for caching...
    if query.sort == 'title':
        keyfunc = lambda r: r.title().lower()
    elif query.sort == 'last_modified':
        # Sentinel so records without a last-modified value sort together
        # at the oldest end instead of raising on comparison with None.
        old = datetime(1, 1, 1)
        keyfunc = lambda r: r.last_modified() or old
    else:
        keyfunc = lambda r: getattr(r, query.sort)()
    results.sort(key=keyfunc, reverse=not query.order)

    return results, facets
Beispiel #2
0
def _search(query):
    """Execute *query* and return ``(results, facets)``.

    Intermediate results are kept in the cache as a zlib-compressed
    pickle so that paging through a result set does not re-run the
    search backend.
    """
    cache_time = 60
    cached = None
    if query.cache:
        key = query.cache_key()
        cached = cache.get(key)
        if cached:
            # put it back again - this basically extends the lease
            cache.add(key, cached, cache_time)

    if cached:
        # Cache hit: unpack the compressed payload stored below.
        results, facets = pickle.loads(zlib.decompress(cached))
    else:
        raw = combined_search_results(query)
        facets = raw['facets']
        results = apply_normalizers(raw)
        if query.cache:
            payload = zlib.compress(pickle.dumps((results, facets)))
            logger.debug("cached search results %s", len(payload))
            cache.set(key, payload, cache_time)

    # @todo - sorting should be done in the backend as it can optimize if
    # the query is restricted to one model. has implications for caching...
    sort_field = query.sort
    if sort_field == 'title':
        keyfunc = lambda r: r.title().lower()
    elif sort_field == 'last_modified':
        # Fallback for records whose last_modified() is falsy.
        fallback = datetime(1, 1, 1)
        keyfunc = lambda r: r.last_modified() or fallback
    else:
        keyfunc = lambda r: getattr(r, sort_field)()
    results.sort(key=keyfunc, reverse=not query.order)

    return results, facets
Beispiel #3
0
 def get_context_data(self, **kwargs):
     """Build the template context for the group detail view.

     Extends the base context with the group object, its map and layer
     resources, membership/role flags for the requesting user, a
     normalized list of user profiles, and a total count.
     """
     context = super(GroupDetailView, self).get_context_data(**kwargs)
     context['object'] = self.group
     context['maps'] = self.group.resources(resource_type=Map)
     context['layers'] = self.group.resources(resource_type=Layer)
     # Flags the template can use to show member/manager-only controls.
     context['is_member'] = self.group.user_is_member(self.request.user)
     context['is_manager'] = self.group.user_is_role(self.request.user, "manager")
     # Replace the paginated object_list (set by the base class) with the
     # normalized profiles of its entries - presumably group members;
     # verify against the view's queryset.
     context['object_list'] = apply_normalizers({'users': [obj.user.profile for obj in context['object_list']]})
     # Unpaginated total from the full queryset, not just the current page.
     context['total'] = self.get_queryset().count()
     return context
Beispiel #4
0
 def get_context_data(self, **kwargs):
     """Assemble the context used to render the group detail template."""
     ctx = super(GroupDetailView, self).get_context_data(**kwargs)
     group = self.group
     requesting_user = self.request.user
     ctx['object'] = group
     ctx['maps'] = group.resources(resource_type=Map)
     ctx['layers'] = group.resources(resource_type=Layer)
     ctx['is_member'] = group.user_is_member(requesting_user)
     ctx['is_manager'] = group.user_is_role(requesting_user, "manager")
     # Swap the base class's object_list for its normalized user profiles.
     profiles = [entry.user.profile for entry in ctx['object_list']]
     ctx['object_list'] = apply_normalizers({'users': profiles})
     ctx['total'] = self.get_queryset().count()
     return ctx