Example #1
    def _post_save(self, instance, **kwargs):
        """
        Pushes changes to an instance into the cache, and removes invalid (changed)
        lookup values.
        """
        pk_name = instance._meta.pk.name
        pk_names = ('pk', pk_name)
        pk_val = instance.pk
        for key in self.cache_fields:
            if key in pk_names:
                continue
            # store pointers
            cache.set(self._get_from_cache_key(**{key: getattr(instance, key)}), pk_val, self.cache_ttl)

        # Ensure we don't serialize the database into the cache
        db = instance._state.db
        instance._state.db = None
        # store actual object
        cache.set(self._get_from_cache_key(**{pk_name: pk_val}), instance, self.cache_ttl)
        instance._state.db = db

        # Kill off any keys which are no longer valid
        for key in self.cache_fields:
            if key not in instance.__cache_data:
                continue
            value = instance.__cache_data[key]
            if value != getattr(instance, key):
                cache.delete(self._get_from_cache_key(**{key: value}))

        self._cache_state(instance)
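
The `_get_from_cache_key` and `_cache_state` helpers are not shown. A minimal sketch of the read side of this two-level scheme, assuming a `get_from_cache` method on the same manager (names and error handling are illustrative, not the original implementation):

    def get_from_cache(self, **kwargs):
        # Hypothetical read path for the pointer scheme above: a non-pk
        # cache field is first resolved to a pk via its pointer key, then
        # the instance is fetched from the key it was stored under.
        pk_name = self.model._meta.pk.name
        (key, value), = kwargs.items()
        if key not in ('pk', pk_name):
            value = cache.get(self._get_from_cache_key(**{key: value}))
            if value is None:
                raise self.model.DoesNotExist
        instance = cache.get(self._get_from_cache_key(**{pk_name: value}))
        if instance is None:
            raise self.model.DoesNotExist
        return instance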
Example #2
 def get_context_data(self, **kwargs):
     context = super(BugmailStatsView, self).get_context_data(**kwargs)
     json_stats = cache.get(self.cache_key)
     if not json_stats:
         wks_ago = (now() - timedelta(days=14)).date()
         stats = BugmailStat.objects.stats_for_range(wks_ago)
         stats_dict = {
             BugmailStat.TOTAL: defaultdict(int),
             BugmailStat.USED: defaultdict(int),
         }
         for s in stats:
             stats_dict[s.stat_type][date_to_js(s.date)] += s.count
         all_stats = {
             'total': [],
             'used': [],
             'x_axis': [],
         }
         stats_total = stats_dict[BugmailStat.TOTAL]
         stats_used = stats_dict[BugmailStat.USED]
         for d in date_range(wks_ago):
             d = date_to_js(d)
             all_stats['x_axis'].append(d)
             all_stats['total'].append([d, stats_total[d]])
             all_stats['used'].append([d, stats_used[d]])
         json_stats = json.dumps(all_stats)
         cache.set(self.cache_key, json_stats, 1800)  # 30 minutes
     context['stats'] = json_stats
     return context
Example #3
 def __unicode__(self):
     name = cache.get("association-%s" % self.user)
     if name is None:
         user = CoreUser.objects.using("actionkit").get(pk=self.user)
         name = "%s %s = %s" % (user.first_name, user.last_name, self.salesforce_id)
         cache.set("association-%s" % self.user, name, 6000)
     return name
Example #4
def cache_akara_version():
    try:
        version = urlopen(AKARA_VERSION_URL).read(100)
    except Exception:
        version = "Unknown"
    cache.set("akara_version", version, 60)
    return version
Example #5
def review_viewing(request):
    if 'addon_id' not in request.POST:
        return {}

    addon_id = request.POST['addon_id']
    user_id = request.amo_user.id
    current_name = ''
    is_user = 0
    key = '%s:review_viewing:%s' % (settings.CACHE_PREFIX, addon_id)
    interval = amo.EDITOR_VIEWING_INTERVAL

    # Check who is viewing.
    currently_viewing = cache.get(key)

    # If nobody is viewing or current user is, set current user as viewing
    if not currently_viewing or currently_viewing == user_id:
        # We want to save it for twice as long as the ping interval,
        # just to account for latency and the like.
        cache.set(key, user_id, interval * 2)
        currently_viewing = user_id
        current_name = request.amo_user.name
        is_user = 1
    else:
        current_name = UserProfile.objects.get(pk=currently_viewing).name

    return {'current': currently_viewing, 'current_name': current_name,
            'is_user': is_user, 'interval_seconds': interval}
Example #6
    def _put_cursor(self, zero_based_page, cursor):
        if not self.object_list.supports_cursors or cursor is None:
            return

        logging.info("Storing cursor for page: %s", zero_based_page)
        key = self._make_key(str(zero_based_page))
        cache.set(key, cursor)
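
`self._make_key` is not shown above; a minimal sketch of what a namespaced key builder might look like (the `cache_key_prefix` attribute is an assumption, chosen only to illustrate keeping cursors from different queries apart):

    def _make_key(self, suffix):
        # Hypothetical key builder: namespace cursors by queryset identity
        # so pages from different queries don't collide in the cache.
        return "cursor:%s:%s" % (self.cache_key_prefix, suffix)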
Example #7
    def get_erudit_object(self):
        """
        Returns the liberuditarticle's object associated with the considered Django object.
        """
        fedora_xml_content_key = 'fedora-object-{pid}'.format(pid=self.pid)
        fedora_xml_content = cache.get(fedora_xml_content_key, None)

        try:
            assert fedora_xml_content is None
            fedora_xml_content = self.fedora_object.xml_content
        except (RequestFailed, ConnectionError) as e:  # pragma: no cover
            logger.warning("Exception: {}, pid: {}".format(e, self.pid))
            if settings.DEBUG:
                # In DEBUG mode RequestFailed or ConnectionError errors can occur
                # really often because the dataset provided by the Fedora repository
                # is not complete.
                return
            raise
        except AssertionError:
            # We've fetched the XML content from the cache so we just pass
            pass
        else:
            # Stores the XML content of the object for further use
            cache.set(
                fedora_xml_content_key, fedora_xml_content, self.fedora_xml_content_cache_timeout)

        return self.erudit_class(fedora_xml_content) if fedora_xml_content else None
Example #8
    def get(self, request, pk):
        """
        接收id, 返回市或者区的数据
        :param request:
        :param pk:
        :return:
        """
        sub_data = cache.get('sub_area_' + pk)
        if not sub_data:
            try:
                # 1.获取市区的数据
                sub_model_list = Area.objects.filter(parent=pk)

                # 2.获取省份的数据
                parent_model = Area.objects.get(id=pk)

                sub_list = []

                # 3.遍历(拼接)
                for sub_model in sub_model_list:
                    sub_list.append({'id': sub_model.id,
                                     'name': sub_model.name})

                # 4.再次拼接
                sub_data = {'id': parent_model.id,
                            'name': parent_model.name,
                            'subs': sub_list}
                cache.set('sub_area_' + pk, sub_data, 3600)
            except Exception as e:
                return http.JsonResponse({'code': RETCODE.DBERR,
                                          'errmsg': '获取市区数据出错'})
        # 5.返回
        return http.JsonResponse({'code': RETCODE.OK,
                                  'errmsg': 'OK',
                                  'sub_data': sub_data})
Example #9
    def get(self, request):
        '''
        Fetches the province data from the database and returns it to the frontend.
        :param request:
        :return:
        '''

        province_list = cache.get('province_list')

        if not province_list:

            try:
                # 1. Fetch the provinces from the database (condition: parent is null)
                province_model_list = Area.objects.filter(parent__isnull=True)

                province_list = []

                # 2. Iterate over each result
                for province_model in province_model_list:
                    # 3. Build the format [{'id': '', 'name': ''}]
                    province_list.append({'id': province_model.id,
                                          'name': province_model.name})
                # Populate the cache
                cache.set('province_list', province_list, 3600)
            except Exception as e:
                return http.JsonResponse({'code': RETCODE.DBERR,
                                          'errmsg': 'database error'})

        # 4. Return the data
        return http.JsonResponse({'code': RETCODE.OK,
                                  'errmsg': 'ok',
                                  'province_list': province_list})
Example #10
def get_media(request, *args, **kwargs):
    """
    :param request:
    :return: list of media (movies and/or series)
    """
    category_id = request.GET.get('category_id') or None
    start_movies = request.GET.get('start_movies') or ''
    start_series = request.GET.get('start_series') or ''
    length = int(request.GET.get('length')) if request.GET.get('length') else None

    category = Category.objects.get(pk=category_id)
    if not length:
        length = category.previews_length
    response = []
    if category_id and length and (start_movies != '' or start_series != ''):
        start_movies = int(start_movies)
        start_series = int(start_series)
        cache_key = '%s-%d-%d-%d' % (category_id, start_movies, start_series, length)
        media = cache.get(cache_key)
        if not media:
            movies_length, series_length = get_movies_series_share(length)
            limit_movies = start_movies + movies_length
            limit_series = start_series + series_length
            media = list(Movie.objects.raw_query({'categories': {'$elemMatch': {'id': ObjectId(category.id)}}, 'visible': True}).order_by('-id')[start_movies:limit_movies])
            series = list(Series.objects.raw_query({'categories': {'$elemMatch': {'id': ObjectId(category.id)}}, 'visible': True}).order_by('-id')[start_series:limit_series])
            media.extend(series)
            cache.set(cache_key, media, 8 * 3600)
        if request.GET.get('shuffle'):
            shuffle(media)
        response = [item.to_dict() for item in media]
    return HttpResponse(
        json.dumps(response),
        content_type='application/json'
    )
Example #11
def get_recommended_for_single_category(request, *args, **kwargs):
    category_id = request.GET.get('category_id')
    response = []
    if category_id:
        category = Category.objects.get(pk=category_id)
        member = request.user
        cache_key = member.email + ':recommended-' + category_id
        recommended = cache.get(cache_key)
        if not recommended:
            exclude_list_keys = cache.get(member.email + ':' + EXCLUDE_LIST_KEYS_KEY)
            exclude_list = []
            if not exclude_list_keys:
                exclude_list_keys = set()
            else:
                for key in exclude_list_keys:
                    items = cache.get(key)
                    if items:
                        exclude_list.extend(items)
            recommended = get_recommended_for_category(category, category.previews_length, exclude_list)
            exclude_list_keys.add(cache_key)
            cache.set(cache_key, recommended)
            cache.set(member.email + ':' + EXCLUDE_LIST_KEYS_KEY, exclude_list_keys)
        response = [item.to_dict() for item in recommended]
    return HttpResponse(
        json.dumps(response),
        content_type='application/json'
    )
Example #12
def fetch_contact_field_results(org, contact_field, segment):
    from ureport.polls.models import CACHE_ORG_FIELD_DATA_KEY, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME
    from ureport.polls.models import UREPORT_RUN_FETCHED_DATA_CACHE_TIME

    start = time.time()
    print "Fetching  %s for %s with segment %s" % (contact_field, org.name, segment)

    cache_time = UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME
    if segment and segment.get('location', "") == "District":
        cache_time = UREPORT_RUN_FETCHED_DATA_CACHE_TIME

    try:
        segment = substitute_segment(org, segment)

        this_time = datetime.now()

        temba_client = org.get_temba_client()
        client_results = temba_client.get_results(contact_field=contact_field, segment=segment)

        results_data = temba_client_flow_results_serializer(client_results)
        cleaned_results_data = results_data

        print "Fetch took %ss" % (time.time() - start)

        key = CACHE_ORG_FIELD_DATA_KEY % (org.pk, slugify(unicode(contact_field)), slugify(unicode(segment)))
        cache.set(key, {'time': datetime_to_ms(this_time), 'results': cleaned_results_data}, cache_time)
    except Exception:
        client.captureException()
        import traceback
        traceback.print_exc()
Example #13
 def test_filefield_pickling(self):
     # Push an object into the cache to make sure it pickles properly
     obj = Storage()
     obj.normal.save("django_test.txt", ContentFile("more content"))
     obj.normal.close()
     cache.set("obj", obj)
     self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
Example #14
def fetch_reporter_group(org):
    start = time.time()
    print "Fetching reporter group for %s" % org.name
    try:
        from ureport.polls.models import CACHE_ORG_REPORTER_GROUP_KEY, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME

        this_time = datetime.now()

        reporter_group = org.get_config('reporter_group')
        if reporter_group:
            temba_client = org.get_temba_client()
            groups = temba_client.get_groups(name=reporter_group)

            key = CACHE_ORG_REPORTER_GROUP_KEY % (org.pk, slugify(unicode(reporter_group)))
            group_dict = dict()
            if groups:
                group = groups[0]
                group_dict = dict(size=group.size, name=group.name, uuid=group.uuid)
            cache.set(key,
                      {'time': datetime_to_ms(this_time), 'results': group_dict},
                      UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME)
    except Exception:
        client.captureException()
        import traceback
        traceback.print_exc()
    # delete the global count cache to force a recalculate at the end
    cache.delete(GLOBAL_COUNT_CACHE_KEY)

    print "Fetch %s reporter group took %ss" % (org.name, time.time() - start)
Example #15
def fetch_old_sites_count():
    import requests
    import re
    from ureport.polls.models import UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME

    start = time.time()
    this_time = datetime.now()
    linked_sites = list(getattr(settings, 'PREVIOUS_ORG_SITES', []))

    for site in linked_sites:
        count_link = site.get('count_link', "")
        if count_link:
            try:
                response = requests.get(count_link)
                response.raise_for_status()

                count = int(re.search(r'\d+', response.content).group())
                key = "org:%s:reporters:%s" % (site.get('name').lower(), 'old-site')
                cache.set(key,
                          {'time': datetime_to_ms(this_time), 'results': dict(size=count)},
                          UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME)
            except Exception:
                import traceback
                traceback.print_exc()

    # delete the global count cache to force a recalculate at the end
    cache.delete(GLOBAL_COUNT_CACHE_KEY)

    print "Fetch old sites counts took %ss" % (time.time() - start)
Example #16
 def set(cls, addon, using=None):
     q = (Review.objects.latest().filter(addon=addon).using(using)
          .values_list('rating').annotate(models.Count('rating')))
     counts = dict(q)
     ratings = [(rating, counts.get(rating, 0)) for rating in range(1, 6)]
     two_days = 60 * 60 * 24 * 2
     cache.set(cls.key(addon), ratings, two_days)
Example #17
def render_makeup_form(request, item=None):
    """ This function renders form for selling makeup. """

    initial = {}
    data = {
        'title': Makeup._meta.verbose_name,
        'policy_url': POLICY_URL
    }

    loc = request.COOKIES.get('ads_location', None)
    if item is None:
        if request.user.is_authenticated() and request.user.profile.phone:
            initial['phone'] = request.user.profile.phone
        if loc is not None:
            for k, v in settings.LOCATION:
                if loc == k and k != 'all':
                    initial['location'] = loc
    
    result = extra.get_form_or_err(request, forms.MakeupForm, item, **initial)
    if 'url' in result:
        return HttpResponseRedirect(result['url'])
    else:
        data.update(result)

    cache_key = 'makeup_feat_items_%s' % loc
    data['featured_items'] = cache.get(cache_key)

    if not data['featured_items']:
        data['featured_items'] = Makeup.get_featured(loc)
        cache.set(cache_key, data['featured_items'], 900)

    return render(request, FORM_TMPL, data)
Example #18
    def dispatch(self, request, *args, **kwargs):
        self.request = request
        self.args = args
        self.kwargs = kwargs

        if (self.response and not isinstance(self.response, HttpResponse)
           and not callable(self.response)):
            raise TypeError("The `response` keyword argument must "
                            "be a either HttpResponse instance or "
                            "callable with `request` argument")

        if self.request.method == self.method:
            key = self.cache_key()

            if cache.get(key):
                if callable(self.response):
                    return self.response(request)

                elif self.response:
                    return self.response

                else:
                    return HttpResponseForbidden('Try slowing down a little.')

            cache.set(key, 1, self.duration)

        return super(ThrottleMixin, self).dispatch(request, *args, **kwargs)
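
A hedged usage sketch for the mixin above: the `method`, `duration` and `cache_key` names follow how `dispatch` reads them, but the subclass and the key format are illustrative assumptions, not the mixin's actual defaults:

from django.views.generic import FormView

class ContactFormView(ThrottleMixin, FormView):
    # Illustrative values; the mixin's real defaults are not shown above.
    method = 'POST'
    duration = 60  # seconds a client is throttled after a POST

    def cache_key(self):
        # Hypothetical per-client key; the original implementation is not shown.
        return 'throttle:%s:%s' % (self.request.path,
                                   self.request.META.get('REMOTE_ADDR'))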
Example #19
 def get_by_name(cls, name):
     key = cls.get_cache_key(name)
     value = cache.get(key, CACHE_MISS)
     if value is CACHE_MISS:
         value = Configuration.__get_by_name(name)
         cache.set(key, value)
     return value
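
`CACHE_MISS` is a module-level sentinel that is not shown above. A minimal sketch of the pattern, assuming the sentinel is defined alongside this method:

from django.core.cache import cache

# A unique in-process object: cache.get() returns it only when the key is
# absent, so a legitimately cached None (or other falsy value) still counts
# as a hit.
CACHE_MISS = object()

value = cache.get('some-key', CACHE_MISS)
if value is CACHE_MISS:
    pass  # genuine miss; fall back to the database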
Example #20
def brand_tree(category=None):
    """
    Creates an unordered list of the brands.

    Example::

        <ul>
            <li>Books
                <ul>
                <li>Science Fiction
                    <ul>
                    <li>Space stories</li>
                    <li>Robot stories</li>
                    </ul>
                </li>
                <li>Non-fiction</li>
                </ul>
        </ul>
    """
    slug = category.slug if category else 'all'
    key = 'shop_tree_%s' % slug
    brands = cache.get(key)
    if brands is None:
        if category:
            brands = Brand.objects.filter(categories__slug=slug)
        else:
            brands = Brand.objects.all()
        cache.set(key, brands, 86000)  # ~24 hours
    return {"brands": brands, "category": slug}
    def get_lock(self):
        lock = cache.get(self.lock_key)
        if not lock:
            cache.set(self.lock_key, True, 60)
            return True

        return False
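
Note that the get-then-set in `get_lock` is not atomic: two processes can both observe a missing key and both "acquire" the lock. A safer sketch using `cache.add`, which only stores the key if it does not already exist (a suggested variant, not the original code):

    def get_lock(self):
        # cache.add() returns True only for the caller that actually stored
        # the key, so exactly one process wins the 60-second lock.
        return cache.add(self.lock_key, True, 60)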
Example #22
    def run(self, name, local, remote, cache_key, **kwargs):
        """
        The main work horse of the transfer task. Calls the transfer
        method with the local and remote storage backends as given
        with the parameters.

        :param name: name of the file to transfer
        :type name: str
        :param local: local storage class to transfer from
        :type local: dotted import path or :class:`~django:django.core.files.storage.Storage` subclass
        :param remote: remote storage class to transfer to
        :type remote: dotted import path or :class:`~django:django.core.files.storage.Storage` subclass
        :param cache_key: cache key to set after a successful transfer
        :type cache_key: str
        :rtype: task result
        """
        result = self.transfer(name, local, remote, **kwargs)

        if result is True:
            cache.set(cache_key, True)
        elif result is False:
            self.retry(args=[name, local, remote, cache_key], kwargs=kwargs)
        else:
            raise ValueError("Task '%s' did not return True/False but %s" %
                             (self.__class__, result))
        return result
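
A hedged sketch of how a caller might use `cache_key` to avoid queuing duplicate transfers; `transfer_task` stands in for however the task above is registered and is an assumption:

from django.core.cache import cache

def ensure_transferred(name, local, remote, cache_key):
    # Hypothetical guard: only enqueue the task when no successful
    # transfer has been recorded under cache_key yet.
    if not cache.get(cache_key):
        transfer_task.delay(name, local, remote, cache_key)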
Example #23
 def get_sites(self):
     ret = cache.get(PIZZA_SITES_KEY)
     if ret is None:
         ret = Site.objects.all().values()
         cache.set(PIZZA_SITES_KEY, ret)

     return ret
Example #24
def podcast_episodes(request, id):
    podcast = get_object_or_404(Podcast, id=id)
    context = {}
    episodes = Episode.objects.filter(podcast=podcast)

    if (
        not episodes.exists()
        or episodes.count() == episodes.filter(title__isnull=True).count()
    ):
        download_episodes_task(id)

    if podcast.link is None and podcast.summary is None and podcast.subtitle is None:
        cache_key = "download_podcast_metadata:{}".format(podcast.id)
        if not cache.get(cache_key):
            download_podcast_metadata(podcast.id)
            cache.set(cache_key, True, 60)

    context["episodes"] = []
    for episode in episodes.order_by("-published"):
        context["episodes"].append(
            {
                "duration": episode.duration,
                "published": episode.published,
                "title": episode.title,
                "summary": episode.summary,
                "guid": episode.guid,
            }
        )
    return http.JsonResponse(context)
Example #25
    def get_from_cache(self, key):
        """
        Fetches the snippet from cache.

        Returns a `Snippet` object or `None`.

        This method adds every queried key to the cache to ensure that misses
        don't continue to generate database lookups. Since `None` is the
        default return value for a cache miss, the method uses -1 as the miss
        value. If -1 is returned, we know that the value is not present in the
        database either.
        """
        snippet = cache.get('snippet:{0}'.format(key))

        if snippet == -1:
            return None

        if snippet is None:
            try:
                snippet = Snippet.objects.get(key=key)
            except Snippet.DoesNotExist:
                cache.set('snippet:{0}'.format(key), -1)
            else:
                cache.set('snippet:{0}'.format(key), snippet)

        return snippet
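
Because misses are cached as -1, a snippet created after a miss would otherwise keep returning `None`. A hedged sketch of invalidation via a post_save signal (the signal wiring is an assumption, not part of the original):

from django.core.cache import cache
from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, sender=Snippet)
def refresh_snippet_cache(sender, instance, **kwargs):
    # Overwrite any cached miss marker (-1) so the next lookup sees the
    # freshly saved snippet instead of the negative entry.
    cache.set('snippet:{0}'.format(instance.key), instance)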
Example #26
 def balance(self):
     """return power balance"""
     balance = cache.get('district_%s_balance' % self.id)
     if not balance:
         reps = self.representative_set.select_related('house', 'party').all()
         balance = {
             'all' : {
                 'D' : 0,
                 'R' : 0,
             },
             'House' : {
                 'D' : 0,
                 'R' : 0,
             },
             'Senate' : {
                 'D' : 0,
                 'R' : 0,
             },
         }
         # do balance for all, house and senate
         for rep in reps:
             balance['all'][rep.party.code] += 1
             balance[rep.house.name][rep.party.code] += 1
         cache.set('district_%s_balance' % self.id, balance)
     return balance
Example #27
 def finish(self, file_id):
     file_id = int(file_id)
     newfiles = {k: v for k, v in self.files().items() if k != file_id}
     cache.set(self.file_key, newfiles)
     if not newfiles:
         cache.delete(self.version_key)
Example #28
 def default_site(self):
     ret = cache.get(PIZZA_DEFAULT_SITE_KEY)
     if ret is None:
         ret = Site.objects.filter(id=settings.SITE_ID).values()[0]
         cache.set(PIZZA_DEFAULT_SITE_KEY, ret)

     return ret
Example #29
    def __call__(self, request, *args, **kwargs):
        is_json = self.feed_type is JSONGenerator
        jsoncallback = request.GET.get('jsoncallback')
        is_jsonp = is_json and bool(jsoncallback)
        vary = (
            is_json,
            is_jsonp,
            request.GET.get('count'),
            request.GET.get('startIndex'),
            # We need to vary on start-index as well since
            # :meth:`_get_opensearch_data` uses it as an alternate source for
            # startIndex.
            request.GET.get('start-index'),
            request.GET.get('startPage'),
            repr(args),
            repr(kwargs),
        )
        cache_key = self._get_cache_key(request, vary)

        response = cache.get(cache_key)
        if response is None:
            response = super(BaseVideosFeed, self).__call__(request,
                                                            *args, **kwargs)
            if is_jsonp:
                response = HttpResponse(u"%s(%s);" % (jsoncallback,
                            response.content), content_type='text/javascript')
            cache.set(cache_key, response, 15*60)
        return response
Example #30
def _get_github_events(author):
    """Retrieves all the public events for the given GitHub author.

    Events are retrieved from GitHub's API at
    https://api.github.com/users/<user>/events

    GitHub has an unauthenticated rate limit of 60 requests per hour. To
    avoid exhausting the limit too quickly, the events are cached. A GitHub
    ETag is also stored and used in the header of the request so that GitHub
    will not count the request towards the rate limit if events are unchanged.
    """
    headers = {'Connection': 'close'}

    if len(author.github_etag) > 0:
        headers['If-None-Match'] = author.github_etag

    url = 'https://api.github.com/users/%s/events' % author.github_user

    response = requests.get(url, headers=headers)

    # We didn't get a response or we've reached our GitHub limit of 60.
    if not response or int(response.headers["X-RateLimit-Remaining"]) == 0:
        return []

    if response.status_code == 200:
        # Store the etag for future use
        author.github_etag = response.headers['ETag']
        author.save()

        events = []

        for event in response.json():
            content = _build_github_event_text(event, author)
            if content is not None:
                # Construct the GitHub event post
                post = Post(content=content,
                            content_type=Post.PLAIN_TEXT,
                            visibility=Post.PRIVATE,
                            author=author,
                            publication_date=dateutil.parser.parse(event['created_at']))
                events.append(post.getJsonObj())

        # Cache these results in the event that we've reached our rate
        # limit, or we get a 304 because the events haven't changed.
        cache.set(author.user.id, events, None)
        return events
    elif response.status_code == 304:
        # Results haven't changed, let's just return the cache, if one exists,
        # otherwise, we need to get it again.
        cached = cache.get(author.user.id)
        if cached is None:
            author.github_etag = ''
            return _get_github_events(author)
        else:
            return cached
    else:
        # The API returned an unexpected status code.
        return []
Example #31
def get_course_enrollment_details(course_id):
    """Get the course modes for course. Also get enrollment start and end date, invite only, etc.

    Given a course_id, return a serializable dictionary of properties describing course enrollment information.

    Args:
        course_id (str): The Course to get enrollment information for.

    Returns:
        A serializable dictionary of course enrollment information.

    Example:
        >>> get_course_enrollment_details("edX/DemoX/2014T2")
        {
            "course_id": "edX/DemoX/2014T2",
            "enrollment_end": 2014-12-20T20:18:00Z,
            "course_modes": [
                {
                    "slug": "honor",
                    "name": "Honor Code Certificate",
                    "min_price": 0,
                    "suggested_prices": "",
                    "currency": "usd",
                    "expiration_datetime": null,
                    "description": null
                }
            ],
            "enrollment_start": 2014-10-15T20:18:00Z,
            "invite_only": False
        }

    """
    cache_key = u"enrollment.course.details.{course_id}".format(
        course_id=course_id)

    cached_enrollment_data = None
    try:
        cached_enrollment_data = cache.get(cache_key)
    except Exception:
        # The cache backend could raise an exception (for example, memcache keys that contain spaces)
        log.exception(
            u"Error occurred while retrieving course enrollment details from the cache"
        )

    if cached_enrollment_data:
        log.info(u"Get enrollment data for course %s (cached)", course_id)
        return cached_enrollment_data

    course_enrollment_details = _data_api().get_course_enrollment_info(
        course_id)

    try:
        cache_time_out = getattr(settings,
                                 'ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT', 60)
        cache.set(cache_key, course_enrollment_details, cache_time_out)
    except Exception:
        # Catch any unexpected errors during caching.
        log.exception(
            u"Error occurred while caching course enrollment details for course %s",
            course_id)
        raise errors.CourseEnrollmentError(
            u"An unexpected error occurred while retrieving course enrollment details."
        )

    log.info(u"Get enrollment data for course %s", course_id)
    return course_enrollment_details
Example #32
 def authentication_failed(self, request, **credentials):
     cache_key = self._get_login_attempts_cache_key(request, **credentials)
     data = cache.get(cache_key, [])
     dt = timezone.now()
     data.append(time.mktime(dt.timetuple()))
     cache.set(cache_key, data, app_settings.LOGIN_ATTEMPTS_TIMEOUT)
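
The cached list is a series of attempt timestamps. A hedged sketch of the matching check (illustrative; the real allauth logic differs in detail):

 def _is_login_attempts_exceeded(self, request, **credentials):
     # Illustrative counterpart: block once the number of recorded
     # attempts within the timeout window reaches the configured limit.
     cache_key = self._get_login_attempts_cache_key(request, **credentials)
     data = cache.get(cache_key, [])
     return len(data) >= app_settings.LOGIN_ATTEMPTS_LIMIT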
Example #33
 def set(self, key, value):
     cache.set(key, value, self.expires)
Example #34
def user_data(request, pk=None, *args, **kwargs):
    UserModel = get_user_model()

    try:
        user = UserModel.objects.get(username=pk)
    except UserModel.DoesNotExist:
        raise exceptions.NotFound(_('User does not exist'))

    lang = get_language() or 'en'

    def entry_to_data(entry):
        return {
            'id': entry.pk,
            'title': entry.title,
            'subtitle': entry.subtitle or None,
            'type': entry.type.get('label').get(lang),
            'role': entry.owner_role_display,
            'location': entry.location_display,
            'year': entry.year_display,
        }

    def to_data_dict(label, data, sort=True):
        if sort:
            data = sorted(data, key=lambda x: x.get('year') or '0000', reverse=True) if data else []
        return {
            'label': label,
            'data': data,
        }

    published_entries_query = Entry.objects.filter(owner=user, published=True, type__isnull=False).filter(
        Q(data__contains={'architecture': [{'source': user.username}]})
        | Q(data__contains={'authors': [{'source': user.username}]})
        | Q(data__contains={'artists': [{'source': user.username}]})
        | Q(data__contains={'winners': [{'source': user.username}]})
        | Q(data__contains={'granted_by': [{'source': user.username}]})
        | Q(data__contains={'jury': [{'source': user.username}]})
        | Q(data__contains={'music': [{'source': user.username}]})
        | Q(data__contains={'conductors': [{'source': user.username}]})
        | Q(data__contains={'composition': [{'source': user.username}]})
        | Q(data__contains={'organisers': [{'source': user.username}]})
        | Q(data__contains={'lecturers': [{'source': user.username}]})
        | Q(data__contains={'design': [{'source': user.username}]})
        | Q(data__contains={'commissions': [{'source': user.username}]})
        | Q(data__contains={'editors': [{'source': user.username}]})
        | Q(data__contains={'publishers': [{'source': user.username}]})
        | Q(data__contains={'curators': [{'source': user.username}]})
        | Q(data__contains={'fellow_scholar': [{'source': user.username}]})
        | Q(data__contains={'funding': [{'source': user.username}]})
        | Q(data__contains={'organisations': [{'source': user.username}]})
        | Q(data__contains={'project_lead': [{'source': user.username}]})
        | Q(data__contains={'project_partnership': [{'source': user.username}]})
        | Q(data__contains={'software_developers': [{'source': user.username}]})
        | Q(data__contains={'directors': [{'source': user.username}]})
        | Q(data__contains={'contributors': [{'source': user.username}]})
    )

    cache_key = f'user_data__{pk}_{lang}'

    cache_time, entries_count, usr_data = cache.get(cache_key, (None, None, None))

    if cache_time:
        last_modified = published_entries_query.aggregate(Max('date_changed'))['date_changed__max']
        if last_modified and last_modified < cache_time and entries_count == published_entries_query.count():
            return Response(usr_data)

    title_key = 'title'
    subtitle_key = 'subtitle'
    type_key = 'type'
    role_key = 'role'
    location_key = 'location'
    year_key = 'year'

    usr_data = {
        'entry_labels': {
            title_key: get_preflabel('title'),
            subtitle_key: get_preflabel('subtitle'),
            type_key: get_preflabel('type'),
            role_key: get_preflabel('role'),
            location_key: get_preflabel('location'),
            year_key: get_preflabel('year'),
        },
        'data': [],
    }

    document_schema = DocumentSchema()
    conference_schema = ConferenceSchema()
    event_schema = EventSchema()
    publications_label = get_altlabel_collection('collection_document_publication', lang=lang)
    monographs_label = get_altlabel_collection('collection_monograph', lang=lang)
    monographs_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_monograph')
    monographs_data = []
    composite_volumes_label = get_altlabel_collection('collection_composite_volume', lang=lang)
    composite_volumes_types = get_collection_members(
        'http://base.uni-ak.ac.at/portfolio/taxonomy/collection_composite_volume'
    )
    composite_volumes_data = []
    articles_label = get_altlabel_collection('collection_article', lang=lang)
    articles_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_article')
    articles_data = []
    chapters_label = get_altlabel_collection('collection_chapter', lang=lang)
    chapters_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_chapter')
    chapters_data = []
    reviews_label = get_altlabel_collection('collection_review', lang=lang)
    reviews_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_review')
    reviews_data = []
    general_documents_publications_label = get_altlabel_collection(
        'collection_general_document_publication', lang=lang
    )
    general_documents_publications_data = []
    research_projects_label = get_altlabel_collection('collection_research_project', lang=lang)
    research_projects_types = get_collection_members(
        'http://base.uni-ak.ac.at/portfolio/taxonomy/collection_research_project'
    )
    research_projects_data = []
    awards_and_grants_label = get_altlabel_collection('collection_awards_and_grants', lang=lang)
    awards_and_grants_types = get_collection_members(
        'http://base.uni-ak.ac.at/portfolio/taxonomy/collection_awards_and_grants'
    )
    awards_and_grants_data = []
    fellowships_visiting_affiliations_label = get_altlabel_collection(
        'collection_fellowship_visiting_affiliation',
        lang=lang,
    )
    fellowships_visiting_affiliations_types = get_collection_members(
        'http://base.uni-ak.ac.at/portfolio/taxonomy/collection_fellowship_visiting_affiliation'
    )
    fellowships_visiting_affiliations_data = []
    exhibitions_label = get_altlabel_collection('collection_exhibition', lang=lang)
    exhibitions_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_exhibition')
    exhibitions_data = []
    supervisions_of_theses_label = get_altlabel_collection('collection_supervision_of_theses', lang=lang)
    supervisions_of_theses_types = get_collection_members(
        'http://base.uni-ak.ac.at/portfolio/taxonomy/collection_supervision_of_theses'
    )
    supervisions_of_theses_data = []
    teaching_label = get_altlabel_collection('collection_teaching', lang=lang)
    teaching_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_teaching')
    teaching_data = []
    education_qualifications_label = get_altlabel_collection('collection_education_qualification', lang=lang)
    education_qualifications_types = get_collection_members(
        'http://base.uni-ak.ac.at/portfolio/taxonomy/collection_education_qualification'
    )
    education_qualifications_data = []
    conferences_symposiums_label = get_altlabel_collection('collection_conference_symposium', lang=lang)
    conferences_symposiums_types = get_collection_members(
        'http://base.uni-ak.ac.at/portfolio/taxonomy/collection_conference'
    )
    conferences_symposiums_data = []
    conference_contributions_label = get_altlabel_collection('collection_conference_contribution', lang=lang)
    conference_contributions_types = get_collection_members(
        'http://base.uni-ak.ac.at/portfolio/taxonomy/collection_conference_contribution'
    )
    conference_contributions_data = []
    architectures_label = get_altlabel_collection('collection_architecture', lang=lang)
    architectures_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_architecture')
    architectures_data = []
    audios_label = get_altlabel_collection('collection_audio', lang=lang)
    audios_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_audio')
    audios_data = []
    concerts_label = get_altlabel_collection('collection_concert', lang=lang)
    concerts_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_concert')
    concerts_data = []
    design_label = get_altlabel_collection('collection_design', lang=lang)
    design_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_design')
    design_data = []
    events_label = get_altlabel_collection('collection_event', lang=lang)
    events_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_event')
    visual_and_verbal_presentations_label = get_altlabel_collection('collection_visual_verbal_presentation', lang=lang)
    visual_and_verbal_presentations_types = get_collection_members(
        'http://base.uni-ak.ac.at/portfolio/taxonomy/collection_visual_verbal_presentation'
    )
    visual_and_verbal_presentations_data = []
    memberships_label = get_altlabel_collection('collection_membership', lang=lang)
    memberships_data = []
    expert_functions_label = get_altlabel_collection('collection_expert_function', lang=lang)
    expert_functions_data = []
    general_activities_label = get_altlabel_collection('collection_general_activity', lang=lang)
    general_activities_data = []
    festivals_label = get_altlabel_collection('collection_festival', lang=lang)
    festivals_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_festival')
    festivals_data = []
    images_label = get_altlabel_collection('collection_image', lang=lang)
    images_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_image')
    images_data = []
    performances_label = get_altlabel_collection('collection_performance', lang=lang)
    performances_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_performance')
    performances_data = []
    sculptures_label = get_altlabel_collection('collection_sculpture', lang=lang)
    sculptures_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_sculpture')
    sculptures_data = []
    software_label = get_altlabel_collection('collection_software', lang=lang)
    software_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_software')
    software_data = []
    videos_label = get_altlabel_collection('collection_film_video', lang=lang)
    videos_types = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_film_video')
    videos_data = []
    general_publications_label = 'Sonstige Veröffentlichungen' if lang == 'de' else 'General Publications'
    general_publications_data = []

    published_entries = published_entries_query.order_by('title')

    for e in published_entries:
        entry_type = e.type.get('source')
        # Publications
        if entry_type in DOCUMENT_TYPES:
            e_data = document_schema.load(e.data).data
            # Monographs
            if (
                entry_type in monographs_types
                and e_data.authors is not None
                and any(i.source == user.username for i in e_data.authors)
            ):
                monographs_data.append(entry_to_data(e))
            # Composite Volumes
            elif (
                entry_type in composite_volumes_types
                and e_data.editors is not None
                and any(i.source == user.username for i in e_data.editors)
            ):
                composite_volumes_data.append(entry_to_data(e))
            # Articles
            elif (
                entry_type in articles_types
                and e_data.authors is not None
                and any(i.source == user.username for i in e_data.authors)
            ):
                articles_data.append(entry_to_data(e))
            # Chapters
            elif (
                entry_type in chapters_types
                and e_data.authors is not None
                and any(i.source == user.username for i in e_data.authors)
            ):
                chapters_data.append(entry_to_data(e))
            # Reviews
            elif (
                entry_type in reviews_types
                and e_data.authors is not None
                and any(i.source == user.username for i in e_data.authors)
            ):
                reviews_data.append(entry_to_data(e))
            # Supervisions of theses
            elif (
                entry_type in supervisions_of_theses_types
                and e_data.contributors is not None
                and any(
                    i.source == user.username
                    and i.roles is not None
                    and any(
                        r.source == 'http://base.uni-ak.ac.at/portfolio/vocabulary/expertizing'
                        or r.source == 'http://base.uni-ak.ac.at/portfolio/vocabulary/supervisor'
                        for r in i.roles
                    )
                    for i in e_data.contributors
                )
            ):
                supervisions_of_theses_data.append(entry_to_data(e))
            # General Documents/Publications
            else:
                general_documents_publications_data.append(entry_to_data(e))
        elif entry_type in conferences_symposiums_types:
            e_data = conference_schema.load(e.data).data
            # Teaching
            if (
                entry_type in teaching_types + education_qualifications_types
                and e_data.lecturers is not None
                and any(i.source == user.username for i in e_data.lecturers)
            ):
                teaching_data.append(entry_to_data(e))
            # Education & Qualifications
            elif (
                entry_type in education_qualifications_types
                and e_data.contributors is not None
                and any(
                    i.source == user.username
                    and i.roles is not None
                    and any(r.source == 'http://base.uni-ak.ac.at/portfolio/vocabulary/attendance' for r in i.roles)
                    for i in e_data.contributors
                )
            ):
                education_qualifications_data.append(entry_to_data(e))
            # Conferences & Symposiums
            else:
                conferences_symposiums_data.append(entry_to_data(e))
        elif entry_type in events_types:
            e_data = event_schema.load(e.data).data
            # Visual and verbal presentations
            if entry_type in visual_and_verbal_presentations_types:
                visual_and_verbal_presentations_data.append(entry_to_data(e))
            # Memberships
            elif e_data.contributors is not None and any(
                i.source == user.username
                and i.roles is not None
                and any(
                    r.source == 'http://base.uni-ak.ac.at/portfolio/vocabulary/member'
                    or r.source == 'http://base.uni-ak.ac.at/portfolio/vocabulary/board_member'
                    or r.source == 'http://base.uni-ak.ac.at/portfolio/vocabulary/advisory_board'
                    for r in i.roles
                )
                for i in e_data.contributors
            ):
                memberships_data.append(entry_to_data(e))
            # Expert Functions
            elif e_data.contributors is not None and any(
                i.source == user.username
                and i.roles is not None
                and any(r.source == 'http://base.uni-ak.ac.at/portfolio/vocabulary/expertizing' for r in i.roles)
                for i in e_data.contributors
            ):
                expert_functions_data.append(entry_to_data(e))
            # General Activities
            else:
                general_activities_data.append(entry_to_data(e))
        # Research Projects
        elif entry_type in research_projects_types:
            research_projects_data.append(entry_to_data(e))
        # Awards and Grants
        elif entry_type in awards_and_grants_types:
            awards_and_grants_data.append(entry_to_data(e))
        # Fellowships and visiting affiliations
        elif entry_type in fellowships_visiting_affiliations_types:
            fellowships_visiting_affiliations_data.append(entry_to_data(e))
        # Exhibitions
        elif entry_type in exhibitions_types:
            exhibitions_data.append(entry_to_data(e))
        # Conference contributions
        elif entry_type in conference_contributions_types:
            conference_contributions_data.append(entry_to_data(e))
        # Architectures
        elif entry_type in architectures_types:
            architectures_data.append(entry_to_data(e))
        # Audios
        elif entry_type in audios_types:
            audios_data.append(entry_to_data(e))
        # Concerts
        elif entry_type in concerts_types:
            concerts_data.append(entry_to_data(e))
        # Design
        elif entry_type in design_types:
            design_data.append(entry_to_data(e))
        # Festivals
        elif entry_type in festivals_types:
            festivals_data.append(entry_to_data(e))
        # Images
        elif entry_type in images_types:
            images_data.append(entry_to_data(e))
        # Performances
        elif entry_type in performances_types:
            performances_data.append(entry_to_data(e))
        # Sculptures
        elif entry_type in sculptures_types:
            sculptures_data.append(entry_to_data(e))
        # Software
        elif entry_type in software_types:
            software_data.append(entry_to_data(e))
        # Films/Videos
        elif entry_type in videos_types:
            videos_data.append(entry_to_data(e))
        # General Publications
        else:
            general_publications_data.append(entry_to_data(e))

    # Publications
    publications_data = []

    for lbl, d in [
        (monographs_label, monographs_data),
        (composite_volumes_label, composite_volumes_data),
        (articles_label, articles_data),
        (chapters_label, chapters_data),
        (reviews_label, reviews_data),
        (general_documents_publications_label, general_documents_publications_data),
    ]:
        if d:
            publications_data.append(to_data_dict(lbl, d))

    # Teaching
    teaching_collected_data = []

    for lbl, d in [
        (supervisions_of_theses_label, supervisions_of_theses_data),
        (teaching_label, teaching_data),
    ]:
        if d:
            teaching_collected_data.append(to_data_dict(lbl, d))

    # Activities
    activities_data = []

    for lbl, d in [
        (memberships_label, memberships_data),
        (expert_functions_label, expert_functions_data),
        (visual_and_verbal_presentations_label, visual_and_verbal_presentations_data),
        (general_activities_label, general_activities_data),
    ]:
        if d:
            activities_data.append(to_data_dict(lbl, d))

    # Create return data in desired order
    for lbl, d, sort in (
        (publications_label, publications_data, False),
        (research_projects_label, research_projects_data, True),
        (awards_and_grants_label, awards_and_grants_data, True),
        (fellowships_visiting_affiliations_label, fellowships_visiting_affiliations_data, True),
        (exhibitions_label, exhibitions_data, True),
        (teaching_label, teaching_collected_data, False),
        (conferences_symposiums_label, conferences_symposiums_data, True),
        (conference_contributions_label, conference_contributions_data, True),
        (architectures_label, architectures_data, True),
        (audios_label, audios_data, True),
        (concerts_label, concerts_data, True),
        (design_label, design_data, True),
        (education_qualifications_label, education_qualifications_data, True),
        (events_label, activities_data, False),
        (festivals_label, festivals_data, True),
        (images_label, images_data, True),
        (performances_label, performances_data, True),
        (sculptures_label, sculptures_data, True),
        (software_label, software_data, True),
        (videos_label, videos_data, True),
        (general_publications_label, general_publications_data, True),
    ):
        if d:
            usr_data['data'].append(to_data_dict(lbl, d, sort=sort))

    usr_data = usr_data if usr_data['data'] else {'data': []}

    entries_count = published_entries_query.count()

    cache.set(cache_key, (timezone.now(), entries_count, usr_data), 86400)

    return Response(usr_data)
Example #35
def get_product_filters(category, product_filter, price_filter, sorting):
    """Returns the next product filters based on products which are in the given
    category and within the result set of the current filters.
    """
    if price_filter:
        ck_price_filter = "%s|%s" % (price_filter["min"], price_filter["max"])
    else:
        ck_price_filter = ""

    if product_filter:
        ck_product_filter = ""
        for pf in product_filter:
            ck_product_filter += pf[0] + "|"
            ck_product_filter += "|".join(pf[1])
    else:
        ck_product_filter = ""

    cache_key = "%s-productfilters-%s-%s-%s-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX,
        category.slug, ck_product_filter, ck_price_filter, sorting)

    result = cache.get(cache_key)
    if result is not None:
        return result

    properties_mapping = get_property_mapping()
    options_mapping = get_option_mapping()

    # The base for the calculation of the next filters is the filtered products
    products = get_filtered_products_for_category(
        category, product_filter, price_filter, sorting)
    if not products:
        return []

    # ... and their variants
    all_products = []
    for product in products:
        all_products.append(product)
        all_products.extend(product.variants.filter(active=True))

    # Get the ids for use within the custom SQL
    product_ids = ", ".join([str(p.id) for p in all_products])

    # Create dict out of already set filters
    set_filters = dict(product_filter)

    cursor = connection.cursor()
    cursor.execute("""SELECT DISTINCT property_id
                      FROM catalog_productpropertyvalue""")

    property_ids = ", ".join([str(p[0]) for p in cursor.fetchall()])

    # If there are either no products or no property ids, there can also be no
    # product filters.
    if not product_ids or not property_ids:
        return []

    result = []
    ########## Number Fields ###################################################

    cursor = connection.cursor()
    cursor.execute("""SELECT property_id, min(value_as_float), max(value_as_float)
                      FROM catalog_productpropertyvalue
                      WHERE type=%s
                      AND product_id IN (%s)
                      AND property_id IN (%s)
                      GROUP BY property_id""" % (PROPERTY_VALUE_TYPE_FILTER, product_ids, property_ids))

    for row in cursor.fetchall():

        property = properties_mapping[row[0]]

        if not property.is_number_field or not property.filterable:
            continue

        # If the filter for a property is already set, we display only the
        # set filter.
        if str(row[0]) in set_filters.keys():
            values = set_filters[str(row[0])]
            result.append({
                "id": row[0],
                "position": property.position,
                "object": property,
                "name": property.name,
                "title": property.title,
                "unit": property.unit,
                "items": [{"min": float(values[0]), "max": float(values[1])}],
                "show_reset": True,
                "show_quantity": False,
            })
            continue

        # Otherwise we display all steps.
        items = _calculate_steps(product_ids, property, row[1], row[2])

        result.append({
            "id": row[0],
            "position": property.position,
            "object": property,
            "name": property.name,
            "title": property.title,
            "unit": property.unit,
            "show_reset": False,
            "show_quantity": True,
            "items": items,
        })

    ########## Select Fields ###################################################
    # Count entries for current filter
    cursor = connection.cursor()
    cursor.execute("""SELECT property_id, value, parent_id
                      FROM catalog_productpropertyvalue
                      WHERE type=%s
                      AND product_id IN (%s)
                      AND property_id IN (%s)""" % (PROPERTY_VALUE_TYPE_FILTER, product_ids, property_ids))

    already_count = {}
    amount = {}
    for row in cursor.fetchall():
        # We count a property/value pair just one time per *product*. For
        # "products with variants" this could be stored several times within the
        # catalog_productpropertyvalue. Imagine a variant with two properties
        # color and size:
        #   v1 = color:red / size: s
        #   v2 = color:red / size: l
        # But we want to count color:red just once, as the product with
        # variants is displayed, not the variants.

        if "%s%s%s" % (row[2], row[0], row[1]) in already_count:
            continue
        already_count["%s%s%s" % (row[2], row[0], row[1])] = 1

        if row[0] not in amount:
            amount[row[0]] = {}

        if row[1] not in amount[row[0]]:
            amount[row[0]][row[1]] = 0

        amount[row[0]][row[1]] += 1

    cursor.execute("""SELECT property_id, value
                      FROM catalog_productpropertyvalue
                      WHERE product_id IN (%s)
                      AND property_id IN (%s)
                      AND type=%s
                      GROUP BY property_id, value""" % (product_ids, property_ids, PROPERTY_VALUE_TYPE_FILTER))

    # Group properties and values (for displaying)
    set_filters = dict(product_filter)
    properties = {}
    for row in cursor.fetchall():

        property = properties_mapping[row[0]]

        if property.is_number_field or not property.filterable or not row[1]:
            continue

        if row[0] not in properties:
            properties[row[0]] = []

        # If the property is a select field we want to display the name of the
        # option instead of the id.
        position = 1
        if property.is_select_field:
            try:
                name = options_mapping[row[1]].name
                position = options_mapping[row[1]].position
            except KeyError:
                name = row[1]
        else:
            name = row[1]

        value = row[1]

        # If the property is within the set filters, we just show the selected value
        if str(row[0]) in set_filters.keys():
            if str(row[1]) in set_filters.values():
                properties[row[0]] = [{
                    "id": row[0],
                    "value": value,
                    "name": name,
                    "position": position,
                    "quantity": amount[row[0]][row[1]],
                    "show_quantity": False,
                }]
            continue
        else:
            if row[0] not in properties:
                properties[row[0]] = []
            properties[row[0]].append({
                "id": row[0],
                "value": value,
                "name": name,
                "position": position,
                "quantity": amount[row[0]][row[1]],
                "show_quantity": True,
            })

    # Transform the grouped properties into a list of dicts
    set_filter_keys = set_filters.keys()

    for property_id, values in properties.items():

        property = properties_mapping[property_id]

        # Sort the values. NOTE: This has to be done here (and not via SQL)
        # as the value field of the property is a char field and can't be
        # ordered properly for numbers.
        values.sort(key=lambda a: a["position"])

        result.append({
            "id": property_id,
            "position": property.position,
            "unit": property.unit,
            "show_reset": str(property_id) in set_filter_keys,
            "name": property.name,
            "title": property.title,
            "items": values,
        })

    result.sort(key=lambda a: a["position"])
    cache.set(cache_key, result)

    return result
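A note on the two raw queries above: they splice product_ids and property_ids into the SQL with % formatting, which only stays safe while those strings are built from trusted integer ids. Below is a minimal sketch (not part of the original module, helper name is hypothetical) of the same count query using placeholder parameters, assuming the ids arrive as Python lists of ints rather than pre-joined strings:

from django.db import connection

def count_filter_values(product_ids, property_ids, value_type):
    # One %s placeholder per id, so the database driver does the quoting.
    in_products = ", ".join(["%s"] * len(product_ids))
    in_properties = ", ".join(["%s"] * len(property_ids))
    sql = ("SELECT property_id, value, parent_id "
           "FROM catalog_productpropertyvalue "
           "WHERE type = %%s AND product_id IN (%s) AND property_id IN (%s)"
           % (in_products, in_properties))
    cursor = connection.cursor()
    cursor.execute(sql, [value_type] + list(product_ids) + list(property_ids))
    return cursor.fetchall()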
Example #36
0
    def get(self, request):
        """提供省市区数据查询"""
        # """获取查询参数area_id"""
        area_id = request.GET.get('area_id')
        # 判断area_id有没有值,如果没有值说明要查询所有省
        if area_id is None:
            # """查询所有省数据""""
            # 当要查询所有省数据时 ,先尝试性的去redis中查询,如果没有再去sql查询
            province_list = cache.get('province_list')
            if not province_list:

                province_qs = Area.objects.filter(parent=None)
                # Convert the model instances in the queryset into dicts.
                province_list = []  # Holds one dict per province.
                for province_model in province_qs:
                    province_list.append({
                        'id': province_model.id,
                        'name': province_model.name
                    })

                # Cache the result to avoid hitting MySQL on every request.
                cache.set('province_list', province_list, 3600)
            # Respond.
            return http.JsonResponse({
                'code': RETCODE.OK,
                'errmsg': 'OK',
                'province_list': province_list
            })
        else:
            """查询指定省下面的所有市或者指定市下面的所有区数据"""
            # 获取指定省或市的缓存数据
            sub_data = cache.get('sub_area' + area_id)
            if sub_data is None:

                # Fetch the single province or city identified by area_id.
                try:
                    parent_model = Area.objects.get(id=area_id)
                except Area.DoesNotExist:
                    return http.JsonResponse({
                        'code': RETCODE.PARAMERR,
                        'errmsg': 'area_id does not exist'
                    })

                # Then query all administrative regions directly below that
                # province or city via the reverse foreign key (subs).
                subs_qs = parent_model.subs.all()
                # A list to hold the dict data of every sub-region.
                sub_list = []
                # Walk the queryset, turning each model instance into a dict.
                for sub_model in subs_qs:
                    sub_list.append({
                        'id': sub_model.id,
                        'name': sub_model.name
                    })
                # Assemble the response payload.
                sub_data = {
                    'id': parent_model.id,
                    'name': parent_model.name,
                    'subs': sub_list
                }

                # Cache the assembled data under the same key read above.
                cache.set('sub_area_' + area_id, sub_data, 3600)

            # Respond.
            return http.JsonResponse({
                'code': RETCODE.OK,
                'errmsg': 'OK',
                'sub_data': sub_data
            })
Example #37
0
def initchannel(channel_name):
    cache.set('channel_name', channel_name, 60 * 60)
Example #38
0
def blog_feeds():
    blogs = cache.get('blogs')
    if not blogs:
        # Evaluate the queryset so hits and misses both return a list;
        # cache.set without a timeout uses the backend's default timeout.
        blogs = list(Blog.objects.all())
        cache.set('blogs', blogs)
    return {'blogs': blogs}
Example #39
0
 def save(self, message, time=60 * 5):
     cache.set(self.key, message, time)
Example #40
0
def _dirty_cache_and_prune_messages(instance=None, **kwargs):
    cache.set('osso.userchat.channel%d' % instance.channel.id, instance.id,
              CACHE_TIME)
    instance.channel.prune()
Example #41
0
 def test_1_store(self):
     """Store a value in the cache."""
     cache.set(CACHE_KEY, 'smoo')
Example #42
0
 def save(self, time=60):
     cache.set(self.cache_key(), self.data, time)
Example #43
0
def set_cached_value(obj, fn, value, timeout=settings.OBJECT_CACHE_TIMEOUT):
    key = iri_to_uri(obj.get_cachekey() + ":" + fn)
    return cache.set(key, value, timeout)
Example #44
0
 def test_redis_cache(self):
     cache.set("test", 1)
     self.assertEqual(cache.get("test"), 1)
Example #45
0
    def render(self, context):
        if self.is_variable:
            real_key = template.Variable(self.key).resolve(context)
        else:
            real_key = self.key

        if isinstance(self.template_name, template.Variable):
            real_tpl = self.template_name.resolve(context)
        else:
            real_tpl = self.template_name

        context['chunk_key'] = real_key
        if self.content_type == 'edit':
            context['tag'] = self.tag
        sources = dict(text=Chunk,
                       edit=Chunk,
                       image=Image,
                       media=Media,
                       group=Group)
        model = sources[self.content_type]

        obj = None
        # The caching branch below also needs the key when cache_time is
        # None, so compute it unconditionally.
        cache_key = CACHE_PREFIX + self.content_type + get_language() + real_key
        # try to get cached object
        if self.cache_time > 0:
            obj = cache.get(cache_key)
        # otherwise get it from database
        if obj is None:
            if self.content_type == 'group':
                obj = model.objects.filter(key=real_key)
            else:
                try:
                    obj = model.objects.get(key=real_key)
                except model.DoesNotExist:
                    # this place we should create an empty object in database
                    obj = model(key=real_key)
                    if self.content_type == 'image':
                        # image object must exist, so save the stub picture
                        filename = join(dirname(__file__), '..', 'static',
                                        'chunks', 'stub.png')
                        with open(filename, 'rb') as f:
                            obj.image.save(basename(filename),
                                           File(f),
                                           save=True)
                    else:
                        obj.content = real_key
                        obj.save()

            # cache the object
            if self.cache_time == 0:
                logger.debug("Don't cache %s" % (real_key, ))
            else:
                if self.cache_time is None or self.cache_time == 'None':
                    logger.debug("Caching %s for the cache's default timeout" %
                                 real_key)
                    cache.set(cache_key, obj)
                else:
                    logger.debug("Caching %s for %s seconds" %
                                 (real_key, str(self.cache_time)))
                    cache.set(cache_key, obj, int(self.cache_time))

        # Eventually we want to pass the whole context to the template so that
        # users have the maximum of flexibility of what to do in there.
        if self.with_template:
            new_ctx = template.Context(context)
            if hasattr(obj, 'content'):
                obj.content = Template(obj.content).render(new_ctx)
            new_ctx.update({'obj': obj})
            tpl = template.loader.get_template(real_tpl)
            return tpl.render(new_ctx)
        elif hasattr(obj, 'image'):
            return obj.image.url
        elif hasattr(obj, 'content'):
            return obj.content
        else:
            return None
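Worth noting in the example above: the cache key concatenates CACHE_PREFIX, the content type, the active language and the chunk key, so each language caches its own rendered object. The same scheme as a tiny standalone helper (hypothetical name, not in the original):

from django.utils.translation import get_language

def chunk_cache_key(prefix, content_type, real_key):
    # One cache entry per (content type, active language, key) triple.
    return '%s%s%s%s' % (prefix, content_type, get_language(), real_key)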
Example #46
0
    def top_scorers(cls,
                    days=30,
                    language=None,
                    project=None,
                    limit=5,
                    offset=0):
        """Returns users with the top scores.

        :param days: period of days to account for scores.
        :param language: limit results to the given language code.
        :param project: limit results to the given project code.
        :param limit: limit results to this number of users. Values other
            than positive numbers will return the entire result set.
        """
        cache_kwargs = {
            'days': days,
            'language': language,
            'project': project,
            'limit': limit,
            'offset': offset,
        }
        cache_key = make_method_key(cls, 'top_scorers', cache_kwargs)

        top_scorers = cache.get(cache_key, None)
        if top_scorers is not None:
            return top_scorers

        now = timezone.now()
        past = now + datetime.timedelta(-days)

        lookup_kwargs = {
            'creation_time__range': [past, now],
        }

        if language is not None:
            lookup_kwargs.update({
                'submission__translation_project__language__code':
                language,
            })

        if project is not None:
            lookup_kwargs.update({
                'submission__translation_project__project__code':
                project,
            })

        meta_user_ids = cls.objects.meta_users().values_list('id', flat=True)
        top_scores = ScoreLog.objects.values("user").filter(
            **lookup_kwargs).exclude(user__pk__in=meta_user_ids, ).annotate(
                total_score=Sum('score_delta'),
                suggested=Sum(
                    Case(When(action_code=TranslationActionCodes.SUGG_ADDED,
                              then='wordcount'),
                         default=0,
                         output_field=models.IntegerField())),
                translated=Sum(
                    Case(When(translated_wordcount__isnull=False,
                              then='translated_wordcount'),
                         default=0,
                         output_field=models.IntegerField())),
                reviewed=Sum(
                    Case(When(
                        action_code__in=[
                            TranslationActionCodes.SUGG_REVIEWED_ACCEPTED,
                            TranslationActionCodes.REVIEWED,
                            TranslationActionCodes.EDITED,
                        ],
                        translated_wordcount__isnull=True,
                        then='wordcount',
                    ),
                         default=0,
                         output_field=models.IntegerField())),
            ).order_by('-total_score')[offset:]

        if isinstance(limit, (int, long)) and limit > 0:
            top_scores = top_scores[:limit]

        users = dict((user.id, user) for user in cls.objects.filter(
            pk__in=[item['user'] for item in top_scores]))

        top_scorers = []
        for item in top_scores:
            item['user'] = users[item['user']]
            item['public_total_score'] = _humanize_score(item['total_score'])
            top_scorers.append(item)

        cache.set(cache_key, top_scorers, 60)
        return top_scorers
Example #47
0
 def set_task_error_msg(self, error_msg):
     logger.info('Set task error msg')
     cache.set(self.CACHE_KEY_LDAP_USERS_SYNC_TASK_ERROR_MSG, error_msg,
               None)
Example #48
0
 def set_cache(self):
     cache.set(self.__class__.__name__, self)
Example #49
0
 def set_users(self, users):
     logger.info('Set ldap users to cache, count: {}'.format(len(users)))
     cache.set(self.CACHE_KEY_USERS, users, None)
Example #50
0
 def cache_repost_count(self):
     self._repost_count = self.reposts.count()
     cache.set(
         self.cache_key("repost_count"), self._repost_count, settings.CACHE_EXPIRE
     )
Example #51
0
def _writelocked_store_langs(video_id, langs):
    cache_key = _video_writelocked_langs_key(video_id)
    cache.set(cache_key, langs, 5 * 60)
    return langs
Example #52
0
 def set_task_status(self, status):
     logger.info('Set task status: {}'.format(status))
     cache.set(self.CACHE_KEY_LDAP_USERS_SYNC_TASK_STATUS, status, None)
Example #53
0
def cache_set_url(hash: str, url: str) -> None:
    # Store the mapping for one week; cache.set returns None.
    return cache.set('short:url:{}'.format(hash), url, 60 * 60 * 24 * 7)
Example #54
0
 def set_cache(self, token):
     key = self.CACHE_KEY_USER_RESET_PASSWORD_PREFIX.format(token)
     cache.set(key, {'id': self.id, 'email': self.email}, 3600)
Example #55
0
def get_instance(model, instance_or_pk, timeout=None, using=None):
    """
    Returns the ``model`` instance with a primary key of ``instance_or_pk``.

    If the data is cached it will be returned from there, otherwise the regular
    Django ORM is queried for this instance and the data stored in the cache.

    If omitted, the timeout value defaults to
    ``settings.CACHE_TOOLBOX_DEFAULT_TIMEOUT`` instead of 0 (zero).

    Example::

        >>> get_instance(User, 1) # Cache miss
        <User: lamby>
        >>> get_instance(User, 1) # Cache hit
        <User: lamby>
        >>> User.objects.get(pk=1) == get_instance(User, 1)
        True

    """
    pk = getattr(instance_or_pk, 'pk', instance_or_pk)
    key = instance_key(model, instance_or_pk)
    data = cache.get(key)

    if data is not None:
        try:
            # Try and construct instance from dictionary
            instance = model(pk=pk, **data)

            # Ensure instance knows that it already exists in the database,
            # otherwise we will fail any uniqueness checks when saving the
            # instance.
            instance._state.adding = False

            # Specify database so that instance is setup correctly. We don't
            # namespace cached objects by their origin database, however.
            instance._state.db = using or DEFAULT_DB_ALIAS

            return instance
        except Exception:
            # Error when deserialising - remove from the cache; we will
            # fall back and return the underlying instance
            cache.delete(key)

    # Use the default manager so we are never filtered by a .get_queryset()
    instance = model._default_manager.using(using).get(pk=pk)

    data = {}
    for field in instance._meta.fields:
        # Harmless to save, but saves space in the dictionary - we already know
        # the primary key when we lookup
        if field.primary_key:
            continue

        if field.get_internal_type() == 'FileField':
            # Avoid problems with serializing FileFields
            # by only serializing the file name
            file = getattr(instance, field.attname)
            data[field.attname] = file.name
        else:
            data[field.attname] = getattr(instance, field.attname)

    if timeout is None:
        timeout = app_settings.CACHE_TOOLBOX_DEFAULT_TIMEOUT

    cache.set(key, data, timeout)

    return instance
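get_instance caches a plain field dictionary rather than a pickled model, which keeps entries small but means stale data survives until the timeout unless something evicts it. A hedged companion sketch (assumed, not part of the original module) that drops the entry whenever an instance is saved or deleted, reusing the instance_key helper referenced above:

from django.core.cache import cache
from django.db.models.signals import post_delete, post_save

def _invalidate_cached_instance(sender, instance, **kwargs):
    cache.delete(instance_key(sender, instance.pk))

def bind_instance_invalidation(model):
    # Call once per model whose instances are served by get_instance.
    post_save.connect(_invalidate_cached_instance, sender=model, weak=False)
    post_delete.connect(_invalidate_cached_instance, sender=model, weak=False)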
Example #56
0
 def test_already_queued(self, switch_is_active, delay):
     switch_is_active.return_value = True
     cache.set(settings.WIKI_REBUILD_TOKEN, True)
     schedule_rebuild_kb()
     assert cache.get(settings.WIKI_REBUILD_TOKEN)
     assert not delay.called
Example #57
0
 def cache_set(key, value, timeout):
     cache.set(key, value, timeout=timeout)
Example #58
0
def cache_set_hash_taken(hash: str) -> None:
    # Mark the hash as taken with no expiry; cache.set returns None.
    return cache.set('hash:taken:{}'.format(hash), 1, None)
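Examples #53 and #58 are the write side of a URL shortener: one maps a hash to its target for a week, the other marks the hash as taken with no expiry. The read side is not shown; a plausible counterpart under the same key scheme (cache_get_url is a hypothetical name):

from django.core.cache import cache

def cache_get_url(hash: str):
    # Returns the stored URL, or None if the hash is unknown or expired.
    return cache.get('short:url:{}'.format(hash))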
Example #59
0
 def set_cache(cls, key, value, timeout):
     result = cache.set(key, value, timeout)
     if cls.cache_database_fallback:
         DatabaseCache.set(key, value, timeout)
     return result
Example #60
0
    def get_or_create_list(self, key, paramdict, forcerender=True):
        # Returns a list of rendered objects.
        cache = memcache.get(key)
        if cache is not None and not forcerender:
            cached_list = cache[0]
            tot_items = cache[1]
        elif cache is None or forcerender:
            if paramdict == {}:
                key, rtype, paramdict = interpret_hash(key)
            ctype_id = paramdict.get('TYPE_KEY', None)
            obj_id = paramdict.get('OBJ_KEY', None)
            start = paramdict.get('START_KEY', None)
            end = paramdict.get('END_KEY', None)
            dimension = paramdict.get('DIM_KEY', None)
            ctype_list = paramdict.get('CTYPE_KEY', None)
            phasekey = paramdict.get('PHASE_KEY', None)

            if ctype_id is not None and obj_id is not None:
                content_type = ContentType.objects.get(pk=ctype_id)
                parent = content_type.get_object_for_this_type(pk=obj_id)
            else:
                parent = None

            if start is None or end is None:
                paramdict['START_KEY'] = 0
                paramdict['END_KEY'] = 10

            if dimension is None:
                dimension = 'h'
                paramdict['DIM_KEY'] = 'hn'

            # Later these functions could be dispatched via some loosely coupled method.
            if self.template == 'issues':
                func = get_ranked_list
                update = True
            elif self.template == 'comments':
                func = get_comments
                update = False
            elif self.template == 'yea':
                func = get_argument_list
                dimension = "yea"
                update = False
            elif self.template == 'nay':
                func = get_argument_list
                dimension = "nay"
                update = False
            elif self.template == 'children':
                func = get_ranked_list
                update = False
            elif self.template == 'topics':
                func = get_topics
                update = True
            elif self.template == 'users':
                func = get_users
                update = True
            else:
                func = get_ranked_list
                update = False
            #TODO
            #elif self.template == 'users':
            #    func = get_topics
            #    update = True

            kwr = {
                'parent': parent,
                'start': paramdict['START_KEY'],
                'end': paramdict['END_KEY'],
                'dimension': dimension,
                'ctype_list': ctype_list
            }
            if phasekey is not None:
                kwr['phase'] = phasekey
            cached_list, tot_items = func(**kwr)
            if update:
                # Store all the encoded pages for tasks/update_ranks.
                codes = memcache.get("rank_update_codes")
                newkey, rendertype, paramdict = interpret_hash(key)
                if codes is None:
                    codes = {}
                codes[key] = paramdict
                memcache.set("rank_update_codes", codes)
            # Save the newly rendered list.
            memcache.set(key, (cached_list, tot_items))
        return cached_list, tot_items