Example #1
	def get_possible_score(self):
		if self.type == 'o':
			""" Caches question point total for every request. (The only problem would be if
				questions are changed after point total has been calculated.  But this cannot
				happen, because point total is calculated on each request, and questions can
				only be changed in separate requests.)
			"""
			if not hasattr(self, '_question_point_total'):
				self._question_point_total = None
				try:
					cache_key = "Assignment-OnlineQuiz-%d" % self.id
					quiz = cache.get(cache_key)
					if quiz is None:
						quiz = self.quiz
						cache.add(cache_key, quiz)
					
					cache_key = "OnlineQuiz-Point-Sum-%d" % quiz.id
					self._question_point_total = cache.get(cache_key)
					if self._question_point_total is None:
						question_agg = quiz.questions.aggregate(point_sum=Sum("points"))
						self._question_point_total = question_agg['point_sum'] or 0
						cache.add(cache_key, self._question_point_total)
				except:
					self._question_point_total = 0
				
			return self._question_point_total
		else:
			return self.possible_score
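On Django 1.9+ the cache read-through half of the method above collapses into cache.get_or_set (a stock cache API); a hedged sketch reusing the same names, not the project's actual method:

from django.core.cache import cache
from django.db.models import Sum

def get_possible_score_sketch(assignment):
    # Quizzes compute their total from question points; everything else
    # keeps a stored possible_score (mirrors the branch above).
    if assignment.type != 'o':
        return assignment.possible_score
    cache_key = "OnlineQuiz-Point-Sum-%d" % assignment.quiz.id
    return cache.get_or_set(
        cache_key,
        lambda: assignment.quiz.questions.aggregate(
            point_sum=Sum("points"))['point_sum'] or 0)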
Example #2
    def cache_objects(self, objects):
        """Cache query_key => objects, then update the flush lists."""
        # Adding to the flush lists has a race condition: if simultaneous
        # processes are adding to the same list, one of the query keys will be
        # dropped.  Using redis would be safer.
        query_key = self.query_key()
        cache.add(query_key, objects, timeout=self.timeout)

        # Add this query to the flush list of each object.  We include
        # query_flush so that other things can be cached against the queryset
        # and still participate in invalidation.
        flush_keys = [o.flush_key() for o in objects]
        query_flush = flush_key(self.query_string)

        flush_lists = collections.defaultdict(list)
        for key in flush_keys:
            flush_lists[key].extend([query_key, query_flush])
        flush_lists[query_flush].append(query_key)

        # Add each object to the flush lists of its foreign keys.
        for obj in objects:
            obj_flush = obj.flush_key()
            for key in map(flush_key, obj._cache_keys()):
                if key != obj_flush:
                    flush_lists[key].append(obj_flush)
        add_to_flush_list(flush_lists)
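The comment above flags the flush-list race and suggests redis would be safer; a hedged sketch of that safer variant, assuming a redis-py client (add_to_flush_list_atomic is an illustrative name, not cache-machine's API):

import redis

r = redis.Redis()

def add_to_flush_list_atomic(flush_lists):
    # SADD is atomic per key, so concurrent processes can no longer
    # drop each other's query keys the way a list read-modify-write can.
    pipe = r.pipeline()
    for key, members in flush_lists.items():
        pipe.sadd(key, *members)
    pipe.execute()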
Example #3
    def similar_users(self, values=False):
        """
    Returns a cached list of similar users for this user.
    """
        # UNCACHED VERSION
        # if values:
        #   return Recommender.objects.get_similar_users(self.user, User.objects.all(), Beer.objects.filter(rating__isnull=False))
        # else:
        #   return [ item[1] for item in Recommender.objects.get_similar_users(self.user, User.objects.all(), Beer.objects.filter(rating__isnull=False))]

        # CACHED VERSION.
        cache_key = slugify(u"similar_users_%s" % self.__unicode__())
        similar_users = cache.get(cache_key)
        if similar_users == []:
            return similar_users
        if not similar_users:
            similar_users = Recommender.objects.get_similar_users(
                self.user, User.objects.all(), Beer.objects.filter(rating__isnull=False)
            )
            cache.add(cache_key, similar_users, 7200)
            similar_users = cache.get(cache_key)
        if values:
            return similar_users
        else:
            return [item[1] for item in similar_users]
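The == [] check above is how this code tells a cached empty result from a cache miss, but it only works for lists. A hedged rewrite using a sentinel default (cache.get's second argument is standard Django API):

_MISS = object()

def similar_users_sketch(profile, values=False):
    cache_key = slugify(u"similar_users_%s" % profile.__unicode__())
    similar_users = cache.get(cache_key, _MISS)
    if similar_users is _MISS:  # genuinely not cached yet
        similar_users = Recommender.objects.get_similar_users(
            profile.user, User.objects.all(),
            Beer.objects.filter(rating__isnull=False))
        cache.add(cache_key, similar_users, 7200)
    return similar_users if values else [item[1] for item in similar_users]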
Example #4
    def test_unicode(self):
        # Unicode values can be cached
        stuff = {
            'ascii': 'ascii_value',
            'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
            'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
            'ascii2': {'x': 1}
        }
        # Test `set`
        for (key, value) in stuff.items():
            cache.set(key, value)
            assert cache.get(key) == value

        # Test `add`
        for (key, value) in stuff.items():
            cache.delete(key)
            cache.add(key, value)
            assert cache.get(key) == value

        # Test `set_many`
        for (key, value) in stuff.items():
            cache.delete(key)
        cache.set_many(stuff)
        for (key, value) in stuff.items():
            assert cache.get(key) == value
Example #5
File: utils.py Project: vden/TsoguNG
def direct_cast(cls):
	""" приводим тип к реальному типу потомка.
	в рекурсивной функции -- особая уличная магия определения
	верного класса для последовательно наследованных типов """

	key = "direct_cast_%s"%str(cls.id)
	data = cache.get(key)
	ocls = cls

	if data: return data

	def try_cast_by_list(d, cls):
		for c in d.keys():
			try:
				cls = eval("cls.%s"%c.lower())
				return try_cast_by_list(d[c], cls) or cls
			except:
				r = try_cast_by_list(d[c], cls)
				if r is None: continue
				return r

	cls = try_cast_by_list(TOP_CLASS_DICT, cls)
	if not cls:
		return ocls
	cache.add(key, cls, 4*60*60)
	return cls
Example #6
 def test_add(self):
     # A key can be added to a cache
     result = cache.add("addkey1", "value")
     assert result
     result = cache.add("addkey1", "newvalue")
     assert not result
     assert cache.get("addkey1") == "value"
Example #7
File: models.py Project: Uwanja/kitsune
    def related_documents(self):
        """Return documents that are 'morelikethis' one."""
        # Only documents in default IA categories have related.
        if self.category not in settings.IA_DEFAULT_CATEGORIES:
            return []

        # First try to get the results from the cache
        key = 'wiki_document:related_docs:%s' % self.id
        documents = cache.get(key)
        if documents:
            return documents

        try:
            documents = self.morelikethis(
                self.get_document_id(self.id),
                s=self.get_s().filter(
                    model=self.get_model_name(),
                    document_locale=self.locale,
                    document_is_archived=False,
                    document_category__in=settings.IA_DEFAULT_CATEGORIES),
                fields=[
                    'document_title',
                    'document_summary',
                    'document_content'])
            cache.add(key, documents)
        except (ESTimeoutError, ESMaxRetryError, ESException):
            documents = []

        return documents
Example #8
File: utils.py Project: vden/TsoguNG
def get_object_by_url(path_info):
	from core.models import BaseObject

	path_info = path_info.strip('/')
	hsh = "get_object_by_url_" + hashlib.md5(path_info.encode('utf8')).hexdigest()
	# check whether it is already in the cache
	data = cache.get(hsh)
	if data:
		print "GOBU: get from cache", data.id
		return data


	head = BaseObject.objects.filter(parent__isnull=True)[0]
	current_page = head

	path = filter(lambda x: x!='', path_info.split("/"))

	for p in path:
		pages = BaseObject.objects.filter(parent=current_page)
		for i in pages:
			#TODO: optimize the algorithm for picking the next page,
			# otherwise this becomes hopeless once there are a couple of thousand child objects.
			current_page = (i.slug == p) and i or None
			#	print current_page, i.slug,
			if current_page: break
		if not current_page:
			raise Http404()

	cache.add(hsh, current_page.direct_cast(), 4*60*60)
	return current_page.direct_cast()
Example #9
def AddBookListCache(mkey, book_list, **kw):
    cache_list = []
    dlist = []
    for book in book_list:
        try:
            book.writer
        except:
            continue
        dkey = book.writer.writer + ' ' + book.title
        if not dkey in dlist: 
            # description content
            content = GetBookContent(book, 'content.opf')
            dlist.append(dkey)
            cache_list.append({
               'id': book.id,
               'counter': (kw.get('page_num', 1)-1) * kw.get('per_page', 0) + (len(cache_list)+1),
               'writer': {'id': book.writer.id, 'writer': book.writer.writer, 'count': book.writer.count},
               'subject': {'id': book.subject.id, 'subject': book.subject.subject, 'count': book.subject.count},
               'title': book.title,
               'tr_wrt': to_translit(book.writer.writer),
               'tr_subj': to_translit(book.subject.subject),
               'tr_titl': to_translit(book.title),
               'content': truncatewords(content, 80),
               'index': book.index,
               'date': book.date })
    cache.add('books:' + str(mkey), str(cache_list))
Example #10
def user_login_failure(identifier):
    cache.add(key(identifier), 0)
    count = cache.incr(key(identifier))
    if app_settings.A2_LOGIN_FAILURE_COUNT_BEFORE_WARNING and count >= app_settings.A2_LOGIN_FAILURE_COUNT_BEFORE_WARNING:
        logger = logging.getLogger('authentic2.user_login_failure')
        logger.warning(u'user %s failed to login more than %d times in a row',
                       identifier, count)
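A failure counter seeded with cache.add and bumped with cache.incr usually needs a matching reset on successful login; a hedged sketch of that companion handler (the name is illustrative, key() as used above):

def user_login_success(identifier):
    cache.delete(key(identifier))  # clear the failure streak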
Example #11
 def cache_results(self, results):
     """
     Create invalidation signals for these results in the form of CacheBotSignals.
     A CacheBotSignal stores a model and it's accessor path to self.queryset.model.
     """
     # cache the results   
     invalidation_dict = {}
     if cache.add(self.result_key, results, conf.CACHE_SECONDS):
         
         invalidation_dict.update(dict([(key, self.result_key) for key in self.get_invalidation_keys(results)]))
 
         for child, negate in self.queryset._get_where_clause(self.queryset.query.where):     
             constraint, lookup_type, value_annotation, params = child                
             for model_class, accessor_path in self._get_join_paths(constraint.alias, constraint.col):
                 if self._is_valid_flush_path(accessor_path):  
                     invalidation_key = self._register_signal(model_class, accessor_path, lookup_type, negate, params)
                     invalidation_dict[invalidation_key] = self.result_key
                         
                 for join_tuple in self.queryset.query.join_map.keys():
                     if join_tuple[0] == model_class._meta.db_table and self._is_valid_flush_path(accessor_path): 
                         model_klass, m2m = self.queryset._get_model_class_from_table(join_tuple[1]) 
                         invalidation_key = self._register_signal(model_klass, join_tuple[3], lookup_type, negate, params)
                         invalidation_dict[invalidation_key] = self.result_key
         
         # need to add and append to prevent race conditions
         # replace this with batch operations later
         for flush_key, flush_list in invalidation_dict.iteritems():
             added = cache.add(flush_key, self.result_key, 0)
             if not added:
                 cache.append(flush_key, ',%s' % self.result_key)
Example #12
    def test_delete(self):
        key = 'test_delete_1'
        cache.add(key, 'ololo')
        cache.delete(key)
        self.assertIsNone(cache.get(key))

        cache.delete('ololo delete key')
Example #13
File: views.py Project: mweisman/geonode
def _search(query):
    # to support super fast paging results, cache the intermediates
    results = None
    cache_time = 60
    if query.cache:
        key = query.cache_key()
        results = cache.get(key)
        if results:
            # try to put it back again; cache.add is a no-op while the key
            # still exists, so this only re-primes an entry that has expired
            cache.add(key, results, cache_time)

    if not results:
        results = combined_search_results(query)
        facets = results['facets']
        results = apply_normalizers(results)
        if query.cache:
            dumped = zlib.compress(pickle.dumps((results, facets)))
            logger.debug("cached search results %s", len(dumped))
            cache.set(key, dumped, cache_time)

    else:
        results, facets = pickle.loads(zlib.decompress(results))

    # @todo - sorting should be done in the backend as it can optimize if
    # the query is restricted to one model. has implications for caching...
    if query.sort == 'title':
        keyfunc = lambda r: r.title().lower()
    elif query.sort == 'last_modified':
        old = datetime(1,1,1)
        keyfunc = lambda r: r.last_modified() or old
    else:
        keyfunc = lambda r: getattr(r, query.sort)()
    results.sort(key=keyfunc, reverse=not query.order)

    return results, facets
Example #14
 def favorite_varieties(self):
     """
 An algorithmically generated list of a user's favorite beer categories.
 """
     cache_key = slugify(u"favorite_varieties_%s" % self.__unicode__())
     favorite_varieties = cache.get(cache_key)
     if favorite_varieties == []:
         return favorite_varieties
     if not favorite_varieties:
         faves = self.user.faves.filter(withdrawn=False)
         reviews = self.user.review_created.all()
         varieties = Category.objects.all()
         favorite_varieties = {}
         for fave in faves:
             if not favorite_varieties.has_key(fave.content_object.variety):
                 favorite_varieties[fave.content_object.variety] = 5
             favorite_varieties[fave.content_object.variety] = favorite_varieties[fave.content_object.variety] + 5
         for review in reviews:
             if not favorite_varieties.has_key(review.beer.variety):
                 favorite_varieties[review.beer.variety] = 1
             if review.rating > 80:
                 favorite_varieties[review.beer.variety] = favorite_varieties[review.beer.variety] + 5
             elif review.rating > 60:
                 favorite_varieties[review.beer.variety] = favorite_varieties[review.beer.variety] + 4
             elif review.rating > 40:
                 favorite_varieties[review.beer.variety] = favorite_varieties[review.beer.variety] + 3
             elif review.rating > 20:
                 favorite_varieties[review.beer.variety] = favorite_varieties[review.beer.variety] + 2
             else:
                 favorite_varieties[review.beer.variety] = favorite_varieties[review.beer.variety] + 1
         items = [(value, key) for key, value in favorite_varieties.items()]
         items.sort(reverse=True)
         cache.add(cache_key, [item[1] for item in items], 28800)
         favorite_varieties = cache.get(cache_key)
     return favorite_varieties
Example #15
    def similar_beers_by_reviews(self):
        """
    Returns a cached list of beers similar to this one, based on reviews.
    i.e. "People who liked this beer also liked..."
    """
        # UNCACHED VERSION
        # if self.rating:
        #   return [recommendation[1] for recommendation in Recommender.objects.get_similar_items(self, User.objects.all(), Beer.objects.filter(rating__isnull=False))]
        # else:
        #   return []

        # CACHED VERSION.
        if self.rating:
            cache_key = slugify(u"similar_beers_by_reviews_%s" % self.slug)
            similar_beers_by_reviews = cache.get(cache_key)
            if similar_beers_by_reviews == []:
                return similar_beers_by_reviews
            if not similar_beers_by_reviews:
                cache.add(
                    cache_key,
                    [
                        recommendation[1]
                        for recommendation in Recommender.objects.get_similar_items(
                            self, User.objects.all(), Beer.objects.filter(rating__isnull=False)
                        )
                    ],
                    7200,
                )
                similar_beers_by_reviews = cache.get(cache_key)
            return similar_beers_by_reviews
        else:
            return []
Example #16
    def recommended_for_users(self):
        """
    Returns a cached list of users that this beer is recommended for.
    """
        # UNCACHED VERSION
        # if self.rating:
        #   return Recommender.objects.get_best_users_for_item(self, User.objects.all(), Beer.objects.filter(rating__isnull=False))
        # else:
        #   return []

        # CACHED VERSION.
        if self.rating:
            cache_key = slugify(u"recommended_for_users_%s" % self.slug)
            recommended_for_users = cache.get(cache_key)
            if recommended_for_users == []:
                return recommended_for_users
            if not recommended_for_users:
                cache.add(
                    cache_key,
                    Recommender.objects.get_best_users_for_item(
                        self, User.objects.all(), Beer.objects.filter(rating__isnull=False)
                    ),
                    7200,
                )
                recommended_for_users = cache.get(cache_key)
            return recommended_for_users
        else:
            return []
Example #17
 def iterator(self):
     super_iterator = super(CachingQuerySet, self).iterator()
     while True:
         obj = super_iterator.next()
         # Use cache.add instead of cache.set to prevent race conditions (see CachingManager)
         cache.add(obj.cache_key, obj)
         yield obj
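The write side above pairs naturally with a read side that consults the per-object cache before the database; a hedged sketch (the key layout is illustrative, not CachingQuerySet's actual cache_key scheme):

def get_cached(model, pk):
    key = '%s:%s' % (model._meta.db_table, pk)
    obj = cache.get(key)
    if obj is None:
        obj = model.objects.get(pk=pk)
        cache.add(key, obj)  # add, not set, mirroring iterator() above
    return obj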
Example #18
File: facets.py Project: icaaq/kitsune
def _documents_for(locale, topics, products=None):
    """Returns a list of articles that apply to passed in topics and products.

    """
    # First try to get the results from the cache
    documents = cache.get(_documents_for_cache_key(locale, topics, products))
    if documents:
        statsd.incr('wiki.facets.documents_for.cache')
        return documents

    try:
        # Then try ES
        documents = _es_documents_for(locale, topics, products)
        cache.add(
            _documents_for_cache_key(locale, topics, products), documents)
        statsd.incr('wiki.facets.documents_for.es')
    except (ESMaxRetryError, ESTimeoutError, ESException):
        # Finally, hit the database (through cache machine)
        # NOTE: The documents will be the same ones returned by ES
        # but they won't be in the correct sort (by votes in the last
        # 30 days). It is better to return them in the wrong order
        # than not to return them at all.
        documents = _db_documents_for(locale, topics, products)
        statsd.incr('wiki.facets.documents_for.db')

    return documents
Example #19
File: views.py Project: magatz/pybbm
    def get_queryset(self):
        if not perms.may_view_topic(self.request.user, self.topic):
            raise PermissionDenied
        if self.request.user.is_authenticated() or not defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER:
            Topic.objects.filter(id=self.topic.id).update(views=F('views') + 1)
        else:
            cache_key = util.build_cache_key('anonymous_topic_views', topic_id=self.topic.id)
            cache.add(cache_key, 0)
            if cache.incr(cache_key) % defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER == 0:
                Topic.objects.filter(id=self.topic.id).update(views=F('views') +
                                                                    defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER)
                cache.set(cache_key, 0)
        qs = self.topic.posts.all().select_related('user')
        if defaults.PYBB_PROFILE_RELATED_NAME:
            if defaults.PYBB_POST_SORT_REVERSE:
                qs = qs.select_related('user__%s' % defaults.PYBB_PROFILE_RELATED_NAME).order_by('-created')
            else:
                qs = qs.select_related('user__%s' % defaults.PYBB_PROFILE_RELATED_NAME)

        if not perms.may_moderate_topic(self.request.user, self.topic):
            if defaults.PYBB_POST_SORT_REVERSE:
                qs = perms.filter_posts(self.request.user, qs).order_by('-created')
            else:
                qs = perms.filter_posts(self.request.user, qs)
        return qs
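The add/incr/set sequence above buffers PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER anonymous views per database write. The same pattern as a hedged generic helper (names are illustrative):

def buffered_incr(cache_key, flush_every, flush):
    cache.add(cache_key, 0)  # seed the counter; a no-op if it exists
    if cache.incr(cache_key) % flush_every == 0:
        flush(flush_every)   # persist one batch, e.g. an F('views') update
        cache.set(cache_key, 0)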
Example #20
def _content_parsed(obj):
    cache_key = obj.html_cache_key % obj.id
    html = cache.get(cache_key)
    if html is None:
        html = wiki_to_html(obj.content)
        cache.add(cache_key, html, CACHE_TIMEOUT)
    return html
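On Django 1.9+ this read-through shape is exactly cache.get_or_set, which itself uses add() underneath; a hedged equivalent of the function above:

def _content_parsed(obj):
    cache_key = obj.html_cache_key % obj.id
    return cache.get_or_set(
        cache_key, lambda: wiki_to_html(obj.content), CACHE_TIMEOUT)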
Example #21
    def related_documents(self):
        """Return documents that are 'morelikethis' one."""
        # Only documents in default IA categories have related.
        if self.redirect_url() or not self.current_revision or self.category not in settings.IA_DEFAULT_CATEGORIES:
            return []

        # First try to get the results from the cache
        key = "wiki_document:related_docs:%s" % self.id
        documents = cache.get(key)
        if documents is not None:
            statsd.incr("wiki.related_documents.cache.hit")
            log.debug("Getting MLT for {doc} from cache.".format(doc=repr(self)))
            return documents

        try:
            statsd.incr("wiki.related_documents.cache.miss")
            mt = self.get_mapping_type()
            documents = mt.morelikethis(
                self.id,
                s=mt.search().filter(
                    document_locale=self.locale,
                    document_is_archived=False,
                    document_category__in=settings.IA_DEFAULT_CATEGORIES,
                    product__in=[p.slug for p in self.get_products()],
                ),
                fields=["document_title", "document_summary", "document_content"],
            )[:3]
            cache.add(key, documents)
        except ES_EXCEPTIONS:
            statsd.incr("wiki.related_documents.esexception")
            log.exception("ES MLT related_documents")
            documents = []

        return documents
Example #22
    def __init__(self):
        self.sid = None
        self.key = None
        self.session = None
        string_cookie = os.environ.get('HTTP_COOKIE', '')
        self.cookie = Cookie.SimpleCookie()
        self.cookie.load(string_cookie)

        # check for existing cookie
        if self.cookie.get(COOKIE_NAME):
            self.sid = self.cookie[COOKIE_NAME].value
            self.key = "session-" + self.sid
            self.session = cache.get(self.key)
            if self.session is None:
                logging.info("Invalidating session " + self.sid)
                self.sid = None
                self.key = None

        if self.session is None:
            self.sid = str(random.random())[5:] + str(random.random())[5:]
            self.key = "session-" + self.sid
            logging.info("Creating session " + self.key)
            self.session = dict()
            cache.add(self.key, self.session, 3600)

            self.cookie[COOKIE_NAME] = self.sid
            self.cookie[COOKIE_NAME]['path'] = DEFAULT_COOKIE_PATH
            # Send the Cookie header to the browser
            print self.cookie
Example #23
def ajax_user_details(request):
    """
    AJAX View to give the results for related search key after applying filters
    """
    paying = request.POST.get('paying', '')
    search = request.POST.get('search', '')
    uploaded = request.POST.get('uploaded', '')
    staffpick = request.POST.get('staff', '')
    total = 0
    users = None
    if search:
        users = cache.get('%s_searched_users_%s_%s_%s' % (search, paying, uploaded, staffpick), None)
        if users is None:
            if search.startswith('^'):
                users = VimeoUser.objects.filter(name__istartswith=search[1:])
            elif search.startswith('='):
                users = VimeoUser.objects.filter(name__iexact=search[1:])
            else:
                users = VimeoUser.objects.filter(name__icontains=search)
            if paying:
                users = users.filter(paying=True)
            if uploaded:
                users = users.filter(video=True)
            if staffpick:
                users = users.filter(staffpick=True)
            cache.add('%s_searched_users_%s_%s_%s' % (search, paying, uploaded, staffpick), users,
                      settings.SEARCH_CACHE_TIME)
        if users:
            total = users.count()
            users = users[:100]
    con = dict(
        total=total,
        users=users,
    )
    return render_to_response('profiles/users.html', con, RequestContext(request))
Example #24
    def test_unicode(self):
        # Unicode values can be cached
        stuff = {
            "ascii": "ascii_value",
            "unicode_ascii": "Iñtërnâtiônàlizætiøn1",
            "Iñtërnâtiônàlizætiøn": "Iñtërnâtiônàlizætiøn2",
            "ascii2": {"x": 1},
        }
        # Test `set`
        for (key, value) in stuff.items():
            cache.set(key, value)
            self.assertEqual(cache.get(key), value)

        # Test `add`
        for (key, value) in stuff.items():
            cache.delete(key)
            cache.add(key, value)
            self.assertEqual(cache.get(key), value)

        # Test `set_many`
        for (key, value) in stuff.items():
            cache.delete(key)
        cache.set_many(stuff)
        for (key, value) in stuff.items():
            self.assertEqual(cache.get(key), value)
Example #25
    def related_documents(self):
        """Return documents that are 'morelikethis' one."""
        # Only documents in default IA categories have related.
        if (self.redirect_url() or not self.current_revision or
            self.category not in settings.IA_DEFAULT_CATEGORIES):
            return []

        # First try to get the results from the cache
        key = 'wiki_document:related_docs:%s' % self.id
        documents = cache.get(key)
        if documents:
            log.debug('Getting MLT for {doc} from cache.'
                .format(doc=repr(self)))
            return documents

        try:
            documents = self.morelikethis(
                self.get_document_id(self.id),
                s=self.get_s().filter(
                    model=self.get_model_name(),
                    document_locale=self.locale,
                    document_is_archived=False,
                    document_category__in=settings.IA_DEFAULT_CATEGORIES),
                fields=[
                    'document_title',
                    'document_summary',
                    'document_content'])
            cache.add(key, documents)
        except (Timeout, ConnectionError, ElasticHttpError) as e:
            log.error('ES error during MLT for {doc}: {err}'
                .format(doc=repr(self), err=str(e)))
            documents = []

        return documents
Example #26
    def recommended_beers_by_tags(self, values=False):
        """
    Returns a cached list of recommended beers, based on tags. 
    """
        # UNCACHED VERSION
        # if values:
        #   return Recommender.objects.get_content_based_recs(self.user, Beer.objects.filter(rating__isnull=False))
        # else:
        #   return [ item[1] for item in Recommender.objects.get_content_based_recs(self.user, Beer.objects.filter(rating__isnull=False))]

        # CACHED VERSION.
        cache_key = slugify(u"recommended_beers_by_tags_%s" % self.__unicode__())
        recommended_beers_by_tags = cache.get(cache_key)
        if recommended_beers_by_tags == []:
            return recommended_beers_by_tags
        if not recommended_beers_by_tags:
            recommended_beers_by_tags = Recommender.objects.get_content_based_recs(
                self.user, Beer.objects.filter(rating__isnull=False)
            )
            cache.add(cache_key, recommended_beers_by_tags, 7200)
            recommended_beers_by_tags = cache.get(cache_key)
        if values:
            return recommended_beers_by_tags
        else:
            faves_list = [fave.content_object for fave in Fave.active_objects.filter(user=self.user)]
            return [item[1] for item in recommended_beers_by_tags if item[1] not in faves_list]
Example #27
    def recommended_beers_by_users(self, values=False):
        """
    Returns a cached list of recommended beers for this user, based on similarity matrix with other users.
    """
        # UNCACHED VERSION.
        # if values:
        #   return Recommender.objects.get_best_items_for_user(self.user, User.objects.all(), Beer.objects.filter(rating__isnull=False))
        # else:
        #   return [ item[1] for item in Recommender.objects.get_best_items_for_user(self.user, User.objects.all(), Beer.objects.filter(rating__isnull=False))]

        # CACHED VERSION.
        cache_key = slugify(u"recommended_beers_by_users_%s" % self.__unicode__())
        recommended_beers_by_users = cache.get(cache_key)
        if recommended_beers_by_users == []:
            return recommended_beers_by_users
        if not recommended_beers_by_users:
            recommended_beers_by_users = Recommender.objects.get_best_items_for_user(
                self.user, User.objects.all(), Beer.objects.filter(rating__isnull=False)
            )
            cache.add(cache_key, recommended_beers_by_users, 7200)
        if values:
            return recommended_beers_by_users
        else:
            faves_list = [fave.content_object for fave in Fave.active_objects.filter(user=self.user)]
            return [item[1] for item in recommended_beers_by_users if item[1] not in faves_list]
Example #28
    def should_be_throttled(self, identifier, **kwargs):
        """
        Returns whether or not the user has exceeded their throttle limit.

        Maintains a list of timestamps when the user accessed the api within
        the cache.

        Returns ``False`` if the user should NOT be throttled or ``True`` if
        the user should be throttled.
        """

        #Generate a more granular id
        new_id, url, request_method = self.get_new_id(identifier, **kwargs)
        key = self.convert_identifier_to_key(new_id)

        #See if we can get a user and adjust throttle limit
        user = self.get_user(identifier)
        throttle_at = self.get_rate_limit_for_user(user)

        # Make sure something is there.
        cache.add(key, [])

        # Weed out anything older than the timeframe.
        minimum_time = int(time.time()) - int(self.timeframe)
        times_accessed = [access for access in cache.get(key) if access >= minimum_time]
        cache.set(key, times_accessed, self.expiration)

        if len(times_accessed) >= int(throttle_at):
            # Throttle them.
            return True

        # Let them through.
        return False
Example #29
 def should_be_throttled(self, identifier, **kwargs):
     """
     Returns whether or not the user has exceeded their throttle limit.
     
     Maintains a list of timestamps when the user accessed the api within
     the cache.
     
     Returns ``False`` if the user should NOT be throttled or ``True`` if
     the user should be throttled.
     """
     key = self.convert_identifier_to_key(identifier)
     
     # Make sure something is there.
     cache.add(key, [])
     
     # Weed out anything older than the timeframe.
     minimum_time = int(time.time()) - int(self.timeframe)
     times_accessed = [access for access in cache.get(key) if access >= minimum_time]
     cache.set(key, times_accessed, self.expiration)
     
     if len(times_accessed) >= int(self.throttle_at):
         # Throttle them.
         return True
     
     # Let them through.
     return False
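Both throttle variants above do an unguarded read-modify-write (get, filter, set), so simultaneous requests can each slip under the limit. A coarser but atomic fixed-window alternative, sketched with add plus incr (assumes a backend with atomic incr, such as memcached or redis):

def over_limit(key, limit, window_seconds):
    cache.add(key, 0, window_seconds)  # start a window if none is active
    # A tiny race remains if the window expires between the two calls,
    # in which case incr raises ValueError on some backends.
    return cache.incr(key) > limit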
Example #30
def get_flapage_from_cache(url):
    """
    Try get flatpage from cache entry with all flatpages by url.
    If not found, create cache and return flatpage from db.

    This probably avoid some hits on DB.
    """
    site_id = settings.SITE_ID
    cache_key = make_flatpages_cache_key()
    flatpages = cache.get(cache_key)
    if flatpages and url in flatpages:
        return flatpages[url]

    # flatpages cache not exist or flatpage not found.

    # 1. get all flatpages.
    flatpages = dict([(f.url, f) for f in
        FlatPage.objects.filter(sites__id__exact=site_id).order_by('url')])

    # 2. if url not in flatpages, raise Http404
    if url not in flatpages:
        raise Http404

    # 3. if url in flatpages, recreate cache and return flatpage
    cache.delete(cache_key)
    cache.add(cache_key, flatpages)
    return flatpages[url]
Example #31
    def test_cache_versioning_add(self):

        # add, default version = 1, but manually override version = 2
        cache.add('answer1', 42, version=2)
        self.assertEqual(cache.get('answer1', version=1), None)
        self.assertEqual(cache.get('answer1', version=2), 42)

        cache.add('answer1', 37, version=2)
        self.assertEqual(cache.get('answer1', version=1), None)
        self.assertEqual(cache.get('answer1', version=2), 42)

        cache.add('answer1', 37, version=1)
        self.assertEqual(cache.get('answer1', version=1), 37)
        self.assertEqual(cache.get('answer1', version=2), 42)

        # v2 add, using default version = 2
        caches['v2'].add('answer2', 42)
        self.assertEqual(cache.get('answer2', version=1), None)
        self.assertEqual(cache.get('answer2', version=2), 42)

        caches['v2'].add('answer2', 37)
        self.assertEqual(cache.get('answer2', version=1), None)
        self.assertEqual(cache.get('answer2', version=2), 42)

        caches['v2'].add('answer2', 37, version=1)
        self.assertEqual(cache.get('answer2', version=1), 37)
        self.assertEqual(cache.get('answer2', version=2), 42)

        # v2 add, default version = 2, but manually override version = 1
        caches['v2'].add('answer3', 42, version=1)
        self.assertEqual(cache.get('answer3', version=1), 42)
        self.assertEqual(cache.get('answer3', version=2), None)

        caches['v2'].add('answer3', 37, version=1)
        self.assertEqual(cache.get('answer3', version=1), 42)
        self.assertEqual(cache.get('answer3', version=2), None)

        caches['v2'].add('answer3', 37)
        self.assertEqual(cache.get('answer3', version=1), 42)
        self.assertEqual(cache.get('answer3', version=2), 37)
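The version= behaviour exercised above works because Django bakes the version into the final cache key; this is the documented default KEY_FUNCTION:

def make_key(key, key_prefix, version):
    return '%s:%s:%s' % (key_prefix, version, key)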
Example #32
    def test_cache_versioning_add(self):

        # add, default version = 1, but manually override version = 2
        cache.add('answer1', 42, version=2)
        assert cache.get('answer1', version=1) is None
        assert cache.get('answer1', version=2) == 42

        cache.add('answer1', 37, version=2)
        assert cache.get('answer1', version=1) is None
        assert cache.get('answer1', version=2) == 42

        cache.add('answer1', 37, version=1)
        assert cache.get('answer1', version=1) == 37
        assert cache.get('answer1', version=2) == 42

        # v2 add, using default version = 2
        caches['v2'].add('answer2', 42)
        assert cache.get('answer2', version=1) is None
        assert cache.get('answer2', version=2) == 42

        caches['v2'].add('answer2', 37)
        assert cache.get('answer2', version=1) is None
        assert cache.get('answer2', version=2) == 42

        caches['v2'].add('answer2', 37, version=1)
        assert cache.get('answer2', version=1) == 37
        assert cache.get('answer2', version=2) == 42

        # v2 add, default version = 2, but manually override version = 1
        caches['v2'].add('answer3', 42, version=1)
        assert cache.get('answer3', version=1) == 42
        assert cache.get('answer3', version=2) is None

        caches['v2'].add('answer3', 37, version=1)
        assert cache.get('answer3', version=1) == 42
        assert cache.get('answer3', version=2) is None

        caches['v2'].add('answer3', 37)
        assert cache.get('answer3', version=1) == 42
        assert cache.get('answer3', version=2) == 37
Example #33
    def test_cache_versioning_add(self):

        # add, default version = 1, but manually override version = 2
        cache.add("answer1", 42, version=2)
        assert cache.get("answer1", version=1) is None
        assert cache.get("answer1", version=2) == 42

        cache.add("answer1", 37, version=2)
        assert cache.get("answer1", version=1) is None
        assert cache.get("answer1", version=2) == 42

        cache.add("answer1", 37, version=1)
        assert cache.get("answer1", version=1) == 37
        assert cache.get("answer1", version=2) == 42

        # v2 add, using default version = 2
        caches["v2"].add("answer2", 42)
        assert cache.get("answer2", version=1) is None
        assert cache.get("answer2", version=2) == 42

        caches["v2"].add("answer2", 37)
        assert cache.get("answer2", version=1) is None
        assert cache.get("answer2", version=2) == 42

        caches["v2"].add("answer2", 37, version=1)
        assert cache.get("answer2", version=1) == 37
        assert cache.get("answer2", version=2) == 42

        # v2 add, default version = 2, but manually override version = 1
        caches["v2"].add("answer3", 42, version=1)
        assert cache.get("answer3", version=1) == 42
        assert cache.get("answer3", version=2) is None

        caches["v2"].add("answer3", 37, version=1)
        assert cache.get("answer3", version=1) == 42
        assert cache.get("answer3", version=2) is None

        caches["v2"].add("answer3", 37)
        assert cache.get("answer3", version=1) == 42
        assert cache.get("answer3", version=2) == 37
Example #34
    def test_cache_versioning_add(self):

        # add, default version = 1, but manually override version = 2
        cache.add("answer1", 42, version=2)
        self.assertEqual(cache.get("answer1", version=1), None)
        self.assertEqual(cache.get("answer1", version=2), 42)

        cache.add("answer1", 37, version=2)
        self.assertEqual(cache.get("answer1", version=1), None)
        self.assertEqual(cache.get("answer1", version=2), 42)

        cache.add("answer1", 37, version=1)
        self.assertEqual(cache.get("answer1", version=1), 37)
        self.assertEqual(cache.get("answer1", version=2), 42)

        # v2 add, using default version = 2
        caches["v2"].add("answer2", 42)
        self.assertEqual(cache.get("answer2", version=1), None)
        self.assertEqual(cache.get("answer2", version=2), 42)

        caches["v2"].add("answer2", 37)
        self.assertEqual(cache.get("answer2", version=1), None)
        self.assertEqual(cache.get("answer2", version=2), 42)

        caches["v2"].add("answer2", 37, version=1)
        self.assertEqual(cache.get("answer2", version=1), 37)
        self.assertEqual(cache.get("answer2", version=2), 42)

        # v2 add, default version = 2, but manually override version = 1
        caches["v2"].add("answer3", 42, version=1)
        self.assertEqual(cache.get("answer3", version=1), 42)
        self.assertEqual(cache.get("answer3", version=2), None)

        caches["v2"].add("answer3", 37, version=1)
        self.assertEqual(cache.get("answer3", version=1), 42)
        self.assertEqual(cache.get("answer3", version=2), None)

        caches["v2"].add("answer3", 37)
        self.assertEqual(cache.get("answer3", version=1), 42)
        self.assertEqual(cache.get("answer3", version=2), 37)
Example #35
def wafflejs(request):
    flags = cache.get(FLAGS_ALL_CACHE_KEY)
    if not flags:
        flags = Flag.objects.values_list('name', flat=True)
        cache.add(FLAGS_ALL_CACHE_KEY, flags)
    flag_values = [(f, flag_is_active(request, f)) for f in flags]

    switches = cache.get(SWITCHES_ALL_CACHE_KEY)
    if not switches:
        switches = Switch.objects.values_list('name', 'active')
        cache.add(SWITCHES_ALL_CACHE_KEY, switches)

    samples = cache.get(SAMPLES_ALL_CACHE_KEY)
    if not samples:
        samples = Sample.objects.values_list('name', flat=True)
        cache.add(SAMPLES_ALL_CACHE_KEY, samples)
    sample_values = [(s, sample_is_active(s)) for s in samples]
    return render_to_response('waffle/waffle.js', {
        'flags': flag_values,
        'switches': switches,
        'samples': sample_values
    },
                              mimetype='application/x-javascript')
Example #36
 def add_content(self, key, value):
     cache.add(self._generate_cachable_key(key), value, None)
Example #37
 def test_add_fail_on_pickleerror(self):
     # Shouldn't fail silently if trying to cache an unpicklable type.
     with self.assertRaises(pickle.PickleError):
         cache.add('unpicklable', Unpicklable())
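The test assumes an Unpicklable fixture whose pickling always fails; a minimal sketch of such a class:

import pickle

class Unpicklable(object):
    def __getstate__(self):
        raise pickle.PickleError("not picklable, by design")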
Example #38
    def process(self, **kwargs):
        "Processes the message before passing it on to the server"
        from sentry.helpers import get_filters

        if kwargs.get('data'):
            # Ensure we're not changing the original data which was passed
            # to Sentry
            kwargs['data'] = kwargs['data'].copy()

        request = kwargs.pop('request', None)
        if request:
            if not kwargs.get('data'):
                kwargs['data'] = {}
            kwargs['data'].update(
                dict(
                    META=request.META,
                    POST=request.POST,
                    GET=request.GET,
                    COOKIES=request.COOKIES,
                ))

            if not kwargs.get('url'):
                kwargs['url'] = request.build_absolute_uri()

        kwargs.setdefault('level', logging.ERROR)
        kwargs.setdefault('server_name', conf.NAME)

        # save versions of all installed apps
        if 'data' not in kwargs or '__sentry__' not in (kwargs['data'] or {}):
            if kwargs.get('data') is None:
                kwargs['data'] = {}
            kwargs['data']['__sentry__'] = {}

        versions = get_versions()
        kwargs['data']['__sentry__']['versions'] = versions

        if kwargs.get('view'):
            # get list of modules from right to left
            parts = kwargs['view'].split('.')
            module_list = [
                '.'.join(parts[:idx]) for idx in xrange(1,
                                                        len(parts) + 1)
            ][::-1]
            version = None
            module = None
            for m in module_list:
                if m in versions:
                    module = m
                    version = versions[m]

            # store our "best guess" for application version
            if version:
                kwargs['data']['__sentry__'].update({
                    'version': version,
                    'module': module,
                })

        if 'checksum' not in kwargs:
            checksum = construct_checksum(**kwargs)
        else:
            checksum = kwargs['checksum']

        if conf.THRASHING_TIMEOUT and conf.THRASHING_LIMIT:
            cache_key = 'sentry:%s:%s' % (kwargs.get('class_name')
                                          or '', checksum)
            added = cache.add(cache_key, 1, conf.THRASHING_TIMEOUT)
            if not added:
                try:
                    thrash_count = cache.incr(cache_key)
                except (KeyError, ValueError):
                    # cache.incr can fail. Assume we aren't thrashing yet, and
                    # if we are, hope that the next error has a successful
                    # cache.incr call.
                    thrash_count = 0
                if thrash_count > conf.THRASHING_LIMIT:
                    message_id = cache.get('%s:last_message_id' % cache_key)
                    if request:
                        # attach the sentry object to the request
                        request.sentry = {
                            'id': message_id,
                            'thrashed': True,
                        }
                    return message_id

        for filter_ in get_filters():
            kwargs = filter_(None).process(kwargs) or kwargs

        # create ID client-side so that it can be passed to application
        message_id = uuid.uuid4().hex
        kwargs['message_id'] = message_id

        # Make sure all data is coerced
        kwargs = transform(kwargs)

        self.send(**kwargs)

        if request:
            # attach the sentry object to the request
            request.sentry = {
                'id': message_id,
            }

        if conf.THRASHING_TIMEOUT and conf.THRASHING_LIMIT:
            # store the last message_id incase we hit thrashing limits
            cache.set('%s:last_message_id' % cache_key, message_id,
                      conf.THRASHING_LIMIT + 5)

        return message_id
Example #39
 def test_add_fail_on_pickleerror(self):
     "See https://code.djangoproject.com/ticket/21200"
     with pytest.raises(pickle.PickleError):
         cache.add("unpickable", Unpickable())
Example #40
def get_time_series_data(self, fitbit_user, cat, resource, date=None):
    """ Get the user's time series data """
    try:
        _type = TimeSeriesDataType.objects.get(category=cat, resource=resource)
    except TimeSeriesDataType.DoesNotExist as e:
        logger.exception("The resource %s in category %s doesn't exist" %
                         (resource, cat))
        raise Reject(e, requeue=False)

    # Create a lock so we don't try to run the same task multiple times
    sdat = date.strftime('%Y-%m-%d') if date else 'ALL'
    lock_id = '{0}-lock-{1}-{2}-{3}'.format(__name__, fitbit_user, _type, sdat)
    if not cache.add(lock_id, 'true', LOCK_EXPIRE):
        logger.debug('Already retrieving %s data for date %s, user %s' %
                     (_type, fitbit_user, sdat))
        raise Ignore()

    try:
        fbusers = UserFitbit.objects.filter(fitbit_user=fitbit_user)
        default_period = utils.get_setting('FITAPP_DEFAULT_PERIOD')
        if default_period:
            dates = {'base_date': 'today', 'period': default_period}
        else:
            dates = {'base_date': 'today', 'period': 'max'}
        if date:
            dates = {'base_date': date, 'end_date': date}
        for fbuser in fbusers:
            data = utils.get_fitbit_data(fbuser, _type, **dates)
            if utils.get_setting('FITAPP_GET_INTRADAY'):
                tz_offset = utils.get_fitbit_profile(fbuser,
                                                     'offsetFromUTCMillis')
                tz_offset = tz_offset / 3600 / 1000 * -1  # Converted to positive hours
            for datum in data:
                # Create new record or update existing record
                date = parser.parse(datum['dateTime'])
                if _type.intraday_support and \
                        utils.get_setting('FITAPP_GET_INTRADAY'):
                    resources = TimeSeriesDataType.objects.filter(
                        intraday_support=True)
                    for i, _type in enumerate(resources):
                        # Offset each call by 2 seconds so they don't bog down
                        # the server
                        get_intraday_data(fbuser.fitbit_user, _type.category,
                                          _type.resource, date, tz_offset)
                tsd, created = TimeSeriesData.objects.get_or_create(
                    user=fbuser.user,
                    resource_type=_type,
                    date=date,
                    intraday=False)
                tsd.value = datum['value']
                tsd.save()
        # Release the lock
        cache.delete(lock_id)
    except HTTPTooManyRequests as e:
        # We have hit the rate limit for the user, retry when it's reset,
        # according to the reply from the failing API call
        countdown = e.retry_after_secs + int(
            # Add exponential back-off + random jitter
            random.uniform(2, 4)**self.request.retries)
        logger.debug('Rate limit reached, will try again in {} seconds'.format(
            countdown))
        raise get_time_series_data.retry(exc=e, countdown=countdown)
    except HTTPBadRequest as e:
        # If the resource is elevation or floors, we are just getting this
        # error because the data doesn't exist for this user, so we can ignore
        # the error
        if not ('elevation' in resource or 'floors' in resource):
            exc = sys.exc_info()[1]
            logger.exception("Exception updating data for user %s: %s" %
                             (fitbit_user, exc))
            raise Reject(exc, requeue=False)
    except Exception:
        exc = sys.exc_info()[1]
        logger.exception("Exception updating data for user %s: %s" %
                         (fitbit_user, exc))
        raise Reject(exc, requeue=False)
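The cache.add lock at the top of this task is the standard "run at most one copy" recipe for Celery tasks: add succeeds only for the first caller. The same idea as a hedged context-manager sketch:

from contextlib import contextmanager

@contextmanager
def cache_lock(lock_id, expire):
    acquired = cache.add(lock_id, 'true', expire)  # first caller wins
    try:
        yield acquired
    finally:
        if acquired:
            cache.delete(lock_id)  # release only what we acquired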
Example #41
def renderView(request):
    start = time()
    (graphOptions, requestOptions) = parseOptions(request)
    useCache = 'noCache' not in requestOptions
    cacheTimeout = requestOptions['cacheTimeout']
    # TODO: Make that a namedtuple or a class.
    requestContext = {
        'startTime': requestOptions['startTime'],
        'endTime': requestOptions['endTime'],
        'now': requestOptions['now'],
        'localOnly': requestOptions['localOnly'],
        'template': requestOptions['template'],
        'tzinfo': requestOptions['tzinfo'],
        'forwardHeaders': extractForwardHeaders(request),
        'data': [],
        'prefetched': {},
    }
    data = requestContext['data']

    # First we check the request cache
    if useCache:
        requestKey = hashRequest(request)
        cachedResponse = cache.get(requestKey)
        if cachedResponse:
            log.cache('Request-Cache hit [%s]' % requestKey)
            log.rendering('Returned cached response in %.6f' %
                          (time() - start))
            return cachedResponse
        else:
            log.cache('Request-Cache miss [%s]' % requestKey)

    # Now we prepare the requested data
    if requestOptions['graphType'] == 'pie':
        for target in requestOptions['targets']:
            if target.find(':') >= 0:
                try:
                    name, value = target.split(':', 1)
                    value = float(value)
                except:
                    raise ValueError("Invalid target '%s'" % target)
                data.append((name, value))
            else:
                seriesList = evaluateTarget(requestContext, target)

                for series in seriesList:
                    func = PieFunctions[requestOptions['pieMode']]
                    data.append((series.name, func(requestContext, series)
                                 or 0))

    elif requestOptions['graphType'] == 'line':
        # Let's see if at least our data is cached
        if useCache:
            targets = requestOptions['targets']
            startTime = requestOptions['startTime']
            endTime = requestOptions['endTime']
            dataKey = hashData(targets, startTime, endTime)
            cachedData = cache.get(dataKey)
            if cachedData:
                log.cache("Data-Cache hit [%s]" % dataKey)
            else:
                log.cache("Data-Cache miss [%s]" % dataKey)
        else:
            cachedData = None

        if cachedData is not None:
            requestContext['data'] = data = cachedData
        else:  # Have to actually retrieve the data now
            targets = requestOptions['targets']
            if settings.REMOTE_PREFETCH_DATA and not requestOptions.get(
                    'localOnly'):
                prefetchRemoteData(requestContext, targets)

            for target in targets:
                if not target.strip():
                    continue
                t = time()
                seriesList = evaluateTarget(requestContext, target)
                log.rendering("Retrieval of %s took %.6f" %
                              (target, time() - t))
                data.extend(seriesList)

            if useCache:
                cache.add(dataKey, data, cacheTimeout)

        # If data is all we needed, we're done
        format = requestOptions.get('format')
        if format == 'csv':
            response = HttpResponse(content_type='text/csv')
            writer = csv.writer(response, dialect='excel')

            for series in data:
                for i, value in enumerate(series):
                    timestamp = datetime.fromtimestamp(
                        series.start + (i * series.step),
                        requestOptions['tzinfo'])
                    writer.writerow(
                        (series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"),
                         value))

            return response

        if format == 'json':
            jsonStart = time()

            series_data = []
            if 'maxDataPoints' in requestOptions and any(data):
                startTime = min([series.start for series in data])
                endTime = max([series.end for series in data])
                timeRange = endTime - startTime
                maxDataPoints = requestOptions['maxDataPoints']
                for series in data:
                    numberOfDataPoints = timeRange / series.step
                    if maxDataPoints < numberOfDataPoints:
                        valuesPerPoint = math.ceil(
                            float(numberOfDataPoints) / float(maxDataPoints))
                        secondsPerPoint = int(valuesPerPoint * series.step)
                        # Nudge start over a little bit so that the consolidation bands align with each call
                        # removing 'jitter' seen when refreshing.
                        nudge = secondsPerPoint + (
                            series.start % series.step) - (series.start %
                                                           secondsPerPoint)
                        series.start = series.start + nudge
                        valuesToLose = int(nudge / series.step)
                        for r in range(1, valuesToLose):
                            del series[0]
                        series.consolidate(valuesPerPoint)
                        timestamps = range(int(series.start),
                                           int(series.end) + 1,
                                           int(secondsPerPoint))
                    else:
                        timestamps = range(int(series.start),
                                           int(series.end) + 1,
                                           int(series.step))
                    datapoints = zip(series, timestamps)
                    series_data.append(
                        dict(target=series.name,
                             tags=series.tags,
                             datapoints=datapoints))
            elif 'noNullPoints' in requestOptions and any(data):
                for series in data:
                    values = []
                    for (index, v) in enumerate(series):
                        if v is not None:
                            timestamp = series.start + (index * series.step)
                            values.append((v, timestamp))
                    if len(values) > 0:
                        series_data.append(
                            dict(target=series.name,
                                 tags=series.tags,
                                 datapoints=values))
            else:
                for series in data:
                    timestamps = range(int(series.start),
                                       int(series.end) + 1, int(series.step))
                    datapoints = zip(series, timestamps)
                    series_data.append(
                        dict(target=series.name,
                             tags=series.tags,
                             datapoints=datapoints))

            output = json.dumps(
                series_data,
                indent=(2 if requestOptions['pretty'] else None)).replace(
                    'None,',
                    'null,').replace('NaN,',
                                     'null,').replace('Infinity,', '1e9999,')

            if 'jsonp' in requestOptions:
                response = HttpResponse(content="%s(%s)" %
                                        (requestOptions['jsonp'], output),
                                        content_type='text/javascript')
            else:
                response = HttpResponse(content=output,
                                        content_type='application/json')

            if useCache:
                cache.add(requestKey, response, cacheTimeout)
                patch_response_headers(response, cache_timeout=cacheTimeout)
            else:
                add_never_cache_headers(response)
            log.rendering('JSON rendering time %6f' % (time() - jsonStart))
            log.rendering('Total request processing time %6f' %
                          (time() - start))
            return response

        if format == 'dygraph':
            labels = ['Time']
            result = '{}'
            if data:
                datapoints = [[
                    ts
                ] for ts in range(data[0].start, data[0].end, data[0].step)]
                for series in data:
                    labels.append(series.name)
                    for i, point in enumerate(series):
                        if point is None:
                            point = 'null'
                        elif point == float('inf'):
                            point = 'Infinity'
                        elif point == float('-inf'):
                            point = '-Infinity'
                        elif math.isnan(point):
                            point = 'null'
                        datapoints[i].append(point)
                line_template = '[%%s000%s]' % ''.join([', %s'] * len(data))
                lines = [
                    line_template % tuple(points) for points in datapoints
                ]
                result = '{"labels" : %s, "data" : [%s]}' % (
                    json.dumps(labels), ', '.join(lines))
            response = HttpResponse(content=result,
                                    content_type='application/json')

            if useCache:
                cache.add(requestKey, response, cacheTimeout)
                patch_response_headers(response, cache_timeout=cacheTimeout)
            else:
                add_never_cache_headers(response)
            log.rendering('Total dygraph rendering time %.6f' %
                          (time() - start))
            return response

        if format == 'rickshaw':
            series_data = []
            for series in data:
                timestamps = range(series.start, series.end, series.step)
                datapoints = [{
                    'x': x,
                    'y': y
                } for x, y in zip(timestamps, series)]
                series_data.append(
                    dict(target=series.name, datapoints=datapoints))
            if 'jsonp' in requestOptions:
                response = HttpResponse(
                    content="%s(%s)" %
                    (requestOptions['jsonp'], json.dumps(series_data)),
                    content_type='text/javascript')
            else:
                response = HttpResponse(content=json.dumps(series_data),
                                        content_type='application/json')

            if useCache:
                cache.add(requestKey, response, cacheTimeout)
                patch_response_headers(response, cache_timeout=cacheTimeout)
            else:
                add_never_cache_headers(response)
            log.rendering('Total rickshaw rendering time %.6f' %
                          (time() - start))
            return response

        if format == 'raw':
            response = HttpResponse(content_type='text/plain')
            for series in data:
                response.write(
                    "%s,%d,%d,%d|" %
                    (series.name, series.start, series.end, series.step))
                response.write(','.join(map(repr, series)))
                response.write('\n')

            log.rendering('Total rawData rendering time %.6f' %
                          (time() - start))
            return response

        if format == 'svg':
            graphOptions['outputFormat'] = 'svg'
        elif format == 'pdf':
            graphOptions['outputFormat'] = 'pdf'

        if format == 'pickle':
            response = HttpResponse(content_type='application/pickle')
            seriesInfo = [series.getInfo() for series in data]
            pickle.dump(seriesInfo, response, protocol=-1)

            log.rendering('Total pickle rendering time %.6f' %
                          (time() - start))
            return response

    # We've got the data, now to render it
    graphOptions['data'] = data
    if settings.REMOTE_RENDERING:  # Rendering on other machines is faster in some situations
        image = delegateRendering(requestOptions['graphType'], graphOptions,
                                  requestContext['forwardHeaders'])
    else:
        image = doImageRender(requestOptions['graphClass'], graphOptions)

    useSVG = graphOptions.get('outputFormat') == 'svg'
    if useSVG and 'jsonp' in requestOptions:
        response = HttpResponse(content="%s(%s)" %
                                (requestOptions['jsonp'], json.dumps(image)),
                                content_type='text/javascript')
    elif graphOptions.get('outputFormat') == 'pdf':
        response = buildResponse(image, 'application/x-pdf')
    else:
        response = buildResponse(image,
                                 'image/svg+xml' if useSVG else 'image/png')

    if useCache:
        cache.add(requestKey, response, cacheTimeout)
        patch_response_headers(response, cache_timeout=cacheTimeout)
    else:
        add_never_cache_headers(response)

    log.rendering('Total rendering time %.6f seconds' % (time() - start))
    return response
Example #42
 def lock(self):
     """lock() - lock the mutex; return False if it is already locked."""
     return bool(cache.add(self.__key, self.__value, self.__lifetime))
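
The `cache.add` call is what makes this work as a mutex: `add` writes only when the key is absent and reports whether the write happened, so exactly one caller can take the lock. A minimal self-contained sketch of the same idiom, assuming Django's low-level cache API (the `CacheMutex` name and its methods are illustrative, not the original class):

from django.core.cache import cache

class CacheMutex(object):
    """Illustrative add()-based mutex; expires on its own after `lifetime`."""

    def __init__(self, key, lifetime=60):
        self.key = key
        self.lifetime = lifetime

    def lock(self):
        # add() returns True only for the one caller that created the key.
        return bool(cache.add(self.key, '1', self.lifetime))

    def unlock(self):
        cache.delete(self.key)

mutex = CacheMutex('report-generation')
if mutex.lock():
    try:
        pass  # critical section goes here
    finally:
        mutex.unlock()
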
Example #43
def renderView(request):
    start = time()
    (graphOptions, requestOptions) = parseOptions(request)
    useCache = 'noCache' not in requestOptions
    cacheTimeout = requestOptions['cacheTimeout']
    # TODO: Make that a namedtuple or a class.
    requestContext = {
        'startTime': requestOptions['startTime'],
        'endTime': requestOptions['endTime'],
        'now': requestOptions['now'],
        'localOnly': requestOptions['localOnly'],
        'template': requestOptions['template'],
        'tzinfo': requestOptions['tzinfo'],
        'forwardHeaders': requestOptions['forwardHeaders'],
        'data': [],
        'prefetched': {},
        'xFilesFactor': requestOptions['xFilesFactor'],
    }
    data = requestContext['data']

    response = None

    # First we check the request cache
    if useCache:
        requestKey = hashRequest(request)
        response = cache.get(requestKey)
        if response:
            log.cache('Request-Cache hit [%s]' % requestKey)
            log.rendering('Returned cached response in %.6f' %
                          (time() - start))
            return response

        log.cache('Request-Cache miss [%s]' % requestKey)

    # Now we prepare the requested data
    if requestOptions['graphType'] == 'pie':
        for target in requestOptions['targets']:
            if target.find(':') >= 0:
                try:
                    name, value = target.split(':', 1)
                    value = float(value)
                except ValueError:
                    raise ValueError("Invalid target '%s'" % target)
                data.append((name, value))
            else:
                seriesList = evaluateTarget(requestContext, target)

                for series in seriesList:
                    func = PieFunctions[requestOptions['pieMode']]
                    data.append((series.name, func(requestContext, series)
                                 or 0))

    elif requestOptions['graphType'] == 'line':
        # Let's see if at least our data is cached
        cachedData = None
        if useCache:
            targets = requestOptions['targets']
            startTime = requestOptions['startTime']
            endTime = requestOptions['endTime']
            dataKey = hashData(targets, startTime, endTime,
                               requestOptions['xFilesFactor'])
            cachedData = cache.get(dataKey)
            if cachedData:
                log.cache("Data-Cache hit [%s]" % dataKey)
            else:
                log.cache("Data-Cache miss [%s]" % dataKey)

        if cachedData is not None:
            requestContext['data'] = data = cachedData
        else:  # Have to actually retrieve the data now
            targets = requestOptions['targets']

            data.extend(evaluateTarget(requestContext, targets))

            if useCache:
                cache.add(dataKey, data, cacheTimeout)

        renderStart = time()

        format = requestOptions.get('format')
        if format == 'csv':
            response = renderViewCsv(requestOptions, data)
        elif format == 'json':
            response = renderViewJson(requestOptions, data)
        elif format == 'dygraph':
            response = renderViewDygraph(requestOptions, data)
        elif format == 'rickshaw':
            response = renderViewRickshaw(requestOptions, data)
        elif format == 'raw':
            response = renderViewRaw(requestOptions, data)
        elif format == 'pickle':
            response = renderViewPickle(requestOptions, data)

    # if response wasn't generated above, render a graph image
    if not response:
        format = 'image'
        renderStart = time()
        response = renderViewGraph(graphOptions, requestOptions, data)

    if useCache:
        cache.add(requestKey, response, cacheTimeout)
        patch_response_headers(response, cache_timeout=cacheTimeout)
    else:
        add_never_cache_headers(response)

    log.rendering('%s rendering time %.6f' % (format, time() - renderStart))
    log.rendering('Total request processing time %.6f' % (time() - start))

    return response
Example #44
def promote_page(slug, publish=None, user_id=None, languages=None, count=0):
    import cms.api
    from cms.utils import copy_plugins
    from django.contrib.auth import get_user_model

    User = get_user_model()

    # cache.add fails if the key already exists
    acquire_lock = lambda: cache.add('promoting-page', 'true', 60)

    # memcache delete is very slow, but we have to use it to take
    # advantage of using add() for atomic locking
    release_lock = lambda: cache.delete('promoting-page')

    while True:
        if acquire_lock():
            break
        logger.info('Page {} locked'.format(slug))
        time.sleep(5)

    try:

        if not user_id:
            user = User.objects.filter(is_staff=True)[0]
        else:
            user = User.objects.get(pk=user_id)

        staging = Title.objects.filter(language='en', slug='staging')
        production = Title.objects.filter(language='en', slug='production')

        if staging:
            staging = staging[0].page
        if production:
            production = production[0].page

        staging_title = Title.objects.filter(
            language='en', slug=slug, page__in=staging.get_descendants())
        production_title = Title.objects.filter(
            language='en', slug=slug, page__in=production.get_descendants())

        try:
            if staging_title and not production_title:
                staging_page = staging_title[0].page
                parent_slug = staging_page.parent.get_slug('en')
                production_parent_title = Title.objects.filter(
                    language='en',
                    slug=parent_slug,
                    page__in=production.get_descendants())

                if production_parent_title:
                    production_parent_title = production_parent_title[0]

                    cms.api.create_page(
                        **{
                            "title": staging_title[0].title,
                            "template": staging_page.template,
                            "language": 'en',
                            "menu_title": staging_title[0].menu_title,
                            "slug": staging_title[0].slug,
                            "created_by": user,
                            "parent": production_parent_title.page,
                            "in_navigation": True
                        })

                    production_title = Title.objects.filter(
                        language='en',
                        slug=slug,
                        page__in=production.get_descendants())
        except Exception:
            logger.exception("Error creating production page.")

        if staging_title and production_title:
            staging_title = staging_title[0]
            production_title = production_title[0]

            source = staging_title.page
            destination = production_title.page

            placeholders = source.get_placeholders()

            source = source.get_public_object()
            destination = destination.get_draft_object()
            en_title = source.get_title_obj(language='en')

            destination_placeholders = dict([
                (a.slot, a) for a in destination.get_placeholders()
            ])
            languages = languages or [k for k, v in settings.LANGUAGES]

            for k in languages:
                available = [a.language for a in destination.title_set.all()]
                title = source.get_title_obj(language=k)

                # Doing some cleanup while I am at it
                if en_title and title:
                    title.title = en_title.title
                    title.slug = en_title.slug.strip()
                    if hasattr(title, 'save'):
                        title.save()

                if k not in available:
                    cms.api.create_title(k,
                                         title.title,
                                         destination,
                                         slug=title.slug)

                try:
                    destination_title = destination.get_title_obj(language=k)
                    if en_title and title and destination_title:
                        destination_title.title = title.title.strip()
                        destination_title.page_title = title.page_title.strip()
                        destination_title.slug = en_title.slug.strip()

                        if hasattr(destination_title, 'save'):
                            destination_title.save()

                except Exception as e:
                    logger.exception("Error updating title.")

            for placeholder in placeholders:
                destination_placeholders[placeholder.slot].clear()

                for k in languages:
                    plugins = list(
                        placeholder.cmsplugin_set.filter(
                            language=k).order_by('path'))
                    copied_plugins = copy_plugins.copy_plugins_to(
                        plugins, destination_placeholders[placeholder.slot], k)

            if publish:
                try:
                    for k in languages:
                        cms.api.publish_page(destination, user, k)
                except Exception:
                    logger.exception('Error publishing')

            for k in languages:
                source_title = source.get_title_obj(language=k)
                destination_title = destination.get_title_obj(language=k)

                source_html = generate_html_for_diff(title=source_title,
                                                     language=k)
                destination_html = generate_html_for_diff(
                    title=destination_title, language=k)

                import difflib

                diff_generator = difflib.context_diff(
                    source_html.splitlines(True),
                    destination_html.splitlines(True))
                diff = ''.join(list(diff_generator))

                if diff:
                    logger.info(
                        "There is an inconsistency between staging and production. Language {}, {}"
                        .format(k, source_title.slug))

    except Exception as e:
        logger.exception('Error in promotion')
        if count < 10:
            logger.info('Count {}'.format(count))
            import random
            promote_page.apply_async(kwargs=dict(slug=slug,
                                                 publish=publish,
                                                 user_id=user_id,
                                                 languages=languages,
                                                 count=(count + 1)),
                                     countdown=random.randint(10, 20))

    finally:
        release_lock()
Example #45
def update_ddf():
    from .manager import update_server
    # The key name is illustrative; cache keys must be strings.
    acquire_lock = lambda: cache.add('update-ddf-lock', 'true', LOCK_EXPIRE)
    release_lock = lambda: cache.delete('update-ddf-lock')
    if acquire_lock():
        try:
            update_server(sample=True)
        finally:
            release_lock()
Example #46
def pull_from_transifex(slug,
                        language,
                        project=settings.TRANSIFEX_PROJECT_SLUG,
                        retry=True):
    from django.contrib.auth import get_user_model

    User = get_user_model()

    # cache.add fails if the key already exists
    acquire_lock = lambda: cache.add('publishing-translation', 'true', 60 * 5)
    # memcache delete is very slow, but we have to use it to take
    # advantage of using add() for atomic locking
    release_lock = lambda: cache.delete('publishing-translation')

    try:
        if language == 'en':
            return
        import cms.api

        internal_language = language if language not in SHIM_LANGUAGE_DICTIONARY else SHIM_LANGUAGE_DICTIONARY[
            language]

        while True:
            if acquire_lock():
                break
            time.sleep(5)

        staging = Title.objects.filter(language='en', slug='staging')
        if staging:
            staging = staging[0].page
        titles = Title.objects.filter(language='en',
                                      slug=slug,
                                      page__in=staging.get_descendants())

        if not titles:
            logger.info('Page not found. Ignoring.')
            return

        page = titles[0].page.get_draft_object()

        password = settings.TRANSIFEX_PASSWORD
        user = settings.TRANSIFEX_USER

        transifex_language = language
        transifex_url_data = {
            "project": project,
            "slug": page.get_slug('en'),
            "language": transifex_language
        }
        fetch_format = "http://www.transifex.com/api/2/project/{project}/resource/{slug}html/translation/{language}/?mode=default"

        logger.info("Trying to request: %s" %
                    fetch_format.format(**transifex_url_data))
        logger.info("With creds: %s %s" % (user, password))

        r = requests.get(fetch_format.format(**transifex_url_data),
                         auth=(user, password))

        translation = r.json()

        text = translation['content'].strip()
        text = _parse_html_for_content(text)
        soup = BeautifulSoup(text)

        parser = etree.HTMLParser()
        tree = etree.parse(StringIO(unicode(soup.prettify())), parser)
        selector = CSSSelector('div[data-id]')
        title_selector = CSSSelector('div.title')
        """
        Directions are handled application-wise
        """
        dir_selector = CSSSelector('[dir]')

        for element in dir_selector(tree.getroot()):
            del element.attrib['dir']

        content = selector(tree.getroot())
        title = title_selector(tree.getroot())
        if title:
            try:
                title = title[0].text
                title_obj = page.get_title_obj(internal_language,
                                               fallback=False)
                if type(title_obj).__name__ == 'EmptyTitle':
                    logger.info('Creating new title')
                    en_title_obj = page.get_title_obj('en')
                    title_obj = cms.api.create_title(
                        language=internal_language,
                        title=en_title_obj.title.strip(),
                        page=page,
                        slug=en_title_obj.slug.strip(),
                    )
                    title_obj.save()
                title_obj.page_title = title.strip()
                title_obj.save()
            except Exception as e:
                logger.exception('Error updating the application.')

        dict_list = []

        for div in content:
            plugin_dict = {
                'id':
                div.attrib['data-id'],
                'type':
                div.attrib['data-type'],
                'parent':
                div.attrib['data-parent'],
                'position':
                div.attrib['data-position'],
                'translated': (div.text or '') + u''.join([
                    etree.tostring(a, pretty_print=True, method="html")
                    for a in div
                ]),
            }
            dict_list.append(plugin_dict)
        blame = User.objects.filter(is_staff=True, is_superuser=True)[0]

        _translate_page(dict_list, internal_language, page)

        cms.api.publish_page(page, blame, internal_language)
    except Exception as e:
        if retry:
            time.sleep(5)
            pull_from_transifex.delay(slug, language, project, False)
        else:
            traceback.print_exc()
            logger.info('Tried to retry it but it still erred out.')
            raise e
    finally:
        release_lock()
Example #47
def __acquire_lock(name):
    return cache.add(name, True, LOCK_EXPIRE)
Example #48
def new_email():
    cache.add('error_email_throttle', 0, getattr(settings, 'EMAIL_THROTTLING', DEFAULT_THROTTLE)[1])
    return cache.incr('error_email_throttle')
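
The add/incr pair gives an atomic counter with a time window: `add` seeds the counter only when no window is open (so the expiry is not pushed back by every new error), and `incr` counts the current occurrence. A sketch of how a caller might gate sending on the returned count; the `(max_emails, window_seconds)` shape of `EMAIL_THROTTLING` is inferred from the `[1]` index above, and `should_send_error_email` is an illustrative helper:

def should_send_error_email():
    # new_email() atomically opens the window (add) and counts this error (incr).
    max_emails = getattr(settings, 'EMAIL_THROTTLING', DEFAULT_THROTTLE)[0]
    return new_email() <= max_emails
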
Example #49
import os

from celery import Task  # assumed import path for the Celery Task base class
from django.core.cache import cache
from data_importer import BaseImporter, XMLImporter, XLSImporter, XLSXImporter
from django.conf import settings
from data_importer import settings as data_importer_settings
from django.core.mail import EmailMessage
from django.utils.safestring import mark_safe

LOCK_EXPIRE = getattr(settings, 'DATA_IMPORTER_TASK_LOCK_EXPIRE',
                      data_importer_settings.DATA_IMPORTER_TASK_LOCK_EXPIRE)

DATA_IMPORTER_QUEUE = getattr(settings, 'DATA_IMPORTER_QUEUE',
                              data_importer_settings.DATA_IMPORTER_QUEUE)

acquire_lock = lambda lock_id: cache.add(lock_id, "true", LOCK_EXPIRE)
release_lock = lambda lock_id: cache.delete(lock_id)


class DataImpoterTask(Task):
    """
    This task is executed by Celery.
    """
    name = 'flowbot.add_data_importer_task'
    queue = DATA_IMPORTER_QUEUE
    time_limit = 60 * 15
    mimetype = None
    parse = None

    def get_mimetype(self, file_history_instance=None):
        filename, extension = os.path.splitext(file_history_instance.filename)
Example #50
 def add(self, *args, **kwargs):
     return cache.add(self.key, *args, **kwargs)
Example #51
 def _acquire(self):
     return cache.add(self.key, 'true', self.timeout)
Example #52
File: lock.py  Project: dgk/modnoemesto
 def __enter__(self):
     while True:
         value = cache.add(self.key, "1", 60000)
         if value:
             return
         time.sleep(.1)
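
Only `__enter__` appears in this snippet; a matching `__exit__` that releases the key is an assumption, sketched below in the style of the other lock examples (the `CacheLock` name is illustrative):

import time

from django.core.cache import cache

class CacheLock(object):
    """Illustrative spin lock; blocks until add() succeeds."""

    def __init__(self, key):
        self.key = key

    def __enter__(self):
        # Spin until no other process holds the key.
        while not cache.add(self.key, "1", 60000):
            time.sleep(.1)

    def __exit__(self, exc_type, exc_value, tb):
        cache.delete(self.key)

# with CacheLock('my-critical-section'):
#     ...  # only one process at a time runs this block
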
Example #53
def renderView(request):
    start = time()

    if 'json_request' in request.REQUEST:
        (graphOptions,
         requestOptions) = parseDataOptions(request.REQUEST['json_request'])
    elif request.is_ajax() and request.method == 'POST':
        (graphOptions,
         requestOptions) = parseDataOptions(request.raw_post_data)
    else:
        (graphOptions, requestOptions) = parseOptions(request)

    useCache = 'noCache' not in requestOptions
    cacheTimeout = requestOptions['cacheTimeout']
    requestContext = {
        'startTime': requestOptions['startTime'],
        'endTime': requestOptions['endTime'],
        'localOnly': requestOptions['localOnly'],
        'data': []
    }
    data = requestContext['data']

    # First we check the request cache
    if useCache:
        requestKey = hashRequest(request)
        cachedResponse = cache.get(requestKey)
        if cachedResponse:
            log.cache('Request-Cache hit [%s]' % requestKey)
            log.rendering('Returned cached response in %.6f' %
                          (time() - start))
            return cachedResponse
        else:
            log.cache('Request-Cache miss [%s]' % requestKey)

    # Now we prepare the requested data
    if requestOptions['graphType'] == 'pie':
        for target in requestOptions['targets']:
            if target.find(':') >= 0:
                try:
                    name, value = target.split(':', 1)
                    value = float(value)
                except ValueError:
                    raise ValueError("Invalid target '%s'" % target)
                data.append((name, value))
            else:
                seriesList = evaluateTarget(requestContext, target)

                for series in seriesList:
                    func = PieFunctions[requestOptions['pieMode']]
                    data.append((series.name, func(requestContext, series)
                                 or 0))

    elif requestOptions['graphType'] == 'line':
        # Let's see if at least our data is cached
        if useCache:
            targets = requestOptions['targets']
            startTime = requestOptions['startTime']
            endTime = requestOptions['endTime']
            dataKey = hashData(targets, startTime, endTime)
            cachedData = cache.get(dataKey)
            if cachedData:
                log.cache("Data-Cache hit [%s]" % dataKey)
            else:
                log.cache("Data-Cache miss [%s]" % dataKey)
        else:
            cachedData = None

        if cachedData is not None:
            requestContext['data'] = data = cachedData
        else:  # Have to actually retrieve the data now
            for target in requestOptions['targets']:
                if not target.strip():
                    continue
                t = time()
                seriesList = evaluateTarget(requestContext, target)
                log.rendering("Retrieval of %s took %.6f" %
                              (target, time() - t))
                data.extend(seriesList)

            if useCache:
                cache.add(dataKey, data, cacheTimeout)

        # If data is all we needed, we're done
        format = requestOptions.get('format')
        if format == 'csv':
            response = HttpResponse(content_type='text/csv')
            writer = csv.writer(response, dialect='excel')

            for series in data:
                for i, value in enumerate(series):
                    timestamp = datetime.fromtimestamp(
                        series.start + (i * series.step),
                        requestOptions['tzinfo'])
                    writer.writerow(
                        (series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"),
                         value))

            return response

        if format == 'json':
            series_data = []
            if 'maxDataPoints' in requestOptions and any(data):
                startTime = min([series.start for series in data])
                endTime = max([series.end for series in data])
                timeRange = endTime - startTime
                maxDataPoints = requestOptions['maxDataPoints']
                for series in data:
                    numberOfDataPoints = timeRange / series.step
                    if maxDataPoints < numberOfDataPoints:
                        valuesPerPoint = math.ceil(
                            float(numberOfDataPoints) / float(maxDataPoints))
                        secondsPerPoint = int(valuesPerPoint * series.step)
                        # Nudge start over a little bit so that the consolidation bands align with each call
                        # removing 'jitter' seen when refreshing.
                        nudge = secondsPerPoint + (
                            series.start % series.step) - (series.start %
                                                           secondsPerPoint)
                        series.start = series.start + nudge
                        valuesToLose = int(nudge / series.step)
                        for r in range(1, valuesToLose):
                            del series[0]
                        series.consolidate(valuesPerPoint)
                        timestamps = range(series.start, series.end,
                                           secondsPerPoint)
                    else:
                        timestamps = range(series.start, series.end,
                                           series.step)
                    datapoints = zip(series, timestamps)
                    series_data.append(
                        dict(target=series.name, datapoints=datapoints))
            else:
                for series in data:
                    timestamps = range(series.start, series.end, series.step)
                    datapoints = zip(series, timestamps)
                    series_data.append(
                        dict(target=series.name, datapoints=datapoints))

            if 'jsonp' in requestOptions:
                response = HttpResponse(
                    content="%s(%s)" %
                    (requestOptions['jsonp'], json.dumps(series_data)),
                    content_type='text/javascript')
            else:
                response = HttpResponse(content=json.dumps(series_data),
                                        content_type='application/json')

            response['Pragma'] = 'no-cache'
            response['Cache-Control'] = 'no-cache'
            return response

        if format == 'raw':
            response = HttpResponse(content_type='text/plain')
            for series in data:
                response.write(
                    "%s,%d,%d,%d|" %
                    (series.name, series.start, series.end, series.step))
                response.write(','.join(map(str, series)))
                response.write('\n')

            log.rendering('Total rawData rendering time %.6f' %
                          (time() - start))
            return response

        if format == 'svg':
            graphOptions['outputFormat'] = 'svg'

        if format == 'pickle':
            response = HttpResponse(content_type='application/pickle')
            seriesInfo = [series.getInfo() for series in data]
            pickle.dump(seriesInfo, response, protocol=-1)

            log.rendering('Total pickle rendering time %.6f' %
                          (time() - start))
            return response

    # add template to graphOptions
    try:
        user_profile = getProfile(request, allowDefault=False)
        graphOptions['defaultTemplate'] = user_profile.defaultTemplate
    except Exception:
        graphOptions['defaultTemplate'] = "default"

    # We've got the data, now to render it
    graphOptions['data'] = data
    if settings.REMOTE_RENDERING:  # Rendering on other machines is faster in some situations
        image = delegateRendering(requestOptions['graphType'], graphOptions)
    else:
        image = doImageRender(requestOptions['graphClass'], graphOptions)

    useSVG = graphOptions.get('outputFormat') == 'svg'
    if useSVG and 'jsonp' in requestOptions:
        response = HttpResponse(content="%s(%s)" %
                                (requestOptions['jsonp'], json.dumps(image)),
                                content_type='text/javascript')
    else:
        response = buildResponse(image,
                                 'image/svg+xml' if useSVG else 'image/png')

    if useCache:
        cache.set(requestKey, response, cacheTimeout)

    log.rendering('Total rendering time %.6f seconds' % (time() - start))
    return response
Example #54
 def version(self, val=None):
     if val is not None:
         return cache.add(self.version_key, val)
     return cache.get(self.version_key)
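
Because the setter goes through `cache.add`, only the first write for a given version key sticks; later writes return False and leave the stored value untouched. An illustrative, self-contained rendering of that behaviour (`VersionedThing` is an assumed stand-in for the original class):

from django.core.cache import cache

class VersionedThing(object):
    version_key = 'thing:version'

    def version(self, val=None):
        if val is not None:
            return cache.add(self.version_key, val)
        return cache.get(self.version_key)

thing = VersionedThing()
thing.version(1)   # True  - key was absent, version is now 1
thing.version(2)   # False - add() refuses to overwrite
thing.version()    # 1     - the first value wins
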
Example #55
def index(request):
    avozy, bvozy = None, None
    cache.add(avozy, bvozy)

    avozy = request.POST.get("avozy")
    bvozy = request.POST.get("bvozy")
    try:
        avozy = str(avozy.title())
        bvozy = str(bvozy.title())
    except AttributeError:
        pass

    if avozy and bvozy:
        try:

            #  Extract the necessary data from the .json file.
            # format 'AAA':['BBB', 'CCC', ], 'DDD':['CCC']
            def airbaltic_code(file):
                di = {}
                for k, v in file.items():
                    if 'destinations' in v:
                        c = v['code']
                        d = v['destinations']
                        ls = [a[0:3] for a, b in d.items()]
                        di[c] = ls
                airbaltic = di
                # print('1. airbaltic_code - good')
                return airbaltic

            a_c = airbaltic_code(a)

            #  Extract the necessary data from the .json file.
            # format 'AAA':['BBB', 'CCC', ], 'DDD':['CCC']
            def ryanair_code(file):
                di = {}
                for v in file.values():
                    for i in v:
                        for k_v, v_v in i.items():
                            if 'iataCode' in k_v:
                                c = i['iataCode']
                                # print(c)
                            if 'routes' in k_v:
                                r = i['routes']
                                g = [i_r.split(':', maxsplit=1) for i_r in r]
                                y = [
                                    b.split('|')[0] for a, b in g
                                    if a == 'airport'
                                ]
                                di[c] = y
                ryanair = di
                # print('1. ryanair_code - good')
                return ryanair

            r_c = ryanair_code(r)

            #  Extract the necessary data from the .json file.
            # format {'AAA':['BBB', 'CCC', ], 'DDD':['CCC']}
            def wizzair_code(file):
                di = {}
                for i in file:
                    if 'iata' in i and 'connections' in i:
                        c = i['iata']
                        r = [k['iata'] for k in i['connections']]
                        di[c] = r
                wizzair = di
                # print('1. wizzair_code - good')
                return wizzair

            w_c = wizzair_code(w)

            #  Extract the necessary data from the .json file.
            # format [['City' , 'AAA'], ['City', 'BBB']]
            def airbaltic_city(file):
                dis = []
                for k, v in file.items():
                    ind = []
                    if 'city' in v and 'code' in v:
                        contr = v['city']
                        coddd = v['code']
                        ind.append(contr)
                        ind.append(coddd)
                        dis.append(ind)
                for i in dis:
                    if 'Kiev' in i[0]:
                        i[0] = 'Kyiv'
                    if 'Aalesund' in i[0]:
                        i[0] = 'Alesund'
                    if 'Kharkov' in i[0]:
                        i[0] = 'Kharkiv'
                    if 'Brønnøysund' in i[0]:
                        i[0] = 'Bronnoysund'
                # print('2. airbaltic_city - good')
                return dis

            a_ci = airbaltic_city(a)

            #  Extract the necessary data from the .json file.
            # format [['City' , 'AAA'], ['City', 'BBB']]
            def ryanair_city(file):
                dis = []
                for v in file.values():
                    for i in v:
                        for k_i, v_i in i.items():
                            ind = []
                            if k_i == 'cityCode' and 'iataCode' in i:

                                co = i['iataCode']
                                ko = i['cityCode']
                                ko = ko.split('_')
                                ko = ' '.join(ko)
                                ko = ko.title()

                                ind.append(ko)
                                ind.append(co)
                                dis.append(ind)
                for i in dis:
                    if 'Kiev' in i[0]:
                        i[0] = 'Kyiv'
                    if 'Brønnøysund' in i[0]:
                        i[0] = 'Bronnoysund'
                # print('2. ryanair_city - good')
                return dis

            r_ci = ryanair_city(r)

            #  Extract the necessary data from the .json file.
            # format [['City' , 'AAA'], ['City', 'BBB']]
            def wizzair_city(file):
                dis = []
                for i in file:
                    if 'iata' in i and 'aliases' in i:
                        co = i['iata']
                        ko = i['aliases']
                        ind = []
                        for l in ko:
                            # keep only the city part of the alias
                            l = l.split(' -')[0]
                            l = l.split('-')[0]
                            l = l.split(' ')

                            if len(l[0]) > 3:
                                l = l[0]
                            else:
                                l = ' '.join(l)
                            l = l.split('\r\n')[0]

                            ind.append(l)
                            ind.append(co)
                        dis.append(ind)

                for i in dis:
                    if 'Kraków' in i[0]:
                        i[0] = 'Krakow'
                    if 'Niš' in i[0]:
                        i[0] = 'Nis'
                    if 'Memmingen/Munich' in i[0]:
                        i[0] = 'Memmingen'
                    if 'Prishtina' in i[0]:
                        i[0] = 'Pristina'
                    if 'Brønnøysund' in i[0]:
                        i[0] = 'Bronnoysund'
                # print('2. wizzair_city - good')
                return dis

            w_ci = wizzair_city(w)

            def find_path(graph, start, end):
                dist = {start: [start]}
                q = deque([start])
                while len(q):
                    at = q.popleft()
                    for next in graph[at]:
                        if next not in graph:
                            continue
                        if next not in dist:
                            dist[next] = [*dist[at], next]
                            q.append(next)
                return dist[end]

            # def find_path(graph, start, end, path=[]):
            #     path = path + [start]
            #     if start == end:
            #         return path
            #     if start not in graph:
            #         return None
            #     for node in graph[start]:
            #         if node not in path:
            #             newpath = find_path(graph, node, end, path)
            #             if newpath: return newpath
            #     # print('3. find_code - good')
            #     return None

            # Here we map a city name to its IATA code, e.g. 'Kyiv' to 'KBP'.
            # air_ci is a list of [city, code] pairs, like ['Kyiv', 'KBP'];
            # a and b are the names of the departure and arrival cities;
            # air_c is an adjacency dict, like {'KBP': ['FKI', 'ALL'], ...}

            def rewrite_code(air_ci, a, b, air_c):
                try:
                    for l in air_ci:
                        if a == l[0]:
                            nach = l[1]
                        if b == l[0]:
                            cone = l[1]
                    return find_path(air_c, nach, cone)
                except (NameError, KeyError):
                    nach = 'KRK'
                    cone = 'KRK'
                    return find_path(air_c, nach, cone)

            # Here we map IATA codes back to city names, e.g. 'KBP' to 'Kyiv'.
            # air_ci is a list of [city, code] pairs, like ['Kyiv', 'KBP'];

            # l_redi is the route returned by rewrite_code()
            def rewrite_code_2(l_redi, air_ci):
                s = []
                for r in l_redi:
                    for l in air_ci:
                        if r == l[1]:
                            # s.append( [l[0], l[1]] )
                            s.append(l[0])  # for one variant
                if len(s) == 1:
                    er = [
                        'Unfortunately, this airline can\'t depart from or arrive in that city'
                    ]
                    return er
                else:
                    return s

            # If the same city appears twice in a row, this collapses the duplicates.
            def del_same_city(list_go):
                s = 1
                n = len(list_go)
                if n > 2:
                    while True:
                        n = len(list_go)
                        if s >= n:
                            break

                        if list_go[s - 1] == list_go[s]:
                            del list_go[s - 1]
                        else:
                            s += 1
                else:
                    pass
                return list_go

            block_air_ci = [a_ci, r_ci, w_ci]
            block_air_c = [a_c, r_c, w_c]

            u = []
            try:
                for air_c, air_ci in zip(block_air_c, block_air_ci):
                    r_c = rewrite_code(air_ci, avozy, bvozy, air_c)
                    r_c_2 = rewrite_code_2(r_c, air_ci)
                    h = del_same_city(r_c_2)
                    u.append(h)
            except Exception:
                return render(
                    request, 'index.html', {
                        'err':
                        'Try again, one of the cities: "' + avozy +
                        '" or "' + bvozy + '" isn\'t correct'
                    })

            return render(
                request, 'index.html', {
                    'air_0': u[0],
                    'a': 'Airbaltic: ',
                    'air_1': u[1],
                    'r': 'Ryanair: ',
                    'air_2': u[2],
                    'w': 'Wizzair: '
                })

        except Exception:
            return render(request, 'index.html')
    else:
        return render(request, 'index.html')
Example #56
File: utils.py  Project: tesssie/tendenci
def build_image(file,
                size,
                pre_key,
                crop=False,
                quality=90,
                cache=False,
                unique_key=None,
                constrain=False):
    """
    Builds a resized image based off of the original image.
    """
    try:
        quality = int(quality)
    except (TypeError, ValueError):
        quality = 90

    if settings.USE_S3_STORAGE:
        content = read_media_file_from_s3(file)
        image = Image.open(BytesIO(content))
    else:
        if hasattr(file, 'path') and exists(file.path):
            try:
                image = Image.open(file.path)  # get image
            except Image.DecompressionBombError:
                raise Http404
        else:
            raise Http404

    image = apply_orientation(image)

    image_options = {'quality': quality}
    if image.format == 'GIF':
        image_options['transparency'] = 0

    format = image.format
    if image.format in ('GIF', 'PNG'):
        if image.mode != "RGBA":
            image = image.convert("RGBA")
        image.format = format  # this is lost in conversion

    elif image.format == 'JPEG':
        # handle infamous error
        # IOError: cannot write mode P as JPEG
        if image.mode != "RGB":
            image = image.convert("RGB")

    if crop:
        image = image_rescale(image, size)  # thumbnail image
    else:
        format = image.format
        image = image.resize(size, Image.ANTIALIAS)  # resize image
        image.format = format

    binary = get_image_binary(image, **image_options)

    if cache:
        key = generate_image_cache_key(file, size, pre_key, crop, unique_key,
                                       quality, constrain)
        try:
            django_cache.add(key, binary,
                             60 * 60 * 24 * 30)  # cache for 30 days #issue/134
        except Exception:
            pass

    return binary
Example #57
def clear_blocklist(*args, **kw):
    # Something in the blocklist changed; invalidate all responses.
    cache.add('blocklist:keyversion', 1)
    cache.incr('blocklist:keyversion')
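
Here `add` only guarantees the version key exists (on some backends `incr` raises if the key is missing); the `incr` then invalidates everything at once, provided readers fold the version into their own cache keys. A sketch of that reader-side convention; the key layout is an assumption:

from django.core.cache import cache

def blocklist_cache_key(request_path):
    # Fold the current version into the key: once clear_blocklist() bumps
    # the version, previously cached entries are simply never looked up again.
    version = cache.get('blocklist:keyversion', 1)
    return 'blocklist:%s:%s' % (version, request_path)
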
Example #58
                    p.terminate()
                    return errorPage("Failed to fetch data")

                if seriesList is None:
                    log.info("DEBUG:[%s] request timed out" % requestHash)
                    p.terminate()
                    return errorPage("Request timed out")

                data.extend(seriesList)
            log.rendering("[%s] Retrieval took %.6f" % (requestHash,
                                                        (time() - start_t)))
            log.info("RENDER:[%s]:Timigns:Retrieve %.6f" %
                     (requestHash, (time() - start_t)))

            if useCache:
                cache.add(dataKey, data, cacheTimeout)

        # If data is all we needed, we're done
        format = requestOptions.get('format')
        if format == 'csv':
            response = HttpResponse(content_type='text/csv')
            writer = csv.writer(response, dialect='excel')

            for series in data:
                for i, value in enumerate(series):
                    timestamp = datetime.fromtimestamp(
                        series.start + (i * series.step),
                        requestOptions['tzinfo'])
                    writer.writerow(
                        (series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"),
                         value))
Example #59
File: models.py  Project: sundw2015/841
 def ban_ip(self, ip, ban_time=None):
     if not ban_time:
         ban_time = self.fail_ban_time
     logger.error("Agent {}: IP {} exceeded the maximum number of failed "
                  "logins; banning logins for {} seconds".format(
                      self.name, ip, ban_time))
     cache.add(self.ban_key(ip), 'ban', ban_time)
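
The ban itself is nothing more than the presence of the key, and it expires on its own after `ban_time` seconds. A sketch of the corresponding check on the login path; `is_banned` is an assumed helper, not part of the original model:

from django.core.cache import cache

def is_banned(ban_key):
    # A ban is simply the presence of the key; no explicit unban is needed,
    # because cache expiry lifts it automatically.
    return cache.get(ban_key) is not None
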
Example #60
 def test_add(self):
     # A key can be added to a cache
     cache.add("addkey1", "value")
     result = cache.add("addkey1", "newvalue")
     self.assertFalse(result)
     self.assertEqual(cache.get("addkey1"), "value")