def persist_hits():
    """Flush the hit counts accumulated in memcache to the database.

    Scans memcache for ``hitcount__<ctype_pk>__<object_pk>`` keys, adds each
    cached count onto the matching ``Hit`` row for today's date (creating the
    row if necessary) and resets the cached counter to zero.  While the flush
    runs, a cache-level lock (``CACHED_HITCOUNT_LOCK_KEY``) is set so that
    ``_update_hit_count`` skips counting; the lock is always released in the
    ``finally`` clause.

    Raises: re-raises any exception encountered while persisting, after
    logging it.
    """
    if is_cached_hitcount_enabled() and using_memcache():
        backend, location, params = parse_backend_conf(CACHED_HITCOUNT_CACHE)
        host, port = location.split(':')
        hitcount_cache = get_hitcount_cache()
        lock = hitcount_cache.get(CACHED_HITCOUNT_LOCK_KEY)
        if lock is None or lock != 1:
            try:
                # acquire the lock so no cache-side updates occur while we
                # are persisting the hits to the DB
                hitcount_cache.set(CACHED_HITCOUNT_LOCK_KEY, 1, CACHED_HITCOUNT_CACHE_TIMEOUT)
                mem = MemcachedStats(host, port)
                keys = mem.keys()
                # memoize ContentType lookups so the DB isn't hit once per key
                content_types = {}
                for cache_key in keys:
                    if "hitcount__" in cache_key and not CACHED_HITCOUNT_IP_CACHE in cache_key:
                        # the raw key is key_prefix:version:key, all separated
                        # by ':' - all we need is the trailing key part
                        cache_key = cache_key.split(':')[-1]
                        count = hitcount_cache.get(cache_key)
                        if count:  # only persist when the count is a non-zero value
                            hitcount, ctype_pk, object_pk = cache_key.split('__')
                            if ctype_pk in content_types:  # was .keys(): O(n) scan
                                content_type = content_types[ctype_pk]
                            else:
                                content_type = ContentType.objects.get(id=ctype_pk)
                                content_types[ctype_pk] = content_type
                            with transaction_atomic():
                                # save a new hit or increment the hits on an
                                # existing hit (row locked for the update)
                                hit, created = Hit.objects.select_for_update().get_or_create(
                                    added=datetime.utcnow().date(),
                                    object_pk=object_pk,
                                    content_type=content_type)
                                # get_or_create always returns an instance, so
                                # only the created/existing split is needed
                                if created:
                                    hit.hits = long(count)
                                else:
                                    hit.hits = hit.hits + long(count)
                                hit.save()
                            # reset the hitcount for this object to 0 - even if
                            # it was previously None
                            hitcount_cache.set(cache_key, 0, CACHED_HITCOUNT_CACHE_TIMEOUT)
            except Exception as ex:
                logger.error('Unable to persist hits')
                logger.error(ex)
                raise ex
            finally:
                # release the lock so requests can resume counting hits.
                # NOTE(review): the original text ended at a bare ``finally:``;
                # releasing the lock is the cleanup matching the acquire above.
                hitcount_cache.set(CACHED_HITCOUNT_LOCK_KEY, 0, CACHED_HITCOUNT_CACHE_TIMEOUT)
def persist_hits():
    """Flush the hit counts accumulated in memcache to the database.

    Scans memcache for ``hitcount__<ctype_pk>__<object_pk>`` keys, adds each
    cached count onto the matching ``Hit`` row for today's date (creating the
    row if necessary) and resets the cached counter to zero.  While the flush
    runs, a cache-level lock (``CACHED_HITCOUNT_LOCK_KEY``) is set so that
    ``_update_hit_count`` skips counting; the lock is always released in the
    ``finally`` clause.

    Raises: re-raises any exception encountered while persisting, after
    logging it.
    """
    if is_cached_hitcount_enabled() and using_memcache():
        backend, location, params = parse_backend_conf(CACHED_HITCOUNT_CACHE)
        host, port = location.split(':')
        hitcount_cache = get_hitcount_cache()
        lock = hitcount_cache.get(CACHED_HITCOUNT_LOCK_KEY)
        if lock is None or lock != 1:
            try:
                # acquire the lock so no cache-side updates occur while we
                # are persisting the hits to the DB
                hitcount_cache.set(CACHED_HITCOUNT_LOCK_KEY, 1, CACHED_HITCOUNT_CACHE_TIMEOUT)
                mem = MemcachedStats(host, port)
                keys = mem.keys()
                # memoize ContentType lookups so the DB isn't hit once per key
                content_types = {}
                for cache_key in keys:
                    if "hitcount__" in cache_key and not CACHED_HITCOUNT_IP_CACHE in cache_key:
                        # the raw key is key_prefix:version:key, all separated
                        # by ':' - all we need is the trailing key part
                        cache_key = cache_key.split(':')[-1]
                        count = hitcount_cache.get(cache_key)
                        if count:  # only persist when the count is a non-zero value
                            hitcount, ctype_pk, object_pk = cache_key.split('__')
                            if ctype_pk in content_types:  # was .keys(): O(n) scan
                                content_type = content_types[ctype_pk]
                            else:
                                content_type = ContentType.objects.get(id=ctype_pk)
                                content_types[ctype_pk] = content_type
                            with transaction_atomic():
                                # save a new hit or increment the hits on an
                                # existing hit (row locked for the update)
                                hit, created = Hit.objects.select_for_update().get_or_create(
                                    added=datetime.utcnow().date(),
                                    object_pk=object_pk,
                                    content_type=content_type)
                                # get_or_create always returns an instance, so
                                # only the created/existing split is needed
                                if created:
                                    hit.hits = long(count)
                                else:
                                    hit.hits = hit.hits + long(count)
                                hit.save()
                            # reset the hitcount for this object to 0 - even if
                            # it was previously None
                            hitcount_cache.set(cache_key, 0, CACHED_HITCOUNT_CACHE_TIMEOUT)
            except Exception as ex:
                logger.error('Unable to persist hits')
                logger.error(ex)
                raise ex
            finally:
                # release the lock so requests can resume counting hits.
                # NOTE(review): the original text ended at a bare ``finally:``;
                # releasing the lock is the cleanup matching the acquire above.
                hitcount_cache.set(CACHED_HITCOUNT_LOCK_KEY, 0, CACHED_HITCOUNT_CACHE_TIMEOUT)
def _update_hit_count(request, object_pk, ctype_pk):
    '''
    Register a hit for the given object in the hitcount cache.

    Evaluates the request against the IP blacklist and the bot filter and,
    unless the persist lock is currently held, increments the cached hit
    counter for (ctype_pk, object_pk).  This is NOT a view!  But should be
    used within a view ...

    Returns True if the request was considered a Hit; returns False if not.
    '''
    if request:  # request may be absent when we are called from a signal
        ip = get_ip(request)
        # first, check our request against the blacklists before continuing
        if CACHED_HITCOUNT_EXCLUDE_IP_ADDRESS and ip in BlacklistIP.objects.get_cache():
            return False
        # are we excluding bots and is this a bot?
        if CACHED_HITCOUNT_EXCLUDE_BOTS and is_bot_request(request):
            return False

    # record the hit in memcache
    hitcount_cache = get_hitcount_cache()
    cache_key = "hitcount__%s__%s" % (ctype_pk, object_pk)

    # while the lock is set the counters are being persisted to the DB,
    # so do nothing in that case
    lock = hitcount_cache.get(CACHED_HITCOUNT_LOCK_KEY)
    if lock is None or lock != 1:
        try:
            hitcount_cache.incr(cache_key)
        except ValueError:
            # counter key missing (cache might have timed out) - restart at 1
            hitcount_cache.set(cache_key, 1, CACHED_HITCOUNT_CACHE_TIMEOUT)
        return True
    return False
def _update_hit_count(request, object_pk, ctype_pk):
    '''
    Register a hit for the given object in the hitcount cache.

    Evaluates the request against the IP blacklist and the bot filter and,
    unless the persist lock is currently held, increments the cached hit
    counter for (ctype_pk, object_pk).  This is NOT a view!  But should be
    used within a view ...

    Returns True if the request was considered a Hit; returns False if not.
    '''
    if request:  # request may be absent when we are called from a signal
        ip = get_ip(request)
        # first, check our request against the blacklists before continuing
        if CACHED_HITCOUNT_EXCLUDE_IP_ADDRESS and ip in BlacklistIP.objects.get_cache():
            return False
        # are we excluding bots and is this a bot?
        if CACHED_HITCOUNT_EXCLUDE_BOTS and is_bot_request(request):
            return False

    # record the hit in memcache
    hitcount_cache = get_hitcount_cache()
    cache_key = "hitcount__%s__%s" % (ctype_pk, object_pk)

    # while the lock is set the counters are being persisted to the DB,
    # so do nothing in that case
    lock = hitcount_cache.get(CACHED_HITCOUNT_LOCK_KEY)
    if lock is None or lock != 1:
        try:
            hitcount_cache.incr(cache_key)
        except ValueError:
            # counter key missing (cache might have timed out) - restart at 1
            hitcount_cache.set(cache_key, 1, CACHED_HITCOUNT_CACHE_TIMEOUT)
        return True
    return False
def get_cache(self):
    """Return the cached blacklist of IPs, repopulating the cache on a miss.

    A cached empty list is a valid (negative) result; only a true cache miss
    (a non-list falsy value such as ``None``) triggers a refresh through
    ``set_cache``.
    """
    cached = get_hitcount_cache().get(CACHED_HITCOUNT_IP_CACHE)
    if not (cached or isinstance(cached, list)):
        cached = self.set_cache()
    return cached
def set_cache(self):
    """Load every blacklisted IP from the queryset, cache and return the list."""
    ips = list(self.get_queryset().values_list('ip', flat=True))
    get_hitcount_cache().set(CACHED_HITCOUNT_IP_CACHE, ips, CACHED_HITCOUNT_IP_CACHE_TIMEOUT)
    return ips