def get_cached(self, **kwargs):
    """Gets the model instance from the cache, or, if the instance is not
    in the cache, gets it from the database and puts it in the cache.

    ``kwargs`` must match one of the cacheable lookups configured for the
    model, otherwise ``ValueError`` is raised.  Negative results are cached
    as exception instances and re-raised as the model-specific
    ``DoesNotExist`` / ``MultipleObjectsReturned`` on later hits.
    """
    cache_settings = get_cache_settings(self.model)
    lookups = cache_settings.get("lookups")
    keys = kwargs.keys()
    # A single kwarg is valid if it is one of the configured lookups; a
    # combination is valid if it matches (order-insensitively) one of the
    # configured list/tuple lookups.
    single_kwarg_match = len(keys) == 1 and keys[0] in lookups
    multi_kwarg_match = len(keys) != 1 and any(
        sorted(keys) == sorted(lookup) for lookup in lookups
        if isinstance(lookup, (list, tuple)))
    if not single_kwarg_match and not multi_kwarg_match:
        raise ValueError("Caching not allowed with kwargs %s" % ", ".join(keys))

    # Get object from cache or db.
    key = generate_base_key(self.model, **kwargs)
    obj = cache.get(key)
    if obj is not None:
        # A cached exception instance marks a cached negative result;
        # rebuild the model-specific exception type for the caller.
        if isinstance(obj, ObjectDoesNotExist):
            raise self.model.DoesNotExist(repr(obj))
        elif isinstance(obj, MultipleObjectsReturned):
            raise self.model.MultipleObjectsReturned(repr(obj))
        else:
            return obj
    try:
        obj = self.get(**kwargs)
    except (ObjectDoesNotExist, MultipleObjectsReturned), e:
        # The model-specific subclasses of these exceptions are not
        # pickleable, so we cache the base exception and reconstruct the
        # specific exception when fetching from the cache.
        obj = e.__class__.__base__(repr(e))
        cache.set(key, obj, cache_settings.get("timeout"))
        raise
    # NOTE(review): as visible here, a successful database fetch falls
    # through without caching or returning ``obj``; this looks truncated --
    # confirm against the complete source before relying on this path.
def get_cached(self, **kwargs): """Gets the model instance from the cache, or, if the instance is not in the cache, gets it from the database and puts it in the cache. """ cache_settings = get_cache_settings(self.model) lookups = cache_settings.get("lookups") keys = kwargs.keys() single_kwarg_match = len(keys) == 1 and keys[0] in lookups multi_kwarg_match = len(keys) != 1 and any( sorted(keys) == sorted(lookup) for lookup in lookups if isinstance(lookup, (list, tuple)) ) if not single_kwarg_match and not multi_kwarg_match: raise ValueError("Caching not allowed with kwargs %s" % ", ".join(keys)) # Get object from cache or db. key = generate_base_key(self.model, **kwargs) obj = cache.get(key) if obj is not None: if isinstance(obj, ObjectDoesNotExist): raise self.model.DoesNotExist(repr(obj)) elif isinstance(obj, MultipleObjectsReturned): raise self.model.MultipleObjectsReturned(repr(obj)) else: return obj try: obj = self.get(**kwargs) except (ObjectDoesNotExist, MultipleObjectsReturned), e: # The model-specific subclasses of these exceptions are not # pickleable, so we cache the base exception and reconstruct the # specific exception when fetching from the cache. obj = e.__class__.__base__(repr(e)) cache.set(key, obj, cache_settings.get("timeout")) raise
def get_many_cached(self, list_of_kwargs):
    """Gets the model instances from the cache, or, if an instance is not
    in the cache, gets it from the database and puts it in the cache.

    Each dict in ``list_of_kwargs`` must match one of the model's
    configured cacheable lookups, otherwise ``ValueError`` is raised.
    Cached negative results re-raise the model-specific ``DoesNotExist`` /
    ``MultipleObjectsReturned``.
    """
    cache_settings = get_cache_settings(self.model)
    lookups = cache_settings.get("lookups")
    prefetch = cache_settings.get("prefetch")
    # Pre-join related rows for db fallbacks when the prefetch settings map
    # to select_related() attributes.
    related = self._get_select_related_from_attrs(self.model, prefetch)
    if related:
        base_qs = self.all().select_related(*related)
    else:
        base_qs = self.all()

    # Validate every lookup and build cache_key -> kwargs in one pass.
    cache_keys = dict()
    for kwargs in list_of_kwargs:
        keys = kwargs.keys()
        # Same validation rule as get_cached(): one kwarg must be a
        # configured lookup; several must match a list/tuple lookup.
        single_kwarg_match = len(keys) == 1 and keys[0] in lookups
        multi_kwarg_match = len(keys) != 1 and any(
            sorted(keys) == sorted(lookup) for lookup in lookups
            if isinstance(lookup, (list, tuple)))
        if not single_kwarg_match and not multi_kwarg_match:
            raise ValueError("Caching not allowed with kwargs %s" % ", ".join(keys))
        # Get object from cache or db.
        key = generate_base_key(self.model, **kwargs)
        cache_keys[key] = kwargs

    # One round-trip for all cache hits.
    objects = cache.get_many(cache_keys.keys())
    pending_cache_update = dict()
    cached_objects = list()
    for key, kwargs in cache_keys.iteritems():
        obj = objects.get(key, None)
        if obj is not None:
            # Cached exception instances represent cached negative results.
            if isinstance(obj, ObjectDoesNotExist):
                raise self.model.DoesNotExist(repr(obj))
            elif isinstance(obj, MultipleObjectsReturned):
                raise self.model.MultipleObjectsReturned(repr(obj))
            else:
                cached_objects.append(obj)
                continue
        try:
            obj = base_qs.get(**kwargs)
        except (ObjectDoesNotExist, MultipleObjectsReturned), e:
            # The model-specific subclasses of these exceptions are not
            # pickleable, so we cache the base exception and reconstruct the
            # specific exception when fetching from the cache.
            obj = e.__class__.__base__(repr(e))
            cache.set(key, obj, cache_settings.get("timeout"))
            raise
        self._prefetch_related(obj, prefetch)
        self._tag_object_as_from_cache(obj)
        pending_cache_update[key] = obj
        cached_objects.append(obj)
    # NOTE(review): ``pending_cache_update`` is built but never written back
    # (no cache.set_many) and nothing is returned in the visible code; this
    # looks truncated -- confirm against the complete source.
def wrapper(*args, **kwargs):
    """Enforce login lockout around ``function``.

    Builds a cache key from the client IP (and, per settings, the user
    agent and username), raises ``LockedOut`` once the failure count
    reaches ``settings.MAX_ATTEMPTS``, and counts a failure whenever
    ``function`` returns ``None`` (presumably Django's ``authenticate``
    contract -- confirm at the decorator site).
    """
    # Get request details from thread local
    request = getattr(thread_namespace, 'lockoutrequest', None)
    if request is None:
        # The call to authenticate must not have come via an HttpRequest, so
        # lockout is not enforced.
        return function(*args, **kwargs)

    params = []
    ip = request.META.get('HTTP_X_FORWARDED_FOR', None)
    if ip:
        # X_FORWARDED_FOR returns client1, proxy1, proxy2,...
        ip = ip.split(', ')[0]
    else:
        ip = request.META.get('REMOTE_ADDR', '')
    params.append(ip)

    if settings.USE_USER_AGENT:
        useragent = request.META.get('HTTP_USER_AGENT', '')
        params.append(useragent)

    if settings.WITH_USERNAME:
        # Username is taken from kwargs or the first positional argument;
        # a missing username is tolerated rather than treated as an error.
        try:
            username = kwargs.get('username') or args[0]
            params.append(username)
        except IndexError:
            pass

    key = generate_base_key(*params)
    attempts = cache.get(key) or 0
    if attempts >= settings.MAX_ATTEMPTS:
        raise LockedOut()

    result = function(*args, **kwargs)
    if result is None:
        # Authentication failed: bump the failure counter.
        try:
            attempts = cache.incr(key)
        except ValueError:
            # No such key, so set it
            cache.set(key, 1, settings.ENFORCEMENT_WINDOW)
            # BUG FIX: reflect the count just stored; previously `attempts`
            # kept its stale pre-call value (0), so the lockout check below
            # could never trigger on this branch (e.g. MAX_ATTEMPTS == 1).
            attempts = 1
        # If attempts is max allowed, set a new key with that
        # value so that the lockout time will be based on the most
        # recent login attempt.
        if attempts >= settings.MAX_ATTEMPTS:
            cache.set(key, attempts, settings.LOCKOUT_TIME)
    return result
def reset_attempts(request):
    """Clears the cache key for the specified ``request``.

    The key is derived from the client IP and, when
    ``settings.USE_USER_AGENT`` is set, the request's user agent.
    """
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR', None)
    if forwarded:
        # X_FORWARDED_FOR returns client1, proxy1, proxy2,...; keep the client.
        client_ip = forwarded.split(', ')[0]
    else:
        client_ip = request.META.get('REMOTE_ADDR', '')

    key_parts = [client_ip]
    if settings.USE_USER_AGENT:
        key_parts.append(request.META.get('HTTP_USER_AGENT', ''))

    cache.delete(generate_base_key(*key_parts))
def wrapper(*args, **kwargs):
    """Enforce login lockout around ``function``.

    Builds a cache key from the client IP (and, per settings, the user
    agent), raises ``LockedOut`` once the failure count reaches
    ``settings.MAX_ATTEMPTS``, and counts a failure whenever ``function``
    returns ``None``.
    """
    # Get request details from thread local
    request = getattr(thread_namespace, 'lockoutrequest', None)
    if request is None:
        # The call to authenticate must not have come via an HttpRequest, so
        # lockout is not enforced.
        return function(*args, **kwargs)

    params = []
    ip = request.META.get('HTTP_X_FORWARDED_FOR', None)
    if ip:
        # X_FORWARDED_FOR returns client1, proxy1, proxy2,...
        ip = ip.split(', ')[0]
    else:
        ip = request.META.get('REMOTE_ADDR', '')
    params.append(ip)

    if settings.USE_USER_AGENT:
        useragent = request.META.get('HTTP_USER_AGENT', '')
        params.append(useragent)

    key = generate_base_key(*params)
    attempts = cache.get(key) or 0
    if attempts >= settings.MAX_ATTEMPTS:
        raise LockedOut()

    result = function(*args, **kwargs)
    if result is None:
        # Authentication failed: bump the failure counter.
        try:
            attempts = cache.incr(key)
        except ValueError:
            # No such key, so set it
            cache.set(key, 1, settings.ENFORCEMENT_WINDOW)
            # BUG FIX: reflect the count just stored; previously `attempts`
            # kept its stale pre-call value (0), so the lockout check below
            # could never trigger on this branch (e.g. MAX_ATTEMPTS == 1).
            attempts = 1
        # If attempts is max allowed, set a new key with that
        # value so that the lockout time will be based on the most
        # recent login attempt.
        if attempts >= settings.MAX_ATTEMPTS:
            cache.set(key, attempts, settings.LOCKOUT_TIME)
    return result
def wrapper(request, *args, **kwargs):
    """Enforce login lockout around a login view.

    Builds a cache key from the client IP (and, per settings, the user
    agent), raises ``LockedOut`` once the failure count reaches
    ``settings.MAX_ATTEMPTS``, and counts a POST as a failed login when the
    response is neither a redirect (302) nor carries a Location header.
    """
    params = []
    ip = request.META.get('HTTP_X_FORWARDED_FOR', None)
    if ip:
        # X_FORWARDED_FOR returns client1, proxy1, proxy2,...
        ip = ip.split(', ')[0]
    else:
        ip = request.META.get('REMOTE_ADDR', '')
    params.append(ip)

    if settings.USE_USER_AGENT:
        useragent = request.META.get('HTTP_USER_AGENT', '')
        params.append(useragent)

    key = generate_base_key(*params)
    attempts = cache.get(key) or 0
    if attempts >= settings.MAX_ATTEMPTS:
        raise LockedOut()

    response = function(request, *args, **kwargs)
    if request.method == 'POST':
        # A successful login redirects; anything else on POST is a failure.
        login_failed = (
            response and
            not response.has_header('location') and
            response.status_code != 302
        )
        if login_failed:
            try:
                attempts = cache.incr(key)
            except ValueError:
                # No such key, so set it
                cache.set(key, 1, settings.ENFORCEMENT_WINDOW)
                # BUG FIX: reflect the count just stored; previously
                # `attempts` kept its stale pre-call value (0), so the
                # lockout check below could never trigger on this branch
                # (e.g. MAX_ATTEMPTS == 1).
                attempts = 1
            # If attempts is max allowed, set a new key with that
            # value so that the lockout time will be based on the most
            # recent login attempt.
            if attempts >= settings.MAX_ATTEMPTS:
                cache.set(key, attempts, settings.LOCKOUT_TIME)
    return response
def invalidate_root_instances(self, *instances):
    """Invalidates all possible versions of the cached ``instances``, using
    the ``instances``'s lookups and their current field values.

    Each instance in ``instances`` must be a root instance, that is, an
    instance of one of the top-level models stored in the cache, not a
    related model instance.
    """
    self.seen_instances.update(instances)
    keys = set()
    for instance in instances:
        model = instance.__class__
        # Invalidate both the current state and, when available, the state
        # the instance had when it was loaded (``_orig_state``).
        variants = [instance]
        if hasattr(instance, "_orig_state"):
            previous = model()
            previous.__dict__ = instance._orig_state
            variants.append(previous)
        for variant in variants:
            cache_settings = get_cache_settings(variant.__class__)
            for lookup in cache_settings.get("lookups"):
                # Normalize a scalar lookup into a one-element list.
                fieldnames = lookup if isinstance(lookup, (list, tuple)) else [lookup]
                kwargs = {}
                for fieldname in fieldnames:
                    if fieldname == "pk":
                        field = model._meta.pk
                    else:
                        field = model._meta.get_field(fieldname)
                    kwargs[fieldname] = getattr(variant, field.get_attname())
                keys.add(generate_base_key(variant.__class__, **kwargs))
    cache.delete_many(keys)
def wrapper(*args, **kwargs):
    """Enforce login lockout around ``function``.

    Builds a cache key from ``REMOTE_ADDR`` (and, per settings, the user
    agent), raises ``LockedOut`` once the failure count reaches
    ``settings.MAX_ATTEMPTS``, and counts a failure whenever ``function``
    returns ``None``.
    """
    # Get request details from thread local
    request = getattr(thread_namespace, 'lockoutrequest', None)
    if request is None:
        # The call to authenticate must not have come via an HttpRequest, so
        # lockout is not enforced.
        return function(*args, **kwargs)

    params = []
    ip = request.META.get('REMOTE_ADDR', '')
    params.append(ip)

    if settings.USE_USER_AGENT:
        useragent = request.META.get('HTTP_USER_AGENT', '')
        params.append(useragent)

    key = generate_base_key(*params)
    attempts = cache.get(key) or 0
    if attempts >= settings.MAX_ATTEMPTS:
        raise LockedOut()

    result = function(*args, **kwargs)
    if result is None:
        # Authentication failed: bump the failure counter.
        try:
            attempts = cache.incr(key)
        except ValueError:
            # No such key, so set it
            cache.set(key, 1, settings.ENFORCEMENT_WINDOW)
            # BUG FIX: reflect the count just stored; previously `attempts`
            # kept its stale pre-call value (0), so the lockout check below
            # could never trigger on this branch (e.g. MAX_ATTEMPTS == 1).
            attempts = 1
        # If attempts is max allowed, set a new key with that
        # value so that the lockout time will be based on the most
        # recent login attempt.
        if attempts >= settings.MAX_ATTEMPTS:
            cache.set(key, attempts, settings.LOCKOUT_TIME)
    return result
def wrapper(*args, **kwargs):
    """Enforce login lockout around ``function``.

    Builds a cache key from the client IP (and, per settings, the user
    agent), raises ``LockedOut`` once the failure count reaches
    ``settings.MAX_ATTEMPTS``, and records a failure -- via
    ``_handle_authentication_failure`` -- when ``function`` either raises
    or returns ``None``.
    """
    # Request details live in a thread local set by middleware.
    request = getattr(thread_namespace, 'lockoutrequest', None)
    if request is None:
        # Not invoked via an HttpRequest, so lockout is not enforced.
        return function(*args, **kwargs)

    key_parts = []
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR', None)
    if forwarded:
        # X_FORWARDED_FOR returns client1, proxy1, proxy2,...; keep the client.
        key_parts.append(forwarded.split(', ')[0])
    else:
        key_parts.append(request.META.get('REMOTE_ADDR', ''))

    if settings.USE_USER_AGENT:
        key_parts.append(request.META.get('HTTP_USER_AGENT', ''))

    key = generate_base_key(*key_parts)
    attempts = cache.get(key) or 0
    if attempts >= settings.MAX_ATTEMPTS:
        raise LockedOut()

    try:
        result = function(*args, **kwargs)
    except:
        # Record the failed attempt, then let the original error propagate.
        _handle_authentication_failure(key, attempts)
        raise

    if result is None:
        _handle_authentication_failure(key, attempts)
    return result