def aggregate_day_after_hour_aggregated(sender, left_boundary, right_boundary,
                                        **kwargs):
    aggregate_queryset = HourAggregate.objects.filter(
        timestamp__gte=left_boundary, timestamp__lt=right_boundary)

    aggregate_queryset.query.clear_ordering(force_empty=True)
    aggregate_queryset = aggregate_queryset.values(
        'admin_id', 'instance_id', 'instance_name',
        'source').annotate(value=Sum('value'))
    day_date = datetime(left_boundary.year,
                        left_boundary.month,
                        left_boundary.day,
                        tzinfo=pytz.utc)

    with Lock(redis=redis,
              name='day_aggregation_{}'.format(day_date.isoformat())):
        for aggregate_dict in aggregate_queryset.iterator():
            filter_data = {
                'timestamp': day_date,
                'instance_name': aggregate_dict.get('instance_name'),
                'source': aggregate_dict.get('source'),
                'admin_id': aggregate_dict.get('admin_id')
            }
            if not DayAggregate.objects.filter(**filter_data).update(
                    value=F('value') + aggregate_dict.get('value')):
                DayAggregate.objects.create(timestamp=day_date,
                                            **aggregate_dict)
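
A hedged sketch of how a handler like this might be wired up, assuming it listens on a custom Django signal (the signal name and the way the hour boundaries are produced are illustrative, not taken from the original project):

# hypothetical wiring for the handler above
from datetime import datetime, timedelta

import django.dispatch
import pytz

hour_aggregation_finished = django.dispatch.Signal()
hour_aggregation_finished.connect(aggregate_day_after_hour_aggregated)

# after an hourly aggregation run, notify listeners of the hour just processed
hour_start = datetime(2024, 1, 1, 12, tzinfo=pytz.utc)
hour_aggregation_finished.send(sender=None,
                               left_boundary=hour_start,
                               right_boundary=hour_start + timedelta(hours=1))
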
def get_hash_cache(name, key, ex=None, none_callback=None, *args, **kwargs):
    """
    name          : redis hash name
    key           : redis key name
    ex            : expiration time in seconds
    none_callback : callback invoked when the cached value is None
    args          : positional arguments for the callback
    kwargs        : keyword arguments for the callback
    """
    cli = RedisPool().connection()
    with Lock(cli, _dist_lock_key(name), timeout=20):
        v = cli.hget(name, key)
        if v is None and none_callback:
            if DEBUG_CACHE:
                print "no redis cache hit. %s." % key
            v = none_callback(*args, **kwargs)
            try:
                nv = JSONDataEncoder.encode(v)
            except Exception:
                nv = v
            cli.hset(name, key, nv)
            _set_expire_time(cli, name, ex)
        else:
            if DEBUG_CACHE:
                print "cache redis hit. %s." % key
            try:
                v = JSONDataEncoder.decode(v)
            except Exception:
                pass
    return v
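
A brief usage sketch for get_hash_cache; the hash name, field and loader below are made up for illustration:

# illustrative caller: fall back to a (hypothetical) loader on a cache miss
def load_user_profile(user_id):
    return {'id': user_id, 'name': 'example'}

profile = get_hash_cache('user_profiles',             # redis hash name
                         'user:42',                    # field inside the hash
                         ex=300,                       # keep the hash for 5 minutes
                         none_callback=load_user_profile,
                         user_id=42)
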
def get_hash_map_cache(name,
                       keys,
                       ex=None,
                       none_callback=None,
                       *args,
                       **kwargs):
    """
    name          : redis hash name
    keys          : list of field names to read from the hash
    ex            : expiration time in seconds
    none_callback : callback invoked when no cached value is found
    args          : positional arguments for the callback
    kwargs        : keyword arguments for the callback
    """
    cli = RedisPool().connection()
    with Lock(cli, _dist_lock_key(name), timeout=20):
        v = cli.hmget(name, keys)
        # hmget returns a list of values (None for missing fields), never None itself
        if (not v or all(item is None for item in v)) and none_callback:
            if DEBUG_CACHE:
                print("no redis cache hit. %s." % keys)
            # the callback is expected to return a field -> value mapping for hmset
            v = none_callback(*args, **kwargs)
            cli.hmset(name, v)
            _set_expire_time(cli, name, ex)

        if v and (isinstance(keys, str) or
                  (isinstance(keys, list) and len(keys) == 1)):
            v = v[-1]
    return v
Example #4
    def finalize(self):
        """
        Call before you generate a dataset.
        This caches all the guids in the db at the time of calling into a list,
        creating a data structure which allows fast access by the dataset object
        and fixing the length of the dataset.
        """

        lockname = self.key('lock')
        lk = Lock(self.redis, lockname)
        #logger.debug(f'getting lock {lockname}')
        lk.acquire()
        #logger.debug(f'got lock {lockname}')
        self.redis.set(self.key('finalized'), 'FINALIZED')
        if lk.owned():
            lk.release()
            #logger.debug(f'released lock {lockname}')

        self.episodes = []
        self.episode_len = []
        for episode in range(self.num_episodes()):
            self.episodes.append(self.redis.lindex(self.key('episodes'), episode).decode())

        self.episodes = sorted(self.episodes)

        for episode_id in self.episodes:
            self.episode_len.append(len(Episode(self, self.redis, episode_id)))

        self.episode_off = []
        offset = 0
        for length in self.episode_len:
            self.episode_off.append(offset)
            offset += length

        self.finalized = True
def refresh_redis_cache(name, ex):
    """
        name          : redis key name
        ex            : expiration time in seconds
    """
    cli = RedisPool().connection()
    with Lock(cli, _dist_lock_key(name), timeout=20):
        _set_expire_time(cli, name, ex)
Example #6
    def create_lock(cls, id, r_obj=None):
        import config
        from redis.lock import LuaLock as Lock
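        # note: LuaLock is the redis-py 2.x name; in redis-py 3.x the Lua-based
        # implementation was merged into redis.lock.Lock and this alias was deprecated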

        redis_key = 'lock_%s_%s' % (cls.__name__, id)
        return Lock(r_obj or rds_tmp, redis_key,
                    sleep=config.REDIS_LOCK_SLEEP,
                    timeout=config.REDIS_LOCK_TIMEOUT)
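
The returned lock can be used as a context manager; a minimal usage sketch (the model class and the work inside the block are placeholders):

# hypothetical caller: serialize updates to a single record by its id
with SomeModel.create_lock(42):
    update_record(42)   # placeholder for the critical section
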
Example #7
    def __init__(self, unique_id=uuid4(), redis_server=None, *args, **kwargs):
        self._unique_id = unique_id
        self._redis_server = redis_server or StrictRedis()
        refresh_lock = Lock(redis=self._redis_server,
                            name='{0}_lock'.format(self._unique_id))
        super(RedisManagedOAuth2Mixin,
              self).__init__(*args, refresh_lock=refresh_lock, **kwargs)
        if self._access_token is None:
            self._get_and_update_current_tokens()
Example #8
    def acquire(self, lox_name, lock_id, expires_seconds=None):
        key = self.key(lox_name, lock_id)
        lock = Lock(self.connection, key, timeout=expires_seconds)
        # retry logic is handled in core.lock, so no blocking here
        if lock.acquire(blocking=False):
            return BackendLock(key, lox_name, lock_id, provider_lock=lock)
        else:
            raise LockInUseException(
                "Lock {} has been acquired previously, possibly by another thread/process, and is not available."
                .format(key))
Example #9
    def __init__(self, ns: str, name: str, expiration: Union[int, float],
                 redis_backend: RedisBackend) -> None:
        super().__init__(ns, name)
        self.__redis = redis_backend.get_redis_connection(ns)
        with _map_to_sdl_exception():
            redis_lockname = '{' + ns + '},' + self._lock_name
            self.__redis_lock = Lock(redis=self.__redis,
                                     name=redis_lockname,
                                     timeout=expiration)
            self._register_scripts()
Example #10
def redis_lock_from_event(event):
    from django_redis import get_redis_connection
    from redis.lock import Lock

    if not hasattr(event, '_lock') or not event._lock:
        rc = get_redis_connection("redis")
        event._lock = Lock(redis=rc,
                           name='pretix_event_%s' % event.id,
                           timeout=LOCK_TIMEOUT)
    return event._lock
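
A short usage sketch for the helper above (the event object and the work inside the block are placeholders):

# hypothetical caller: make sure only one worker touches this event at a time
with redis_lock_from_event(event):
    recalculate_quotas(event)   # placeholder for code that must not run concurrently
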
def set_hash_map_cache(name, mapping, ex=None):
    """
    name          : redis hash name
    mapping       : dict of field -> value pairs to store in the hash
    ex            : expiration time in seconds
    """
    cli = RedisPool().connection()
    with Lock(cli, _dist_lock_key(name), timeout=20):
        if DEBUG_CACHE:
            print "set redis hash map. %s %s" % (name, ex)
        cli.hmset(name, mapping)
        _set_expire_time(cli, name, ex)
    return True
Example #12
    def greenlet_scheduler(self):

        redis_scheduler_lock_key = "%s:schedulerlock" % get_current_config()["redis_prefix"]
        while True:
            with Lock(connections.redis,
                      redis_scheduler_lock_key,
                      timeout=self.config["scheduler_interval"] + 10,
                      blocking=False,
                      thread_local=False):
                self.scheduler.check()

            time.sleep(self.config["scheduler_interval"])
Example #13
    def wrapper(*args, **kwargs):
        init_transaction()
        with Lock(redis_lock, get_trans_lock_name(), timeout=5) as lock:
            try:
                begin_transaction()
                result = func(*args, **kwargs)
                end_transaction()
                close_transaction(get_transaction_id())
                return result
            except Exception as e:
                rollback(get_transaction_id())
                raise e
            finally:
                release_transaction()
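
The wrapper above looks like the inner function of a decorator; a minimal sketch of what the enclosing decorator might look like, assuming only the helpers already used above:

# assumed shape of the enclosing decorator
import functools

def transactional(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        ...  # body as shown above
    return wrapper
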
Example #14
    def __enter__(self):
        self.lock = Lock(
            redis_client,
            self.lock_name,
            blocking_timeout=1,
            timeout=self.lock_timeout,
        )
        try:
            self.is_lock_free = self.lock.acquire(blocking=False)
        except LockError:
            log.error(
                'lock acquire error',
                extra={'data': {'lock_name': self.lock_name}},
                exc_info=True,
            )
        return self
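
The matching __exit__ is not shown; a minimal sketch, assuming the lock should only be released when __enter__ actually acquired it:

    # assumed counterpart to __enter__: release only if we acquired the lock
    def __exit__(self, exc_type, exc_value, traceback):
        if getattr(self, 'is_lock_free', False):
            self.lock.release()
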
def set_cache(key, value, ex=None):
    """
    key           : redis key name
    value         : value to store
    ex            : expiration time in seconds
    """
    cli = RedisPool().connection()
    with Lock(cli, _dist_lock_key(key), timeout=20):
        if DEBUG_CACHE:
            print "set redis cache. %s %s" % (key, ex)
        try:
            v = JSONDataEncoder.encode(value)
        except Exception:
            v = value
        cli.set(key, v, ex)
    return True
Example #16
    def end(self):
        if self.p is not None:
            self.p.execute()

        lockname = self.rollout.key('lock')
        lk = Lock(self.redis, lockname)
        #logger.debug(f'getting lock {lockname}')
        lk.acquire()
        #logger.debug(f'got lock {lockname}')

        if not self.redis.exists(self.rollout.key('finalized')):
            self.redis.lpush(self.rollout.key('episodes'), self.id)
            self.redis.incrby(self.rollout.key('steps'), len(self))

        if lk.owned():
            lk.release()
Example #17
    def raise_or_lock(self, key, timeout):
        """
        Checks whether the task is locked and raises an exception if so;
        otherwise locks the task. By default, the task and the key expire
        after 60 minutes (meaning it will not be executed and the lock
        will clear).
        """
        acquired = Lock(self.redis,
                        key,
                        timeout=timeout,
                        blocking=self.blocking,
                        blocking_timeout=self.blocking_timeout).acquire()

        if not acquired:
            # Time remaining in milliseconds
            # https://redis.io/commands/pttl
            ttl = self.redis.pttl(key)
            raise AlreadyQueued(ttl / 1000.)
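
A hedged usage sketch: call raise_or_lock before doing the work and treat AlreadyQueued as "someone else is already running this" (the key derivation and task body are illustrative):

    # illustrative guard around a task body
    def run_report(self, report_id):
        key = 'qo_run_report_%s' % report_id    # hypothetical lock key
        try:
            self.raise_or_lock(key, timeout=60 * 60)
        except AlreadyQueued:
            return                              # another worker holds the lock
        generate_report(report_id)              # placeholder for the real work
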
Example #18
def redis_lock(name: str, **kwargs) -> Generator:
    """
    Acquire a Redis lock. This is a wrapper around redis.lock.Lock() that also works in tests (there, the lock is
    always granted without any checks).

    Relevant kwargs are:
     - blocking_timeout: how many seconds to try to acquire the lock. Use 0 for a non-blocking lock.
       The default is None, which means we wait forever.
     - timeout: how many seconds to keep the lock for. The default is None, which means it remains locked forever.

    Raises redis.exceptions.LockError if the lock couldn't be acquired or released.
    """
    if settings.DJTRIGGERS_REDIS_URL.startswith('redis'):  # pragma: no cover
        with Lock(redis=Redis.from_url(settings.DJTRIGGERS_REDIS_URL), name=name, **kwargs):
            yield
    else:
        yield
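
Assuming the generator above is decorated with contextlib.contextmanager, usage might look like this (the lock name and the protected work are placeholders):

# hypothetical caller: give up immediately if the lock is taken, hold it for at most 60s
with redis_lock('process-invoices', blocking_timeout=0, timeout=60):
    process_pending_invoices()   # placeholder for the protected work
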
def set_hash_cache(name, key, value, ex=None):
    """
    name          : redis hash name
    key           : redis key name
    value         : value to store
    ex            : expiration time in seconds
    """
    cli = RedisPool().connection()
    with Lock(cli, _dist_lock_key(name), timeout=20):
        if DEBUG_CACHE:
            print "set redis hash cache. %s %s" % (key, ex)
        try:
            v = JSONDataEncoder.encode(value)
        except Exception:
            v = value
        cli.hset(name, key, v)
        _set_expire_time(cli, name, ex)
    return True
Example #20
    def __init__(self,
                 persist_mode=False,
                 key_prefix='',
                 min_cache_time=5,
                 force_cache_time=False,
                 base='store',
                 path='cache',
                 redis_host='localhost',
                 redis_port=6379,
                 redis_db=1,
                 redis_file=None):
        self.__key_prefix = key_prefix
        self.__cache_key = '{}:cache'.format(key_prefix)
        self.__persist_mode = persist_mode
        self.__min_cache_time = min_cache_time
        self.__force_cache_time = force_cache_time
        self.__base_path = base
        self.__resource_cache = get_triple_store()
        self._r = get_kv(persist_mode,
                         redis_host,
                         redis_port,
                         redis_db,
                         redis_file,
                         base=base,
                         path=path)
        self.__lock = Lock(self._r, key_prefix)
        self.__mlock = TLock()
        self.__memory_graphs = {}
        self.__memory_order = []

        self.__resources_ts = {}

        # Clean temporal folders under 'base' (others than 'path' subfolder)
        for sub in filter(lambda x: x != path,
                          get_immediate_subdirectories(base)):
            shutil.rmtree('{}/{}'.format(self.__base_path, sub))

        for lock_key in self._r.keys('{}:l*'.format(self.__key_prefix)):
            self._r.delete(lock_key)

        self.__enabled = True
        self.__purge_th = Thread(target=self.__purge)
        self.__purge_th.daemon = True
        self.__purge_th.start()
Example #21
    def __init__(self,
                 persist_mode=None,
                 key_prefix='',
                 min_cache_time=5,
                 force_cache_time=False,
                 base='store',
                 path='cache',
                 redis_host='localhost',
                 redis_port=6379,
                 redis_db=1,
                 redis_file=None,
                 graph_memory_limit=5000):
        self.__key_prefix = key_prefix
        self.__cache_key = '{}:cache'.format(key_prefix)
        self.__persist_mode = persist_mode
        self.__min_cache_time = min_cache_time
        self.__force_cache_time = force_cache_time
        self.__base_path = base
        self._r = get_kv(persist_mode,
                         redis_host,
                         redis_port,
                         redis_db,
                         redis_file,
                         base=base,
                         path=path)
        self.__lock = Lock(self._r, key_prefix)
        self.__mlock = TLock()
        self.__graph_memory_limit = graph_memory_limit
        self.__memory_graphs = {}
        self.__memory_order = []

        self.__resources_ts = {}

        for lock_key in self._r.keys('{}:l*'.format(self.__key_prefix)):
            self._r.delete(lock_key)

        self._r.delete(key_prefix)

        self.__enabled = True
        self.__purge_th = Thread(target=self.__purge)
        self.__purge_th.daemon = True
        self.__purge_th.start()
Example #22
    def __init__(self, unique_id=uuid4(), redis_server=None, *args, **kwargs):
        """
        :param unique_id:
            An identifier for this auth object. Auth instances which wish to share tokens must use the same ID.
        :type unique_id:
            `unicode`
        :param redis_server:
            An instance of a Redis server, configured to talk to Redis.
        :type redis_server:
            :class:`Redis`
        """
        # pylint:disable=keyword-arg-before-vararg
        self._unique_id = unique_id
        self._redis_server = redis_server or StrictRedis()
        refresh_lock = Lock(redis=self._redis_server,
                            name='{0}_lock'.format(self._unique_id))
        super(RedisManagedOAuth2Mixin,
              self).__init__(*args, refresh_lock=refresh_lock, **kwargs)
        if self._access_token is None:
            self._get_and_update_current_tokens()
Example #23
def _send_smses(send_deferred=False, backend=None, limit=None):
    # Acquire a lock so there is only one SMS sender at a time.
    if send_deferred:
        send_lock_name = 'smsgateway_send_sms_deferred'
    else:
        send_lock_name = 'smsgateway_send_sms'

    with Lock(redis=Redis.from_url(settings.SMSGATEWAY_REDIS_URL),
              name='smsgateway-' + send_lock_name,
              blocking_timeout=0):
        successes, failures = 0, 0
        try:
            # Get SMSes that need to be sent (deferred or non-deferred)
            if send_deferred:
                to_send = QueuedSMS.objects.filter(priority=PRIORITY_DEFERRED)
            else:
                to_send = QueuedSMS.objects.exclude(priority=PRIORITY_DEFERRED)

            if isinstance(limit, int):
                to_send = to_send[:limit]

            # Send each SMS
            for sms in to_send:
                if backend:
                    sms_using = backend
                else:
                    sms_using = None if sms.using == '__none__' else sms.using
                if send(sms.to, sms.content, sms.signature, sms_using,
                        sms.reliable):
                    # Successfully sent, remove from queue
                    sms.delete()
                    successes += 1
                else:
                    # Failed to send, defer SMS
                    sms.defer()
                    failures += 1
        finally:
            if successes and failures:
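                # note: this reports the ratio of successes to failures,
                # not the fraction of attempts that succeeded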
                statsd.gauge('smsgateway.success_rate', successes / failures)
            else:
                statsd.gauge('smsgateway.success_rate', 1)
Example #24
    def greenlet_queuestats(self):

        interval = min(self.config["orchestrate_interval"], 1 * 60)
        lock_timeout = 5 * 60 + (interval * 2)

        while True:
            lock = Lock(connections.redis, self.redis_queuestats_lock_key,
                        timeout=lock_timeout, thread_local=False, blocking=False)
            with lock:
                lock_expires = time.time() + lock_timeout
                self.queue_etas = defaultdict(lambda: MovingETA(5))

                while True:
                    self.queuestats()

                    # Because queue stats can be expensive, we try to keep the lock on the same agent
                    lock_extend = (time.time() + lock_timeout) - lock_expires
                    lock_expires += lock_extend
                    lock.extend(lock_extend)

                    time.sleep(interval)

            time.sleep(interval)
Example #25
def request(redis, method, url, json_):
    """Request wrapper to handle errors"""
    logging.info("[UTILS REQUEST] method: %s url: %s json: %s", method, url, json_)
    response = requests.request(
        method,
        urljoin(API_CALLBACK, url),
        json=json_,
        timeout=30,
        headers={"Authorization": f"processing-token {PROCESSING_TOKEN}"},
    )
    if 400 <= response.status_code < 500:
        # client error, log and fix if necessary
        logging.error(response.text)
        capture_message(response.text)
    elif response.status_code >= 500:
        # server error, store for retry
        redis.lpush(
            redis_fields.error_retry_queue(),
            json.dumps({"method": method, "url": url, "json": json_}),
        )
        logging.info(
            "[UTILS REQUEST] queueing for retry: method: %s url: %s json: %s",
            method,
            url,
            json_,
        )
        logging.error(response.text)
        capture_message(response.text)
    elif (
        200 <= response.status_code < 300
        and not Lock(redis, redis_fields.error_retry_lock()).locked()
        and redis.llen(redis_fields.error_retry_queue()) > 0
    ):
        # success, retry error queue if populated
        publisher.publish(RETRY_ERROR_TOPIC, encode_pubsub_data({}))

    return response
Example #26
def testRedisNativeLock(db):

    lock = Lock(db.redis, 'zelock', timeout=3.0)
    lock.acquire()
    lock.release()
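
Lock also implements the context-manager protocol, so the same check can be written without explicit acquire/release; a minimal equivalent sketch:

def testRedisNativeLockContextManager(db):
    with Lock(db.redis, 'zelock', timeout=3.0):
        pass
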
Example #27
from flask_sqlalchemy import SQLAlchemy
from .alchemy_encoder import AlchemyEncoder
from werkzeug.contrib.cache import RedisCache
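# note: werkzeug.contrib.cache was removed in Werkzeug 1.0; its successor lives in
# the separate cachelib package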
from redis.lock import Lock
from flask_bcrypt import Bcrypt
from flask_assets import Environment, Bundle

# set up application
application = Flask(__name__)
application.config.from_envvar('APP_CONFIG')
application.json_encoder = AlchemyEncoder
application.cache = RedisCache(application.config['REDIS_HOST'],
                               application.config['REDIS_PORT'],
                               default_timeout=application.config['CACHE_TIMEOUT'])
application.cache_lock = Lock(application.cache._client,
                              'mgtrk_worker_lock',
                              timeout=0.2,
                              blocking_timeout=0.2)

# set up authentication
bcrypt = Bcrypt(application)

# bundle js and css code
assets = Environment(application)

js = Bundle('js/core/*', filters='rjsmin', output='js/core/mgtrk-core.min.js')
assets.register('core-js', js)

css = Bundle('css/core/*',
             filters='cssmin',
             output='css/core/mgtrk-core.min.css')
assets.register('core-css', css)
Example #28
    def uri_lock(self, uri):
        with self.__lock:
            key = '{}:l:'.format(self.__key_prefix) + uri
            key = (key[:250]) if len(key) > 250 else key
            return Lock(self._r, key)
Example #29
    def _redis_lock(self) -> Lock:
        if self._memoised_lock is None:
            self._memoised_lock = Lock(self.redis, self._lock_name)
        return self._memoised_lock
Example #30
from headphones2.exceptions import HeadphonesException
from headphones2.external.musicbrainz import get_release_groups_for_artist, get_releases_for_release_group, \
    musicbrainz_lock
from headphones2.orm import Artist, Status, Album, Release, Track, MediaFile
from headphones2.taggers.pipeline import match_album_from_list_of_paths
from headphones2.utils.general import ensure_unicode
from headphones2.utils.structs import FolderResult
from redis.lock import Lock

from .engine import huey
from headphones2 import local_redis
from ..orm import connect

logger = logbook.Logger(__name__)

write_lock = Lock(local_redis, name='sqlite_write')


@huey.task()
def add_artist_task(artist_id):
    with closing(connect()) as session:
        # setnx returns True only when the key was newly set, i.e. no other task is in progress
        acquired = local_redis.setnx(artist_id, 'in_progress')
        if acquired:
            add_artist_to_db(artist_id, session)
            local_redis.delete(artist_id)
            return True
        else:
            logger.debug('Task for adding artist {} already in progress'.format(artist_id))
            return False