Example #1
File: redis.py Project: daevaorn/sentry
    def process_pending(self):
        client = self.cluster.get_routing_client()
        lock_key = self._make_lock_key(self.pending_key)
        # prevent a stampede due to celerybeat + periodic task
        if not client.set(lock_key, '1', nx=True, ex=60):
            return

        try:
            for host_id in self.cluster.hosts.iterkeys():
                conn = self.cluster.get_local_client(host_id)
                keys = conn.zrange(self.pending_key, 0, -1)
                if not keys:
                    continue
                keycount = 0
                for key in keys:
                    keycount += 1
                    process_incr.apply_async(kwargs={
                        'key': key,
                    })
                pipe = conn.pipeline()
                pipe.zrem(self.pending_key, *keys)
                pipe.execute()
                metrics.timing('buffer.pending-size', keycount)
        finally:
            client.delete(lock_key)
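The `client.set(lock_key, '1', nx=True, ex=60)` call is a simple distributed lock: SET with NX succeeds only if the key does not already exist, and EX expires it after 60 seconds so a crashed worker cannot hold the lock forever. A minimal standalone sketch of the same pattern, assuming a plain redis-py client (the key name and wrapper function are illustrative, not from the Sentry source):

    import redis

    client = redis.Redis()

    def run_exclusively(work, lock_key='buffer:process_pending:lock'):
        # SET ... NX EX 60: succeeds only if the key does not exist yet,
        # and expires automatically so a crashed worker cannot wedge it.
        if not client.set(lock_key, '1', nx=True, ex=60):
            return  # another worker already holds the lock
        try:
            work()
        finally:
            # release early so the next scheduled run is not blocked
            client.delete(lock_key)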
Example #2
    def process_pending(self):
        client = self.cluster.get_routing_client()
        lock_key = self._make_lock_key(self.pending_key)
        # prevent a stampede due to celerybeat + periodic task
        if not client.set(lock_key, '1', nx=True, ex=60):
            return

        try:
            keycount = 0
            with self.cluster.all() as conn:
                results = conn.zrange(self.pending_key, 0, -1)

            with self.cluster.all() as conn:
                for host_id, keys in six.iteritems(results.value):
                    if not keys:
                        continue
                    keycount += len(keys)
                    for key in keys:
                        process_incr.apply_async(kwargs={
                            'key': key,
                        })
                    conn.target([host_id]).zrem(self.pending_key, *keys)
            metrics.timing('buffer.pending-size', keycount)
        finally:
            client.delete(lock_key)
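This variant replaces the per-host loop with a promise-based fanout: inside the `cluster.all()` block each command is sent to every host, and the promise it returns resolves when the block exits, with `.value` mapping host_id to that host's reply. A minimal sketch of the pattern, assuming `self.cluster` is an rb (redis-blaster) cluster as in Sentry (the hosts configuration and key name are illustrative):

    import rb

    cluster = rb.Cluster(hosts={0: {'port': 6379}})

    # Fan the ZRANGE out to every host; the command returns a promise
    # that is resolved once the with-block exits.
    with cluster.all() as client:
        results = client.zrange('pending', 0, -1)

    # .value maps host_id -> that host's reply
    for host_id, keys in results.value.items():
        print(host_id, keys)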
Example #3
File: redis.py Project: ob3/sentry
    def process_pending(self):
        client = self.cluster.get_routing_client()
        lock_key = self._make_lock_key(self.pending_key)
        # prevent a stampede due to celerybeat + periodic task
        if not client.set(lock_key, '1', nx=True, ex=60):
            return

        try:
            for host_id in self.cluster.hosts.iterkeys():
                conn = self.cluster.get_local_client(host_id)
                keys = conn.zrange(self.pending_key, 0, -1)
                if not keys:
                    continue
                keycount = 0
                for key in keys:
                    keycount += 1
                    process_incr.apply_async(kwargs={
                        'key': key,
                    })
                pipe = conn.pipeline()
                pipe.zrem(self.pending_key, *keys)
                pipe.execute()
                metrics.timing('buffer.pending-size', keycount)
        finally:
            client.delete(lock_key)
Example #4
File: base.py Project: zvrr/sentry
    def incr(self, model, columns, filters, extra=None):
        """
        >>> incr(Group, columns={'times_seen': 1}, filters={'pk': group.pk})
        """
        process_incr.apply_async(
            kwargs={"model": model, "columns": columns, "filters": filters, "extra": extra}
        )
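On the consuming side, `apply_async(kwargs=...)` delivers the dict to the Celery task as ordinary keyword arguments. A hypothetical `process_incr` task illustrating that contract (the decorator and empty body are assumptions, not Sentry's actual task definition):

    from celery import shared_task

    @shared_task
    def process_incr(model=None, columns=None, filters=None, extra=None):
        # The kwargs dict passed to apply_async arrives here as keyword
        # arguments once a worker picks up the task.
        ...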
Example #5
File: redis.py Project: faulkner/sentry
    def process_pending(self):
        client = self.cluster.get_routing_client()
        lock_key = self._make_lock_key(self.pending_key)
        # prevent a stampede due to celerybeat + periodic task
        if not client.set(lock_key, '1', nx=True, ex=60):
            return

        try:
            keycount = 0
            with self.cluster.all() as conn:
                results = conn.zrange(self.pending_key, 0, -1)

            with self.cluster.all() as conn:
                for host_id, keys in six.iteritems(results.value):
                    if not keys:
                        continue
                    keycount += len(keys)
                    for key in keys:
                        process_incr.apply_async(kwargs={
                            'key': key,
                        })
                    conn.target([host_id]).zrem(self.pending_key, *keys)
            metrics.timing('buffer.pending-size', keycount)
        finally:
            client.delete(lock_key)
Example #6
    def incr(self, model, columns, filters, extra=None):
        """
        >>> incr(Group, columns={'times_seen': 1}, filters={'pk': group.pk})
        """
        process_incr.apply_async(kwargs={
            'model': model,
            'columns': columns,
            'filters': filters,
            'extra': extra,
        })
Example #7
    def incr(self, model, columns, filters, extra=None):
        """
        >>> incr(Group, columns={'times_seen': 1}, filters={'pk': group.pk})
        """
        process_incr.apply_async(kwargs={
            'model': model,
            'columns': columns,
            'filters': filters,
            'extra': extra,
        })
Example #8
    def process_pending(self):
        for conn in self.conn.hosts.itervalues():
            keys = conn.zrange(self.pending_key, 0, -1)
            if not keys:
                continue
            for key in keys:
                process_incr.apply_async(kwargs={
                    'key': key,
                })
            conn.zrem(self.pending_key, *keys)
Example #9
File: redis.py Project: alexandrul/sentry
    def process_pending(self, partition=None):
        if partition is None and self.pending_partitions > 1:
            # If we're using partitions, this one task fans out into
            # N subtasks instead.
            for i in range(self.pending_partitions):
                process_pending.apply_async(kwargs={'partition': i})
            # Explicitly also run over the unpartitioned buffer as well
            # to ease in transition. In practice, this should just be
            # super fast and is fine to do redundantly.

        pending_key = self._make_pending_key(partition)
        client = self.cluster.get_routing_client()
        lock_key = self._make_lock_key(pending_key)
        # prevent a stampede due to celerybeat + periodic task
        if not client.set(lock_key, '1', nx=True, ex=60):
            return

        pending_buffer = PendingBuffer(self.incr_batch_size)

        try:
            keycount = 0
            with self.cluster.all() as conn:
                results = conn.zrange(pending_key, 0, -1)

            with self.cluster.all() as conn:
                for host_id, keys in six.iteritems(results.value):
                    if not keys:
                        continue
                    keycount += len(keys)
                    for key in keys:
                        pending_buffer.append(key)
                        if pending_buffer.full():
                            process_incr.apply_async(
                                kwargs={
                                    'batch_keys': pending_buffer.flush(),
                                }
                            )
                    conn.target([host_id]).zrem(pending_key, *keys)

            # queue up remainder of pending keys
            if not pending_buffer.empty():
                process_incr.apply_async(kwargs={
                    'batch_keys': pending_buffer.flush(),
                })

            metrics.timing('buffer.pending-size', keycount)
        finally:
            client.delete(lock_key)
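`PendingBuffer` is not defined in these snippets, but its usage (constructed with `incr_batch_size`, then driven via `append`, `full`, `flush`, and `empty`) implies a small fixed-size batch. A minimal sketch consistent with that usage (an assumption, not the actual Sentry class):

    class PendingBuffer(object):
        def __init__(self, size):
            assert size > 0
            self.buffer = []
            self.size = size

        def append(self, item):
            self.buffer.append(item)

        def full(self):
            return len(self.buffer) >= self.size

        def empty(self):
            return not self.buffer

        def flush(self):
            # hand back the accumulated batch and start a fresh one
            rv = self.buffer
            self.buffer = []
            return rv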
Example #10
    def incr(self, model, columns, filters, extra=None, signal_only=None):
        """
        >>> incr(Group, columns={'times_seen': 1}, filters={'pk': group.pk})

        signal_only - added to indicate that `process` should only call the complete
        signal handler with the updated model and skip creates/updates in the database.
        This is useful in cases where we need to do additional processing before writing
        to the database and opt to do it in a `buffer_incr_complete` receiver.
        """
        process_incr.apply_async(
            kwargs={
                "model": model,
                "columns": columns,
                "filters": filters,
                "extra": extra,
                "signal_only": signal_only,
            })
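The docstring above refers to a `buffer_incr_complete` receiver. A hypothetical Django-style receiver for it (the signal's import path and the handler's keyword arguments are assumptions based on the docstring, not taken from these snippets):

    from django.dispatch import receiver

    from sentry.signals import buffer_incr_complete  # assumed import path

    @receiver(buffer_incr_complete)
    def on_buffer_incr_complete(sender, model=None, columns=None,
                                filters=None, extra=None, **kwargs):
        # With signal_only=True, `process` skips the database write, so any
        # extra processing happens here before persisting manually.
        ...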
Example #11
    def process_pending(self, partition=None):
        if partition is None and self.pending_partitions > 1:
            # If we're using partitions, this one task fans out into
            # N subtasks instead.
            for i in range(self.pending_partitions):
                process_pending.apply_async(kwargs={'partition': i})
            # Explicitly also run over the unpartitioned buffer as well
            # to ease in transition. In practice, this should just be
            # super fast and is fine to do redundantly.

        pending_key = self._make_pending_key(partition)
        client = self.cluster.get_routing_client()
        lock_key = self._make_lock_key(pending_key)
        # prevent a stampede due to celerybeat + periodic task
        if not client.set(lock_key, '1', nx=True, ex=60):
            return

        pending_buffer = PendingBuffer(self.incr_batch_size)

        try:
            keycount = 0
            with self.cluster.all() as conn:
                results = conn.zrange(pending_key, 0, -1)

            with self.cluster.all() as conn:
                for host_id, keys in six.iteritems(results.value):
                    if not keys:
                        continue
                    keycount += len(keys)
                    for key in keys:
                        pending_buffer.append(key)
                        if pending_buffer.full():
                            process_incr.apply_async(
                                kwargs={
                                    'batch_keys': pending_buffer.flush(),
                                })
                    conn.target([host_id]).zrem(pending_key, *keys)

            # queue up remainder of pending keys
            if not pending_buffer.empty():
                process_incr.apply_async(kwargs={
                    'batch_keys': pending_buffer.flush(),
                })

            metrics.timing('buffer.pending-size', keycount)
        finally:
            client.delete(lock_key)
Example #12
File: redis.py Project: jaysoffian/sentry
    def process_pending(self):
        lock_key = self._make_lock_key(self.pending_key)
        # prevent a stampede due to celerybeat + periodic task
        if not self.conn.set(lock_key, '1', nx=True, ex=60):
            return

        try:
            for conn in self.conn.hosts.itervalues():
                keys = conn.zrange(self.pending_key, 0, -1)
                if not keys:
                    continue
                for key in keys:
                    process_incr.apply_async(kwargs={
                        'key': key,
                    })
                pipe = conn.pipeline()
                pipe.zrem(self.pending_key, *keys)
                pipe.execute()
        finally:
            self.conn.delete(lock_key)
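Note that `zrem` is issued through a pipeline even though it is a single command. In redis-py a pipeline queues commands and sends them to the server in one batch when `execute()` is called:

    import redis

    conn = redis.Redis()
    pipe = conn.pipeline()
    pipe.zrem('pending', 'key1', 'key2')  # queued locally, not yet sent
    pipe.execute()                        # flushed to the server in one batch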
Example #13
    def process_pending(self):
        lock_key = self._make_lock_key(self.pending_key)
        # prevent a stampede due to celerybeat + periodic task
        if not self.conn.set(lock_key, '1', nx=True, ex=60):
            return

        try:
            for conn in self.conn.hosts.itervalues():
                keys = conn.zrange(self.pending_key, 0, -1)
                if not keys:
                    continue
                for key in keys:
                    process_incr.apply_async(kwargs={
                        'key': key,
                    })
                pipe = conn.pipeline()
                pipe.zrem(self.pending_key, *keys)
                pipe.execute()
        finally:
            self.conn.delete(lock_key)