Example #1
class SendMessageToQueueCommand(LuaRedisCommand):
    # KEYS[1] = queue key
    # ARGV[1] = expiry
    # ARGV[2] = queue capacity
    # ARGV[3] = message
    _script = """
if redis.call('llen', KEYS[1]) >= tonumber(ARGV[2]) then
    return redis.error_reply("queue full")
end
redis.call('rpush', KEYS[1], ARGV[3])
redis.call('expire', KEYS[1], ARGV[1])
"""

    def __call__(
            self,
            queue_key,  # type: six.text_type
            message,  # type: six.binary_type
            expiry,  # type: int
            capacity,  # type: int
            connection,  # type: redis.StrictRedis
    ):
        # type: (...) -> None
        self._call(keys=[queue_key],
                   args=[expiry, capacity, message],
                   connection=connection)
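
A hedged usage sketch for the command above (LuaRedisCommand and its _call
helper come from the surrounding codebase and are not shown; the key name and
values are illustrative):

send_message = SendMessageToQueueCommand()
send_message(
    queue_key=u'queues:outbound',
    message=b'payload',
    expiry=60,
    capacity=1000,
    connection=redis.StrictRedis(),
)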
Example #2
_create_status = create_status
# <start id="post-status-lua"/>
def create_status(conn, uid, message, **data):          #H
    args = [                                            #I
        'message', message,                             #I
        'posted', time.time(),                          #I
        'uid', uid,                                     #I
    ]
    for key, value in data.iteritems():                 #I
        args.append(key)                                #I
        args.append(value)                              #I

    return create_status_lua(                           #J
        conn, ['user:%s' % uid, 'status:id:'], args)    #J

create_status_lua = script_load('''
local login = redis.call('hget', KEYS[1], 'login')      --A
if not login then                                       --B
    return false                                        --B
end
local id = redis.call('incr', KEYS[2])                  --C
local key = string.format('status:%s', id)              --D

redis.call('hmset', key,                                --E
    'login', login,                                     --E
    'id', id,                                           --E
    unpack(ARGV))                                       --E
redis.call('hincrby', KEYS[1], 'posts', 1)              --F

return id                                               --G
''')
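
script_load is assumed from the surrounding code (it is the Redis in Action
helper). A minimal sketch of what it does -- cache the script's SHA, prefer
EVALSHA, and fall back to EVAL when the script cache is cold:

import redis

def script_load(script):
    sha = [None]                                   # cached SHA across calls
    def call(conn, keys=[], args=[], force_eval=False):
        if not force_eval:
            if not sha[0]:
                sha[0] = conn.script_load(script)  # load once, remember SHA
            try:
                return conn.evalsha(sha[0], len(keys), *(keys + args))
            except redis.exceptions.NoScriptError:
                sha[0] = None                      # cache flushed; EVAL below
        return conn.eval(script, len(keys), *(keys + args))
    return call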
Example #3
        self.mark_done(keys=[item['ident']],
                       args=[EXPIRE_TIME, LOG_CHANNEL,
                             int(time.time())])


# ------------------------------------------------------------------------------

redis_url = urlparse(REDIS_URL)
redis_db = int(redis_url.path[1:])
r = redis.StrictRedis(host=redis_url.hostname,
                      port=redis_url.port,
                      db=redis_db)

# ------------------------------------------------------------------------------

MARK_DONE = '''
local ident = KEYS[1]
local expire_time = ARGV[1]
local log_channel = ARGV[2]
local finished_at = ARGV[3]

redis.call('hmset', ident, 'finished_at', finished_at)
redis.call('lrem', 'working', 1, ident)

local was_aborted = redis.call('hget', ident, 'aborted')

-- If the job was aborted, we ignore the given expire time.  Instead, we set a
-- much shorter expire time -- one that's long enough for (most) subscribers
-- to read a message, but short enough to not cause undue suffering in the
-- case of retrying an aborted job.
if was_aborted then
Example #4

def load_config(config_file):
    f = open(config_file)
    config = yaml.safe_load(f)
    f.close()

    config['event_ttl'] = 86400
    config['retry_timeout'] = 5  # time to treat servers as down
    config['redis_connect_timeout'] = 1
    config['redis_timeout'] = 1

    return config


app = Flask(__name__)
app.config.update(load_config(script_args.config_file))

enqueue_script = """
local kEvents = unpack(KEYS)
local curTime = 1*ARGV[1]
local purgeTTL = 1*ARGV[2]
local channel = ARGV[3]
-- Append the events (the remaining arguments) to the set
local pushed = 0
for i = 4,#ARGV,1 do
    local event = ARGV[i]
    -- Prepend the timestamp to allow duplicate events
    redis.call('zAdd',kEvents,curTime,curTime .. ':' .. event)
    -- Also publish the event for the fast common case
    redis.call('publish',channel,curTime .. ':' .. event)
    pushed = pushed + 1
end
-- Prune out any stale events
Example #5
__author__ = 'chiradip'
import redis
import time
import register

lua = '''
local ownuid = redis.call('GET', "username:" ..KEYS[1].. ":uid")
local issuid = redis.call('GET', "email:" ..KEYS[2].. ":uid")
if not issuid then
    issuid = redis.call('INCR', "global:getNextUserId")
    redis.call('SET', "email:" ..KEYS[2].. ":uid", issuid)
    redis.call('SET', "uid:" ..issuid.. ":email", KEYS[2])
end
redis.call('ZADD', "owner:"  ..ownuid..":docs", ARGV[1], ARGV[2])
redis.call('ZADD', "issuer:"  ..issuid..":docs", ARGV[1], ARGV[2])
redis.call('HMSET', "doc:"..ARGV[2], "owner.uid", ownuid , "issuer.uid", issuid)
print(KEYS[1] .."|".. KEYS[2] .."|".. ARGV[1] .."|".. ARGV[2])
'''
r_server = redis.Redis("localhost")
r_up = r_server.register_script(lua)

def update_redis(owner,issuer,url, dname):
  ts = time.time()
  password = "******"
  rusername  = owner.split("@")[0]
  iusername = issuer.split("@")[0]
  idomain = issuer.split("@")[1]
  print("Before calling LUA that updates document entry for the user")
  print("Owner's email: ", owner)
  print("Issuer's email: ", issuer)
  print("Owner's user name: ", rusername)
Example #6
def test_lua_with_redis_call():
    r = fresh_redis()

    assert r.eval("""\
redis.call('set', KEYS[1], KEYS[2])
return redis.call('get', KEYS[1])""", 2, "testkey", "testvalue") == "testvalue"
Example #7
import redis

from datetime import datetime
from threading import Timer

# redis conn
r = redis.Redis(host='127.0.0.1', port=6379, db=0)

# vars
N = 50
KEY = 'count:sc'
MEMBERS = list(range(1, N + 1))
print('total members:', N)

# register lua script
lua_script = """
redis.call('SETNX', KEYS[1], 0)
local value = redis.call('GET', KEYS[1])
value = tonumber(value)
local new_value = value + 1
redis.call('SET', KEYS[1], new_value)
return new_value
"""
increment = r.register_script(lua_script)


def incr_count():
    print(datetime.now().ctime(), 'incr global count')
    increment(keys=[KEY], args=[])
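
The Timer import above suggests the counter is bumped on a schedule; a minimal
sketch (the interval is an assumption):

def tick(interval=1.0):
    # re-arm the timer, then bump the shared counter atomically via the script
    Timer(interval, tick, args=(interval,)).start()
    incr_count()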

Example #8
import websocket
import json
import redis
from ledgerx_api import get_contracts
# redistimeseries docs: https://github.com/RedisTimeSeries/redistimeseries-py
# websocket-client docs: https://github.com/websocket-client/websocket-client

redis_host = 'localhost'
r = redis.Redis(host=redis_host, decode_responses=True)

# Lua script to do a conditional publish/store to TS (only clock > prev_clock)
lua_cond_pub = """
redis.call('SETNX', KEYS[1], 0)
local prev_clock = redis.call('GETSET', KEYS[1], ARGV[1])
local clock = tonumber(ARGV[1])
prev_clock = tonumber(prev_clock)
if(clock > prev_clock) then
    redis.call('PUBLISH', ARGV[2], ARGV[5])
    redis.call('TS.ADD', KEYS[2], '*', ARGV[3], 'ON_DUPLICATE', 'LAST')
    redis.call('TS.ADD', KEYS[3], '*', ARGV[4], 'ON_DUPLICATE', 'LAST')
    return 1
else
    return 0
end
"""
cond_pub = r.register_script(lua_cond_pub)
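
A hedged sketch of invoking the script above (key and channel names are
illustrative; KEYS/ARGV follow the script's layout):

def publish_if_newer(contract_id, clock, bid, ask, raw_message):
    # KEYS[1] = clock gate, KEYS[2]/KEYS[3] = time series;
    # ARGV = clock, pub/sub channel, bid, ask, raw message to publish
    return cond_pub(
        keys=['clock:%s' % contract_id,
              'ts:bid:%s' % contract_id,
              'ts:ask:%s' % contract_id],
        args=[clock, 'chan:%s' % contract_id, bid, ask, raw_message])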


def on_message(ws, raw_message):
    message = json.loads(raw_message)
    if message['type'] == 'heartbeat':
Example #9
# coding: utf8

__all__ = ["RedisTokenBucketRateLimiter"]
__authors__ = ["Tim Chow"]

import time
import math

import redis

from .interface import RateLimiter

SCRIPT = """
local tokens_key = KEYS[1]
local timestamp_key = KEYS[2]

local rate = tonumber(ARGV[1])
local capacity = tonumber(ARGV[2])
local now = tonumber(ARGV[3])
local requested = tonumber(ARGV[4])

local fill_time = capacity/rate
local ttl = math.floor(fill_time*2)

local last_tokens = tonumber(redis.call("get", tokens_key))
if last_tokens == nil then
  last_tokens = capacity
end

local last_refreshed = tonumber(redis.call("get", timestamp_key))
if last_refreshed == nil then
Example #10
my_server = redis.StrictRedis(unix_socket_path=REDLOC2)

# -- Get logins from the last 10 minutes
lualatestlogins = """
local l = redis.call('ZRANGEBYSCORE', KEYS[1], ARGV[1]-600, '+inf') -- returns key:value pairs
local c = {}
for _, v in pairs(l) do
  local p = v:find(':')
  local i = v:sub(1,p-1)
  c[#c+1] = i
end
return c"""
getlatestlogins = my_server.register_script(lualatestlogins)
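
An illustrative call (the zset key name and clock source are assumptions; the
script slices the user id out of each 'id:ip' member):

import time  # assumed; this snippet's imports are not shown

latest_uids = getlatestlogins(keys=['latest_logins'], args=[time.time()])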

# -- "Evict" old logins
luacleanselogins = """
redis.call('ZREMRANGEBYSCORE', KEYS[1], '-inf', '(' .. ARGV[1]-600)"""
cleanselogins = my_server.register_script(luacleanselogins)

# -- Store the new login
luastorelogin = """
redis.call('ZADD', KEYS[1], ARGV[1], ARGV[2] .. ':' .. ARGV[3])
redis.call('SET',KEYS[2],ARGV[3])
redis.call('EXPIRE',KEYS[2],600)"""
storelogin = my_server.register_script(luastorelogin)

# -- retrieve collisions in IPs
luaretrieveclones = """
local q = redis.call('GET',KEYS[2]) -- gives ip associated to user ID
if q == false or q == '' then   -- GET yields false (not nil) for a missing key
  return nil               -- the key holding user_ID's ip has expired, hence return nothing
Example #11
class MarkItemAsDone(SimpleTask):
    def __init__(self, redis, mark_done_script):
        SimpleTask.__init__(self, 'MarkItemAsDone')
        self.redis = redis
        self.mark_done = self.redis.register_script(mark_done_script)

    def process(self, item):
        self.mark_done(keys=[item['ident']], args=[EXPIRE_TIME, LOG_CHANNEL,
            int(time.time()), json.dumps(item['info']), item['log_key']])

# ------------------------------------------------------------------------------
# REDIS SCRIPTS
# ------------------------------------------------------------------------------

MARK_DONE = '''
local ident = KEYS[1]
local expire_time = ARGV[1]
local log_channel = ARGV[2]
local finished_at = ARGV[3]
local info = ARGV[4]
local log_key = ARGV[5]

redis.call('hmset', ident, 'finished_at', finished_at)
redis.call('lrem', 'working', 1, ident)

local was_aborted = redis.call('hget', ident, 'aborted')

-- If the job was aborted, we ignore the given expire time.  Instead, we set a
-- much shorter expire time -- one that's long enough for (most) subscribers
-- to read a message, but short enough to not cause undue suffering in the
Example #12
import redis
import json
import uuid
import datetime
import click

r = redis.StrictRedis(host='redis')

queues = ['created', 'claimed', 'finished', 'failed']

put_job_lua_script = """
local job_id = KEYS[1]
local new_queue = KEYS[2]
local old_queue = KEYS[3]
local job = ARGV[1]

local function merge(t1, t2)
    for k, v in pairs(t2) do
        if (type(v) == "table") and (type(t1[k] or false) == "table") then
            merge(t1[k], t2[k])
        else t1[k] = v end
    end
    return t1
end

if job_id == "" then
    job_id = redis.call('RPOP', old_queue)
    if not job_id then return nil end

    -- merge with given job ..
    local job_from_redis = redis.call('JSON.GET', job_id, '.')
Example #13
            # defer other index lookups to the query object
            query = cls.query.filter(**{attr: value})
            if _limit:
                query = query.limit(*_limit)
            return query.all()

    @ClassProperty
    def query(cls):
        '''
        Returns a ``Query`` object that refers to this model to handle
        subsequent filtering.
        '''
        return Query(cls)

_redis_writer_lua = _script_load('''
local namespace = ARGV[1]
local id = ARGV[2]

-- check and update unique column constraints
for i, write in ipairs({false, true}) do
    for col, value in pairs(cjson.decode(ARGV[3])) do
        local key = string.format('%s:%s:uidx', namespace, col)
        if write then
            redis.call('HSET', key, value, id)
        else
            local known = redis.call('HGET', key, value)
            if known ~= id and known ~= false then
                return col
            end
        end
Example #14
import redis
import uuid
import socket
import struct
import random
import time


def int2ip(addr):
    return socket.inet_ntoa(struct.pack("!I", addr))


script_add = '''
redis.call('HSET', KEYS[1], 'client_id', ARGV[1], 'ip', ARGV[2], 'time_1', ARGV[3])
redis.call('RPUSH', KEYS[3], KEYS[1])
redis.call('RPUSH', KEYS[2], KEYS[1])
'''


def execute(host, port, barrier, end, sleep_time):
    print('Starting process type 1')
    r = redis.Redis(host='localhost', port=6379, db=0)
    add_request = r.register_script(script_add)
    barrier.wait()

    while not end.value:
        request_id = str(uuid.uuid4())
        client_id = str(uuid.uuid4())
        ip = int2ip(random.randrange(0, 2**32 - 1))
        add_request(keys=[f'request-{request_id}', 'queue_2', 'queue_3'],
                    args=[client_id, ip, time.time()])
Example #15
lua_add_food = r.script_load(add_food_lua_script)

"""
ARGV[1] str        cart_id
ARGV[2] int        user_id
ARGV[3] str        order_id

ret:
0           200
1           401
2           404 cart does not exist
3           403 each user may only place one order
4           403 insufficient food stock
"""

create_order_lua_script = """
local cart = redis.call("LRANGE",ARGV[1],0,3)
local cart_size = #cart
if cart_size == 0 then
    return {2}
end
if not (ARGV[2] == cart[1]) then
    return {1}
end
if redis.call("SISMEMBER", "ordered_user", ARGV[2]) == 1 then
    return {3}
end
for i=2, cart_size do
    if redis.call("DECR", cart[i]) < 0 then
        return {4}
    end
Example #16
                          concurrency=data[2],
                          age=data[3],
                          abort_requested=data[4],
                          suppress_ignore_reports=data[5])

            if data[6]:
                result['ignore_patterns'] = self.redis.smembers(data[6])
            else:
                result['ignore_patterns'] = []

            return result


# ------------------------------------------------------------------------------

MARK_DONE_SCRIPT = '''
local ident = KEYS[1]
local expire_time = ARGV[1]
local log_channel = ARGV[2]
local finished_at = ARGV[3]
local info = ARGV[4]
local log_key = ARGV[5]

redis.call('hmset', ident, 'finished_at', finished_at)
redis.call('lrem', 'working', 1, ident)

local was_aborted = redis.call('hget', ident, 'aborted')

-- If the job was aborted, we ignore the given expire time.  Instead, we set a
-- much shorter expire time -- one that's long enough for (most) subscribers
-- to read a message, but short enough to not cause undue suffering in the
Example #17
            # defer other index lookups to the query object
            query = cls.query.filter(**{attr: value})
            if _limit:
                query = query.limit(*_limit)
            return query.all()

    @ClassProperty
    def query(cls):
        '''
        Returns a ``Query`` object that refers to this model to handle
        subsequent filtering.
        '''
        return Query(cls)

_redis_writer_lua = _script_load('''
local namespace = ARGV[1]
local id = ARGV[2]
local is_delete = cjson.decode(ARGV[11])

-- check and update unique column constraints
for i, write in ipairs({false, true}) do
    for col, value in pairs(cjson.decode(ARGV[3])) do
        local key = string.format('%s:%s:uidx', namespace, col)
        if write then
            redis.call('HSET', key, value, id)
        else
            local known = redis.call('HGET', key, value)
            if known ~= id and known ~= false then
                return col
            end
Example #18
__author__ = 'chiradip'
import redis
import time
import register

lua = '''
local ownuid = redis.call('GET', "username:" ..KEYS[1].. ":uid")
local issuid = redis.call('GET', "email:" ..KEYS[2].. ":uid")
if not issuid then
    issuid = redis.call('INCR', "global:getNextUserId")
    redis.call('SET', "email:" ..KEYS[2].. ":uid", issuid)
    redis.call('SET', "uid:" ..issuid.. ":email", KEYS[2])
end
redis.call('ZADD', "owner:"  ..ownuid..":docs", ARGV[1], ARGV[2])
redis.call('ZADD', "issuer:"  ..issuid..":docs", ARGV[1], ARGV[2])
redis.call('HMSET', "doc:"..ARGV[2], "owner.uid", ownuid , "issuer.uid", issuid)
print(KEYS[1] .."|".. KEYS[2] .."|".. ARGV[1] .."|".. ARGV[2])
'''
r_server = redis.Redis("localhost")
r_up = r_server.register_script(lua)


def update_redis(owner, issuer, url, dname):
    ts = time.time()
    password = "******"
    rusername = owner.split("@")[0]
    iusername = issuer.split("@")[0]
    idomain = issuer.split("@")[1]
    print("Before calling LUA that updates document entry for the user")
    print("Owner's email: ", owner)
    print("Issuer's email: ", issuer)
Example #19
import time

redis_url = os.getenv("REDIS_URL") or os.getenv("REDISCLOUD_PRODUCTION_URL")
if not redis_url:
    print "You need a REDIS_URL env variable.  Get the value at"
    print "https://github.com/getlantern/too-many-secrets/blob/master/lantern_aws/config_server.yaml#L2"
    sys.exit(1)
rs = redis.from_url(redis_url)

# KEYS[1]: '<region>:srvq'
# KEYS[2]: '<region>:bakedin'
# KEYS[3]: '<region>:bakedin-names'
# KEYS[4]: 'srvcount'
# KEYS[5]: '<region>:srvreqq'
# ARGV[1]: unix timestamp in seconds
luasrc = """
local cfg = redis.call("rpop", KEYS[1])
if not cfg then
    return "<no-servers-in-srvq>"
end
redis.call("lpush", KEYS[2], ARGV[1] .. "|" .. cfg)
local begin = string.find(cfg, "|")
local end_ = string.find(cfg, "|", begin + 1)
local name = string.sub(cfg, begin+1, end_-1)
redis.call("sadd", KEYS[3], name)
local serial = redis.call("incr", KEYS[4])
redis.call("lpush", KEYS[5], serial)
return cfg
"""

script = rs.register_script(luasrc)
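
A hedged invocation following the KEYS/ARGV comments above (the region name is
illustrative):

region = 'eu'
cfg = script(keys=['%s:srvq' % region,
                   '%s:bakedin' % region,
                   '%s:bakedin-names' % region,
                   'srvcount',
                   '%s:srvreqq' % region],
             args=[int(time.time())])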
Example #20
        pipeline.lrem(REPORTING_QUEUE, 0, reporting_database_key)
    if push_to_twitter:
        pipeline.lpush(TWITTER_QUEUE, push_to_twitter)
    if push_to_slack:
        pipeline.lpush(SLACK_QUEUE, push_to_slack)
    pipeline.execute()


# This is a small Lua script to merge all of the items in the slack queue into one item. It's done as a Lua script
# instead of python code in order to make it a single operation.
# The first key should be the queue to merge
# The first argument should be the separator to use when merging.
# TODO: should we have a limit for how many messages we merge together / the length of the message?
_slack_merge_script = redis.client.Script(None, """
local elements = redis.call('lrange', KEYS[1], 0, -1) -- Get all elements
local merged = table.concat(elements, ARGV[1]) -- Merge
redis.call('del', KEYS[1]) -- Delete all values
redis.call('lpush', KEYS[1], merged) -- Insert the merged element
""")


def merge_slack_queue():
    """
    Merges all current messages in the slack queue into one message.
    """
    _slack_merge_script(
        keys=[SLACK_QUEUE],
        args=['\n\n'],
        client=redis_data.get_connection()
    )

Example #21
                    concurrency=data[2],
                    age=data[3],
                    abort_requested=data[4],
                    suppress_ignore_reports=data[5]
                    )

            if data[6]:
                result['ignore_patterns'] = self.redis.smembers(data[6])
            else:
                result['ignore_patterns'] = []

            return result

# ------------------------------------------------------------------------------

MARK_DONE_SCRIPT = '''
local ident = KEYS[1]
local expire_time = ARGV[1]
local log_channel = ARGV[2]
local finished_at = ARGV[3]
local info = ARGV[4]
local log_key = ARGV[5]

redis.call('hmset', ident, 'finished_at', finished_at)
redis.call('lrem', 'working', 1, ident)

local was_aborted = redis.call('hget', ident, 'aborted')

-- If the job was aborted, we ignore the given expire time.  Instead, we set a
-- much shorter expire time -- one that's long enough for (most) subscribers
-- to read a message, but short enough to not cause undue suffering in the

Example #22
e_enter_script = """
local release_name = ARGV[1]
local waiting_queue = ARGV[2]
local window_holder = ARGV[3]
local timeout = ARGV[4]
if redis.call('setnx', window_holder, release_name) == 1 then
    redis.call('expire', window_holder, timeout)
    return true
end
redis.pcall('rpush', waiting_queue, release_name)
return false
"""

e_exit_script = """
local release_name = ARGV[1]
local waiting_queue = ARGV[2]
local window_holder = ARGV[3]
if redis.call('get', window_holder) ~= release_name then
    return
end
redis.call('del', window_holder)
local waiting_release_names = redis.call('lrange', waiting_queue, 0, -1)
redis.call('del', waiting_queue)
return waiting_release_names
"""


# noinspection PyProtectedMember pylint: disable=W0212
class ExclusionWindow(object):
Example #23
def test_commands_should_be_case_insensitive_inside_lua():
    r = fresh_redis()

    assert r.eval("""\
redis.call('SeT', KEYS[1], KEYS[2])
return redis.call('Get', KEYS[1])""", 2, "testkey", "testvalue") == "testvalue"
Example #24
# owner_key  = owner:{owner}
#
# grant      = {mode}:{owner}
# access     = {mode}:{name}
#
# (2) Additional data structure for deadlock-detection wait-for graph
#
# SET:  waitor -> set of waitee
#
# waitor_key = wait:{owner}
# waitee     = {owner}

# atomic:
# - checking if any conflicting locks are granted
# - adding the lock if there is no conflict
_LOCK_SCRIPT = """\
local rsrc_key = KEYS[1]
local lock_key = KEYS[2]
local owner_key = KEYS[3]
local name = string.match(lock_key, 'lock:(.+):[RW]:.+')
local mode = string.match(lock_key, 'lock:.+:([RW]):.+')
local owner = string.match(lock_key, 'lock:.+:[RW]:(.+)')
local grants = redis.call('smembers', rsrc_key)
for i, grant in ipairs(grants) do
	local grant_mode = string.match(grant, '([RW]):.+')
	local grant_owner = string.match(grant, '[RW]:(.+)')
	if grant_owner ~= owner then
		if not (grant_mode == 'R' and mode == 'R') then
			return 'false'
		end
	end
Example #25
    def __mai_datum__(self):
        # presumably meant to fetch the Redis server clock; the original line
        # was not valid Python (self.redis is an assumed redis-py client)
        szul_dat = self.redis.time()
        return szul_dat
Example #26
#!/usr/bin/env python
# coding=utf-8
import os
import logging
import time

import redis

REDIS_CONFIG = {
    'host': os.environ.get('REDIS_HOST', '127.0.0.1'),
    'port': int(os.environ.get('REDIS_PORT', 6379)),
    'key': b'honorlist'
}

LUA_SCRIPT = """
local key = KEYS[1]
local data = redis.call('HGET', key, 'To')
redis.call('ZINCRBY', 'honorlist', 1, data)
redis.call('DEL', key)
"""


def gen_honor_list():
    rc = redis.StrictRedis(host=REDIS_CONFIG['host'],
                           port=REDIS_CONFIG['port'])
    for k in rc.scan_iter():
        if k != REDIS_CONFIG['key']:
            try:
                rc.eval(LUA_SCRIPT, 1, k)
            except Exception as e:
                logging.error('key: {}, msg: {}'.format(k, e))
Example #27
"""
Redis pipelines with asynchronous I/O.
"""
import redis, sha, time

# Lua script for tracking a click.
# TRACK key
#   1  now_ts
#   2  click_ts
#   3  click_url
TRACK = """local key = KEYS[1]
local now_ts = tonumber(ARGV[1])
local click_ts = tonumber(ARGV[2])
local click_url = ARGV[3]

local total_field = 'total_clicks'
local last_ts_field = 'last_click_ts'
local last_url_field = 'last_click_url'

local last_ts = tonumber(redis.call('HGET', key, last_ts_field))

-- Set the last click timestamp and URL if the click is new.
if last_ts == nil or click_ts > last_ts then
    last_ts = click_ts
    redis.call('HSET', key, last_ts_field, click_ts)
    redis.call('HSET', key, last_url_field, click_url)
end

-- Only hold onto hashes for a week after last click.
redis.call('EXPIREAT', key, last_ts + 604800)
Example #28
import hashlib

from throttle.backends.base import ThrottleBackendBase

try:
    import redis
    from redis.exceptions import NoScriptError
except ImportError:
    from throttle.exceptions import ThrottleImproperlyConfigured
    raise ThrottleImproperlyConfigured(
        "django-throttle-requests is configured to use redis, but redis-py is not installed!"
    )

# Lua script to update bucket data atomically.
# In general, lua scripts should be used instead of Redis transactions to ensure atomicity. Transactions may be
# deprecated at some point. Also, nutcracker does not support transactions but does support scripting
# as long as all keys used by the script hash to the same backend. The same limitation applies to Redis Cluster.
#
# Script takes 1 key and 4 arguments: <bucket_num>, <bucket_num_next>, <bucket_span>, <cost>
INCR_BUCKET_SCRIPT = """
local newval = redis.call('hincrby', KEYS[1], ARGV[1], ARGV[4])
redis.call('hdel', KEYS[1], ARGV[2])
redis.call('expire', KEYS[1], ARGV[3])
return newval
"""

INCR_BUCKET_SCRIPT_SHA1 = hashlib.sha1(
    INCR_BUCKET_SCRIPT.encode('utf-8')).hexdigest()
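
A hedged sketch of the EVALSHA-with-fallback pattern that this SHA1 enables
(the helper name and connection handling are illustrative):

def _incr_bucket_raw(conn, key, bucket_num, bucket_num_next, bucket_span, cost=1):
    try:
        # fast path: the script is usually already cached server-side
        return conn.evalsha(INCR_BUCKET_SCRIPT_SHA1, 1, key,
                            bucket_num, bucket_num_next, bucket_span, cost)
    except NoScriptError:
        # first use (or after SCRIPT FLUSH): send the full source once
        return conn.eval(INCR_BUCKET_SCRIPT, 1, key,
                         bucket_num, bucket_num_next, bucket_span, cost)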


class RedisBackend(ThrottleBackendBase):
    def __init__(self):
        self.pool = redis.ConnectionPool(
            host='localhost', port=6379,
            db=0)  #TODO: Parameterize connection parameters
Example #29
    'Oman', 'Pakistan', 'Palau', 'Panama', 'Papua New Guinea', 'Paraguay',
    'Peru', 'Philippines', 'Poland', 'Portugal', 'Qatar', 'Romania',
    'Russian Federation', 'Rwanda', 'St Kitts & Nevis', 'St Lucia',
    'Saint Vincent & the Grenadines', 'Samoa', 'San Marino',
    'Sao Tome & Principe', 'Saudi Arabia', 'Senegal', 'Serbia', 'Seychelles',
    'Sierra Leone', 'Singapore', 'Slovakia', 'Slovenia', 'Solomon Islands',
    'Somalia', 'South Africa', 'South Sudan', 'Spain', 'Sri Lanka', 'Sudan',
    'Suriname', 'Swaziland', 'Sweden', 'Switzerland', 'Syria', 'Taiwan',
    'Tajikistan', 'Tanzania', 'Thailand', 'Togo', 'Tonga', 'Trinidad & Tobago',
    'Tunisia', 'Turkey', 'Turkmenistan', 'Tuvalu', 'Uganda', 'Ukraine',
    'United Arab Emirates', 'United Kingdom', 'United States', 'Uruguay',
    'Uzbekistan', 'Vanuatu', 'Vatican City', 'Venezuela', 'Vietnam', 'Yemen',
    'Zambia', 'Zimbabwe'
]

script_process = '''
redis.call('HSET', KEYS[1], 'country', ARGV[1], 'time_2', ARGV[2])
redis.call('RPUSH', KEYS[2], ARGV[3])
'''


def execute(host, port, barrier, end):
    print('Starting process type 2')
    r = redis.Redis(host='localhost', port=6379, db=0)
    process_data = r.register_script(script_process)
    barrier.wait()

    while not end.value:
        res = r.blpop('queue_2', 0.1)
        if not res:
            continue
Example #30
ERROR_TIMEOUT = 'error-timeout'


SCRIPT_CONSTANTS = {'f_args': FIELD_ARGS,
                    'f_run_log': FIELD_RUN_LOG,
                    'f_progress': FIELD_PROGRESS,
                    'f_queue_id': FIELD_QUEUE_KEY,
                    'f_status': FIELD_STATUS,
                    'f_userdata': FIELD_USERDATA,
                    'f_time_created': FIELD_TIME_CREATED,
                    'f_time_updated': FIELD_TIME_UPDATED,
                    'status_done': STATUS_DONE,
                    'status_failed': STATUS_FAILED,
                    'status_pending': STATUS_PENDING}

SCRIPT_NEW = """
local data_key, queue_key = KEYS[1], KEYS[2]
local args, userdata, time_created = ARGV[1], ARGV[2], ARGV[3]
local rv = redis.call('hsetnx', data_key,
                      '%(f_status)s', '%(status_pending)s')
if rv == 0 then
    return 0
end
redis.call('hmset', data_key,
           '%(f_queue_id)s', queue_key,
           '%(f_args)s', args,
           '%(f_run_log)s', '',
           '%(f_progress)s', '',
           '%(f_userdata)s', userdata,
           '%(f_time_updated)s', time_created,
           '%(f_time_created)s', time_created)
Example #31
def create_status(conn, uid, message, **data):
    args = [
        'message',
        message,  #I
        'posted',
        time.time(),  #I
        'uid',
        uid,  #I
    ]
    for key, value in data.iteritems():  #I
        args.append(key)  #I
        args.append(value)  #I

    return create_status_lua(  #J
        conn, ['user:%s' % uid, 'status:id:'], args)  #J


create_status_lua = script_load('''
local login = redis.call('hget', KEYS[1], 'login')      --A
if not login then                                       --B
    return false                                        --B
end
local id = redis.call('incr', KEYS[2])                  --C
local key = string.format('status:%s', id)              --D

redis.call('hmset', key,                                --E
    'login', login,                                     --E
    'id', id,                                           --E
    unpack(ARGV))                                       --E
redis.call('hincrby', KEYS[1], 'posts', 1)              --F

return id                                               --G
''')
Example #32
from redorm.settings import REDORM_URL

GET_SET_INDIRECT = """
local l = {}
local keys = redis.call('smembers', KEYS[2])
for _,k in ipairs(keys) do
    table.insert(l, redis.call('get', KEYS[1] .. k))
end
return l
"""

GET_KEY_INDIRECT = """
return {redis.call('get', redis.call('get', KEYS[1]))}
"""

UNIQUE_SAVE = """
local uniquecnt = ARGV[1]
local uniquenullcnt = ARGV[2]
local indexcnt = ARGV[3]
local indexnullcnt = ARGV[4]
local data = ARGV[5]
local uuid = ARGV[6]
local clsname = ARGV[7]

local beginunique = 8
local endofunique = beginunique+(uniquecnt*3)-1
-- Check uniqueness constraints, triples of field name, old value, new value
for i=beginunique,endofunique,3 do
     if redis.call('hexists', clsname .. ':key:' .. ARGV[i], ARGV[i+2]) == 1 then
         return redis.error_reply('Unique Violation: ' .. ARGV[i])
     end
Example #33
import hashlib

from throttle.backends.base import ThrottleBackendBase
try:
    import redis
    from redis.exceptions import NoScriptError
except ImportError:
    from throttle.exceptions import ThrottleImproperlyConfigured
    raise ThrottleImproperlyConfigured("django-throttle-requests is configured to use redis, but redis-py is not installed!")

# Lua script to update bucket data atomically.
# In general, lua scripts should be used instead of Redis transactions to ensure atomicity. Transactions may be
# deprecated at some point. Also, nutcracker/twemproxy does not support transactions but does support scripting
# as long as all keys used by the script hash to the same backend.
#
# Script takes 1 key and 4 arguments: <bucket_num>, <bucket_num_next>, <bucket_span>, <cost>
INCR_BUCKET_SCRIPT = """
local newval = redis.call('hincrby', KEYS[1], ARGV[1], ARGV[4])
redis.call('hdel', KEYS[1], ARGV[2])
redis.call('expire', KEYS[1], ARGV[3])
return newval
"""

INCR_BUCKET_SCRIPT_SHA1 = hashlib.sha1(INCR_BUCKET_SCRIPT.encode('utf-8')).hexdigest()

class RedisBackend(ThrottleBackendBase):
    def __init__(self):
        self.pool = redis.ConnectionPool(host='localhost', port=6379, db=0)

    def incr_bucket(self, zone_name, bucket_key, bucket_num, bucket_num_next, bucket_span, cost=1):
        conn = redis.Redis(connection_pool=self.pool)
Example #34
class LockingQueue(BaseStolosRedis, BaseLockingQueue):

    _EXTEND_LOCK_SCRIPT_NAME = 'lq_extend_lock'
    # Lua scripts that are sent to redis
    # keys:
    # h_k = ordered hash of key in form:  priority:insert_time_since_epoch:key
    # Q = sorted set of queued keys, h_k
    # Qi = sorted mapping (h_k -> key) for all known queued or completed items
    #
    # args:
    # expireat = seconds_since_epoch, presumably in the future
    # client_id = unique owner of the lock
    # randint = a random integer that changes every time script is called
    SCRIPTS = dict(

        # returns 1
        lq_put=dict(keys=('Q', 'h_k'), args=(), script="""
redis.call("ZINCRBY", KEYS[1], 0, KEYS[2])
return 1
"""),

        # returns the locked item's hash key (h_k) on success, and an error otherwise
        lq_get=dict(keys=('Q', ), args=('client_id', 'expireat'), script="""
local h_k = redis.call("ZRANGE", KEYS[1], 0, 0)[1]
if nil == h_k then return {err="queue empty"} end
if false == redis.call("SET", h_k, ARGV[1], "NX") then
return {err="already locked"} end
if 1 ~= redis.call("EXPIREAT", h_k, ARGV[2]) then
return {err="invalid expireat"} end
redis.call("ZINCRBY", KEYS[1], 1, h_k)
return h_k
"""),

        # returns 1 if got lock. Returns an error otherwise
        lq_lock=dict(
            keys=('h_k', 'Q'), args=('expireat', 'randint', 'client_id'),
            script="""
if false == redis.call("SET", KEYS[1], ARGV[3], "NX") then  -- did not get lock
local rv = redis.call("GET", KEYS[1])
if rv == "completed" then
    redis.call("ZREM", KEYS[2], KEYS[1])
    return {err="already completed"}
elseif rv == ARGV[3] then
    if 1 ~= redis.call("EXPIREAT", KEYS[1], ARGV[1]) then
    return {err="invalid expireat"} end
    return 1
else
    local score = tonumber(redis.call("ZSCORE", KEYS[2], KEYS[1]))
    math.randomseed(tonumber(ARGV[2]))
    local num = math.random(math.floor(score) + 1)
    if num ~= 1 then
    redis.call("ZINCRBY", KEYS[2], (num-1)/score, KEYS[1])
    end
    return {err="already locked"}
end
else
if 1 ~= redis.call("EXPIREAT", KEYS[1], ARGV[1]) then
    return {err="invalid expireat"} end
redis.call("ZINCRBY", KEYS[2], 1, KEYS[1])
return 1
end
"""),

        # returns 1 if the lock was extended.  Returns an error otherwise.
        lq_extend_lock=dict(
            keys=('h_k', ), args=('client_id', 'expireat'), script="""
local rv = redis.call("GET", KEYS[1])
if ARGV[1] == rv then
    if 1 ~= redis.call("EXPIREAT", KEYS[1], ARGV[2]) then
    return {err="invalid expireat"} end
    return 1
elseif "completed" == rv then return {err="already completed"}
elseif false == rv then return {err="expired"}
else return {err="lock stolen"} end
"""),

        # returns 1 if removed, 0 if key was already removed.
        lq_consume=dict(
            keys=('h_k', 'Q', 'Qi'), args=('client_id', ), script="""
local rv = redis.pcall("GET", KEYS[1])
if ARGV[1] == rv or "completed" == rv then
redis.call("SET", KEYS[1], "completed")
redis.call("PERSIST", KEYS[1])  -- or EXPIRE far into the future...
redis.call("ZREM", KEYS[2], KEYS[1])
if "completed" ~= rv then redis.call("INCR", KEYS[3]) end
return 1
else return 0 end
"""),

        # returns nil.  Marks the job completed.
        lq_completed=dict(
            keys=('h_k', 'Q', 'Qi'), args=(), script="""
if "completed" ~= redis.call("GET", KEYS[1]) then
redis.call("INCR", KEYS[3])
redis.call("SET", KEYS[1], "completed")
redis.call("PERSIST", KEYS[1])  -- or EXPIRE far into the future...
redis.call("ZREM", KEYS[2], KEYS[1])
end
"""),

        # returns 1 if removed, 0 otherwise
        lq_unlock=dict(
            keys=('h_k', ), args=('client_id', ), script="""
if ARGV[1] == redis.call("GET", KEYS[1]) then
    return redis.call("DEL", KEYS[1])
else return 0 end
"""),

        # returns number of items {(queued + taken), completed}
        # O(log(n))
        lq_qsize_fast=dict(
            keys=('Q', 'Qi'), args=(), script="""
return {redis.call("ZCARD", KEYS[1]), redis.call("INCRBY", KEYS[2], 0)}"""),

        # returns number of items {in_queue, taken, completed}
        # O(n)  -- eek!
        lq_qsize_slow=dict(
            keys=('Q', 'Qi'), args=(), script="""
local taken = 0
local queued = 0
for _,k in ipairs(redis.call("ZRANGE", KEYS[1], 0, -1)) do
local v = redis.call("GET", k)
if "completed" ~= v then
    if v then taken = taken + 1
    else queued = queued + 1 end
end
end
return {queued, taken, redis.call("INCRBY", KEYS[2], 0)}
"""),

        # returns whether an item is in queue or currently being processed.
        # returns boolean tuple of form:  (is_taken, is_queued, is_completed)
        # O(1)
        lq_is_queued_h_k=dict(
            keys=('Q', 'h_k'), args=(), script="""
local taken = redis.call("GET", KEYS[2])
if "completed" == taken then
return {false, false, true}
elseif taken then return {true, false, false}
else return {false, false ~= redis.call("ZSCORE", KEYS[1], KEYS[2]), false} end
"""),

        # returns whether an item is in queue or currently being processed.
        # raises an error if already completed.
        # O(N * strlen(item)) -- eek!
        lq_is_queued_item=dict(
            keys=('Q', 'item'), args=(), script="""
for _,k in ipairs(redis.call("ZRANGE", KEYS[1], 0, -1)) do
if string.sub(k, -string.len(KEYS[2])) == KEYS[2] then
    local taken = redis.call("GET", k)
    if taken then
    if "completed" == taken then return {false, false, true} end
    return {true, false, false}
    else
    return {false, true, false} end
end
end
return {false, false, false}
"""),
    )

    def __init__(self, path):
        super(LockingQueue, self).__init__(path)
        self._q_lookup = ".%s" % path

        self._item = None
        self._h_k = None

    def __del__(self):
        for k in list(self.LOCKS):
            if self.LOCKS.get(k) == self._client_id:
                self.LOCKS.pop(k)

    def put(self, value, priority=100):
        """Add item onto queue.
        Rank items by priority.  Get low priority items before high priority
        """
        # format into hashed key
        h_k = "%d:%f:%s" % (priority, time.time(), value)

        rv = raw_client().evalsha(
            self._SHAS['lq_put'],
            len(self.SCRIPTS['lq_put']['keys']),
            self._path, h_k)
        assert rv == 1

    def consume(self):
        """Consume value gotten from queue.
        Raise UserWarning if consume() called before get()
        """
        if self._item is None:
            raise UserWarning("Must call get() before consume()")

        self.LOCKS.pop(self._h_k)

        rv = raw_client().evalsha(
            self._SHAS['lq_consume'],
            len(self.SCRIPTS['lq_consume']['keys']),
            self._h_k, self._path, self._q_lookup, self._client_id)
        assert rv == 1

        self._h_k = None
        self._item = None

    def get(self, timeout=None):
        """Get an item from the queue or return None.  Do not block forever."""
        if self._item is not None:
            return self._item

        expire_at = int(time.time() + self._lock_timeout)

        with timeout_cm(timeout):  # won't block forever
            try:
                self._h_k = raw_client().evalsha(
                    self._SHAS['lq_get'],
                    len(self.SCRIPTS['lq_get']['keys']),
                    self._path, self._client_id, expire_at)
            except redis.exceptions.ResponseError as err:
                if str(err) not in ['queue empty', 'already locked']:
                    raise err

        if self._h_k:
            priority, insert_time, item = self._h_k.decode().split(':', 2)
            self._item = item
            self.LOCKS[self._h_k] = self._client_id
            return self._item

    def size(self, queued=True, taken=True):
        """
        Find the number of jobs in the queue

        `queued` - Include the entries in the queue that are not currently
            being processed or otherwise locked
        `taken` - Include the entries in the queue that are currently being
            processed or are otherwise locked

        Raise AttributeError if all kwargs are False
        """
        if not queued and not taken:
            raise AttributeError("either `taken` or `queued` must be True")

        if taken and queued:

            n_queued_and_taken, _ = raw_client().evalsha(
                self._SHAS['lq_qsize_fast'],
                len(self.SCRIPTS['lq_qsize_fast']['keys']),
                self._path, self._q_lookup)
            return n_queued_and_taken
        else:
            nqueued, ntaken, _ = raw_client().evalsha(
                self._SHAS['lq_qsize_slow'],
                len(self.SCRIPTS['lq_qsize_slow']['keys']),
                self._path, self._q_lookup)
            if queued:
                return nqueued
            elif taken:
                return ntaken
            else:
                raise Exception('code error - should never get here')

    def is_queued(self, value):
        """
        Return True if item is in queue or currently being processed.
        False otherwise

        Redis will not like this operation.  Use sparingly with large queues.
        """
        if value == self._item:
            taken, queued, completed = raw_client().evalsha(
                self._SHAS['lq_is_queued_h_k'],
                len(self.SCRIPTS['lq_is_queued_h_k']['keys']),
                self._path, self._h_k)
        else:
            taken, queued, completed = raw_client().evalsha(
                self._SHAS['lq_is_queued_item'],
                len(self.SCRIPTS['lq_is_queued_item']['keys']),
                self._path, value)
        return taken or queued
Example #35
class RedisDriver(coordination.CoordinationDriverCachedRunWatchers,
                  coordination.CoordinationDriverWithExecutor):
    """Redis provides a few nice benefits that act as a poormans zookeeper.

    It **is** fully functional and implements all of the coordination
    driver API(s). It stores data into `redis`_ using the provided `redis`_
    API(s) using `msgpack`_ encoded values as needed.

    - Durability (when setup with `AOF`_ mode).
    - Consistent, note that this is still restricted to only
      one redis server, without the recently released redis (alpha)
      clustering > 1 server will not be consistent when partitions
      or failures occur (even redis clustering docs state it is
      not a fully AP or CP solution, which means even with it there
      will still be *potential* inconsistencies).
    - Master/slave failover (when setup with redis `sentinel`_), giving
      some notion of HA (values *can* be lost when a failover transition
      occurs).

    The Redis driver connection URI should look like::

      redis://[:PASSWORD@]HOST:PORT[?OPTION=VALUE[&OPTION2=VALUE2[&...]]]

    For a list of options recognized by this driver, see the documentation
    for the member CLIENT_ARGS, and to determine the expected types of those
    options see CLIENT_BOOL_ARGS, CLIENT_INT_ARGS, and CLIENT_LIST_ARGS.

    To use a `sentinel`_ the connection URI must point to the sentinel server.
    At connection time the sentinel will be asked for the current IP and port
    of the master and then connect there. The connection URI for sentinel
    should be written as follows::

      redis://<sentinel host>:<sentinel port>?sentinel=<master name>

    Additional sentinel hosts are listed with multiple ``sentinel_fallback``
    parameters as follows::

        redis://<sentinel host>:<sentinel port>?sentinel=<master name>&
          sentinel_fallback=<other sentinel host>:<sentinel port>&
          sentinel_fallback=<other sentinel host>:<sentinel port>&
          sentinel_fallback=<other sentinel host>:<sentinel port>

    Further resources/links:

    - http://redis.io/
    - http://redis.io/topics/sentinel
    - http://redis.io/topics/cluster-spec

    Note that this client will itself retry on transaction failure (when the
    keys being watched have changed underneath the current transaction).
    Currently the number of attempts that are tried is infinite (this might
    be addressed in https://github.com/andymccurdy/redis-py/issues/566 when
    that gets worked on). See http://redis.io/topics/transactions for more
    information on this topic.

    General recommendations/usage considerations:

    - When used for locks, run in AOF mode and think carefully about how
      your redis deployment handles losing a server (the clustering support
      is supposed to aid in losing servers, but it is also of unknown
      reliability and is relatively new, so use at your own risk).

    .. _redis: http://redis.io/
    .. _msgpack: http://msgpack.org/
    .. _sentinel: http://redis.io/topics/sentinel
    .. _AOF: http://redis.io/topics/persistence
    """

    CHARACTERISTICS = (
        coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
        coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
        coordination.Characteristics.DISTRIBUTED_ACROSS_HOSTS,
        coordination.Characteristics.CAUSAL,
    )
    """
    Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
    enum member(s) that can be used to interrogate how this driver works.
    """

    MIN_VERSION = version.LooseVersion("2.6.0")
    """
    The min redis version that this driver requires to operate with...
    """

    GROUP_EXISTS = b'__created__'
    """
    Redis deletes dictionaries that have no keys in them, which means the
    group key would disappear, and then we couldn't tell the difference
    between a group not existing and a group being empty; hence this key
    is saved...
    """

    #: Value used (with group exists key) to keep a group from disappearing.
    GROUP_EXISTS_VALUE = b'1'

    #: Default namespace for keys when none is provided.
    DEFAULT_NAMESPACE = b'_tooz'

    NAMESPACE_SEP = b':'
    """
    Separator that is used to combine a key with the namespace (to get
    the **actual** key that will be used).
    """

    DEFAULT_ENCODING = 'utf8'
    """
    This is for python3.x, which behaves differently when returning
    binary types or unicode types (redis uses binary internally, it appears),
    so to just stick with a common way of doing this, make all the things
    binary (with this default encoding if one is not given and a unicode
    string is provided).
    """

    CLIENT_ARGS = frozenset([
        'db',
        'encoding',
        'retry_on_timeout',
        'socket_keepalive',
        'socket_timeout',
        'ssl',
        'ssl_certfile',
        'ssl_keyfile',
        'sentinel',
        'sentinel_fallback',
    ])
    """
    Keys that we allow to proxy from the coordinator configuration into the
    redis client (used to configure the redis client internals so that
    it works as you expect/want it to).

    See: http://redis-py.readthedocs.org/en/latest/#redis.Redis

    See: https://github.com/andymccurdy/redis-py/blob/2.10.3/redis/client.py
    """

    #: Client arguments that are expected/allowed to be lists.
    CLIENT_LIST_ARGS = frozenset([
        'sentinel_fallback',
    ])

    #: Client arguments that are expected to be boolean convertible.
    CLIENT_BOOL_ARGS = frozenset([
        'retry_on_timeout',
        'ssl',
    ])

    #: Client arguments that are expected to be int convertible.
    CLIENT_INT_ARGS = frozenset([
        'db',
        'socket_keepalive',
        'socket_timeout',
    ])

    #: Default socket timeout to use when none is provided.
    CLIENT_DEFAULT_SOCKET_TO = 30

    #: String used to keep a key/member alive (until it next expires).
    STILL_ALIVE = b"Not dead!"

    SCRIPTS = {
        'create_group':
        """
-- Extract *all* the variables (so we can easily know what they are)...
local namespaced_group_key = KEYS[1]
local all_groups_key = KEYS[2]
local no_namespaced_group_key = ARGV[1]
if redis.call("exists", namespaced_group_key) == 1 then
    return 0
end
redis.call("sadd", all_groups_key, no_namespaced_group_key)
redis.call("hset", namespaced_group_key,
           "${group_existence_key}", "${group_existence_value}")
return 1
""",
        'delete_group':
        """
-- Extract *all* the variables (so we can easily know what they are)...
local namespaced_group_key = KEYS[1]
local all_groups_key = KEYS[2]
local no_namespaced_group_key = ARGV[1]
if redis.call("exists", namespaced_group_key) == 0 then
    return -1
end
if redis.call("sismember", all_groups_key, no_namespaced_group_key) == 0 then
    return -2
end
if redis.call("hlen", namespaced_group_key) > 1 then
    return -3
end
-- First remove from the set (then delete the group); if the set removal
-- fails, at least the group will still exist (and can be fixed manually)...
if redis.call("srem", all_groups_key, no_namespaced_group_key) == 0 then
    return -4
end
redis.call("del", namespaced_group_key)
return 1
""",
        'update_capabilities':
        """
-- Extract *all* the variables (so we can easily know what they are)...
local group_key = KEYS[1]
local member_id = ARGV[1]
local caps = ARGV[2]
if redis.call("exists", group_key) == 0 then
    return -1
end
if redis.call("hexists", group_key, member_id) == 0 then
    return -2
end
redis.call("hset", group_key, member_id, caps)
return 1
""",
    }
    """`Lua`_ **template** scripts that will be used by various methods (they
    are turned into real scripts and loaded on call into the :func:`.start`
    method).

    .. _Lua: http://www.lua.org
    """

    EXCLUDE_OPTIONS = CLIENT_LIST_ARGS

    def __init__(self, member_id, parsed_url, options):
        super(RedisDriver, self).__init__(member_id, parsed_url, options)
        self._parsed_url = parsed_url
        self._encoding = self._options.get('encoding', self.DEFAULT_ENCODING)
        timeout = self._options.get('timeout', self.CLIENT_DEFAULT_SOCKET_TO)
        self.timeout = int(timeout)
        self.membership_timeout = float(
            self._options.get('membership_timeout', timeout))
        lock_timeout = self._options.get('lock_timeout', self.timeout)
        self.lock_timeout = int(lock_timeout)
        namespace = self._options.get('namespace', self.DEFAULT_NAMESPACE)
        self._namespace = utils.to_binary(namespace, encoding=self._encoding)
        self._group_prefix = self._namespace + b"_group"
        self._beat_prefix = self._namespace + b"_beats"
        self._groups = self._namespace + b"_groups"
        self._client = None
        self._acquired_locks = set()
        self._started = False
        self._server_info = {}
        self._scripts = {}

    def _check_fetch_redis_version(self, geq_version, not_existent=True):
        if isinstance(geq_version, six.string_types):
            desired_version = version.LooseVersion(geq_version)
        elif isinstance(geq_version, version.LooseVersion):
            desired_version = geq_version
        else:
            raise TypeError("Version check expects a string/version type")
        try:
            redis_version = version.LooseVersion(
                self._server_info['redis_version'])
        except KeyError:
            return (not_existent, None)
        else:
            if redis_version < desired_version:
                return (False, redis_version)
            else:
                return (True, redis_version)

    @property
    def namespace(self):
        return self._namespace

    @property
    def running(self):
        return self._started

    def get_lock(self, name):
        return RedisLock(self, self._client, name, self.lock_timeout)

    _dumps = staticmethod(utils.dumps)
    _loads = staticmethod(utils.loads)

    @classmethod
    def _make_client(cls, parsed_url, options, default_socket_timeout):
        kwargs = {}
        if parsed_url.hostname:
            kwargs['host'] = parsed_url.hostname
            if parsed_url.port:
                kwargs['port'] = parsed_url.port
        else:
            if not parsed_url.path:
                raise ValueError("Expected socket path in parsed urls path")
            kwargs['unix_socket_path'] = parsed_url.path
        if parsed_url.password:
            kwargs['password'] = parsed_url.password
        for a in cls.CLIENT_ARGS:
            if a not in options:
                continue
            if a in cls.CLIENT_BOOL_ARGS:
                v = strutils.bool_from_string(options[a])
            elif a in cls.CLIENT_LIST_ARGS:
                v = options[a]
            elif a in cls.CLIENT_INT_ARGS:
                v = int(options[a])
            else:
                v = options[a]
            kwargs[a] = v
        if 'socket_timeout' not in kwargs:
            kwargs['socket_timeout'] = default_socket_timeout

        # Ask the sentinel for the current master if there is a
        # sentinel arg.
        if 'sentinel' in kwargs:
            sentinel_hosts = [
                tuple(fallback.split(':'))
                for fallback in kwargs.get('sentinel_fallback', [])
            ]
            sentinel_hosts.insert(0, (kwargs['host'], kwargs['port']))
            sentinel_server = sentinel.Sentinel(
                sentinel_hosts, socket_timeout=kwargs['socket_timeout'])
            sentinel_name = kwargs['sentinel']
            del kwargs['sentinel']
            if 'sentinel_fallback' in kwargs:
                del kwargs['sentinel_fallback']
            master_client = sentinel_server.master_for(sentinel_name, **kwargs)
            # The master_client is a redis.StrictRedis using a
            # Sentinel managed connection pool.
            return master_client
        return redis.StrictRedis(**kwargs)

    def _start(self):
        super(RedisDriver, self)._start()
        try:
            self._client = self._make_client(self._parsed_url, self._options,
                                             self.timeout)
        except exceptions.RedisError as e:
            utils.raise_with_cause(coordination.ToozConnectionError,
                                   encodeutils.exception_to_unicode(e),
                                   cause=e)
        else:
            # Ensure that the server is alive and not dead; this does not
            # ensure the server will always be alive, but does ensure that it
            # at least is alive once...
            with _translate_failures():
                self._server_info = self._client.info()
            # Validate we have a good enough redis version we are connected
            # to so that the basic set of features we support will actually
            # work (instead of blowing up).
            new_enough, redis_version = self._check_fetch_redis_version(
                self.MIN_VERSION)
            if not new_enough:
                raise tooz.NotImplemented("Redis version greater than or"
                                          " equal to '%s' is required"
                                          " to use this driver; '%s' is"
                                          " being used which is not new"
                                          " enough" %
                                          (self.MIN_VERSION, redis_version))
            tpl_params = {
                'group_existence_value': self.GROUP_EXISTS_VALUE,
                'group_existence_key': self.GROUP_EXISTS,
            }
            # For py3.x ensure these are unicode since the string template
            # replacement will expect unicode (and we don't want b'' as a
            # prefix which will happen in py3.x if this is not done).
            for (k, v) in six.iteritems(tpl_params.copy()):
                if isinstance(v, six.binary_type):
                    v = v.decode('ascii')
                tpl_params[k] = v
            prepared_scripts = {}
            for name, raw_script_tpl in six.iteritems(self.SCRIPTS):
                script_tpl = string.Template(raw_script_tpl)
                script = script_tpl.substitute(**tpl_params)
                prepared_scripts[name] = self._client.register_script(script)
            self._scripts = prepared_scripts
            self.heartbeat()
            self._started = True

    def _encode_beat_id(self, member_id):
        member_id = utils.to_binary(member_id, encoding=self._encoding)
        return self.NAMESPACE_SEP.join([self._beat_prefix, member_id])

    def _encode_member_id(self, member_id):
        member_id = utils.to_binary(member_id, encoding=self._encoding)
        if member_id == self.GROUP_EXISTS:
            raise ValueError("Not allowed to use private keys as a member id")
        return member_id

    def _decode_member_id(self, member_id):
        return utils.to_binary(member_id, encoding=self._encoding)

    def _encode_group_leader(self, group_id):
        group_id = utils.to_binary(group_id, encoding=self._encoding)
        return b"leader_of_" + group_id

    def _encode_group_id(self, group_id, apply_namespace=True):
        group_id = utils.to_binary(group_id, encoding=self._encoding)
        if not apply_namespace:
            return group_id
        return self.NAMESPACE_SEP.join([self._group_prefix, group_id])

    def _decode_group_id(self, group_id):
        return utils.to_binary(group_id, encoding=self._encoding)

    def heartbeat(self):
        with _translate_failures():
            beat_id = self._encode_beat_id(self._member_id)
            expiry_ms = max(0, int(self.membership_timeout * 1000.0))
            self._client.psetex(beat_id,
                                time_ms=expiry_ms,
                                value=self.STILL_ALIVE)
        for lock in self._acquired_locks.copy():
            try:
                lock.heartbeat()
            except tooz.ToozError:
                LOG.warning("Unable to heartbeat lock '%s'",
                            lock,
                            exc_info=True)
        return min(self.lock_timeout, self.membership_timeout)

    def _stop(self):
        while self._acquired_locks:
            lock = self._acquired_locks.pop()
            try:
                lock.release()
            except tooz.ToozError:
                LOG.warning("Unable to release lock '%s'", lock, exc_info=True)
        super(RedisDriver, self)._stop()
        if self._client is not None:
            # Make sure we no longer exist...
            beat_id = self._encode_beat_id(self._member_id)
            try:
                # NOTE(harlowja): this will delete nothing if the key doesn't
                # exist in the first place, which is fine/expected/desired...
                with _translate_failures():
                    self._client.delete(beat_id)
            except tooz.ToozError:
                LOG.warning("Unable to delete heartbeat key '%s'",
                            beat_id,
                            exc_info=True)
            self._client = None
        self._server_info = {}
        self._scripts.clear()
        self._started = False

    def _submit(self, cb, *args, **kwargs):
        if not self._started:
            raise tooz.ToozError("Redis driver has not been started")
        return self._executor.submit(cb, *args, **kwargs)

    def _get_script(self, script_key):
        try:
            return self._scripts[script_key]
        except KeyError:
            raise tooz.ToozError("Redis driver has not been started")

    def create_group(self, group_id):
        script = self._get_script('create_group')

        def _create_group(script):
            encoded_group = self._encode_group_id(group_id)
            keys = [
                encoded_group,
                self._groups,
            ]
            args = [
                self._encode_group_id(group_id, apply_namespace=False),
            ]
            result = script(keys=keys, args=args)
            result = strutils.bool_from_string(result)
            if not result:
                raise coordination.GroupAlreadyExist(group_id)

        return RedisFutureResult(self._submit(_create_group, script))

    def update_capabilities(self, group_id, capabilities):
        script = self._get_script('update_capabilities')

        def _update_capabilities(script):
            keys = [
                self._encode_group_id(group_id),
            ]
            args = [
                self._encode_member_id(self._member_id),
                self._dumps(capabilities),
            ]
            result = int(script(keys=keys, args=args))
            if result == -1:
                raise coordination.GroupNotCreated(group_id)
            if result == -2:
                raise coordination.MemberNotJoined(group_id, self._member_id)

        return RedisFutureResult(self._submit(_update_capabilities, script))

    def leave_group(self, group_id):
        encoded_group = self._encode_group_id(group_id)
        encoded_member_id = self._encode_member_id(self._member_id)
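
        # NOTE: client.transaction() below WATCHes encoded_group, invokes
        # _leave_group with a pipeline, and retries on WatchError, so the
        # exists() check and hdel() run atomically with respect to
        # concurrent changes to the group key.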

        def _leave_group(p):
            if not p.exists(encoded_group):
                raise coordination.GroupNotCreated(group_id)
            p.multi()
            p.hdel(encoded_group, encoded_member_id)
            c = p.execute()[0]
            if c == 0:
                raise coordination.MemberNotJoined(group_id, self._member_id)
            else:
                self._joined_groups.discard(group_id)

        return RedisFutureResult(
            self._submit(self._client.transaction,
                         _leave_group,
                         encoded_group,
                         value_from_callable=True))

    def get_members(self, group_id):
        encoded_group = self._encode_group_id(group_id)

        def _get_members(p):
            if not p.exists(encoded_group):
                raise coordination.GroupNotCreated(group_id)
            potential_members = set()
            for m in p.hkeys(encoded_group):
                m = self._decode_member_id(m)
                if m != self.GROUP_EXISTS:
                    potential_members.add(m)
            if not potential_members:
                return set()
            # Ok now we need to see which members have passed away...
            gone_members = set()
            member_values = p.mget(
                compat_map(self._encode_beat_id, potential_members))
            for (potential_member,
                 value) in compat_zip(potential_members, member_values):
                # Always preserve self (just in case we haven't heartbeated
                # while this call was being made...); this does *not* prevent
                # another client from removing us, though...
                if potential_member == self._member_id:
                    continue
                if not value:
                    gone_members.add(potential_member)
            # Trash all the members that are no longer with us... RIP...
            if gone_members:
                p.multi()
                encoded_gone_members = list(
                    self._encode_member_id(m) for m in gone_members)
                p.hdel(encoded_group, *encoded_gone_members)
                p.execute()
                return set(m for m in potential_members
                           if m not in gone_members)
            return potential_members

        return RedisFutureResult(
            self._submit(self._client.transaction,
                         _get_members,
                         encoded_group,
                         value_from_callable=True))

    def get_member_capabilities(self, group_id, member_id):
        encoded_group = self._encode_group_id(group_id)
        encoded_member_id = self._encode_member_id(member_id)

        def _get_member_capabilities(p):
            if not p.exists(encoded_group):
                raise coordination.GroupNotCreated(group_id)
            capabilities = p.hget(encoded_group, encoded_member_id)
            if capabilities is None:
                raise coordination.MemberNotJoined(group_id, member_id)
            return self._loads(capabilities)

        return RedisFutureResult(
            self._submit(self._client.transaction,
                         _get_member_capabilities,
                         encoded_group,
                         value_from_callable=True))

    def join_group(self, group_id, capabilities=b""):
        encoded_group = self._encode_group_id(group_id)
        encoded_member_id = self._encode_member_id(self._member_id)

        def _join_group(p):
            if not p.exists(encoded_group):
                raise coordination.GroupNotCreated(group_id)
            p.multi()
            p.hset(encoded_group, encoded_member_id, self._dumps(capabilities))
            c = p.execute()[0]
            if c == 0:
                # Field already exists...
                raise coordination.MemberAlreadyExist(group_id,
                                                      self._member_id)
            else:
                self._joined_groups.add(group_id)

        return RedisFutureResult(
            self._submit(self._client.transaction,
                         _join_group,
                         encoded_group,
                         value_from_callable=True))

    def delete_group(self, group_id):
        script = self._get_script('delete_group')

        def _delete_group(script):
            keys = [
                self._encode_group_id(group_id),
                self._groups,
            ]
            args = [
                self._encode_group_id(group_id, apply_namespace=False),
            ]
            result = int(script(keys=keys, args=args))
            if result in (-1, -2):
                raise coordination.GroupNotCreated(group_id)
            if result == -3:
                raise coordination.GroupNotEmpty(group_id)
            if result == -4:
                raise tooz.ToozError("Unable to remove '%s' key"
                                     " from set located at '%s'" %
                                     (args[0], keys[-1]))
            if result != 1:
                raise tooz.ToozError("Internal error, unable"
                                     " to complete group '%s' removal" %
                                     (group_id))

        return RedisFutureResult(self._submit(_delete_group, script))

    def _destroy_group(self, group_id):
        """Should only be used in tests..."""
        self._client.delete(self._encode_group_id(group_id))

    def get_groups(self):
        def _get_groups():
            results = []
            for g in self._client.smembers(self._groups):
                results.append(self._decode_group_id(g))
            return results

        return RedisFutureResult(self._submit(_get_groups))

    def _get_leader_lock(self, group_id):
        name = self._encode_group_leader(group_id)
        return self.get_lock(name)

    def run_elect_coordinator(self):
        for group_id, hooks in six.iteritems(self._hooks_elected_leader):
            leader_lock = self._get_leader_lock(group_id)
            if leader_lock.acquire(blocking=False):
                # We got the lock
                hooks.run(coordination.LeaderElected(group_id,
                                                     self._member_id))

    def run_watchers(self, timeout=None):
        result = super(RedisDriver, self).run_watchers(timeout=timeout)
        self.run_elect_coordinator()
        return result
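
The driver above is normally exercised through tooz's public coordination
API rather than instantiated directly. Below is a minimal usage sketch,
assuming a Redis server reachable at localhost:6379 and the standard tooz
entry points (get_coordinator, start, heartbeat, stop); the group and
member ids are illustrative only.

import time

from tooz import coordination

coordinator = coordination.get_coordinator(
    'redis://localhost:6379', b'member-1')
coordinator.start()

# Group calls return future-like results (RedisFutureResult above);
# .get() blocks and raises the coordination exceptions seen in the driver.
try:
    coordinator.create_group(b'example-group').get()
except coordination.GroupAlreadyExist:
    pass

coordinator.join_group(b'example-group', capabilities=b'worker').get()
print(coordinator.get_members(b'example-group').get())

# Keep the membership alive; heartbeat() returns the suggested number of
# seconds to wait (min(lock_timeout, membership_timeout) in this driver).
for _ in range(3):
    time.sleep(coordinator.heartbeat())

coordinator.leave_group(b'example-group').get()
coordinator.stop()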