Example 1
def test_ask_redirection():
    """
    Test that the server handles ASK response.

    At first call it should return an ASK ResponseError that will point
    the client to the next server it should talk to.

    The important thing to verify is that the client then talks to the second node.
    """
    r = RedisCluster(host="127.0.0.1", port=7000)

    m = Mock(autospec=True)

    def ask_redirect_effect(connection, command_name, **options):
        def ok_response(connection, command_name, **options):
            assert connection.host == "127.0.0.1"
            assert connection.port == 7001

            return "MOCK_OK"
        m.side_effect = ok_response
        resp = ResponseError()
        resp.message = "ASK 1337 127.0.0.1:7001"
        raise resp

    m.side_effect = ask_redirect_effect

    r.parse_response = m
    assert r.execute_command("SET", "foo", "bar") == "MOCK_OK"
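The mocked error above carries the message "ASK 1337 127.0.0.1:7001", i.e. the slot and the node the client is expected to retry against. Below is a minimal, standalone sketch of splitting such a redirection message; the helper name parse_redirect is illustrative and not redis-py-cluster API.

# Sketch only: split an "ASK <slot> <host>:<port>" (or MOVED) error message
# into its parts; `parse_redirect` is a hypothetical helper, not library API.
def parse_redirect(message):
    kind, slot, addr = message.split(" ")
    host, port = addr.rsplit(":", 1)
    return kind, int(slot), host, int(port)

assert parse_redirect("ASK 1337 127.0.0.1:7001") == ("ASK", 1337, "127.0.0.1", 7001)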
Example 2
def test_moved_redirection_pipeline():
    """
    Test that the server handles MOVED response when used in pipeline.

    At first call it should return a MOVED ResponseError that will point
    the client to the next server it should talk to.

    The important thing to verify is that the client then talks to the second node.
    """
    r = RedisCluster(host="127.0.0.1", port=7000)
    p = r.pipeline()

    m = Mock(autospec=True)

    def moved_redirect_effect(connection, command_name, **options):
        def ok_response(connection, command_name, **options):
            assert connection.host == "127.0.0.1"
            assert connection.port == 7002

            return "MOCK_OK"
        m.side_effect = ok_response
        resp = ResponseError()
        resp.message = "MOVED 12182 127.0.0.1:7002"
        raise resp

    m.side_effect = moved_redirect_effect

    p.parse_response = m
    p.set("foo", "bar")
    assert p.execute() == ["MOCK_OK"]
Example 3
    def monkey_link(host=None, port=None, decode_responses=False):
        """
        Helper function to return custom slots cache data from different redis nodes
        """
        if port == 7000:
            result = [[0, 5460, [b'127.0.0.1', 7000], [b'127.0.0.1', 7003]],
                      [5461, 10922, [b'127.0.0.1', 7001], [b'127.0.0.1', 7004]]]

        elif port == 7001:
            result = [[0, 5460, [b'127.0.0.1', 7001], [b'127.0.0.1', 7003]],
                      [5461, 10922, [b'127.0.0.1', 7000], [b'127.0.0.1', 7004]]]

        else:
            result = []

        r = RedisCluster(host=host, port=port, decode_responses=True)
        orig_execute_command = r.execute_command

        def execute_command(*args, **kwargs):
            if args == ("cluster", "slots"):
                return result
            return orig_execute_command(*args, **kwargs)

        r.execute_command = execute_command
        return r
Example 4
    def init(self):
        """
        Called by Scheduler to say 'let's prepare yourself guy'
        """
        logger.info('[RedisClusterRetention] Initialization of the redis '
                    'module')
        if self.password:
            self.rc = RedisCluster(startup_nodes=self.servers,
                                   password=self.password)
        else:
            self.rc = RedisCluster(startup_nodes=self.servers)
Example 5
def test_clusterdown_exception_handling():
    """
    Test that if the exception message starts with CLUSTERDOWN, the client
    disconnects the connection pool and sets refresh_table_asap to True.
    """
    with patch.object(ClusterConnectionPool, 'disconnect') as mock_disconnect:
        with patch.object(ClusterConnectionPool, 'reset') as mock_reset:
            r = RedisCluster(host="127.0.0.1", port=7000)
            i = len(mock_reset.mock_calls)

            assert r.handle_cluster_command_exception(Exception("CLUSTERDOWN")) == {"method": "clusterdown"}
            assert r.refresh_table_asap is True

            mock_disconnect.assert_called_once_with()

            # reset() should only be called once inside `handle_cluster_command_exception`
            assert len(mock_reset.mock_calls) - i == 1
Example 6
    def test_api(self):
        comm.start_cluster('127.0.0.1', 7100)
        comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101)
        comm.replicate('127.0.0.1', 7100, '127.0.0.1', 7102)
        time.sleep(1)

        rc = RedisCluster([{'host': '127.0.0.1', 'port': 7100}])

        for i in xrange(20):
            rc.set('key_%s' % i, 'value_%s' % i)
        for i in xrange(20):
            self.assertEqual('value_%s' % i, rc.get('key_%s' % i))

        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(3, len(nodes))
        self.assertEqual(range(8192),
                         nodes[('127.0.0.1', 7101)].assigned_slots)
        self.assertEqual(range(8192, 16384),
                         nodes[('127.0.0.1', 7100)].assigned_slots)

        comm.quit_cluster('127.0.0.1', 7101)

        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(range(16384),
                         nodes[('127.0.0.1', 7100)].assigned_slots)

        for i in xrange(20):
            self.assertEqual('value_%s' % i, rc.get('key_%s' % i))

        for i in xrange(20):
            rc.delete('key_%s' % i)

        comm.quit_cluster('127.0.0.1', 7102)
        comm.shutdown_cluster('127.0.0.1', 7100)
Example 7
    def __init__(self, queue, parent_pid, skip_mini, canary=False):
        super(Worker, self).__init__()

        self.redis_conn = RedisCluster(startup_nodes=settings.startup_nodes, decode_responses=True) 
        self.q = queue
        self.parent_pid = parent_pid
        self.daemon = True
        self.canary = canary
        self.skip_mini = skip_mini
Example 8
def seed():
    print 'Loading data over UDP via Horizon...'
    metric = 'horizon.test.udp'
    metric_set = 'unique_metrics'
    initial = int(time.time()) - settings.MAX_RESOLUTION

    with open(join(__location__, 'data.json'), 'r') as f:
        data = json.loads(f.read())
        series = data['results']
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        for datapoint in series:
            datapoint[0] = initial
            initial += 1
            packet = msgpack.packb((metric, datapoint))
            sock.sendto(packet, (socket.gethostname(), settings.UDP_PORT))

    print "Connecting to Redis..."
    r = RedisCluster(startup_nodes=settings.startup_nodes, decode_responses=True)
    time.sleep(5)

    try:
        x = r.smembers(settings.FULL_NAMESPACE + metric_set)
        if x is None:
            raise NoDataException

        x = r.get(settings.FULL_NAMESPACE + metric)
        if x is None:
            raise NoDataException

        #Ignore the mini namespace if OCULUS_HOST isn't set.
        if settings.OCULUS_HOST != "":
            x = r.smembers(settings.MINI_NAMESPACE + metric_set)
            if x is None:
                raise NoDataException

            x = r.get(settings.MINI_NAMESPACE + metric)
            if x is None:
                raise NoDataException

        print "Congratulations! The data made it in. The Horizon pipeline seems to be working."

    except NoDataException:
        print "Woops, looks like the metrics didn't make it into Horizon. Try again?"
Example 9
    def test_fix(self):
        def migrate_one_slot(nodes, _):
            if nodes[0].port == 7100:
                source, target = nodes
            else:
                target, source = nodes
            return [(source, target, 1)]

        comm.start_cluster('127.0.0.1', 7100)
        rc = RedisCluster([{'host': '127.0.0.1', 'port': 7100}])
        comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101,
                          balance_plan=migrate_one_slot)

        rc.set('h-893', 'I am in slot 0')
        comm.fix_migrating('127.0.0.1', 7100)
        self.assertEqual('I am in slot 0', rc.get('h-893'))

        t7100 = Talker('127.0.0.1', 7100)
        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))

        n7100 = nodes[('127.0.0.1', 7100)]
        n7101 = nodes[('127.0.0.1', 7101)]
        t7100.talk('cluster', 'setslot', 0, 'importing', n7101.node_id)

        comm.fix_migrating('127.0.0.1', 7100)
        self.assertEqual('I am in slot 0', rc.get('h-893'))

        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        n7100 = nodes[('127.0.0.1', 7100)]
        n7101 = nodes[('127.0.0.1', 7101)]
        self.assertEqual(16384, len(n7100.assigned_slots))
        self.assertEqual(0, len(n7101.assigned_slots))

        t7101 = Talker('127.0.0.1', 7101)
        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        n7100 = nodes[('127.0.0.1', 7100)]
        n7101 = nodes[('127.0.0.1', 7101)]
        self.assertEqual(16384, len(n7100.assigned_slots))
        self.assertEqual(0, len(n7101.assigned_slots))

        t7100.talk('cluster', 'setslot', 0, 'migrating', n7101.node_id)
        comm.fix_migrating('127.0.0.1', 7100)
        self.assertEqual('I am in slot 0', rc.get('h-893'))

        comm.quit_cluster('127.0.0.1', 7101)
        rc.delete('h-893')
        comm.shutdown_cluster('127.0.0.1', 7100)

        t7100.close()
        t7101.close()
Example 10
def test_refresh_table_asap():
    """
    If refresh_table_asap is set externally, initialize() should be called before the next command is executed.
    """
    with patch.object(NodeManager, 'initialize') as mock_initialize:
        mock_initialize.return_value = None

        r = RedisCluster(host="127.0.0.1", port=7000)
        r.connection_pool.nodes.slots[12182] = {
            "host": "127.0.0.1",
            "port": 7002,
            "name": "127.0.0.1:7002",
            "server_type": "master",
        }
        r.refresh_table_asap = True

        i = len(mock_initialize.mock_calls)
        r.execute_command("SET", "foo", "bar")
        assert len(mock_initialize.mock_calls) - i == 1
        assert r.refresh_table_asap is False
Example 11
    def test_close_existing_connection(self):
        """
        We cannot use the 'r' or 's' fixtures because they have already called flushdb()
        and connected to an arbitrary number of redis servers; creating the client manually
        here avoids that.

        close_existing_connection() is called inside get_connection_by_slot() and limits
        the number of stored connections to 1.
        """
        params = {'startup_nodes': [{"host": "127.0.0.1", "port": "7000"}],
                  'max_connections': 1,
                  'socket_timeout': 0.1,
                  'decode_responses': False}

        client = RedisCluster(**params)
        assert len(client.connections) == 0
        c1 = client.get_connection_by_slot(0)
        assert len(client.connections) == 1
        c2 = client.get_connection_by_slot(16000)
        assert len(client.connections) == 1
        assert c1 != c2  # The connections should differ even though only one is kept in the pool
Example 12
    def test_quit_problems(self):
        comm.start_cluster('127.0.0.1', 7100)
        comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101)
        comm.replicate('127.0.0.1', 7100, '127.0.0.1', 7102)
        time.sleep(1)

        rc = RedisCluster([{'host': '127.0.0.1', 'port': 7100}])

        for i in xrange(20):
            rc.set('key_%s' % i, 'value_%s' % i)
        for i in xrange(20):
            self.assertEqual('value_%s' % i, rc.get('key_%s' % i))

        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(3, len(nodes))
        self.assertEqual(range(8192),
                         nodes[('127.0.0.1', 7101)].assigned_slots)
        self.assertEqual(range(8192, 16384),
                         nodes[('127.0.0.1', 7100)].assigned_slots)
        for i in xrange(20):
            rc.delete('key_%s' % i)

        self.assertRaisesRegexp(ValueError, '^The master still has slaves$',
                                comm.quit_cluster, '127.0.0.1', 7100)
        comm.quit_cluster('127.0.0.1', 7102)
        comm.quit_cluster('127.0.0.1', 7101)
        self.assertRaisesRegexp(ValueError, '^This is the last node',
                                comm.quit_cluster, '127.0.0.1', 7100)
        comm.shutdown_cluster('127.0.0.1', 7100)
Example 13
    def __init__(self, parent_pid):
        """
        Initialize the Analyzer
        """
        super(Analyzer, self).__init__()
        #self.redis_conn = StrictRedis(unix_socket_path = settings.REDIS_SOCKET_PATH)
        self.redis_conn = RedisCluster(startup_nodes=settings.startup_nodes, decode_responses=True)
        self.daemon = True
        self.parent_pid = parent_pid
        self.current_pid = getpid()
        self.anomalous_metrics = Manager().list()
        self.exceptions_q = Queue()
        self.anomaly_breakdown_q = Queue()
Example 14
#-*- coding: utf-8 -*-
from flask_wtf import Form
from wtforms import StringField, BooleanField, PasswordField, TextAreaField, SelectField, SelectMultipleField, SubmitField, FileField, IntegerField
from wtforms.validators import DataRequired, Length
import time
import db_op, Mysql
from sqlalchemy import distinct
from rediscluster import RedisCluster
import __init__
app = __init__.app
nodes = app.config.get('NODES_PRODUCE')
rc = RedisCluster(startup_nodes=nodes, decode_responses=True)


class MyForm_Submit(Form):
    submit1 = SubmitField('提交', id='btn1')
    submit2 = SubmitField('提交', id='btn2')
    submit3 = SubmitField('提交', id='btn3')


class myform(Form):
    text = TextAreaField(validators=[DataRequired()])
    input = StringField('Input', validators=[DataRequired()])
    submit_redis = SubmitField('清除日志')
    submit = SubmitField('提交', id='btn1')


class Myform_op_user(Form):
    text = TextAreaField(validators=[DataRequired()])
    select = SelectField(
        choices=[('query', '查询'), ('add', '开通'), ('del',
Example 15
# -*- coding:utf-8 -*-
# @Time : 2020/3/18 上午11:17
# @Author: [email protected]
# @File : video_stream_validate_statistics.py

from rediscluster import RedisCluster

redis_nodes = [{"host": "192.168.23.90", "port": "7001", "database": 2},
               {"host": "192.168.23.82", "port": "7001", "database": 2}]
conn = RedisCluster(startup_nodes=redis_nodes, decode_responses=True)

key_prefix = 'camera_'
rc_end = conn.scan_iter(key_prefix + "*")
count_dic = {}
count_dic['total'] = 0
for k in rc_end:
    count_dic['total'] += 1
    v = conn.hget(k, "onlineStatus")
    if v not in count_dic:
        count_dic[v] = 0
    count_dic[v] += 1
print(count_dic)
Example 16
def redis_cluster_conn(startup_nodes=REDIS_CLUSTER_NODES):
    nodes = [{"host": x.split(":")[0], "port": int(x.split(":")[1])} for x in
             startup_nodes.strip().split(',')]
    print(nodes)
    rc = RedisCluster(startup_nodes=nodes, decode_responses=True)
    return rc
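A possible call of the helper above, assuming REDIS_CLUSTER_NODES holds a comma-separated "host:port" list; the addresses below are placeholders, not from the original project.

# Illustrative usage of redis_cluster_conn(); the node addresses are made up.
conn = redis_cluster_conn("127.0.0.1:7000,127.0.0.1:7001,127.0.0.1:7002")
conn.set("example_key", "example_value")
print(conn.get("example_key"))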
Example 17
    def run(self):
        """
        Called when the process initializes.
        """
        while 1:
            now = time()

            # Make sure Redis is up
            try:
                self.redis_conn.ping()
            except:
                logger.error('skyline can\'t connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
                sleep(10)
                # self.redis_conn = StrictRedis(unix_socket_path = settings.REDIS_SOCKET_PATH)
                self.redis_conn = RedisCluster(startup_nodes=settings.startup_nodes, decode_responses=True)
                continue

            # Discover unique metrics
            unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))

            if len(unique_metrics) == 0:
                logger.info('no metrics in redis. try adding some - see README')
                sleep(10)
                continue

            # Spawn processes
            pids = []
            for i in range(1, settings.ANALYZER_PROCESSES + 1):
                if i > len(unique_metrics):
                    logger.info('WARNING: skyline is set for more cores than needed.')
                    break

                p = Process(target=self.spin_process, args=(i, unique_metrics))
                pids.append(p)
                p.start()

            # Send wait signal to zombie processes
            for p in pids:
                p.join()

            # Grab data from the queue and populate dictionaries
            exceptions = dict()
            anomaly_breakdown = dict()
            while 1:
                try:
                    key, value = self.anomaly_breakdown_q.get_nowait()
                    if key not in anomaly_breakdown.keys():
                        anomaly_breakdown[key] = value
                    else:
                        anomaly_breakdown[key] += value
                except Empty:
                    break

            while 1:
                try:
                    key, value = self.exceptions_q.get_nowait()
                    if key not in exceptions.keys():
                        exceptions[key] = value
                    else:
                        exceptions[key] += value
                except Empty:
                    break

            # Send alerts
            if settings.ENABLE_ALERTS:
                for alert in settings.ALERTS:
                    for metric in self.anomalous_metrics:
                        if alert[0] in metric[1]:
                            cache_key = 'last_alert.%s.%s' % (alert[1], metric[1])
                            try:
                                last_alert = self.redis_conn.get(cache_key)
                                if not last_alert:
                                    self.redis_conn.setex(cache_key, alert[2], packb(metric[0]))
                                    trigger_alert(alert, metric)

                            except Exception as e:
                                logger.error("couldn't send alert: %s" % e)

            # Write anomalous_metrics to static webapp directory
            filename = path.abspath(path.join(path.dirname(__file__), '..', settings.ANOMALY_DUMP))
            with open(filename, 'w') as fh:
                # Make it JSONP with a handle_data() function
                anomalous_metrics = list(self.anomalous_metrics)
                anomalous_metrics.sort(key=operator.itemgetter(1))
                fh.write('handle_data(%s)' % anomalous_metrics)

            # Log progress
            logger.info('seconds to run    :: %.2f' % (time() - now))
            logger.info('total metrics     :: %d' % len(unique_metrics))
            logger.info('total analyzed    :: %d' % (len(unique_metrics) - sum(exceptions.values())))
            logger.info('total anomalies   :: %d' % len(self.anomalous_metrics))
            logger.info('exception stats   :: %s' % exceptions)
            logger.info('anomaly breakdown :: %s' % anomaly_breakdown)

            # Log to Graphite
            self.send_graphite_metric('skyline.analyzer.run_time', '%.2f' % (time() - now))
            self.send_graphite_metric('skyline.analyzer.total_analyzed', '%.2f' % (len(unique_metrics) - sum(exceptions.values())))

            # Check canary metric
            raw_series = self.redis_conn.get(settings.FULL_NAMESPACE + settings.CANARY_METRIC)
            if raw_series is not None:
                unpacker = Unpacker(use_list = False)
                unpacker.feed(raw_series)
                timeseries = list(unpacker)
                time_human = (timeseries[-1][0] - timeseries[0][0]) / 3600
                projected = 24 * (time() - now) / time_human

                logger.info('canary duration   :: %.2f' % time_human)
                self.send_graphite_metric('skyline.analyzer.duration', '%.2f' % time_human)
                self.send_graphite_metric('skyline.analyzer.projected', '%.2f' % projected)

            # Reset counters
            self.anomalous_metrics[:] = []

            # Sleep if it went too fast
            if time() - now < 5:
                logger.info('sleeping due to low run time...')
                sleep(10)
Example 18
    def get_connect(self):
        # Get a database connection
        try:
            if not self._url:
                if not self._ip_ports:
                    raise Exception("redis connection info is not configured")

                ip_ports = (
                    self._ip_ports
                    if isinstance(self._ip_ports, list)
                    else self._ip_ports.split(",")
                )
                if len(ip_ports) > 1:
                    startup_nodes = []
                    for ip_port in ip_ports:
                        ip, port = ip_port.split(":")
                        startup_nodes.append({"host": ip, "port": port})

                    if self._service_name:
                        # log.debug("using redis sentinel mode")
                        hosts = [(node["host"], node["port"]) for node in startup_nodes]
                        sentinel = Sentinel(hosts, socket_timeout=3, **self._kwargs)
                        self._redis = sentinel.master_for(
                            self._service_name,
                            password=self._user_pass,
                            db=self._db,
                            redis_class=redis.StrictRedis,
                            decode_responses=self._decode_responses,
                            max_connections=self._max_connections,
                            **self._kwargs,
                        )

                    else:
                        # log.debug("using redis cluster mode")
                        self._redis = RedisCluster(
                            startup_nodes=startup_nodes,
                            decode_responses=self._decode_responses,
                            password=self._user_pass,
                            max_connections=self._max_connections,
                            **self._kwargs,
                        )

                    self._is_redis_cluster = True
                else:
                    ip, port = ip_ports[0].split(":")
                    self._redis = redis.StrictRedis(
                        host=ip,
                        port=port,
                        db=self._db,
                        password=self._user_pass,
                        decode_responses=self._decode_responses,
                        max_connections=self._max_connections,
                        **self._kwargs,
                    )
                    self._is_redis_cluster = False
            else:
                self._redis = redis.StrictRedis.from_url(
                    self._url, decode_responses=self._decode_responses
                )
                self._is_redis_cluster = False

        except Exception as e:
            raise
        else:
            # if not self._url:
            #     log.debug("connected to redis %s db%s" % (self._ip_ports, self._db))
            # else:
            #     log.debug("connected to redis %s" % (self._url))
            pass

        return self.__redis.ping()  # do not write self._redis.ping() here, or it would recurse
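The method above chooses between three client types: several ip:port entries plus a service_name go to the Sentinel branch, several entries without one go to RedisCluster, and a single entry goes to plain redis.StrictRedis. A small standalone sketch of how the comma-separated string is turned into the startup_nodes list used by the cluster branch; the addresses are illustrative.

# Sketch of the ip_ports parsing performed above; the addresses are placeholders.
ip_ports = "10.0.0.1:7000,10.0.0.2:7000,10.0.0.3:7000"
startup_nodes = []
for ip_port in ip_ports.split(","):
    ip, port = ip_port.split(":")
    startup_nodes.append({"host": ip, "port": port})
print(startup_nodes)
# [{'host': '10.0.0.1', 'port': '7000'}, {'host': '10.0.0.2', 'port': '7000'}, {'host': '10.0.0.3', 'port': '7000'}]
# More than one node and no service_name -> RedisCluster; a single node -> redis.StrictRedis;
# a service_name -> the Sentinel branch.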
Example 19
def compute(user_set):
    bulk_action = []
    count_c = 0
    
    startup_nodes = [{"host": REDIS_VENT_HOST, "port": REDIS_VENT_PORT}]
    weibo_redis = RedisCluster(startup_nodes = startup_nodes)
    for user in user_set:
        user_info = weibo_redis.hgetall(user)#dict
        origin_weibo_retweeted_timestamp = []
        origin_weibo_retweeted_count = []
        origin_weibo_list = []
        origin_weibo_comment_timestamp = []
        origin_weibo_comment_count = []
        retweeted_weibo_retweeted_count = []
        retweeted_weibo_comment_count= []
        retweeted_weibo_retweeted_timestamp = []
        retweeted_weibo_comment_timestamp = []
        retweeted_weibo_list = []
        user_fansnum = 0
        for key in user_info.iterkeys():
            if 'origin_weibo_retweeted_timestamp_' in key:
                origin_weibo_retweeted_timestamp.append(key.split('_')[-1])
            elif 'origin_weibo_comment_timestamp_' in key:
                origin_weibo_comment_timestamp.append(key.split('_')[-1])
            elif 'retweeted_weibo_retweeted_timestamp_' in key:
                retweeted_weibo_retweeted_timestamp.append(key.split('_')[-1])
            elif 'retweeted_weibo_comment_timestamp_' in key:
                retweeted_weibo_comment_timestamp.append(key.split('_')[-1])
            elif '_origin_weibo_timestamp' in key:
                origin_weibo_list.append(key.split('_')[0])
            elif '_retweeted_weibo_timestamp' in key:
                retweeted_weibo_list.append(key.split('_')[0])
            elif '_origin_weibo_retweeted' in key:
                origin_weibo_retweeted_count.append(key.split('_')[0])
            elif '_origin_weibo_comment' in key:
                origin_weibo_comment_count.append(key.split('_')[0])
            elif '_retweeted_weibo_retweeted' in key:
                retweeted_weibo_retweeted_count.append(key.split('_')[0])
            elif '_retweeted_weibo_comment' in key:
                retweeted_weibo_comment_count.append(key.split('_')[0])
            elif 'fansnum' in key:
                user_fansnum = user_info[key]
            else:
                print user_info, key

        user_origin_weibo_timestamp = [] 
        if len(origin_weibo_list):
            for i in range(len(origin_weibo_list)):
                timestamp = user_info[str(origin_weibo_list[i])+'_origin_weibo_timestamp']
                user_origin_weibo_timestamp.append(timestamp)
        

        user_retweeted_weibo_timestamp = [] 
        if len(retweeted_weibo_list):
            for i in range(len(retweeted_weibo_list)):
                timestamp = user_info[str(retweeted_weibo_list[i])+'_retweeted_weibo_timestamp']
                user_retweeted_weibo_timestamp.append(timestamp)

        user_id = str(user)
        origin_weibo_retweeted_detail, origin_weibo_retweeted_total_number, origin_weibo_retweeted_top_number, origin_weibo_retweeted_average_number, origin_weibo_top_retweeted_id=statis_origin_weibo_retweeted(origin_weibo_retweeted_count, user_info, origin_weibo_list)

        origin_weibo_comment_detail, origin_weibo_comment_total_number, origin_weibo_comment_top_number, origin_weibo_comment_average_number, origin_weibo_top_comment_id=statis_origin_weibo_comment(origin_weibo_comment_count, user_info, origin_weibo_list)

        retweeted_weibo_retweeted_detail, retweeted_weibo_retweeted_total_number, retweeted_weibo_retweeted_top_number, retweeted_weibo_retweeted_average_number, retweeted_weibo_top_retweeted_id=statis_retweeted_weibo_retweeted(retweeted_weibo_retweeted_count, user_info, retweeted_weibo_list)

        retweeted_weibo_comment_detail, retweeted_weibo_comment_total_number, retweeted_weibo_comment_top_number, retweeted_weibo_comment_average_number, retweeted_weibo_top_comment_id=statis_retweeted_weibo_comment(retweeted_weibo_comment_count, user_info, retweeted_weibo_list)


        origin_weibo_retweeted_brust= activity_origin_weibo_retweeted(origin_weibo_retweeted_timestamp, user_info)
        origin_weibo_comment_brust= activity_origin_weibo_comment(origin_weibo_comment_timestamp, user_info)
        retweeted_weibo_retweeted_brust= activity_retweeted_weibo_retweeted(retweeted_weibo_retweeted_timestamp, user_info)
        retweeted_weibo_comment_brust= activity_retweeted_weibo_comment(retweeted_weibo_comment_timestamp, user_info)



        influence_origin_weibo_retweeted = influence_weibo_cal(origin_weibo_retweeted_total_number, origin_weibo_retweeted_average_number, origin_weibo_retweeted_top_number,origin_weibo_retweeted_brust)

        influence_origin_weibo_comment = influence_weibo_cal(origin_weibo_comment_total_number, origin_weibo_comment_average_number, origin_weibo_comment_top_number, origin_weibo_comment_brust)

        influence_retweeted_weibo_retweeted = influence_weibo_cal(retweeted_weibo_retweeted_total_number, retweeted_weibo_retweeted_average_number, retweeted_weibo_retweeted_top_number, retweeted_weibo_retweeted_brust)

        influence_retweeted_weibo_comment = influence_weibo_cal(retweeted_weibo_comment_total_number, retweeted_weibo_comment_average_number, retweeted_weibo_comment_top_number, retweeted_weibo_retweeted_brust)

        user_index = user_index_cal(origin_weibo_list, retweeted_weibo_list, user_fansnum, influence_origin_weibo_retweeted, influence_origin_weibo_comment, influence_retweeted_weibo_retweeted, influence_retweeted_weibo_comment)


        user_item = {}
        user_item['user_index'] = user_index
        user_item['user'] = user
        user_item['user_fansnum'] = user_fansnum
        user_item['origin_weibo_number'] = len(origin_weibo_list)
        user_item['retweeted_weibo_number'] = len(retweeted_weibo_list)

        user_item['origin_weibo_retweeted_total_number'] = origin_weibo_retweeted_total_number
        user_item['origin_weibo_retweeted_average_number'] = origin_weibo_retweeted_average_number
        user_item['origin_weibo_retweeted_top_number'] = origin_weibo_retweeted_top_number
        user_item['origin_weibo_retweeted_brust_average'] = origin_weibo_retweeted_brust[1]
        user_item['origin_weibo_top_retweeted_id'] = origin_weibo_top_retweeted_id
        user_item['origin_weibo_retweeted_brust_n'] = origin_weibo_retweeted_brust[0]
        #user_item['origin_weibo_retweeted_detail'] = origin_weibo_retweeted_detail

        user_item['origin_weibo_comment_total_number'] = origin_weibo_comment_total_number
        user_item['origin_weibo_comment_average_number'] = origin_weibo_comment_average_number
        user_item['origin_weibo_comment_top_number'] = origin_weibo_comment_top_number
        user_item['origin_weibo_comment_brust_n'] = origin_weibo_comment_brust[0]
        user_item['origin_weibo_comment_brust_average'] = origin_weibo_comment_brust[1]
        user_item['origin_weibo_top_comment_id'] = origin_weibo_top_comment_id
        #user_item['origin_weibo_comment_detail'] = origin_weibo_comment_detail

        user_item['retweeted_weibo_retweeted_total_number'] = retweeted_weibo_retweeted_total_number
        user_item['retweeted_weibo_retweeted_average_number'] = retweeted_weibo_retweeted_average_number
        user_item['retweeted_weibo_retweeted_top_number'] = retweeted_weibo_retweeted_top_number
        user_item['retweeted_weibo_retweeted_brust_n'] = retweeted_weibo_retweeted_brust[0]
        user_item['retweeted_weibo_retweeted_brust_average'] = retweeted_weibo_retweeted_brust[1]
        user_item['retweeted_weibo_top_retweeted_id'] = retweeted_weibo_top_retweeted_id
        #user_item['retweeted_weibo_retweeted_detail'] = retweeted_weibo_retweeted_detail

        user_item['retweeted_weibo_comment_total_number'] = retweeted_weibo_comment_total_number
        user_item['retweeted_weibo_comment_average_number'] = retweeted_weibo_comment_average_number
        user_item['retweeted_weibo_comment_top_number'] = retweeted_weibo_comment_top_number
        user_item['retweeted_weibo_comment_brust_n'] = retweeted_weibo_comment_brust[0]
        user_item['retweeted_weibo_comment_brust_average'] = retweeted_weibo_comment_brust[1]
        user_item['retweeted_weibo_top_comment_id'] = retweeted_weibo_top_comment_id
        #user_item['retweeted_weibo_comment_detail'] = retweeted_weibo_comment_detail


        x = expand_index_action(user_item)
        bulk_action.extend([x[0], x[1]])
        count_c += 1
        if count_c % 1000 == 0:
            while True:
                try:
                    es.bulk(bulk_action, index=es_index, doc_type='bci', timeout=30)
                    bulk_action = []
                    break
                except Exception,r:
                    print "bulk error"
            print count_c
Example 20
#import redis
from rediscluster import RedisCluster

file_name = 'member.txt'

#r_server = redis.Redis("localhost", 7000)
#r_server = r_server.pipeline()

startup_nodes = [{"host": "127.0.0.1", "port": "7000"}]
r_server = RedisCluster(startup_nodes=startup_nodes)

with open(file_name, 'r') as file:
        data = file.readlines()

        #r_server.select(1)
        r_server.set("member_counter", 0)

        for i, line in enumerate(data):
                col = line.split(",")
                household_id = col[0]
                gender = col[3]
                age = col[8]
                education = col[5]
                occupation = col[6]
                married = col[7][0]

                #r_server.sadd("household", household_id)
                #print 'Household #' + household_id + ' added'

                member_id = r_server.incr("member_counter")
                member_dict = {
Example 21
        "port": "8003"
    },
    {
        "host": "localhost",
        "port": "8004"
    },
    {
        "host": "localhost",
        "port": "8005"
    },
    {
        "host": "localhost",
        "port": "8006"
    },
]
rc = RedisCluster(startup_nodes=startup_nodes)


class RedisCache:  # API response cache
    def __init__(self, client, default_timeout=300, key_prefix=None):
        self._client = client  # decode_responses is not supported by RedisCache
        self.key_prefix = key_prefix or ''
        self.default_timeout = default_timeout

    def cached(self, timeout=None, key_prefix="view%s"):
        def decorator(f):
            @functools.wraps(f)
            def decorated_function(*args, **kwargs):
                cache_key = _make_cache_key()
                rv = self.get(cache_key)
                if rv is None:
Example 22
    minutes = xrange(app.config['ONLINE_LAST_MINUTES'])
    return redis.sunion(['online-users/%d' % (current - x)        # union of the per-minute 'online-users' sets for the last ONLINE_LAST_MINUTES minutes
                         for x in minutes])


Redis cluster:
sudo pip2 install redis-py-cluster

>>> from rediscluster import RedisCluster
>>> startup_nodes = [{'host': '172.19.1.106', 'port': '7001'},
                         {'host': '172.19.1.106', 'port': '7002'},
                         {'host': '172.19.1.106', 'port': '7003'},
                         {'host': '172.19.1.106', 'port': '7004'},
                         {'host': '172.19.1.106', 'port': '7005'},
                         {'host': '172.19.1.106', 'port': '7006'}]
>>> rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True)

# decode_responses=True: prevents values returned by get() from being `bytes`
# By default, published messages are encoded, so when you fetch a message you get encoded bytes.
# If you want them decoded automatically, pass decode_responses=True when creating the Redis client instance.
# (Translator's note: this option is not recommended, because if a value was stored pickle-serialized,
# client.get(key) will fail to decode it and raise UnicodeDecodeError.)


>>> rc.set("foo", "bar")
True
>>> rc.get("foo")
'bar'

HDEL: delete the given field from the hash stored at name, hdel(self, name, key)
HEXISTS: check whether the given field exists in the hash; returns a boolean, hexists(self, name, key)
HGET: get the value of the given field in the hash, hget(self, name, key)
HGETALL: get all field-value pairs of the hash as a Python dict, hgetall(self, name)
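A minimal sketch exercising the hash commands listed above against the cluster client rc created earlier in this example; the key and field names are made up.

# Sketch only: hash commands on the RedisCluster client `rc` from above.
rc.hset("user:1000", "name", "alice")
rc.hset("user:1000", "age", "30")
print(rc.hget("user:1000", "name"))     # 'alice' (decode_responses=True)
print(rc.hexists("user:1000", "age"))   # True
print(rc.hgetall("user:1000"))          # {'name': 'alice', 'age': '30'}
rc.hdel("user:1000", "age")
print(rc.hexists("user:1000", "age"))   # False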
Example 23
def redis_cluster_info():
    try:
        dt = time.strftime('%Y-%m-%d',time.localtime())
        tt = time.strftime('%H:%M:%S',time.localtime())
        ot = (datetime.datetime.now() - datetime.timedelta(days=7)).strftime('%Y-%m-%d')
        RC_JAVA = RedisCluster(startup_nodes=cluster_java_nodes, decode_responses=True)
        results = RC_JAVA.info()
        Redis_Key = 'redis_cluster_java_info'
        for host in results:
            try:
                if results[host]['role'] == 'master':
                    key_commands = '%s_redis_commands' % host
                    key_offset = '%s_redis_offset' % host
                    key_net_input = '%s_redis_net_input' % host
                    key_net_output = '%s_redis_net_output' % host
                    key_keys = '%s_redis_keys' % host
                    Master_Info = {}
                    Master_Info['maxmemory_policy'] = results[host]['maxmemory_policy']
                    Master_Info['used_memory_human'] = results[host]['used_memory_human']
                    Master_Info['slave_host'] = '%s:%s'%(results[host]['slave0']['ip'],results[host]['slave0']['port'])
                    Master_Info['slave_state'] = results[host]['slave0']['state']
                    Master_Info['rejected_connections'] = results[host]['rejected_connections']
                    Master_Info['redis_version'] = results[host]['redis_version']
                    Master_Info['redis_mode'] = results[host]['redis_mode']
                    Master_Info['uptime_in_days'] = results[host]['uptime_in_days']
                    Master_Info['space_keys'] = results[host]['db0']['keys']
                    old_offset = new_offset = int(results[host]['slave0']['offset'])
                    if RC.exists(key_offset):
                        old_offset = int(RC.get(key_offset))
                    RC.set(key_offset,new_offset)
                    Master_Info['slave_offset'] = new_offset - old_offset
                    # connected clients
                    connected_clients = results[host]['connected_clients']
                    # keys added since the last sample
                    old_keys = new_keys = int(results[host]['db0']['keys'])
                    if RC.exists(key_keys):
                        old_keys = int(RC.get(key_keys))
                    RC.set(key_keys,int(new_keys))
                    add_keys = new_keys - old_keys
                    # hit rate
                    HitRate = int(float(results[host]['keyspace_hits']) / (float(results[host]['keyspace_hits']) + float(results[host]['keyspace_misses'])) * 100)
                    # commands processed
                    old_commands = new_commands = int(results[host]['total_commands_processed'])
                    if RC.exists(key_commands):
                        old_commands = int(RC.get(key_commands))
                    RC.set(key_commands,int(new_commands))
                    commands = (new_commands - old_commands)/60
                    # inbound traffic
                    old_net_input = new_net_input = int(results[host]['total_net_input_bytes'])
                    if RC.exists(key_net_input):
                        old_net_input = int(RC.get(key_net_input))
                    RC.set(key_net_input,int(new_net_input))
                    net_input = (new_net_input - old_net_input)/1024/1024
                    # outbound traffic
                    old_net_output = new_net_output = int(results[host]['total_net_output_bytes'])
                    if RC.exists(key_net_output):
                        old_net_output = int(RC.get(key_net_output))
                    RC.set(key_net_output,int(new_net_output))
                    net_output = (new_net_output - old_net_output)/1024/1024
                    c = db_idc.idc_redis_cluster_info(getdate =dt,gettime =tt,master=host,add_keys=add_keys, connected_clients=connected_clients, HitRate=HitRate,commands=commands,net_input=net_input,net_output=net_output)
                    db_idc.DB.session.add(c)
                    db_idc.DB.session.commit()
                    db = db_idc.idc_redis_cluster_info
                    v = db.query.filter(db.getdate <= ot).all()
                    if v:
                        for c in v:
                            db_idc.DB.session.delete(c)
                            db_idc.DB.session.commit()
                    RC.hset(Redis_Key,host,Master_Info)
            except Exception as e:
                loging.write(e)
                continue
    except Exception as e:
        loging.write(e)
    finally:
        db_idc.DB.session.remove()
Example 24
File: db.py Project: pendyala/ceryx
class RedisClient:
    @staticmethod
    def from_config(path=None):
        """
        Returns a RedisClient, using the default configuration from Ceryx
        settings.
        """
        return RedisClient(
            settings.REDIS_HOST,
            settings.REDIS_PORT,
            settings.REDIS_PASSWORD,
            0,
            settings.REDIS_PREFIX,
            settings.REDIS_TIMEOUT,
        )

    def __init__(self, host, port, password, db, prefix, timeout):
        self.client = RedisCluster(host=host, port=port, decode_responses=True)
        print("Redis HOST ===> ")
        print(host)
        self.prefix = prefix

    def _prefixed_key(self, key):
        return f"{self.prefix}:{key}"

    def _route_key(self, source):
        return self._prefixed_key(f"routes:{source}")

    def _settings_key(self, source):
        return self._prefixed_key(f"settings:{source}")

    def _delete_target(self, host):
        key = self._route_key(host)
        self.client.delete(key)

    def _delete_settings(self, host):
        key = self._settings_key(host)
        self.client.delete(key)

    def _lookup_target(self, host, raise_exception=False):
        print("HOST === " + host)
        key = self._route_key(host)
        target = self.client.get(key)

        if target is None and raise_exception:
            raise exceptions.NotFound("Route not found.")

        return target

    def _lookup_settings(self, host):
        key = self._settings_key(host)
        return self.client.hgetall(key)

    def lookup_hosts(self, pattern="*"):
        lookup_pattern = self._route_key(pattern)
        print("---> lookup_pattern --->", lookup_pattern)
        left_padding = len(lookup_pattern) - 1
        print("---> left_padding ---> ", left_padding)
        keys = self.client.keys(lookup_pattern)
        print("Keys ---->")
        print(keys)
        print(*keys, sep=", ")
        newKeys = []
        for key in range(len(keys)):
            temp = keys[key]
            newKeys.append(temp[left_padding:])
        #return [_str(key)[left_padding:] for key in keys]
        return newKeys

    def _set_target(self, host, target):
        key = self._route_key(host)
        self.client.set(key, target)

    def _set_settings(self, host, settings):
        key = self._settings_key(host)
        self.client.hmset(key, settings)

    def _set_route(self, route: schemas.Route):
        redis_data = route.to_redis()
        self._set_target(route.source, redis_data["target"])
        self._set_settings(route.source, redis_data["settings"])
        return route

    def get_route(self, host):
        print("in get_route ....... host ===> ")
        print(host)
        target = self._lookup_target(host, raise_exception=True)
        settings = self._lookup_settings(host)
        route = schemas.Route.from_redis({
            "source": host,
            "target": target,
            "settings": settings
        })
        return route

    def list_routes(self):
        hosts = self.lookup_hosts()
        print("hosts ===> ")
        print(*hosts, sep=", ")
        routes = [self.get_route(host) for host in hosts]
        return routes

    def create_route(self, data: dict):
        route = schemas.Route.validate(data)
        return self._set_route(route)

    def update_route(self, host: str, data: dict):
        data["source"] = host
        route = schemas.Route.validate(data)
        return self._set_route(route)

    def delete_route(self, host: str):
        self._delete_target(host)
        self._delete_settings(host)
Example 25
class Analyzer(Thread):
    def __init__(self, parent_pid):
        """
        Initialize the Analyzer
        """
        super(Analyzer, self).__init__()
        #self.redis_conn = StrictRedis(unix_socket_path = settings.REDIS_SOCKET_PATH)
        self.redis_conn = RedisCluster(startup_nodes=settings.startup_nodes, decode_responses=True)
        self.daemon = True
        self.parent_pid = parent_pid
        self.current_pid = getpid()
        self.anomalous_metrics = Manager().list()
        self.exceptions_q = Queue()
        self.anomaly_breakdown_q = Queue()

    def check_if_parent_is_alive(self):
        """
        Self explanatory
        """
        try:
            kill(self.current_pid, 0)
            kill(self.parent_pid, 0)
        except:
            exit(0)

    def send_graphite_metric(self, name, value):
        if settings.GRAPHITE_HOST != '':
            sock = socket.socket()
            sock.connect((settings.GRAPHITE_HOST, settings.CARBON_PORT))
            sock.sendall('%s %s %i\n' % (name, value, time()))
            sock.close()
            return True

        return False

    def spin_process(self, i, unique_metrics):
        """
        Assign a bunch of metrics for a process to analyze.
        """
        # Discover assigned metrics
        keys_per_processor = int(ceil(float(len(unique_metrics)) / float(settings.ANALYZER_PROCESSES)))
        if i == settings.ANALYZER_PROCESSES:
            assigned_max = len(unique_metrics)
        else:
            assigned_max = i * keys_per_processor
        assigned_min = assigned_max - keys_per_processor
        assigned_keys = range(assigned_min, assigned_max)

        # Compile assigned metrics
        assigned_metrics = [unique_metrics[index] for index in assigned_keys]

        # Check if this process is unnecessary
        if len(assigned_metrics) == 0:
            return

        # Multi get series
        raw_assigned = self.redis_conn.mget(assigned_metrics)

        # Make process-specific dicts
        exceptions = defaultdict(int)
        anomaly_breakdown = defaultdict(int)

        # Distill timeseries strings into lists
        for i, metric_name in enumerate(assigned_metrics):
            self.check_if_parent_is_alive()

            try:
                raw_series = raw_assigned[i]
                unpacker = Unpacker(use_list = False)
                unpacker.feed(raw_series)
                timeseries = list(unpacker)
                anomalous, ensemble, datapoint = run_selected_algorithm(timeseries, metric_name)

                # If it's anomalous, add it to list
                if anomalous:
                    base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
                    metric = [datapoint, base_name]
                    self.anomalous_metrics.append(metric)

                    # Get the anomaly breakdown - who returned True?
                    for index, value in enumerate(ensemble):
                        if value:
                            algorithm = settings.ALGORITHMS[index]
                            anomaly_breakdown[algorithm] += 1

            # It could have been deleted by the Roomba
            except TypeError:
                exceptions['DeletedByRoomba'] += 1
            except TooShort:
                exceptions['TooShort'] += 1
            except Stale:
                exceptions['Stale'] += 1
            except Boring:
                exceptions['Boring'] += 1
            except:
                exceptions['Other'] += 1
                logger.info(traceback.format_exc())

        # Add values to the queue so the parent process can collate
        for key, value in anomaly_breakdown.items():
            self.anomaly_breakdown_q.put((key, value))

        for key, value in exceptions.items():
            self.exceptions_q.put((key, value))

    def run(self):
        """
        Called when the process initializes.
        """
        while 1:
            now = time()

            # Make sure Redis is up
            try:
                self.redis_conn.ping()
            except:
                logger.error('skyline can\'t connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
                sleep(10)
                # self.redis_conn = StrictRedis(unix_socket_path = settings.REDIS_SOCKET_PATH)
                self.redis_conn = RedisCluster(startup_nodes=settings.startup_nodes, decode_responses=True)
                continue

            # Discover unique metrics
            unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))

            if len(unique_metrics) == 0:
                logger.info('no metrics in redis. try adding some - see README')
                sleep(10)
                continue

            # Spawn processes
            pids = []
            for i in range(1, settings.ANALYZER_PROCESSES + 1):
                if i > len(unique_metrics):
                    logger.info('WARNING: skyline is set for more cores than needed.')
                    break

                p = Process(target=self.spin_process, args=(i, unique_metrics))
                pids.append(p)
                p.start()

            # Send wait signal to zombie processes
            for p in pids:
                p.join()

            # Grab data from the queue and populate dictionaries
            exceptions = dict()
            anomaly_breakdown = dict()
            while 1:
                try:
                    key, value = self.anomaly_breakdown_q.get_nowait()
                    if key not in anomaly_breakdown.keys():
                        anomaly_breakdown[key] = value
                    else:
                        anomaly_breakdown[key] += value
                except Empty:
                    break

            while 1:
                try:
                    key, value = self.exceptions_q.get_nowait()
                    if key not in exceptions.keys():
                        exceptions[key] = value
                    else:
                        exceptions[key] += value
                except Empty:
                    break

            # Send alerts
            if settings.ENABLE_ALERTS:
                for alert in settings.ALERTS:
                    for metric in self.anomalous_metrics:
                        if alert[0] in metric[1]:
                            cache_key = 'last_alert.%s.%s' % (alert[1], metric[1])
                            try:
                                last_alert = self.redis_conn.get(cache_key)
                                if not last_alert:
                                    self.redis_conn.setex(cache_key, alert[2], packb(metric[0]))
                                    trigger_alert(alert, metric)

                            except Exception as e:
                                logger.error("couldn't send alert: %s" % e)

            # Write anomalous_metrics to static webapp directory
            filename = path.abspath(path.join(path.dirname(__file__), '..', settings.ANOMALY_DUMP))
            with open(filename, 'w') as fh:
                # Make it JSONP with a handle_data() function
                anomalous_metrics = list(self.anomalous_metrics)
                anomalous_metrics.sort(key=operator.itemgetter(1))
                fh.write('handle_data(%s)' % anomalous_metrics)

            # Log progress
            logger.info('seconds to run    :: %.2f' % (time() - now))
            logger.info('total metrics     :: %d' % len(unique_metrics))
            logger.info('total analyzed    :: %d' % (len(unique_metrics) - sum(exceptions.values())))
            logger.info('total anomalies   :: %d' % len(self.anomalous_metrics))
            logger.info('exception stats   :: %s' % exceptions)
            logger.info('anomaly breakdown :: %s' % anomaly_breakdown)

            # Log to Graphite
            self.send_graphite_metric('skyline.analyzer.run_time', '%.2f' % (time() - now))
            self.send_graphite_metric('skyline.analyzer.total_analyzed', '%.2f' % (len(unique_metrics) - sum(exceptions.values())))

            # Check canary metric
            raw_series = self.redis_conn.get(settings.FULL_NAMESPACE + settings.CANARY_METRIC)
            if raw_series is not None:
                unpacker = Unpacker(use_list = False)
                unpacker.feed(raw_series)
                timeseries = list(unpacker)
                time_human = (timeseries[-1][0] - timeseries[0][0]) / 3600
                projected = 24 * (time() - now) / time_human

                logger.info('canary duration   :: %.2f' % time_human)
                self.send_graphite_metric('skyline.analyzer.duration', '%.2f' % time_human)
                self.send_graphite_metric('skyline.analyzer.projected', '%.2f' % projected)

            # Reset counters
            self.anomalous_metrics[:] = []

            # Sleep if it went too fast
            if time() - now < 5:
                logger.info('sleeping due to low run time...')
                sleep(10)
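spin_process above splits unique_metrics across ANALYZER_PROCESSES workers via keys_per_processor. Here is a standalone worked example with assumed numbers (10 metrics, 3 processes) showing which indexes each process would analyze.

# Worked example of the partitioning in spin_process, with assumed numbers.
from math import ceil

ANALYZER_PROCESSES = 3
unique_metrics = ['metric_%d' % n for n in range(10)]
keys_per_processor = int(ceil(float(len(unique_metrics)) / float(ANALYZER_PROCESSES)))
for i in range(1, ANALYZER_PROCESSES + 1):
    if i == ANALYZER_PROCESSES:
        assigned_max = len(unique_metrics)
    else:
        assigned_max = i * keys_per_processor
    assigned_min = assigned_max - keys_per_processor
    print("%d -> %s" % (i, list(range(assigned_min, assigned_max))))
# 1 -> [0, 1, 2, 3]
# 2 -> [4, 5, 6, 7]
# 3 -> [6, 7, 8, 9]   (the last process covers the tail, overlapping indexes 6 and 7)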
Example 26
def test_cluster_of_one_instance():
    """
    Test a cluster that starts with only one redis server and ends up with
    only one server.

    Another redis server joins the cluster, holds slot 0 for a while, and
    eventually quits the cluster. The RedisCluster instance may get confused
    when the slot mapping and nodes change during the test.
    """
    with patch.object(RedisCluster, 'parse_response') as parse_response_mock:
        with patch.object(NodeManager, 'initialize', autospec=True) as init_mock:
            def side_effect(self, *args, **kwargs):
                def ok_call(self, *args, **kwargs):
                    assert self.port == 7007
                    return "OK"
                parse_response_mock.side_effect = ok_call

                resp = ResponseError()
                resp.args = ('CLUSTERDOWN The cluster is down. Use CLUSTER INFO for more information',)
                resp.message = 'CLUSTERDOWN The cluster is down. Use CLUSTER INFO for more information'
                raise resp

            def side_effect_rebuild_slots_cache(self):
                # first rebuild of the slots cache points everything at 7006; the next rebuild will point to 7007
                self.nodes = [{'host': '127.0.0.1', 'server_type': 'master', 'port': 7006, 'name': '127.0.0.1:7006'}]
                self.slots = {}

                for i in range(0, 16383):
                    self.slots[i] = {
                        'host': '127.0.0.1',
                        'server_type': 'master',
                        'port': 7006,
                        'name': '127.0.0.1:7006',
                    }

                # Second call should map all to 7007
                def map_7007(self):
                    self.nodes = [{'host': '127.0.0.1', 'server_type': 'master', 'port': 7007, 'name': '127.0.0.1:7007'}]
                    self.slots = {}

                    for i in range(0, 16383):
                        self.slots[i] = {
                            'host': '127.0.0.1',
                            'server_type': 'master',
                            'port': 7007,
                            'name': '127.0.0.1:7007',
                        }

                # First call should map all to 7006
                init_mock.side_effect = map_7007

            parse_response_mock.side_effect = side_effect
            init_mock.side_effect = side_effect_rebuild_slots_cache

            rc = RedisCluster(host='127.0.0.1', port=7006)
            rc.set("foo", "bar")

            #####
            # Test that CLUSTERDOWN is handled the same way when used via pipeline

            parse_response_mock.side_effect = side_effect
            init_mock.side_effect = side_effect_rebuild_slots_cache

            rc = RedisCluster(host='127.0.0.1', port=7006)
            p = rc.pipeline()
            p.set("bar", "foo")
            p.execute()
Example 27
startup_node = [{
    "host": "10.1.49.237",
    "port": "7001"
}, {
    "host": "10.1.49.239",
    "port": "7003"
}, {
    "host": "10.1.49.239",
    "port": "7006"
}, {
    "host": "10.1.49.237",
    "port": "7008"
}]
#startup_nodes = [{"host": "10.1.105.146", "port": "7003"},{"host": "10.1.105.145", "port": "7005"},{"host": "10.1.105.146", "port": "7006"},{"host": "10.1.105.144", "port": "7008"}]
rc = RedisCluster(startup_nodes=startup_node,
                  decode_responses=True,
                  readonly_mode=True)
# rc.set('chen', "chen.cc")
# print(redis.VERSION)
# print(rc.hgetall('MessageWaitingReport_20190821_02'))
# print(rc1.hget(name = 'MessageWaitingReport_20190821_02',key='CMPPClient_dx_95567:-8455503350619681570' ) )
# print(rc.keys())
# print(rc.dbsize())
# print(rc.hmget(name="*MessageWaitingReport*",keys="*SME*"))
# print(rc.hscan("*MessageWaitingReport*"))
rc_key = rc.keys()
rc_db = rc.dbsize()

for i in rc_key:
    # only dump hashes whose key contains 'MessageWaitingReport'
    if 'MessageWaitingReport' in i:
        print(rc.hgetall(i))
Example 28
class RedisDB:
    def __init__(
        self,
        ip_ports=None,
        db=None,
        user_pass=None,
        url=None,
        decode_responses=True,
        service_name=None,
        max_connections=32,
        **kwargs,
    ):
        """
        A wrapper around redis
        Args:
            ip_ports: ip:port; multiple entries may be given as a list or comma-separated, e.g. ip1:port1,ip2:port2 or ["ip1:port1", "ip2:port2"]
            db:
            user_pass:
            url:
            decode_responses:
            service_name: used for redis sentinel mode
        """

        # Values in setting may be changed later, so they cannot be bound as defaults in the signature; load them here instead.
        if ip_ports is None:
            ip_ports = setting.REDISDB_IP_PORTS
        if db is None:
            db = setting.REDISDB_DB
        if user_pass is None:
            user_pass = setting.REDISDB_USER_PASS
        if service_name is None:
            service_name = setting.REDISDB_SERVICE_NAME

        self._is_redis_cluster = False

        self.__redis = None
        self._url = url
        self._ip_ports = ip_ports
        self._db = db
        self._user_pass = user_pass
        self._decode_responses = decode_responses
        self._service_name = service_name
        self._max_connections = max_connections
        self._kwargs = kwargs
        self.get_connect()

    def __repr__(self):
        if self._url:
            return "<Redisdb url:{}>".format(self._url)

        return "<Redisdb ip_ports: {} db:{} user_pass:{}>".format(
            self._ip_ports, self._db, self._user_pass
        )

    @property
    def _redis(self):
        try:
            if not self.__redis.ping():
                raise ConnectionError("unable to connect to redis")
        except:
            self._reconnect()

        return self.__redis

    @_redis.setter
    def _redis(self, val):
        self.__redis = val

    def get_connect(self):
        # Acquire the database connection
        try:
            if not self._url:
                if not self._ip_ports:
                    raise Exception("未设置 redis 连接信息")

                ip_ports = (
                    self._ip_ports
                    if isinstance(self._ip_ports, list)
                    else self._ip_ports.split(",")
                )
                if len(ip_ports) > 1:
                    startup_nodes = []
                    for ip_port in ip_ports:
                        ip, port = ip_port.split(":")
                        startup_nodes.append({"host": ip, "port": port})

                    if self._service_name:
                        # log.debug("使用redis哨兵模式")
                        hosts = [(node["host"], node["port"]) for node in startup_nodes]
                        sentinel = Sentinel(hosts, socket_timeout=3, **self._kwargs)
                        self._redis = sentinel.master_for(
                            self._service_name,
                            password=self._user_pass,
                            db=self._db,
                            redis_class=redis.StrictRedis,
                            decode_responses=self._decode_responses,
                            max_connections=self._max_connections,
                            **self._kwargs,
                        )

                    else:
                        # log.debug("使用redis集群模式")
                        self._redis = RedisCluster(
                            startup_nodes=startup_nodes,
                            decode_responses=self._decode_responses,
                            password=self._user_pass,
                            max_connections=self._max_connections,
                            **self._kwargs,
                        )

                    self._is_redis_cluster = True
                else:
                    ip, port = ip_ports[0].split(":")
                    self._redis = redis.StrictRedis(
                        host=ip,
                        port=port,
                        db=self._db,
                        password=self._user_pass,
                        decode_responses=self._decode_responses,
                        max_connections=self._max_connections,
                        **self._kwargs,
                    )
                    self._is_redis_cluster = False
            else:
                self._redis = redis.StrictRedis.from_url(
                    self._url, decode_responses=self._decode_responses
                )
                self._is_redis_cluster = False

        except Exception as e:
            raise
        else:
            # if not self._url:
            #     log.debug("connected to redis %s db%s" % (self._ip_ports, self._db))
            # else:
            #     log.debug("connected to redis %s" % (self._url))
            pass

        return self.__redis.ping()  # do not use self._redis.ping() here, or the property would recurse

    @classmethod
    def from_url(cls, url):
        """

        Args:
            url: redis://[[username]:[password]]@[host]:[port]/[db]

        Returns:

        """
        return cls(url=url)

    def sadd(self, table, values):
        """
        @summary: store data in an unordered set, de-duplicated
        ---------
        @param table:
        @param values: value(s); a list or a single value
        ---------
        @result: returns 0 if the value already exists, otherwise stores it and returns 1. Batch adds return None
        """

        if isinstance(values, list):
            pipe = self._redis.pipeline(
                transaction=True
            )  # by default redis-py checks a connection out of the pool and returns it after every request; a pipeline bundles several commands into one request, and by default a pipeline executes atomically

            if not self._is_redis_cluster:
                pipe.multi()
            for value in values:
                pipe.sadd(table, value)
            pipe.execute()

        else:
            return self._redis.sadd(table, values)

    def sget(self, table, count=1, is_pop=True):
        """
        Returns a list, e.g. ['1'] or []
        @param table:
        @param count:
        @param is_pop:
        @return:
        """

        datas = []
        if is_pop:
            count = count if count <= self.sget_count(table) else self.sget_count(table)
            if count:
                if count > 1:
                    pipe = self._redis.pipeline(
                        transaction=True
                    )  # by default redis-py checks a connection out of the pool and returns it after every request; a pipeline bundles several commands into one request, and by default a pipeline executes atomically

                    if not self._is_redis_cluster:
                        pipe.multi()
                    while count:
                        pipe.spop(table)
                        count -= 1
                    datas = pipe.execute()

                else:
                    datas.append(self._redis.spop(table))

        else:
            datas = self._redis.srandmember(table, count)

        return datas

    def srem(self, table, values):
        """
        @summary: remove the given element(s) from the set
        ---------
        @param table:
        @param values: a single value or a list
        ---------
        @result:
        """

        if isinstance(values, list):
            pipe = self._redis.pipeline(
                transaction=True
            )  # by default redis-py checks a connection out of the pool and returns it after every request; a pipeline bundles several commands into one request, and by default a pipeline executes atomically

            if not self._is_redis_cluster:
                pipe.multi()
            for value in values:
                pipe.srem(table, value)
            pipe.execute()
        else:
            self._redis.srem(table, values)

    def sget_count(self, table):
        return self._redis.scard(table)

    def sdelete(self, table):
        """
        @summary: delete a large set key (a table holding a lot of data)
        For a big set, use SSCAN to scan 500 members at a time and remove them one by one with SREM.
        Deleting such a key directly with DEL can block Redis, causing failovers and application crashes.
        ---------
        @param table:
        ---------
        @result:
        """

        # A SCAN cursor of 0 starts a new iteration; when the server returns a cursor of 0, the iteration has finished
        cursor = "0"
        while cursor != 0:
            cursor, data = self._redis.sscan(table, cursor=cursor, count=500)
            for item in data:
                # pipe.srem(table, item)
                self._redis.srem(table, item)

            # pipe.execute()

    def sismember(self, table, key):
        "Return a boolean indicating if ``value`` is a member of set ``name``"
        return self._redis.sismember(table, key)

    def zadd(self, table, values, prioritys=0):
        """
        @summary: store data in a sorted set, de-duplicated (existing values have their score updated)
        ---------
        @param table:
        @param values: value(s); a list or a single value
        @param prioritys: priority; double, a list or a single value. Entries are ordered by this score and smaller values rank first. Optional, defaults to 0
        ---------
        @result: returns 0 if the value already exists, otherwise stores it and returns 1. Batch adds return [0, 1 ...]
        """
        if isinstance(values, list):
            if not isinstance(prioritys, list):
                prioritys = [prioritys] * len(values)
            else:
                assert len(values) == len(prioritys), "values and prioritys must correspond one to one"

            pipe = self._redis.pipeline(transaction=True)

            if not self._is_redis_cluster:
                pipe.multi()
            for value, priority in zip(values, prioritys):
                pipe.execute_command(
                    "ZADD", table, priority, value
                )  # for compatibility with both redis 2.x and 3.x clients
            return pipe.execute()

        else:
            return self._redis.execute_command(
                "ZADD", table, prioritys, values
            )  # for compatibility with both redis 2.x and 3.x clients

    def zget(self, table, count=1, is_pop=True):
        """
        @summary: fetch data from the sorted set, lowest scores (highest priority) first
        ---------
        @param table:
        @param count: number of items; -1 returns everything
        @param is_pop: whether to remove the fetched items from the set, defaults to True
        ---------
        @result: a list
        """

        start_pos = 0  # inclusive
        end_pos = count - 1 if count > 0 else count

        pipe = self._redis.pipeline(
            transaction=True
        )  # by default redis-py checks a connection out of the pool and returns it after every request; a pipeline bundles several commands into one request, and by default a pipeline executes atomically

        if not self._is_redis_cluster:
            pipe.multi()  # mark the start of a transaction, see http://www.runoob.com/redis/redis-transactions.html
        pipe.zrange(table, start_pos, end_pos)  # fetch the values
        if is_pop:
            pipe.zremrangebyrank(table, start_pos, end_pos)  # delete them
        results, *count = pipe.execute()
        return results

    def zremrangebyscore(self, table, priority_min, priority_max):
        """
        Remove members by score, inclusive range
        @param table:
        @param priority_min:
        @param priority_max:
        @return: the number of members removed
        """
        return self._redis.zremrangebyscore(table, priority_min, priority_max)

    def zrangebyscore(self, table, priority_min, priority_max, count=None, is_pop=True):
        """
        @summary: return data within the given score range, inclusive
        ---------
        @param table:
        @param priority_min: smaller scores rank first
        @param priority_max:
        @param count: number of items to fetch; empty means everything within the score range
        @param is_pop: whether to delete the fetched items
        ---------
        @result:
        """

        # use a lua script to keep the operation atomic
        lua = """
            -- local key = KEYS[1]
            local min_score = ARGV[2]
            local max_score = ARGV[3]
            local is_pop = ARGV[4]
            local count = ARGV[5]

            -- fetch the values
            local datas = nil
            if count then
                datas = redis.call('zrangebyscore', KEYS[1], min_score, max_score, 'limit', 0, count)
            else
                datas = redis.call('zrangebyscore', KEYS[1], min_score, max_score)
            end

            -- delete the values just fetched from redis
            if (is_pop) then
                for i=1, #datas do
                    redis.call('zrem', KEYS[1], datas[i])
                end
            end


            return datas

        """
        cmd = self._redis.register_script(lua)
        if count:
            res = cmd(
                keys=[table], args=[table, priority_min, priority_max, is_pop, count]
            )
        else:
            res = cmd(keys=[table], args=[table, priority_min, priority_max, is_pop])

        return res

    def zrangebyscore_increase_score(
        self, table, priority_min, priority_max, increase_score, count=None
    ):
        """
        @summary: return data within the given score range, inclusive, and adjust their scores
        ---------
        @param table:
        @param priority_min: minimum score
        @param priority_max: maximum score
        @param increase_score: score delta; a positive value is added to the current score, a negative value subtracted
        @param count: number of items to fetch; empty means everything within the score range
        ---------
        @result:
        """

        # use a lua script to keep the operation atomic
        lua = """
            -- local key = KEYS[1]
            local min_score = ARGV[1]
            local max_score = ARGV[2]
            local increase_score = ARGV[3]
            local count = ARGV[4]

            -- fetch the values
            local datas = nil
            if count then
                datas = redis.call('zrangebyscore', KEYS[1], min_score, max_score, 'limit', 0, count)
            else
                datas = redis.call('zrangebyscore', KEYS[1], min_score, max_score)
            end

            -- adjust the priority
            for i=1, #datas do
                redis.call('zincrby', KEYS[1], increase_score, datas[i])
            end

            return datas

        """
        cmd = self._redis.register_script(lua)
        if count:
            res = cmd(
                keys=[table], args=[priority_min, priority_max, increase_score, count]
            )
        else:
            res = cmd(keys=[table], args=[priority_min, priority_max, increase_score])

        return res

    def zrangebyscore_set_score(
        self, table, priority_min, priority_max, score, count=None
    ):
        """
        @summary: return data within the given score range, inclusive, and set their scores
        ---------
        @param table:
        @param priority_min: minimum score
        @param priority_max: maximum score
        @param score: the score to set
        @param count: number of items to fetch; empty means everything within the score range
        ---------
        @result:
        """

        # use a lua script to keep the operation atomic
        lua = """
            -- local key = KEYS[1]
            local min_score = ARGV[1]
            local max_score = ARGV[2]
            local set_score = ARGV[3]
            local count = ARGV[4]

            -- fetch the values
            local datas = nil
            if count then
                datas = redis.call('zrangebyscore', KEYS[1], min_score, max_score, 'withscores','limit', 0, count)
            else
                datas = redis.call('zrangebyscore', KEYS[1], min_score, max_score, 'withscores')
            end

            local real_datas = {} -- collected data
            -- adjust the priority
            for i=1, #datas, 2 do
               local data = datas[i]
               local score = datas[i+1]

               table.insert(real_datas, data) -- collect the data

               redis.call('zincrby', KEYS[1], set_score - score, datas[i])
            end

            return real_datas

        """
        cmd = self._redis.register_script(lua)
        if count:
            res = cmd(keys=[table], args=[priority_min, priority_max, score, count])
        else:
            res = cmd(keys=[table], args=[priority_min, priority_max, score])

        return res

    def zget_count(self, table, priority_min=None, priority_max=None):
        """
        @summary: get the number of items in the table
        ---------
        @param table:
        @param priority_min: lower bound of the score range (inclusive)
        @param priority_max: upper bound of the score range (inclusive)
        ---------
        @result:
        """

        if priority_min is not None and priority_max is not None:
            return self._redis.zcount(table, priority_min, priority_max)
        else:
            return self._redis.zcard(table)

    def zrem(self, table, values):
        """
        @summary: remove the given element(s) from the sorted set
        ---------
        @param table:
        @param values: a single value or a list
        ---------
        @result:
        """

        if isinstance(values, list):
            self._redis.zrem(table, *values)
        else:
            self._redis.zrem(table, values)

    def zexists(self, table, values):
        """
        Use zscore to check whether the element(s) exist
        @param values:
        @return:
        """

        is_exists = []

        if isinstance(values, list):
            pipe = self._redis.pipeline(
                transaction=True
            )  # by default redis-py checks a connection out of the pool and returns it after every request; a pipeline bundles several commands into one request, and by default a pipeline executes atomically
            pipe.multi()
            for value in values:
                pipe.zscore(table, value)
            is_exists_temp = pipe.execute()
            for is_exist in is_exists_temp:
                if is_exist != None:
                    is_exists.append(1)
                else:
                    is_exists.append(0)

        else:
            is_exists = self._redis.zscore(table, values)
            is_exists = 1 if is_exists != None else 0

        return is_exists

    def lpush(self, table, values):

        if isinstance(values, list):
            pipe = self._redis.pipeline(
                transaction=True
            )  # by default redis-py checks a connection out of the pool and returns it after every request; a pipeline bundles several commands into one request, and by default a pipeline executes atomically

            if not self._is_redis_cluster:
                pipe.multi()
            for value in values:
                pipe.rpush(table, value)
            pipe.execute()

        else:
            return self._redis.rpush(table, values)

    def lpop(self, table, count=1):
        """
        @summary:
        ---------
        @param table:
        @param count:
        ---------
        @result: returns a list when count > 1
        """

        datas = None

        count = count if count <= self.lget_count(table) else self.lget_count(table)

        if count:
            if count > 1:
                pipe = self._redis.pipeline(
                    transaction=True
                )  # by default redis-py checks a connection out of the pool and returns it after every request; a pipeline bundles several commands into one request, and by default a pipeline executes atomically

                if not self._is_redis_cluster:
                    pipe.multi()
                while count:
                    pipe.lpop(table)
                    count -= 1
                datas = pipe.execute()

            else:
                datas = self._redis.lpop(table)

        return datas

    def rpoplpush(self, from_table, to_table=None):
        """
        Pop the last element (the tail) of list from_table and return it to the client.
        Push the popped element onto list to_table as its new head element.
        If from_table and to_table are the same, the tail element is moved to the head and returned; this special case can be viewed as a rotation of the list
        @param from_table:
        @param to_table:
        @return:
        """

        if not to_table:
            to_table = from_table

        return self._redis.rpoplpush(from_table, to_table)

    def lget_count(self, table):
        return self._redis.llen(table)

    def lrem(self, table, value, num=0):
        """
        @summary:
        remove occurrences of value
        ---------
        @param table:
        @param value:
        @param num:
        ---------
        @result: the number of entries removed
        """
        return self._redis.lrem(table, num, value)

    def lrange(self, table, start=0, end=-1):
        return self._redis.lrange(table, start, end)

    def hset(self, table, key, value):
        """
        @summary:
        If key does not exist, a new hash is created and the HSET is performed.
        If the field already exists in the hash, the old value is overwritten
        ---------
        @param table:
        @param key:
        @param value:
        ---------
        @result: 1 for a new insert; 0 for an overwrite
        """
        return self._redis.hset(table, key, value)

    def hset_batch(self, table, datas):
        """
        Batch insert
        Args:
            datas:
                [[key, value]]
        Returns:

        """
        pipe = self._redis.pipeline(transaction=True)

        if not self._is_redis_cluster:
            pipe.multi()
        for key, value in datas:
            pipe.hset(table, key, value)
        return pipe.execute()

    def hincrby(self, table, key, increment):
        return self._redis.hincrby(table, key, increment)

    def hget(self, table, key, is_pop=False):
        if not is_pop:
            return self._redis.hget(table, key)
        else:
            lua = """
                -- local key = KEYS[1]
                local field = ARGV[1]

                -- fetch the value
                local datas = redis.call('hget', KEYS[1], field)
                -- delete the value
                redis.call('hdel', KEYS[1], field)

                return datas

                    """
            cmd = self._redis.register_script(lua)
            res = cmd(keys=[table], args=[key])

            return res

    def hgetall(self, table):
        return self._redis.hgetall(table)

    def hexists(self, table, key):
        return self._redis.hexists(table, key)

    def hdel(self, table, *keys):
        """
        @summary: delete the given key(s); multiple keys may be passed
        ---------
        @param table:
        @param *keys:
        ---------
        @result:
        """
        self._redis.hdel(table, *keys)

    def hget_count(self, table):
        return self._redis.hlen(table)

    def setbit(self, table, offsets, values):
        """
        Set the bit at the given offset(s) of the string and return the previous value(s)
        @param table:
        @param offsets: a list or a single value
        @param values: a list or a single value
        @return: a list / a single value
        """
        if isinstance(offsets, list):
            if not isinstance(values, list):
                values = [values] * len(offsets)
            else:
                assert len(offsets) == len(values), "offsets and values must correspond one to one"

            pipe = self._redis.pipeline(
                transaction=True
            )  # by default redis-py checks a connection out of the pool and returns it after every request; a pipeline bundles several commands into one request, and by default a pipeline executes atomically
            pipe.multi()

            for offset, value in zip(offsets, values):
                pipe.setbit(table, offset, value)

            return pipe.execute()

        else:
            return self._redis.setbit(table, offsets, values)

    def getbit(self, table, offsets):
        """
        Get the bit at the given offset(s) of the string
        @param table:
        @param offsets: a single offset or a list
        @return: a list / a single value
        """
        if isinstance(offsets, list):
            pipe = self._redis.pipeline(
                transaction=True
            )  # by default redis-py checks a connection out of the pool and returns it after every request; a pipeline bundles several commands into one request, and by default a pipeline executes atomically
            pipe.multi()
            for offset in offsets:
                pipe.getbit(table, offset)

            return pipe.execute()

        else:
            return self._redis.getbit(table, offsets)

    def bitcount(self, table):
        return self._redis.bitcount(table)

    def strset(self, table, value, **kwargs):
        return self._redis.set(table, value, **kwargs)

    def str_incrby(self, table, value):
        return self._redis.incrby(table, value)

    def strget(self, table):
        return self._redis.get(table)

    def strlen(self, table):
        return self._redis.strlen(table)

    def getkeys(self, regex):
        return self._redis.keys(regex)

    def exists_key(self, key):
        return self._redis.exists(key)

    def set_expire(self, key, seconds):
        """
        @summary: set the expiration time
        ---------
        @param key:
        @param seconds: seconds
        ---------
        @result:
        """
        self._redis.expire(key, seconds)

    def get_expire(self, key):
        """
        @summary: query the remaining time to live
        ---------
        @param key:
        ---------
        @result:
        """
        return self._redis.ttl(key)

    def clear(self, table):
        try:
            self._redis.delete(table)
        except Exception as e:
            log.error(e)

    def get_redis_obj(self):
        return self._redis

    def _reconnect(self):
        # Check the connection state and reconnect automatically when the server restarts or a timeout drops the connection
        retry_count = 0
        while True:
            try:
                retry_count += 1
                log.error(f"redis 连接断开, 重新连接 {retry_count}")
                if self.get_connect():
                    log.info(f"redis 连接成功")
                    return True
            except (ConnectionError, TimeoutError) as e:
                log.error(f"连接失败 e: {e}")

            time.sleep(2)
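A short usage sketch for the RedisDB wrapper above, assuming a locally reachable redis at 127.0.0.1:6379 (a placeholder); a list or comma-separated string with several ip:port entries would select the cluster branch of get_connect() instead. Passing explicit db/user_pass/service_name values avoids the fallback to the setting module.

# Sketch only: a single ip:port entry selects plain StrictRedis,
# several entries would select RedisCluster.
db = RedisDB(ip_ports="127.0.0.1:6379", db=0, user_pass="", service_name="")

db.sadd("dupefilter", ["url_1", "url_2"])   # batch adds go through a pipeline
print(db.sget("dupefilter", count=2))       # pops up to two members
db.zadd("tasks", ["job_a", "job_b"], prioritys=[1, 2])
print(db.zget("tasks", count=1))            # lowest score (highest priority) first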
Exemplo n.º 29
0
from redis._compat import xrange
from rediscluster import RedisCluster

startup_nodes = [{"host": "127.0.0.1", "port": 7000}]
r = RedisCluster(startup_nodes=startup_nodes, max_connections=32, decode_responses=True)

for i in xrange(1000000):
    d = str(i)
    r.set(d, d)
    r.incrby(d, 1)
Exemplo n.º 30
0
        count_c += 1
        if count_c % 1000 == 0:
            while True:
                try:
                    es.bulk(bulk_action, index=es_index, doc_type='bci', timeout=30)
                    bulk_action = []
                    break
                except Exception,r:
                    print "bulk error"
            print count_c


if __name__ == "__main__":

    startup_nodes = [{"host": REDIS_VENT_HOST, "port": REDIS_VENT_PORT}]
    cluster_redis = RedisCluster(startup_nodes = startup_nodes)


    es_logger = logging.getLogger("elasticsearch")
    es_logger.setLevel(logging.ERROR)
    FileHandler = logging.FileHandler("es.log")
    formatter = logging.Formatter("%(asctime)s_%(name)s_%(levelname)s_%(message)s")
    FileHandler.setFormatter(formatter)
    es_logger.addHandler(FileHandler)

    es =  Elasticsearch(['se13', 'se14'], timeout = 60, retry_on_timeout=True, max_retries=6)
    count = 0
    tb = time.time()
    print es.nodes.info("node_id=all")
    print es.cluster.health()
Exemplo n.º 31
0
from redis import StrictRedis
from rediscluster import RedisCluster

if __name__ == '__main__':
    """
    for i in [94, 95, 96]:
        for j in [7380]:
            startup_nodes = [{"host": '219.224.135.%s'%i, "port": '%s'%j}]
            print startup_nodes
            weibo_redis = RedisCluster(startup_nodes = startup_nodes)

            weibo_redis.flushall()
    print "finish flushing!"
    """
    startup_nodes = [{"host": '219.224.135.91', "port": '7380'}]
    weibo_redis = RedisCluster(startup_nodes = startup_nodes)
    weibo_redis.flushall()

    startup_nodes = [{"host": '219.224.135.92', "port": '7380'}]
    weibo_redis = RedisCluster(startup_nodes = startup_nodes)
    weibo_redis.flushall()

    startup_nodes = [{"host": '219.224.135.93', "port": '7380'}]
    weibo_redis = RedisCluster(startup_nodes = startup_nodes)
    weibo_redis.flushall()

    #r = StrictRedis(host="219.224.135.97", port="7380")
    #r.flushall()

    print "ok"
    """
Exemplo n.º 32
0
class RedisClusterRetentionScheduler(BaseModule):

    def __init__(self, modconf, servers, password, key_prefix, expire_time):
        BaseModule.__init__(self, modconf)
        self.servers = [dict(host=elt.strip().split(':')[0],
                             port=int(elt.strip().split(':')[1]))
                        for elt in servers.split(',')]
        self.password = password
        self.key_prefix = key_prefix
        self.expire_time = expire_time

        self.rc = None

    def init(self):
        """
        Called by Scheduler to say 'let's prepare yourself guy'
        """
        logger.info('[RedisClusterRetention] Initialization of the redis '
                    'module')
        if self.password:
            self.rc = RedisCluster(startup_nodes=self.servers,
                                   password=self.password)
        else:
            self.rc = RedisCluster(startup_nodes=self.servers)

    def _get_host_key(self, h_name):
        host_key = '%s-HOST-%s' % (self.key_prefix, h_name) \
                   if self.key_prefix else 'HOST-%s' % h_name
        return host_key

    def _get_service_key(self, h_name, s_name):
        service_key = '%s-SERVICE-%s,%s' % (self.key_prefix, h_name, s_name)\
                      if self.key_prefix \
                      else 'SERVICE-%s,%s' % (h_name, s_name)
        return service_key

    def hook_save_retention(self, daemon):
        """
        main function that is called in the retention creation pass
        """
        logger.debug('[RedisClusterRetention] asking me to update retention '
                     'objects')

        all_data = daemon.get_retention_data()

        hosts = all_data['hosts']
        services = all_data['services']

        # Save each host and service retention object to Redis
        for h_name in hosts:
            h = hosts[h_name]
            key = self._get_host_key(h_name)
            val = cPickle.dumps(h)
            if self.expire_time:
                self.rc.set(key, val, ex=self.expire_time)
            else:
                self.rc.set(key, val)

        for (h_name, s_desc) in services:
            s = services[(h_name, s_desc)]
            key = self._get_service_key(h_name, s_desc)
            val = cPickle.dumps(s)
            if self.expire_time:
                self.rc.set(key, val, ex=self.expire_time)
            else:
                self.rc.set(key, val)
        logger.info('Retention information updated in Redis')

    # Should return whether the retention load succeeded or not
    def hook_load_retention(self, daemon):

        # Now the new redis way :)
        logger.info('[RedisClusterRetention] asking me to load retention '
                    'objects')

        # We got list of loaded data from retention server
        ret_hosts = {}
        ret_services = {}

        # We must load the data and format as the scheduler want :)
        for h in daemon.hosts:
            key = self._get_host_key(h.host_name)
            val = self.rc.get(key)
            if val is not None:
                val = cPickle.loads(val)
                ret_hosts[h.host_name] = val

        for s in daemon.services:
            key = self._get_service_key(s.host.host_name,
                                        s.service_description)
            val = self.rc.get(key)
            if val is not None:
                val = cPickle.loads(val)
                ret_services[(s.host.host_name, s.service_description)] = val

        all_data = {'hosts': ret_hosts, 'services': ret_services}

        # Ok, now come load them scheduler :)
        daemon.restore_retention_data(all_data)

        logger.info('[RedisClusterRetention] Retention objects loaded '
                    'successfully.')

        return True
Exemplo n.º 33
0
from rediscluster import RedisCluster

startup_nodes = [{"host": "127.0.0.1", "port": "7000"}]
password = ""
keyPrefix = ""

rc = RedisCluster(startup_nodes=startup_nodes, password=password, skip_full_coverage_check=True, decode_responses=True)

file = open("sample.txt")
readlines = file.readlines()

for line in readlines:
    key = keyPrefix + line[:-1]
    print("del key " + key)
    print("end")
    if len(key) > 10:
        rc.delete(key)
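When sample.txt is long, the loop above pays one round trip per DEL. A hedged variant below batches the deletes through a cluster pipeline, using the same rc and keyPrefix defined in this example; the batch size of 500 is arbitrary.

# Sketch: collect keys and flush them through a pipeline in batches.
batch = []
with open("sample.txt") as f:
    for line in f:
        key = keyPrefix + line.strip()
        if len(key) > 10:
            batch.append(key)
        if len(batch) >= 500:
            pipe = rc.pipeline()
            for k in batch:
                pipe.delete(k)
            pipe.execute()
            batch = []

# flush the final partial batch
if batch:
    pipe = rc.pipeline()
    for k in batch:
        pipe.delete(k)
    pipe.execute()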
Exemplo n.º 34
0
from redis import StrictRedis
from rediscluster import RedisCluster

if __name__ == '__main__':
    for i in [91, 92, 93]:
        for j in [6379, 6380]:
            startup_nodes = [{"host": '219.224.135.%s'%i, "port": '%s'%j}]
            weibo_redis = RedisCluster(startup_nodes = startup_nodes)

            #weibo_redis.flushall()
    print "finish flushing!"
    """
    r = StrictRedis(host="219.224.135.97", port="6380", db=1)
    r.set("1","1")
    """
    startup_nodes = [{"host": '219.224.135.94', "port": '6379'}]
    r =  RedisCluster(startup_nodes = startup_nodes)
    print r.hgetall('ip_1378224000')
Exemplo n.º 35
0
Arquivo: db.py Projeto: pendyala/ceryx
 def __init__(self, host, port, password, db, prefix, timeout):
     self.client = RedisCluster(host=host, port=port, decode_responses=True)
     print("Redis HOST ===> ")
     print(host)
     self.prefix = prefix
Exemplo n.º 36
0
    def test_join_no_load(self):
        comm.start_cluster('127.0.0.1', 7100)

        rc = RedisCluster([{'host': '127.0.0.1', 'port': 7100}])
        rc.set('x-{h-893}', 'y')
        rc.set('y-{h-893}', 'zzZ')
        rc.set('z-{h-893}', 'w')
        rc.incr('h-893')

        comm.join_no_load('127.0.0.1', 7100, '127.0.0.1', 7101)
        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        n7100 = nodes[('127.0.0.1', 7100)]
        n7101 = nodes[('127.0.0.1', 7101)]

        self.assertEqual(16384, len(n7100.assigned_slots))
        self.assertEqual(0, len(n7101.assigned_slots))

        comm.join_no_load('127.0.0.1', 7100, '127.0.0.1', 7102)
        comm.migrate_slots('127.0.0.1', 7100, '127.0.0.1', 7101, [0])

        nodes = base.list_nodes('127.0.0.1', 7102)
        self.assertEqual(3, len(nodes))
        n7100 = nodes[('127.0.0.1', 7100)]
        n7101 = nodes[('127.0.0.1', 7101)]
        n7102 = nodes[('127.0.0.1', 7102)]

        self.assertEqual(16383, len(n7100.assigned_slots))
        self.assertEqual(1, len(n7101.assigned_slots))
        self.assertEqual(0, len(n7102.assigned_slots))

        try:
            t = n7101.talker()
            m = t.talk('get', 'h-893')
            self.assertEqual('1', m)

            m = t.talk('get', 'y-{h-893}')
            self.assertEqual('zzZ', m)

            comm.quit_cluster('127.0.0.1', 7102)
            comm.quit_cluster('127.0.0.1', 7101)
            t = n7100.talker()

            rc.delete('x-{h-893}')
            rc.delete('y-{h-893}')
            rc.delete('z-{h-893}')
            rc.delete('h-893')
            comm.shutdown_cluster('127.0.0.1', 7100)
        finally:
            n7100.close()
            n7101.close()
Exemplo n.º 37
0
class Worker(Process):
    """
    The worker processes chunks from the queue and appends
    the latest datapoints to their respective timesteps in Redis.
    """
    def __init__(self, queue, parent_pid, skip_mini, canary=False):
        super(Worker, self).__init__()

        self.redis_conn = RedisCluster(startup_nodes=settings.startup_nodes, decode_responses=True) 
        self.q = queue
        self.parent_pid = parent_pid
        self.daemon = True
        self.canary = canary
        self.skip_mini = skip_mini

    def check_if_parent_is_alive(self):
        """
        Self explanatory.
        """
        try:
            kill(self.parent_pid, 0)
        except:
            exit(0)

    def in_skip_list(self, metric_name):
        """
        Check if the metric is in SKIP_LIST.
        """
        for to_skip in settings.SKIP_LIST:
            if to_skip in metric_name:
                return True

        return False

    def send_graphite_metric(self, name, value):
        if settings.GRAPHITE_HOST != '':
            sock = socket.socket()
            sock.connect((settings.GRAPHITE_HOST, settings.CARBON_PORT))
            sock.sendall('%s %s %i\n' % (name, value, time()))
            sock.close()
            return True

        return False

    def run(self):
        """
        Called when the process initializes.
        """
        logger.info('started worker')

        FULL_NAMESPACE = settings.FULL_NAMESPACE
        MINI_NAMESPACE = settings.MINI_NAMESPACE
        MAX_RESOLUTION = settings.MAX_RESOLUTION
        full_uniques = FULL_NAMESPACE + 'unique_metrics'
        mini_uniques = MINI_NAMESPACE + 'unique_metrics'
        pipe = self.redis_conn.pipeline()

        while 1:

            # Make sure Redis is up
            try:
                self.redis_conn.ping()
            except:
                logger.error('worker can\'t connect to redis cluster via startup nodes %s' % settings.startup_nodes)
                sleep(10)
                self.redis_conn = RedisCluster(startup_nodes=settings.startup_nodes, decode_responses=True)
                pipe = self.redis_conn.pipeline()
                continue

            try:
                # Get a chunk from the queue with a 15 second timeout
                chunk = self.q.get(True, 15)
                now = time()

                for metric in chunk:

                    # Check if we should skip it
                    if self.in_skip_list(metric[0]):
                        continue

                    # Bad data coming in
                    if metric[1][0] < now - MAX_RESOLUTION:
                        continue

                    # Append to messagepack main namespace
                    key = ''.join((FULL_NAMESPACE, metric[0]))
                    pipe.append(key, packb(metric[1]))
                    pipe.sadd(full_uniques, key)

                    if not self.skip_mini:
                        # Append to mini namespace
                        mini_key = ''.join((MINI_NAMESPACE, metric[0]))
                        pipe.append(mini_key, packb(metric[1]))
                        pipe.sadd(mini_uniques, mini_key)

                    pipe.execute()
                # Log progress
                if self.canary:
                    logger.info('queue size at %d' % self.q.qsize())
                    self.send_graphite_metric('skyline.horizon.queue_size', self.q.qsize())

            except Empty:
                logger.info('worker queue is empty and timed out')
            except WatchError:
                logger.error(key)
            except NotImplementedError:
                pass
            except Exception as e:
                logger.error("worker error: " + str(e))
Exemplo n.º 38
0
    def test_api(self):
        comm.start_cluster('127.0.0.1', 7100)
        rc = RedisCluster([{'host': '127.0.0.1', 'port': 7100}])
        rc.set('key', 'value')
        self.assertEqual('value', rc.get('key'))

        comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101)
        for i in xrange(20):
            rc.set('key_%s' % i, 'value_%s' % i)

        for i in xrange(20):
            self.assertEqual('value_%s' % i, rc.get('key_%s' % i))

        nodes = base.list_nodes('127.0.0.1', 7100)

        self.assertEqual(2, len(nodes))
        self.assertEqual(range(8192),
                         nodes[('127.0.0.1', 7101)].assigned_slots)
        self.assertEqual(range(8192, 16384),
                         nodes[('127.0.0.1', 7100)].assigned_slots)

        comm.migrate_slots('127.0.0.1', 7100, '127.0.0.1', 7101, [8192])

        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        self.assertEqual(range(8193),
                         nodes[('127.0.0.1', 7101)].assigned_slots)
        self.assertEqual(range(8193, 16384),
                         nodes[('127.0.0.1', 7100)].assigned_slots)

        comm.migrate_slots('127.0.0.1', 7100, '127.0.0.1', 7101,
                           [8193, 8194, 8195])

        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        self.assertEqual(range(8196),
                         nodes[('127.0.0.1', 7101)].assigned_slots)
        self.assertEqual(range(8196, 16384),
                         nodes[('127.0.0.1', 7100)].assigned_slots)

        self.assertRaisesRegexp(
            ValueError, 'Not all slot held by', comm.migrate_slots,
            '127.0.0.1', 7100, '127.0.0.1', 7101, [8192])

        self.assertRaisesRegexp(
            ValueError, 'Not all slot held by', comm.migrate_slots,
            '127.0.0.1', 7100, '127.0.0.1', 7101, [8195, 8196])

        self.assertRaisesRegexp(
            ValueError, 'Two nodes are not in the same cluster',
            comm.migrate_slots, '127.0.0.1', 7100, '127.0.0.1', 7102, [8196])

        comm.quit_cluster('127.0.0.1', 7100)

        for i in xrange(20):
            self.assertEqual('value_%s' % i, rc.get('key_%s' % i))
        self.assertEqual('value', rc.get('key'))

        nodes = base.list_nodes('127.0.0.1', 7101)
        self.assertEqual(1, len(nodes))
        self.assertEqual(range(16384),
                         nodes[('127.0.0.1', 7101)].assigned_slots)

        self.assertRaisesRegexp(
            RedisStatusError, 'Cluster containing keys',
            comm.shutdown_cluster, '127.0.0.1', 7101)

        rc.delete('key', *['key_%s' % i for i in xrange(20)])
        comm.shutdown_cluster('127.0.0.1', 7101)

        self.assertRaisesRegexp(ResponseError, 'CLUSTERDOWN .*', rc.get, 'key')
Exemplo n.º 39
0
    def run(self):
        """
        Called when the process initializes.
        """
        logger.info('started worker')

        FULL_NAMESPACE = settings.FULL_NAMESPACE
        MINI_NAMESPACE = settings.MINI_NAMESPACE
        MAX_RESOLUTION = settings.MAX_RESOLUTION
        full_uniques = FULL_NAMESPACE + 'unique_metrics'
        mini_uniques = MINI_NAMESPACE + 'unique_metrics'
        pipe = self.redis_conn.pipeline()

        while 1:

            # Make sure Redis is up
            try:
                self.redis_conn.ping()
            except:
                logger.error('worker can\'t connect to redis cluster via startup nodes %s' % settings.startup_nodes)
                sleep(10)
                self.redis_conn = RedisCluster(startup_nodes=settings.startup_nodes, decode_responses=True)
                pipe = self.redis_conn.pipeline()
                continue

            try:
                # Get a chunk from the queue with a 15 second timeout
                chunk = self.q.get(True, 15)
                now = time()

                for metric in chunk:

                    # Check if we should skip it
                    if self.in_skip_list(metric[0]):
                        continue

                    # Bad data coming in
                    if metric[1][0] < now - MAX_RESOLUTION:
                        continue

                    # Append to messagepack main namespace
                    key = ''.join((FULL_NAMESPACE, metric[0]))
                    pipe.append(key, packb(metric[1]))
                    pipe.sadd(full_uniques, key)

                    if not self.skip_mini:
                        # Append to mini namespace
                        mini_key = ''.join((MINI_NAMESPACE, metric[0]))
                        pipe.append(mini_key, packb(metric[1]))
                        pipe.sadd(mini_uniques, mini_key)

                    pipe.execute()
                # Log progress
                if self.canary:
                    logger.info('queue size at %d' % self.q.qsize())
                    self.send_graphite_metric('skyline.horizon.queue_size', self.q.qsize())

            except Empty:
                logger.info('worker queue is empty and timed out')
            except WatchError:
                logger.error(key)
            except NotImplementedError:
                pass
            except Exception as e:
                logger.error("worker error: " + str(e))
Exemplo n.º 40
0
from rediscluster import RedisCluster

startup_nodes = [{"host": "127.0.0.1", "port": 7000}]
r = RedisCluster(startup_nodes=startup_nodes, max_connections=32, decode_responses=True)

for i in xrange(1000000):
    d = str(i)
    pipe = r.pipeline(transaction=False)
    pipe.set(d, d)
    pipe.incrby(d, 1)
    pipe.execute()

    pipe = r.pipeline(transaction=False)
    pipe.set("foo-%s" % d, d)
    pipe.incrby("foo-%s" % d, 1)
    pipe.set("bar-%s" % d, d)
    pipe.incrby("bar-%s" % d, 1)
    pipe.set("bazz-%s" % d, d)
    pipe.incrby("bazz-%s" % d, 1)
    pipe.execute()
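A small follow-up sketch that reads one batch back and checks the result ordering of execute(); it assumes the same r client (decode_responses=True) and that the loop above has processed at least i = 0.

# Sketch: pipe.execute() returns the replies in the order the commands were
# queued, even though the cluster pipeline sends them per node under the hood.
pipe = r.pipeline(transaction=False)
pipe.get("foo-0")
pipe.get("bar-0")
pipe.get("bazz-0")
foo, bar, bazz = pipe.execute()
assert foo == bar == bazz == "1"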
Exemplo n.º 41
0
class SharQ(object):
    """The SharQ object is the core of this queue.
    SharQ does the following.

        1. Accepts a configuration file.
        2. Initializes the queue.
        3. Exposes functions to interact with the queue.
    """
    def __init__(self, config_path):
        """Construct a SharQ object by doing the following.
            1. Read the configuration path.
            2. Load the config.
            3. Initialize SharQ.
        """
        self.config_path = config_path
        self._load_config()
        self._initialize()

    def _initialize(self):
        """Read the SharQ configuration and set appropriate
        variables. Open a redis connection pool and load all
        the Lua scripts.
        """
        self._key_prefix = self._config.get('redis', 'key_prefix')
        self._job_expire_interval = int(
            self._config.get('sharq', 'job_expire_interval'))
        self._default_job_requeue_limit = int(
            self._config.get('sharq', 'default_job_requeue_limit'))

        # initialize redis
        redis_connection_type = self._config.get('redis', 'conn_type')
        db = self._config.get('redis', 'db')
        if redis_connection_type == 'unix_sock':
            self._r = redis.StrictRedis(db=db,
                                        unix_socket_path=self._config.get(
                                            'redis', 'unix_socket_path'))
        elif redis_connection_type == 'tcp_sock':
            isclustered = False
            if self._config.has_option('redis', 'clustered'):
                isclustered = self._config.getboolean('redis', 'clustered')

            if isclustered:
                startup_nodes = [{
                    "host": self._config.get('redis', 'host'),
                    "port": self._config.get('redis', 'port')
                }]
                self._r = StrictRedisCluster(startup_nodes=startup_nodes,
                                             decode_responses=False,
                                             skip_full_coverage_check=True,
                                             socket_timeout=5)
            else:
                self._r = redis.StrictRedis(
                    db=db,
                    host=self._config.get('redis', 'host'),
                    port=self._config.get('redis', 'port'))
        self._load_lua_scripts()

    def _load_config(self):
        """Read the configuration file and load it into memory."""
        self._config = ConfigParser.SafeConfigParser()
        self._config.read(self.config_path)

    def reload_config(self, config_path=None):
        """Reload the configuration from the new config file if provided
        else reload the current config file.
        """
        if config_path:
            self.config_path = config_path
        self._load_config()

    def _load_lua_scripts(self):
        """Loads all lua scripts required by SharQ."""
        # load lua scripts
        lua_script_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'scripts/lua')
        with open(os.path.join(lua_script_path, 'enqueue.lua'),
                  'r') as enqueue_file:
            self._lua_enqueue_script = enqueue_file.read()
            self._lua_enqueue = self._r.register_script(
                self._lua_enqueue_script)

        with open(os.path.join(lua_script_path, 'dequeue.lua'),
                  'r') as dequeue_file:
            self._lua_dequeue_script = dequeue_file.read()
            self._lua_dequeue = self._r.register_script(
                self._lua_dequeue_script)

        with open(os.path.join(lua_script_path, 'finish.lua'),
                  'r') as finish_file:
            self._lua_finish_script = finish_file.read()
            self._lua_finish = self._r.register_script(self._lua_finish_script)

        with open(os.path.join(lua_script_path, 'interval.lua'),
                  'r') as interval_file:
            self._lua_interval_script = interval_file.read()
            self._lua_interval = self._r.register_script(
                self._lua_interval_script)

        with open(os.path.join(lua_script_path, 'requeue.lua'),
                  'r') as requeue_file:
            self._lua_requeue_script = requeue_file.read()
            self._lua_requeue = self._r.register_script(
                self._lua_requeue_script)

        with open(os.path.join(lua_script_path, 'metrics.lua'),
                  'r') as metrics_file:
            self._lua_metrics_script = metrics_file.read()
            self._lua_metrics = self._r.register_script(
                self._lua_metrics_script)

    def reload_lua_scripts(self):
        """Lets user reload the lua scripts in run time."""
        self._load_lua_scripts()

    def enqueue(self,
                payload,
                interval,
                job_id,
                queue_id,
                queue_type='default',
                requeue_limit=None):
        """Enqueues the job into the specified queue_id
        of a particular queue_type
        """
        # validate all the input
        if not is_valid_interval(interval):
            raise BadArgumentException('`interval` has an invalid value.')

        if not is_valid_identifier(job_id):
            raise BadArgumentException('`job_id` has an invalid value.')

        if not is_valid_identifier(queue_id):
            raise BadArgumentException('`queue_id` has an invalid value.')

        if not is_valid_identifier(queue_type):
            raise BadArgumentException('`queue_type` has an invalid value.')

        if requeue_limit is None:
            requeue_limit = self._default_job_requeue_limit

        if not is_valid_requeue_limit(requeue_limit):
            raise BadArgumentException('`requeue_limit` has an invalid value.')

        try:
            serialized_payload = serialize_payload(payload)
        except TypeError as e:
            raise BadArgumentException(e.message)

        timestamp = str(generate_epoch())

        keys = [self._key_prefix, queue_type]

        args = [
            timestamp, queue_id, job_id,
            '"%s"' % serialized_payload, interval, requeue_limit
        ]

        self._lua_enqueue(keys=keys, args=args)

        response = {'status': 'queued'}
        return response

    def dequeue(self, queue_type='default'):
        """Dequeues a job from any of the ready queues
        based on the queue_type. If no job is ready,
        returns a failure status.
        """
        if not is_valid_identifier(queue_type):
            raise BadArgumentException('`queue_type` has an invalid value.')

        timestamp = str(generate_epoch())

        keys = [self._key_prefix, queue_type]
        args = [timestamp, self._job_expire_interval]

        dequeue_response = self._lua_dequeue(keys=keys, args=args)

        if len(dequeue_response) < 4:
            response = {'status': 'failure'}
            return response

        queue_id, job_id, payload, requeues_remaining = dequeue_response
        payload = deserialize_payload(payload[1:-1])

        response = {
            'status': 'success',
            'queue_id': queue_id,
            'job_id': job_id,
            'payload': payload,
            'requeues_remaining': int(requeues_remaining)
        }

        return response

    def finish(self, job_id, queue_id, queue_type='default'):
        """Marks any dequeued job as *completed successfully*.
        Any job which gets a finish will be treated as complete
        and will be removed from the SharQ.
        """
        if not is_valid_identifier(job_id):
            raise BadArgumentException('`job_id` has an invalid value.')

        if not is_valid_identifier(queue_id):
            raise BadArgumentException('`queue_id` has an invalid value.')

        if not is_valid_identifier(queue_type):
            raise BadArgumentException('`queue_type` has an invalid value.')

        keys = [self._key_prefix, queue_type]

        args = [queue_id, job_id]

        response = {'status': 'success'}

        finish_response = self._lua_finish(keys=keys, args=args)
        if finish_response == 0:
            # the finish failed.
            response.update({'status': 'failure'})

        return response

    def interval(self, interval, queue_id, queue_type='default'):
        """Updates the interval for a specific queue_id
        of a particular queue type.
        """
        # validate all the input
        if not is_valid_interval(interval):
            raise BadArgumentException('`interval` has an invalid value.')

        if not is_valid_identifier(queue_id):
            raise BadArgumentException('`queue_id` has an invalid value.')

        if not is_valid_identifier(queue_type):
            raise BadArgumentException('`queue_type` has an invalid value.')

        # generate the interval key
        interval_hmap_key = '%s:interval' % self._key_prefix
        interval_queue_key = '%s:%s' % (queue_type, queue_id)
        keys = [interval_hmap_key, interval_queue_key]

        args = [interval]
        interval_response = self._lua_interval(keys=keys, args=args)
        if interval_response == 0:
            # the queue with the id and type does not exist.
            response = {'status': 'failure'}
        else:
            response = {'status': 'success'}

        return response

    def requeue(self):
        """Re-queues any expired job (one which does not get an expire
        before the job_expiry_interval) back into their respective queue.
        This function has to be run at specified intervals to ensure the
        expired jobs are re-queued back.
        """
        timestamp = str(generate_epoch())
        # get all queue_types and requeue one by one.
        # not recommended to do this entire process
        # in lua as it might take long and block other
        # enqueues and dequeues.
        active_queue_type_list = self._r.smembers('%s:active:queue_type' %
                                                  self._key_prefix)
        for queue_type in active_queue_type_list:
            # requeue all expired jobs in all queue types.
            keys = [self._key_prefix, queue_type]

            args = [timestamp]
            job_discard_list = self._lua_requeue(keys=keys, args=args)
            # discard the jobs if any
            for job in job_discard_list:
                queue_id, job_id = job.split(':')
                # explicitly finishing a job
                # is nothing but discard.
                self.finish(job_id=job_id,
                            queue_id=queue_id,
                            queue_type=queue_type)

    def metrics(self, queue_type=None, queue_id=None):
        """Provides a way to get statistics about various parameters like,
        * global enqueue / dequeue rates per min.
        * per queue enqueue / dequeue rates per min.
        * queue length of each queue.
        * list of queue ids for each queue type.
        """
        if queue_id is not None and not is_valid_identifier(queue_id):
            raise BadArgumentException('`queue_id` has an invalid value.')

        if queue_type is not None and not is_valid_identifier(queue_type):
            raise BadArgumentException('`queue_type` has an invalid value.')

        response = {'status': 'failure'}
        if not queue_type and not queue_id:
            # return global stats.
            # list of active queue types (ready + active)
            active_queue_types = self._r.smembers('%s:active:queue_type' %
                                                  self._key_prefix)
            ready_queue_types = self._r.smembers('%s:ready:queue_type' %
                                                 self._key_prefix)
            all_queue_types = active_queue_types | ready_queue_types
            # global rates for past 10 minutes
            timestamp = str(generate_epoch())
            keys = [self._key_prefix]
            args = [timestamp]

            enqueue_details, dequeue_details = self._lua_metrics(keys=keys,
                                                                 args=args)

            enqueue_counts = {}
            dequeue_counts = {}
            # the length of enqueue & dequeue details are always same.
            for i in xrange(0, len(enqueue_details), 2):
                enqueue_counts[str(enqueue_details[i])] = int(
                    enqueue_details[i + 1] or 0)
                dequeue_counts[str(dequeue_details[i])] = int(
                    dequeue_details[i + 1] or 0)

            response.update({
                'status': 'success',
                'queue_types': list(all_queue_types),
                'enqueue_counts': enqueue_counts,
                'dequeue_counts': dequeue_counts
            })
            return response
        elif queue_type and not queue_id:
            # return list of queue_ids.
            # get data from two sorted sets in a transaction
            pipe = self._r.pipeline()
            pipe.zrange('%s:%s' % (self._key_prefix, queue_type), 0, -1)
            pipe.zrange('%s:%s:active' % (self._key_prefix, queue_type), 0, -1)
            ready_queues, active_queues = pipe.execute()
            # extract the queue_ids from the queue_id:job_id string
            active_queues = [i.split(':')[0] for i in active_queues]
            all_queue_set = set(ready_queues) | set(active_queues)
            response.update({
                'status': 'success',
                'queue_ids': list(all_queue_set)
            })
            return response
        elif queue_type and queue_id:
            # return specific details.
            active_queue_types = self._r.smembers('%s:active:queue_type' %
                                                  self._key_prefix)
            ready_queue_types = self._r.smembers('%s:ready:queue_type' %
                                                 self._key_prefix)
            all_queue_types = active_queue_types | ready_queue_types
            # queue specific rates for past 10 minutes
            timestamp = str(generate_epoch())
            keys = ['%s:%s:%s' % (self._key_prefix, queue_type, queue_id)]
            args = [timestamp]

            enqueue_details, dequeue_details = self._lua_metrics(keys=keys,
                                                                 args=args)

            enqueue_counts = {}
            dequeue_counts = {}
            # the length of enqueue & dequeue details are always same.
            for i in xrange(0, len(enqueue_details), 2):
                enqueue_counts[str(enqueue_details[i])] = int(
                    enqueue_details[i + 1] or 0)
                dequeue_counts[str(dequeue_details[i])] = int(
                    dequeue_details[i + 1] or 0)

            # get the queue length for the job queue
            queue_length = self._r.llen(
                '%s:%s:%s' % (self._key_prefix, queue_type, queue_id))

            response.update({
                'status': 'success',
                'queue_length': int(queue_length),
                'enqueue_counts': enqueue_counts,
                'dequeue_counts': dequeue_counts
            })
            return response
        elif not queue_type and queue_id:
            raise BadArgumentException(
                '`queue_id` should be accompanied by `queue_type`.')

        return response

    def deep_status(self):
        """
        To check the availability of redis. If redis is down get will throw exception
        :return: value or None
        """
        return self._r.get('sharq:deep_status:{}'.format(self._key_prefix))

    def clear_queue(self, queue_type=None, queue_id=None, purge_all=False):
        """clear the all entries in queue with particular queue_id
        and queue_type. It takes an optional argument, 
        purge_all : if True, then it will remove the related resources
        from the redis.
        """
        if queue_id is None or not is_valid_identifier(queue_id):
            raise BadArgumentException('`queue_id` has an invalid value.')

        if queue_type is None or not is_valid_identifier(queue_type):
            raise BadArgumentException('`queue_type` has an invalid value.')

        response = {'status': 'Failure', 'message': 'No queued calls found'}
        # remove from the primary sorted set
        primary_set = '{}:{}'.format(self._key_prefix, queue_type)
        queued_status = self._r.zrem(primary_set, queue_id)
        if queued_status:
            response.update({
                'status': 'Success',
                'message': 'Successfully removed all queued calls'
            })
        # do a full cleanup of resources,
        # although this is not strictly necessary since resources are not
        # removed during the dequeue operation
        job_queue_list = '{}:{}:{}'.format(self._key_prefix, queue_type,
                                           queue_id)
        if queued_status and purge_all:
            job_list = self._r.lrange(job_queue_list, 0, -1)
            pipe = self._r.pipeline()
            # clear the payload data for job_uuid
            for job_uuid in job_list:
                if job_uuid is None:
                    continue
                payload_set = '{}:payload'.format(self._key_prefix)
                job_payload_key = '{}:{}:{}'.format(queue_type, queue_id,
                                                    job_uuid)
                pipe.hdel(payload_set, job_payload_key)
            # clear jobrequest interval
            interval_set = '{}:interval'.format(self._key_prefix)
            job_interval_key = '{}:{}'.format(queue_type, queue_id)
            pipe.hdel(interval_set, job_interval_key)
            # clear job_queue_list
            pipe.delete(job_queue_list)
            pipe.execute()
            response.update({
                'status':
                'Success',
                'message':
                'Successfully removed all queued calls and purged related resources'
            })
        else:
            # always delete the job queue list
            self._r.delete(job_queue_list)
        return response
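A hedged usage sketch for the SharQ class above. The sharq.conf path and values are illustrative placeholders that simply mirror the options read in _initialize(); they are not taken from the project's documentation, so adjust them to the actual deployment.

# Sketch only: the config keys below mirror those read in _initialize().
#
# sharq.conf
#   [sharq]
#   job_expire_interval = 1000
#   default_job_requeue_limit = -1
#
#   [redis]
#   conn_type = tcp_sock
#   host = 127.0.0.1
#   port = 6379
#   db = 0
#   key_prefix = sharq_server
#   clustered = false

sharq = SharQ("sharq.conf")
sharq.enqueue(payload={"message": "hello"}, interval=1000,
              job_id="job_1", queue_id="queue_1")
print(sharq.dequeue())                    # {'status': 'success', ...} once a job is ready
sharq.finish(job_id="job_1", queue_id="queue_1")
print(sharq.metrics())                    # global enqueue/dequeue counts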