Code example #1
    def pack_command(self, *args):
        "Pack a series of arguments into the Redis protocol"
        output = []
        # the client might have included 1 or more literal arguments in
        # the command name, e.g., 'CONFIG GET'. The Redis server expects these
        # arguments to be sent separately, so split the first argument
        # manually. All of these arguments get wrapped in the Token class
        # to prevent them from being encoded.
        command = args[0]
        if ' ' in command:
            args = tuple([Token(s) for s in command.split(' ')]) + args[1:]
        else:
            args = (Token(command), ) + args[1:]

        buff = SYM_EMPTY.join((SYM_STAR, rcb(str(len(args))), SYM_CRLF))

        for arg in imap(self.encode, args):
            # to avoid large string mallocs, chunk the command into the
            # output list if we're sending large values
            if len(buff) > 6000 or len(arg) > 6000:
                buff = SYM_EMPTY.join(
                    (buff, SYM_DOLLAR, rcb(str(len(arg))), SYM_CRLF))
                output.append(buff)
                output.append(arg)
                buff = SYM_CRLF
            else:
                buff = SYM_EMPTY.join((buff, SYM_DOLLAR, rcb(str(len(arg))),
                                       SYM_CRLF, arg, SYM_CRLF))
        output.append(buff)
        return output
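
For small commands the 6000-byte chunking above never triggers and pack_command returns a single RESP buffer. A minimal standalone sketch (ignoring Token handling and chunking) of the wire format it builds, with the SYM_* constants spelled out:

SYM_STAR, SYM_DOLLAR, SYM_CRLF, SYM_EMPTY = b'*', b'$', b'\r\n', b''

def pack_simple(*args):
    """Pack already-encoded byte arguments into one RESP buffer."""
    out = [SYM_STAR, str(len(args)).encode(), SYM_CRLF]
    for arg in args:
        out += [SYM_DOLLAR, str(len(arg)).encode(), SYM_CRLF, arg, SYM_CRLF]
    return SYM_EMPTY.join(out)

# prints b'*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n'
print(pack_simple(b'SET', b'foo', b'bar'))
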
Code example #2
    def _job_cmd(self, queue, timeout_ms=0, count=1, queues=None):
        """ This function accepts a queue name as "queue" and a list of
        additional queues as "queues="

        e.g. `getjob('firstone', queues=['another', 'and', 'another'])`

        History: This function signature is odd because of Python 2.7
        compatibility.

        PEP 3102 means the following works in Python 3.x:
            def getjob(self, *queues, timeout_ms=0, count=1):
                return self.execute_command('GETJOB', *queues)

        But that raises a SyntaxError on anything older than Python 3
        """
        if queues is None:
            queues = []
        jobs = self.execute_command('GETJOB', Token('TIMEOUT'), timeout_ms,
                                    Token('COUNT'), count, Token('FROM'),
                                    queue, *queues)
        if jobs is None:
            return
        if self.record_job_origin:
            for _, job_id, _ in jobs:
                # pull the origin node out of the job_id
                # https://github.com/antirez/disque#job-ids
                self._job_score.add(job_id[2:10])
        return jobs
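
The job_id[2:10] slice relies on the job ID layout described in the Disque README linked above: the literal prefix 'DI', eight hex characters taken from the origin node's ID, the job-specific part, and an 'SQ' suffix. A small sketch with a made-up ID:

# Made-up job ID following the DI...SQ layout from the Disque README.
job_id = 'DI0f0c644fd3ccb51c2cedbd47fcb6f312646c993c05a0SQ'
origin = job_id[2:10]
print(origin)  # '0f0c644f': the first 8 characters of the origin node ID
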
Code example #3
 def cluster_setslot(self, node_id, slot_id, state, bind_to_node_id=None):
     """Bind an hash slot to a specific node"""
     if state.upper() in ('IMPORTING', 'MIGRATING',
                          'NODE') and node_id is not None:
         return self.execute_command('CLUSTER SETSLOT', slot_id,
                                     Token(state), node_id)
     elif state.upper() == 'STABLE':
         return self.execute_command('CLUSTER SETSLOT', slot_id,
                                     Token('STABLE'))
     else:
         raise RedisError('Invalid slot state: {0}'.format(state))
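
For context, this method maps onto the standard Redis Cluster resharding handshake. A hedged sketch, assuming `source` and `target` are hypothetical client instances connected to the two nodes exchanging the slot:

def reshard_slot(source, target, source_id, target_id, slot):
    """Sketch of the usual slot-migration handshake (cf. the Redis
    Cluster spec); the caller migrates the actual keys in between."""
    target.cluster_setslot(source_id, slot, 'IMPORTING')  # on the destination
    source.cluster_setslot(target_id, slot, 'MIGRATING')  # on the origin
    # ... MIGRATE each key stored in `slot` from source to target ...
    source.cluster_setslot(target_id, slot, 'NODE')       # finalize on both ends
    target.cluster_setslot(target_id, slot, 'NODE')
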
Code example #4
File: client.py  Project: udisyue/perf_test
    def cluster_failover(self, node_id, option):
        """
        Forces a slave to perform a manual failover of its master

        Sends to the specified node
        """
        assert option.upper() in ('FORCE', 'TAKEOVER')  # TODO: change this option handling
        return self.execute_command('CLUSTER FAILOVER', Token(option))
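
Usage note: per the Redis documentation, FORCE starts the failover without the usual handshake with the master, and TAKEOVER additionally skips cluster agreement. A hedged sketch, with `rc` as a hypothetical client instance:

def manual_failover(rc, replica_node_id):
    """Force a replica to take over; `rc` talks to the replica's node."""
    rc.cluster_failover(replica_node_id, 'FORCE')     # skip the master handshake
    # ...or, when the cluster cannot reach agreement:
    rc.cluster_failover(replica_node_id, 'TAKEOVER')
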
Code example #5
 def cluster_reset(self, node_id, soft=True):
     """
     Reset a Redis Cluster node.
     If 'soft' is True, the 'SOFT' argument is sent; otherwise 'HARD'.
     """
     return self.execute_command('CLUSTER RESET',
                                 Token('SOFT' if soft else 'HARD'),
                                 node_id=node_id)
Code example #6
    def scan_iter(self, match=None, count=None):
        """
        Make an iterator using the SCAN command so that the client doesn't
        need to remember the cursor position.

        ``match`` allows for filtering the keys by pattern
        ``count`` provides a hint of the minimum number of keys to
        return per call

        Cluster impl:
            SCAN is run against every master node, each with its own
            cursor, and the per-node results are chained together.
        """
        cursors = {}
        nodeData = {}
        for master_node in self.connection_pool.nodes.all_masters():
            cursors[master_node["name"]] = "0"  # seed cursor; _parse_scan yields int cursors on later passes
            nodeData[master_node["name"]] = master_node

        while not all(cursors[node] == 0 for node in cursors):
            for node in cursors:
                if cursors[node] == 0:
                    continue

                conn = self.connection_pool.get_connection_by_node(
                    nodeData[node])

                pieces = ['SCAN', cursors[node]]
                if match is not None:
                    pieces.extend([Token('MATCH'), match])
                if count is not None:
                    pieces.extend([Token('COUNT'), count])

                conn.send_command(*pieces)

                raw_resp = conn.read_response()

                # release the connection back to the pool, otherwise the pool keeps opening new ones
                self.connection_pool.release(conn)
                cur, resp = self._parse_scan(raw_resp)
                cursors[node] = cur

                for r in resp:
                    yield r
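
A usage sketch, assuming `rc` is a cluster client instance exposing the scan_iter above; the caller never sees the per-node cursors:

def count_matching_keys(rc, pattern='session:*'):
    """Count keys matching `pattern` across every master node."""
    total = 0
    for _key in rc.scan_iter(match=pattern, count=1000):
        total += 1
    return total
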
Code example #7
    def qscan(self,
              cursor=0,
              count=0,
              busyloop=False,
              minlen=-1,
              maxlen=-1,
              importrate=-1):
        """
        Iterate the queues of the connected node (Disque QSCAN).

        ``busyloop`` asks the server to block and return everything in
        a single pass; ``minlen``/``maxlen`` filter on queue length and
        ``importrate`` on the job import rate.
        """
        args = ['QSCAN', cursor]
        if busyloop:
            args += [Token('BUSYLOOP')]
        if count > 0:
            args += [Token('COUNT'), count]
        if minlen > -1:
            args += [Token('MINLEN'), minlen]
        if maxlen > -1:
            args += [Token('MAXLEN'), maxlen]
        if importrate > -1:
            args += [Token('IMPORTRATE'), importrate]

        return self.execute_command(*args)
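
A usage sketch paging through QSCAN cursors; `dq` is a hypothetical Disque client exposing the qscan above, and the reply is assumed to be a (cursor, [queue, ...]) pair:

def queues_at_least(dq, threshold):
    """Collect the names of queues holding at least `threshold` jobs."""
    names, cursor = [], 0
    while True:
        cursor, queues = dq.qscan(cursor=cursor, count=128, minlen=threshold)
        names.extend(queues)
        if int(cursor) == 0:
            return names
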
Code example #8
    def pack_command(self, *args):
        "Pack a series of arguments into the Redis protocol"
        output = []
        command = args[0]
        if ' ' in command:
            args = tuple([Token.get_token(s)
                          for s in command.split()]) + args[1:]
        else:
            args = (Token.get_token(command), ) + args[1:]

        buff = SYM_EMPTY.join((SYM_STAR, b(str(len(args))), SYM_CRLF))

        for arg in imap(self.encoder.encode, args):
            if len(buff) > 6000 or len(arg) > 6000:
                buff = SYM_EMPTY.join(
                    (buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF))
                output.append(buff)
                output.append(arg)
                buff = SYM_CRLF
            else:
                buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(arg))),
                                       SYM_CRLF, arg, SYM_CRLF))
        output.append(buff)
        return output
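
This is a later revision of the pack_command from example #1: Token.get_token replaces the bare Token constructor (in later redis-py versions it reuses cached Token instances instead of allocating one per call), encoding is delegated to a dedicated self.encoder object, and command.split() splits the command name on any whitespace rather than only single spaces.
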
Code example #9
    def cluster_reset_all_nodes(self, soft=True):
        """
        Send CLUSTER RESET to all nodes in the cluster

        If 'soft' is True, the 'SOFT' argument is sent; otherwise 'HARD'.

        Sends to all nodes in the cluster
        """
        return [
            self.execute_command(
                'CLUSTER RESET',
                Token('SOFT' if soft else 'HARD'),
                node_id=node['id'],
            ) for node in self.cluster_nodes()
        ]
Code example #10
    def tsrevrangebytime(self, name, tm_low=None, tm_high=None, num=None,
                         time_cast_func=DatetimeToTimestamp(pytz.utc),
                         timestamp_cast_func=TimestampToDatetime(pytz.utc)):
        """
        Returns the entries for the given time range
        from ``tm_low`` to ``tm_high`` in the time series
        that is specified by the given key ``name``.

        The entries are returned in descending order of the times.

        Special bounds -inf (``tm_low`` is None or -inf) and
        +inf (``tm_high`` is None or inf) are also supported to retrieve
        an entire range.

        If ``num`` is specified, then at most ``num`` entries will be fetched.

        ``time_cast_func`` is a callable used to cast the time bounds
        to a timestamp, a 64-bit signed integer (cf. ``tsadd``).

        ``timestamp_cast_func`` is a callable used to cast the timestamp
        return values. It should reflect how timestamps were inserted
        (cf. ``time_cast_func``).
        """
        pieces = ['TSREVRANGEBYTIME', name]
        if tm_low is None or (isinstance(tm_low, float) and isinf(tm_low)):
            pieces.append(m_inf)
        elif time_cast_func is None:
            pieces.append(tm_low)
        else:
            pieces.append(time_cast_func(tm_low))
        if tm_high is None or (isinstance(tm_high, float) and isinf(tm_high)):
            pieces.append(p_inf)
        elif time_cast_func is None:
            pieces.append(tm_high)
        else:
            pieces.append(time_cast_func(tm_high))
        if num is not None:
            pieces.extend([Token.get_token('LIMIT'), num])
        if timestamp_cast_func is None:
            return self.execute_command(*pieces, timestamp_cast_func=int)
        return self.execute_command(*pieces,
                                    timestamp_cast_func=timestamp_cast_func)
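
A usage sketch with the default cast functions, where both bounds are timezone-aware datetimes; `ts` is a hypothetical client instance exposing the tsrevrangebytime above:

import datetime

import pytz

def newest_entries(ts, name):
    """Fetch at most 100 entries from the last hour, newest first."""
    now = datetime.datetime.now(pytz.utc)
    return ts.tsrevrangebytime(name,
                               tm_low=now - datetime.timedelta(hours=1),
                               tm_high=now,
                               num=100)
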
Code example #11
class DisqueAlpha(object):
    """
    Implementation of the Disque client.

    This abstract class provides a Python interface to Disque commands
    and an implementation of the Redis protocol, which Disque speaks.

    Connection and Pipeline derive from this, implementing how the
    commands are sent to and received from the Disque server.
    """

    _job_score = None

    RESPONSE_CALLBACKS = dict_merge(
        string_keys_to_dict('GETJOB', parse_job_resp),
        string_keys_to_dict('QLEN ACKJOB FASTACK', int),
        string_keys_to_dict(
            'ADDJOB', lambda r: six.text_type(six.binary_type(r).decode())),
        {
            'INFO': parse_info,
            'CLIENT GETNAME': lambda r: r and six.text_type(r),
            'CLIENT KILL': bool_ok,
            'CLIENT LIST': parse_client_list,
            'CLIENT SETNAME': bool_ok,
            'CONFIG GET': parse_config_get,
            'CONFIG RESETSTAT': bool_ok,
            'CONFIG SET': bool_ok,
            'CLUSTER NODES': parse_cluster_nodes,
            'HELLO': parse_hello,
            'TIME': parse_time,
        },
        string_keys_to_dict('BGREWRITEAOF', lambda r: True),
    )

    @classmethod
    def from_url(cls, url, **kwargs):
        """
        Return a Disque client object configured from the given URL.

        For example::

            disque://[:password]@localhost:7711
            unix://[:password]@/path/to/socket.sock

        Any additional querystring arguments and keyword arguments will be
        passed along to the ConnectionPool class's initializer. In the case
        of conflicting arguments, querystring arguments always win.
        """
        connection_pool = ConnectionPool.from_url(url, **kwargs)
        return cls(connection_pool=connection_pool)

    def __init__(self,
                 host='localhost',
                 port=7711,
                 password=None,
                 socket_timeout=None,
                 socket_connect_timeout=None,
                 socket_keepalive=None,
                 socket_keepalive_options=None,
                 connection_pool=None,
                 unix_socket_path=None,
                 encoding='utf-8',
                 encoding_errors='strict',
                 decode_responses=False,
                 retry_on_timeout=False,
                 job_origin_ttl_secs=5,
                 record_job_origin=False):
        """
        job_origin_ttl_secs is the number of seconds to store counts of
        incoming jobs. The higher the throughput you're expecting, the lower
        this number should be.
        """
        self.record_job_origin = record_job_origin
        kwargs = {
            'password': password,
            'socket_timeout': socket_timeout,
            'encoding': encoding,
            'encoding_errors': encoding_errors,
            'decode_responses': decode_responses,
            'retry_on_timeout': retry_on_timeout,
            'db': 0,
        }
        # based on input, setup appropriate connection args
        if unix_socket_path is not None:
            kwargs.update({
                'path': unix_socket_path,
                'connection_class': UnixDomainSocketConnection
            })
        else:
            # TCP specific options
            kwargs.update({
                'host': host,
                'port': port,
                'socket_connect_timeout': socket_connect_timeout,
                'socket_keepalive': socket_keepalive,
                'socket_keepalive_options': socket_keepalive_options,
            })

        if not connection_pool:
            connection_pool = ConnectionPool(**kwargs)

        self.response_callbacks = self.__class__.RESPONSE_CALLBACKS.copy()

        self.connection_pool = {'default': connection_pool}
        self.default_node = 'default'

        self._job_score = RollingCounter(ttl_secs=job_origin_ttl_secs)

        self.__connect_cluster(kwargs)

    def __connect_cluster(self, connection_kwargs):
        hi = self.hello()

        self.default_node = bin_to_str(hi['id'][:8])
        self.connection_pool.pop('default')
        for node, ip, port, version in hi['nodes']:
            connection_kwargs.update(dict(host=ip, port=port))
            self.connection_pool[bin_to_str(
                node[:8])] = ConnectionPool(**connection_kwargs)

    def __repr__(self):
        return "%s<%s>" % (type(self).__name__, repr(self.connection_pool))

    def set_response_callback(self, command, callback):
        "Set a custom Response Callback"
        self.response_callbacks[command] = callback

    __read_cmds = {'GETJOB': 0, 'ACKJOB': 0, 'FASTACK': 0}  # membership set: these reads are routed to the busiest origin node

    def _get_connection(self, command_name, **options):
        node = self.default_node
        if self.record_job_origin and command_name in self.__read_cmds:
            node = self._job_score.max(node)

        pool = self.connection_pool.get(node)
        if pool is None:
            pool = self.connection_pool[self.default_node]
            node = self.default_node

        return pool.get_connection(command_name, **options), node

    def _release_connection(self, connection, node):
        return self.connection_pool[node].release(connection)

    def execute_command(self, *args, **options):
        "Execute a command and return a parsed response"
        command_name = args[0]
        connection, node = self._get_connection(command_name, **options)
        try:
            connection.send_command(*args)
            return self.parse_response(connection, command_name, **options)
        except (ConnectionError, TimeoutError) as e:
            connection.disconnect()
            if not connection.retry_on_timeout and isinstance(e, TimeoutError):
                raise
            connection.send_command(*args)
            return self.parse_response(connection, command_name, **options)
        finally:
            self._release_connection(connection, node)

    def parse_response(self, connection, command_name, **options):
        "Parses a response from the Redis server"
        response = connection.read_response()
        if command_name in self.response_callbacks:
            return self.response_callbacks[command_name](response, **options)
        return response

    # SERVER INFORMATION
    def bgrewriteaof(self):
        "Tell the Redis server to rewrite the AOF file from data in memory."
        return self.execute_command('BGREWRITEAOF')

    def client_kill(self, address):
        "Disconnects the client at ``address`` (ip:port)"
        return self.execute_command('CLIENT KILL', address)

    def client_list(self):
        "Returns a list of currently connected clients"
        return self.execute_command('CLIENT LIST')

    def client_getname(self):
        "Returns the current connection name"
        return self.execute_command('CLIENT GETNAME')

    def client_setname(self, name):
        "Sets the current connection name"
        return self.execute_command('CLIENT SETNAME', name)

    def client_pause(self, pause_msec):
        "Suspend processing of commands from clients for ``pause_msec`` milliseconds"
        return self.execute_command('CLIENT PAUSE', pause_msec)

    def config_get(self, pattern="*"):
        "Return a dictionary of configuration based on the ``pattern``"
        return self.execute_command('CONFIG GET', pattern)

    def config_set(self, name, value):
        "Set config item ``name`` with ``value``"
        return self.execute_command('CONFIG SET', name, value)

    def config_resetstat(self):
        "Reset runtime statistics"
        return self.execute_command('CONFIG RESETSTAT')

    def config_rewrite(self):
        "Rewrite config file with the minimal change to reflect running config"
        return self.execute_command('CONFIG REWRITE')

    # Danger: debug commands ahead

    def debug_segfault(self):
        """ Danger: will segfault connected Disque instance"""
        return self.execute_command('DEBUG SEGFAULT')

    def debug_oom(self):
        """ Danger: will OOM connected Disque instance"""
        return self.execute_command('DEBUG OOM')

    def debug_flushall(self):
        return self.execute_command('DEBUG FLUSHALL')

    def debug_loadaof(self):
        return self.execute_command('DEBUG LOADAOF')

    def debug_sleep(self, sleep_secs):
        return self.execute_command('DEBUG SLEEP', sleep_secs)

    def debug_error(self, message):
        return self.execute_command('DEBUG ERROR', message)

    def debug_structsize(self):
        return self.execute_command('DEBUG STRUCTSIZE')

    # Cluster admin commands

    def cluster_meet(self, ip, port):
        return self.execute_command('CLUSTER MEET', ip, port)

    def cluster_nodes(self):
        return self.execute_command('CLUSTER NODES')

    def cluster_saveconfig(self):
        return self.execute_command('CLUSTER SAVECONFIG')

    def cluster_forget(self, node):
        return self.execute_command('CLUSTER FORGET', node)

    def _cluster_reset(self, reset):
        return self.execute_command('CLUSTER RESET', reset)

    def cluster_reset_hard(self):
        return self._cluster_reset(Token('HARD'))

    def cluster_reset_soft(self):
        return self._cluster_reset(Token('SOFT'))

    def cluster_info(self):
        return self.execute_command('CLUSTER INFO')

    def hello(self):
        return self.execute_command('HELLO')

    def info(self, section=None):
        """
        Returns a dictionary containing information about the Disque server

        The ``section`` option can be used to select a specific section
        of information

        Valid section names are:
            SERVER, CLIENTS, MEMORY, JOBS, QUEUES, PERSISTENCE, STATS, CPU
        """
        if section is None:
            return self.execute_command('INFO')
        else:
            return self.execute_command('INFO', section)

    def ping(self):
        "Ping the Redis server"
        return self.execute_command('PING')

    def shutdown(self):
        "Shutdown the server"
        try:
            self.execute_command('SHUTDOWN')
        except ConnectionError:
            # a ConnectionError here is expected
            return
        raise DisqueError("SHUTDOWN seems to have failed.")

    def slowlog_get(self, num=None):
        """
        Get the entries from the slowlog. If ``num`` is specified, get the
        most recent ``num`` items.
        """
        args = ['SLOWLOG GET']
        if num is not None:
            args.append(num)
        return self.execute_command(*args)

    def slowlog_len(self):
        "Get the number of items in the slowlog"
        return self.execute_command('SLOWLOG LEN')

    def slowlog_reset(self):
        "Remove all items in the slowlog"
        return self.execute_command('SLOWLOG RESET')

    def time(self):
        """
        Returns the server time as a 2-item tuple of ints:
        (seconds since epoch, microseconds into this second).
        """
        return self.execute_command('TIME')

    # BASIC JOB COMMANDS

    def addjob(self,
               queue,
               body,
               timeout_ms=0,
               replicate=0,
               delay_secs=0,
               retry_secs=-1,
               ttl_secs=0,
               maxlen=0,
               async=False):  # note: 'async' became a reserved word in Python 3.7; this library targets Python 2
        args = ['ADDJOB', queue, body, timeout_ms]
        if replicate > 0:
            args += [Token('REPLICATE'), replicate]
        if delay_secs > 0:
            args += [Token('DELAY'), delay_secs]
        if retry_secs >= 0:
            args += [Token('RETRY'), retry_secs]
        if ttl_secs > 0:
            args += [Token('TTL'), ttl_secs]
        if maxlen > 0:
            args += [Token('MAXLEN'), maxlen]
        if async:
            args += [Token('ASYNC')]

        return self.execute_command(*args)
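
A round-trip usage sketch under two assumptions: the connection defaults from __init__ above (localhost:7711), and a getjob wrapper around the _job_cmd shown in example #2:

def round_trip(queue='example-queue'):
    """Enqueue a job, fetch it back, and acknowledge it."""
    dq = DisqueAlpha()                         # localhost:7711 by default
    job_id = dq.addjob(queue, 'payload', retry_secs=30)
    jobs = dq.getjob(queue, timeout_ms=1000)   # assumed wrapper over _job_cmd
    for _queue, jid, _body in jobs or []:
        dq.execute_command('ACKJOB', jid)      # ACKJOB responses parse to int
    return job_id
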
Code example #12
 def cluster_reset_soft(self):
     return self._cluster_reset(Token('SOFT'))
Code example #13
 def cluster_reset_hard(self):
     return self._cluster_reset(Token('HARD'))