Example #1
    def pipeline(self, transaction=None, shard_hint=None):
        if shard_hint:
            raise RedisClusterException(
                "shard_hint is deprecated in cluster mode")

        if transaction:
            raise RedisClusterException(
                "transaction is deprecated in cluster mode")

        return PrefixedStrictClusterPipeline(
            connection_pool=self.connection_pool,
            startup_nodes=self.connection_pool.nodes.startup_nodes,
            response_callbacks=self.response_callbacks,
            key_prefix=self.key_prefix,
        )
Example #2
    def inner(self, script, numkeys, *keys_and_args):
        if numkeys < 1:
            raise RedisClusterException(
                " ERROR: eval only works with 1 or more keys when running redis in cluster mode..."
            )

        # verify that the keys all map to the same key hash slot.
        # this will be true if there is only one key, or if all the keys are in the form:
        # A{foo} B{foo} C{foo}
        if len(set([self.keyslot(key)
                    for key in keys_and_args[0:numkeys]])) != 1:
            raise RedisClusterException(
                " ERROR: eval only works if all keys map to the same key slot when running redis in cluster mode..."
            )
        conn = self.get_connection_by_key(keys_and_args[0])
        return func(conn, script, numkeys, *keys_and_args)
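
A minimal usage sketch of the key-slot rule this wrapper enforces (the client construction, key names, and import paths are assumptions; the hash-tag behaviour itself is standard Redis Cluster):

    from rediscluster import RedisCluster, RedisClusterException  # import paths are assumptions

    rc = RedisCluster(startup_nodes=[{"host": "127.0.0.1", "port": 7000}])
    script = "return redis.call('GET', KEYS[1])"

    # All keys share the hash tag {user:1000}, so they map to the same
    # slot and the wrapped eval() is allowed to run.
    rc.set("a{user:1000}", "hello")
    rc.eval(script, 3, "a{user:1000}", "b{user:1000}", "c{user:1000}")

    # Keys without a common hash tag usually land in different slots and
    # trigger the RedisClusterException raised by the check above.
    try:
        rc.eval(script, 2, "a", "b")
    except RedisClusterException:
        pass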
Example #3
 def get_redis_link(self, host, port):
     """
     Open a new connection to a redis server and return the connection object
     """
     try:
         return StrictRedis(host=host, port=port, **self.opt)
     except Exception as e:
         raise RedisClusterException(repr(e))
Example #4
 def close_redis_connection(self, connection):
     """
     Close a redis connection by disconnecting all connections in connection_pool
     """
     try:
         connection.connection_pool.disconnect()
     except Exception as e:
         raise RedisClusterException(
             "Error when closing random connection... {}".format(repr(e)))
Example #5
    def delete(self, *names):
        """
        "Delete a key specified by ``names``"
        """
        if len(names) != 1:
            raise RedisClusterException(
                "deleting multiple keys is not implemented in pipeline command"
            )

        return self.execute_command('DEL', names[0])
Example #6
    def __init__(self,
                 startup_nodes=None,
                 max_connections=32,
                 init_slot_cache=True,
                 **kwargs):
        """
        startup_nodes     --> List of nodes from which initial bootstrapping can be done
        max_connections   --> Maximum number of connections that should be kept open at one time
        **kwargs          --> Extra arguments that will be passed to the StrictRedis instance when it is created
                              (See the official redis-py docs for supported kwargs [https://github.com/andymccurdy/redis-py/blob/master/redis/client.py])
                              Some kwargs are not supported and will raise RedisClusterException
                               - db    (Redis does not support database SELECT in cluster mode)
                               - host  (Redis provides this when bootstrapping the cluster)
                               - port  (Redis provides this when bootstrapping the cluster)
        """
        super(RedisCluster, self).__init__(**kwargs)

        self.startup_nodes = [] if startup_nodes is None else startup_nodes
        self.orig_startup_nodes = [node for node in self.startup_nodes]
        self.max_connections = max_connections
        self.connections = {}
        self.opt = kwargs
        self.refresh_table_asap = False
        self.slots = {}
        self.nodes = []

        if len(self.startup_nodes) == 0:
            raise RedisClusterException("No startup nodes provided")

        # Tweaks to StrictRedis client arguments when running in cluster mode
        if "socket_timeout" not in self.opt:
            self.opt[
                "socket_timeout"] = RedisCluster.RedisClusterDefaultTimeout
        if "db" in self.opt:
            raise RedisClusterException("(error) [Remove 'db' from kwargs]")
        if "host" in self.opt:
            raise RedisClusterException("(error) [Remove 'host' from kwargs]")
        if "port" in self.opt:
            raise RedisClusterException("(error) [Remove 'port' from kwargs]")

        if init_slot_cache:
            self.initialize_slots_cache()
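
A minimal construction sketch based on the docstring above (hosts and ports are placeholders; the import path is an assumption):

    from rediscluster import RedisCluster  # import path is an assumption

    # Bootstrap from one or more known nodes; the client discovers the rest
    # of the cluster and the slot -> node mapping via initialize_slots_cache().
    startup_nodes = [{"host": "127.0.0.1", "port": 7000},
                     {"host": "127.0.0.1", "port": 7001}]
    rc = RedisCluster(startup_nodes=startup_nodes, max_connections=32,
                      socket_timeout=0.5)

    # Passing db, host or port as kwargs raises RedisClusterException,
    # as enforced by the checks above.
    rc.set("foo", "bar")
    print(rc.get("foo"))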
Example #7
 def __get(self, redis_db: RedisCluster, key: str) -> str:
     if not get_redis_availability():
         raise RedisClusterException()
     # noinspection PyBroadException
     try:
         value = self.__get0(redis_db, key)
         set_redis_availability(True)
         return value
     except Exception as e:
         set_redis_availability(False)
         raise e
Example #8
    def pipeline(self, transaction=None, shard_hint=None):
        """
        Cluster impl: Pipelines do not work in cluster mode the same way they do in normal mode.
                      Create a clone of this object so that simulating pipelines will work correctly.
                      Each command is executed directly when issued, and execute() only returns the accumulated result stack.
        """
        if shard_hint:
            raise RedisClusterException(
                "shard_hint is deprecated in cluster mode")

        if transaction:
            raise RedisClusterException(
                "transaction is deprecated in cluster mode")

        return StrictClusterPipeline(
            startup_nodes=self.startup_nodes,
            max_connections=self.max_connections,
            connections=self.connections,
            opt=self.opt,
            refresh_table_asap=self.refresh_table_asap,
            slots=self.slots,
            nodes=self.nodes)
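
A short usage sketch of the simulated pipeline described in the docstring (rc is assumed to be a RedisCluster instance constructed as in Example #6):

    # Each command is sent immediately (there is no MULTI/EXEC); execute()
    # simply returns the accumulated result stack in call order.
    pipe = rc.pipeline()
    pipe.set("counter", 1)
    pipe.incr("counter")
    pipe.get("counter")
    results = pipe.execute()  # e.g. [True, 2, b'2'], depending on decoding

    # pipeline(transaction=True) or a shard_hint would raise
    # RedisClusterException, per the checks above.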
Example #9
def get_connection_from_node_obj(self, node):
    """
    Gets a connection object from a 'node' object
    """
    self.set_node_name(node)
    conn = self.connections.get(node["name"], None)

    if not conn:
        conn = self.get_redis_link(node["host"], int(node["port"]))
        try:
            if conn.ping() is True:
                self.close_existing_connection()
                self.connections[node["name"]] = conn
        except Exception:
            raise RedisClusterException(
                "unable to open new connection to node {0}".format(node))
    return conn
Example #10
 def _execute_transaction(self, connection, commands, raise_on_error):
     raise RedisClusterException(
         "method _execute_transaction() is not implemented")
Example #11
 def inner(*args, **kwargs):
     raise RedisClusterException(
         " ERROR: Calling pipelined function {} is blocked when running redis in cluster mode..."
         .format(func.__name__))
Example #12
 def get_connection_by_key(self, key):
     if not key:
         raise RedisClusterException(
             "No way to dispatch this command to Redis Cluster.")
     return self.get_connection_by_slot(self.keyslot(key))
Example #13
 def flushdb(self):
     raise RedisClusterException(
         "method PrefixedRedisCluster.flushdb() is not implemented")
Example #14
    def initialize_slots_cache(self):
        """
        Initialize the slots cache by asking all startup nodes what the current cluster configuration is

        TODO: Currently the last node queried has the last say about how the configuration is set up.
        Maybe it should stop trying once all slots are correctly covered, or once it reaches one node
        that can execute the CLUSTER SLOTS command.
        """
        # Reset variables
        self.slots = {}
        self.nodes = []

        for node in self.startup_nodes:
            try:
                r = self.get_redis_link_from_node(node)
                cluster_slots = r.execute_command("cluster", "slots")
            except Exception as e:
                print(
                    "ERROR sending 'cluster slots' command to redis server: {}"
                    .format(node))
                raise e

            all_slots_covered = True

            # No need to decode response because StrictRedis should handle that for us...
            for slot in cluster_slots:
                master_node = slot[2]

                # Only store the master node as address for each slot.
                # TODO: Slave nodes have to be fixed/patched in later...
                master_addr = {
                    "host": master_node[0],
                    "port": master_node[1],
                    "name": "{}:{}".format(master_node[0], master_node[1]),
                    "server_type": "master"
                }
                self.nodes.append(master_addr)
                for i in range(int(slot[0]), int(slot[1]) + 1):
                    if i not in self.slots:
                        self.slots[i] = master_addr
                    else:
                        # Validate that 2 nodes want to use the same slot cache setup
                        if self.slots[i] != master_addr:
                            raise RedisClusterException(
                                "startup_nodes could not agree on a valid slots cache. {} vs {}"
                                .format(self.slots[i], master_addr))

                slave_nodes = [slot[i] for i in range(3, len(slot))]
                for slave_node in slave_nodes:
                    slave_addr = {
                        "host": slave_node[0],
                        "port": slave_node[1],
                        "name": "{}:{}".format(slave_node[0], slave_node[1]),
                        "server_type": "slave"
                    }
                    self.nodes.append(slave_addr)

                self.populate_startup_nodes()
                self.refresh_table_asap = False

            # Validate if all slots are covered or if we should try next startup node
            for i in range(0, self.RedisClusterHashSlots):
                if i not in self.slots:
                    all_slots_covered = False

            if all_slots_covered:
                # All slots are covered and application can continue to execute
                return

        if not all_slots_covered:
            raise RedisClusterException(
                "All slots are not covered after querry all startup_nodes. {} of {} covered..."
                .format(len(self.slots), self.RedisClusterHashSlots))
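
For reference, a sketch of the reply shape the loop above expects from CLUSTER SLOTS (addresses are placeholders; newer Redis versions append a node ID to each address entry, which this code ignores):

    # Each entry: [start_slot, end_slot, master, replica, replica, ...]
    # where master/replica are [host, port] pairs.
    cluster_slots = [
        [0, 5460, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
        [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
        [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]],
    ]
    # slot[2] is read as the master for the range and slot[3:] as the
    # slaves, which matches the indexing used in the loop above.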
Example #15
 def script_load_for_pipeline(self, script):
     raise RedisClusterException(
         "method script_load_for_pipeline() is not implemented")
Example #16
 def unwatch(self):
     raise RedisClusterException("method unwatch() is not implemented")
Example #17
 def multi(self):
     raise RedisClusterException("method multi() is not implemented")
Example #18
 def transaction(self, func, *watches, **kwargs):
     raise RedisClusterException(
         "method RedisCluster.transaction() is not implemented")
Example #19
 def immediate_execute_command(self, *args, **options):
     raise RedisClusterException(
         "method immediate_execute_command() is not implemented")
Example #20
    def sort(self,
             name,
             start=None,
             num=None,
             by=None,
             get=None,
             desc=False,
             alpha=False,
             store=None,
             groups=None):
        """Sort and return the list, set or sorted set at ``name``.

        ``start`` and ``num`` allow for paging through the sorted data

        ``by`` allows using an external key to weight and sort the items.
            Use an "*" to indicate where in the key the item value is located

        ``get`` allows for returning items from external keys rather than the
            sorted data itself.  Use an "*" to indicate where in the key
            the item value is located

        ``desc`` allows for reversing the sort

        ``alpha`` allows for sorting lexicographically rather than numerically

        ``store`` allows for storing the result of the sort into
            the key ``store``

        ClusterImpl: A full implementation of the server side sort mechanics because many of the
                     options work on multiple keys that can exist on multiple servers.
        """
        if (start is None and num is not None) or \
           (start is not None and num is None):
            raise RedisError(
                "RedisError: ``start`` and ``num`` must both be specified")
        try:
            data_type = b(self.type(name))

            if data_type == b("none"):
                return []
            elif data_type == b("set"):
                data = list(self.smembers(name))[:]
            elif data_type == b("list"):
                data = self.lrange(name, 0, -1)
            else:
                raise RedisClusterException(
                    "Unable to sort data type : {}".format(data_type))
            if by is not None:
                # _sort_using_by_arg mutates data so we don't
                # need a return value.
                self._sort_using_by_arg(data, by, alpha)
            elif not alpha:
                data.sort(key=self._strtod_key_func)
            else:
                data.sort()
            if desc:
                data = data[::-1]
            if not (start is None and num is None):
                data = data[start:start + num]

            if get:
                data = self._retrive_data_from_sort(data, get)

            if store is not None:
                if data_type == b("set"):
                    self.delete(store)
                    self.rpush(store, *data)
                elif data_type == b("list"):
                    self.delete(store)
                    self.rpush(store, *data)
                else:
                    raise RedisClusterException(
                        "Unable to store sorted data for data type : {}".
                        format(data_type))

                return len(data)

            if groups:
                if not get or isinstance(get, basestring) or len(get) < 2:
                    raise DataError('when using "groups" the "get" argument '
                                    'must be specified and contain at least '
                                    'two keys')
                n = len(get)
                return list(izip(*[data[i::n] for i in range(n)]))
            else:
                return data
        except KeyError:
            return []
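
A brief usage sketch of the client-side sort above (key names, weights and the rc client instance are hypothetical):

    # Sort a list of ids by external weight keys and fetch the matching data
    # values, mirroring server-side SORT ... BY ... GET semantics.
    rc.rpush("mylist", "1", "2", "3")
    for i, (weight, value) in enumerate([(30, "a"), (10, "b"), (20, "c")], 1):
        rc.set("weight_{}".format(i), weight)
        rc.set("data_{}".format(i), value)

    rc.sort("mylist", by="weight_*", get="data_*")  # e.g. ['b', 'c', 'a']
    rc.sort("mylist", desc=True, alpha=True)        # e.g. ['3', '2', '1']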
Example #21
 def load_scripts(self):
     raise RedisClusterException("method load_scripts() is not implemented")
Example #22
                    1: {
                        'db': 1
                    },
                }
            },
            'baz': {
                'is_redis_cluster': True,
                'hosts': {
                    0: {},
                },
            },
        },
    },
)

rc_exception = RedisClusterException('Failed to connect')


class ClusterManagerTestCase(TestCase):
    def test_get(self):
        manager = make_manager()
        assert manager.get('foo') is manager.get('foo')
        assert manager.get('foo') is not manager.get('bar')
        assert manager.get('foo').pool_cls is _shared_pool
        with pytest.raises(KeyError):
            manager.get('invalid')

    @mock.patch('sentry.utils.redis.RetryingStrictRedisCluster')
    def test_specific_cluster(self, cluster):
        manager = make_manager(cluster_type=_RedisCluster)
        assert manager.get('baz') is cluster.return_value
Example #23
 def watch(self, *names):
     raise RedisClusterException("method watch() is not implemented")