Example #1
def loop(rc, reset_last_key=None):
    """
    Regular debug loop that can be used to test how redis behaves during changes in the cluster.
    """
    if reset_last_key:
        rc.set("__last__", 0)

    last = False
    while last is False:
        try:
            last = rc.get("__last__")
            last = 0 if not last else int(last)
            print("starting at foo{0}".format(last))
        except Exception as e:
            print("error {0}".format(e))
            time.sleep(1)

    for i in xrange(last, 1000000000):  # noqa
        try:
            print("SET foo{0} {1}".format(i, i))
            rc.set("foo{0}".format(i), i)
            got = rc.get("foo{0}".format(i))
            print("GET foo{0} {1}".format(i, got))
            rc.set("__last__", i)
        except Exception as e:
            print("error {0}".format(e))

        time.sleep(0.05)
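
A minimal way to drive the debug loop above, assuming redis-py-cluster is installed (the RedisCluster class, startup node, and port below are assumptions, not part of the example):

import time

from rediscluster import RedisCluster  # assumption: redis-py-cluster >= 2.0

# Connect to any node of the cluster; the client discovers the rest.
rc = RedisCluster(startup_nodes=[{"host": "127.0.0.1", "port": "7000"}],
                  decode_responses=True)

# Write foo0, foo1, ... from scratch while resharding or failing over nodes.
loop(rc, reset_last_key=True)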
Example #2
def timeit(rc, iterations=50000):
    """
    Time how long it takes to run a number of SET/GET operations.
    """
    t0 = time.time()
    for i in xrange(0, iterations):  # noqa
        s = "foo{0}".format(i)
        rc.set(s, i)
        rc.get(s)

    t1 = time.time() - t0
    print("{0}k SET/GET operations took: {1} seconds... {2} operations per second".format((iterations / 1000) * 2, t1, (iterations / t1) * 2))
Example #3
    def read_response(self):
        try:
            response = self._buffer.readline()
        except Exception:
            print("SC Debug: race condition; the _buffer has already been released")
            return None
        if not response:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        byte, response = byte_to_chr(response[0]), response[1:]

        if byte not in ('-', '+', ':', '$', '*'):
            raise InvalidResponse("Protocol Error: %s, %s" %
                                  (str(byte), str(response)))

        # server returned an error
        if byte == '-':
            response = nativestr(response)
            error = self.parse_error(response)
            # if the error is a ConnectionError, raise immediately so the user
            # is notified
            if isinstance(error, ConnectionError):
                raise error
            # otherwise, we're dealing with a ResponseError that might belong
            # inside a pipeline response. the connection's read_response()
            # and/or the pipeline's execute() will raise this error if
            # necessary, so just return the exception instance here.
            return error
        # single value
        elif byte == '+':
            pass
        # int value
        elif byte == ':':
            response = long(response)
        # bulk response
        elif byte == '$':
            length = int(response)
            if length == -1:
                return None
            response = self._buffer.read(length)
        # multi-bulk response
        elif byte == '*':
            length = int(response)
            if length == -1:
                return None
            response = [self.read_response() for i in xrange(length)]
        if isinstance(response, bytes):
            response = self.encoder.decode(response)
        return response
Example #4
    def read_response(self):
        response = self._buffer.readline()
        if not response:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        byte, response = byte_to_chr(response[0]), response[1:]

        if byte not in ('-', '+', ':', '$', '*'):
            raise InvalidResponse("Protocol Error: %s, %s" %
                                  (str(byte), str(response)))

        # server returned an error
        if byte == '-':
            response = nativestr(response)
            error = self.parse_error(response)
            # if the error is a ConnectionError, raise immediately so the user
            # is notified
            if isinstance(error, ConnectionError):
                raise error
            # otherwise, we're dealing with a ResponseError that might belong
            # inside a pipeline response. the connection's read_response()
            # and/or the pipeline's execute() will raise this error if
            # necessary, so just return the exception instance here.
            return error
        # single value
        elif byte == '+':
            pass
        # int value
        elif byte == ':':
            response = long(response)
        # bulk response
        elif byte == '$':
            length = int(response)
            if length == -1:
                return None
            response = self._buffer.read(length)
        # multi-bulk response
        elif byte == '*':
            length = int(response)
            if length == -1:
                return None
            response = [self.read_response() for i in xrange(length)]
        if isinstance(response, bytes) and self.encoding:
            response = response.decode(self.encoding)
            response = convertToNumber(response)
        return response
Example #5
def timeit_pipeline(rc, iterations=50000):
    """
    Time how long it takes to run a number of SET/GET operations inside a cluster pipeline.
    """
    t0 = time.time()
    for i in xrange(0, iterations):  # noqa
        s = "foo{0}".format(i)

        p = rc.pipeline()
        p.set(s, i)
        p.get(s)
        p.execute()

    t1 = time.time() - t0
    print("{0}k SET/GET operations inside pipelines took: {1} seconds... {2} operations per second".format(
        (iterations / 1000) * 2, t1, (iterations / t1) * 2)
    )
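
Note that the benchmark above opens a fresh pipeline for every key, so each execute() is still one round trip per SET/GET pair. A batched variant (a sketch, not part of the original code; the batch_size parameter is made up) shows the usual way to amortise the round trips:

def timeit_pipeline_batched(rc, iterations=50000, batch_size=1000):
    """
    Sketch: queue batch_size SET/GET pairs per pipeline before executing.
    """
    t0 = time.time()
    p = rc.pipeline()
    for i in xrange(0, iterations):  # noqa
        s = "foo{0}".format(i)
        p.set(s, i)
        p.get(s)
        if (i + 1) % batch_size == 0:
            p.execute()
            p = rc.pipeline()
    p.execute()
    print("{0} batched SET/GET operations took: {1} seconds".format(iterations * 2, time.time() - t0))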
Example #6
    def rotate_slaves(self):
        "Round-robin slave balancer"
        slaves = self.sentinel_manager.discover_slaves(self.service_name)
        if slaves:
            if self.slave_rr_counter is None:
                self.slave_rr_counter = random.randint(0, len(slaves) - 1)
            for _ in xrange(len(slaves)):
                self.slave_rr_counter = (
                    self.slave_rr_counter + 1) % len(slaves)
                slave = slaves[self.slave_rr_counter]
                yield slave
        # Fall back to the master connection
        try:
            yield self.get_master_address()
        except MasterNotFoundError:
            pass
        raise SlaveNotFoundError('No slave found for %r' % (self.service_name))
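
For context, rotate_slaves() is the balancer behind redis-py's Sentinel connection pool; applications normally consume it indirectly through slave_for(). A usage sketch, with the Sentinel address and the service name 'mymaster' chosen for illustration:

from redis.sentinel import Sentinel

sentinel = Sentinel([('127.0.0.1', 26379)], socket_timeout=0.5)

# Reads go to a replica picked by rotate_slaves(); writes go to the master.
master = sentinel.master_for('mymaster', socket_timeout=0.5)
replica = sentinel.slave_for('mymaster', socket_timeout=0.5)

master.set('foo', 'bar')
print(replica.get('foo'))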
Example #7
def timeit_pipeline(rc, iterations=50000):
    """
    Time how long it takes to run a number of SET/GET operations inside a cluster pipeline.
    """
    t0 = time.time()
    for i in xrange(0, iterations):  # noqa
        s = "foo{0}".format(i)

        p = rc.pipeline()
        p.set(s, i)
        p.get(s)
        p.execute()

    t1 = time.time() - t0
    print("{0}k SET/GET operations inside pipelines took: {1} seconds... {2} operations per second".format(
        (iterations / 1000) * 2, t1, (iterations / t1) * 2)
    )
Example #8
    def rotate_slaves(self):
        "Round-robin slave balancer"
        slaves = self.sentinel_manager.discover_slaves(self.service_name)
        if slaves:
            if self.slave_rr_counter is None:
                self.slave_rr_counter = random.randint(0, len(slaves) - 1)
            for _ in xrange(len(slaves)):
                self.slave_rr_counter = (self.slave_rr_counter +
                                         1) % len(slaves)
                slave = slaves[self.slave_rr_counter]
                yield slave
        # Fall back to the master connection
        try:
            yield self.get_master_address()
        except MasterNotFoundError:
            pass
        raise SlaveNotFoundError('No slave found for %r' % (self.service_name))
Example #9
    def read_response(self):
        response = self.read()
        if not response:
            raise ConnectionError("Socket closed on remote end")

        byte, response = byte_to_chr(response[0]), response[1:]

        if byte not in ('-', '+', ':', '$', '*'):
            raise InvalidResponse("Protocol Error: %s, %s" %
                                  (str(byte), str(response)))

        # server returned an error
        if byte == '-':
            response = nativestr(response)
            error = self.parse_error(response)
            # if the error is a ConnectionError, raise immediately so the user
            # is notified
            if isinstance(error, ConnectionError):
                raise error
            # otherwise, we're dealing with a ResponseError that might belong
            # inside a pipeline response. the connection's read_response()
            # and/or the pipeline's execute() will raise this error if
            # necessary, so just return the exception instance here.
            return error
        # single value
        elif byte == '+':
            pass
        # int value
        elif byte == ':':
            response = long(response)
        # bulk response
        elif byte == '$':
            length = int(response)
            if length == -1:
                return None
            response = self.read(length)
        # multi-bulk response
        elif byte == '*':
            length = int(response)
            if length == -1:
                return None
            response = [self.read_response() for i in xrange(length)]
        if isinstance(response, bytes) and self.encoding:
            response = response.decode(self.encoding)
        return response
Example #10
    def _execute_pipeline(self, connection, commands, raise_on_error):
        """
        Code borrowed from StrictRedis so it can be fixed
        """
        # build up all commands into a single request to increase network perf
        if self.explicit_transaction:
            # wrap the queued commands in MULTI / EXEC for an explicit transaction
            cmds = chain([(('MULTI', ), {})], commands, [(('EXEC', ), {})])
            all_cmds = connection.pack_commands([args for args, _ in cmds])
        else:
            all_cmds = connection.pack_commands([args for args, _ in commands])

        try:
            connection.send_packed_command(all_cmds)
        except ConnectionError as e:
            return [e for _ in xrange(len(commands))]  # noqa

        response = []

        if self.explicit_transaction:
            # Get rid of multi values (OK, QUEUED...)
            self.parse_response(connection, "_")
            for _, _ in enumerate(commands):
                self.parse_response(connection, "_")

            exec_response = self.parse_response(connection, "_")
            if exec_response is None:
                raise WatchError("Watched variable changed.")
            for r, cmd in izip(exec_response, commands):
                response.append(r)
            self.explicit_transaction = False
            self.watching = False
        else:
            for args, options in commands:
                try:
                    response.append(self.parse_response(connection, args[0], **options))
                except (ConnectionError, ResponseError):
                    response.append(sys.exc_info()[1])

        if raise_on_error:
            self.raise_first_error(commands, response)

        return response
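
For reference, the MULTI / EXEC framing built above is plain RESP once packed; for an explicit transaction wrapping a single SET, the byte stream sent over the wire looks roughly like this (hand-written illustration, not output captured from the library):

# RESP encoding of MULTI; SET foo 1; EXEC
packed = (b"*1\r\n$5\r\nMULTI\r\n"
          b"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$1\r\n1\r\n"
          b"*1\r\n$4\r\nEXEC\r\n")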
Example #11
    def read_response(self):
        response = self.read()
        if not response:
            raise ConnectionError("Socket closed on remote end")

        byte, response = byte_to_chr(response[0]), response[1:]

        if byte not in ('-', '+', ':', '$', '*'):
            raise InvalidResponse("Protocol Error")

        # server returned an error
        if byte == '-':
            if nativestr(response).startswith('LOADING '):
                # if we're loading the dataset into memory, kill the socket
                # so we re-initialize (and re-SELECT) next time.
                raise ConnectionError("Redis is loading data into memory")
            # if the error starts with ERR, trim that off
            if nativestr(response).startswith('ERR '):
                response = response[4:]
            # *return*, not raise the exception class. if it is meant to be
            # raised, it will be at a higher level.
            return ResponseError(response)
        # single value
        elif byte == '+':
            pass
        # int value
        elif byte == ':':
            response = long(response)
        # bulk response
        elif byte == '$':
            length = int(response)
            if length == -1:
                return None
            response = self.read(length)
        # multi-bulk response
        elif byte == '*':
            length = int(response)
            if length == -1:
                return None
            response = [self.read_response() for i in xrange(length)]
        if isinstance(response, bytes) and self.encoding:
            response = response.decode(self.encoding)
        return response
Example #12
    def read_response(self):
        try:
            response = yield self._stream.read_until(SYM_CRLF)
        except StreamClosedError:
            raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
        response = response[:-2]
        if not response:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
        byte, response = byte_to_chr(response[0]), response[1:]
        if byte not in ('-', '+', ':', '$', '*'):
            raise InvalidResponse("Protocol Error: %s, %s" %
                                  (str(byte), str(response)))

        if byte == '-':
            response = nativestr(response)
            error = self.parse_error(response)
            if isinstance(error, ConnectionError):
                raise error
            raise gen.Return(error)
        elif byte == '+':
            pass
        elif byte == ':':
            response = long(response)
        elif byte == '$':
            length = int(response)
            if length == -1:
                raise gen.Return(None)
            response = yield self._stream.read_bytes(
                length + 2)  # make sure to read the '\r\n'
            response = response[:-2]
        elif byte == '*':
            length = int(response)
            if length == -1:
                raise gen.Return(None)
            response = []
            for i in xrange(length):
                part = yield self.read_response()
                response.append(part)
        if isinstance(response, bytes):
            response = self.encoder.decode(response)
        raise gen.Return(response)
Example #13
    def read_response(self):
        response = self.read()
        if not response:
            raise ConnectionError("Socket closed on remote end")

        byte, response = byte_to_chr(response[0]), response[1:]

        if byte not in ("-", "+", ":", "$", "*"):
            raise InvalidResponse("Protocol Error")

        # server returned an error
        if byte == "-":
            response = nativestr(response)
            if response.startswith("LOADING "):
                # if we're loading the dataset into memory, kill the socket
                # so we re-initialize (and re-SELECT) next time.
                raise ConnectionError("Redis is loading data into memory")
            # *return*, not raise the exception class. if it is meant to be
            # raised, it will be at a higher level.
            return self.parse_error(response)
        # single value
        elif byte == "+":
            pass
        # int value
        elif byte == ":":
            response = long(response)
        # bulk response
        elif byte == "$":
            length = int(response)
            if length == -1:
                return None
            response = self.read(length)
        # multi-bulk response
        elif byte == "*":
            length = int(response)
            if length == -1:
                return None
            response = [self.read_response() for i in xrange(length)]
        if isinstance(response, bytes) and self.encoding:
            response = response.decode(self.encoding)
        return response
Example #14
    def _execute_pipeline(self, connection, commands, raise_on_error):
        """
        Code borrowed from StrictRedis so it can be fixed
        """
        # build up all commands into a single request to increase network perf
        all_cmds = connection.pack_commands([args for args, _ in commands])
        try:
            connection.send_packed_command(all_cmds)
        except ConnectionError as e:
            return [e for _ in xrange(len(commands))]  # noqa

        response = []
        for args, options in commands:
            try:
                response.append(self.parse_response(connection, args[0], **options))
            except (ConnectionError, ResponseError):
                response.append(sys.exc_info()[1])

        if raise_on_error:
            self.raise_first_error(commands, response)
        return response
Example #15
    def _execute_pipeline(self, connection, commands, raise_on_error):
        """
        Code borrowed from StrictRedis so it can be fixed
        """
        # build up all commands into a single request to increase network perf
        all_cmds = connection.pack_commands([args for args, _ in commands])
        try:
            connection.send_packed_command(all_cmds)
        except ConnectionError as e:
            return [e for _ in xrange(len(commands))]  # noqa

        response = []
        for args, options in commands:
            try:
                response.append(
                    self.parse_response(connection, args[0], **options))
            except (ConnectionError, ResponseError):
                response.append(sys.exc_info()[1])

        if raise_on_error:
            self.raise_first_error(commands, response)
        return response
Example #16
    def read_response(self):
        response = self.read()
        if not response:
            raise ConnectionError("Socket closed on remote end")

        byte, response = byte_to_chr(response[0]), response[1:]

        if byte not in ('-', '+', ':', '$', '*'):
            raise InvalidResponse("Protocol Error")

        # server returned an error
        if byte == '-':
            response = nativestr(response)
            # *return*, not raise the exception class. if it is meant to be
            # raised, it will be at a higher level.
            return self.parse_error(response)
        # single value
        elif byte == '+':
            pass
        # int value
        elif byte == ':':
            response = long(response)
        # bulk response
        elif byte == '$':
            length = int(response)
            if length == -1:
                return None
            response = self.read(length)
        # multi-bulk response
        elif byte == '*':
            length = int(response)
            if length == -1:
                return None
            response = [self.read_response() for i in xrange(length)]
        if isinstance(response, bytes) and self.encoding:
            response = response.decode(self.encoding)
        return response
Example #17
from redis._compat import xrange
from others.redis.cluster import rc
from others.redis.cluster import current_milli_time

s = current_milli_time()
for i in xrange(1000000):
    d = str(i)
    rc.set(d, d)
    rc.incrby(d, 1)

print(current_milli_time() - s)


# 1824127 ms
# 1482678 ms with 32 connections
Example #18
def publisher(n):
    time.sleep(1)
    for i in xrange(n):
        conn.publish('channel', i)
        time.sleep(1)
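
The matching consumer for this publisher could look like the sketch below, assuming the same redis-py connection object `conn`:

def subscriber():
    pubsub = conn.pubsub()
    pubsub.subscribe('channel')
    for message in pubsub.listen():
        # the first message is the subscribe confirmation; after that, published data
        if message['type'] == 'message':
            print(message['data'])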
Example #19
from redis._compat import xrange
from others.redis.cluster import rc
from others.redis.cluster import current_milli_time

s = current_milli_time()
for i in xrange(1000000):
    d = str(i)
    rc.set(d, d)
    rc.incrby(d, 1)

print(current_milli_time() - s)

# 1824127 ms
# 1482678 ms with 32 connections
Example #20
def get_online_users():
    current = int(time.time()) // 60
    minutes = xrange(app.config['ONLINE_LAST_MINUTES'])
    return redis.sunion(['online-users/%d' % (current - x) for x in minutes])
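
get_online_users() only reads; the write side of this pattern usually records activity into the current minute's set. A sketch of the counterpart, assuming the same `app`, `redis`, and config key (the key names below are illustrative):

def mark_online(user_id):
    now = int(time.time())
    expires = now + (app.config['ONLINE_LAST_MINUTES'] * 60) + 10
    all_users_key = 'online-users/%d' % (now // 60)
    user_key = 'user-activity/%s' % user_id
    p = redis.pipeline()
    p.sadd(all_users_key, user_id)
    p.set(user_key, now)
    p.expireat(all_users_key, expires)
    p.expireat(user_key, expires)
    p.execute()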
Example #21
    def read_response(self):
        '''
        Reads one line from the wire, and interprets it.
        Example: the acknowledgment to an unsubscribe
        from topic myTopic on the wire looks like this:
        
             *3\r\n$11\r\nUNSUBSCRIBE\r\n$7\r\nmyTopic\r\n:1\r\n
             
        *3    # three items to follow
        $11   # string of 11 chars
        UNSUBSCRIBE
        $7    # string of 7 chars
        myTopic
        :1    # one topic subscribed to now
        
        Each line will cause a recursive call to this method
        (see elif byte == '*' below).
        
        Simpler calls will be individual elements, such
        as ':12', which returns the integer 12.
        
        These are the possible prefixes; each item
        is followed by a \r\n, which is stripped
        by SocketLineReader:
        
            +<str>    simple string
            :<int>    integer
            $<n>    string of length <n>
            *<num>    start of array with <num> elements

        When the message to parse is the acknowledgment of
        a SUBSCRIBE or UNSUBSCRIBE command, this method
        will set() event self.subscribeAckEvent/self.unsubscribeAckEvent.

        :return: response string
        :rtype: string
        '''
        response = self._buffer.readline()
        if not response:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        byte, response = byte_to_chr(response[0]), response[1:]

        if byte not in ('-', '+', ':', '$', '*'):
            raise InvalidResponse("Protocol Error: %s, %s" %
                                  (str(byte), str(response)))

        # server returned an error
        if byte == '-':
            response = nativestr(response)
            error = self.parse_error(response)
            # if the error is a ConnectionError, raise immediately so the user
            # is notified
            if isinstance(error, ConnectionError):
                raise error
            # otherwise, we're dealing with a ResponseError that might belong
            # inside a pipeline response. the connection's read_response()
            # and/or the pipeline's execute() will raise this error if
            # necessary, so just return the exception instance here.
            return error
        # simple-string: response holds result:
        elif byte == '+':
            pass
        # int value
        elif byte == ':':
            response = long(response)
        # bulk response
        elif byte == '$':
            length = int(response)
            if length == -1:
                # Null string:
                return None
            response = self._buffer.read(length)

        # multi-bulk response
        elif byte == '*':
            length = int(response)
            if length == -1:
                return None
            response = [self.read_response() for _ in xrange(length)]
        if isinstance(response, bytes) and self.encoding:
            response = response.decode(self.encoding)
        #***********
        #print('Response: %s' % byte + '|' + str(response))
        #***********

        return response
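
To make the docstring's wire example concrete, here is a tiny standalone parser (a simplified sketch, separate from the class above) applied to the documented UNSUBSCRIBE acknowledgment:

def parse_resp(lines):
    # lines: RESP lines with the trailing \r\n already stripped,
    # mirroring what SocketLineReader hands to read_response() above
    line = lines.pop(0)
    prefix, rest = line[0], line[1:]
    if prefix == '+':    # simple string
        return rest
    if prefix == ':':    # integer
        return int(rest)
    if prefix == '$':    # bulk string: the next line is the payload
        return None if int(rest) == -1 else lines.pop(0)
    if prefix == '*':    # array: recurse once per element
        return [parse_resp(lines) for _ in range(int(rest))]
    raise ValueError("unexpected prefix %r" % prefix)

frame = "*3\r\n$11\r\nUNSUBSCRIBE\r\n$7\r\nmyTopic\r\n:1\r\n"
print(parse_resp(frame.split("\r\n")[:-1]))  # ['UNSUBSCRIBE', 'myTopic', 1]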
Example #22
    def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=True):
        """
        Send a bunch of cluster commands to the redis cluster.

        `allow_redirections`: whether the pipeline should follow `ASK` & `MOVED` responses
        automatically. If set to False, a RedisClusterException is raised instead.
        """
        if self.refresh_table_asap:
            self.connection_pool.nodes.initialize()
            self.refresh_table_asap = False

        ttl = self.RedisClusterRequestTTL

        # this is where we store all the responses to the pipeline execute.
        # the actual response is a list, not a dict, but because we may be getting
        # the responses for commands out of order or sometimes retrying them it is important
        # that we have a way to key the responses by the order they came in so that we can return
        # the responses as if we did them sequentially.
        response = {}

        # `attempt` corresponds to the sequence number of commands still left to process or retry.
        # initially this corresponds exactly to the number of commands we need to run, and if all goes
        # well, that's where it'll end. Everything will be attempted once, and we end up with an empty
        # array of commands left to process. But if we need to retry, `attempt` gets repopulated with
        # the sequence number of the command that is being retried.
        attempt = xrange(0, len(stack)) if stack else []  # noqa

        # there are different types of retries. redis cluster says if it responds with an ASK error,
        # you need to handle it differently than a moved error. And if we hit a connection error, we
        # don't really know what to do for that command so we pick a random other node in the cluster.
        ask_retry = {}
        conn_retry = {}

        # as long as we have commands left to attempt and we haven't overrun the max attempts, keep trying.
        while attempt and ttl > 0:

            # decrement our counter for number of attempts left before giving up.
            ttl -= 1

            # each time we go through this we need to separate out the commands by node.
            node_commands = {}

            # build a map of the node objects, keyed by node name, that we need to contact
            nodes = {}

            # as we move through each command that still needs to be processed,
            # we figure out the slot number that command maps to, then from the slot determine the node.
            slots = set()
            for i in attempt:
                c = stack[i]
                slot = self._determine_slot(*c[0])
                slots.add(slot)
                if self.explicit_transaction and len(slots) > 1:
                    raise WatchError("Cannot run explicit transaction against more than 1 hashslot, keys: {}."
                                     .format([str(c[0][1]) for c in stack]))

                # normally we just refer to our internal node -> slot table that tells us where a given
                # command should route to.
                # but if we are retrying, the cluster could have told us we were wrong or the node was down.
                # in that case, we have to try something that contradicts our rules.
                if i in ask_retry:
                    node = ask_retry[i]
                elif i in conn_retry:
                    node = conn_retry[i]
                else:
                    if self.refresh_table_asap:  # MOVED
                        node = self.connection_pool.get_master_node_by_slot(slot)
                    else:
                        node = self.connection_pool.get_node_by_slot(slot)

                # little hack to make sure the node name is populated. probably could clean this up.
                self.connection_pool.nodes.set_node_name(node)

                # now that we know the name of the node ( it's just a string in the form of host:port )
                # we can build a list of commands for each node.
                node_name = node['name']
                nodes[node_name] = node
                node_commands.setdefault(node_name, {})
                node_commands[node_name][i] = c

            # Get one connection at a time from the pool and basically copy the logic from
            #  _execute_pipeline() below and do what a normal pipeline does.

            # now that we've split out the commands by the node we plan to send them to,
            # we can reset the commands we'll attempt next time around back to nothing.
            # when we process the response, any commands that need to be retried because of
            # connection errors, MOVED errors, or ASKING errors will be dumped into here.
            # most of the time this array will just stay empty.
            attempt = []

            # only use threads when it makes sense performance-wise to do so.
            # if you are doing an ask_retry, explicitly disable it.
            # makes it easier to define the logic of how to do the ASKING command,
            # and in practice, the server will only be moving one slot at a time,
            # so there will only be one server that will be receiving these ASKING retries
            # anyway.
            if self.use_threads and not ask_retry and len(node_commands) > 1:

                # for each node we query, we need to have one worker.
                # that way all pipelined commands are issued in parallel.
                # this could be a problem if you have hundreds of nodes in your cluster.
                # We should really refactor this to be a thread pool of workers, and allocate up to a
                # certain max number of threads.
                workers = dict()

                # allocate all of the redis connections from the connection pool.
                # each connection gets passed into its own thread so it can query each node in parallel.
                connections = {
                    node_name: self.connection_pool.get_connection_by_node(nodes[node_name])
                    for node_name in node_commands
                }

                # iterate through each set of commands and pass them into a worker thread so
                # it can be executed in its own socket connection from the redis connection pool
                # in parallel.
                try:

                    for node_name in node_commands:
                        node = nodes[node_name]
                        # build the list of commands to be passed to a particular node.
                        # we have to do this on each attempt, because the cluster may respond to us
                        # that a command for a given slot actually should be routed to a different node.
                        cmds = [node_commands[node_name][i] for i in sorted(node_commands[node_name].keys())]

                        # pass all the commands bound for a particular node into a thread worker object
                        # along with the redis connection needed to run the commands and parse the response.
                        workers[node_name] = ThreadedPipelineExecute(
                            execute=self._execute_pipeline,
                            conn=connections[node_name],
                            cmds=cmds)

                        workers[node_name].start()

                    # now that all the queries are running against all the nodes,
                    # wait for all of them to come back so we can parse the responses.
                    for node_name, worker in workers.items():
                        worker.join()

                        # if the worker hit an exception this is really bad.
                        # that means something completely unexpected happened.
                        # we have to assume the worst and assume that all the calls against
                        # that particular node in the cluster failed and will need to be retried.
                        # maybe that isn't a safe assumption?
                        if worker.exception:
                            for i in node_commands[node_name].keys():
                                response[i] = worker.exception
                        else:
                            # we got a valid response back from redis.
                            # map each response based on the sequence of the original request stack.
                            # some of these responses may represent redis connection or ask errors etc.
                            for i, v in zip(sorted(node_commands[node_name].keys()), worker.value):
                                response[i] = v
                finally:
                    # don't need our threads anymore.
                    # explicitly remove them from the current namespace so they can be garbage collected.
                    del workers

                    # release all of the redis connections we allocated earlier back into the connection pool.
                    for conn in connections.values():
                        self.connection_pool.release(conn)
                    del connections
            else:
                # if we got here, it's because threading is disabled explicitly, or
                # all the commands map to a single node so we don't need to use different threads to
                # issue commands in parallel.

                # first, we need to keep track of all the commands and what responses they map to.
                # this is because we need to interject ASKING commands into the mix. I thought of a little
                # hack to map these responses back to None instead of the integer sequence id that was the
                # position number of the command issued in the stack of command requests at the point pipeline
                # execute was issued.
                track_cmds = {}

                # send the commands in sequence.
                for node_name in node_commands:
                    node = nodes[node_name]
                    cmds = []
                    track_cmds[node_name] = []

                    # we've got the commands we need to run for each node,
                    # sort them to make sure that they are executed in the same order
                    # they came in on the stack otherwise it changes the logic.
                    # we make no guarantees about the order of execution of the commands run
                    # except that we are sure we will always process the commands for a given key
                    # in a sequential order. If we get an error from the server about a given key,
                    # that will apply the same for all commands on that key (MOVED, ASKING, etc)
                    # so we will be resending all of the commands for that key to a new node.
                    for i in sorted(node_commands[node_name].keys()):
                        if i in ask_retry:
                            # put in our fake stub placeholder for the response.
                            track_cmds[node_name].append(None)
                            cmds.append((['ASKING'], {}))

                        # keep track of the sequence number and the mapping of actual commands
                        # sent to the node. (ASKING SCREWS EVERYTHING UP!!!!!)
                        track_cmds[node_name].append(i)
                        cmds.append(node_commands[node_name][i])

                    # allocate a connection from the connection pool and send the commands for each node
                    # as a packed single network request. Since we aren't using threads here, we are
                    # only able to send each request sequentially and block, waiting for the response.
                    # After we get the response to one connection, we move on to the next.
                    with by_node_context(self.connection_pool, node) as connection:
                        result = zip(
                            track_cmds[node_name],
                            self._execute_pipeline(connection, cmds, False))

                        # map the response from the connection to the commands we were running.
                        for i, v in result:

                            # remember None is a shim value used above as a placeholder for the ASKING
                            # command. That response is just `OK` and we don't care about that.
                            # throw it away.
                            # Map the response here to the original sequence of commands in the stack
                            # sent to pipeline.
                            if i is not None:
                                response[i] = v

            ask_retry = {}
            conn_retry = {}

            # now that we have tried to execute all the commands let's see what we have left.
            for i, v in response.items():
                # if the response isn't an exception it is a valid response from the node
                # we're all done with that command, YAY!
                # if we move beyond this point, we've run into problems and we need to retry the command.
                if not isinstance(v, Exception):
                    continue

                # connection errors are tricky because most likely we routed to the right node but it is
                # down. In that case, the best we can do is randomly try another node in the cluster
                # and hope that it tells us to try that node again with a MOVED error or tells us the new
                # master.
                if isinstance(v, ConnectionError):
                    conn_retry[i] = random.choice(self.startup_nodes)
                    attempt.append(i)

                    # if we are stuck in a retry loop, slow things down a bit to give the failover
                    # a chance of actually happening.
                    if ttl < self.RedisClusterRequestTTL / 2:
                        time.sleep(0.1)
                    continue

                # If cluster is down it should be raised and bubble up to
                # utils.clusterdown_wrapper()
                if isinstance(v, ClusterDownError):
                    self.connection_pool.disconnect()
                    self.connection_pool.reset()
                    self.refresh_table_asap = True

                    raise v

                # A MOVED response from the cluster means that somehow we were misinformed about which node
                # a given key slot maps to. This can happen during cluster resharding, during master-slave
                # failover, or if we got a connection error and were forced to re-attempt the command against a
                # random node.
                if isinstance(v, MovedError):
                    # Do not perform full cluster refresh on every MOVED error
                    self.reinitialize_counter += 1

                    if self.reinitialize_counter % self.reinitialize_steps == 0:
                        self.refresh_table_asap = True

                    node = self.connection_pool.nodes.set_node(v.host, v.port, server_type='master')
                    self.connection_pool.nodes.slots[v.slot_id][0] = node
                    attempt.append(i)
                    self._fail_on_redirect(allow_redirections)

                # an ASK error from the server means that only this specific command needs to be tried against
                # a different server (not every key in the slot). This happens only during cluster re-sharding
                # and is a major pain in the ass. For it to work correctly, we have to resend the command to
                # the new node but only after first sending an ASKING command immediately beforehand.
                elif isinstance(v, AskError):
                    node = self.connection_pool.nodes.set_node(v.host, v.port, server_type='master')
                    ask_retry[i] = node
                    attempt.append(i)
                    self._fail_on_redirect(allow_redirections)

        # YAY! we made it out of the attempt loop.
        # turn the response back into a simple flat array that corresponds
        # to the sequence of commands issued in the stack in pipeline.execute()
        response = [response[k] for k in sorted(response.keys())]

        if raise_on_error:
            self.raise_first_error(stack, response)

        return response
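
At the call site, none of this routing is visible; the cluster pipeline is used like a plain redis-py pipeline. A usage sketch, assuming a redis-py-cluster client named `rc`:

p = rc.pipeline()
p.set("foo", 1)
p.incr("foo")
p.get("foo")
results = p.execute()  # execute() hands the queued commands to send_cluster_commands() above
print(results)         # e.g. [True, 2, '2'] depending on decode_responses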
Example #23
    def read_response(self):
        '''
        Reads one line from the wire, and interprets it.
        Example: the acknowledgment to an unsubscribe
        from topic myTopic on the wire looks like this:
        
             *3\r\n$11\r\nUNSUBSCRIBE\r\n$7\r\nmyTopic\r\n:1\r\n
             
        *3    # three items to follow
        $11   # string of 11 chars
        UNSUBSCRIBE
        $7    # string of 7 chars
        myTopic
        :1    # one topic subscribed to now
        
        Each line will cause a recursive call to this method
        (see elif byte == '*' below).
        
        Simpler calls will be individual elements, such
        as ':12', which returns the integer 12.
        
        These are the possible prefixes; each item
        is followed by a \r\n, which is stripped
        by SocketLineReader:
        
            +<str>    simple string
            :<int>    integer
            $<n>    string of length <n>
            *<num>    start of array with <num> elements

        When the message to parse is the acknowledgment of
        a SUBSCRIBE or UNSUBSCRIBE command, this method
        will set() event self.subscribeAckEvent/self.unsubscribeAckEvent.

        :return: response string
        :rtype: string
        '''
        response = self._buffer.readline()
        if not response:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        byte, response = byte_to_chr(response[0]), response[1:]

        if byte not in ('-', '+', ':', '$', '*'):
            raise InvalidResponse("Protocol Error: %s, %s" %
                                  (str(byte), str(response)))

        # server returned an error
        if byte == '-':
            response = nativestr(response)
            error = self.parse_error(response)
            # if the error is a ConnectionError, raise immediately so the user
            # is notified
            if isinstance(error, ConnectionError):
                raise error
            # otherwise, we're dealing with a ResponseError that might belong
            # inside a pipeline response. the connection's read_response()
            # and/or the pipeline's execute() will raise this error if
            # necessary, so just return the exception instance here.
            return error
        # simple-string: response holds result:
        elif byte == '+':
            pass
        # int value
        elif byte == ':':
            response = long(response)
        # bulk response
        elif byte == '$':
            length = int(response)
            if length == -1:
                # Null string:
                return None
            response = self._buffer.read(length)
                        
        # multi-bulk response
        elif byte == '*':
            length = int(response)
            if length == -1:
                return None
            response = [self.read_response() for _ in xrange(length)]
        if isinstance(response, bytes) and self.encoding:
            response = response.decode(self.encoding)
        #***********
        #print('Response: %s' % byte + '|' + str(response))
        #***********
                
        return response
Example #24
    def send_cluster_commands(self,
                              stack,
                              raise_on_error=True,
                              allow_redirections=True):
        """
        Send a bunch of cluster commands to the redis cluster.

        `allow_redirections`: whether the pipeline should follow `ASK` & `MOVED` responses
        automatically. If set to False, a RedisClusterException is raised instead.
        """
        if self.refresh_table_asap:
            self.connection_pool.nodes.initialize()
            self.refresh_table_asap = False

        ttl = self.RedisClusterRequestTTL
        response = {}
        attempt = xrange(0, len(stack)) if stack else []  # noqa
        ask_slots = {}

        while attempt and ttl > 0:
            ttl -= 1
            node_commands = {}
            nodes = {}

            # Keep this section so that we can determine what nodes to contact
            for i in attempt:
                c = stack[i]
                slot = self._determine_slot(*c[0], **c[1])
                if slot in ask_slots:
                    node = ask_slots[slot]
                else:
                    node = self.connection_pool.nodes.slots[slot]

                self.connection_pool.nodes.set_node_name(node)
                node_name = node['name']
                nodes[node_name] = node
                node_commands.setdefault(node_name, {})
                node_commands[node_name][i] = c

            # Get one connection at a time from the pool and basically copy the logic from
            #  _execute_pipeline() below and do what a normal pipeline does.

            attempt = []
            if self.use_threads and len(node_commands) > 1:
                workers = dict()
                for node_name in node_commands:
                    node = nodes[node_name]
                    cmds = [
                        node_commands[node_name][i]
                        for i in sorted(node_commands[node_name].keys())
                    ]
                    with by_node_context(self.connection_pool,
                                         node) as connection:
                        workers[node_name] = Worker(self._execute_pipeline,
                                                    connection, cmds, False)
                        workers[node_name].start()

                for node_name, worker in workers.items():
                    worker.join()
                    if worker.exception:
                        for i in sorted(node_commands[node_name].keys()):
                            response[i] = worker.exception
                    else:
                        for i, v in zip(
                                sorted(node_commands[node_name].keys()),
                                worker.value):
                            response[i] = v
                del workers
            else:
                for node_name in node_commands:
                    node = nodes[node_name]
                    cmds = [
                        node_commands[node_name][i]
                        for i in sorted(node_commands[node_name].keys())
                    ]
                    with by_node_context(self.connection_pool,
                                         node) as connection:
                        result = zip(
                            sorted(node_commands[node_name].keys()),
                            self._execute_pipeline(connection, cmds, False))
                        for i, v in result:
                            response[i] = v

            ask_slots = {}
            for i, v in response.items():
                if not isinstance(v, Exception):
                    continue

                if isinstance(v, ConnectionError):
                    ask_slots[self.connection_pool.nodes.keyslot(
                        stack[i][0][1])] = random.choice(self.startup_nodes)
                    attempt.append(i)
                    if ttl < self.RedisClusterRequestTTL / 2:
                        time.sleep(0.1)
                    continue

                errv = RedisCluster._exception_message(v)
                if errv is None:
                    continue

                if errv.startswith('CLUSTERDOWN'):
                    self.connection_pool.disconnect()
                    self.connection_pool.reset()
                    self.refresh_table_asap = True
                    raise ClusterDownException()

                redir = self.parse_redirection_exception_msg(errv)

                if not redir:
                    continue

                if redir['action'] == "MOVED":
                    self.refresh_table_asap = True
                    node = self.connection_pool.nodes.set_node(
                        redir['host'], redir['port'], server_type='master')
                    self.connection_pool.nodes.slots[redir['slot']] = node
                    attempt.append(i)
                    self._fail_on_redirect(allow_redirections)
                elif redir['action'] == "ASK":
                    node = self.connection_pool.nodes.set_node(
                        redir['host'], redir['port'], server_type='master')
                    ask_slots[redir['slot']] = node
                    attempt.append(i)
                    self._fail_on_redirect(allow_redirections)

        response = [response[k] for k in sorted(response.keys())]
        if raise_on_error:
            self.raise_first_error(stack, response)

        return response
Example #25
def low_zeros(value):
    for i in xrange(1, 32):
        if value >> i << i != value:
            break
    return i - 1
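
A quick sanity check of low_zeros(), which counts the number of trailing zero bits (these asserts are illustrative and not part of the original source):

assert low_zeros(8) == 3    # 0b1000
assert low_zeros(12) == 2   # 0b1100
assert low_zeros(7) == 0    # 0b0111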
Example #26
    def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=True):
        """
        Send a bunch of cluster commands to the redis cluster.

        `allow_redirections`: whether the pipeline should follow `ASK` & `MOVED` responses
        automatically. If set to False, a RedisClusterException is raised instead.
        """
        if self.refresh_table_asap:
            self.connection_pool.nodes.initialize()
            self.refresh_table_asap = False

        ttl = self.RedisClusterRequestTTL
        response = {}
        attempt = xrange(0, len(stack)) if stack else []  # noqa
        ask_slots = {}

        while attempt and ttl > 0:
            ttl -= 1
            node_commands = {}
            nodes = {}

            # Keep this section so that we can determine what nodes to contact
            for i in attempt:
                c = stack[i]
                slot = self._determine_slot(*c[0])
                if slot in ask_slots:
                    node = ask_slots[slot]
                else:
                    node = self.connection_pool.nodes.slots[slot]

                self.connection_pool.nodes.set_node_name(node)
                node_name = node['name']
                nodes[node_name] = node
                node_commands.setdefault(node_name, {})
                node_commands[node_name][i] = c

            # Get one connection at a time from the pool and basically copy the logic from
            #  _execute_pipeline() below and do what a normal pipeline does.

            attempt = []
            if self.use_threads and len(node_commands) > 1:
                workers = dict()
                for node_name in node_commands:
                    node = nodes[node_name]
                    cmds = [node_commands[node_name][i] for i in sorted(node_commands[node_name].keys())]
                    with by_node_context(self.connection_pool, node) as connection:
                        workers[node_name] = Worker(self._execute_pipeline, connection, cmds, False)
                        workers[node_name].start()

                for node_name, worker in workers.items():
                    worker.join()
                    if worker.exception:
                        for i in sorted(node_commands[node_name].keys()):
                            response[i] = worker.exception
                    else:
                        for i, v in zip(sorted(node_commands[node_name].keys()), worker.value):
                            response[i] = v
                del workers
            else:
                for node_name in node_commands:
                    node = nodes[node_name]
                    cmds = [node_commands[node_name][i] for i in sorted(node_commands[node_name].keys())]
                    with by_node_context(self.connection_pool, node) as connection:
                        result = zip(
                            sorted(node_commands[node_name].keys()),
                            self._execute_pipeline(connection, cmds, False))
                        for i, v in result:
                            response[i] = v

            ask_slots = {}
            for i, v in response.items():
                if not isinstance(v, Exception):
                    continue

                if isinstance(v, ConnectionError):
                    ask_slots[self.connection_pool.nodes.keyslot(stack[i][0][1])] = random.choice(self.startup_nodes)
                    attempt.append(i)
                    if ttl < self.RedisClusterRequestTTL / 2:
                        time.sleep(0.1)
                    continue

                errv = StrictRedisCluster._exception_message(v)
                if errv is None:
                    continue

                if errv.startswith('CLUSTERDOWN'):
                    self.connection_pool.disconnect()
                    self.connection_pool.reset()
                    self.refresh_table_asap = True
                    raise ClusterDownException()

                redir = self.parse_redirection_exception_msg(errv)

                if not redir:
                    continue

                if redir['action'] == "MOVED":
                    self.refresh_table_asap = True
                    node = self.connection_pool.nodes.set_node(redir['host'], redir['port'], server_type='master')
                    self.connection_pool.nodes.slots[redir['slot']] = node
                    attempt.append(i)
                    self._fail_on_redirect(allow_redirections)
                elif redir['action'] == "ASK":
                    node = self.connection_pool.nodes.set_node(redir['host'], redir['port'], server_type='master')
                    ask_slots[redir['slot']] = node
                    attempt.append(i)
                    self._fail_on_redirect(allow_redirections)

        response = [response[k] for k in sorted(response.keys())]
        if raise_on_error:
            self.raise_first_error(stack, response)

        return response
Example #27
    def send_cluster_commands(self,
                              stack,
                              raise_on_error=True,
                              allow_redirections=True):
        """
        Send a bunch of cluster commands to the redis cluster.

        `allow_redirections`: whether the pipeline should follow `ASK` & `MOVED` responses
        automatically. If set to False, a RedisClusterException is raised instead.
        """
        if self.refresh_table_asap:
            self.connection_pool.nodes.initialize()
            self.refresh_table_asap = False

        ttl = self.RedisClusterRequestTTL

        # this is where we store all the responses to the pipeline execute.
        # the actual response is a list, not a dict, but because we may be getting
        # the responses for commands out of order or sometimes retrying them it is important
        # that we have a way to key the responses by the order they came in so that we can return
        # the responses as if we did them sequentially.
        response = {}

        # `attempt` corresponds to the sequence number of commands still left to process or retry.
        # initially this corresponds exactly to the number of commands we need to run, and if all goes
        # well, that's where it'll end. Everything will be attempted once, and we end up with an empty
        # array of commands left to process. But if we need to retry, `attempt` gets repopulated with
        # the sequence number of the command that is being retried.
        attempt = xrange(0, len(stack)) if stack else []  # noqa

        # there are different types of retries. redis cluster says if it responds with an ASK error,
        # you need to handle it differently than a moved error. And if we hit a connection error, we
        # don't really know what to do for that command so we pick a random other node in the cluster.
        ask_retry = {}
        conn_retry = {}

        # as long as we have commands left to attempt and we haven't overrun the max attempts, keep trying.
        while attempt and ttl > 0:

            # decrement our counter for number of attempts left before giving up.
            ttl -= 1

            # each time we go through this we need to separate out the commands by node.
            node_commands = {}

            # build a map of the node objects, keyed by node name, that we need to contact
            nodes = {}

            # as we move through each command that still needs to be processed,
            # we figure out the slot number that command maps to, then from the slot determine the node.
            for i in attempt:
                c = stack[i]
                slot = self._determine_slot(*c[0])

                # normally we just refer to our internal node -> slot table that tells us where a given
                # command should route to.
                # but if we are retrying, the cluster could have told us we were wrong or the node was down.
                # in that case, we have to try something that contradicts our rules.
                if i in ask_retry:
                    node = ask_retry[i]
                elif i in conn_retry:
                    node = conn_retry[i]
                else:
                    if self.refresh_table_asap:  # MOVED
                        node = self.connection_pool.get_master_node_by_slot(
                            slot)
                    else:
                        node = self.connection_pool.get_node_by_slot(slot)

                # little hack to make sure the node name is populated. probably could clean this up.
                self.connection_pool.nodes.set_node_name(node)

                # now that we know the name of the node ( it's just a string in the form of host:port )
                # we can build a list of commands for each node.
                node_name = node['name']
                nodes[node_name] = node
                node_commands.setdefault(node_name, {})
                node_commands[node_name][i] = c
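                # (illustrative) at this point node_commands might look like
                #   {'127.0.0.1:7000': {0: cmd0, 2: cmd2}, '127.0.0.1:7002': {1: cmd1}}
                # i.e. commands grouped per node, still keyed by their position in the stack.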

            # Get one connection at a time from the pool and basically copy the logic from
            # _execute_pipeline() below, doing what a normal pipeline does.

            # now that we've split out the commands by the node we plan to send them to,
            # we can reset the commands we'll attempt next time around back to nothing.
            # when we process the response, any commands that need to be retried because of
            # connection errors, MOVED errors, or ASKING errors will be dumped into here.
            # most of the time this array will just stay empty.
            attempt = []

            # only use threads when it makes sense performance-wise to do so.
            # if we are doing an ask_retry, threads are explicitly disabled. that keeps the
            # logic of issuing the ASKING command simple, and in practice the server only
            # moves one slot at a time, so there will only be one node receiving these
            # ASKING retries anyway.
            if self.use_threads and not ask_retry and len(node_commands) > 1:

                # for each node we query, we need to have one worker.
                # that way all pipelined commands are issued in parallel.
                # this could be a problem if you have hundreds of nodes in your cluster.
                # We should really refactor this to be a thread pool of workers, and allocate up to a
                # certain max number of threads.
                workers = dict()

                # allocate all of the redis connections from the connection pool.
                # each connection gets passed into its own thread so the nodes can be queried in parallel.
                connections = {
                    node_name: self.connection_pool.get_connection_by_node(
                        nodes[node_name])
                    for node_name in node_commands
                }

                # iterate through each set of commands and pass them into a worker thread so
                # they can be executed on their own socket connection from the redis connection
                # pool, in parallel.
                try:

                    for node_name in node_commands:
                        node = nodes[node_name]
                        # build the list of commands to be passed to a particular node.
                        # we have to do this on each attempt, because the cluster may respond to us
                        # that a command for a given slot actually should be routed to a different node.
                        cmds = [
                            node_commands[node_name][i]
                            for i in sorted(node_commands[node_name].keys())
                        ]

                        # pass all the commands bound for a particular node into a thread worker object
                        # along with the redis connection needed to run the commands and parse the response.
                        workers[node_name] = ThreadedPipelineExecute(
                            execute=self._execute_pipeline,
                            conn=connections[node_name],
                            cmds=cmds)

                        workers[node_name].start()

                    # now that all the queries are running against all the nodes,
                    # wait for all of them to come back so we can parse the responses.
                    for node_name, worker in workers.items():
                        worker.join()

                        # if the worker hit an exception this is really bad.
                        # that means something completely unexpected happened.
                        # we have to assume the worst and assume that all the calls against
                        # that particular node in the cluster failed and will need to be retried.
                        # maybe that isn't a safe assumption?
                        if worker.exception:
                            for i in node_commands[node_name].keys():
                                response[i] = worker.exception
                        else:
                            # we got a valid response back from redis.
                            # map each response based on the sequence of the original request stack.
                            # some of these responses may represent redis connection or ask errors etc.
                            for i, v in zip(
                                    sorted(node_commands[node_name].keys()),
                                    worker.value):
                                response[i] = v
                finally:
                    # don't need our threads anymore.
                    # explicitly remove them from the current namespace so they can be garbage collected.
                    del workers

                    # release all of the redis connections we allocated earlier back into the connection pool.
                    for conn in connections.values():
                        self.connection_pool.release(conn)
                    del connections
            else:
                # if we got here, it's because threading is disabled explicitly, or
                # all the commands map to a single node so we don't need to use different threads to
                # issue commands in parallel.

                # first, we need to keep track of all the commands and which responses they map to.
                # this is because we need to interject ASKING commands into the mix. The trick used
                # here is to map those ASKING responses to None instead of to the integer sequence
                # id that marks the position of the command in the stack of requests handed to
                # pipeline execute.
                track_cmds = {}

                # send the commands in sequence.
                for node_name in node_commands:
                    node = nodes[node_name]
                    cmds = []
                    track_cmds[node_name] = []

                    # we've got the commands we need to run for each node; sort them so they are
                    # executed in the same order they came in on the stack, otherwise the logic changes.
                    # we make no guarantees about the order of execution across commands, except that
                    # commands for a given key are always processed sequentially. If we get an error
                    # from the server about a given key, it applies the same to all commands on that
                    # key (MOVED, ASKING, etc), so we will be resending all of the commands for that
                    # key to a new node.
                    for i in sorted(node_commands[node_name].keys()):
                        if i in ask_retry:
                            # put in our fake stub placeholder for the response.
                            track_cmds[node_name].append(None)
                            cmds.append((['ASKING'], {}))

                        # keep track of the sequence number and the mapping of actual commands
                        # sent to the node. (ASKING SCREWS EVERYTHING UP!!!!!)
                        track_cmds[node_name].append(i)
                        cmds.append(node_commands[node_name][i])
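                        # (illustrative) if command 5 is in ask_retry, track_cmds ends up
                        # like [..., None, 5, ...] and cmds like [..., (['ASKING'], {}),
                        # <original command 5>, ...], so the ASKING reply is dropped later
                        # while the real reply is mapped back to index 5.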

                    # allocate a connection from the connection pool and send the commands for each node
                    # as a packed single network request. Since we aren't using threads here, we are
                    # only able to send each request sequentially and block, waiting for the response.
                    # After we get the response to one connection, we move on to the next.
                    with by_node_context(self.connection_pool,
                                         node) as connection:
                        result = zip(
                            track_cmds[node_name],
                            self._execute_pipeline(connection, cmds, False))

                        # map the response from the connection to the commands we were running.
                        for i, v in result:

                            # remember, None is the shim value used above as a placeholder for the
                            # ASKING command. Its response is just `OK` and we don't care about it,
                            # so throw it away.
                            # Map every real response back to the original sequence of commands in
                            # the stack sent to pipeline.
                            if i is not None:
                                response[i] = v

            ask_retry = {}
            conn_retry = {}

            # now that we have tried to execute all the commands let's see what we have left.
            for i, v in response.items():
                # if the response isn't an exception it is a valid response from the node
                # we're all done with that command, YAY!
                # if we move beyond this point, we've run into problems and we need to retry the command.
                if not isinstance(v, Exception):
                    continue

                # connection errors are tricky because most likely we routed to the right node but it
                # is down. In that case, the best we can do is randomly try another node in the cluster
                # and hope that it responds with a MOVED error pointing us at the node (or the new
                # master) we should actually be talking to.
                if isinstance(v, ConnectionError):
                    conn_retry[i] = random.choice(self.startup_nodes)
                    attempt.append(i)

                    # if we are stuck in a retry loop, slow things down a bit to give the failover
                    # a chance of actually happening.
                    if ttl < self.RedisClusterRequestTTL / 2:
                        time.sleep(0.1)
                    continue

                # If cluster is down it should be raised and bubble up to
                # utils.clusterdown_wrapper()
                if isinstance(v, ClusterDownError):
                    self.connection_pool.disconnect()
                    self.connection_pool.reset()
                    self.refresh_table_asap = True

                    raise v

                # A MOVED response from the cluster means that somehow we were misinformed about which node
                # a given key slot maps to. This can happen during cluster resharding, during master-slave
                # failover, or if we got a connection error and were forced to re-attempt the command against a
                # random node.
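                # (illustrative) a MOVED reply looks like "MOVED 3999 127.0.0.1:7002": we point
                # slot 3999 at that node in our local table and queue the command for another try.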
                if isinstance(v, MovedError):
                    # Do not perform full cluster refresh on every MOVED error
                    self.reinitialize_counter += 1

                    if self.reinitialize_counter % self.reinitialize_steps == 0:
                        self.refresh_table_asap = True

                    node = self.connection_pool.nodes.set_node(
                        v.host, v.port, server_type='master')
                    self.connection_pool.nodes.slots[v.slot_id][0] = node
                    attempt.append(i)
                    self._fail_on_redirect(allow_redirections)

                # an ASK error from the server means that only this specific command needs to be tried
                # against a different node (not every key in the slot). This happens only during cluster
                # re-sharding and is a major pain in the ass. For it to work correctly, we have to resend
                # the command to the new node, immediately preceded by an ASKING command.
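                # (illustrative) an ASK reply looks like "ASK 3999 127.0.0.1:7002": only this one
                # command is resent to that node on the next pass, prefixed with ASKING.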
                elif isinstance(v, AskError):
                    node = self.connection_pool.nodes.set_node(
                        v.host, v.port, server_type='master')
                    ask_retry[i] = node
                    attempt.append(i)
                    self._fail_on_redirect(allow_redirections)

        # YAY! we made it out of the attempt loop.
        # turn the response back into a simple flat array that corresponds
        # to the sequence of commands issued in the stack in pipeline.execute()
        response = [response[k] for k in sorted(response.keys())]
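        # (illustrative) e.g. {2: True, 0: 1, 1: None} becomes [1, None, True].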

        if raise_on_error:
            self.raise_first_error(stack, response)

        return response