def _send_request(self, request, field_types=None):
    """Send the request to the server through the socket.

    On a NetworkError the connection is re-established (after
    ``reconnect_delay`` seconds) and the request is re-sent, up to
    ``reconnect_max_attempts`` times; each retry emits a NetworkWarning.

    :param request: object representing a request
    :type request: `Request` instance
    :rtype: `Response` instance
    """
    assert isinstance(request, Request)
    need_reconnect = False
    attempt = 1
    while True:
        try:
            if need_reconnect:
                # Wait a bit, re-establish the connection and report
                # the recovery to the user.
                time.sleep(self.reconnect_delay)
                self.connect()
                need_reconnect = False
                warn('Successfully reconnected', NetworkWarning)
            return self._send_request_wo_reconnect(request, field_types)
        except NetworkError as e:
            if attempt > self.reconnect_max_attempts:
                raise
            warn('%s : Reconnect attempt %d of %d' % (
                e.message, attempt, self.reconnect_max_attempts),
                NetworkWarning)
            attempt += 1
            need_reconnect = True
def _send_request(self, request):
    """Queue *request* on the shared write buffer and wait for its reply.

    Retries while the server answers with completion_status == 1
    ("try again"), up to RETRY_MAX_ATTEMPTS times; raises DatabaseError
    on a non-zero return code or when the attempts are exhausted.

    :param request: object representing a request
    :type request: `Request` instance
    :rtype: `Response` instance
    :raise: DatabaseError
    """
    assert isinstance(request, Request)
    if not self.connected:
        self.connect()
    for attempt in range(RETRY_MAX_ATTEMPTS):
        # Hand the serialized request to the writer side.
        self._write_buffer += bytes(request)
        self._write_event.set()
        # read response
        sync = request.sync
        response, ex = self.req_event[sync].get()
        # fix me
        # NOTE(review): the per-sync event is deleted after the first
        # wait, so a retry iteration would look up a missing
        # self.req_event[sync] entry unless it is re-created elsewhere
        # -- confirm against the reader side.
        del self.req_event[sync]
        if ex is not None:
            # The reader delivered an exception instead of a response.
            raise ex
        if response.completion_status != 1:
            if response.return_code != 0:
                raise DatabaseError(response.return_code,
                                    response.return_message)
            return response
        warn(response.return_message, RetryWarning)
    # Raise an error if the maximum number of attempts have been made
    raise DatabaseError(response.return_code, response.return_message)
def _opt_reconnect(self):
    '''
    Check that connection is alive using low-level recv from libc(ctypes)
    **Due to bug in python - timeout is internal python construction.
    '''
    # No socket at all yet: just do a plain connect.
    if not self._socket:
        return self.connect()

    def check():  # Check that connection is alive
        buf = ctypes.create_string_buffer(2)
        try:
            sock_fd = self._socket.fileno()
        except socket.error as e:
            # A closed socket has no valid descriptor.
            if e.errno == errno.EBADF:
                return errno.ECONNRESET
        else:
            if os.name == 'nt':
                # Windows has no MSG_DONTWAIT; emulate it by putting
                # the socket into non-blocking mode for the peek.
                flag = socket.MSG_PEEK
                self._socket.setblocking(False)
            else:
                flag = socket.MSG_DONTWAIT | socket.MSG_PEEK
            # Peek one byte without consuming it; the interesting part
            # is the errno left behind by the libc call.
            self._sys_recv(sock_fd, buf, 1, flag)
            # EAGAIN means "no data pending" -- the connection is alive.
            if ctypes.get_errno() == errno.EAGAIN:
                ctypes.set_errno(0)
                return errno.EAGAIN
            return (ctypes.get_errno() if ctypes.get_errno()
                    else errno.ECONNRESET)

    last_errno = check()
    if self.connected and last_errno == errno.EAGAIN:
        # Connection is healthy, nothing to do.
        return
    attempt = 0
    last_errno = errno.ECONNRESET
    while True:
        time.sleep(self.reconnect_delay)
        try:
            self.connect_basic()
        except NetworkError:
            pass
        else:
            if self.connected:
                break
        warn("Reconnect attempt %d of %d" % (attempt,
             self.reconnect_max_attempts), NetworkWarning)
        if attempt == self.reconnect_max_attempts:
            raise NetworkError(
                socket.error(last_errno, errno.errorcode[last_errno]))
        attempt += 1
    self.handshake()
    # It is important to set socket timeout *after* connection.
    # Otherwise the timeout exception will be raised, even when
    # the connection fails because the server is simply
    # not bound to port
    self._socket.settimeout(self.socket_timeout)
def _opt_reconnect(self):
    '''
    Check that connection is alive using low-level recv from libc(ctypes)
    **Due to bug in python - timeout is internal python construction.
    '''
    # No socket at all yet: just do a plain connect.
    if not self._socket:
        return self.connect()

    def check():  # Check that connection is alive
        buf = ctypes.create_string_buffer(2)
        try:
            sock_fd = self._socket.fileno()
        except socket.error as e:
            # A closed socket has no valid descriptor.
            if e.errno == errno.EBADF:
                return errno.ECONNRESET
        else:
            if os.name == 'nt':
                # Windows has no MSG_DONTWAIT; emulate it by putting
                # the socket into non-blocking mode for the peek.
                flag = socket.MSG_PEEK
                self._socket.setblocking(False)
            else:
                flag = socket.MSG_DONTWAIT | socket.MSG_PEEK
            # Peek one byte without consuming it; the interesting part
            # is the errno left behind by the libc call.
            self._sys_recv(sock_fd, buf, 1, flag)
            # EAGAIN means "no data pending" -- the connection is alive.
            if ctypes.get_errno() == errno.EAGAIN:
                ctypes.set_errno(0)
                return errno.EAGAIN
            return (ctypes.get_errno() if ctypes.get_errno()
                    else errno.ECONNRESET)

    last_errno = check()
    if self.connected and last_errno == errno.EAGAIN:
        # Connection is healthy, nothing to do.
        return
    attempt = 0
    last_errno = errno.ECONNRESET
    while True:
        time.sleep(self.reconnect_delay)
        try:
            self.connect_basic()
        except NetworkError:
            pass
        else:
            if self.connected:
                break
        warn("Reconnect attempt %d of %d" % (attempt,
             self.reconnect_max_attempts), NetworkWarning)
        if attempt == self.reconnect_max_attempts:
            raise NetworkError(
                socket.error(last_errno, errno.errorcode[last_errno]))
        attempt += 1
    self.handshake()
    # It is important to set socket timeout *after* connection.
    # Otherwise the timeout exception will be raised, even when
    # the connection fails because the server is simply
    # not bound to port
    self._socket.settimeout(self.socket_timeout)
def _opt_reconnect(self):
    # Probe the socket and reconnect while it looks dead, up to
    # reconnect_max_attempts times.
    #
    # NOTE(review): recv(0, ...) returns b'' on a healthy connection,
    # which is falsy, so the "alive" path seems to depend on the call
    # raising EAGAIN instead -- confirm the intended probe semantics.
    attempt = 0
    while True:
        try:
            if not self._socket or not self._socket.recv(0, socket.MSG_DONTWAIT):
                time.sleep(self.reconnect_delay)
                self.connect()
                warn("Reconnect attempt %d of %d" % (attempt,
                     self.reconnect_max_attempts), NetworkWarning)
        except socket.error as e:
            if e.errno == errno.EAGAIN:
                # No pending data on a non-blocking peek: connection is
                # alive, stop probing.
                break
            else:
                time.sleep(self.reconnect_delay)
                self.connect()
                warn("%s : Reconnect attempt %d of %d" % (e.message, attempt,
                     self.reconnect_max_attempts), NetworkWarning)
        # NOTE(review): reconstructed placement -- a bare `raise` here
        # has no active exception and would itself fail; it may have
        # been intended to live inside the `except` branch. Confirm
        # against the original formatting.
        if attempt == self.reconnect_max_attempts:
            raise
        attempt += 1
def _send_request_no_check_connected(self, request):
    """Hand *request* to the writer task and await the matching reply.

    Retries while the server keeps answering with
    completion_status == 1 ("try again"), at most RETRY_MAX_ATTEMPTS
    times; raises DatabaseError once the attempts run out.
    """
    sync = request.sync
    for _try in range(RETRY_MAX_ATTEMPTS):
        fut = self._waiters[sync]
        # self._writer.write(bytes(request))
        self._write_buf += bytes(request)
        self._write_event.set()
        # Block until the reader task resolves our future.
        response = yield from fut
        if response.completion_status == 1:
            # "try again": arm a fresh future for this sync and retry.
            self._waiters[sync] = asyncio.Future(loop=self.loop)
            warn(response.return_message, RetryWarning)
        else:
            return response
    # Raise an error if the maximum number of attempts have been made
    raise DatabaseError(response.return_code, response.return_message)
def _send_request_wo_reconnect(self, request):
    '''
    Transmit *request* on the already-established socket and parse the
    reply, re-sending while the server reports completion_status == 1
    ("try again"), at most RETRY_MAX_ATTEMPTS times.

    :rtype: `Response` instance
    :raise: NetworkError
    '''
    assert isinstance(request, Request)
    attempts_left = RETRY_MAX_ATTEMPTS
    while attempts_left > 0:
        self._socket.sendall(bytes(request))
        reply = Response(self, self._read_response())
        if reply.completion_status != 1:
            return reply
        # Server asked us to try again.
        warn(reply.return_message, RetryWarning)
        attempts_left -= 1
    # Raise an error if the maximum number of attempts have been made
    raise DatabaseError(reply.return_code, reply.return_message)
def _send_request_wo_reconnect(self, request):
    '''
    Send *request* on the current socket (no reconnection) and return
    the parsed reply.  The request is re-sent while the server reports
    completion_status == 1 ("try again"), at most RETRY_MAX_ATTEMPTS
    times.

    :rtype: `Response` instance
    :raise: NetworkError
    '''
    assert isinstance(request, Request)
    # Repeat request in a loop if the server returns completion_status == 1
    # (try again)
    # range() instead of xrange(): identical behavior for this small
    # bound, consistent with the sibling _send_request_wo_reconnect
    # variant, and Python 3 compatible.
    for attempt in range(RETRY_MAX_ATTEMPTS):  # pylint: disable=W0612
        self._socket.sendall(bytes(request))
        response = Response(self, self._read_response())
        if response.completion_status != 1:
            return response
        warn(response.return_message, RetryWarning)
    # Raise an error if the maximum number of attempts have been made
    raise DatabaseError(response.return_code, response.return_message)
def _send_request_wo_reconnect(self, request, field_types=None):
    """
    Send *request* on the current socket (no reconnection) and return
    the parsed reply.  Socket-level failures are wrapped into
    NetworkError; the request is re-sent while the server reports
    completion_status == 1 ("try again"), at most RETRY_MAX_ATTEMPTS
    times.

    :param field_types: optional field type hints forwarded to Response
    :rtype: `Response` instance
    :raise: NetworkError
    """
    assert isinstance(request, Request)
    # Repeat request in a loop if the server
    # returns completion_status == 1 (try again)
    # range() instead of xrange(): identical behavior for this small
    # bound, consistent with the other _send_request_wo_reconnect
    # variants, and Python 3 compatible.
    for attempt in range(RETRY_MAX_ATTEMPTS):
        try:
            self._socket.sendall(bytes(request))
            header, body = self._read_response()
            response = Response(header, body, field_types)
        except socket.error as e:
            # Translate low-level socket failures into driver errors.
            raise NetworkError(e)
        if response.completion_status != 1:
            return response
        warn(response.return_message, RetryWarning)
    # Raise an error if the maximum number of attempts have been made
    raise DatabaseError(response.return_code, response.return_message)
def _send_request(self, request):
    """Send *request* through the writer task and await its reply.

    Connects lazily if needed.  Retries while the server answers with
    completion_status == 1 ("try again"), up to RETRY_MAX_ATTEMPTS
    times, then raises DatabaseError.

    :param request: object representing a request
    :type request: `Request` instance
    :rtype: `Response` instance
    :raise: DatabaseError
    """
    assert isinstance(request, Request)
    if not self.connected:
        yield from self.connect()
    sync = request.sync
    for attempt in range(RETRY_MAX_ATTEMPTS):
        waiter = self._waiters[sync]
        # self._writer.write(bytes(request))
        self._write_buf += bytes(request)
        self._write_event.set()
        # read response
        response = yield from waiter
        if response.completion_status != 1:
            # NOTE(review): on success the consumed future for this sync
            # is not renewed here -- presumably that happens elsewhere
            # (cf. the _wait_response helper); confirm.
            return response
        # "try again": arm a fresh future for the same sync and retry.
        self._waiters[sync] = asyncio.Future(loop=self.loop)
        warn(response.return_message, RetryWarning)
    # Raise an error if the maximum number of attempts have been made
    raise DatabaseError(response.return_code, response.return_message)
def _opt_reconnect(self):
    '''
    Check that connection is alive using low-level recv from libc(ctypes)
    **Due to bug in python - timeout is internal python construction.
    '''
    # No socket at all yet: just do a plain connect.
    if not self._socket:
        return self.connect()

    def check():  # Check that connection is alive
        buf = ctypes.create_string_buffer(2)
        try:
            sock_fd = self._socket.fileno()
        except socket.error as e:
            # A closed socket has no valid descriptor.
            if e.errno == errno.EBADF:
                return errno.ECONNRESET
        else:
            if os.name == 'nt':
                # Windows has no MSG_DONTWAIT; emulate it by putting
                # the socket into non-blocking mode for the peek.
                flag = socket.MSG_PEEK
                self._socket.setblocking(False)
            else:
                flag = socket.MSG_DONTWAIT | socket.MSG_PEEK
            # Peek one byte without consuming it.
            retbytes = self._sys_recv(sock_fd, buf, 1, flag)

            err = 0
            if os.name != 'nt':
                err = ctypes.get_errno()
            else:
                # On Windows the error is reported via WSAGetLastError.
                err = ctypes.get_last_error()
                self._socket.setblocking(True)

            # Winsock's WSAEWOULDBLOCK.
            WWSAEWOULDBLOCK = 10035
            # A failed peek with a "would block" error means no data is
            # pending -- the connection is alive.
            if (retbytes < 0) and (err == errno.EAGAIN or
                                   err == errno.EWOULDBLOCK or
                                   err == WWSAEWOULDBLOCK):
                ctypes.set_errno(0)
                return errno.EAGAIN
            else:
                return errno.ECONNRESET

    last_errno = check()
    if self.connected and last_errno == errno.EAGAIN:
        # Connection is healthy, nothing to do.
        return
    attempt = 0
    last_errno = errno.ECONNRESET
    while True:
        time.sleep(self.reconnect_delay)
        try:
            self.connect_basic()
        except NetworkError:
            pass
        else:
            if self.connected:
                break
        warn(
            "Reconnect attempt %d of %d" %
            (attempt, self.reconnect_max_attempts),
            NetworkWarning)
        if attempt == self.reconnect_max_attempts:
            raise NetworkError(
                socket.error(last_errno, errno.errorcode[last_errno]))
        attempt += 1
    self.handshake()
def _opt_refresh_instances(self):
    '''
    Refresh list of tarantool instances in a cluster.
    Reconnect if a current instance was gone from the list.

    Runs only while connected, only when a discovery function is
    configured, and at most once per ``cluster_discovery_delay``
    seconds; on any problem a warning is emitted and the current
    address list is kept.
    '''
    now = time.time()
    if not self.connected or not self.cluster_discovery_function:
        return
    if now - self.last_nodes_refresh < self.cluster_discovery_delay:
        return

    # Call a cluster discovery function w/o reconnection. If
    # something going wrong: warn about that and ignore.
    request = RequestCall(self, self.cluster_discovery_function, (),
                          self.call_16)
    try:
        resp = self._send_request_wo_reconnect(request)
    except DatabaseError as e:
        warn('got "%s" error, skipped addresses updating' % str(e),
             ClusterDiscoveryWarning)
        return

    payload = resp.data
    if not payload or not payload[0] or not isinstance(payload[0], list):
        warn("got incorrect response instead of URI list, "
             "skipped addresses updating", ClusterDiscoveryWarning)
        return

    # Keep only the URIs that both parse and validate.
    fresh_addrs = []
    for uri in payload[0]:
        addr, msg = parse_uri(uri)
        if not addr:
            warn(msg, ClusterDiscoveryWarning)
            continue
        ok, msg = validate_address(addr)
        if not ok:
            warn(msg, ClusterDiscoveryWarning)
            continue
        fresh_addrs.append(addr)

    if not fresh_addrs:
        warn("got no correct URIs, skipped addresses updating",
             ClusterDiscoveryWarning)
        return

    self.strategy.update(fresh_addrs)
    self.last_nodes_refresh = now

    # Disconnect from a current instance if it was gone from
    # an instance list and connect to one of new instances.
    if {'host': self.host, 'port': self.port} not in self.strategy.addrs:
        self.close()
        nxt = self.strategy.getnext()
        self.host = nxt['host']
        self.port = nxt['port']
        self._opt_reconnect()
        # NOTE(review): this fragment is the interior of a reader
        # coroutine whose `def` lies before this chunk -- the
        # surrounding structure below is reconstructed, confirm
        # against the full file.
        # Slice complete messages out of the accumulated read buffer:
        # a 5-byte msgpack-encoded length prefix, then `length` bytes
        # of body.
        while len_buf - curr >= 5:
            length_pack = buf[curr:curr + 5]
            length = msgpack.unpackb(length_pack)
            if len_buf - curr < 5 + length:
                # Body not fully received yet; wait for more data.
                break
            body = buf[curr + 5:curr + 5 + length]
            curr += 5 + length
            response = Response(self, body)  # unpack response
            sync = response.sync
            if sync not in self._waiters:
                # NOTE(review): "{r}" is a str.format-style placeholder
                # but stdlib logging uses %-formatting -- it would be
                # emitted literally; confirm the logger type.
                logger.error("aio git happens: {r}", response)
                continue
            waiter = self._waiters[sync]
            # Resolve the caller's future with either the error or the
            # response, then forget the sync.
            if response.return_code != 0:
                waiter.set_exception(DatabaseError(response.return_code,
                                                   response.return_message))
            else:
                waiter.set_result(response)
            del self._waiters[sync]
        # one cut for buffer
        if curr:
            buf = buf[curr:]
    yield from self._do_close(None)

@asyncio.coroutine
def _wait_response(self, sync):
    # Await the reply registered for *sync*, then immediately arm a
    # fresh future so the same sync slot can be awaited again.
    resp = yield from self._waiters[sync]
    # renew request waiter
    self._waiters[sync] = asyncio.Future(loop=self.loop)
    return resp

@asyncio.coroutine
def _send_request(self, request):
    # Send *request* via the writer task and await its reply,
    # connecting lazily and retrying on completion_status == 1
    # ("try again") up to RETRY_MAX_ATTEMPTS times.
    assert isinstance(request, Request)
    if not self.connected:
        yield from self.connect()
    sync = request.sync
    for attempt in range(RETRY_MAX_ATTEMPTS):
        waiter = self._waiters[sync]
        # self._writer.write(bytes(request))
        self._write_buf += bytes(request)
        self._write_event.set()
        # read response
        response = yield from waiter
        if response.completion_status != 1:
            return response
        # "try again": arm a fresh future for this sync and retry.
        self._waiters[sync] = asyncio.Future(loop=self.loop)
        warn(response.return_message, RetryWarning)
    # Raise an error if the maximum number of attempts have been made
    raise DatabaseError(response.return_code, response.return_message)

def generate_sync(self):
    # Produce the next request id (wrapping past 10_000_000) and
    # pre-register its waiter future.
    self.req_num += 1
    if self.req_num > 10000000:
        self.req_num = 0
    self._waiters[self.req_num] = asyncio.Future(loop=self.loop)
    return self.req_num

@asyncio.coroutine
def close(self):
    # Public close: tear down without an error (waiters are cancelled).
    yield from self._do_close(None)

def _do_close(self, exc):
    # Tear down the connection: stop reader/writer tasks, drop
    # buffers, and fail (or cancel, when exc is None) every pending
    # waiter.  No-op when already disconnected.
    if not self.connected:
        return
    with (yield from self.lock):
        self.connected = False
        self._writer.transport.close()
        self._reader_task.cancel()
        self._reader_task = None
        self._writer_task.cancel()
        self._writer_task = None
        self._write_event = None
        self._write_buf = None
        self._writer = None
        self._reader = None
        self._auth_event = None
        for waiter in self._waiters.values():
            if exc is None:
                waiter.cancel()
            else:
                waiter.set_exception(exc)
        self._waiters = dict()

def __repr__(self):
    return "aiotarantool.Connection(host=%r, port=%r)" % (self.host,
                                                          self.port)

@asyncio.coroutine
def call(self, func_name, *args):
    # Call a remote stored procedure; a single list/tuple argument is
    # unpacked so call("f", [1, 2]) behaves like call("f", 1, 2).
    assert isinstance(func_name, str)
    if len(args) == 1 and isinstance(args[0], (list, tuple)):
        args = args[0]
    resp = yield from self._send_request(RequestCall(self, func_name,
                                                     args))
    return resp