def connect(self, host: bytes, port: int, connect_timeout: float = None) -> None:
    """
    Connect to host:port (with an optional connect timeout) and emit
    'connect' when connected, or 'connect_error' in the case of an error.

    The socket is assumed to be non-blocking; completion is signalled via
    the 'fd_writable' event, which is routed to self.handle_connect.
    """
    self.host = host
    self.port = port
    self.on("fd_writable", self.handle_connect)
    # TODO: use socket.getaddrinfo(); needs to be non-blocking.
    try:
        err = self.sock.connect_ex((host, port))
    except socket.gaierror as why:
        self.handle_conn_error(socket.gaierror, why)
        return
    except socket.error as why:
        self.handle_conn_error(socket.error, why)
        return
    # connect_ex() returns 0 on an immediately-successful connect (common
    # for loopback) and EINPROGRESS while a non-blocking connect is still
    # pending; both are success cases.  The previous `err != EINPROGRESS`
    # check wrongly reported an immediate success (0) as an error.
    if err not in (0, errno.EINPROGRESS):
        self.handle_conn_error(socket.error, socket.error(err, os.strerror(err)))
        return
    if connect_timeout:
        # Schedule a synthetic ETIMEDOUT if the connect has not completed
        # within the allotted time.
        self._timeout_ev = self._loop.schedule(
            connect_timeout,
            self.handle_conn_error,
            socket.error,
            socket.error(errno.ETIMEDOUT, os.strerror(errno.ETIMEDOUT)),
            True,
        )
def connect(self, address):
    """
    Cooperatively connect the underlying fd to *address*.

    In non-blocking mode the raw fd connect is used directly.  Otherwise
    the non-blocking connect is retried, yielding to the hub via
    _trampoline() until the fd becomes writable, and honouring
    gettimeout() as an overall deadline.

    Raises socket.error(errno.EBADFD) if the fd is closed while waiting,
    and socket.timeout when the deadline expires.
    """
    if self.act_non_blocking:
        return self.fd.connect(address)
    fd = self.fd
    if self.gettimeout() is None:
        # No timeout: retry until socket_connect() reports success,
        # parking on the hub between attempts.
        while not socket_connect(fd, address):
            try:
                self._trampoline(fd, write=True)
            except IOClosed:
                raise socket.error(errno.EBADFD)
            # Surface any pending error (e.g. ECONNREFUSED) on the fd.
            socket_checkerr(fd)
    else:
        end = time.time() + self.gettimeout()
        while True:
            if socket_connect(fd, address):
                return
            if time.time() >= end:
                raise socket.timeout("timed out")
            try:
                # Wait for writability, but no longer than the remaining
                # slice of the overall deadline.
                self._trampoline(fd, write=True, timeout=end - time.time(),
                                 timeout_exc=socket.timeout("timed out"))
            except IOClosed:
                # ... we need some workable errno here.
                raise socket.error(errno.EBADFD)
            socket_checkerr(fd)
def bind(self, *pos, **kw):
    """
    Implements proxy connection for UDP sockets,
    which happens during the bind() phase.

    Only SOCKS5 proxies support UDP; a UDP ASSOCIATE request is issued
    over a companion TCP connection and this socket is then connected
    to the relay the proxy returns.
    """
    proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
    # Non-proxied or non-UDP sockets bind normally.
    if not proxy_type or self.type != socket.SOCK_DGRAM:
        return _orig_socket.bind(self, *pos, **kw)
    if self._proxyconn:
        raise socket.error(EINVAL, "Socket already bound to an address")
    if proxy_type != SOCKS5:
        msg = "UDP only supported by SOCKS5 proxy type"
        raise socket.error(EOPNOTSUPP, msg)
    _BaseSocket.bind(self, *pos, **kw)
    # Need to specify actual local port because
    # some relays drop packets if a port of zero is specified.
    # Avoid specifying host address in case of NAT though.
    _, port = self.getsockname()
    dst = ("0", port)
    # Companion TCP connection carrying the SOCKS5 control traffic.
    self._proxyconn = _orig_socket()
    proxy = self._proxy_addr()
    self._proxyconn.connect(proxy)
    # SOCKS5 command code 0x03 = UDP ASSOCIATE.
    UDP_ASSOCIATE = b"\x03"
    _, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
    # The relay is most likely on the same host as the SOCKS proxy,
    # but some proxies return a private IP address (10.x.y.z)
    host, _ = proxy
    _, port = relay
    _BaseSocket.connect(self, (host, port))
    self.proxy_sockname = ("0.0.0.0", 0)  # Unknown
def inet_pton(address_family, ip_string):
    """
    Convert an IP address from its family-specific string format to a
    packed, binary format. inet_pton() is useful when a library or
    network protocol calls for an object of type ``struct in_addr`` or
    ``struct in6_addr``.

    =============== ============
    Argument        Description
    =============== ============
    address_family  Supported values are ``socket.AF_INET`` and ``socket.AF_INET6``.
    ip_string       The IP address to pack.
    =============== ============
    """
    if address_family not in (socket.AF_INET, socket.AF_INET6):
        # 97 == EAFNOSUPPORT on Linux (address family not supported).
        raise socket.error(97, os.strerror(97))
    # Output buffer: 4 bytes for IPv4 / 16 for IPv6, plus a trailing NUL.
    # (Renamed from `bytes`, which shadowed the builtin.)
    if address_family == socket.AF_INET:
        size = 5
    else:
        size = 17
    buf = create_string_buffer(size)
    result = _inet_pton(address_family, ip_string, buf)
    if result == 0:
        raise socket.error("illegal IP address string passed to inet_pton")
    elif result != 1:
        raise socket.error("unknown error calling inet_pton")
    # Strip the NUL terminator; return only the packed address bytes.
    return buf.raw[: size - 1]
def GET(self, path):
    """
    Perform an HTTP GET for self.path + path on the stored connection,
    temporarily installing self.timeout as the process-wide default
    socket timeout (restored in the finally block).

    Maps transport errors and non-200 statuses to the module's
    exception types; returns the response body on success.
    """
    try:
        default_timeout = socket.getdefaulttimeout()
        socket.setdefaulttimeout(self.timeout)
        try:
            self.connect()
            self.connection.request('GET', self.path + path)
            response = self.connection.getresponse()
        # If ssl error : may come from bad configuration
        except ssl.SSLError as exc:
            # NOTE(review): `exc.message` is a Python 2-only attribute
            # (removed in Python 3) — confirm this module is py2-only.
            if exc.message == 'The read operation timed out':
                raise socket.error(str(exc) + self.error_message_timeout)
            raise ssl.SSLError(str(exc) + self.ssl_error_message_connect_fail)
        except socket.error as exc:
            if exc.message == 'timed out':
                raise socket.error(str(exc) + self.error_message_timeout)
            raise socket.error(self.error_message_connect_fail + str(exc))
        # check self.response.status and raise exception early
        if response.status == httplib.REQUEST_TIMEOUT:
            # resource is not ready
            raise ResourceNotReady(path)
        elif response.status == httplib.NOT_FOUND:
            raise NotFoundError(path)
        elif response.status == httplib.FORBIDDEN:
            raise Unauthorized(path)
        elif response.status != httplib.OK:
            message = parsed_error_message(response.status,
                                           response.read(),
                                           path)
            raise ServerError(message)
    finally:
        # Always restore the previous global socket timeout.
        socket.setdefaulttimeout(default_timeout)
    return response.read()
def recv(self, bufsize, flags=0):
    """
    Receive up to *bufsize* bytes from the bluetooth channel.

    Returns "" when the read side is shut down or bufsize == 0; blocks
    (via __waituntil) until data arrives or the channel is closed.
    Raises ECONNRESET if the remote side closed with no buffered data.
    """
    if self.__commstate in (SHUT_RD, SHUT_RDWR):
        return ""
    self.__checkconnected()
    if not isinstance(bufsize, int):
        raise TypeError("buffer size must be int, was %s" % type(bufsize))
    if bufsize < 0:
        raise ValueError("negative buffersize in recv")
    # as for tcp
    if bufsize == 0:
        return ""
    # need this to ensure the _isclosed() check is up-to-date
    _macutil.looponce()
    if self._isclosed():
        # Closed but data may still be buffered; only error when empty.
        if len(self.__incomingdata) == 0:
            raise _socket.error(errno.ECONNRESET,
                                os.strerror(errno.ECONNRESET))
        return self.__incomingdata.read(bufsize)
    # if incoming data buffer is empty, wait until data is available or
    # channel is closed
    def gotdata():
        return not self.__incomingdata.empty() or self._isclosed()
    if not gotdata():
        self.__waituntil(gotdata, "recv timed out")
    # other side closed connection while waiting?
    if self._isclosed() and len(self.__incomingdata) == 0:
        raise _socket.error(errno.ECONNRESET, os.strerror(errno.ECONNRESET))
    return self.__incomingdata.read(bufsize)
def inet_ntop(address_family, packed_ip):
    """
    Convert a packed IP address (a string of some number of characters)
    to its standard, family-specific string representation (for
    example, ``'7.10.0.5`` or ``5aef:2b::8``). inet_ntop() is useful
    when a library or network protocol returns an object of type
    ``struct in_addr`` or ``struct in6_addr``.

    =============== ============
    Argument        Description
    =============== ============
    address_family  Supported values are ``socket.AF_INET`` and ``socket.AF_INET6``.
    packed_ip       The IP address to unpack.
    =============== ============
    """
    if address_family not in (socket.AF_INET, socket.AF_INET6):
        # 97 == EAFNOSUPPORT on Linux (address family not supported).
        raise socket.error(97, os.strerror(97))
    # Output buffer sized for the textual form plus NUL terminator:
    # INET_ADDRSTRLEN == 16, INET6_ADDRSTRLEN == 46.
    # (Renamed from `bytes`, which shadowed the builtin.)
    if address_family == socket.AF_INET:
        size = 17
    else:
        size = 47
    buf = create_string_buffer(size)
    result = _inet_ntop(address_family, packed_ip, buf, size)
    if not result:
        raise socket.error("unknown error calling inet_ntop")
    return buf.value
def inet_ntop(address_family, packed_ip, encoding="UTF-8"):
    """
    Convert a packed IPv4/IPv6 address to its string form using the
    Windows WSAAddressToStringA API.

    :param address_family: socket.AF_INET or socket.AF_INET6
    :param packed_ip: 4-byte (IPv4) or 16-byte (IPv6) packed address
    :param encoding: codec used to decode the resulting string
    :raises socket.error: on wrong packed length, unknown family, or
        WSAAddressToStringA failure
    """
    addr = sockaddr()
    addr.sa_family = address_family
    addr_size = c_int(sizeof(addr))
    ip_string = create_string_buffer(128)
    # BUG FIX: the in/out length parameter must describe the *output*
    # buffer (ip_string, 128 bytes), not the sockaddr struct.  Passing
    # sizeof(addr) made WSAAddressToStringA believe the buffer was only
    # sizeof(addr) bytes, which can reject or truncate long IPv6 forms.
    ip_string_size = c_int(sizeof(ip_string))
    if address_family == socket.AF_INET:
        if len(packed_ip) != sizeof(addr.ipv4_addr):
            raise socket.error('packed IP wrong length for inet_ntop')
        memmove(addr.ipv4_addr, packed_ip, 4)
    elif address_family == socket.AF_INET6:
        if len(packed_ip) != sizeof(addr.ipv6_addr):
            raise socket.error('packed IP wrong length for inet_ntop')
        memmove(addr.ipv6_addr, packed_ip, 16)
    else:
        raise socket.error('unknown address family')
    if WSAAddressToStringA(byref(addr), addr_size, None,
                           ip_string, byref(ip_string_size)) != 0:
        raise socket.error(FormatError())
    # ip_string_size includes the NUL terminator; strip it before decoding.
    return (ip_string[:ip_string_size.value - 1]).decode(encoding)
def sendConsoleCommand(cls, command, timeout=1.0):
    """
    Send an encrypted console command to the local console port and
    return the decrypted response (or None if the nonce exchange fails).

    Fixes over the original: sendall() is used instead of send() (which
    may write only part of the buffer), the response payload is read in
    a loop until complete (recv() may return partial data), and the
    socket is always closed (it previously leaked on every path).
    """
    ourNonce = libnacl.utils.rand_nonce()
    theirNonce = None
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    if timeout:
        sock.settimeout(timeout)
    try:
        sock.connect(("127.0.0.1", cls._consolePort))
        sock.sendall(ourNonce)
        theirNonce = sock.recv(len(ourNonce))
        if len(theirNonce) != len(ourNonce):
            print("Received a nonce of size %d, expecting %d, console command will not be sent!" % (len(theirNonce), len(ourNonce)))
            if len(theirNonce) == 0:
                raise socket.error("Got EOF while reading a nonce of size %d, console command will not be sent!" % (len(ourNonce)))
            return None
        # Each side encrypts with half of its own nonce and half of the
        # peer's, so the two directions never share a nonce.
        halfNonceSize = int(len(ourNonce) / 2)
        readingNonce = ourNonce[0:halfNonceSize] + theirNonce[halfNonceSize:]
        writingNonce = theirNonce[0:halfNonceSize] + ourNonce[halfNonceSize:]
        msg = cls._encryptConsole(command, writingNonce)
        sock.sendall(struct.pack("!I", len(msg)))
        sock.sendall(msg)
        data = sock.recv(4)
        if not data:
            raise socket.error("Got EOF while reading the response size")
        (responseLen,) = struct.unpack("!I", data)
        # recv() may return a partial payload; loop until complete.
        data = b""
        while len(data) < responseLen:
            chunk = sock.recv(responseLen - len(data))
            if not chunk:
                raise socket.error("Got EOF while reading the response")
            data += chunk
        response = cls._decryptConsole(data, readingNonce)
        return response
    finally:
        sock.close()
def bind(self, address):
    """
    Bind this bluetooth server socket to (local_address, port).

    Only port 0 is accepted (an available port is then chosen via
    _getavailableport); the address must be "" or this device's own
    address.  Raises socket.error for invalid or in-use addresses and
    NotImplementedError for L2CAP sockets.
    """
    _checkaddrpair(address, False)
    if self.__isbound():
        raise _socket.error('Socket is already bound')
    elif self.__isconnected():
        raise _socket.error("Socket is already connected, cannot be bound")
    if self.__conn.proto == _lightbluecommon.L2CAP:
        raise NotImplementedError("L2CAP server sockets not currently supported")
    if address[1] != 0:
        raise _socket.error("must bind to port 0, other ports not supported on Mac OS X")
    address = (address[0], _getavailableport(self.__conn.proto))
    # address must be either empty string or local device address
    if address[0] != "":
        try:
            import lightblue
            localaddr = lightblue.gethostaddr()
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.  Any failure to determine
            # the local address is still treated as "unknown".
            localaddr = None
        if localaddr is None or address[0] != localaddr:
            raise _socket.error(
                errno.EADDRNOTAVAIL, os.strerror(errno.EADDRNOTAVAIL))
    # is this port already in use?
    if address[1] in self._boundports[self.__conn.proto]:
        raise _socket.error(errno.EADDRINUSE, os.strerror(errno.EADDRINUSE))
    self._boundports[self.__conn.proto].add(address[1])
    self.__port = address[1]
def read_response(self):
    """
    Return the next parsed reply from the hiredis reader, feeding it
    from the socket until a complete reply is available.

    Raises ConnectionError if the reader is gone or the socket errors,
    TimeoutError on socket timeout, and re-raises replies that parse
    to ConnectionError instances.
    """
    if not self._reader:
        raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
    # _next_response might be cached from a can_read() call
    if self._next_response is not False:
        response = self._next_response
        self._next_response = False
        return response
    response = self._reader.gets()
    socket_read_size = self.socket_read_size
    # gets() returns False until the reader has a full reply buffered.
    while response is False:
        try:
            if HIREDIS_USE_BYTE_BUFFER:
                bufflen = self._sock.recv_into(self._buffer)
                if bufflen == 0:
                    raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
            else:
                buffer = self._sock.recv(socket_read_size)
                # an empty string indicates the server shutdown the socket
                if not isinstance(buffer, bytes) or len(buffer) == 0:
                    raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
        except socket.timeout:
            raise TimeoutError("Timeout reading from socket")
        except socket.error:
            e = sys.exc_info()[1]
            raise ConnectionError("Error while reading from socket: %s" %
                                  (e.args,))
        if HIREDIS_USE_BYTE_BUFFER:
            self._reader.feed(self._buffer, 0, bufflen)
        else:
            self._reader.feed(buffer)
        # proactively, but not conclusively, check if more data is in the
        # buffer. if the data received doesn't end with \r\n, there's more.
        if HIREDIS_USE_BYTE_BUFFER:
            if bufflen > 2 and \
                    self._buffer[bufflen - 2:bufflen] != SYM_CRLF:
                continue
        else:
            if not buffer.endswith(SYM_CRLF):
                continue
        response = self._reader.gets()
    # if an older version of hiredis is installed, we need to attempt
    # to convert ResponseErrors to their appropriate types.
    if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
        if isinstance(response, ResponseError):
            response = self.parse_error(response.args[0])
        elif isinstance(response, list) and response and \
                isinstance(response[0], ResponseError):
            response[0] = self.parse_error(response[0].args[0])
    # if the response is a ConnectionError or the response is a list and
    # the first item is a ConnectionError, raise it as something bad
    # happened
    if isinstance(response, ConnectionError):
        raise response
    elif isinstance(response, list) and response and \
            isinstance(response[0], ConnectionError):
        raise response[0]
    return response
def run(self):
    # Per-connection worker loop (Python 2): read one-byte commands with
    # their payload, dispatch to the matching handler, and send back the
    # handler's reply.  Any protocol violation is raised as socket.error
    # so the single except block tears the connection down.
    print "#{} connection started".format(self.identifier)
    try:
        # NOTE(review): loops while stopped() is truthy — presumably
        # stopped() returns False once a stop was requested; confirm the
        # semantics of StoppableThread.stopped().
        while StoppableThread.stopped(self):
            inp = self.socket.recv(self.BufferSize)
            if inp == '':
                # recv() returning '' means the peer closed the socket.
                raise socket.error("connection terminated")
            # First byte selects the command; the rest is its payload.
            command, inp = inp[0], inp[1:]
            out = None
            if command == self.ListGamesCommand:
                out = self.handleListGames(inp)
            elif command == self.ListMovesCommand:
                out = self.handleListMoves(inp)
            elif command == self.SetGameCommand:
                out = self.handleSetGame(inp)
            if out is None:
                raise socket.error("got bad command {}".format(command+inp))
            self.socket.send(out)
    except socket.error as e:
        print "#{} Connection Error: {}".format(self.identifier,e)
        pass
    finally:
        # Always release the socket and deregister from the server.
        self.socket.close()
        self.server.unManage(self,True)
        print "#{} connection ended".format(self.identifier)
def inet_ntop(address_family, packed_ip):
    """
    Convert a packed IP address to its string form.

    IPv4 is delegated to socket.inet_ntoa(); IPv6 goes through the
    Windows WSAAddressToStringA API.

    :raises socket.error: on wrong packed length, unknown family, or
        WSAAddressToStringA failure
    """
    if address_family == socket.AF_INET:
        return socket.inet_ntoa(packed_ip)
    addr = sockaddr()
    addr.sa_family = address_family
    addr_size = ctypes.c_int(ctypes.sizeof(addr))
    ip_string = ctypes.create_string_buffer(128)
    ip_string_size = ctypes.c_int(ctypes.sizeof(ip_string))
    if address_family == socket.AF_INET6:
        if len(packed_ip) != ctypes.sizeof(addr.ipv6_addr):
            # BUG FIX: the message previously said "inet_ntoa".
            raise socket.error('packed IP wrong length for inet_ntop')
        ctypes.memmove(addr.ipv6_addr, packed_ip, 16)
    else:
        raise socket.error('unknown address family')
    if WSAAddressToStringA(
            ctypes.byref(addr),
            addr_size,
            None,
            ip_string,
            ctypes.byref(ip_string_size)
    ) != 0:
        raise socket.error(ctypes.FormatError())
    # ip_string_size includes the NUL terminator; strip it.
    return ip_string[:ip_string_size.value - 1]
def test_drain_nowait(self):
    """drain_nowait() must swallow socket.timeout and EAGAIN (returning
    False and clearing more_to_read), re-raise other socket errors, and
    on success call drain_events(timeout=0) and set more_to_read."""
    c = Connection(transport=Mock)
    c.drain_events = Mock()
    # socket.timeout is swallowed.
    c.drain_events.side_effect = socket.timeout()
    c.more_to_read = True
    self.assertFalse(c.drain_nowait())
    self.assertFalse(c.more_to_read)
    # EAGAIN is swallowed too.
    c.drain_events.side_effect = socket.error()
    c.drain_events.side_effect.errno = errno.EAGAIN
    c.more_to_read = True
    self.assertFalse(c.drain_nowait())
    self.assertFalse(c.more_to_read)
    # Any other errno propagates.
    c.drain_events.side_effect = socket.error()
    c.drain_events.side_effect.errno = errno.EPERM
    with self.assertRaises(socket.error):
        c.drain_nowait()
    c.more_to_read = False
    # Success path: non-blocking drain, more_to_read flagged.
    c.drain_events = Mock()
    self.assertTrue(c.drain_nowait())
    c.drain_events.assert_called_with(timeout=0)
    self.assertTrue(c.more_to_read)
def connect(self, address):
    """
    Establish an HTTP CONNECT tunnel to *address* through the proxy
    this socket is attached to.

    Sends a CONNECT request, incrementally parses the proxy's response
    (bounded at 64 KiB), and records the peer name on success.  Raises
    socket.error(EISCONN) if already connected and ECONNREFUSED if the
    proxy answers with a non-200 status.
    """
    if self.__peername is not None:
        raise socket.error(errno.EISCONN, 'Socket is already connected')
    host, port = address
    target = '%s:%s' % (host, port)
    request = Request(method='CONNECT', target=target)
    # The 'Host' header is not strictly needed,
    # it's only added here for consistency
    request.headers['Host'] = target
    request.headers.update(self.__headers)
    self.__sock.sendall(request.toString())
    # Cap the total amount of response data we are willing to parse.
    limit = 65536
    parser = ResponseParser()
    while True:
        data = self.__readline(limit)
        if not data:
            raise HTTPDataError("not enough data for response")
        limit -= len(data)
        response = parser.parse(data)
        if response:
            # Exactly one complete response is expected, with no
            # leftover bytes in the parser.
            assert len(response) == 1
            response = response[0]
            assert parser.done
            data = parser.clear()
            assert not data
            break
        if limit <= 0:
            raise HTTPLimitError("CONNECT: response too big")
    if response.code != 200:
        raise socket.error(errno.ECONNREFUSED,
                           '%d %s' % (response.code, response.phrase))
    self.__peername = (host, port)
def find_and_bind(self, first_try, minport, maxport, bind='', reuse=False,
                  ipv6_socket_style=1, randomizer=False):
    """
    Bind to the first usable port, trying *first_try* (if non-zero) and
    then candidate ports in [minport, maxport]; with randomizer=True a
    random sample of up to 20 ports is probed.  Returns the bound port
    or raises socket.error with the last bind failure.
    """
    err = 'maxport less than minport - no ports to check'
    if minport == 0 and maxport == 0:
        portrange = range(1)
    elif maxport - minport < 50 or not randomizer:
        # BUG FIX: shuffle() needs a mutable sequence; range() is
        # immutable on Python 3, so materialize it first.
        portrange = list(range(minport, maxport + 1))
        if randomizer:
            shuffle(portrange)
            portrange = portrange[:20]
    else:
        # Wide range: sample 20 distinct random candidates.
        portrange = []
        while len(portrange) < 20:
            listen_port = randrange(minport, maxport + 1)
            if listen_port not in portrange:
                portrange.append(listen_port)
    if first_try != 0:
        try:
            self.bind(first_try, bind, reuse=reuse,
                      ipv6_socket_style=ipv6_socket_style)
            return first_try
        except socket.error as e:
            # Remember the failure as text: `e` itself is unbound after
            # the except block on Python 3.
            err = str(e)
    for listen_port in portrange:
        try:
            interfaces = self.bind(listen_port, bind, reuse=reuse,
                                   ipv6_socket_style=ipv6_socket_style)
            if len(interfaces) == 0:
                raise socket.error('failed to bind on port')
            host, listen_port = interfaces[0]
            return listen_port
        except socket.error as e:
            # BUG FIX: the original re-raised here, so only the first
            # candidate port was ever tried; record and keep scanning.
            err = str(e)
    raise socket.error(err)
def setup_client(self):
    """
    Resolve the configured target hosts — via DNS SRV lookups of
    _2ping._udp.<host> when --srv is set, otherwise (host, port) pairs —
    and call setup_client_host() for each.  socket.error messages are
    re-raised with the offending hostname prepended.
    """
    if self.args.srv:
        if not has_dns:
            raise socket.error('DNS SRV lookups not available; please install dnspython')
        hosts = []
        for lookup in self.args.host:
            lookup_hosts_found = 0
            self.print_debug('SRV lookup: %s' % lookup)
            try:
                res = dns.resolver.query('_2ping._udp.%s' % lookup, 'srv')
            except dns.exception.DNSException as e:
                raise socket.error('%s: %s' % (lookup, repr(e)))
            for rdata in res:
                self.print_debug('SRV result for %s: %s' % (
                    lookup,
                    repr(rdata),
                ))
                # Skip duplicate (target, port) pairs across lookups.
                if (str(rdata.target), rdata.port) in hosts:
                    continue
                hosts.append((str(rdata.target), rdata.port))
                lookup_hosts_found += 1
            if lookup_hosts_found == 0:
                raise socket.error('%s: No SRV results' % lookup)
    else:
        hosts = [(x, self.args.port) for x in self.args.host]
    for (hostname, port) in hosts:
        try:
            self.setup_client_host(hostname, port)
        except socket.error as e:
            # Prefix the error text (args[0] or args[1] depending on
            # whether an errno is present) with the failing hostname.
            eargs = list(e.args)
            if len(eargs) == 1:
                eargs[0] = '%s: %s' % (hostname, eargs[0])
            else:
                eargs[1] = '%s: %s' % (hostname, eargs[1])
            raise socket.error(*eargs)
def connect(self, host, port):
    """Connect to a host on a given port.

    If the hostname ends with a colon (`:') followed by a number, and
    there is no port specified, that suffix will be stripped off and the
    number interpreted as the port number to use.
    """
    # Only split on ':' when there is exactly one (IPv4 host:port form).
    if not port and (host.find(':') == host.rfind(':')):
        i = host.rfind(':')
        if i >= 0:
            host, port = host[:i], host[i + 1:]
            try:
                port = int(port)
            except ValueError:
                raise socket.error("nonnumeric port")
    # if self.verbose > 0:
    #     print 'connect:', (host, port)
    msg = "getaddrinfo returns an empty list"
    self.sock = None
    # Try each resolved address until one connects.
    for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
        af, socktype, proto, _canonname, sa = res
        try:
            self.sock = socket.socket(af, socktype, proto)
            # if self.debuglevel > 0: print 'connect:', (host, port)
            self.sock.connect(sa)
        except socket.error as e:
            # Keep the most recent failure for the final error message.
            msg = str(e)
            # if self.debuglevel > 0: print 'connect fail:', (host, port)
            self.close()
            continue
        break
    if not self.sock:
        raise socket.error(msg)
def send_command(self, command, flags=0, payload={}):
    # Send a msgpack-encoded request (Python 2) and return the decoded
    # response, transparently reconnecting and following server
    # redirections.  NOTE(review): the mutable default `payload={}` is
    # only safe because it is immediately re-bound, never mutated.
    payload = Expression.to_expr(payload)
    packed = msgpack.packb(payload)
    if self.enable_gzip:
        packed = zlib.compress(packed)
        flags |= REQUEST_FLAG_GZIP

    def robust_recv(sock, size):
        # Read exactly `size` bytes; raise socket.error on EOF.
        data = ''
        while size:
            #ready = select.select([sock], [], [sock], 600)
            if 1: # ready[0] or ready[2]:
                piece = sock.recv(size)
                if piece == '':
                    raise socket.error()
                data += piece
                size -= len(piece)
            else:
                raise socket.error()
        return data

    while True:
        try:
            if self.sock == None:
                raise socket.error()
            self.seq += 1
            # Header: version, reserved, seq (8 bytes), command, flags, body length.
            to_send = struct.pack('<iiqiii', self.version, 0, self.seq, command, flags, len(packed)) + packed
            # send() may write partially; loop until drained.
            while to_send:
                sendsize = self.sock.send(to_send)
                to_send = to_send[sendsize:]
            if not flags & REQUEST_FLAG_NO_REPLY:
                # Response header is 16 bytes: seq, status, body size.
                seq, status, size = struct.unpack('<qii', robust_recv(self.sock, 16))
                if self.seq != seq:
                    print self.seq, seq
                    raise socket.error()
                if size > 0:
                    response_packed = robust_recv(self.sock, size)
                    if self.enable_gzip:
                        response_packed = zlib.decompress(response_packed)
                    response = msgpack.unpackb(response_packed)
                else:
                    response = None
                if status == RESPONSE_STATUS_SUCCESS:
                    return response
                elif status == RESPONSE_STATUS_REDIRECTION:
                    print 'CrabDB is redirecting to', (response['host'], response['port'])
                    if self.host_port == (response['host'], response['port']):
                        print 'ERROR: redirecting to itself.'
                        time.sleep(1)
                    else:
                        self.host_port = (response['host'], response['port'])
                    # Raising socket.error triggers the reconnect path
                    # against the (possibly new) host below.
                    raise socket.error()
                else:
                    raise CrabException('CrabDB Exception %d: %s' % (status, response))
            return
        except socket.error:
            if not self.__reconnect():
                if not self.retry:
                    return
                time.sleep(0.1)
def makeport(self):
    '''Create a new socket and send a PORT command for it.

    Binds a passive listening socket on an ephemeral port for the same
    address family as the control connection, announces it to the server
    with PORT (IPv4) or EPRT (otherwise), and returns the socket.
    '''
    err = None
    sock = None
    for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
        af, socktype, proto, canonname, sa = res
        try:
            sock = socket.socket(af, socktype, proto)
            sock.bind(sa)
        except socket.error as _:
            # Keep the last failure; clean up and try the next candidate.
            err = _
            if sock:
                sock.close()
            sock = None
            continue
        break
    if sock is None:
        if err is not None:
            raise err
        # BUG FIX: an unreachable `raise socket.error(msg)` followed
        # here with `msg` never defined (a latent NameError); removed.
        raise socket.error("getaddrinfo returns an empty list")
    sock.listen(1)
    port = sock.getsockname()[1]  # Get proper port
    host = self.sock.getsockname()[0]  # Get proper host
    if self.af == socket.AF_INET:
        resp = self.sendport(host, port)
    else:
        resp = self.sendeprt(host, port)
    if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
        sock.settimeout(self.timeout)
    return sock
def send(self, buf):
    # SSL-aware send (Python 2 / asyncore): translates the SSL
    # WANT_READ / WANT_WRITE conditions into asyncore-friendly return
    # values, buffering the payload when the write must be retried.
    self.ssl_want = None
    if not self.ssl:
        # Plain (non-SSL) path: defer to the base implementation.
        return super(AsyncHttpSocket, self).send(buf)
    r = None
    if not self.lastbuffer:
        try:
            r = self.socket.send(buf)
        except ssl.SSLError, e:
            if e.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                log.warning("write_want_write")
                self.ssl_want = "write"
                self.lastbuffer = buf  # -1: store the bytes for later
                # Report the whole buffer as consumed so asyncore does
                # not retry it; it will be flushed from lastbuffer.
                return len(buf)  # consume from asyncore
            elif e.args[0] == ssl.SSL_ERROR_WANT_READ:
                log.warning("write_want_read")
                self.ssl_want = "read"
                return 0
            else:
                raise socket.error(e, r)
        else:
            if r < 0:
                raise socket.error("unknown -1 for ssl send", r)
    return r
def _tunnel(self):
    # Establish a CONNECT tunnel through the proxy (Python 2 httplib
    # style): send the CONNECT request plus tunnel headers, validate the
    # status line, then drain the response headers up to the blank line.
    self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host,
                                              self._tunnel_port))
    for header, value in self._tunnel_headers.iteritems():
        self.send("%s: %s\r\n" % (header, value))
    self.send("\r\n")
    response = self.response_class(self.sock, strict = self.strict,
                                   method = self._method)
    (version, code, message) = response._read_status()
    if version == "HTTP/0.9":
        # HTTP/0.9 doesn't support the CONNECT verb, so if httplib has
        # concluded HTTP/0.9 is being used something has gone wrong.
        self.close()
        raise socket.error("Invalid response from tunnel request")
    if code != 200:
        self.close()
        raise socket.error("Tunnel connection failed: %d %s" %
                           (code, message.strip()))
    # Consume and discard the response headers, bounded per line.
    while True:
        line = response.fp.readline(_MAXLINE + 1)
        if len(line) > _MAXLINE:
            raise LineTooLong("header line")
        if not line:
            # for sites which EOF without sending trailer
            break
        if line == '\r\n':
            break
def InetPtoN(protocol, addr_string):
    """Convert ipv6 string to packed bytes.

    Args:
      protocol: socket.AF_INET or socket.AF_INET6
      addr_string: IPv6 address string

    Returns:
      bytestring representing address

    Raises:
      socket.error: on bad IPv6 address format
    """
    # IPv4 is delegated straight to the stdlib.
    if protocol == socket.AF_INET:
        return socket.inet_aton(addr_string)
    if protocol != socket.AF_INET6:
        raise socket.error("Unsupported protocol")
    if not addr_string:
        raise socket.error("Empty address string")
    if BAD_SINGLE_COLON.match(addr_string):
        raise socket.error("Start or ends with single colon")
    # "::" is all zeros: 32 hex digits -> 16 bytes.
    # NOTE(review): .decode("hex_codec") is Python 2 only.
    if addr_string == "::":
        return ("0" * 32).decode("hex_codec")
    # Normalize: strip any embedded IPv4 tail, trim leading/trailing
    # "::", then zero-pad each chunk to 4 hex digits.
    addr_string = _RemoveV4Ending(addr_string)
    addr_string = _StripLeadingOrTrailingDoubleColons(addr_string)
    addr_string = _ZeroPad(addr_string)
    try:
        return addr_string.decode("hex_codec")
    except TypeError:
        raise socket.error("Error decoding: %s" % addr_string)
def _ZeroPad(addr_string): """Pad out zeros in each address chunk as necessary.""" chunks = addr_string.split(":") total_length = len(chunks) if total_length > 8: raise socket.error( "Too many address chunks in %s, expected 8" % addr_string) double_colon = False addr_array = [] for chunk in chunks: if chunk: chunk_len = len(chunk) if chunk_len > 4: raise socket.error("Chunk must be length 4: %s" % addr_string) if chunk_len != 4: # Pad out with 0's until we have 4 digits chunk = "0" * (4 - chunk_len) + chunk addr_array.append(chunk) else: if double_colon: raise socket.error("More than one double colon in %s" % addr_string) else: double_colon = True # Add zeros for the compressed chunks addr_array.extend(["0000"] * (8 - total_length + 1)) if len(addr_array) != 8: raise socket.error("Bad address length, expected 8 chunks: %s" % addr_array) return "".join(addr_array)
def start_serving(self, connection_handler, host=None, port=None, *,
                  family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
                  sock=None, backlog=100, ssl=None, reuse_address=None):
    """Create listening sockets for host/port (or adopt *sock*) and start
    serving connections with *connection_handler* on each.

    host/port and sock are mutually exclusive.  One socket is bound per
    getaddrinfo() result; on any bind failure all sockets created so far
    are closed.  Returns the list of listening sockets.
    """
    if host is not None or port is not None:
        if sock is not None:
            raise ValueError(
                'host/port and sock can not be specified at the same time')
        AF_INET6 = getattr(socket, 'AF_INET6', 0)
        if reuse_address is None:
            # SO_REUSEADDR is the sensible default on POSIX (not cygwin).
            reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
        sockets = []
        if host == '':
            host = None
        infos = yield from self.getaddrinfo(
            host, port, family=family,
            type=socket.SOCK_STREAM, proto=0, flags=flags)
        if not infos:
            raise socket.error('getaddrinfo() returned empty list')
        completed = False
        try:
            for res in infos:
                af, socktype, proto, canonname, sa = res
                sock = socket.socket(af, socktype, proto)
                sockets.append(sock)
                if reuse_address:
                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
                                    True)
                # Disable IPv4/IPv6 dual stack support (enabled by
                # default on Linux) which makes a single socket
                # listen on both address families.
                if af == AF_INET6 and hasattr(socket, 'IPPROTO_IPV6'):
                    sock.setsockopt(socket.IPPROTO_IPV6,
                                    socket.IPV6_V6ONLY,
                                    True)
                try:
                    sock.bind(sa)
                except socket.error as err:
                    raise socket.error(err.errno, 'error while attempting '
                                       'to bind on address %r: %s'
                                       % (sa, err.strerror.lower()))
            completed = True
        finally:
            # On partial failure, close every socket opened so far.
            if not completed:
                for sock in sockets:
                    sock.close()
    else:
        if sock is None:
            raise ValueError(
                'host and port was not specified and no sock specified')
        sockets = [sock]
    for sock in sockets:
        sock.listen(backlog)
        sock.setblocking(False)
        self._start_serving(connection_handler, sock, ssl)
    return sockets
def write_to_socket(self, frame_data):
    """Write data to the socket.

    Blocks until the full payload is sent, retrying on socket timeouts
    and EWOULDBLOCK/EAGAIN.  On any other socket error (including a
    missing socket or a zero-byte send, both meaning the connection is
    gone) the error is recorded in self._exceptions and the write is
    abandoned.

    :param str frame_data:
    :return:
    """
    # `with` replaces the manual acquire/try/finally-release pattern;
    # the lock serializes writers so frames cannot interleave.
    with self._lock:
        total_bytes_written = 0
        bytes_to_send = len(frame_data)
        while total_bytes_written < bytes_to_send:
            try:
                if not self.socket:
                    raise socket.error('connection/socket error')
                bytes_written = \
                    self.socket.send(frame_data[total_bytes_written:])
                if bytes_written == 0:
                    # A zero-byte send means the peer is gone.
                    raise socket.error('connection/socket error')
                total_bytes_written += bytes_written
            except socket.timeout:
                # Transient: retry the same chunk.
                pass
            except socket.error as why:
                if why.args[0] in (EWOULDBLOCK, EAGAIN):
                    continue
                self._exceptions.append(AMQPConnectionError(why))
                return
def callback(self, inputs, outputs, errors):
    # Reactor callback (Python 2): accept new clients on the server
    # socket, dispatch received data to the matching client handler,
    # and flush pending output buffers.  Any unexpected exception is
    # logged through self._root.error().
    try:
        for s in inputs:
            if s == self.server:
                try:
                    conn, addr = self.server.accept()
                except socket.error as e:
                    # NOTE(review): `e[0]` is Python 2-only indexing of
                    # the exception; errno 24 == EMFILE.
                    if e[0] == 24: # ulimit maxfiles, need to raise ulimit
                        self._root.console_write('Maximum files reached, refused new connection.')
                    else:
                        raise socket.error(e)
                # NOTE(review): on the EMFILE branch `conn`/`addr` are
                # undefined here, so this raises NameError (caught by
                # the outer bare except) — confirm intended.
                client = Client.Client(self._root, conn, addr, self._root.session_id)
                self.addClient(client)
            else:
                try:
                    data = s.recv(4096)
                    if data:
                        if s in self.socketmap: # for threading, just need to pass this to a worker thread... remember to fix the problem for any calls to handler, and fix msg ids (handler.thread)
                            self.socketmap[s].Handle(data)
                        else:
                            self._root.console_write('Problem, sockets are not being cleaned up properly.')
                    else:
                        # Empty recv() == peer closed the connection.
                        raise socket.error('Connection closed.')
                except socket.error:
                    self.removeSocket(s)
        for s in outputs:
            try:
                self.socketmap[s].FlushBuffer()
            except KeyError:
                self.removeSocket(s)
            except socket.error:
                self.removeSocket(s)
    except:
        # Last-resort guard so the reactor loop never dies.
        self._root.error(traceback.format_exc())
def read_message(f): """Read a packet return msg received, if error happens, raise socket.error """ # Read two bytes: message version and length data = f.recv(8) if not data: raise socket.error('socket recv error') ver, length = unpack('!ii', data) # Read message body data = f.recv(length) if not data: raise socket.error('socket recv error') msg = OODict(cPickle.loads(data)) if 'payload_length' not in msg: # Simple message return msg # Read payload done_len = 0 payload = '' while True: data = f.recv(BUFFER_SIZE) if not data: raise socket.error('socket recv error') done_len += len(data) payload += data if done_len >= msg.payload_length: break debug('protocol.read_message: payload-length %d get %d', msg.payload_length, len(payload)) msg.payload = payload del msg['payload_length'] return msg
def _read_response(self): ''' Read response from the transport (socket) :return: tuple of the form (header, body) :rtype: tuple of two byte arrays ''' # Read response header buff_header = ctypes.create_string_buffer(12) nbytes = self._socket.recv_into(buff_header, 12) # Immediately raises an exception if the data cannot be read if nbytes != 12: raise socket.error(socket.errno.ECONNABORTED, "Software caused connection abort") # Extract body lenght from header body_length = struct_L.unpack(buff_header[4:8])[0] # Unpack body if it is not empty (i.e. not PING) if body_length != 0: buff_body = ctypes.create_string_buffer(body_length) nbytes = self._socket.recv_into(buff_body) # Immediately raises an exception if the data cannot be read if nbytes != body_length: raise socket.error(socket.errno.ECONNABORTED, "Software caused connection abort") else: buff_body = b"" return buff_header, buff_body
def get_socket(self, request, addrinfo, nonblocking):
    """Create a socket and start connecting it for *request*.

    *addrinfo* is (family, socktype, host, port); the tail of the tuple
    is passed to connect_ex() as the address.  # yikes
    For https requests the socket is wrapped with SSL before connecting.
    Returns the (possibly still-connecting, when nonblocking) socket.

    :raises socket.error: on any hard connect failure
    """
    family, socktype, sockaddr = addrinfo[0], addrinfo[1], addrinfo[2:]
    ret = socket.socket(family, socktype)
    is_ssl = request.host_url.scheme.lower() == 'https'
    if nonblocking:
        ret.setblocking(0)
    if is_ssl:
        ret = ssl.wrap_socket(ret)
    try:
        conn_res = ret.connect_ex(sockaddr)
    except socket.error as se:
        conn_res = se.args[0]
    if conn_res:
        # In-progress codes are fine for a non-blocking connect.
        if conn_res not in (errno.EISCONN, errno.EWOULDBLOCK,
                            errno.EINPROGRESS, errno.EALREADY):
            # BUG FIX: the original constructed this exception but never
            # raised it, silently swallowing hard connect failures.
            raise socket.error('Unknown', conn_res)
        # djb points out that some socket error conditions are only
        # visible with this 'one weird old trick'
        err = ret.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err:
            raise socket.error('Unknown', err)
    return ret
def test_hge_socket_error(self):
    # handle_gui_exception() with a bare socket.error: presumably the
    # handler logs self.msg, which the stubbed logger compares against
    # its expected message — verify against the utils.log test double.
    utils.log.set_expected_msg(self.msg)
    utils.handle_gui_exception(socket.error(), self.msg, None)
    self.assertEqual(utils.log.expected_msg, self.msg)
def test_select(self, __select):
    # Exercise asynpool._select() error handling: normal poll results,
    # EINTR (retry later, fd kept), EBADF (fd verified via select() and
    # discarded), MemoryError propagation, and unknown errnos re-raised.
    ebadf = socket.error()
    ebadf.errno = errno.EBADF
    with patch('select.poll', create=True) as poller:
        poll = poller.return_value = Mock(name='poll.poll')
        poll.return_value = {3}, set(), 0
        assert asynpool._select({3}, poll=poll) == ({3}, set(), 0)

        poll.return_value = {3}, set(), 0
        assert asynpool._select({3}, None, {3}, poll=poll) == (
            {3}, set(), 0,
        )

        # EINTR: returns empty sets, keeps the fd registered.
        eintr = socket.error()
        eintr.errno = errno.EINTR
        poll.side_effect = eintr

        readers = {3}
        assert asynpool._select(readers, poll=poll) == (set(), set(), 1)
        assert 3 in readers

    # EBADF from poll *and* from the verifying select(): fd is dropped.
    with patch('select.poll', create=True) as poller:
        poll = poller.return_value = Mock(name='poll.poll')
        poll.side_effect = ebadf
        with patch('select.select') as selcheck:
            selcheck.side_effect = ebadf
            readers = {3}
            assert asynpool._select(readers, poll=poll) == (
                set(), set(), 1,
            )
            assert 3 not in readers

    # MemoryError from poll propagates.
    with patch('select.poll', create=True) as poller:
        poll = poller.return_value = Mock(name='poll.poll')
        poll.side_effect = MemoryError()
        with pytest.raises(MemoryError):
            asynpool._select({1}, poll=poll)

    # MemoryError raised while re-verifying after EBADF propagates.
    with patch('select.poll', create=True) as poller:
        poll = poller.return_value = Mock(name='poll.poll')
        with patch('select.select') as selcheck:
            def se(*args):
                selcheck.side_effect = MemoryError()
                raise ebadf
            poll.side_effect = se
            with pytest.raises(MemoryError):
                asynpool._select({3}, poll=poll)

    # Unknown errno raised during re-verification propagates.
    with patch('select.poll', create=True) as poller:
        poll = poller.return_value = Mock(name='poll.poll')
        with patch('select.select') as selcheck:
            def se2(*args):
                selcheck.side_effect = socket.error()
                selcheck.side_effect.errno = 1321
                raise ebadf
            poll.side_effect = se2
            with pytest.raises(socket.error):
                asynpool._select({3}, poll=poll)

    # Unknown errno straight from poll propagates.
    with patch('select.poll', create=True) as poller:
        poll = poller.return_value = Mock(name='poll.poll')
        poll.side_effect = socket.error()
        poll.side_effect.errno = 34134
        with pytest.raises(socket.error):
            asynpool._select({3}, poll=poll)
def __init__(self, host, connect_timeout, write_timeout=None, read_timeout=None):
    """
    Open a TCP transport to an AMQP broker at *host* (optionally
    'host:port' or an IPv6 literal), trying each resolved address in
    turn, then configure the socket (blocking, TCP_NODELAY, keepalive,
    optional send/receive timeouts) and write the protocol header.
    """
    self.connected = True
    msg = None
    port = AMQP_PORT
    # Accept '[v6literal]:port' or plain 'host:port' forms.
    m = IPV6_LITERAL.match(host)
    if m:
        host = m.group(1)
        if m.group(2):
            port = int(m.group(2))
    else:
        if ':' in host:
            host, port = host.rsplit(':', 1)
            port = int(port)
    self.sock = None
    last_err = None
    for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, SOL_TCP):
        af, socktype, proto, canonname, sa = res
        try:
            self.sock = socket.socket(af, socktype, proto)
            try:
                set_cloexec(self.sock, True)
            except NotImplementedError:
                pass
            self.sock.settimeout(connect_timeout)
            self.sock.connect(sa)
        except socket.error as exc:
            # Remember the failure and try the next resolved address.
            msg = exc
            self.sock.close()
            self.sock = None
            last_err = msg
            continue
        break
    if not self.sock:
        # Didn't connect, return the most recent error message
        raise socket.error(last_err)
    try:
        self.sock.settimeout(None)  # set socket back to blocking mode
        self.sock.setsockopt(SOL_TCP, socket.TCP_NODELAY, 1)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        # set socket timeouts
        # NOTE(review): 'll' assumes struct timeval is two longs with
        # native sizes — platform-dependent; confirm for each target OS.
        for (timeout, interval) in ((socket.SO_SNDTIMEO, write_timeout),
                                    (socket.SO_RCVTIMEO, read_timeout)):
            if interval is not None:
                self.sock.setsockopt(
                    socket.SOL_SOCKET, timeout,
                    struct.pack('ll', interval, 0))
        self._setup_transport()
        self._write(AMQP_PROTOCOL_HEADER)
    except (OSError, IOError, socket.error) as exc:
        # Transient errnos (_UNAVAIL) keep `connected` True for retry.
        if get_errno(exc) not in _UNAVAIL:
            self.connected = False
        raise
def listen(self, *args, **kwargs):
    """Record the listen() call; raise if this mock is set to fail here."""
    entry = ('listen', args, kwargs)
    self._actions.append(entry)
    should_fail = self._fail == 'listen'
    if should_fail:
        raise socket.error("listen error occurred")
def bind(self, *args, **kwargs):
    """Record the bind() call; optionally simulate a bind failure."""
    self._actions.append(('bind',) + (args, kwargs))
    if self._fail != 'bind':
        return
    raise socket.error("bind error occurred")
def setsockopt(self, *args, **kwargs):
    """Record the setsockopt() call; optionally simulate a failure."""
    record = ('setsockopt', args, kwargs)
    self._actions.append(record)
    failing = (self._fail == 'setsockopt')
    if failing:
        raise socket.error("setsockopt error occurred")
def _check_closed(self):
    """Raise EBADF if this object has already been closed; no-op otherwise."""
    if not self._closed:
        return
    raise socket.error(errno.EBADF, 'Bad file descriptor')
def se2(*args):
    # Side-effect for a mocked poll(): on the first call, arm the
    # enclosing test's `selcheck` (select.select mock) to raise a
    # socket.error with an unrecognised errno, then raise EBADF so the
    # code under test falls back to that probe.  `selcheck` and `ebadf`
    # are closed over from the enclosing test.
    selcheck.side_effect = socket.error()
    selcheck.side_effect.errno = 1321
    raise ebadf
class TestSonosMediaPlayer(unittest.TestCase):
    """Test the media_player module."""

    # pylint: disable=invalid-name
    def setUp(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()

        def monkey_available(self):
            """Make a monkey available."""
            return True

        # Monkey patches: force every SonosEntity to report as available
        # so discovery does not depend on real network reachability.
        self.real_available = sonos.SonosEntity.available
        sonos.SonosEntity.available = monkey_available

    # pylint: disable=invalid-name
    def tearDown(self):
        """Stop everything that was started."""
        # Monkey patches: restore the original availability property.
        sonos.SonosEntity.available = self.real_available
        self.hass.stop()

    @mock.patch('pysonos.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    def test_ensure_setup_discovery(self, *args):
        """Test a single device using the autodiscovery provided by HASS."""
        sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass),
                             {'host': '192.0.2.1'})
        entities = self.hass.data[sonos.DATA_SONOS].entities
        assert len(entities) == 1
        assert entities[0].name == 'Kitchen'

    @mock.patch('pysonos.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    @mock.patch('pysonos.discover')
    def test_ensure_setup_config_interface_addr(self, discover_mock, *args):
        """Test an interface address config'd by the HASS config file."""
        discover_mock.return_value = {SoCoMock('192.0.2.1')}
        config = {
            DOMAIN: {
                CONF_PLATFORM: 'sonos',
                CONF_INTERFACE_ADDR: '192.0.1.1',
            }
        }
        assert setup_component(self.hass, DOMAIN, config)
        assert len(self.hass.data[sonos.DATA_SONOS].entities) == 1
        assert discover_mock.call_count == 1

    @mock.patch('pysonos.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    def test_ensure_setup_config_hosts_string_single(self, *args):
        """Test a single address config'd by the HASS config file."""
        config = {
            DOMAIN: {
                CONF_PLATFORM: 'sonos',
                CONF_HOSTS: ['192.0.2.1'],
            }
        }
        assert setup_component(self.hass, DOMAIN, config)
        entities = self.hass.data[sonos.DATA_SONOS].entities
        assert len(entities) == 1
        assert entities[0].name == 'Kitchen'

    @mock.patch('pysonos.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    def test_ensure_setup_config_hosts_string_multiple(self, *args):
        """Test multiple address string config'd by the HASS config file."""
        # A single comma-separated string is expected to be split into
        # two hosts by the platform.
        config = {
            DOMAIN: {
                CONF_PLATFORM: 'sonos',
                CONF_HOSTS: ['192.0.2.1,192.168.2.2'],
            }
        }
        assert setup_component(self.hass, DOMAIN, config)
        entities = self.hass.data[sonos.DATA_SONOS].entities
        assert len(entities) == 2
        assert entities[0].name == 'Kitchen'

    @mock.patch('pysonos.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    def test_ensure_setup_config_hosts_list(self, *args):
        """Test a multiple address list config'd by the HASS config file."""
        config = {
            DOMAIN: {
                CONF_PLATFORM: 'sonos',
                CONF_HOSTS: ['192.0.2.1', '192.168.2.2'],
            }
        }
        assert setup_component(self.hass, DOMAIN, config)
        entities = self.hass.data[sonos.DATA_SONOS].entities
        assert len(entities) == 2
        assert entities[0].name == 'Kitchen'

    @mock.patch('pysonos.SoCo', new=SoCoMock)
    @mock.patch.object(pysonos, 'discover', new=pysonosDiscoverMock.discover)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    def test_ensure_setup_sonos_discovery(self, *args):
        """Test a single device using the autodiscovery provided by Sonos."""
        sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass))
        entities = self.hass.data[sonos.DATA_SONOS].entities
        assert len(entities) == 1
        assert entities[0].name == 'Kitchen'

    @mock.patch('pysonos.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    @mock.patch.object(SoCoMock, 'set_sleep_timer')
    def test_sonos_set_sleep_timer(self, set_sleep_timerMock, *args):
        """Ensure pysonos methods called for sonos_set_sleep_timer service."""
        sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass),
                             {'host': '192.0.2.1'})
        entity = self.hass.data[sonos.DATA_SONOS].entities[-1]
        entity.hass = self.hass
        entity.set_sleep_timer(30)
        set_sleep_timerMock.assert_called_once_with(30)

    @mock.patch('pysonos.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    @mock.patch.object(SoCoMock, 'set_sleep_timer')
    def test_sonos_clear_sleep_timer(self, set_sleep_timerMock, *args):
        """Ensure pysonos method called for sonos_clear_sleep_timer service."""
        sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass),
                             {'host': '192.0.2.1'})
        entity = self.hass.data[sonos.DATA_SONOS].entities[-1]
        entity.hass = self.hass
        entity.set_sleep_timer(None)
        set_sleep_timerMock.assert_called_once_with(None)

    @mock.patch('pysonos.SoCo', new=SoCoMock)
    @mock.patch('pysonos.alarms.Alarm')
    @mock.patch('socket.create_connection', side_effect=socket.error())
    def test_set_alarm(self, pysonos_mock, alarm_mock, *args):
        """Ensure pysonos methods called for sonos_set_sleep_timer service."""
        # NOTE(review): decorators apply bottom-up, so `pysonos_mock` is
        # actually the patched socket.create_connection and `alarm_mock`
        # the patched pysonos.alarms.Alarm — the parameter names look
        # swapped; verify before relying on them.
        sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass),
                             {'host': '192.0.2.1'})
        entity = self.hass.data[sonos.DATA_SONOS].entities[-1]
        entity.hass = self.hass
        alarm1 = alarms.Alarm(pysonos_mock)
        alarm1.configure_mock(_alarm_id="1", start_time=None, enabled=False,
                              include_linked_zones=False, volume=100)
        with mock.patch('pysonos.alarms.get_alarms', return_value=[alarm1]):
            attrs = {
                'time': datetime.time(12, 00),
                'enabled': True,
                'include_linked_zones': True,
                'volume': 0.30,
            }
            # Unknown alarm id: nothing is saved.
            entity.set_alarm(alarm_id=2)
            alarm1.save.assert_not_called()
            entity.set_alarm(alarm_id=1, **attrs)
            assert alarm1.enabled == attrs['enabled']
            assert alarm1.start_time == attrs['time']
            assert alarm1.include_linked_zones == \
                attrs['include_linked_zones']
            # Fractional volume (0.30) is scaled to the 0-100 range.
            assert alarm1.volume == 30
            alarm1.save.assert_called_once_with()

    @mock.patch('pysonos.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    @mock.patch.object(pysonos.snapshot.Snapshot, 'snapshot')
    def test_sonos_snapshot(self, snapshotMock, *args):
        """Ensure pysonos methods called for sonos_snapshot service."""
        sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass),
                             {'host': '192.0.2.1'})
        entities = self.hass.data[sonos.DATA_SONOS].entities
        entity = entities[-1]
        entity.hass = self.hass
        snapshotMock.return_value = True
        entity.soco.group = mock.MagicMock()
        entity.soco.group.members = [e.soco for e in entities]
        sonos.SonosEntity.snapshot_multi(entities, True)
        assert snapshotMock.call_count == 1
        assert snapshotMock.call_args == mock.call()

    @mock.patch('pysonos.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    @mock.patch.object(pysonos.snapshot.Snapshot, 'restore')
    def test_sonos_restore(self, restoreMock, *args):
        """Ensure pysonos methods called for sonos_restore service."""
        from pysonos.snapshot import Snapshot
        sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass),
                             {'host': '192.0.2.1'})
        entities = self.hass.data[sonos.DATA_SONOS].entities
        entity = entities[-1]
        entity.hass = self.hass
        restoreMock.return_value = True
        entity._snapshot_group = mock.MagicMock()
        entity._snapshot_group.members = [e.soco for e in entities]
        entity._soco_snapshot = Snapshot(entity.soco)
        sonos.SonosEntity.restore_multi(entities, True)
        assert restoreMock.call_count == 1
        assert restoreMock.call_args == mock.call()
def testIsPortFreeException(self):
    """is_port_free() must report False when socket creation raises."""
    candidate = portpicker.pick_unused_port()
    with mock.patch.object(socket, 'socket') as patched_socket:
        patched_socket.side_effect = socket.error('fake socket error', 0)
        result = portpicker.is_port_free(candidate)
    self.assertFalse(result)
def proxy_ssl(self, host=None, port=None):
    """Open an SSL connection to host:port through the HTTP proxy.

    Issues an HTTP CONNECT request to the configured proxy (sending
    proxy auth headers when credentials are set), then wraps the
    resulting tunnel in SSL.  When certificate validation is enabled,
    the peer certificate's hostname is checked against the target host.

    Returns an httplib.HTTPConnection whose socket is the SSL-wrapped
    tunnel.  Raises socket.error with a fake errno of -71 when the
    proxy refuses the CONNECT, and InvalidCertificateException on a
    certificate hostname mismatch.
    """
    if host and port:
        host = '%s:%d' % (host, port)
    else:
        host = '%s:%d' % (self.host, self.port)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # (A bare ``try: ... except: raise`` previously wrapped this call;
    # it was a no-op and has been removed.)
    sock.connect((self.proxy, int(self.proxy_port)))
    boto.log.debug("Proxy connection: CONNECT %s HTTP/1.0\r\n", host)
    sock.sendall("CONNECT %s HTTP/1.0\r\n" % host)
    sock.sendall("User-Agent: %s\r\n" % UserAgent)
    if self.proxy_user and self.proxy_pass:
        for k, v in self.get_proxy_auth_header().items():
            sock.sendall("%s: %s\r\n" % (k, v))
        # See discussion about this config option at
        # https://groups.google.com/forum/?fromgroups#!topic/boto-dev/teenFvOq2Cc
        if config.getbool('Boto',
                          'send_crlf_after_proxy_auth_headers',
                          False):
            sock.sendall("\r\n")
    else:
        sock.sendall("\r\n")
    resp = httplib.HTTPResponse(sock, strict=True, debuglevel=self.debug)
    resp.begin()
    if resp.status != 200:
        # Fake a socket error, use a code that make it obvious it hasn't
        # been generated by the socket library
        raise socket.error(
            -71,
            "Error talking to HTTP proxy %s:%s: %s (%s)" %
            (self.proxy, self.proxy_port, resp.status, resp.reason))
    # We can safely close the response, it duped the original socket
    resp.close()
    h = httplib.HTTPConnection(host)
    if self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
        boto.log.debug(
            "wrapping ssl socket for proxied connection; "
            "CA certificate file=%s", self.ca_certificates_file)
        key_file = self.http_connection_kwargs.get('key_file', None)
        cert_file = self.http_connection_kwargs.get('cert_file', None)
        sslSock = ssl.wrap_socket(sock, keyfile=key_file,
                                  certfile=cert_file,
                                  cert_reqs=ssl.CERT_REQUIRED,
                                  ca_certs=self.ca_certificates_file)
        cert = sslSock.getpeercert()
        # BUGFIX: the original used self.host.split(':', 0)[0].  A
        # maxsplit of 0 performs NO split at all, so when self.host is
        # 'name:port' the whole string (port included) was compared
        # against the certificate, making hostname validation always
        # fail for hosts configured with an explicit port.  maxsplit=1
        # strips the port correctly.
        hostname = self.host.split(':', 1)[0]
        if not https_connection.ValidateCertificateHostname(
                cert, hostname):
            raise https_connection.InvalidCertificateException(
                hostname, cert, 'hostname mismatch')
    else:
        # Fallback for old Python without ssl.wrap_socket
        if hasattr(httplib, 'ssl'):
            sslSock = httplib.ssl.SSLSocket(sock)
        else:
            sslSock = socket.ssl(sock, None, None)
            sslSock = httplib.FakeSocket(sock, sslSock)
    # This is a bit unclean
    h.sock = sslSock
    return h
def socks5_handler(sock, address, hls={'hmac':{}}):
    # Handle one client connection: authenticate via an HTTP Upgrade
    # request carrying an HMAC digest, then speak XOR-obfuscated SOCKS5.
    # NOTE: the mutable default `hls` is intentional — it is a
    # per-process cache shared across calls.  (Python 2 code: chr/ord
    # byte juggling, xrange, e[0] errno access.)
    if not hls['hmac']:
        # Map hmac(password, byte).hexdigest() -> byte value, so the
        # digest embedded in the URL selects the XOR bitmask.
        hls['hmac'] = dict((hmac.new(__password__, chr(x)).hexdigest(), x)
                           for x in xrange(256))
    bufsize = 8192
    rfile = sock.makefile('rb', bufsize)
    wfile = sock.makefile('wb', 0)
    remote_addr, remote_port = address
    MessageClass = dict
    try:
        # --- Parse the HTTP request line and headers. ---
        line = rfile.readline(bufsize)
        if not line:
            raise socket.error('empty line')
        method, path, version = line.rstrip().split(' ', 2)
        headers = MessageClass()
        while 1:
            line = rfile.readline(bufsize)
            if not line or line == '\r\n':
                break
            keyword, _, value = line.partition(':')
            keyword = keyword.title()
            value = value.strip()
            headers[keyword] = value
        logging.info('%s:%s "%s %s %s" - -', remote_addr, remote_port,
                     method, path, version)
        # Only Upgrade requests are accepted.
        if headers.get('Connection', '').lower() != 'upgrade':
            logging.error('%s:%s Connection(%s) != "upgrade"',
                          remote_addr, remote_port,
                          headers.get('Connection'))
            return
        # The path must embed a 32-hex-digit HMAC digest.
        m = re.search('([0-9a-f]{32})', path)
        if not m:
            logging.error('%s:%s Path(%s) not valid',
                          remote_addr, remote_port, path)
            return
        need_digest = m.group(1)
        bitmask = hls['hmac'].get(need_digest)
        if bitmask is None:
            logging.error('%s:%s Digest(%s) not match',
                          remote_addr, remote_port, need_digest)
            return
        else:
            logging.info('%s:%s Digest(%s) return bitmask=%r',
                         remote_addr, remote_port, need_digest, bitmask)
        wfile.write('HTTP/1.1 101 Switching Protocols\r\n'
                    'Connection: Upgrade\r\n\r\n')
        wfile.flush()
        # All subsequent traffic is obfuscated by XOR-ing every byte
        # with the negotiated bitmask.
        rfile_read = lambda n: ''.join(chr(ord(x) ^ bitmask)
                                       for x in rfile.read(n))
        wfile_write = lambda s: wfile.write(''.join(chr(ord(x) ^ bitmask)
                                                    for x in s))
        # 1. SOCKS5 greeting: consume the method list, reply "no auth".
        rfile_read(ord(rfile_read(2)[-1]))
        wfile_write(b'\x05\x00');
        # 2. Request
        data = rfile_read(4)
        mode = ord(data[1])
        addrtype = ord(data[3])
        if addrtype == 1:       # IPv4
            addr = socket.inet_ntoa(rfile_read(4))
        elif addrtype == 3:     # Domain name
            addr = rfile_read(ord(rfile_read(1)[0]))
        port = struct.unpack('>H', rfile_read(2))
        reply = b'\x05\x00\x00\x01'
        try:
            logging.info('%s:%s socks5 mode=%r',
                         remote_addr, remote_port, mode)
            if mode == 1:  # 1. TCP Connect
                remote = socket.create_connection((addr, port[0]))
                logging.info('%s:%s TCP Connect to %s:%s',
                             remote_addr, remote_port, addr, port[0])
                local = remote.getsockname()
                reply += socket.inet_aton(local[0]) + \
                    struct.pack(">H", local[1])
            else:
                reply = b'\x05\x07\x00\x01'  # Command not supported
        except socket.error:
            # Connection refused
            reply = '\x05\x05\x00\x01\x00\x00\x00\x00\x00\x00'
        wfile_write(reply)
        # 3. Transfering
        if reply[1] == '\x00':  # Success
            if mode == 1:  # 1. Tcp connect
                # Pump bytes both ways, (de)obfuscating with the bitmask.
                socket_forward(sock, remote, bitmask=bitmask)
    except socket.error as e:
        # Ignore common client-disconnect conditions (WSAECONNABORTED,
        # broken pipe, or our own 'empty line' sentinel).
        if e[0] not in (10053, errno.EPIPE, 'empty line'):
            raise
    finally:
        rfile.close()
        wfile.close()
        sock.close()
def test_collect__channel_raises_socket_error(self):
    """collect() must tolerate a channel whose collect() raises socket.error."""
    # The original assigned ``self.conn.channels`` twice in a row
    # (``self.conn.channels = self.conn.channels = {...}``); a single
    # assignment is equivalent.
    self.conn.channels = {1: Mock(name='c1')}
    self.conn.channels[1].collect.side_effect = socket.error()
    # Must not propagate the channel's socket.error.
    self.conn.collect()
def bind(self, *pos, **kw):
    """
    Implements proxy connection for UDP sockets, which happens during the bind() phase.

    Without a proxy this is a plain bind.  For stream sockets the
    requested source address is only recorded (the real bind is
    deferred).  For UDP, a TCP control connection is opened through the
    proxy chain and a SOCKS5 UDP ASSOCIATE is issued; afterwards the
    socket is connected to the relay the proxy returned.
    """
    if not self.proxy:
        # No proxy chain configured: plain OS-level bind.
        return _orig_socket.bind(self, *pos, **kw)
    if self.proxy[-1][0] != SOCKS5:
        raise socket.error(EINVAL, 'Only SOCKS5 proxies supported')
    if self.type != socket.SOCK_DGRAM:
        # Stream socket: remember the desired source address and defer;
        # the actual bind happens when the connection is established.
        bind_addr = pos[0]
        if type(bind_addr) is not tuple or len(bind_addr) != 2:
            raise socket.error(EINVAL, 'Bind address should be tuple')
        self._socks5_bind_addr = bind_addr
        self.close()
        return True
    if self._proxyconn:
        raise socket.error(EINVAL, "Socket already bound to an address")
    last_proxy = self.proxy[-1]
    last_proxy_type = last_proxy[0]
    if last_proxy_type != SOCKS5:
        msg = "UDP only supported by SOCKS5 proxy type"
        raise socket.error(EOPNOTSUPP, msg)
    _BaseSocket.bind(self, *pos, **kw)
    # Need to specify actual local port because
    # some relays drop packets if a port of zero is specified.
    # Avoid specifying host address in case of NAT though.
    _, port = self.getsockname()
    dst = ("0", port)
    # Build the TCP control connection through the proxy chain,
    # restarting from scratch whenever a hop demands authentication.
    while True:
        try:
            self._proxyconn = _orig_socket()
            self._connect_first(self._proxyconn)
            self._connect_rest(self._proxyconn)
            break
        except AuthenticationRequired:
            # Reset the underlying socket and retry the whole chain.
            _orig_socket.__init__(self, *self._orig_args,
                                  **self._orig_kwargs)
    proxy = self.proxy[-1]
    properties = proxy[3:]
    UDP_ASSOCIATE = b"\x03"
    _, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE,
                                    dst, properties)
    # The relay is most likely on the same host as the SOCKS proxy,
    # but some proxies return a private IP address (10.x.y.z)
    _, port = relay
    if len(self.proxy) == 1:
        # Single hop: datagrams go straight to the proxy host at the
        # relay port it reported.
        proxy_host, proxy_port = self._proxy_addr(proxy)
        _BaseSocket.connect(self, (proxy_host, port))
    else:
        self._connect_first()
        self._connect_rest()
    self.proxy_sockname = ("0.0.0.0", 0)  # Unknown
def recv(self, bufsize=4096, flags=0):
    """Return (and consume) up to *bufsize* bytes of canned reply data."""
    if self._raise_socket_error:
        raise socket.error(self._raise_socket_error)
    chunk, remainder = (self._server_replies[:bufsize],
                        self._server_replies[bufsize:])
    self._server_replies = remainder
    return chunk
def send(self, string, flags=0):
    """Pretend to transmit *string*: record it and report a full write."""
    err = self._raise_socket_error
    if err:
        raise socket.error(err)
    self._client_sends.append(string)
    return len(string)
def connect(self, dest_pair):
    """
    Connects to the specified destination through a proxy.
    Uses the same API as socket's connect().
    To select the proxy server, use set_proxy().

    dest_pair - 2-tuple of (IP/hostname, port).
    """
    if len(dest_pair) != 2 or dest_pair[0].startswith("["):
        # Probably IPv6, not supported -- raise an error, and hope
        # Happy Eyeballs (RFC6555) makes sure at least the IPv4
        # connection works...
        raise socket.error("PySocks doesn't support IPv6")
    dest_addr, dest_port = dest_pair
    if self.type == socket.SOCK_DGRAM:
        # UDP: the proxy association is established lazily via bind();
        # connect() only records the expected peer.
        if not self._proxyconn:
            self.bind(("", 0))
        dest_addr = socket.gethostbyname(dest_addr)
        # If the host address is INADDR_ANY or similar, reset the peer
        # address so that packets are received from any peer
        if dest_addr == "0.0.0.0" and not dest_port:
            self.proxy_peername = None
        else:
            self.proxy_peername = (dest_addr, dest_port)
        return
    proxy_type, proxy_addr, proxy_port, rdns, username, password = \
        self.proxy
    # Do a minimal input check first
    if (not isinstance(dest_pair, (list, tuple))
            or len(dest_pair) != 2
            or not dest_addr
            or not isinstance(dest_port, int)):
        raise GeneralProxyError(
            "Invalid destination-connection (host, port) pair")
    if proxy_type is None:
        # Treat like regular socket object
        self.proxy_peername = dest_pair
        _BaseSocket.connect(self, (dest_addr, dest_port))
        return
    proxy_addr = self._proxy_addr()
    try:
        # Initial connection to proxy server
        _BaseSocket.connect(self, proxy_addr)
    except socket.error as error:
        # Error while connecting to proxy
        self.close()
        proxy_addr, proxy_port = proxy_addr
        proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
        printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
        msg = "Error connecting to {0} proxy {1}".format(
            printable_type, proxy_server)
        raise ProxyConnectionError(msg, error)
    else:
        # Connected to proxy server, now negotiate
        try:
            # Calls negotiate_{SOCKS4, SOCKS5, HTTP}
            negotiate = self._proxy_negotiators[proxy_type]
            negotiate(self, dest_addr, dest_port)
        except socket.error as error:
            # Wrap socket errors
            self.close()
            raise GeneralProxyError("Socket error", error)
        except ProxyError:
            # Protocol error while negotiating with proxy
            self.close()
            raise
def get_fd_error(self):
    """Return a socket.error describing the pending error on this socket.

    Reads SO_ERROR (which also clears it) and pairs the code with its
    system message.  The local is named ``err_code`` to avoid shadowing
    the ``errno`` module.
    """
    err_code = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
    return socket.error(err_code, os.strerror(err_code))
def neterror(*argv):
    """Construct a net.error from *argv*, register it as a WDE object, and return it."""
    error_obj = net.error(*argv)
    xerr.register_wde_object(error_obj)
    return error_obj
sock.settimeout(self.conn_timeout) sock.connect(sa) return sock except socket.error, e: err = e if sock is not None: sock.close() if err is not None: raise err else: # This likely means we tried to connect to an IPv6 only # host with an OS/kernel or Python interpreter that doesn't # support IPv6. The test case is Jython2.5.1 which doesn't # support IPv6 at all. raise socket.error('getaddrinfo failed') def connect(self): """Connect to Mongo and return a new (connected) socket. Note that the pool does not keep a reference to the socket -- you must call return_socket() when you're done with it. """ sock = self.create_connection() hostname = self.pair[0] if self.use_ssl: try: sock = ssl.wrap_socket(sock, certfile=self.ssl_certfile, keyfile=self.ssl_keyfile, ca_certs=self.ssl_ca_certs,
def start(self):
    """This method fires up the daemon server based on initialized parameters of the class"""
    # initialize the server instance with given resources
    try:
        print("Firing up TRex REST daemon @ port {trex_port} ...\n".format(
            trex_port=self.trex_daemon_port))
        logger.info(
            "Firing up TRex REST daemon @ port {trex_port} ...".format(
                trex_port=self.trex_daemon_port))
        logger.info("current working dir is: {0}".format(self.TREX_PATH))
        logger.info("current files dir is : {0}".format(
            self.trex_files_path))
        logger.debug(
            "Starting TRex server. Registering methods to process.")
        logger.info(self.get_trex_version(base64=False))
        self.server = SimpleJSONRPCServer(
            (self.trex_host, self.trex_daemon_port))
    except socket.error as e:
        # Re-raise bind/resolution failures with friendlier messages.
        if e.errno == errno.EADDRINUSE:
            logger.error(
                "TRex server requested address already in use. Aborting server launching."
            )
            print(
                "TRex server requested address already in use. Aborting server launching."
            )
            raise socket.error(
                errno.EADDRINUSE,
                "TRex daemon requested address already in use. "
                "Server launch aborted. Please make sure no other process is "
                "using the desired server properties.")
        elif isinstance(e, socket.gaierror) and e.errno == -3:
            # handling Temporary failure in name resolution exception
            raise socket.gaierror(
                -3, "Temporary failure in name resolution.\n"
                "Make sure provided hostname has DNS resolving.")
        else:
            raise
    # set further functionality and peripherals to server instance
    self.server.register_function(self.add)
    self.server.register_function(self.get_devices_info)
    self.server.register_function(self.cancel_reservation)
    self.server.register_function(self.connectivity_check)
    # alias
    self.server.register_function(self.connectivity_check,
                                  'check_connectivity')
    self.server.register_function(self.force_trex_kill)
    self.server.register_function(self.get_file)
    self.server.register_function(self.get_files_list)
    self.server.register_function(self.get_files_path)
    self.server.register_function(self.get_latest_dump)
    self.server.register_function(self.get_running_info)
    self.server.register_function(self.get_running_status)
    self.server.register_function(self.get_trex_cmds)
    self.server.register_function(self.get_trex_config)
    self.server.register_function(self.get_trex_config_metadata)
    self.server.register_function(self.get_trex_daemon_log)
    self.server.register_function(self.get_trex_log)
    self.server.register_function(self.get_trex_version)
    self.server.register_function(self.is_reserved)
    self.server.register_function(self.is_running)
    self.server.register_function(self.kill_all_trexes)
    self.server.register_function(self.push_file)
    self.server.register_function(self.reserve_trex)
    self.server.register_function(self.start_trex)
    self.server.register_function(self.stop_trex)
    self.server.register_function(self.wait_until_kickoff_finish)
    # Graceful shutdown on Ctrl-Z / SIGTERM.
    signal.signal(signal.SIGTSTP, self.stop_handler)
    signal.signal(signal.SIGTERM, self.stop_handler)
    try:
        self.zmq_monitor.start()
        self.server.serve_forever()  # blocks until shutdown
    except KeyboardInterrupt:
        logger.info("Daemon shutdown request detected.")
    finally:
        self.zmq_monitor.join()  # close ZMQ monitor thread resources
        self.server.shutdown()
def send(self, payload):
    # Test stub: unconditionally simulate a failed send.
    raise socket.error("Socket error")
def create_connection(dest_pair, proxy_type=None, proxy_addr=None, proxy_port=None, proxy_rdns=True, proxy_username=None, proxy_password=None, timeout=None, source_address=None, socket_options=None): """create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object Like socket.create_connection(), but connects to proxy before returning the socket object. dest_pair - 2-tuple of (IP/hostname, port). **proxy_args - Same args passed to socksocket.set_proxy() if present. timeout - Optional socket timeout value, in seconds. source_address - tuple (host, port) for the socket to bind to as its source address before connecting (only for compatibility) """ # Remove IPv6 brackets on the remote address and proxy address. remote_host, remote_port = dest_pair if remote_host.startswith("["): remote_host = remote_host.strip("[]") if proxy_addr and proxy_addr.startswith("["): proxy_addr = proxy_addr.strip("[]") err = None # Allow the SOCKS proxy to be on IPv4 or IPv6 addresses. for r in socket.getaddrinfo(proxy_addr, proxy_port, 0, socket.SOCK_STREAM): family, socket_type, proto, canonname, sa = r sock = None try: sock = socksocket(family, socket_type, proto) if socket_options: for opt in socket_options: sock.setsockopt(*opt) if isinstance(timeout, (int, float)): sock.settimeout(timeout) if proxy_type: sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns, proxy_username, proxy_password) if source_address: sock.bind(source_address) sock.connect((remote_host, remote_port)) return sock except (socket.error, ProxyConnectionError) as e: err = e if sock: sock.close() sock = None if err: raise err raise socket.error("gai returned empty list.")
def test_select(self):
    # Exercise asynpool._select()'s select.select()-based path:
    # normal results, EINTR swallowing, EBADF fd eviction, and
    # propagation of MemoryError / unknown errnos.
    ebadf = socket.error()
    ebadf.errno = errno.EBADF
    with patch('select.select') as select:
        # Happy path: readable / exceptional fds pass through.
        select.return_value = ([3], [], [])
        self.assertEqual(
            asynpool._select({3}),
            ([3], [], 0),
        )
        select.return_value = ([], [], [3])
        self.assertEqual(
            asynpool._select({3}, None, {3}),
            ([3], [], 0),
        )
        # EINTR: empty result, interrupted flag set, fd retained.
        eintr = socket.error()
        eintr.errno = errno.EINTR
        select.side_effect = eintr
        readers = {3}
        self.assertEqual(asynpool._select(readers), ([], [], 1))
        self.assertIn(3, readers)
    with patch('select.select') as select:
        # EBADF: each fd is re-probed individually (note the
        # zero-timeout probe call) and bad fds are discarded.
        select.side_effect = ebadf
        readers = {3}
        self.assertEqual(asynpool._select(readers), ([], [], 1))
        select.assert_has_calls([call([3], [], [], 0)])
        self.assertNotIn(3, readers)
    with patch('select.select') as select:
        # MemoryError is never swallowed.
        select.side_effect = MemoryError()
        with self.assertRaises(MemoryError):
            asynpool._select({1})
    with patch('select.select') as select:
        # MemoryError raised by the EBADF probe propagates too.
        def se(*args):
            select.side_effect = MemoryError()
            raise ebadf
        select.side_effect = se
        with self.assertRaises(MemoryError):
            asynpool._select({3})
    with patch('select.select') as select:
        # Unknown errno raised by the probe propagates.
        def se2(*args):
            select.side_effect = socket.error()
            select.side_effect.errno = 1321
            raise ebadf
        select.side_effect = se2
        with self.assertRaises(socket.error):
            asynpool._select({3})
    with patch('select.select') as select:
        # Unknown errno raised directly also propagates.
        select.side_effect = socket.error()
        select.side_effect.errno = 34134
        with self.assertRaises(socket.error):
            asynpool._select({3})
def recv(self, length: int) -> bytes:
    """Read up to *length* bytes from the socket; raise if not connected."""
    conn = self.sock
    if conn is None:
        raise socket.error("not connected")
    return conn.recv(length)
def test_read_otio_file(self):
    # End-to-end: install the rvpkg, launch RV with an .otio timeline,
    # connect over the RV network protocol, and verify clip names and
    # per-clip transforms at representative frames.

    # Install package
    temp_dir = self.create_temp_dir()
    source_package_path = create_rvpkg(temp_dir)
    install_package(source_package_path)
    env = os.environ.copy()
    env.update({'RV_SUPPORT_PATH': temp_dir})
    # Write the sample timeline to a temp .otio file for RV to load.
    sample_file = tempfile.NamedTemporaryFile('w',
                                              prefix='otio_data_',
                                              suffix='.otio',
                                              dir=temp_dir,
                                              delete=False)
    otio.adapters.write_to_file(sample_timeline, sample_file.name)
    run_cmd = '{root}/{exe} ' \
              '-nc ' \
              '-network ' \
              '-networkHost localhost ' \
              '-networkPort {port} ' \
              '{sample_file}' \
        .format(
            exe='RV' if platform.system() == 'Darwin' else 'rv',
            root=RV_BIN_DIR,
            port=9876,
            sample_file=sample_file.name
        )
    proc = Popen(shlex.split(run_cmd), env=env)
    # Connect with RV
    rvc = rvNetwork.RvCommunicator()
    try:
        # Poll for up to ~10s (20 attempts x 0.5s) for RV to accept
        # the network connection.
        attempts = 0
        while not rvc.connected:
            attempts += 1
            rvc.connect('localhost', 9876)
            if not rvc.connected:
                time.sleep(.5)
            if attempts == 20:
                raise socket.error("Unable to connect to RV!")
        # some time can pass between the RV connection
        # and the complete startup of RV
        print("Waiting for RV startup to complete")
        time.sleep(10)
        # Check clips at positions
        clip1 = rv_media_name_at_frame(rvc, 1)
        self.assertEqual(clip1, 'clip1.mov')
        # note RV has a default res of 1280,720 when the media doesn't exist
        aspect_ratio = 1280.0 / 720.0
        clip1_scale, clip1_translate = rv_transform_at_frame(rvc, 1)
        self.assertEqual(clip1_scale, [1.0, 1.0])
        self.assertEqual(clip1_translate, [0.0, 0.0])
        clip2 = rv_media_name_at_frame(rvc, 4)
        self.assertEqual(clip2, 'clip2.mov')
        clip2_scale, clip2_translate = rv_transform_at_frame(rvc, 4)
        self.assertEqual(clip2_scale, [1.0, 1.0])
        # Translations are expressed in aspect-corrected units.
        self.assertAlmostEqual(clip2_translate[0], 0.5 * aspect_ratio)
        self.assertEqual(clip2_translate[1], 0)
        clip3 = rv_media_name_at_frame(rvc, 7)
        self.assertEqual(clip3, 'clip3.mov')
        clip3_scale, clip3_translate = rv_transform_at_frame(rvc, 7)
        self.assertEqual(clip3_scale, [0.5, 0.5])
        self.assertAlmostEqual(clip3_translate[0], -0.25 * aspect_ratio)
        self.assertEqual(clip3_translate[1], -0.25)
        rvc.disconnect()
    finally:
        # Cleanup
        proc.terminate()
        shutil.rmtree(temp_dir)
def sendall(self, b: bytes) -> None:
    """Write all of *b* to the socket; raise if not connected."""
    conn = self.sock
    if conn is None:
        raise socket.error("not connected")
    conn.sendall(b)
def send_response(sock, dest, resp):
    """Pickle *resp* and send it to *dest*; raise unless fully transmitted.

    Returns True on a complete send; raises socket.error when sendto()
    reports fewer bytes than the pickled payload.
    """
    payload = pickle.dumps(resp)
    sent = sock.sendto(payload, dest)
    if sent != len(payload):
        raise socket.error("Transfer not completed.")
    return True
def test_memcache_server_check(self):
    # Build a proxy-server.conf with two memcache servers and a
    # keystone endpoint, fake the socket connections (one memcache
    # send fails), and verify the emitted metrics match expectations.
    with open(os.path.join(self.testdir, 'proxy-server.conf'), 'wb') as f:
        f.write('[DEFAULT]\n'
                '\n'
                '[filter:cache]\n'
                'memcache_servers = 1.2.3.4:9998, 1.2.3.5:9999\n'
                '\n'
                '[filter:authtoken]\n'
                'identity_uri = http://10.2.3.4:5000/\n'
                'auth_uri = http://10.2.3.4:5000/\n')
    expected = []
    # # fake pings succeed to 1.2.3.4 and fail to 1.2.3.5
    # ping_args = {('ping -c 1 -A 1.2.3.4'): 0,
    #              ('ping -c 1 -A 1.2.3.5'): 1}
    # fake_ping = make_fake_run_cmd_call(ping_args)
    #
    # scenarios = (('1.2.3.4', '_', Severity.ok, '1.2.3.4:_ ok'),
    #              ('1.2.3.5', '_', Severity.fail,
    #               '1.2.3.5:_ ping_check failed'))
    # for scenario in scenarios:
    #     expected_dimensions = dict(self.expected_dimensions_base)
    #     expected_dimensions.update({'hostname': scenario[0],
    #                                 'target_port': scenario[1]})
    #     if scenario[2] == Severity.fail:
    #         expected_dimensions.update({'fail_message':
    #                                     'ping_check failed'})
    #     expected_value_meta = dict(msg=scenario[3])
    #     expected_metric = dict(self.expected_metric_base)
    #     expected_metric['metric'] += '.ping_check'
    #     expected_metric.update(dict(dimensions=expected_dimensions,
    #                                 value=scenario[2],
    #                                 value_meta=expected_value_meta))
    #     expected.append(expected_metric)
    #
    # fake memcache connections, one ok, one fails.
    # fake keystone connection succeeds
    connection_status = {
        ('10.2.3.4', '5000'): 0,
        ('1.2.3.4', '9998'): 0,
        ('1.2.3.5', '9999'): 0
    }
    # sendall on 1.2.3.5:9999 raises, simulating a refused memcache peer.
    sendall_returns = {
        ('10.2.3.4', '5000'): 0,
        ('1.2.3.4', '9998'): 0,
        ('1.2.3.5', '9999'): socket.error('connection refused')
    }
    fake_create_connection = make_fake_create_connection(
        connection_status=connection_status,
        sendall_status=sendall_returns)
    # Expected memcache_check metrics: one ok, one fail.
    scenarios = (('1.2.3.4', '9998', Severity.ok, '1.2.3.4:9998 ok'),
                 ('1.2.3.5', '9999', Severity.fail,
                  '1.2.3.5:9999 connection refused'))
    for scenario in scenarios:
        expected_dimensions = dict(self.expected_dimensions_base)
        expected_dimensions.update({
            'hostname': scenario[0],
            'target_port': scenario[1]
        })
        if scenario[2] == Severity.fail:
            expected_dimensions.update(
                {'fail_message': 'connection refused'})
        expected_value_meta = dict(msg=scenario[3])
        expected_metric = dict(self.expected_metric_base)
        expected_metric['metric'] += '.memcache_check'
        expected_metric.update(
            dict(dimensions=expected_dimensions,
                 value=scenario[2],
                 value_meta=expected_value_meta))
        expected.append(expected_metric)
    # Expected connect_check metric for the keystone endpoint.
    scenarios = (('10.2.3.4', '5000', Severity.ok, '10.2.3.4:5000 ok'), )
    for scenario in scenarios:
        expected_dimensions = dict(self.expected_dimensions_base)
        expected_dimensions.update({
            'hostname': scenario[0],
            'target_port': scenario[1]
        })
        expected_value_meta = dict(msg=scenario[3])
        expected_metric = dict(self.expected_metric_base)
        expected_metric['metric'] += '.connect_check'
        expected_metric.update(
            dict(dimensions=expected_dimensions,
                 value=scenario[2],
                 value_meta=expected_value_meta))
        expected.append(expected_metric)

    # Run connectivity.main() under the patches and check every emitted
    # metric appears exactly once in `expected`.
    @patch(self.module + 'get_ring_hosts', fake_get_ring_hosts)
    @patch(self.module + 'server_type', lambda x: x == ServerType.proxy)
    # @patch(self.module + 'run_cmd', fake_ping)
    @patch(self.module + 'socket.create_connection',
           fake_create_connection)
    def do_it():
        actual = connectivity.main()
        for metric in actual:
            metric_dict = metric.metric()
            self.assertTrue(
                metric_dict in expected,
                'Unexpected result\n%s\nnot in:\n%s' % (metric_dict,
                                                        expected))
            expected.remove(metric_dict)
        self.assertFalse(expected, expected)

    do_it()
def test_send_message_error(self):
    """A socket error during send must surface as WriteFailed."""
    self.sck.exception = socket.error()
    with self.assertRaises(WriteFailed):
        self.writer.send_message(MSG_TYPE_STATUS,
                                 {MSG_KEY_STATUS: STATUS_RESYNC})