async def test_extended_stop(responder):
    """Stopping the responder while requests are in flight must close the client socket (EOF)."""
    resp, host, port = responder
    reader, writer = await asyncio.open_connection(host, port)
    # Queue several requests for a domain that never answers.
    for _ in range(3):
        writer.write(pynetstring.encode(b'test blackhole.loc'))
    await writer.drain()
    await asyncio.sleep(0.2)
    await resp.stop()
    # After stop() the server side should have closed the connection.
    assert await reader.read() == b''
    writer.close()
async def send(self, descr) -> None:
    """Encode *descr* (a str) as a UTF-8 netstring and queue it on the writer."""
    await self._connect()
    payload = pynetstring.encode(descr.encode("utf8"))
    self._writer.write(payload)
async def test_corrupt_dialog(responder):
    """A malformed netstring must make the responder drop the connection."""
    resp, host, port = responder
    reader, writer = await asyncio.open_connection(host, port)
    # Replace the trailing ',' terminator of a valid netstring with garbage.
    broken = pynetstring.encode(b'test good.loc')[:-1] + b'!'
    writer.write(broken)
    assert await reader.read() == b''
    writer.close()
async def test_cached(responder):
    """Two identical queries must produce identical (cached) answers."""
    resp, host, port = responder
    decoder = pynetstring.Decoder()
    reader, writer = await asyncio.open_connection(host, port)
    writer.write(pynetstring.encode(b'test good.loc'))
    writer.write(pynetstring.encode(b'test good.loc'))
    answers = []
    try:
        # Keep reading until both replies have been decoded.
        while len(answers) < 2:
            chunk = await reader.read(4096)
            assert chunk
            answers.extend(decoder.feed(chunk))
        assert answers[0] == answers[1]
    finally:
        writer.close()
async def test_fast_expire(responder):
    """A fast-expiring entry must resolve to the same answer after its TTL passes."""
    resp, host, port = responder
    decoder = pynetstring.Decoder()
    reader, writer = await asyncio.open_connection(host, port)

    async def answer():
        # Read until the decoder yields one complete netstring.
        while True:
            chunk = await reader.read(4096)
            assert chunk
            decoded = decoder.feed(chunk)
            if decoded:
                return decoded[0]

    try:
        writer.write(pynetstring.encode(b'test fast-expire.loc'))
        answer_a = await answer()
        # Wait past the entry's expiry before asking again.
        await asyncio.sleep(2)
        writer.write(pynetstring.encode(b'test fast-expire.loc'))
        answer_b = await answer()
        assert answer_a == answer_b == b'OK secure match=mail.loc'
    finally:
        writer.close()
async def _handle_request(self, jrpc):
    """Dispatch one parsed JSON-RPC request object to its registered method.

    Validates the envelope (known method, id presence for non-notifications,
    params present and of array/object type), runs the callback according to
    its registered mode, and — unless the method is a notification — writes
    a JSON-RPC result frame back on the wire as a netstring.

    Raises JSON_RPC_Error for protocol-level violations.
    """
    method = jrpc["method"]
    # Idiom fix: `x not in y` instead of `not x in y` (three occurrences below).
    if method not in self.__methods:
        raise JSON_RPC_Error(METHOD_NOT_FOUND, f"no such method '{method}'")
    m = self.__methods[method]
    nf = m.get("nf")
    # Non-notification methods require an "id" so the caller can match the reply.
    if not nf and "id" not in jrpc:
        raise JSON_RPC_Error(
            NOT_NOTIFICATION, f"method '{method}' is not a notification"
        )
    reqid = jrpc.get("id")
    if "params" not in jrpc:
        raise JSON_RPC_Error(
            INVALID_REQUEST, f"no parameters given for method '{method}'"
        )
    params = jrpc["params"]
    if not isinstance(params, (dict, list)):
        raise JSON_RPC_Error(INVALID_REQUEST, "params should be array or object")
    ret = {
        "jsonrpc": "2.0",
        "id": reqid
    }
    if m.get("magic"):
        # announced methods: echo the rpcswitch envelope back in the reply
        rpcswitch = jrpc["rpcswitch"]
        assert rpcswitch["vcookie"] == "eatme"
        ret["rpcswitch"] = rpcswitch
        try:
            mode = m["mode"]
            if mode == "async":
                res = [
                    RES_WAIT,
                    await self._async_wrapper(rpcswitch, m["cb"], reqid, params),
                ]
            elif mode == "sync":
                res = [RES_OK, m["cb"](reqid, params)]
            elif mode == "subproc":
                res = [RES_ERROR, "mode subproc not implemented"]
            else:
                res = [RES_ERROR, f"invalid mode '{mode}'"]
        except Exception:
            # Report callback failures to the peer instead of crashing the handler.
            exc_type, exc_value, exc_traceback = sys.exc_info()
            res = [RES_ERROR,
                   traceback.format_exception(exc_type, exc_value, exc_traceback)]
    else:
        # internal rpcswitch.* methods
        res = m["cb"](params)
    ret["result"] = res
    if nf:
        # Notification: log the would-be reply but do not send it.
        self._debug(f"N {ret!r}")
        return
    self._debug(f"W {ret!r}")
    self.writer.write(pynetstring.encode(json.dumps(ret).encode()))
async def query(host, port, domain):
    """Send a single 'test <domain>' request and return the decoded reply."""
    reader, writer = await asyncio.open_connection(host, port)
    decoder = pynetstring.Decoder()
    writer.write(pynetstring.encode(b'test ' + domain.encode('ascii')))
    try:
        # Read until one complete netstring reply has been decoded.
        while True:
            chunk = await reader.read(4096)
            assert chunk
            decoded = decoder.feed(chunk)
            if decoded:
                return decoded[0]
    finally:
        writer.close()
async def test_responder(responder, params):
    """The responder must return the expected reply for every read buffer size."""
    (request, response), bufsize = params
    resp, host, port = responder
    decoder = pynetstring.Decoder()
    reader, writer = await asyncio.open_connection(host, port)
    try:
        writer.write(pynetstring.encode(request))
        answer = None
        # Accumulate reads of `bufsize` bytes until a full netstring arrives.
        while answer is None:
            chunk = await reader.read(bufsize)
            assert chunk
            decoded = decoder.feed(chunk)
            if decoded:
                answer = decoded[0]
        assert answer == response
    finally:
        writer.close()
def done(t):
    # Completion callback attached to the task behind *waitid*: forwards the
    # task's outcome to the peer as an 'rpcswitch.result' notification.
    # NOTE(review): `self`, `waitid` and `rpcswitch` are free variables bound
    # by the enclosing scope (not visible in this chunk) — confirm at call site.
    self._debug(f"in done handler for waitid {waitid} and task {t!s}")
    status = RES_OK
    try:
        result = t.result()
    except Exception:
        # Task raised: report RES_ERROR with the formatted traceback as payload.
        status = RES_ERROR
        exc_type, exc_value, exc_traceback = sys.exc_info()
        result = traceback.format_exception(exc_type, exc_value, exc_traceback)
    req = {
        "jsonrpc": "2.0",
        "method": "rpcswitch.result",
        "rpcswitch": rpcswitch,
        "params": [status, waitid, result],
    }
    self._debug(f"W {req!r}")
    self.writer.write(pynetstring.encode(json.dumps(req).encode()))
async def test_unix_responder(unix_responder, params):
    """A responder on a UNIX socket must answer like the TCP one; socket mode must be 0666."""
    (request, response), bufsize = params
    resp, path = unix_responder
    assert os.stat(path).st_mode & 0o777 == 0o666
    decoder = pynetstring.Decoder()
    reader, writer = await asyncio.open_unix_connection(path)
    try:
        writer.write(pynetstring.encode(request))
        answer = None
        # Accumulate reads of `bufsize` bytes until a full netstring arrives.
        while answer is None:
            chunk = await reader.read(bufsize)
            assert chunk
            decoded = decoder.feed(chunk)
            if decoded:
                answer = decoded[0]
        assert answer == response
    finally:
        writer.close()
async def call(self, method, params):
    """Issue a JSON-RPC request for *method* and await the matching response."""
    future = self.__loop.create_future()
    self.__id += 1
    # Build a compact request id: base64 of an MD5 over call details, with
    # the trailing '=' padding stripped.
    digest = hashlib.md5(
        f"{self.__id}{method}{params!s}{id(future)}".encode("utf-8")
    ).digest()
    reqid = base64.b64encode(digest).decode("utf-8").strip("=")
    req = {
        "jsonrpc": "2.0",
        "method": method,
        "params": params,
        "id": reqid
    }
    self._debug(f"W {req!r}")
    # Register the future so the response handler can resolve it by id.
    self.__calls[reqid] = future
    self.writer.write(pynetstring.encode(json.dumps(req).encode()))
    return await future
async def test_responder_with_custom_socket(event_loop, responder, params):
    """The responder must also work over a pre-connected caller-supplied socket."""
    (request, response), bufsize = params
    resp, host, port = responder
    decoder = pynetstring.Decoder()
    sock = await utils.create_custom_socket(
        host, 0, flags=0,
        options=[(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)])
    # connect() is blocking — run it off the event loop thread.
    await event_loop.run_in_executor(None, sock.connect, (host, port))
    reader, writer = await asyncio.open_connection(sock=sock)
    try:
        writer.write(pynetstring.encode(request))
        answer = None
        while answer is None:
            chunk = await reader.read(bufsize)
            assert chunk
            decoded = decoder.feed(chunk)
            if decoded:
                answer = decoded[0]
        assert answer == response
    finally:
        writer.close()
def test_encode_one_byte_string(self):
    """Both str and bytes one-character inputs encode to b'1:X,'."""
    for payload in ('X', b'X'):
        self.assertEqual(b'1:X,', netstring.encode(payload))
async def process_request(self, raw_req):
    """Resolve the MTA-STS policy for one raw socketmap request.

    *raw_req* is the raw request bytes of the form b'<zone> <domain>'.
    Returns a netstring-encoded reply: 'OK secure match=<mx1>:<mx2>...' when a
    restrictive policy applies, otherwise 'NOTFOUND '.
    """
    # Update local cache
    async def cache_set(domain, entry):
        # Best-effort cache write: failures are logged, never propagated
        # (except task cancellation, which must re-raise).
        try:
            await self._cache.set(domain, entry)
        except asyncio.CancelledError:  # pragma: no cover pylint: disable=try-except-raise
            raise
        except Exception as exc:  # pragma: no cover
            self._logger.exception("Cache set failed: %s", str(exc))
    have_policy = True
    # Parse request and canonicalize domain
    req_zone, _, req_domain = raw_req.decode('latin-1').partition(' ')
    domain = req_domain
    # Skip lookups for parent domain policies
    # Skip lookups to non-recepient domains or non-domains at all
    if domain.startswith('.') or domain.startswith('[') or ':' in domain:
        return pynetstring.encode('NOTFOUND ')
    # Normalize domain name
    domain = req_domain.lower().strip().rstrip('.')
    # Find appropriate zone config
    if req_zone in self._zones:
        zone_cfg = self._zones[req_zone]
    else:
        zone_cfg = self._default_zone
    # Lookup for cached policy
    try:
        cached = await self._cache.get(domain)
    except asyncio.CancelledError:  # pragma: no cover pylint: disable=try-except-raise
        raise
    except Exception as exc:  # pragma: no cover
        # Treat a cache failure as a miss and fall through to a fresh lookup.
        self._logger.exception("Cache get failed: %s", str(exc))
        cached = None
    ts = time.time()  # pylint: disable=invalid-name
    # Check if cached record exists and recent enough to omit
    # DNS lookup and cache update
    if cached is None or ts - cached.ts > self._grace:
        self._logger.debug("Lookup PERFORMED: domain = %s", domain)
        # Check if newer policy exists or
        # retrieve policy from scratch if there is no cached one
        latest_pol_id = None if cached is None else cached.pol_id
        status, policy = await zone_cfg.resolver.resolve(
            domain, latest_pol_id)
        if status is STSFetchResult.NOT_CHANGED:
            # Same policy id upstream: refresh the cached entry's timestamp.
            cached = CacheEntry(ts, cached.pol_id, cached.pol_body)
            await cache_set(domain, cached)
        elif status is STSFetchResult.VALID:
            pol_id, pol_body = policy
            cached = CacheEntry(ts, pol_id, pol_body)
            await cache_set(domain, cached)
        else:
            # Fetch failed: fall back to the cached policy unless it is
            # missing or already past its max_age.
            if cached is None:
                have_policy = False
            else:
                # Check if cached policy is expired
                if cached.pol_body['max_age'] + cached.ts < ts:
                    have_policy = False
    else:
        self._logger.debug("Lookup skipped: domain = %s", domain)
    if have_policy:
        mode = cached.pol_body['mode']
        # pylint: disable=no-else-return
        # 'none' mode — and 'testing' mode in non-strict zones — does not
        # constrain delivery, so report NOTFOUND.
        if mode == 'none' or (mode == 'testing' and not zone_cfg.strict):
            return pynetstring.encode('NOTFOUND ')
        else:
            assert cached.pol_body[
                'mx'], "Empty MX list for restrictive policy!"
            # Strip wildcard prefixes and deduplicate before building the reply.
            mxlist = [mx.lstrip('*') for mx in set(cached.pol_body['mx'])]
            resp = "OK secure match=" + ":".join(mxlist)
            return pynetstring.encode(resp)
    else:
        return pynetstring.encode('NOTFOUND ')
def handle(self):
    """Answer one Postfix socketmap lookup: resolve a local alias to recipients.

    Reads a netstring request of the form '<name> <key>' from the socket,
    resolves <key> (an email address) against EmailList aliases, usernames,
    and spell-corrected aliases in that order, and writes back a netstring
    reply: 'OK <addresses>', 'NOTFOUND ', 'PERM ...' or 'TEMP ...'.
    """
    self.data = pynetstring.decode(self.request.recv(1024))[0]
    data_parts = self.data.decode().split(' ')
    if len(data_parts) != 2:
        self.request.sendall(pynetstring.encode('PERM Invalid request'))
        return
    # We don't really care about the name of the request/lookup
    # lookup_name = parts[0]
    email_parts = data_parts[1].split('@')
    if len(email_parts) == 1:
        # Postfix does various lookups, not just with the to-address of the
        # received email. We'll just act like we don't know anything about them.
        self.request.sendall(pynetstring.encode('NOTFOUND '))
        return
    elif len(email_parts) != 2:
        # Either an empty key or several '@'... doesn't seem right.
        self.request.sendall(pynetstring.encode('PERM Invalid request'))
        return
    alias = email_parts[0]
    # Ignore lookup requests for other domains than our own
    if email_parts[1] != django.conf.settings.EMAIL_DOMAIN:
        self.request.sendall(pynetstring.encode('NOTFOUND '))
        return
    # If it's a bounce address, send it to the 'list-bounces' list
    if bounce_pattern.match(alias) is not None:
        alias = 'list-bounces'
    # Resolution order: exact list alias, then username, then spell-corrected
    # list alias; first hit wins.
    found = False
    try:
        email_list = models.EmailList.objects.get(alias=alias)
        found = True
    except models.EmailList.DoesNotExist:
        found = False
    if found == False:
        # Perhaps it's a username
        try:
            user = User.objects.annotate(
                username_lower=django.db.models.functions.Lower(
                    'username')).get(username_lower=alias.lower())
            if user.person is not None and user.person.email is not None and user.person.email != '':
                # Username match with a usable address: reply immediately.
                reply = 'OK {}'.format(user.person.email)
                self.request.sendall(pynetstring.encode(reply))
                return
            else:
                found = False
        except User.DoesNotExist:
            found = False
    if found == False:
        # Perhaps it matches some common (and uncommon) misspellings
        try:
            email_list = models.EmailList.objects.get(
                alias=spellcorrect(alias))
            found = True
        except models.EmailList.DoesNotExist:
            found = False
    if found == False:
        self.request.sendall(pynetstring.encode('NOTFOUND '))
        return
    addresses = email_list.get_recipients_email()
    if len(addresses) == 0:
        # A known list with no members is a transient condition for Postfix.
        self.request.sendall(
            pynetstring.encode('TEMP No recipients in list'))
        return
    reply = 'OK {}'.format(','.join(addresses))
    self.request.sendall(pynetstring.encode(reply))
def process_output_package(self, output_package):
    """Serialize an output package's message for transmission.

    Returns a tuple of (netstring-framed pickled message bytes, the
    package's endpoint list).
    """
    # Fix: removed the unused local `data_endpoint_list`.
    message_data = pickle.dumps(output_package.message)
    data = pynetstring.encode(message_data)
    return data, output_package.endpoint_list
async def test_early_disconnect(responder):
    """A client that sends a request and immediately disconnects must not wedge the responder."""
    resp, host, port = responder
    reader, writer = await asyncio.open_connection(host, port)
    request = pynetstring.encode(b'test good.loc')
    writer.write(request)
    # Close without ever reading the reply.
    writer.close()
def test_encode_sequence(self):
    """Encoding a list yields one netstring per element, in order."""
    expected = [b'3:foo,', b'3:bar,']
    self.assertEqual(expected, netstring.encode(['foo', 'bar']))
def test_encode_empty_string(self):
    """Empty str and bytes inputs both encode to the zero-length netstring b'0:,'."""
    for empty in ('', b''):
        self.assertEqual(b'0:,', netstring.encode(empty))
async def _handle(self):
    """Main receive loop: read netstring frames, decode JSON-RPC, dispatch.

    Runs until the connection closes or the ping timeout elapses; returns
    WORK_CONNECTION_CLOSED or WORK_PING_TIMEOUT accordingly. Protocol-level
    failures on an individual frame produce a JSON-RPC error reply and the
    loop continues.
    """
    self._debug('in _handle')
    while True:
        try:
            data = await asyncio.wait_for(self.reader.read(10000), timeout=self.ping_timeout)
        except asyncio.TimeoutError:
            self._logger.error('_handle: timeout waiting for work and/or ping')
            await asyncio.wait_for(self.close(), timeout=10.0)
            return WORK_PING_TIMEOUT
        if not data:  # eof?
            self._logger.error("_handle: connection closed")
            return WORK_CONNECTION_CLOSED
        decoded_list = self._decoder.feed(data)  # FIXME: try?
        for item in decoded_list:
            reqid = None
            try:
                try:
                    jrpc = json.loads(item.decode())
                except json.JSONDecodeError:
                    raise JSON_RPC_Error(PARSE_ERROR, "invalid JSON")
                self._debug(f"R {jrpc!r}")
                if not isinstance(jrpc, dict):
                    raise JSON_RPC_Error(INVALID_REQUEST, "not a JSON object")
                if jrpc.get("jsonrpc", "") != "2.0":
                    raise JSON_RPC_Error(INVALID_REQUEST, "not JSON-RPC 2.0")
                if "id" in jrpc:
                    reqid = jrpc["id"]
                    if reqid and not (
                        isinstance(reqid, str) or isinstance(reqid, int)
                    ):
                        raise JSON_RPC_Error(
                            INVALID_REQUEST, "id is not a string or number"
                        )
                if "method" in jrpc:
                    try:
                        await self._handle_request(jrpc)
                    except JSON_RPC_Error:
                        raise
                    except Exception:
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        raise RPC_Switch_Client_Error(traceback.format_exception(exc_type, exc_value, exc_traceback))
                elif reqid and ("result" in jrpc or "error" in jrpc):
                    self._handle_response(jrpc)
                else:
                    raise JSON_RPC_Error(
                        INVALID_REQUEST, "invalid JSON_RPC object!"
                    )
            except JSON_RPC_Error as err:
                res = {
                    "jsonrpc": "2.0",
                    "id": reqid,
                    "error": {
                        "code": err.code,
                        "message": err.message
                    }
                }
                if err.data:
                    # BUGFIX: was `res["error"]["data"] = data`, which leaked
                    # the raw socket read buffer instead of the error's own
                    # data payload that was just tested above.
                    res["error"]["data"] = err.data
                self._debug(f"E {res!r}")
                self.writer.write(pynetstring.encode(json.dumps(res).encode()))