def test_h2_712_03(self, env):
     # same as 712_02, but with smaller chunk payloads
     #
     url = env.mkurl("https", "cgi", "/h2proxy/h2test/echo")
     payload = "0"
     parts = [f"ck{idx}-{payload}\n" for idx in range(3)]
     # the proxy adds an extra connection hop, so allow a bit more delay
     delay = timedelta(seconds=0.4)
     CurlPiper(env=env, url=url).stutter_check(parts, delay)
 def test_h2_712_02(self, env):
     # like 712_01, but routed through mod_proxy_http2
     #
     url = env.mkurl("https", "cgi", "/h2proxy/h2test/echo")
     payload = "0123456789"
     parts = ["chunk-%03d-%s\n" % (idx, payload) for idx in range(3)]
     # the proxy adds an extra connection hop, so allow a bit more delay
     delay = timedelta(seconds=0.4)
     pipe = CurlPiper(env=env, url=url)
     pipe.stutter_check(parts, delay)
# Beispiel #3 ("Example #3" — separator from the scraped source, kept as a comment)
 def test_h2_105_10(self, env):
     # sanity check without any delays: the request should complete cleanly
     conf = H2Conf(env)
     conf.add_vhost_cgi()
     conf.install()
     assert env.apache_restart() == 0
     curl = CurlPiper(env=env, url=env.mkurl("https", "cgi", "/h2test/delay"))
     curl.start()
     out, _ = curl.close()
     assert curl.exitcode == 0
     # expect 3 chunks of 8k each in the combined stdout
     assert len("".join(out)) == 3 * 8192
# Beispiel #4 ("Example #4" — separator from the scraped source, kept as a comment)
 def test_h2_105_11(self, env):
     # connection Timeout shorter than the stream's delay:
     # we receive the first response chunk, then the connection times out
     conf = H2Conf(env)
     conf.add_vhost_cgi()
     conf.add("Timeout 1")
     conf.install()
     assert env.apache_restart() == 0
     curl = CurlPiper(env=env, url=env.mkurl("https", "cgi", "/h2test/delay?5"))
     curl.start()
     out, _ = curl.close()
     # only one 8k chunk arrives before the timeout hits
     assert len("".join(out)) == 8192
 def test_h2_712_01(self, env):
     # gRPC-style request that never ends but keeps streaming answers, see #207
     #
     # how this works:
     # - curl POSTs data to /h2test/echo on the server
     # - we feed curl the payload in chunks, pausing between sends
     # - curl's stdout is buffered on its way into Python, so the
     #   response bytes only become visible at the very end
     # - therefore curl runs with timestamped tracing on stderr,
     #   which tells us when each response chunk actually arrived
     # - if the server echoes each incoming chunk right away, as it
     #   should, the receive timestamps are spaced roughly by the
     #   delay between our sends
     #
     url = env.mkurl("https", "cgi", "/h2test/echo")
     payload = "0123456789"
     parts = [f"chunk-{idx:03d}-{payload}\n" for idx in range(5)]
     delay = timedelta(seconds=0.2)  # this is short, but works on my machine (tm)
     CurlPiper(env=env, url=url).stutter_check(parts, delay)
# Beispiel #6 ("Example #6" — separator from the scraped source, kept as a comment)
 def test_h2_105_12(self, env):
     # generous connection Timeout, but a short stream timeout,
     # while we trickle in a slow POST
     conf = H2Conf(env)
     conf.add_vhost_cgi()
     conf.add("Timeout 10")
     conf.add("H2StreamTimeout 1")
     conf.install()
     assert env.apache_restart() == 0
     url = env.mkurl("https", "cgi", "/h2test/delay?5")
     curl = CurlPiper(env=env, url=url)
     curl.start()
     attempts = 3
     while attempts > 0:
         attempts -= 1
         time.sleep(2)
         try:
             curl.send("0123456789\n")
         except BrokenPipeError:
             # the server closed the stream after its timeout fired
             break
     curl.close()
     assert curl.response
     # 408 Request Timeout is the expected stream-timeout response
     assert curl.response['status'] == 408