def test_gunicorn_timeout(tmpdir):
    """A handler that sleeps past the worker timeout should produce a
    structured RequestTimeout error response in devel mode."""
    def test_app():
        import time

        def app(environ, start_response):
            start_response('200 OK', [('content-type', 'text/plain')])
            time.sleep(100)
            return []

    # write the body of test_app out as a standalone wsgi module
    module_path = str(tmpdir / 'app.py')
    with open(module_path, 'w') as module_file:
        module_file.write(get_function_body(test_app))

    # ensure devel mode
    env = os.environ.copy()
    env['DEVEL'] = '1'

    proc = GunicornProcess('app:app', args=['-t1'], cwd=str(tmpdir), env=env)
    with proc:
        response = requests.get(
            p.url('/') if False else proc.url('/'),
            headers={'Accept': 'application/json'},
        ).json()
        assert response['title'].startswith(
            'RequestTimeout: gunicorn worker timeout (pid:')
def test_gunicorn_clears_context():
    """Each request under a sync worker starts with a fresh context:
    counters set by one request must not leak into the next."""
    app = __name__ + ':counter_app'
    server = GunicornProcess(app, args=['--worker-class=sync'])
    with server:
        first = requests.get(server.url('/')).json()
        second = requests.get(server.url('/')).json()
        third = requests.get(server.url('/')).json()
        # every response carries only its own counter key
        assert first['1'] == '1'
        assert second['2'] == '2'
        assert '1' not in second
        assert third['3'] == '3'
        assert '2' not in third
def test_django_app():
    """Serve the Django test project under gunicorn and check it responds.

    Fix: the original declared a ``monkeypatch`` fixture parameter but never
    used it; the fixture injection was dead weight, so it is removed.
    """
    # NOTE(review): another test_django_app is defined later in this module;
    # pytest only collects the last definition with a given name — consider
    # renaming one of them.
    env = os.environ.copy()
    # make the django project importable by the gunicorn workers
    env['PYTHONPATH'] = 'tests/django_app/'
    with GunicornProcess('tests.django_app.django_app.wsgi:application',
                         env=env) as p:
        response = requests.get(p.url('/'))
        assert response.status_code == 200
def test_multiprocess_metrics(tmpdir):
    """Counter increments made in one worker are visible via the metrics
    endpoint when multiprocess mode is active."""
    from prometheus_client.parser import text_string_to_metric_families

    def get_count(response):
        # pull the sample value for the 'test' metric family, if present
        for family in text_string_to_metric_families(response.text):
            if family.name == 'test':
                return family.samples[0][2]

    # ensure we isolate multiprocess metrics
    env = os.environ.copy()
    env.pop('prometheus_multiproc_dir', None)

    with GunicornProcess(APP, args=['-w', '2'], env=env) as server:
        increment_url = server.url('/_status/test/prometheus')
        metrics_url = server.url('/_status/metrics')

        baseline = get_count(requests.get(metrics_url))
        baseline = 0 if baseline is None else baseline

        for step in range(1, 3):
            requests.get(increment_url)
            # try ensure the update is written before we read it
            sleep(1)
            current = get_count(requests.get(metrics_url))
            assert current == float(baseline + step)
def test_gunicorn_prometheus_cleanup(caplog):
    """SIGHUP-ing the gunicorn master aggregates per-worker prometheus db
    files into archives without losing any counts."""
    # NOTE(review): a later test in this module reuses this name; pytest
    # only collects the last definition.
    caplog.set_level(logging.INFO)
    app = __name__ + ':counter_app'
    server = GunicornProcess(app, args=['--worker-class=sync', '-w', '16'])

    def increment(n):
        for _ in range(n):
            requests.get(server.url('/_status/test/prometheus'))

    def files():
        return sorted(os.listdir(os.environ['prometheus_multiproc_dir']))

    def stats():
        return requests.get(server.url('/_status/metrics')).text

    with server:
        increment(1000)
        assert 'test 1000.0' in stats()
        assert len(files()) > 3

        os.kill(server.ps.pid, signal.SIGHUP)
        time.sleep(2.0)

        # counts survive the reload, workers' files were consolidated
        assert 'test 1000.0' in stats()
        assert len(files()) == 3  # archives plus the bugged master file

        increment(1000)
        assert 'test 2000.0' in stats()
        assert len(files()) > 3

        os.kill(server.ps.pid, signal.SIGHUP)
        time.sleep(2.0)

        assert 'test 2000.0' in stats()
        assert len(files()) == 3  # archives plus the bugged master file
def test_flask_app():
    """A flask app served by gunicorn responds and tags the view name."""
    # NOTE(review): this name is redefined later in the module; pytest only
    # collects the last test_flask_app.
    try:
        import flask  # noqa
    except ImportError:
        pytest.skip('need flask installed')

    with GunicornProcess('tests.flask_app:app') as proc:
        resp = requests.get(proc.url('/'))
        assert resp.status_code == 200
        assert resp.headers['X-View-Name'] == 'tests.flask_app.index'
def test_gunicorn_status_interface():
    """When TALISKER_STATUS_INTERFACE is set, the status endpoints answer
    only on that interface; other binds fall through to the app (404)."""
    args = ['--bind', '127.0.0.2:0']  # additional bind
    env = os.environ.copy()
    env['TALISKER_REVISION_ID'] = 'test-rev-id'
    env['TALISKER_STATUS_INTERFACE'] = '127.0.0.2'

    with GunicornProcess('tests.wsgi_app:app404', args=args, env=env) as proc:
        app_resp = requests.get(proc.url('/_status/check', iface='127.0.0.1'))
        status_resp = requests.get(
            proc.url('/_status/check', iface='127.0.0.2'))
        # default interface: request hits the 404 app
        assert app_resp.status_code == 404
        assert app_resp.text == 'Not Found'
        # status interface: talisker answers with the revision id
        assert status_resp.status_code == 200
        assert status_resp.text == 'test-rev-id\n'
def test_django_app(django):
    """Serve the Django test project under gunicorn and check the view
    name header is set."""
    # NOTE(review): the local `import django` shadows the `django` fixture
    # parameter — presumably the fixture does setup and the import is only
    # an installed-check; confirm and drop one of them.
    try:
        import django  # noqa
    except ImportError:
        pytest.skip('need django installed')

    wsgi_app = 'tests.django_app.django_app.wsgi:application'
    with GunicornProcess(wsgi_app) as proc:
        resp = requests.get(proc.url('/'))
        assert resp.status_code == 200
        assert resp.headers['X-View-Name'] == 'django_app.views.index'
def test_prometheus_lock_timeouts(tmpdir):
    """try_prometheus_lock raises PrometheusLockTimeout when the lock is
    already held; the app reports which branch it took."""
    def test_app():
        from talisker import prometheus
        prometheus.setup_prometheus_multiproc(async_mode=False)
        prometheus._lock.acquire()

        def app(environ, start_response):
            try:
                with prometheus.try_prometheus_lock(0.5):
                    result = b'no timeout'
            except prometheus.PrometheusLockTimeout:
                result = b'timeout'
            start_response('200 OK', [('content-type', 'text/plain')])
            return [result]

    # materialise the app above as a module gunicorn can load
    module_path = str(tmpdir / 'app.py')
    with open(module_path, 'w') as module_file:
        module_file.write(get_function_body(test_app))

    with GunicornProcess('app:app', args=['-w', '2'],
                         cwd=str(tmpdir)) as proc:
        resp = requests.get(proc.url('/'))
        assert resp.text == 'timeout'
def test_flask_app():
    """Smoke test: a flask app served by gunicorn answers 200."""
    # NOTE(review): duplicate name — another test_flask_app exists in this
    # module; pytest only collects the last definition.
    with GunicornProcess('tests.flask_app:app') as server:
        reply = requests.get(server.url('/'))
        assert reply.status_code == 200
def test_gunicorn_eventlet_worker():
    """Smoke test: the app also serves requests under the eventlet
    worker class."""
    with GunicornProcess(APP, args=['--worker-class=eventlet']) as server:
        reply = requests.get(server.url('/'))
        assert reply.status_code == 200
def test_gunicorn_prometheus_cleanup(caplog):
    """On SIGHUP, per-worker prometheus db files are merged into the two
    archive files and counts are preserved across the reload."""
    caplog.set_level(logging.INFO)
    app = __name__ + ':counter_app'
    workers = 8
    server = GunicornProcess(
        app, args=['--worker-class=sync', '-w', str(workers)])

    def increment(n):
        for _ in range(n):
            requests.get(server.url('/_status/test/prometheus'))

    def files(pid):
        """Split the multiproc dir into (archives, per-worker files)."""
        pid = str(pid)
        worker_files = set()
        archives = set()
        for path in os.listdir(os.environ['prometheus_multiproc_dir']):
            # ignore master pids
            if pid in path:
                continue
            if '_archive.db' in path:
                archives.add(path)
            else:
                worker_files.add(path)
        return archives, worker_files

    def stats():
        return requests.get(server.url('/_status/metrics')).text

    name = counter_name('test_total')
    valid_archives = {'counter_archive.db', 'histogram_archive.db'}
    # travis is slow
    sleep_factor = 20 if os.environ.get('CI') == 'true' else 1

    with server:
        # forking can be really slow on travis, so make sure *all* the workers
        # have had time to spin up before running the test
        time.sleep(1 * sleep_factor)
        increment(2000)
        archives, before_hup = files(server.ps.pid)
        assert len(archives) == 0
        # different number of files depending on prometheus_client version
        # so we assert against 1x or 2x workers
        assert len(before_hup) in (workers, 2 * workers)
        assert name + ' 2000.0' in stats()

        os.kill(server.ps.pid, signal.SIGHUP)
        time.sleep(2 * sleep_factor)

        archives, after_hup = files(server.ps.pid)
        assert archives == valid_archives
        assert before_hup.isdisjoint(after_hup)
        assert name + ' 2000.0' in stats()

        increment(2000)
        assert name + ' 4000.0' in stats()
        archives, before_second_hup = files(server.ps.pid)
        assert archives == valid_archives
        assert len(before_second_hup) in (workers, 2 * workers)

        os.kill(server.ps.pid, signal.SIGHUP)
        time.sleep(2 * sleep_factor)

        archives, after_second_hup = files(server.ps.pid)
        assert archives == valid_archives
        assert before_second_hup.isdisjoint(after_second_hup)
        assert name + ' 4000.0' in stats()
def test_flask_app():
    """A flask app under gunicorn answers 200 and tags the view name."""
    with GunicornProcess('tests.flask_app:app') as gunicorn:
        result = requests.get(gunicorn.url('/'))
        assert result.status_code == 200
        assert result.headers['X-View-Name'] == 'tests.flask_app.index'