def frontend_server(tmpdir, user):
    # type: (LocalPath, str) -> Iterator[str]
    """Start a frontend server plus an authenticating proxy, yield the proxy URL.

    Binds two ephemeral ports: one for the grouper-ctl user_proxy (which
    injects *user* as the authenticated user) and one for grouper-fe.  The
    frontend inherits its listening socket on stdin (--listen-stdin); the
    proxy re-opens its port itself (see TODO below).  After yielding the
    proxy base URL, both child processes are terminated.
    """
    proxy_socket = _bind_socket()
    # SO_REUSEADDR protects against the close/re-open race described in the
    # TODO below, where user_proxy must rebind the port we close here.
    proxy_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    proxy_port = proxy_socket.getsockname()[1]
    fe_socket = _bind_socket()
    fe_port = fe_socket.getsockname()[1]
    proxy_cmd = [
        sys.executable,
        src_path("bin", "grouper-ctl"),
        "-vvc",
        src_path("config", "test.yaml"),
        "user_proxy",
        "-P",
        str(fe_port),
        "-p",
        str(proxy_port),
        user,
    ]
    fe_cmd = [
        sys.executable,
        src_path("bin", "grouper-fe"),
        "-vvc",
        src_path("config", "test.yaml"),
        "-d",
        db_url(tmpdir),
        "--listen-stdin",
    ]
    subprocesses = []
    logging.info("Starting command: %s", " ".join(fe_cmd))
    # The frontend takes its already-bound listening socket on stdin, so
    # there is no bind race for fe_port.
    fe_process = subprocess.Popen(fe_cmd, env=bin_env(), stdin=fe_socket.fileno())
    subprocesses.append(fe_process)
    fe_socket.close()
    # TODO(rra): There is a race condition here because grouper-ctl user_proxy doesn't implement
    # --listen-stdin yet, which in turn is because the built-in Python HTTPServer doesn't support
    # wrapping a pre-existing socket.  Since we have to close the socket so that grouper-ctl
    # user_proxy can re-open it, something else might grab it in the interim.  Once it is rewritten
    # using Tornado, it can use the same approach as the frontend and API servers and take an open
    # socket on standard input.  At that point, we can also drop the SO_REUSEADDR above, which is
    # there to protect against the race condition.
    logging.info("Starting command: %s", " ".join(proxy_cmd))
    proxy_socket.close()
    proxy_process = subprocess.Popen(proxy_cmd, env=bin_env())
    subprocesses.append(proxy_process)
    logging.info("Waiting on server to come online")
    _wait_until_accept(fe_port)
    _wait_until_accept(proxy_port)
    logging.info("Connection established")
    yield "http://localhost:{}".format(proxy_port)
    # Cleanup after the fixture consumer finishes.  terminate() sends SIGTERM,
    # giving the children a chance to shut down cleanly.
    for p in subprocesses:
        p.terminate()
def frontend_server(tmpdir, user):
    # type: (LocalPath, str) -> Iterator[str]
    """Start a frontend server behind a user_proxy and yield the proxy URL.

    Two ephemeral ports are bound up front: one handed to grouper-fe on
    stdin (--listen-stdin), one released and re-bound by grouper-ctl
    user_proxy, which authenticates requests as *user*.  Both children are
    killed once the fixture consumer is done.
    """
    proxy_sock = _bind_socket()
    # user_proxy has to rebind this port after we close it below, so allow
    # address reuse to narrow the close/re-open race window.
    proxy_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    proxy_port = proxy_sock.getsockname()[1]
    frontend_sock = _bind_socket()
    frontend_port = frontend_sock.getsockname()[1]

    ctl_command = [
        sys.executable,
        src_path("bin", "grouper-ctl"),
        "-vvc",
        src_path("config", "dev.yaml"),
        "user_proxy",
        "-P",
        str(frontend_port),
        "-p",
        str(proxy_port),
        user,
    ]
    frontend_command = [
        sys.executable,
        src_path("bin", "grouper-fe"),
        "-vvc",
        src_path("config", "dev.yaml"),
        "-d",
        db_url(tmpdir),
        "--listen-stdin",
    ]

    children = []
    logging.info("Starting command: %s", " ".join(frontend_command))
    # The frontend inherits its already-bound listening socket via stdin.
    children.append(
        subprocess.Popen(frontend_command, env=bin_env(), stdin=frontend_sock.fileno())
    )
    frontend_sock.close()

    # grouper-ctl user_proxy cannot yet accept a socket on stdin (the stdlib
    # HTTPServer it uses cannot wrap an existing socket), so the port must be
    # closed here and re-opened by the child — a known race, mitigated by the
    # SO_REUSEADDR set above.
    logging.info("Starting command: %s", " ".join(ctl_command))
    proxy_sock.close()
    children.append(subprocess.Popen(ctl_command, env=bin_env()))

    logging.info("Waiting on server to come online")
    _wait_until_accept(frontend_port)
    _wait_until_accept(proxy_port)
    logging.info("Connection established")

    yield "http://localhost:{}".format(proxy_port)

    # Hard-kill both children during teardown.
    for child in children:
        child.kill()
def api_server(tmpdir):
    # type: (LocalPath) -> Iterator[str]
    """Start a grouper-api server on an ephemeral port and yield its address.

    The listening socket is bound here and handed to the child on stdin
    (--listen-stdin), so there is no port race.  The server is terminated
    after the fixture consumer finishes.
    """
    listener = _bind_socket()
    port = listener.getsockname()[1]
    command = [
        sys.executable,
        src_path("bin", "grouper-api"),
        "-vvc",
        src_path("config", "test.yaml"),
        "-d",
        db_url(tmpdir),
        "--listen-stdin",
    ]
    logging.info("Starting server with command: %s", " ".join(command))
    server = subprocess.Popen(command, env=bin_env(), stdin=listener.fileno())
    # The child owns the socket now; drop our copy of the file descriptor.
    listener.close()
    logging.info("Waiting on server to come online")
    _wait_until_accept(port)
    logging.info("Connection established")
    yield "localhost:{}".format(port)
    server.terminate()
def api_server(tmpdir):
    # type: (LocalPath) -> Iterator[str]
    """Start a grouper-api server on an unused port and yield its address.

    Unlike the --listen-stdin variants, the child binds the port itself via
    the -p flag, so _wait_until_accept also covers the child's bind time.
    The server is killed once the fixture consumer is done.
    """
    port = _get_unused_port()
    command = [
        sys.executable,
        src_path("bin", "grouper-api"),
        "-c",
        src_path("config", "dev.yaml"),
        "-p",
        str(port),
        "-d",
        db_url(tmpdir),
    ]
    logging.info("Starting server with command: %s", " ".join(command))
    server = subprocess.Popen(command, env=bin_env())
    logging.info("Waiting on server to come online")
    _wait_until_accept(port)
    logging.info("Connection established")
    yield "localhost:{}".format(port)
    server.kill()
def api_server(tmpdir):
    # type: (LocalPath) -> Iterator[str]
    """Start a grouper-api server and yield its host:port address.

    The listening socket is bound in this process and passed to the child on
    stdin (--listen-stdin), avoiding any bind race on the port.  The child
    is killed after the fixture consumer finishes.
    """
    api_socket = _bind_socket()
    api_port = api_socket.getsockname()[1]
    cmd = [
        sys.executable,
        src_path("bin", "grouper-api"),
        "-vvc",
        src_path("config", "dev.yaml"),
        "-d",
        db_url(tmpdir),
        "--listen-stdin",
    ]
    logging.info("Starting server with command: %s", " ".join(cmd))
    p = subprocess.Popen(cmd, env=bin_env(), stdin=api_socket.fileno())
    # The child has inherited the descriptor; close our copy.
    api_socket.close()
    logging.info("Waiting on server to come online")
    _wait_until_accept(api_port)
    logging.info("Connection established")
    yield "localhost:{}".format(api_port)
    p.kill()
def frontend_server(tmpdir, user):
    # type: (LocalPath, str) -> Iterator[str]
    """Start a frontend server plus user_proxy and yield the proxy URL.

    Both children bind their own ports (no --listen-stdin handoff), so each
    port is reserved with _get_unused_port and then re-bound by the child.
    The proxy authenticates requests as *user*.  Both children are killed
    during teardown.
    """
    proxy_port = _get_unused_port()
    fe_port = _get_unused_port()

    ctl_command = [
        sys.executable,
        src_path("bin", "grouper-ctl"),
        "-vvc",
        src_path("config", "dev.yaml"),
        "user_proxy",
        "-P",
        str(fe_port),
        "-p",
        str(proxy_port),
        user,
    ]
    fe_command = [
        sys.executable,
        src_path("bin", "grouper-fe"),
        "-vvc",
        src_path("config", "dev.yaml"),
        "-p",
        str(fe_port),
        "-d",
        db_url(tmpdir),
    ]

    children = []
    for command in (ctl_command, fe_command):
        logging.info("Starting command: %s", " ".join(command))
        children.append(subprocess.Popen(command, env=bin_env()))

    logging.info("Waiting on server to come online")
    _wait_until_accept(proxy_port)
    _wait_until_accept(fe_port)
    logging.info("Connection established")

    yield "http://localhost:{}".format(proxy_port)

    for child in children:
        child.kill()
def async_server(standard_graph, tmpdir):
    # type: (GroupGraph, LocalPath) -> Iterator[str]
    """Start a frontend server behind a user_proxy and yield the proxy URL.

    Both children bind their own ports.  The proxy injects the (redacted)
    hard-coded user below as the authenticated user.  Children are killed
    during teardown.

    NOTE(review): standard_graph is accepted but never used here — presumably
    it is a fixture dependency that populates the database first; confirm.
    NOTE(review): only the proxy port is awaited, not fe_port — requests made
    immediately may race the frontend's startup; confirm this is intentional.
    """
    proxy_port = _get_unused_port()
    fe_port = _get_unused_port()
    cmds = [
        [
            sys.executable,
            src_path("bin", "grouper-ctl"),
            "-vvc",
            src_path("config", "dev.yaml"),
            "user_proxy",
            "-P",
            str(fe_port),
            "-p",
            str(proxy_port),
            "*****@*****.**",
        ],
        [
            sys.executable,
            src_path("bin", "grouper-fe"),
            "-c",
            src_path("config", "dev.yaml"),
            "-p",
            str(fe_port),
            "-d",
            db_url(tmpdir),
        ],
    ]
    subprocesses = []
    for cmd in cmds:
        print("Starting command: " + " ".join(cmd))
        p = subprocess.Popen(cmd, env=bin_env())
        subprocesses.append(p)
    print("Waiting on server to come online")
    wait_until_accept(proxy_port)
    print("Connection established")
    yield "http://localhost:{}".format(proxy_port)
    for p in subprocesses:
        p.kill()
def async_api_server(standard_graph, tmpdir):
    # type: (GroupGraph, LocalPath) -> Iterator[str]
    """Start a grouper-api server on an unused port and yield its address.

    The child binds the port itself via the -p flag.  The server is killed
    once the fixture consumer is done.

    NOTE(review): standard_graph is accepted but never used here — presumably
    a fixture dependency that seeds the database; confirm.
    """
    api_port = _get_unused_port()
    cmd = [
        # Bug fix: launch via sys.executable like every other server fixture
        # in this file, instead of exec'ing the script directly — the direct
        # form depends on the script's exec bit and shebang and fails on
        # Windows.
        sys.executable,
        src_path("bin", "grouper-api"),
        "-c",
        src_path("config", "dev.yaml"),
        "-p",
        str(api_port),
        "-d",
        db_url(tmpdir),
    ]
    print("Starting server with command: " + " ".join(cmd))
    p = subprocess.Popen(cmd, env=bin_env())
    print("Waiting on server to come online")
    wait_until_accept(api_port)
    print("Connection established")
    yield "localhost:{}".format(api_port)
    p.kill()
def test_api():
    # type: () -> None
    """Smoke-test that the grouper-api entry point starts and prints usage."""
    bin_path = src_path("bin", "grouper-api")
    # Bug fix: launch via sys.executable (matching test_fe) so the test does
    # not depend on the script's exec bit or shebang.
    out = subprocess.check_output([sys.executable, bin_path, "--help"], env=bin_env())
    # Bug fix: check_output returns bytes on Python 3; comparing against a
    # str prefix would raise TypeError.  Decode first, matching test_fe.
    assert out.decode().startswith("usage: grouper-api")
def test_fe():
    # type: () -> None
    """Smoke-test that the grouper-fe entry point starts and prints usage."""
    script = src_path("bin", "grouper-fe")
    help_output = subprocess.check_output([sys.executable, script, "--help"], env=bin_env())
    assert help_output.decode().startswith("usage: grouper-fe")
def test_fe():
    # type: () -> None
    """Smoke-test that the grouper-fe entry point starts and prints usage.

    NOTE(review): this is an exact duplicate of another test_fe definition in
    this file; at module level the later definition shadows the earlier one,
    so only one actually runs.  One copy should probably be removed — confirm
    which variant is intended.
    """
    bin_path = src_path("bin", "grouper-fe")
    out = subprocess.check_output([sys.executable, bin_path, "--help"], env=bin_env())
    assert out.decode().startswith("usage: grouper-fe")