def sql_c(remote, show_all, json, query):
    """
    Run SQL on or connect to the Splitgraph Data Delivery Network.

    If a query isn't passed, this will return a libpq-compatible connection string
    to the registry's SQL endpoint. It can be used to connect to the endpoint with
    other SQL clients:

    ```
    pgcli $(sgr cloud sql)
    ```

    If a query is passed, this will run an SQL query against the SQL endpoint.
    """
    ddn_params = _get_ddn_conn_params(remote)
    from splitgraph.engine.postgres.engine import get_conn_str, PostgresEngine

    # Without a query, just emit the connection string for other SQL clients to use.
    if not query:
        click.echo(get_conn_str(ddn_params))
        return

    # Build an engine to connect to the DDN, disabling pre-flight API checks etc.
    engine = PostgresEngine(
        name=remote, conn_params=ddn_params, registry=False, check_version=False
    )
    try:
        results = engine.run_sql(query)
        emit_sql_results(results, use_json=json, show_all=show_all)
    finally:
        engine.close()
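
# Usage sketch for the command above. The docstring confirms the `sgr cloud sql`
# invocation; the query-passing form is a natural extension of it, and the click
# decorators below are an illustrative assumption (option names and the default
# remote are hypothetical), not the actual CLI registration:
#
#     $ pgcli $(sgr cloud sql)         # interactive session via another client
#     $ sgr cloud sql "SELECT 1"       # run a one-off query against the DDN
#
#     @click.command("sql")
#     @click.option("--remote", default="data.splitgraph.com")
#     @click.option("-a", "--show-all", is_flag=True, default=False)
#     @click.option("-j", "--json", is_flag=True, default=False)
#     @click.argument("query", required=False)
#     def sql_c(remote, show_all, json, query):
#         ...
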
def test_commandline_engine_creation_list_stop_deletion(teardown_test_engine):
    runner = CliRunner()
    client = docker.from_env()

    # Create an engine with default password and wait for it to initialize
    result = runner.invoke(
        add_engine_c,
        [
            "--image",
            _get_test_engine_image(),
            "--no-pull",
            "--port",
            "5428",
            "--username",
            "not_sgr",
            "--no-sgconfig",
            TEST_ENGINE_NAME,
        ],
        input="notsosecure\nnotsosecure\n",
    )
    assert result.exit_code == 0

    # Connect to the engine to check that it's up
    # (username matches the --username flag passed to add_engine_c above)
    conn_params = {
        "SG_ENGINE_HOST": "localhost",
        "SG_ENGINE_PORT": "5428",
        "SG_ENGINE_USER": "not_sgr",
        "SG_ENGINE_PWD": "notsosecure",
        "SG_ENGINE_DB_NAME": "splitgraph",
        "SG_ENGINE_POSTGRES_DB_NAME": "postgres",
        "SG_ENGINE_ADMIN_USER": "not_sgr",
        "SG_ENGINE_ADMIN_PWD": "notsosecure",
    }
    engine = PostgresEngine(name="test", conn_params=conn_params)
    assert engine.run_sql("SELECT * FROM splitgraph_meta.images") == []
    engine.close()

    # List running engines
    result = runner.invoke(list_engines_c)
    assert result.exit_code == 0
    assert TEST_ENGINE_NAME in result.stdout
    assert "running" in result.stdout

    # Check engine version
    # (we didn't put it into the .sgconfig so have to patch instead)
    with patch("splitgraph.engine.get_engine", return_value=engine):
        result = runner.invoke(version_engine_c, [TEST_ENGINE_NAME])
        assert result.exit_code == 0
        assert __version__ in result.stdout

    # Get engine logs (no --follow since we won't be able to interrupt it)
    result = runner.invoke(log_engine_c, [TEST_ENGINE_NAME])
    assert "database system is ready to accept connections" in result.stdout

    # Try deleting the engine while it's still running
    with pytest.raises(docker.errors.APIError):
        runner.invoke(delete_engine_c, ["-y", TEST_ENGINE_NAME], catch_exceptions=False)

    # Stop the engine
    result = runner.invoke(stop_engine_c, [TEST_ENGINE_NAME])
    assert result.exit_code == 0

    # Check it's not running
    for c in client.containers.list(filters={"ancestor": "splitgraph/engine"}, all=False):
        assert c.name != "splitgraph_test_engine_" + TEST_ENGINE_NAME
    result = runner.invoke(list_engines_c)
    assert TEST_ENGINE_NAME not in result.stdout
    result = runner.invoke(list_engines_c, ["-a"])
    assert TEST_ENGINE_NAME in result.stdout

    # Bring it back up
    result = runner.invoke(start_engine_c, [TEST_ENGINE_NAME])
    assert result.exit_code == 0

    # Check it's running
    result = runner.invoke(list_engines_c)
    assert result.exit_code == 0
    assert TEST_ENGINE_NAME in result.stdout
    assert "running" in result.stdout

    # Try upgrading it to the same engine version as a smoke test
    with patch("splitgraph.engine.get_engine", return_value=engine):
        # Make sure the connection is closed as the client will use this Engine reference
        # after the upgrade to initialize it.
        engine.close()
        result = runner.invoke(
            upgrade_engine_c,
            [TEST_ENGINE_NAME, "--image", _get_test_engine_image(), "--no-pull"],
            catch_exceptions=False,
        )
        assert result.exit_code == 0
        assert "Upgraded engine %s to %s" % (TEST_ENGINE_NAME, __version__) in result.stdout

    # Check the engine is running and has the right version
    result = runner.invoke(list_engines_c)
    assert result.exit_code == 0
    assert TEST_ENGINE_NAME in result.stdout
    assert "running" in result.stdout

    result = runner.invoke(version_engine_c, [TEST_ENGINE_NAME])
    assert result.exit_code == 0
    assert __version__ in result.stdout

    # Force delete it
    result = runner.invoke(
        delete_engine_c, ["-f", "--with-volumes", TEST_ENGINE_NAME], input="y\n"
    )
    assert result.exit_code == 0

    # Check the engine (and the volumes) are gone
    for c in client.containers.list(filters={"ancestor": "splitgraph/engine"}, all=False):
        assert c.name != "splitgraph_test_engine_" + TEST_ENGINE_NAME
    for v in client.volumes.list():
        assert not v.name.startswith("splitgraph_test_engine_" + TEST_ENGINE_NAME)
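
# A minimal sketch of the teardown_test_engine fixture this test assumes. The real
# fixture lives elsewhere in the test suite; this version is an assumption, not the
# actual implementation -- it just force-removes the container and volumes if an
# assertion failed before the test could delete them itself.
@pytest.fixture
def teardown_test_engine():
    yield
    client = docker.from_env()
    # Remove the engine container left over by a failed run (with its anonymous volumes)
    for c in client.containers.list(all=True):
        if c.name == "splitgraph_test_engine_" + TEST_ENGINE_NAME:
            c.remove(force=True, v=True)
    # Remove any named volumes the engine created
    for v in client.volumes.list():
        if v.name.startswith("splitgraph_test_engine_" + TEST_ENGINE_NAME):
            v.remove(force=True)
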
def write(self, value_: Any, **kwargs: Any) -> Result:
    """
    Writes the result to a repository on Splitgraph.

    Args:
        - value_ (Any): the value to write; will then be stored as the `value`
          attribute of the returned `Result` instance
        - **kwargs (optional): if provided, will be used to format the `table`,
          `comment`, and `tag`

    Returns:
        - Result: a new `Result` instance with `value`, `comment`, `table`, and
          `tag` attributes
    """
    cfg = patch_config(create_config_dict(), self.env or dict())
    engine = PostgresEngine(name="SplitgraphResult", conn_params=cfg)
    engine.initialize()
    repo = Repository(namespace=self.namespace, repository=self.repo_name, engine=engine)

    assert isinstance(value_, pd.DataFrame)
    assert engine.connected

    if not repository_exists(repo) and self.auto_init_repo:
        self.logger.info("Creating repo {}/{}...".format(repo.namespace, repo.repository))
        repo.init()

    # TODO: Retrieve the repo from bedrock first
    new = self.format(**kwargs)
    new.value = value_

    self.logger.info("Starting to upload result to {}...".format(new.table))

    with self.atomic(engine):
        self.logger.info("checkout")
        img = repo.head
        img.checkout(force=True)

        self.logger.info("df to table")
        df_to_table(new.value, repository=repo, table=new.table, if_exists="replace")

        self.logger.info("commit")
        new_img = repo.commit(comment=new.comment, chunk_size=10000)
        new_img.tag(new.tag)

    # if repo.diff(new.table, img, new_img):
    if self.auto_push:
        self.logger.info("push")
        repo.push(
            self.get_upstream(repo),
            handler="S3",
            overwrite_objects=True,
            overwrite_tags=True,
            reupload_objects=True,
        )

    engine.close()
    self.logger.info("Finished uploading result to {}...".format(new.table))

    return new
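
# A minimal usage sketch, assuming a Prefect 1.x-style flow; the constructor
# arguments below (namespace, repo_name, auto_init_repo) are assumptions inferred
# from the attributes referenced in write(), not a documented signature:
#
#     import pandas as pd
#     from prefect import Flow, task
#
#     result = SplitgraphResult(
#         namespace="myuser", repo_name="mydata", auto_init_repo=True
#     )
#
#     @task(result=result)
#     def make_frame() -> pd.DataFrame:
#         return pd.DataFrame({"a": [1, 2, 3]})
#
#     with Flow("write-to-splitgraph") as flow:
#         make_frame()
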