def __init__(self, ctx: PipelineContext, config: RedashConfig):
    super().__init__(ctx)
    self.config = config
    self.report = RedashSourceReport()

    # Handle trailing slash removal
    self.config.connect_uri = self.config.connect_uri.strip("/")

    self.client = Redash(self.config.connect_uri, self.config.api_key)
    self.client.session.headers.update(
        {
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
    )

    self.api_page_limit = self.config.api_page_limit or math.inf
def refresh_dashboard(baseurl, apikey, slug):
    client = Redash(baseurl, apikey)
    todays_dates = get_frontend_vals()
    queries_dict = get_queries_on_dashboard(client, slug)

    # Loop through each query and its JSON data
    for idx, qry in queries_dict.items():
        params = {
            p.get("name"): fill_dynamic_val(todays_dates, p)
            for p in qry["options"].get("parameters", [])
        }

        # Pass max_age=0 to force a fresh (non-cached) result.
        body = {"parameters": params, "max_age": 0}

        r = client._post(f"api/queries/{idx}/results", json=body)

        print(f"Query: {idx} -- Code {r.status_code}")
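# --- Usage sketch for refresh_dashboard() above. This is illustrative, not
# part of the original snippet: the environment variable names REDASH_URL and
# REDASH_KEY follow the CSV script later in this section, while DASHBOARD_SLUG
# is an assumption.
import os

refresh_dashboard(
    os.environ["REDASH_URL"],
    os.environ["REDASH_KEY"],
    os.environ["DASHBOARD_SLUG"],
)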
def __init__(self, ctx: PipelineContext, config: RedashConfig):
    super().__init__(ctx)
    self.config = config
    self.report = RedashSourceReport()

    # Handle trailing slash removal
    self.config.connect_uri = self.config.connect_uri.strip("/")

    self.client = Redash(self.config.connect_uri, self.config.api_key)
    self.client.session.headers.update(
        {
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
    )

    # Handling retry and backoff
    retries = 3
    backoff_factor = 10
    status_forcelist = (500, 503, 502, 504)
    retry = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry)
    self.client.session.mount("http://", adapter)
    self.client.session.mount("https://", adapter)

    self.api_page_limit = self.config.api_page_limit or math.inf

    self.parse_table_names_from_sql = self.config.parse_table_names_from_sql
    self.sql_parser_path = self.config.sql_parser

    logger.info(
        f"Running Redash ingestion with parse_table_names_from_sql={self.parse_table_names_from_sql}"
    )
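# --- Standalone sketch of the retry/backoff pattern used above, shown on a
# plain requests.Session so it can run outside the source class. Assumes only
# the `requests` and `urllib3` packages; the session object is illustrative.
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
retry = Retry(
    total=3,
    read=3,
    connect=3,
    backoff_factor=10,  # exponential backoff between retry attempts
    status_forcelist=(500, 502, 503, 504),  # retry only on these server errors
)
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)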
def find_table_names(url, key, data_source_id):
    client = Redash(url, key)

    schema_tables = [
        token.get("name")
        for token in client._get(f"api/data_sources/{data_source_id}/schema")
        .json()
        .get("schema", [])
    ]

    queries = [
        query
        for query in client.paginate(client.queries)
        if query.get("data_source_id", None) == int(data_source_id)
    ]

    # Keep only tables that appear in the data source schema; if the schema
    # is empty, keep every extracted table name.
    tables_by_qry = {
        query["id"]: [
            table
            for table in extract_table_names(query["query"])
            if table in schema_tables or len(schema_tables) == 0
        ]
        for query in queries
    }

    return tables_by_qry
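# --- Usage sketch for find_table_names() above. Illustrative only: the
# environment variable names match the CSV script below, and data source id
# "1" is an assumption.
import os

tables_by_query = find_table_names(
    os.environ["REDASH_URL"], os.environ["REDASH_KEY"], "1"
)
for query_id, table_names in tables_by_query.items():
    print(query_id, table_names)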
import csv
import os

from redash_toolbelt import Redash
import redadocs.dashboards as rdd

URL = os.environ["REDASH_URL"]
KEY = os.environ["REDASH_KEY"]

# Report the target instance, but never echo the API key: it is a secret.
print(f"Connecting to {URL}...")

# Create a client object
client = Redash(URL, KEY)

rows = rdd.get_all_db_details(client, csv=True)

headline = ["Name", "Tags", "Updated", "Archived", "Description", "Public Link"]

output_file = "output.csv"
with open(output_file, "w", newline="") as csvfile:
    writer = csv.writer(
        csvfile, delimiter=";", quotechar='"', quoting=csv.QUOTE_MINIMAL
    )
    writer.writerow(headline)
    for row in rows:
        writer.writerow(row)
def lookup(redash_host, email, api_key):
    """Search for EMAIL in queries and query results, output query URL if found."""
    redash = Redash(redash_host, api_key)
    lookup = Lookup(redash, email)
    lookup.lookup()
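# --- Usage sketch for lookup() above. Illustrative only: the environment
# variable names match the CSV script, and the email address is a placeholder.
import os

lookup(os.environ["REDASH_URL"], "user@example.com", os.environ["REDASH_KEY"])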
def main(redash_host, slug, api_key, prefix=""): """Calls the duplicate function using Click commands""" client = Redash(redash_host, api_key) duplicate(client, slug, prefix)