def execute_markdown_sql_blocks(filepath: str) -> str:
    """Execute every ```sql block in `filepath` that carries EXECUTION_FLAG.

    Each flagged block is run against the warehouse derived from the file's
    path and the flag is replaced by the query results embedded as a comment;
    the file is rewritten in place. Returns the (possibly updated) file text.
    """
    database, _, _, _ = get_table_info_from_path(filepath)
    with open(filepath, "r") as f:
        ugc_blob = f.read()

    def run_and_append_results(sql, warehouse_name=None) -> str:
        # Only blocks explicitly marked for execution are run; everything
        # else passes through untouched.
        if EXECUTION_FLAG not in sql:
            return sql
        results = run(sql, warehouse_name=warehouse_name)
        return embed_results_as_comment(sql, results)

    if EXECUTION_FLAG in ugc_blob:
        ugc_blob = find_blocks_and_process(
            ugc_blob,
            run_and_append_results,
            function_kwargs={"warehouse_name": database},
        )
        with open(filepath, "w") as f:
            f.write(ugc_blob)
    elif EXECUTION_FLAG.strip() in ugc_blob:
        # Flag text is present but not in its required standalone-line form.
        LOGGER.warning(f"{EXECUTION_FLAG.strip()} must be on its own line.")
    return ugc_blob
def _get_extract_iter(self):
    """Yield a MetricValue for every metric defined in ```metrics blocks.

    Walks each table-stub file, parses its ```metrics YAML sections, runs
    every metric's SQL, and yields one MetricValue per metric. A metric
    whose execution fails is logged and skipped so the remaining metrics
    are still extracted.
    """
    # Nothing to do when no stubs contain ```metrics sections.
    if not self.table_stub_paths:
        return
    super().init(self.sql_alch_conf)
    for table_stub_path in self.table_stub_paths:
        database, cluster, schema, table = get_table_info_from_path(
            table_stub_path)
        metric_yamls = self._get_metrics_queries_from_table_stub_path(
            table_stub_path)
        # A single stub file can contain multiple ```metrics sections.
        for metric_yaml in metric_yamls:
            metric_yaml = yaml.safe_load(metric_yaml)
            # Each ```metrics section can define several metrics.
            for metric_name, metric_details in metric_yaml.items():
                execution_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                try:
                    result = list(
                        self.execute(
                            metric_details["sql"], is_dict_return_enabled=False
                        )
                    )
                    # Metric value is the first column of the first row;
                    # an empty result set yields None.
                    try:
                        cleaned_result = result[0][0]
                    except IndexError:
                        cleaned_result = None
                    yield MetricValue(
                        database=database,
                        cluster=cluster,
                        schema=schema,
                        table=table,
                        name=metric_name,
                        description=metric_details.get("description"),
                        execution_time=execution_time,
                        value=cleaned_result,
                        is_global=metric_details.get("is_global", False),
                    )
                except Exception:
                    # Best-effort extraction: a broken metric must not abort
                    # the whole run. `except Exception` (not a bare except)
                    # so GeneratorExit/KeyboardInterrupt still propagate.
                    LOGGER.warning(
                        f"Failed execution of metric: {metric_name} in {table_stub_path}. Continuing."
                    )
def _get_extract_iter(self):
    """Yield one MetricValue per metric found in the stubs' ```metrics blocks.

    For each table stub, every ```metrics YAML section is parsed, each
    metric's SQL is executed, configured Slack alerts are dispatched, and
    a MetricValue record is emitted.
    """
    # No stubs with ```metrics sections means nothing to extract.
    if not self.table_stub_paths:
        return
    super().init(self.sql_alch_conf)
    for stub_path in self.table_stub_paths:
        database, cluster, schema, table = get_table_info_from_path(
            stub_path)
        # A single stub file can contain several ```metrics sections.
        for raw_yaml in self._get_metrics_queries_from_table_stub_path(
                stub_path):
            parsed = yaml.safe_load(raw_yaml)
            # Each section may define multiple named metrics.
            for name, details in parsed.items():
                timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                value = self._compute_sql_result(details["sql"], database)
                # Dispatch any configured alerts before emitting the record.
                self._send_slack_alerts(details.get("alerts"), value)
                yield MetricValue(
                    database=database,
                    cluster=cluster,
                    schema=schema,
                    table=table,
                    name=name,
                    description=details.get("description"),
                    execution_time=timestamp,
                    value=value,
                    is_global=details.get("is_global", False),
                )