def get_single_payload(self, query_obj: QueryObject) -> Dict[str, Any]:
    """Return a payload of metadata and data for a single query object.

    Depending on ``self.result_type``, this returns the rendered query
    string (QUERY), raw sample rows (SAMPLES), just the processed data
    (RESULTS), or the full payload including status/error metadata.

    :param query_obj: the query definition to execute
    :returns: dict payload; shape varies with ``self.result_type``
    """
    if self.result_type == utils.ChartDataResultType.QUERY:
        # Only the generated query text is requested; nothing is executed.
        return {
            "query": self.datasource.get_query_str(query_obj.to_dict()),
            "language": self.datasource.query_language,
        }
    if self.result_type == utils.ChartDataResultType.SAMPLES:
        # Rewrite the query to fetch raw rows: strip aggregation and
        # post-processing, and cap rows at the configured sample limit.
        row_limit = query_obj.row_limit or math.inf
        query_obj = copy.copy(query_obj)  # don't mutate the caller's object
        query_obj.groupby = []
        query_obj.metrics = []
        query_obj.post_processing = []
        query_obj.row_limit = min(row_limit, config["SAMPLES_ROW_LIMIT"])
        query_obj.row_offset = 0
        query_obj.columns = [o.column_name for o in self.datasource.columns]
    payload = self.get_df_payload(query_obj)
    df = payload["df"]
    status = payload["status"]
    if status != utils.QueryStatus.FAILED:
        if df.empty:
            payload["error"] = "No data"
        else:
            payload["data"] = self.get_data(df)
    del payload["df"]
    # Bug fix: "data" is only present on a successful, non-empty result.
    # Previously a FAILED or empty-result query combined with
    # result_type == RESULTS raised KeyError here; fall through to the
    # full payload instead so the caller sees the error/status fields.
    if self.result_type == utils.ChartDataResultType.RESULTS and "data" in payload:
        return {"data": payload["data"]}
    return payload
def get_single_payload(
    self,
    query_obj: QueryObject,
    force_cached: bool = False,
) -> Dict[str, Any]:
    """Return results payload for a single query.

    :param query_obj: the query definition to execute
    :param force_cached: when True, only return a cached result
        (passed through to ``get_df_payload``)
    :returns: dict payload; shape varies with ``self.result_type``
    """
    if self.result_type == utils.ChartDataResultType.QUERY:
        # Only the generated query text is requested; nothing is executed.
        return {
            "query": self.datasource.get_query_str(query_obj.to_dict()),
            "language": self.datasource.query_language,
        }
    if self.result_type == utils.ChartDataResultType.SAMPLES:
        # Rewrite the query to fetch raw rows: strip time-series handling,
        # ordering, aggregation and post-processing, and cap the row count
        # at the configured sample limit.
        row_limit = query_obj.row_limit or math.inf
        query_obj = copy.copy(query_obj)  # don't mutate the caller's object
        query_obj.is_timeseries = False
        query_obj.orderby = []
        query_obj.groupby = []
        query_obj.metrics = []
        query_obj.post_processing = []
        query_obj.row_limit = min(row_limit, config["SAMPLES_ROW_LIMIT"])
        query_obj.row_offset = 0
        query_obj.columns = [o.column_name for o in self.datasource.columns]
    payload = self.get_df_payload(query_obj, force_cached=force_cached)
    df = payload["df"]
    status = payload["status"]
    if status != utils.QueryStatus.FAILED:
        # Attach column metadata and serialized row data only on success.
        payload["colnames"] = list(df.columns)
        payload["coltypes"] = utils.extract_dataframe_dtypes(df)
        payload["data"] = self.get_data(df)
    del payload["df"]
    # Report which ad-hoc filter columns actually exist on the datasource,
    # merged with the applied/rejected time-filter status.
    filters = query_obj.filter
    filter_columns = cast(List[str], [flt.get("col") for flt in filters])
    columns = set(self.datasource.column_names)
    applied_time_columns, rejected_time_columns = utils.get_time_filter_status(
        self.datasource, query_obj.applied_time_extras
    )
    payload["applied_filters"] = [
        {"column": col} for col in filter_columns if col in columns
    ] + applied_time_columns
    payload["rejected_filters"] = [
        {"reason": "not_in_datasource", "column": col}
        for col in filter_columns
        if col not in columns
    ] + rejected_time_columns
    # Only short-circuit to the bare data payload on success; a failed
    # query never sets payload["data"].
    if (
        self.result_type == utils.ChartDataResultType.RESULTS
        and status != utils.QueryStatus.FAILED
    ):
        return {"data": payload["data"]}
    return payload
def _get_samples(
    query_context: QueryContext, query_obj: QueryObject, force_cached: bool = False
) -> Dict[str, Any]:
    """Return raw sample rows for the query's datasource.

    Operates on a shallow copy of *query_obj* so the caller's object is
    untouched: time-series handling, ordering, metrics, post-processing
    and the time window are all cleared, and the selected columns are
    expanded to every column on the datasource.
    """
    datasource = _get_datasource(query_context, query_obj)
    sample_obj = copy.copy(query_obj)
    # Strip everything that would aggregate or reorder the raw rows.
    sample_obj.is_timeseries = False
    sample_obj.orderby = []
    sample_obj.metrics = None
    sample_obj.post_processing = []
    # Drop the time window so samples are not constrained by dttm bounds.
    sample_obj.from_dttm = None
    sample_obj.to_dttm = None
    # Select every column the datasource exposes.
    sample_obj.columns = [column.column_name for column in datasource.columns]
    return _get_full(query_context, sample_obj, force_cached)
def get_single_payload(self, query_obj: QueryObject) -> Dict[str, Any]:
    """Return a payload of metadata and data for a single query object.

    Depending on ``self.result_type``, this returns the rendered query
    string (QUERY), raw sample rows (SAMPLES), just the processed data
    (RESULTS), or the full payload including status/error metadata and
    the applied/rejected filter report.

    :param query_obj: the query definition to execute
    :returns: dict payload; shape varies with ``self.result_type``
    """
    if self.result_type == utils.ChartDataResultType.QUERY:
        # Only the generated query text is requested; nothing is executed.
        return {
            "query": self.datasource.get_query_str(query_obj.to_dict()),
            "language": self.datasource.query_language,
        }
    if self.result_type == utils.ChartDataResultType.SAMPLES:
        # Rewrite the query to fetch raw rows: strip ordering, aggregation
        # and post-processing, and cap rows at the configured sample limit.
        row_limit = query_obj.row_limit or math.inf
        query_obj = copy.copy(query_obj)  # don't mutate the caller's object
        query_obj.orderby = []
        query_obj.groupby = []
        query_obj.metrics = []
        query_obj.post_processing = []
        query_obj.row_limit = min(row_limit, config["SAMPLES_ROW_LIMIT"])
        query_obj.row_offset = 0
        query_obj.columns = [o.column_name for o in self.datasource.columns]
    payload = self.get_df_payload(query_obj)
    # TODO: implement annotation support
    payload["annotation_data"] = []
    df = payload["df"]
    status = payload["status"]
    if status != utils.QueryStatus.FAILED:
        payload["data"] = self.get_data(df)
    del payload["df"]
    # Report which ad-hoc filter columns actually exist on the datasource,
    # merged with the applied/rejected time-filter status.
    filters = query_obj.filter
    filter_columns = cast(List[str], [flt.get("col") for flt in filters])
    columns = set(self.datasource.column_names)
    applied_time_columns, rejected_time_columns = utils.get_time_filter_status(
        self.datasource, query_obj.applied_time_extras
    )
    payload["applied_filters"] = [
        {"column": col} for col in filter_columns if col in columns
    ] + applied_time_columns
    payload["rejected_filters"] = [
        {"reason": "not_in_datasource", "column": col}
        for col in filter_columns
        if col not in columns
    ] + rejected_time_columns
    # Bug fix: only short-circuit to the bare data payload on success.
    # A FAILED query never sets payload["data"], so the unguarded return
    # raised KeyError; fall through to the full payload instead so the
    # caller sees the error/status fields.
    if (
        self.result_type == utils.ChartDataResultType.RESULTS
        and status != utils.QueryStatus.FAILED
    ):
        return {"data": payload["data"]}
    return payload
def _get_samples(
    query_context: QueryContext, query_obj: QueryObject, force_cached: bool = False
) -> Dict[str, Any]:
    """Return raw sample rows for the query's datasource.

    Operates on a shallow copy of *query_obj*: time-series handling,
    ordering, metrics, post-processing and the time window are cleared,
    and the selected columns are expanded to every datasource column.
    """
    datasource = _get_datasource(query_context, query_obj)
    sample_obj = copy.copy(query_obj)
    # Strip everything that would aggregate or reorder the raw rows.
    sample_obj.is_timeseries = False
    sample_obj.orderby = []
    sample_obj.metrics = None
    sample_obj.post_processing = []
    # Datasource columns may be plain dicts or ORM-style objects; in either
    # case reduce them to their column names.
    sample_obj.columns = [
        col.get("column_name") if isinstance(col, dict) else col.column_name
        for col in datasource.columns
    ]
    # Drop the time window so samples are not constrained by dttm bounds.
    sample_obj.from_dttm = None
    sample_obj.to_dttm = None
    return _get_full(query_context, sample_obj, force_cached)
def _get_drill_detail(
    query_context: QueryContext, query_obj: QueryObject, force_cached: bool = False
) -> Dict[str, Any]:
    """Return raw row detail for drill-to-detail.

    Like ``_get_samples`` but the caller's time range is left intact, so
    the detail rows honor the chart's time filter.
    """
    # TODO(yongjie): remove this function once it is decided whether
    # samples should be subject to the time filter.
    datasource = _get_datasource(query_context, query_obj)
    detail_obj = copy.copy(query_obj)
    # Strip everything that would aggregate or reorder the raw rows.
    detail_obj.is_timeseries = False
    detail_obj.orderby = []
    detail_obj.metrics = None
    detail_obj.post_processing = []
    # Datasource columns may be plain dicts or ORM-style objects; in either
    # case reduce them to their column names.
    detail_obj.columns = [
        col.get("column_name") if isinstance(col, dict) else col.column_name
        for col in datasource.columns
    ]
    return _get_full(query_context, detail_obj, force_cached)