def test_get_form_data_request_form_with_queries(self) -> None:
    # The CSV export sends its form data wrapped in a `queries` list, even
    # when sending requests to /api/v1/chart/data.
    with app.test_request_context(
        data={
            "form_data": json.dumps(
                {"queries": [{"url_params": {"foo": "bar"}}]}
            )
        }
    ):
        form_data, slc = get_form_data()
        self.assertEqual(
            form_data,
            {
                "url_params": {"foo": "bar"},
                "time_range_endpoints": get_time_range_endpoints(form_data={}),
            },
        )
        self.assertEqual(slc, None)
def test_get_time_range_endpoints(self):
    self.assertEqual(
        get_time_range_endpoints(form_data={}),
        (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.EXCLUSIVE),
    )
    self.assertEqual(
        get_time_range_endpoints(
            form_data={"time_range_endpoints": ["inclusive", "inclusive"]}
        ),
        (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.INCLUSIVE),
    )
    self.assertEqual(
        get_time_range_endpoints(form_data={"datasource": "1_druid"}),
        (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.EXCLUSIVE),
    )

    slc = Mock()
    slc.datasource.database.get_extra.return_value = {}
    self.assertEqual(
        get_time_range_endpoints(form_data={"datasource": "1__table"}, slc=slc),
        (TimeRangeEndpoint.UNKNOWN, TimeRangeEndpoint.INCLUSIVE),
    )

    slc.datasource.database.get_extra.return_value = {
        "time_range_endpoints": ["inclusive", "inclusive"]
    }
    self.assertEqual(
        get_time_range_endpoints(form_data={"datasource": "1__table"}, slc=slc),
        (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.INCLUSIVE),
    )

    self.assertIsNone(get_time_range_endpoints(form_data={}, slc=slc))

    with app.app_context():
        app.config["SIP_15_GRACE_PERIOD_END"] = date.today() + timedelta(days=1)
        self.assertEqual(
            get_time_range_endpoints(
                form_data={"datasource": "1__table"}, slc=slc
            ),
            (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.INCLUSIVE),
        )

        app.config["SIP_15_GRACE_PERIOD_END"] = date.today()
        self.assertEqual(
            get_time_range_endpoints(
                form_data={"datasource": "1__table"}, slc=slc
            ),
            (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.EXCLUSIVE),
        )
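# A minimal sketch of get_time_range_endpoints consistent with the assertions
# above; an illustration, not the Superset implementation (grace-period
# handling is omitted, and TimeRangeEndpoint is assumed to be a str-valued
# enum so that TimeRangeEndpoint("inclusive") works). Explicit endpoints in
# form_data win; SQL-table slices fall back to the database's
# `time_range_endpoints` extra, defaulting to (UNKNOWN, INCLUSIVE); an empty
# form_data with a slice yields None; everything else gets the legacy
# (INCLUSIVE, EXCLUSIVE) pair.
def get_time_range_endpoints_sketch(form_data, slc=None):
    endpoints = form_data.get("time_range_endpoints")
    if endpoints:
        return tuple(TimeRangeEndpoint(e) for e in endpoints)
    datasource = form_data.get("datasource")
    if slc and datasource and datasource.endswith("__table"):
        extra = slc.datasource.database.get_extra()
        endpoints = extra.get("time_range_endpoints", ["unknown", "inclusive"])
        return tuple(TimeRangeEndpoint(e) for e in endpoints)
    if slc:
        return None
    return (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.EXCLUSIVE)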
def test_get_form_data_default(self) -> None:
    with app.test_request_context():
        form_data, slc = get_form_data()
        self.assertEqual(
            form_data,
            {"time_range_endpoints": get_time_range_endpoints(form_data={})},
        )
        self.assertEqual(slc, None)
def __init__(
    self,
    granularity: str,
    metrics: List[Union[Dict, str]],
    groupby: Optional[List[str]] = None,
    filters: Optional[List[str]] = None,
    time_range: Optional[str] = None,
    time_shift: Optional[str] = None,
    is_timeseries: bool = False,
    timeseries_limit: int = 0,
    row_limit: int = app.config["ROW_LIMIT"],
    timeseries_limit_metric: Optional[Dict] = None,
    order_desc: bool = True,
    extras: Optional[Dict] = None,
    columns: Optional[List[str]] = None,
    orderby: Optional[List[List]] = None,
    relative_start: str = app.config["DEFAULT_RELATIVE_START_TIME"],
    relative_end: str = app.config["DEFAULT_RELATIVE_END_TIME"],
):
    self.granularity = granularity
    self.from_dttm, self.to_dttm = utils.get_since_until(
        relative_start=relative_start,
        relative_end=relative_end,
        time_range=time_range,
        time_shift=time_shift,
    )
    self.is_timeseries = is_timeseries
    self.time_range = time_range
    self.time_shift = utils.parse_human_timedelta(time_shift)
    self.groupby = groupby or []

    # Temporary solution for a backward compatibility issue due to the new
    # format of non-ad-hoc metrics, which need to adhere to superset-ui per
    # https://git.io/Jvm7P.
    self.metrics = [
        metric if "expressionType" in metric else metric["label"]  # type: ignore
        for metric in metrics
    ]

    self.row_limit = row_limit
    self.filter = filters or []
    self.timeseries_limit = timeseries_limit
    self.timeseries_limit_metric = timeseries_limit_metric
    self.order_desc = order_desc
    self.extras = extras or {}

    if app.config["SIP_15_ENABLED"] and "time_range_endpoints" not in self.extras:
        self.extras["time_range_endpoints"] = get_time_range_endpoints(form_data={})

    self.columns = columns or []
    self.orderby = orderby or []
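# A hedged usage sketch (hypothetical metric values; it assumes the enclosing
# class is Superset's QueryObject and that an app context with the relevant
# config is active). It shows the metric normalization above: predefined
# metrics passed as dicts collapse to their label, while ad-hoc metrics
# (those carrying an `expressionType`) pass through untouched.
adhoc = {"expressionType": "SQL", "sqlExpression": "COUNT(*)", "label": "cnt"}
qo = QueryObject(
    granularity="ds",
    metrics=[{"label": "sum__num"}, adhoc],
)
assert qo.metrics == ["sum__num", adhoc]
if app.config["SIP_15_ENABLED"]:
    # SIP-15 endpoints are injected into extras when the flag is on
    assert "time_range_endpoints" in qo.extras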
def test_get_form_data_request_form(self) -> None:
    with app.test_request_context(
        data={"form_data": json.dumps({"foo": "bar"})}
    ):
        form_data, slc = get_form_data()
        self.assertEqual(
            form_data,
            {
                "foo": "bar",
                "time_range_endpoints": get_time_range_endpoints(form_data={}),
            },
        )
        self.assertEqual(slc, None)
def test_get_form_data_corrupted_json(self) -> None:
    with app.test_request_context(
        data={"form_data": "{x: '2324'}"},
        query_string={"form_data": '{"baz": "bar"'},
    ):
        form_data, slc = get_form_data()
        self.assertEqual(
            form_data,
            {"time_range_endpoints": get_time_range_endpoints(form_data={})},
        )
        self.assertEqual(slc, None)
def test_get_form_data_globals(self) -> None:
    with app.test_request_context():
        g.form_data = {"foo": "bar"}
        form_data, slc = get_form_data()
        delattr(g, "form_data")
        self.assertEqual(
            form_data,
            {
                "foo": "bar",
                "time_range_endpoints": get_time_range_endpoints(form_data={}),
            },
        )
        self.assertEqual(slc, None)
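# A simplified sketch (an illustration, not the actual Superset helper) of the
# merge behavior the tests above pin down: form data is parsed as JSON from
# the request form or query string, corrupt JSON is silently dropped, values
# on flask.g take effect, the CSV export's `queries` wrapper is unwrapped,
# and SIP-15 endpoints are filled in as a default.
import json

from flask import g, request


def get_form_data_sketch():
    form_data = {}
    for raw in (request.form.get("form_data"), request.args.get("form_data")):
        if raw:
            try:
                form_data.update(json.loads(raw))
            except ValueError:
                # corrupted JSON is ignored, per test_get_form_data_corrupted_json
                pass
    if hasattr(g, "form_data"):
        form_data.update(g.form_data)
    queries = form_data.pop("queries", None)
    if queries:
        # the CSV export wraps its payload in a `queries` list
        form_data.update(queries[0])
    # stands in for get_time_range_endpoints(form_data={})
    form_data.setdefault("time_range_endpoints", ("inclusive", "exclusive"))
    return form_data, None  # slice (slc) resolution is omitted from this sketch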
def test_get_time_range_endpoints(self):
    self.assertEqual(
        get_time_range_endpoints(form_data={}, slc=None),
        (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.EXCLUSIVE),
    )
    self.assertEqual(
        get_time_range_endpoints(
            form_data={"time_range_endpoints": ["inclusive", "inclusive"]},
            slc=None,
        ),
        (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.INCLUSIVE),
    )
    self.assertEqual(
        get_time_range_endpoints(form_data={"datasource": "1_druid"}, slc=None),
        (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.EXCLUSIVE),
    )

    slc = Mock()
    slc.datasource.database.get_extra.return_value = {}
    self.assertEqual(
        get_time_range_endpoints(form_data={"datasource": "1__table"}, slc=slc),
        (TimeRangeEndpoint.UNKNOWN, TimeRangeEndpoint.INCLUSIVE),
    )

    slc.datasource.database.get_extra.return_value = {
        "time_range_endpoints": ["inclusive", "inclusive"]
    }
    self.assertEqual(
        get_time_range_endpoints(form_data={"datasource": "1__table"}, slc=slc),
        (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.INCLUSIVE),
    )

    self.assertIsNone(get_time_range_endpoints(form_data={}, slc=slc))
def __init__(
    self,
    annotation_layers: Optional[List[Dict[str, Any]]] = None,
    applied_time_extras: Optional[Dict[str, str]] = None,
    granularity: Optional[str] = None,
    metrics: Optional[List[Union[Dict[str, Any], str]]] = None,
    groupby: Optional[List[str]] = None,
    filters: Optional[List[Dict[str, Any]]] = None,
    time_range: Optional[str] = None,
    time_shift: Optional[str] = None,
    is_timeseries: Optional[bool] = None,
    timeseries_limit: int = 0,
    row_limit: Optional[int] = None,
    row_offset: Optional[int] = None,
    timeseries_limit_metric: Optional[Metric] = None,
    order_desc: bool = True,
    extras: Optional[Dict[str, Any]] = None,
    columns: Optional[List[str]] = None,
    orderby: Optional[List[List[str]]] = None,
    post_processing: Optional[List[Optional[Dict[str, Any]]]] = None,
    **kwargs: Any,
):
    annotation_layers = annotation_layers or []
    metrics = metrics or []
    extras = extras or {}
    is_sip_38 = is_feature_enabled("SIP_38_VIZ_REARCHITECTURE")

    self.annotation_layers = [
        layer
        for layer in annotation_layers
        # formula annotations don't affect the payload, hence can be dropped
        if layer["annotationType"] != "FORMULA"
    ]
    self.applied_time_extras = applied_time_extras or {}
    self.granularity = granularity
    self.from_dttm, self.to_dttm = get_since_until(
        relative_start=extras.get(
            "relative_start", config["DEFAULT_RELATIVE_START_TIME"]
        ),
        relative_end=extras.get(
            "relative_end", config["DEFAULT_RELATIVE_END_TIME"]
        ),
        time_range=time_range,
        time_shift=time_shift,
    )
    # is_timeseries is True if time column is in groupby
    self.is_timeseries = (
        is_timeseries
        if is_timeseries is not None
        else (DTTM_ALIAS in groupby if groupby else False)
    )
    self.time_range = time_range
    self.time_shift = parse_human_timedelta(time_shift)
    self.post_processing = [
        post_proc for post_proc in post_processing or [] if post_proc
    ]
    if not is_sip_38:
        self.groupby = groupby or []

    # Support metric reference/definition in the format of
    # 1. 'metric_name' - name of predefined metric
    # 2. { label: 'label_name' } - legacy format for a predefined metric
    # 3. { expressionType: 'SIMPLE' | 'SQL', ... } - adhoc metric
    self.metrics = [
        metric
        if isinstance(metric, str) or "expressionType" in metric
        else metric["label"]  # type: ignore
        for metric in metrics
    ]

    self.row_limit = row_limit or config["ROW_LIMIT"]
    self.row_offset = row_offset or 0
    self.filter = filters or []
    self.timeseries_limit = timeseries_limit
    self.timeseries_limit_metric = timeseries_limit_metric
    self.order_desc = order_desc
    self.extras = extras

    if config["SIP_15_ENABLED"] and "time_range_endpoints" not in self.extras:
        self.extras["time_range_endpoints"] = get_time_range_endpoints(form_data={})

    self.columns = columns or []
    if is_sip_38 and groupby:
        self.columns += groupby
        logger.warning(
            "The field `groupby` is deprecated. Viz plugins should "
            "pass all selectables via the `columns` field"
        )

    self.orderby = orderby or []

    # rename deprecated fields
    for field in DEPRECATED_FIELDS:
        if field.old_name in kwargs:
            logger.warning(
                "The field `%s` is deprecated, please use `%s` instead.",
                field.old_name,
                field.new_name,
            )
            value = kwargs[field.old_name]
            if value:
                if hasattr(self, field.new_name):
                    logger.warning(
                        "The field `%s` is already populated, "
                        "replacing value with contents from `%s`.",
                        field.new_name,
                        field.old_name,
                    )
                setattr(self, field.new_name, value)

    # move deprecated extras fields to extras
    for field in DEPRECATED_EXTRAS_FIELDS:
        if field.old_name in kwargs:
            logger.warning(
                "The field `%s` is deprecated and should "
                "be passed to `extras` via the `%s` property.",
                field.old_name,
                field.new_name,
            )
            value = kwargs[field.old_name]
            if value:
                if hasattr(self.extras, field.new_name):
                    logger.warning(
                        "The field `%s` is already populated in "
                        "`extras`, replacing value with contents "
                        "from `%s`.",
                        field.new_name,
                        field.old_name,
                    )
                self.extras[field.new_name] = value
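# Hypothetical shape of the deprecation registries iterated above; Superset
# defines them as namedtuple-style pairs along these lines. The entries shown
# are illustrative assumptions, not the real mappings.
from typing import NamedTuple


class DeprecatedField(NamedTuple):
    old_name: str
    new_name: str


DEPRECATED_FIELDS = (
    DeprecatedField(old_name="granularity_sqla", new_name="granularity"),
)
DEPRECATED_EXTRAS_FIELDS = (
    DeprecatedField(old_name="where", new_name="where"),
)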
def __init__(
    self,
    datasource: Optional[DatasourceDict] = None,
    result_type: Optional[ChartDataResultType] = None,
    annotation_layers: Optional[List[Dict[str, Any]]] = None,
    applied_time_extras: Optional[Dict[str, str]] = None,
    apply_fetch_values_predicate: bool = False,
    granularity: Optional[str] = None,
    metrics: Optional[List[Union[Dict[str, Any], str]]] = None,
    groupby: Optional[List[str]] = None,
    filters: Optional[List[Dict[str, Any]]] = None,
    time_range: Optional[str] = None,
    time_shift: Optional[str] = None,
    is_timeseries: Optional[bool] = None,
    timeseries_limit: int = 0,
    row_limit: Optional[int] = None,
    row_offset: Optional[int] = None,
    timeseries_limit_metric: Optional[Metric] = None,
    order_desc: bool = True,
    extras: Optional[Dict[str, Any]] = None,
    columns: Optional[List[str]] = None,
    orderby: Optional[List[OrderBy]] = None,
    post_processing: Optional[List[Optional[Dict[str, Any]]]] = None,
    is_rowcount: bool = False,
    **kwargs: Any,
):
    columns = columns or []
    groupby = groupby or []
    extras = extras or {}
    annotation_layers = annotation_layers or []

    self.is_rowcount = is_rowcount
    self.datasource = None
    if datasource:
        self.datasource = ConnectorRegistry.get_datasource(
            str(datasource["type"]), int(datasource["id"]), db.session
        )
    self.result_type = result_type
    self.apply_fetch_values_predicate = apply_fetch_values_predicate or False
    self.annotation_layers = [
        layer
        for layer in annotation_layers
        # formula annotations don't affect the payload, hence can be dropped
        if layer["annotationType"] != "FORMULA"
    ]
    self.applied_time_extras = applied_time_extras or {}
    self.granularity = granularity
    self.from_dttm, self.to_dttm = get_since_until(
        relative_start=extras.get(
            "relative_start", config["DEFAULT_RELATIVE_START_TIME"]
        ),
        relative_end=extras.get(
            "relative_end", config["DEFAULT_RELATIVE_END_TIME"]
        ),
        time_range=time_range,
        time_shift=time_shift,
    )
    # is_timeseries is True if time column is in either columns or groupby
    # (both are dimensions)
    self.is_timeseries = (
        is_timeseries if is_timeseries is not None else DTTM_ALIAS in columns + groupby
    )
    self.time_range = time_range
    self.time_shift = parse_human_timedelta(time_shift)
    self.post_processing = [
        post_proc for post_proc in post_processing or [] if post_proc
    ]

    # Support metric reference/definition in the format of
    # 1. 'metric_name' - name of predefined metric
    # 2. { label: 'label_name' } - legacy format for a predefined metric
    # 3. { expressionType: 'SIMPLE' | 'SQL', ... } - adhoc metric
    self.metrics = metrics and [
        x if isinstance(x, str) or is_adhoc_metric(x) else x["label"]  # type: ignore
        for x in metrics
    ]

    self.row_limit = config["ROW_LIMIT"] if row_limit is None else row_limit
    self.row_offset = row_offset or 0
    self.filter = filters or []
    self.timeseries_limit = timeseries_limit
    self.timeseries_limit_metric = timeseries_limit_metric
    self.order_desc = order_desc
    self.extras = extras

    if config["SIP_15_ENABLED"]:
        self.extras["time_range_endpoints"] = get_time_range_endpoints(
            form_data=self.extras
        )

    self.columns = columns
    self.groupby = groupby or []
    self.orderby = orderby or []

    # rename deprecated fields
    for field in DEPRECATED_FIELDS:
        if field.old_name in kwargs:
            logger.warning(
                "The field `%s` is deprecated, please use `%s` instead.",
                field.old_name,
                field.new_name,
            )
            value = kwargs[field.old_name]
            if value:
                if hasattr(self, field.new_name):
                    logger.warning(
                        "The field `%s` is already populated, "
                        "replacing value with contents from `%s`.",
                        field.new_name,
                        field.old_name,
                    )
                setattr(self, field.new_name, value)

    # move deprecated extras fields to extras
    for field in DEPRECATED_EXTRAS_FIELDS:
        if field.old_name in kwargs:
            logger.warning(
                "The field `%s` is deprecated and should "
                "be passed to `extras` via the `%s` property.",
                field.old_name,
                field.new_name,
            )
            value = kwargs[field.old_name]
            if value:
                if hasattr(self.extras, field.new_name):
                    logger.warning(
                        "The field `%s` is already populated in "
                        "`extras`, replacing value with contents "
                        "from `%s`.",
                        field.new_name,
                        field.old_name,
                    )
                self.extras[field.new_name] = value
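# This revision delegates the "is this an ad-hoc metric?" test to
# is_adhoc_metric, where the previous revision inlined an
# `"expressionType" in metric` check; a minimal sketch of that helper under
# the same assumption:
from typing import Any


def is_adhoc_metric_sketch(metric: Any) -> bool:
    # ad-hoc metrics are dicts carrying an expressionType (SIMPLE or SQL)
    return isinstance(metric, dict) and "expressionType" in metric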
def __init__(  # pylint: disable=too-many-arguments,too-many-locals
    self,
    query_context: "QueryContext",
    annotation_layers: Optional[List[Dict[str, Any]]] = None,
    applied_time_extras: Optional[Dict[str, str]] = None,
    apply_fetch_values_predicate: bool = False,
    columns: Optional[List[str]] = None,
    datasource: Optional[DatasourceDict] = None,
    extras: Optional[Dict[str, Any]] = None,
    filters: Optional[List[QueryObjectFilterClause]] = None,
    granularity: Optional[str] = None,
    is_rowcount: bool = False,
    is_timeseries: Optional[bool] = None,
    metrics: Optional[List[Metric]] = None,
    order_desc: bool = True,
    orderby: Optional[List[OrderBy]] = None,
    post_processing: Optional[List[Optional[Dict[str, Any]]]] = None,
    result_type: Optional[ChartDataResultType] = None,
    row_limit: Optional[int] = None,
    row_offset: Optional[int] = None,
    series_columns: Optional[List[str]] = None,
    series_limit: int = 0,
    series_limit_metric: Optional[Metric] = None,
    time_range: Optional[str] = None,
    time_shift: Optional[str] = None,
    **kwargs: Any,
):
    columns = columns or []
    extras = extras or {}
    annotation_layers = annotation_layers or []

    self.time_offsets = kwargs.get("time_offsets", [])
    self.inner_from_dttm = kwargs.get("inner_from_dttm")
    self.inner_to_dttm = kwargs.get("inner_to_dttm")

    if series_columns:
        self.series_columns = series_columns
    elif is_timeseries and metrics:
        self.series_columns = columns
    else:
        self.series_columns = []

    self.is_rowcount = is_rowcount
    self.datasource = None
    if datasource:
        self.datasource = ConnectorRegistry.get_datasource(
            str(datasource["type"]), int(datasource["id"]), db.session
        )
    self.result_type = result_type or query_context.result_type
    self.apply_fetch_values_predicate = apply_fetch_values_predicate or False
    self.annotation_layers = [
        layer
        for layer in annotation_layers
        # formula annotations don't affect the payload, hence can be dropped
        if layer["annotationType"] != "FORMULA"
    ]
    self.applied_time_extras = applied_time_extras or {}
    self.granularity = granularity
    self.from_dttm, self.to_dttm = get_since_until(
        relative_start=extras.get(
            "relative_start", config["DEFAULT_RELATIVE_START_TIME"]
        ),
        relative_end=extras.get(
            "relative_end", config["DEFAULT_RELATIVE_END_TIME"]
        ),
        time_range=time_range,
        time_shift=time_shift,
    )
    # is_timeseries is True if the time column is among the selected columns
    # (the dimension list)
    self.is_timeseries = (
        is_timeseries if is_timeseries is not None else DTTM_ALIAS in columns
    )
    self.time_range = time_range
    self.time_shift = parse_human_timedelta(time_shift)
    self.post_processing = [
        post_proc for post_proc in post_processing or [] if post_proc
    ]

    # Support metric reference/definition in the format of
    # 1. 'metric_name' - name of predefined metric
    # 2. { label: 'label_name' } - legacy format for a predefined metric
    # 3. { expressionType: 'SIMPLE' | 'SQL', ... } - adhoc metric
    self.metrics = metrics and [
        x if isinstance(x, str) or is_adhoc_metric(x) else x["label"]  # type: ignore
        for x in metrics
    ]

    default_row_limit = (
        config["SAMPLES_ROW_LIMIT"]
        if self.result_type == ChartDataResultType.SAMPLES
        else config["ROW_LIMIT"]
    )
    self.row_limit = apply_max_row_limit(row_limit or default_row_limit)
    self.row_offset = row_offset or 0
    self.filter = filters or []
    self.series_limit = series_limit
    self.series_limit_metric = series_limit_metric
    self.order_desc = order_desc
    self.extras = extras

    if config["SIP_15_ENABLED"]:
        self.extras["time_range_endpoints"] = get_time_range_endpoints(
            form_data=self.extras
        )

    self.columns = columns
    self.orderby = orderby or []

    self._rename_deprecated_fields(kwargs)
    self._move_deprecated_extra_fields(kwargs)
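# apply_max_row_limit first appears in this revision; a plausible sketch,
# assuming it clamps the requested limit to a global SQL_MAX_ROW ceiling
# (the helper name matches Superset, the body is an illustrative assumption).
from typing import Optional


def apply_max_row_limit_sketch(limit: int, max_limit: Optional[int] = None) -> int:
    if max_limit is None:
        max_limit = config["SQL_MAX_ROW"]  # global hard cap from app config
    if limit != 0:
        return min(max_limit, limit)
    return max_limit  # a limit of 0 means "unbounded", so apply the cap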
def __init__(
    self,
    granularity: Optional[str] = None,
    metrics: Optional[List[Union[Dict[str, Any], str]]] = None,
    groupby: Optional[List[str]] = None,
    filters: Optional[List[Dict[str, Any]]] = None,
    time_range: Optional[str] = None,
    time_shift: Optional[str] = None,
    is_timeseries: bool = False,
    timeseries_limit: int = 0,
    row_limit: int = app.config["ROW_LIMIT"],
    timeseries_limit_metric: Optional[Metric] = None,
    order_desc: bool = True,
    extras: Optional[Dict[str, Any]] = None,
    columns: Optional[List[str]] = None,
    orderby: Optional[List[List[str]]] = None,
    post_processing: Optional[List[Dict[str, Any]]] = None,
    **kwargs: Any,
):
    metrics = metrics or []
    extras = extras or {}
    is_sip_38 = is_feature_enabled("SIP_38_VIZ_REARCHITECTURE")

    self.granularity = granularity
    self.from_dttm, self.to_dttm = utils.get_since_until(
        relative_start=extras.get(
            "relative_start", app.config["DEFAULT_RELATIVE_START_TIME"]
        ),
        relative_end=extras.get(
            "relative_end", app.config["DEFAULT_RELATIVE_END_TIME"]
        ),
        time_range=time_range,
        time_shift=time_shift,
    )
    self.is_timeseries = is_timeseries
    self.time_range = time_range
    self.time_shift = utils.parse_human_timedelta(time_shift)
    self.post_processing = post_processing or []
    if not is_sip_38:
        self.groupby = groupby or []

    # Temporary solution for a backward compatibility issue due to the new
    # format of non-ad-hoc metrics, which need to adhere to superset-ui per
    # https://git.io/Jvm7P.
    self.metrics = [
        metric if "expressionType" in metric else metric["label"]  # type: ignore
        for metric in metrics
    ]

    self.row_limit = row_limit
    self.filter = filters or []
    self.timeseries_limit = timeseries_limit
    self.timeseries_limit_metric = timeseries_limit_metric
    self.order_desc = order_desc
    self.extras = extras

    if app.config["SIP_15_ENABLED"] and "time_range_endpoints" not in self.extras:
        self.extras["time_range_endpoints"] = get_time_range_endpoints(form_data={})

    self.columns = columns or []
    if is_sip_38 and groupby:
        self.columns += groupby
        logger.warning(
            "The field `groupby` is deprecated. Viz plugins should "
            "pass all selectables via the `columns` field"
        )

    self.orderby = orderby or []

    # rename deprecated fields
    for field in DEPRECATED_FIELDS:
        if field.old_name in kwargs:
            logger.warning(
                f"The field `{field.old_name}` is deprecated, please use "
                f"`{field.new_name}` instead."
            )
            value = kwargs[field.old_name]
            if value:
                if hasattr(self, field.new_name):
                    logger.warning(
                        f"The field `{field.new_name}` is already populated, "
                        f"replacing value with contents from `{field.old_name}`."
                    )
                setattr(self, field.new_name, value)

    # move deprecated extras fields to extras
    for field in DEPRECATED_EXTRAS_FIELDS:
        if field.old_name in kwargs:
            logger.warning(
                f"The field `{field.old_name}` is deprecated and should be "
                f"passed to `extras` via the `{field.new_name}` property."
            )
            value = kwargs[field.old_name]
            if value:
                if hasattr(self.extras, field.new_name):
                    logger.warning(
                        f"The field `{field.new_name}` is already populated in "
                        f"`extras`, replacing value with contents "
                        f"from `{field.old_name}`."
                    )
                self.extras[field.new_name] = value
def __init__(
    self,
    granularity: str,
    metrics: List[Union[Dict, str]],
    groupby: Optional[List[str]] = None,
    filters: Optional[List[str]] = None,
    time_range: Optional[str] = None,
    time_shift: Optional[str] = None,
    is_timeseries: bool = False,
    timeseries_limit: int = 0,
    row_limit: int = app.config["ROW_LIMIT"],
    timeseries_limit_metric: Optional[Dict] = None,
    order_desc: bool = True,
    extras: Optional[Dict] = None,
    columns: Optional[List[str]] = None,
    orderby: Optional[List[List]] = None,
    post_processing: Optional[List[Dict[str, Any]]] = None,
    relative_start: str = app.config["DEFAULT_RELATIVE_START_TIME"],
    relative_end: str = app.config["DEFAULT_RELATIVE_END_TIME"],
):
    is_sip_38 = is_feature_enabled("SIP_38_VIZ_REARCHITECTURE")
    self.granularity = granularity
    self.from_dttm, self.to_dttm = utils.get_since_until(
        relative_start=relative_start,
        relative_end=relative_end,
        time_range=time_range,
        time_shift=time_shift,
    )
    self.is_timeseries = is_timeseries
    self.time_range = time_range
    self.time_shift = utils.parse_human_timedelta(time_shift)
    self.post_processing = post_processing or []
    if not is_sip_38:
        self.groupby = groupby or []

    # Temporary solution for a backward compatibility issue due to the new
    # format of non-ad-hoc metrics, which need to adhere to superset-ui per
    # https://git.io/Jvm7P.
    self.metrics = [
        metric if "expressionType" in metric else metric["label"]  # type: ignore
        for metric in metrics
    ]

    self.row_limit = row_limit
    self.filter = filters or []
    self.timeseries_limit = timeseries_limit
    self.timeseries_limit_metric = timeseries_limit_metric
    self.order_desc = order_desc
    self.extras = extras or {}

    if app.config["SIP_15_ENABLED"] and "time_range_endpoints" not in self.extras:
        self.extras["time_range_endpoints"] = get_time_range_endpoints(form_data={})

    self.columns = columns or []
    if is_sip_38 and groupby:
        self.columns += groupby
        logger.warning(
            "The field `groupby` is deprecated. Viz plugins should "
            "pass all selectables via the `columns` field"
        )

    self.orderby = orderby or []