def validate(self) -> None:
    """Validate an UpdateDatasetCommand request before it is executed.

    Checks, in order: the dataset exists, the actor owns it, the
    (database_id, table_name) pair stays unique, the database is not
    being changed, and the owners list resolves to real users.

    :raises DatasetNotFoundError: if ``self._model_id`` does not exist
    :raises DatasetForbiddenError: if the actor is not an owner
    :raises DatasetInvalidError: aggregate of all validation errors
    """
    exceptions: List[ValidationError] = []
    owner_ids: Optional[List[int]] = self._properties.get("owners")

    # Validate/populate model exists
    self._model = DatasetDAO.find_by_id(self._model_id)
    if not self._model:
        raise DatasetNotFoundError()

    # Check ownership
    try:
        check_ownership(self._model)
    except SupersetSecurityException as ex:
        # Chain the security exception so the original cause is preserved.
        raise DatasetForbiddenError() from ex

    database_id = self._properties.get("database", None)
    table_name = self._properties.get("table_name", None)

    # Validate uniqueness of (database, table_name) excluding this dataset
    if not DatasetDAO.validate_update_uniqueness(
        self._model.database_id, self._model_id, table_name
    ):
        exceptions.append(DatasetExistsValidationError(table_name))

    # Validate database not allowed to change.
    # BUG FIX: the original compared the incoming integer id against the
    # model OBJECT (`database_id != self._model`), which is always true,
    # so any payload carrying a database id was rejected. Compare against
    # the model's database_id instead.
    if database_id and database_id != self._model.database_id:
        exceptions.append(DatabaseChangeValidationError())

    # Validate/Populate owner
    try:
        owners = populate_owners(self._actor, owner_ids)
        self._properties["owners"] = owners
    except ValidationError as ex:
        exceptions.append(ex)

    if exceptions:
        exception = DatasetInvalidError()
        exception.add_list(exceptions)
        raise exception
def dataset_macro(
    dataset_id: int,
    include_metrics: bool = False,
    columns: Optional[List[str]] = None,
) -> str:
    """
    Given a dataset ID, return the SQL that represents it.

    The generated SQL includes all columns (including computed) by
    default. Optionally the user can also request metrics to be included,
    and columns to group by.
    """
    # pylint: disable=import-outside-toplevel
    from superset.datasets.dao import DatasetDAO

    dataset = DatasetDAO.find_by_id(dataset_id)
    if not dataset:
        raise DatasetNotFoundError(f"Dataset {dataset_id} not found!")

    # Fall back to every column on the dataset when none were requested.
    if not columns:
        columns = [col.column_name for col in dataset.columns]

    metric_names = [metric.metric_name for metric in dataset.metrics]
    query_obj = {
        "is_timeseries": False,
        "filter": [],
        "metrics": metric_names if include_metrics else None,
        "columns": columns,
    }

    # Render the dataset as a subquery aliased by its id.
    sql = dataset.get_query_str_extended(query_obj).sql
    return f"({sql}) AS dataset_{dataset_id}"
def _export(
    model: Dashboard, export_related: bool = True
) -> Iterator[Tuple[str, str]]:
    """Serialize a dashboard (and optionally its related charts/datasets)
    as (file_name, yaml_content) pairs for the export bundle.

    :param model: the dashboard to export
    :param export_related: also yield files for referenced datasets/charts
    """
    dashboard_slug = secure_filename(model.dashboard_title)
    file_name = f"dashboards/{dashboard_slug}.yaml"

    payload = model.export_to_dict(
        recursive=False,
        include_parent_ref=False,
        include_defaults=True,
        export_uuids=True,
    )
    # TODO (betodealmeida): move this logic to export_to_dict once this
    # becomes the default export endpoint
    for key, new_name in JSON_KEYS.items():
        value: Optional[str] = payload.pop(key, None)
        if value:
            try:
                payload[new_name] = json.loads(value)
            except (TypeError, json.decoder.JSONDecodeError):
                logger.info("Unable to decode `%s` field: %s", key, value)
                payload[new_name] = {}

    # Extract all native filter datasets and replace native
    # filter dataset references with uuid
    for native_filter in payload.get("metadata", {}).get(
        "native_filter_configuration", []
    ):
        for target in native_filter.get("targets", []):
            dataset_id = target.pop("datasetId", None)
            if dataset_id is not None:
                dataset = DatasetDAO.find_by_id(dataset_id)
                # BUG FIX: a stale `datasetId` (dataset deleted since the
                # filter was configured) used to crash the export with an
                # AttributeError on `dataset.uuid`; skip dangling refs.
                if dataset is None:
                    logger.info(
                        "Unable to find dataset %s for native filter", dataset_id
                    )
                    continue
                target["datasetUuid"] = str(dataset.uuid)
                if export_related:
                    yield from ExportDatasetsCommand([dataset_id]).run()

    # the mapping between dashboard -> charts is inferred from the position
    # attribute, so if it's not present we need to add a default config
    if not payload.get("position"):
        payload["position"] = get_default_position(model.dashboard_title)

    # if any charts or not referenced in position, we need to add them
    # in a new row
    referenced_charts = find_chart_uuids(payload["position"])
    orphan_charts = {
        chart
        for chart in model.slices
        if str(chart.uuid) not in referenced_charts
    }
    if orphan_charts:
        payload["position"] = append_charts(payload["position"], orphan_charts)

    payload["version"] = EXPORT_VERSION

    file_content = yaml.safe_dump(payload, sort_keys=False)
    yield file_name, file_content

    if export_related:
        chart_ids = [chart.id for chart in model.slices]
        yield from ExportChartsCommand(chart_ids).run()
def check_dataset_access(dataset_id: int) -> Optional[bool]:
    """Return True when the current user may access the dataset.

    :raises DatasetNotFoundError: missing/zero id or no such dataset
    :raises DatasetAccessDeniedError: dataset exists but access is denied
    """
    # Guard clauses: a falsy id or an unknown dataset is "not found".
    if not dataset_id:
        raise DatasetNotFoundError()
    dataset = DatasetDAO.find_by_id(dataset_id)
    if not dataset:
        raise DatasetNotFoundError()
    if security_manager.can_access_datasource(dataset):
        return True
    raise DatasetAccessDeniedError()
def validate(self) -> None:
    """Ensure the target dataset exists and the actor owns it.

    :raises DatasetNotFoundError: if no dataset has ``self._model_id``
    :raises DatasetForbiddenError: if the ownership check fails
    """
    # Look the model up and keep it for later command steps.
    self._model = DatasetDAO.find_by_id(self._model_id)
    if not self._model:
        raise DatasetNotFoundError()
    # Translate the security-layer exception into the dataset-domain one.
    try:
        check_ownership(self._model)
    except SupersetSecurityException:
        raise DatasetForbiddenError()
def test_datasource_find_by_id_skip_base_filter_not_found(
    session_with_data: Session,
) -> None:
    """A lookup for a nonexistent id yields None even with the base filter skipped."""
    from superset.datasets.dao import DatasetDAO

    missing_id = 125326326
    found = DatasetDAO.find_by_id(
        missing_id,
        session=session_with_data,
        skip_base_filter=True,
    )

    assert found is None
def check_dataset_access(dataset_id: int) -> Optional[bool]:
    """Return True when the current user may access the dataset.

    :raises DatasetNotFoundError: missing/zero id or no such dataset
    :raises DatasetAccessDeniedError: dataset exists but access is denied
    """
    if not dataset_id:
        raise DatasetNotFoundError()
    # The security check below is authoritative (and expensive), so the
    # DAO lookup skips the base filter rather than applying it twice.
    dataset = DatasetDAO.find_by_id(dataset_id, skip_base_filter=True)
    if not dataset:
        raise DatasetNotFoundError()
    if security_manager.can_access_datasource(dataset):
        return True
    raise DatasetAccessDeniedError()
def related_objects(self, pk: int) -> Response:
    """Get charts and dashboards count associated to a dataset
    ---
    get:
      description:
        Get charts and dashboards count associated to a dataset
      parameters:
      - in: path
        name: pk
        schema:
          type: integer
      responses:
        200:
          description: Query result
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/DatasetRelatedObjectsResponse"
        401:
          $ref: '#/components/responses/401'
        404:
          $ref: '#/components/responses/404'
        500:
          $ref: '#/components/responses/500'
    """
    # 404 when the dataset itself does not exist.
    dataset = DatasetDAO.find_by_id(pk)
    if not dataset:
        return self.response_404()
    # data holds "charts" and "dashboards" collections for this dataset.
    data = DatasetDAO.get_related_objects(pk)
    # Project each related object down to the fields the UI needs.
    charts = [{
        "id": chart.id,
        "slice_name": chart.slice_name,
        "viz_type": chart.viz_type,
    } for chart in data["charts"]]
    dashboards = [{
        "id": dashboard.id,
        "json_metadata": dashboard.json_metadata,
        "slug": dashboard.slug,
        "title": dashboard.dashboard_title,
    } for dashboard in data["dashboards"]]
    return self.response(
        200,
        charts={
            "count": len(charts),
            "result": charts
        },
        dashboards={
            "count": len(dashboards),
            "result": dashboards
        },
    )
def test_datasource_find_by_id_skip_base_filter(
    session_with_data: Session,
) -> None:
    """Skipping the base filter lets the DAO return the raw SqlaTable row."""
    from superset.connectors.sqla.models import SqlaTable
    from superset.datasets.dao import DatasetDAO

    result = DatasetDAO.find_by_id(
        1,
        session=session_with_data,
        skip_base_filter=True,
    )

    assert result
    assert isinstance(result, SqlaTable)
    assert result.id == 1
    assert result.table_name == "my_sqla_table"
def validate(self) -> None:
    """Validate an UpdateDatasetCommand request before it is executed.

    Checks, in order: the dataset exists, the actor owns it, the
    (database_id, table_name) pair stays unique, the database is not
    being changed, the owners resolve to real users, and any supplied
    columns/metrics pass their dedicated validators.

    :raises DatasetNotFoundError: if ``self._model_id`` does not exist
    :raises DatasetForbiddenError: if the actor is not an owner
    :raises DatasetInvalidError: aggregate of all validation errors
    """
    exceptions: List[ValidationError] = []
    owner_ids: Optional[List[int]] = self._properties.get("owners")

    # Validate/populate model exists
    self._model = DatasetDAO.find_by_id(self._model_id)
    if not self._model:
        raise DatasetNotFoundError()

    # Check ownership
    try:
        security_manager.raise_for_ownership(self._model)
    except SupersetSecurityException as ex:
        raise DatasetForbiddenError() from ex

    database_id = self._properties.get("database", None)
    table_name = self._properties.get("table_name", None)

    # Validate uniqueness of (database, table_name) excluding this dataset
    if not DatasetDAO.validate_update_uniqueness(
        self._model.database_id, self._model_id, table_name
    ):
        exceptions.append(DatasetExistsValidationError(table_name))

    # Validate database not allowed to change.
    # BUG FIX: the original compared the incoming integer id against the
    # model OBJECT (`database_id != self._model`), which is always true,
    # so any payload carrying a database id was rejected. Compare against
    # the model's database_id instead.
    if database_id and database_id != self._model.database_id:
        exceptions.append(DatabaseChangeValidationError())

    # Validate/Populate owner
    try:
        owners = self.populate_owners(owner_ids)
        self._properties["owners"] = owners
    except ValidationError as ex:
        exceptions.append(ex)

    # Validate columns
    columns = self._properties.get("columns")
    if columns:
        self._validate_columns(columns, exceptions)

    # Validate metrics
    metrics = self._properties.get("metrics")
    if metrics:
        self._validate_metrics(metrics, exceptions)

    if exceptions:
        exception = DatasetInvalidError()
        exception.add_list(exceptions)
        raise exception