Example #1
File: store.py (project: wxiang7/cubes)
    def create_conformed_rollup(self, cube, dimension, level=None, hierarchy=None,
                                replace=False, **options):
        """Extracts dimension values at certain level into a separate table.
        The new table name will be composed of `dimension_prefix`, dimension
        name and suffixed by dimension level. For example a product dimension
        at category level with prefix `dim_` will be called
        ``dim_product_category``

        Attributes:

        * `dimension` – dimension to be extracted
        * `level` – grain level
        * `hierarchy` – hierarchy to be used
        * `schema` – target schema
        * `dimension_prefix` – prefix used for the dimension table
        * `dimension_suffix` – suffix used for the dimension table
        * `replace` – if ``True`` then the existing table will be replaced;
          otherwise an exception is raised if the table already exists.
        """

        # TODO: 1.1 refactoring
        raise NotImplementedError("Needs to be updated to the new query builder")

        # NOTE: everything below is unreachable legacy code kept as a reference
        # for the refactoring; it refers to names (`mapper`, `schema`,
        # `dimension_prefix`, `dimension_suffix`) that are not defined in this
        # scope.

        naming = distill_naming(options)
        context = QueryContext(cube, mapper, schema=schema, metadata=self.metadata)

        dimension = cube.dimension(dimension)
        hierarchy = dimension.hierarchy(hierarchy)
        if level:
            depth = hierarchy.level_index(dimension.level(level)) + 1
        else:
            depth = len(hierarchy)

        if depth == 0:
            raise ArgumentError("Depth for dimension values should not be 0")
        elif depth is not None:
            levels = hierarchy.levels[0:depth]

        attributes = []
        for level in levels:
            attributes.extend(level.attributes)

        statement = context.denormalized_statement(attributes=attributes,
                                                   include_fact_key=False)

        group_by = [context.column(attr) for attr in attributes]
        statement = statement.group_by(*group_by)

        table_name = "%s%s%s_%s" % (dimension_prefix or "", dimension_suffix or "",
                                    str(dimension), str(level))
        self.create_table_from_statement(table_name, statement, schema,
                                         replace, insert=True)
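
A minimal usage sketch (hypothetical: `store` stands for an SQL store instance and `cube` for a cubes.Cube; the method currently raises NotImplementedError, so this only illustrates the intended call):

    # Hypothetical call: with dimension_prefix="dim_", dimension "product" and
    # level "category", the naming rule from the docstring yields a table
    # named dim_product_category.
    store.create_conformed_rollup(cube, "product",
                                  level="category",
                                  replace=True,
                                  dimension_prefix="dim_")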
Example #2
    def _compile_context(self, **kw):
        if self._precompiled_context is not None:
            # Clone the precompiled context and attach it to the current session and query
            # context = self._precompiled_context
            context = QueryContext.__new__(QueryContext)
            context.__dict__.update(self._precompiled_context.__dict__)
            context.query = self
            context.session = self.session
            context.attributes = context.attributes.copy()
            return context
        else:
            return super(PrecompiledQuery, self)._compile_context(**kw)
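
The snippet assumes `_precompiled_context` has been populated elsewhere on the class. A minimal sketch of how that might be wired up, relying on the same pre-1.4 SQLAlchemy Query internals the method overrides (the `precompile` helper is an illustrative assumption, not part of the original):

    from sqlalchemy.orm import Query

    class PrecompiledQuery(Query):
        _precompiled_context = None

        def precompile(self):
            # Hypothetical helper: build the QueryContext once through the
            # normal code path and cache it for the clone-and-reuse branch
            # in _compile_context() above.
            self._precompiled_context = super(PrecompiledQuery, self)._compile_context()
            return self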
Example #3
        def batch_load_fn(self, parents):  # pylint: disable=method-hidden
            """
            Batch loads the relationship of all the parents as one SQL statement.

            There is no way to do this out of the box with SQLAlchemy, but
            we can piggyback on some internal APIs of the `selectin`
            eager loading strategy. It's a bit hacky, but it's preferable
            to re-implementing and maintaining a big chunk of the `selectin`
            loader logic ourselves.

            The approach here is to build a regular query that
            selects the parents and `selectin`-loads the relationship.
            But instead of having the query emit two `SELECT` statements
            when calling `all()`, we skip the first `SELECT` statement
            and jump straight to the point where the `selectin` loader is
            called. To accomplish this, we have to construct the objects
            that are normally built in the first part of the query in order
            to call `SelectInLoader._load_for_path` directly.

            TODO Move this logic to a util in the SQLAlchemy repo as per
              SQLAlchemy's main maintainer's suggestion.
              See https://git.io/JewQ7
            """
            session = Session.object_session(parents[0])

            # These issues are very unlikely to happen in practice...
            for parent in parents:
                assert parent.__mapper__ is parent_mapper
                # All instances must share the same session
                assert session is Session.object_session(parent)
                # The behavior of `selectin` is undefined if the parent is dirty
                assert parent not in session.dirty

            loader = strategies.SelectInLoader(relationship_prop,
                                               (('lazy', 'selectin'), ))

            # Should the boolean be set to False? Does it matter for our purposes?
            states = [(sqlalchemy.inspect(parent), True) for parent in parents]

            # For our purposes, the query_context will only be used to get the session
            query_context = QueryContext(session.query(parent_mapper.entity))

            loader._load_for_path(
                query_context,
                parent_mapper._path_registry,
                states,
                None,
                child_mapper,
            )

            return promise.Promise.resolve(
                [getattr(parent, model_attr) for parent in parents])
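
`batch_load_fn` follows the batching protocol of the `promise` library's DataLoader: keys passed to `load()` are queued and handed to `batch_load_fn` as a single list. A minimal sketch of how it is typically hooked up (class and variable names below are illustrative assumptions):

    from promise import Promise
    from promise.dataloader import DataLoader

    class RelationshipLoader(DataLoader):
        def batch_load_fn(self, parents):
            ...  # the implementation shown above

    loader = RelationshipLoader()
    # parent_objects is a list of already-loaded parent instances that share
    # one session. Each load() returns a Promise; all queued parents are
    # resolved together by the single selectin SELECT emitted in batch_load_fn.
    children = Promise.all([loader.load(p) for p in parent_objects]).get()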