Example #1
 def set_rollback(self, rollback):
     """
     Set or unset the "needs rollback" flag -- for *advanced use* only.
     """
     if not self.in_atomic_block:
         raise TransactionManagementError(
             "The rollback flag doesn't work outside of an 'atomic' block.")
     self.needs_rollback = rollback
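The flag in Example #1 is also reachable through the module-level helper django.db.transaction.set_rollback(). A minimal sketch of forcing the enclosing atomic block to roll back without raising (the instance passed in is assumed to exist):

from django.db import transaction

def tentative_update(obj):
    with transaction.atomic():
        obj.save()
        # Mark the surrounding atomic block for rollback instead of raising;
        # calling this with no atomic block active raises
        # TransactionManagementError, as shown above.
        transaction.set_rollback(True)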
Example #2
 def on_commit(self, func):
     if self.in_atomic_block:
         # Transaction in progress; save for execution on commit.
         self.run_on_commit.append((set(self.savepoint_ids), func))
     elif not self.get_autocommit():
         raise TransactionManagementError('on_commit() cannot be used in manual transaction management')
     else:
         # No transaction in progress and in autocommit mode; execute
         # immediately.
         func()
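Example #2 is the connection-level half of Django's public transaction.on_commit() API; the hook runs only if the outermost atomic block commits. A minimal, self-contained sketch:

from django.db import transaction

with transaction.atomic():
    # ... create or update rows here ...
    # Deferred until the outermost atomic block commits; if the block
    # rolls back, the callback is simply discarded.
    transaction.on_commit(lambda: print("committed"))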
Example #3
 def leave_transaction_management(self):
     """
     Leaves transaction management for a running thread. A dirty flag is carried
     over to the surrounding block, as a commit will commit all changes, even
     those from outside. (Commits are on connection level.)
     """
     if self.transaction_state:
         del self.transaction_state[-1]
     else:
         raise TransactionManagementError(
             "This code isn't under transaction management")
     # We will pass the next status (after leaving the previous state
     # behind) to subclass hook.
     self._leave_transaction_management(self.is_managed())
     if self._dirty:
         self.rollback()
         raise TransactionManagementError(
             "Transaction managed block ended with pending COMMIT/ROLLBACK")
     self._dirty = False
Example #4
 def leave_transaction_management(self):
     """
     Leaves transaction management for a running thread. A dirty flag is carried
     over to the surrounding block, as a commit will commit all changes, even
     those from outside. (Commits are on connection level.)
     """
     self._leave_transaction_management(self.is_managed())
     if self.transaction_state:
         del self.transaction_state[-1]
     else:
         raise TransactionManagementError(
             "This code isn't under transaction "
             "management")
     if self._dirty:
         self.rollback()
         raise TransactionManagementError(
             "Transaction managed block ended with "
             "pending COMMIT/ROLLBACK")
     self._dirty = False
Example #5
 def set_dirty(self):
     """
     Sets a dirty flag for the current thread and code streak. This can be used
     in a managed block of code to decide whether there are open changes
     waiting for commit.
     """
     if self._dirty is not None:
         self._dirty = True
     else:
         raise TransactionManagementError("This code isn't under transaction "
             "management")
Example #6
 def leave_transaction_management(self):
     """
     On leaving a transaction restore autocommit behavior
     """
     try:
         if self.transaction_state:
             del self.transaction_state[-1]
         else:
             raise TransactionManagementError("This code isn't under transaction "
                                              "management")
         if self._dirty:
             self.rollback()
             raise TransactionManagementError("Transaction managed block ended with "
                                              "pending COMMIT/ROLLBACK")
     except:
         raise
     finally:
         # restore autocommit behavior
         self.connection.setautocommit(auto=True)
     self._dirty = False
Example #7
 def set_clean(self):
     """
     Resets a dirty flag for the current thread and code streak. This can be
     used in a managed block of code to decide whether a commit or rollback
     should happen.
     """
     if self._dirty is not None:
         self._dirty = False
     else:
         raise TransactionManagementError("This code isn't under transaction management")
     self.clean_savepoints()
Example #8
 def __enter__(self):
     if len(self.hooks) > 0:
         # Capture a textual description of the hooks to help us understand
         # why this is about to blow oodles of egg custard in our faces.
         description = "\n".join(gen_description_of_hooks(self.hooks))
         # Crash when there are orphaned post-commit hooks. These might
         # only turn up in testing, where transactions are managed by the
         # test framework instead of this decorator. We need to fail hard
         # -- not just warn about it -- to ensure it gets fixed.
         self.reset()
         raise TransactionManagementError(
             "Orphaned post-commit hooks found:\n" + description)
Example #9
 def test_on_signal__no_transaction(self, partial, on_commit):
     # test with signal_honor_transaction and not in transaction
     event = self.mock_event('x.y', sender_field=None)
     event.signal_honors_transaction = True
     event._on_signal = Mock(name='_on_signal')
     instance = self.Model()
     on_commit.side_effect = TransactionManagementError()
     assert django.TransactionManagementError is TransactionManagementError
     event.on_signal(instance, kw=1)
     partial.assert_called_once_with(event._on_signal, instance, {'kw': 1})
     on_commit.assert_called_once_with(partial())
     partial.return_value.assert_called_once_with()
Example #10
def pre_commit(func, using=None):
    connection = get_connection(using)

    if connection.in_atomic_block:
        # Transaction in progress; save for execution on commit.
        func_hash = hash(func) if isinstance(func, UniquePreCommitCallable) else None
        if (func_hash is None
                or func_hash not in {func_hash for sids, func_hash, func in connection.run_pre_commit}):
            connection.run_pre_commit.append((set(connection.savepoint_ids), func_hash, func))

    elif not connection.get_autocommit():
        raise TransactionManagementError('pre_commit() cannot be used in manual transaction management')
    else:
        # No transaction in progress and in autocommit mode; execute immediately.
        func()
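Assuming the pre_commit() helper from Example #10 (it is not part of Django itself), usage would mirror transaction.on_commit(), except that the callable runs just before the commit:

from django.db import transaction

with transaction.atomic():
    # in_atomic_block is True, so the callable is queued on
    # connection.run_pre_commit rather than executed immediately.
    pre_commit(lambda: print("about to commit"))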
Example #11
 def managed(self, flag=True):
     """
     Puts the transaction manager into a manual state: managed transactions have
     to be committed explicitly by the user. If you switch off transaction
     management and there is a pending commit/rollback, the data will be
     committed.
     """
     top = self.transaction_state
     if top:
         top[-1] = flag
         if not flag and self.is_dirty():
             self._commit()
             self.set_clean()
     else:
         raise TransactionManagementError("This code isn't under transaction "
             "management")
Example #12
    def acquire(self):
        connection = connections[self.db]
        qn = connection.ops.quote_name
        with connection.cursor() as cursor:
            if not connection.get_autocommit():
                raise TransactionManagementError(
                    "InnoDB requires that we not be in a transaction when "
                    "gaining a table lock.")

            # Begin transaction - does 'SET autocommit = 0'
            self._atomic = atomic(using=self.db)
            self._atomic.__enter__()

            locks = ["{} READ".format(qn(name)) for name in self.read]
            for name in self.write:
                locks.append("{} WRITE".format(qn(name)))
            cursor.execute("LOCK TABLES {}".format(", ".join(locks)))
Example #13
def validate_in_transaction(connection):
    """Ensure that `connection` is within a transaction.

    This only enquires as to Django's perspective on the situation. It does
    not actually check that the database agrees with Django.

    :raise TransactionManagementError: If no transaction is in progress.
    """
    if not in_transaction(connection):
        raise TransactionManagementError(
            # XXX: GavinPanella 2015-08-07 bug=1482563: This error message is
            # specific to lobjects, but this lives in a general utils module.
            "PostgreSQL's large object support demands that all interactions "
            "are done in a transaction. Further, lobject() has been known to "
            "segfault when used outside of a transaction. This assertion has "
            "prevented the use of lobject() outside of a transaction. Please "
            "investigate.")
Example #14
 def execute(self, sql, params=()):
     """Execute the given SQL statement, with optional parameters."""
     # Don't perform the transactional DDL check if SQL is being collected
     # as it's not going to be executed anyway.
     if not self.collect_sql and self.connection.in_atomic_block and not self.connection.features.can_rollback_ddl:
         raise TransactionManagementError(
             "Executing DDL statements while in a transaction on databases "
             "that can't perform a rollback is prohibited."
         )
     # Log the command we're running, then run it
     logger.debug("%s; (params %r)", sql, params, extra={'params': params, 'sql': sql})
     if self.collect_sql:
         ending = "" if sql.endswith(";") else ";"
         if params is not None:
             self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending)
         else:
             self.collected_sql.append(sql + ending)
     else:
         with self.connection.cursor() as cursor:
             cursor.execute(sql, params)
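The guard at the top of Example #14 (and its variant in the next example) is why DDL executed inside an atomic() block fails on backends such as MySQL that cannot roll back DDL. One common way around it is to mark the migration as non-atomic; a sketch with hypothetical app and SQL names:

from django.db import migrations

class Migration(migrations.Migration):
    # Run the operations outside a single wrapping transaction so the
    # schema editor's transactional-DDL check is never triggered.
    atomic = False

    dependencies = [('shop', '0002_previous')]  # hypothetical
    operations = [
        migrations.RunSQL('CREATE INDEX shop_name_idx ON shop_item (name)'),
    ]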
Example #15
 def execute(self, sql, params=(), has_result=False):
     """
     Executes the given SQL statement, with optional parameters.
     """
     result = None
     # Don't perform the transactional DDL check if SQL is being collected
     # as it's not going to be executed anyway.
     if not self.collect_sql and self.connection.in_atomic_block and not self.connection.features.can_rollback_ddl:
         raise TransactionManagementError(
             "Executing DDL statements while in a transaction on databases "
             "that can't perform a rollback is prohibited.")
     # Account for non-string statement objects.
     sql = str(sql)
     # Log the command we're running, then run it
     logger.debug("%s; (params %r)",
                  sql,
                  params,
                  extra={
                      'params': params,
                      'sql': sql
                  })
     if self.collect_sql:
         ending = "" if sql.endswith(";") else ";"
         if params is not None:
             self.collected_sql.append(
                 (sql % tuple(map(self.quote_value, params))) + ending)
         else:
             self.collected_sql.append(sql + ending)
     else:
         cursor = self.connection.cursor()
         cursor.execute(sql, params)
         if has_result:
             result = cursor.fetchall()
         # the cursor can be closed only when the driver supports opening
         # multiple cursors on a connection because the migration command
         # has already opened a cursor outside this method
         if self.connection.supports_mars:
             cursor.close()
     return result
Example #16
def savepoint():
    """Context manager to wrap the code within a savepoint.

    This also enters a savepoint context for post-commit hooks, and so should
    always be used in preference to `transaction.atomic()` when only a
    savepoint is needed.

    If either a transaction or a savepoint within a transaction is what you
    want, use the `transactional` decorator.

    If you want a _decorator_ specifically, use the `transactional` decorator.

    If you want a _savepoint decorator_ specifically, write one, or adapt
    this to do it.

    """
    if connection.in_atomic_block:
        with post_commit_hooks.savepoint():
            with transaction.atomic():
                yield
    else:
        raise TransactionManagementError(
            "Savepoints cannot be created outside of a transaction.")
Example #17
 def set_rollback(self, rollback):
     if not self.in_atomic_block:
         raise TransactionManagementError(
             "The rollback flag doesn't work outside of an 'atomic' block.")
     self.needs_rollback = rollback
Example #18
 def validate_no_broken_transaction(self):
     if self.needs_rollback:
         raise TransactionManagementError(
             "An error occurred in the current transaction. You can't "
             "execute queries until the end of the 'atomic' block.")
Example #19
 def validate_no_atomic_block(self):
     """Raise an error if an atomic block is active."""
     if self.in_atomic_block:
         raise TransactionManagementError(
             "This is forbidden when an 'atomic' block is active.")
Example #20
    def as_sql(self, with_limits=True, with_col_aliases=False):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.

        If 'with_limits' is False, any limit/offset information is not included
        in the query.
        """
        if with_limits and self.query.low_mark == self.query.high_mark:
            return '', ()

        self.pre_sql_setup()
        # After executing the query, we must get rid of any joins the query
        # setup created. So, take note of alias counts before the query ran.
        # However we do not want to get rid of stuff done in pre_sql_setup(),
        # as the pre_sql_setup will modify query state in a way that forbids
        # another run of it.
        refcounts_before = self.query.alias_refcount.copy()
        out_cols, s_params = self.get_columns(with_col_aliases)
        ordering, o_params, ordering_group_by = self.get_ordering()

        distinct_fields = self.get_distinct()

        # This must come after 'select', 'ordering' and 'distinct' -- see
        # docstring of get_from_clause() for details.
        from_, f_params = self.get_from_clause()

        where, w_params = self.compile(self.query.where)
        having, h_params = self.compile(self.query.having)
        having_group_by = self.query.having.get_group_by_cols()
        params = []
        for val in six.itervalues(self.query.extra_select):
            params.extend(val[1])

        result = ['SELECT']

        if self.query.distinct:
            result.append(self.connection.ops.distinct_sql(distinct_fields))

        result.append(', '.join(out_cols + self.ordering_aliases))
        params.extend(s_params)
        params.extend(self.ordering_params)

        result.append('FROM')
        result.extend(from_)
        params.extend(f_params)

        if where:
            result.append('WHERE %s' % where)
            params.extend(w_params)

        grouping, gb_params = self.get_grouping(having_group_by,
                                                ordering_group_by)
        if grouping:
            if distinct_fields:
                raise NotImplementedError(
                    "annotate() + distinct(fields) not implemented.")
            if not ordering:
                ordering = self.connection.ops.force_no_ordering()
            result.append('GROUP BY %s' % ', '.join(grouping))
            params.extend(gb_params)

        if having:
            result.append('HAVING %s' % having)
            params.extend(h_params)

        if ordering:
            result.append('ORDER BY %s' % ', '.join(ordering))
            params.extend(o_params)

        if with_limits:
            if self.query.high_mark is not None:
                result.append('LIMIT %d' %
                              (self.query.high_mark - self.query.low_mark))
            if self.query.low_mark:
                if self.query.high_mark is None:
                    val = self.connection.ops.no_limit_value()
                    if val:
                        result.append('LIMIT %d' % val)
                result.append('OFFSET %d' % self.query.low_mark)

        if self.query.select_for_update and self.connection.features.has_select_for_update:
            if self.connection.get_autocommit():
                raise TransactionManagementError(
                    "select_for_update cannot be used outside of a transaction."
                )

            # If we've been asked for a NOWAIT query but the backend does not support it,
            # raise a DatabaseError otherwise we could get an unexpected deadlock.
            nowait = self.query.select_for_update_nowait
            if nowait and not self.connection.features.has_select_for_update_nowait:
                raise DatabaseError(
                    'NOWAIT is not supported on this database backend.')
            result.append(self.connection.ops.for_update_sql(nowait=nowait))

        # Finally do cleanup - get rid of the joins we created above.
        self.query.reset_refcounts(refcounts_before)
        return ' '.join(result), tuple(params)
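The select_for_update check in Example #20 (repeated in the later compiler variants) surfaces at runtime whenever a FOR UPDATE queryset is evaluated in autocommit mode. A sketch of the wrong and right call (MyModel is hypothetical):

from django.db import transaction

# Wrong: evaluating the queryset outside a transaction raises
# "select_for_update cannot be used outside of a transaction."
# list(MyModel.objects.select_for_update())

# Right: evaluate it while an atomic block holds the transaction open.
with transaction.atomic():
    rows = list(MyModel.objects.select_for_update())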
Example #21
    def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):  # pylint:disable=arguments-differ
        # the argument `subquery` is only for old Django 1.10
        # pylint:disable=too-many-locals,too-many-branches,too-many-statements
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.

        If 'with_limits' is False, any limit/offset information is not included
        in the query.
        """
        # After executing the query, we must get rid of any joins the query
        # setup created. So, take note of alias counts before the query ran.
        # However we do not want to get rid of stuff done in pre_sql_setup(),
        # as the pre_sql_setup will modify query state in a way that forbids
        # another run of it.
        if with_limits and self.query.low_mark == self.query.high_mark:
            return '', ()
        self.subquery = subquery
        refcounts_before = self.query.alias_refcount.copy()
        soql_trans = self.query_topology()
        try:
            extra_select, order_by, group_by = self.pre_sql_setup()
            if with_limits and self.query.low_mark == self.query.high_mark:
                return '', ()
            distinct_fields = self.get_distinct()

            # This must come after 'select', 'ordering', and 'distinct' -- see
            # docstring of get_from_clause() for details.
            from_, f_params = self.get_from_clause()

            where, w_params = self.compile(
                self.where) if self.where is not None else ("", [])
            having, h_params = self.compile(
                self.having) if self.having is not None else ("", [])
            params = []
            result = ['SELECT']

            if self.query.distinct:
                result.append(
                    self.connection.ops.distinct_sql(distinct_fields))

            out_cols = []
            col_idx = 1
            for _, (s_sql, s_params), alias in self.select + extra_select:
                if alias:
                    # fixed by removing 'AS'
                    s_sql = '%s %s' % (s_sql,
                                       self.connection.ops.quote_name(alias))
                elif with_col_aliases and not isinstance(
                        with_col_aliases,
                        salesforce.backend.base.DatabaseWrapper):
                    s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
                    col_idx += 1
                if soql_trans and re.match(r'^\w+\.\w+$', s_sql):
                    tab_name, col_name = s_sql.split('.')
                    s_sql = '%s.%s' % (soql_trans[tab_name], col_name)
                params.extend(s_params)
                out_cols.append(s_sql)

            result.append(', '.join(out_cols))

            result.append('FROM')
            result.extend(from_)
            params.extend(f_params)

            if where:
                result.append('WHERE %s' % where)
                params.extend(w_params)

            grouping = []
            for g_sql, g_params in group_by:
                grouping.append(g_sql)
                params.extend(g_params)
            if grouping:
                if distinct_fields:
                    raise NotImplementedError(
                        "annotate() + distinct(fields) is not implemented.")
                if not order_by:
                    order_by = self.connection.ops.force_no_ordering()
                result.append('GROUP BY %s' % ', '.join(grouping))

            if having:
                result.append('HAVING %s' % having)
                params.extend(h_params)

            if order_by:
                ordering = []
                for _, (o_sql, o_params, _) in order_by:
                    ordering.append(o_sql)
                    params.extend(o_params)
                result.append('ORDER BY %s' % ', '.join(ordering))

            if with_limits:
                if self.query.high_mark is not None:
                    result.append('LIMIT %d' %
                                  (self.query.high_mark - self.query.low_mark))
                if self.query.low_mark:
                    if self.query.high_mark is None:
                        val = self.connection.ops.no_limit_value()
                        if val:
                            result.append('LIMIT %d' % val)
                    result.append('OFFSET %d' % self.query.low_mark)

            if self.query.select_for_update and self.connection.features.has_select_for_update:
                if self.connection.get_autocommit():
                    raise TransactionManagementError(
                        "select_for_update cannot be used outside of a transaction."
                    )

                # If we've been asked for a NOWAIT query but the backend does
                # not support it, raise a DatabaseError otherwise we could get
                # an unexpected deadlock.
                nowait = self.query.select_for_update_nowait
                if nowait and not self.connection.features.has_select_for_update_nowait:
                    raise DatabaseError(
                        'NOWAIT is not supported on this database backend.')
                result.append(
                    self.connection.ops.for_update_sql(nowait=nowait))

            return ' '.join(result), tuple(params)
        finally:
            # Finally do cleanup - get rid of the joins we created above.
            self.query.reset_refcounts(refcounts_before)
Example #22
    def as_sql(self, with_limits=True, with_col_aliases=False):
        """
        Create the SQL for this query. Return the SQL string and list of
        parameters.

        If 'with_limits' is False, any limit/offset information is not included
        in the query.
        """
        refcounts_before = self.query.alias_refcount.copy()
        try:
            extra_select, order_by, group_by = self.pre_sql_setup()
            for_update_part = None
            # Is a LIMIT/OFFSET clause needed?
            with_limit_offset = with_limits and (
                self.query.high_mark is not None or self.query.low_mark)
            combinator = self.query.combinator
            features = self.connection.features

            # The do_offset flag indicates whether we need to construct
            # the SQL needed to use limit/offset w/SQL Server.
            high_mark = self.query.high_mark
            low_mark = self.query.low_mark
            do_limit = with_limits and high_mark is not None
            do_offset = with_limits and low_mark != 0
            # SQL Server 2012 or newer supports OFFSET/FETCH clause
            supports_offset_clause = self.connection.sql_server_version >= 2012
            do_offset_emulation = do_offset and not supports_offset_clause

            if combinator:
                if not getattr(features,
                               'supports_select_{}'.format(combinator)):
                    raise NotSupportedError(
                        '{} is not supported on this database backend.'.format(
                            combinator))
                result, params = self.get_combinator_sql(
                    combinator, self.query.combinator_all)
            else:
                distinct_fields, distinct_params = self.get_distinct()
                # This must come after 'select', 'ordering', and 'distinct' -- see
                # docstring of get_from_clause() for details.
                from_, f_params = self.get_from_clause()
                where, w_params = self.compile(
                    self.where) if self.where is not None else ("", [])
                having, h_params = self.compile(
                    self.having) if self.having is not None else ("", [])
                params = []
                result = ['SELECT']

                if self.query.distinct:
                    distinct_result, distinct_params = self.connection.ops.distinct_sql(
                        distinct_fields,
                        distinct_params,
                    )
                    result += distinct_result
                    params += distinct_params

                # SQL Server requires the keyword for limiting at the beginning
                if do_limit and not do_offset:
                    result.append('TOP %d' % high_mark)

                out_cols = []
                col_idx = 1
                for _, (s_sql, s_params), alias in self.select + extra_select:
                    if alias:
                        s_sql = '%s AS %s' % (
                            s_sql, self.connection.ops.quote_name(alias))
                    elif with_col_aliases or do_offset_emulation:
                        s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
                        col_idx += 1
                    params.extend(s_params)
                    out_cols.append(s_sql)

                # SQL Server requires an order-by clause for offsetting
                if do_offset:
                    meta = self.query.get_meta()
                    qn = self.quote_name_unless_alias
                    offsetting_order_by = '%s.%s' % (qn(
                        meta.db_table), qn(meta.pk.db_column
                                           or meta.pk.column))
                    if do_offset_emulation:
                        if order_by:
                            ordering = []
                            for expr, (o_sql, o_params, _) in order_by:
                                # value_expression in OVER clause cannot refer to
                                # expressions or aliases in the select list. See:
                                # http://msdn.microsoft.com/en-us/library/ms189461.aspx
                                src = next(iter(expr.get_source_expressions()))
                                if isinstance(src, Ref):
                                    src = next(
                                        iter(src.get_source_expressions()))
                                    o_sql, _ = src.as_sql(
                                        self, self.connection)
                                    odir = 'DESC' if expr.descending else 'ASC'
                                    o_sql = '%s %s' % (o_sql, odir)
                                ordering.append(o_sql)
                                params.extend(o_params)
                            offsetting_order_by = ', '.join(ordering)
                            order_by = []
                        out_cols.append(
                            'ROW_NUMBER() OVER (ORDER BY %s) AS [rn]' %
                            offsetting_order_by)
                    elif not order_by:
                        order_by.append(
                            ((None, ('%s ASC' % offsetting_order_by, [],
                                     None))))

                if self.query.select_for_update and self.connection.features.has_select_for_update:
                    if self.connection.get_autocommit():
                        raise TransactionManagementError(
                            'select_for_update cannot be used outside of a transaction.'
                        )

                    if with_limit_offset and not self.connection.features.supports_select_for_update_with_limit:
                        raise NotSupportedError(
                            'LIMIT/OFFSET is not supported with '
                            'select_for_update on this database backend.')
                    nowait = self.query.select_for_update_nowait
                    skip_locked = self.query.select_for_update_skip_locked
                    of = self.query.select_for_update_of
                    # If it's a NOWAIT/SKIP LOCKED/OF query but the backend
                    # doesn't support it, raise NotSupportedError to prevent a
                    # possible deadlock.
                    if nowait and not self.connection.features.has_select_for_update_nowait:
                        raise NotSupportedError(
                            'NOWAIT is not supported on this database backend.'
                        )
                    elif skip_locked and not self.connection.features.has_select_for_update_skip_locked:
                        raise NotSupportedError(
                            'SKIP LOCKED is not supported on this database backend.'
                        )
                    elif of and not self.connection.features.has_select_for_update_of:
                        raise NotSupportedError(
                            'FOR UPDATE OF is not supported on this database backend.'
                        )
                    for_update_part = self.connection.ops.for_update_sql(
                        nowait=nowait,
                        skip_locked=skip_locked,
                        of=self.get_select_for_update_of_arguments(),
                    )

                if for_update_part and self.connection.features.for_update_after_from:
                    from_.insert(1, for_update_part)

                result += [', '.join(out_cols), 'FROM', *from_]
                params.extend(f_params)

                if where:
                    result.append('WHERE %s' % where)
                    params.extend(w_params)

                grouping = []
                for g_sql, g_params in group_by:
                    grouping.append(g_sql)
                    params.extend(g_params)
                if grouping:
                    if distinct_fields:
                        raise NotImplementedError(
                            'annotate() + distinct(fields) is not implemented.'
                        )
                    order_by = order_by or self.connection.ops.force_no_ordering(
                    )
                    result.append('GROUP BY %s' % ', '.join(grouping))

                if having:
                    result.append('HAVING %s' % having)
                    params.extend(h_params)

            if self.query.explain_query:
                result.insert(
                    0,
                    self.connection.ops.explain_query_prefix(
                        self.query.explain_format,
                        **self.query.explain_options))

            if order_by:
                ordering = []
                for _, (o_sql, o_params, _) in order_by:
                    ordering.append(o_sql)
                    params.extend(o_params)
                result.append('ORDER BY %s' % ', '.join(ordering))

            # SQL Server requires the backend-specific emulation (2008 or earlier)
            # or an offset clause (2012 or newer) for offsetting
            if do_offset:
                if do_offset_emulation:
                    # Construct the final SQL clause, using the initial select SQL
                    # obtained above.
                    result = [
                        'SELECT * FROM (%s) AS X WHERE X.rn' % ' '.join(result)
                    ]
                    # Place WHERE condition on `rn` for the desired range.
                    if do_limit:
                        result.append('BETWEEN %d AND %d' %
                                      (low_mark + 1, high_mark))
                    else:
                        result.append('>= %d' % (low_mark + 1))
                    if not self.query.subquery:
                        result.append('ORDER BY X.rn')
                else:
                    result.append(
                        self.connection.ops.limit_offset_sql(
                            self.query.low_mark, self.query.high_mark))

            if self.query.subquery and extra_select:
                # If the query is used as a subquery, the extra selects would
                # result in more columns than the left-hand side expression is
                # expecting. This can happen when a subquery uses a combination
                # of order_by() and distinct(), forcing the ordering expressions
                # to be selected as well. Wrap the query in another subquery
                # to exclude extraneous selects.
                sub_selects = []
                sub_params = []
                for index, (select, _, alias) in enumerate(self.select,
                                                           start=1):
                    if not alias and with_col_aliases:
                        alias = 'col%d' % index
                    if alias:
                        sub_selects.append("%s.%s" % (
                            self.connection.ops.quote_name('subquery'),
                            self.connection.ops.quote_name(alias),
                        ))
                    else:
                        select_clone = select.relabeled_clone(
                            {select.alias: 'subquery'})
                        subselect, subparams = select_clone.as_sql(
                            self, self.connection)
                        sub_selects.append(subselect)
                        sub_params.extend(subparams)
                return 'SELECT %s FROM (%s) subquery' % (
                    ', '.join(sub_selects),
                    ' '.join(result),
                ), tuple(sub_params + params)

            return ' '.join(result), tuple(params)
        finally:
            # Finally do cleanup - get rid of the joins we created above.
            self.query.reset_refcounts(refcounts_before)
Example #23
def populate_tags(tag):
    """Evaluate `tag` for all nodes.

    This returns a `Deferred` that will fire when all tags have been
    evaluated. The return value is intended FOR TESTING ONLY because:

    - You must not use the `Deferred` in the calling thread; it must only be
      manipulated in the reactor thread. Pretending it's not there is safer
      than chaining code onto it because it's easy to get wrong.

    - The call may not finish for 10 minutes or more. It is therefore not a
      good thing to be waiting for in a web request.

    """
    # This function cannot be called inside a transaction. The function manages
    # its own transaction.
    if in_transaction():
        raise TransactionManagementError(
            "`populate_tags` cannot be called inside an existing transaction.")

    logger.debug('Evaluating the "%s" tag for all nodes.', tag.name)

    clients = getAllClients()
    if len(clients) == 0:
        # We have no clients so we need to do the work locally.
        @transactional
        def _populate_tag():
            return populate_tag_for_multiple_nodes(tag, Node.objects.all())

        return _populate_tag()
    else:
        # Split the work between the connected rack controllers.
        @transactional
        def _generate_work():
            node_ids = Node.objects.all().values_list("system_id", flat=True)
            node_ids = [{"system_id": node_id} for node_id in node_ids]
            chunked_node_ids = list(chunk_list(node_ids, len(clients)))
            connected_racks = []
            for idx, client in enumerate(clients):
                rack = RackController.objects.get(system_id=client.ident)
                token = _get_or_create_auth_token(rack.owner)
                creds = convert_tuple_to_string(get_creds_tuple(token))
                if len(chunked_node_ids) > idx:
                    connected_racks.append({
                        "system_id":
                        rack.system_id,
                        "hostname":
                        rack.hostname,
                        "client":
                        client,
                        "tag_name":
                        tag.name,
                        "tag_definition":
                        tag.definition,
                        "tag_nsmap": [{
                            "prefix": prefix,
                            "uri": uri
                        } for prefix, uri in tag_nsmap.items()],
                        "credentials":
                        creds,
                        "nodes":
                        list(chunked_node_ids[idx]),
                    })
            return connected_racks

        return _do_populate_tags(_generate_work())
Example #24
        self.autocommit = autocommit

        if autocommit and self.run_commit_hooks_on_set_autocommit_on:
            self.run_and_clear_commit_hooks()
            self.run_commit_hooks_on_set_autocommit_on = False

    def get_rollback(self):
        """Get the "needs rollback" flag -- for *advanced use* only."""
        if not self.in_atomic_block:
            raise TransactionManagementError(
                "The rollback flag doesn't work outside of an 'atomic' block.")
        return self.needs_rollback

    def set_rollback(self, rollback):
        """
        Set or unset the "needs rollback" flag -- for *advanced use* only.
        """
        if not self.in_atomic_block:
            raise TransactionManagementError(
                "The rollback flag doesn't work outside of an 'atomic' block.")
        self.needs_rollback = rollback

    def validate_no_atomic_block(self):
        """Raise an error if an atomic block is active."""
Example #25
    def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.

        If 'with_limits' is False, any limit/offset information is not included
        in the query.
        """
        # After executing the query, we must get rid of any joins the query
        # setup created. So, take note of alias counts before the query ran.
        # However we do not want to get rid of stuff done in pre_sql_setup(),
        # as the pre_sql_setup will modify query state in a way that forbids
        # another run of it.
        self.subquery = subquery
        refcounts_before = self.query.alias_refcount.copy()
        try:
            extra_select, order_by, group_by = self.pre_sql_setup()
            if with_limits and self.query.low_mark == self.query.high_mark:
                return '', ()

            # The do_offset flag indicates whether we need to construct
            # the SQL needed to use limit/offset w/SQL Server.
            high_mark = self.query.high_mark
            low_mark = self.query.low_mark
            do_limit = with_limits and high_mark is not None
            do_offset = with_limits and low_mark != 0
            # SQL Server 2012 or newer supports OFFSET/FETCH clause
            supports_offset_clause = self.connection.sql_server_version >= 2012
            do_offset_emulation = do_offset and not supports_offset_clause

            distinct_fields = self.get_distinct()

            # This must come after 'select', 'ordering', and 'distinct' -- see
            # docstring of get_from_clause() for details.
            from_, f_params = self.get_from_clause()

            where, w_params = self.compile(self.query.where)
            having, h_params = self.compile(self.query.having)
            params = []
            result = ['SELECT']

            if self.query.distinct:
                result.append(self.connection.ops.distinct_sql(distinct_fields))

            # SQL Server requires the keyword for limiting at the beginning
            if do_limit and not do_offset:
                result.append('TOP %d' % high_mark)

            out_cols = []
            col_idx = 1
            for _, (s_sql, s_params), alias in self.select + extra_select:
                if alias:
                    s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
                elif with_col_aliases or do_offset_emulation:
                    s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
                    col_idx += 1
                params.extend(s_params)
                out_cols.append(s_sql)

            # SQL Server requires an order-by clause for offsetting
            if do_offset:
                meta = self.query.get_meta()
                qn = self.quote_name_unless_alias
                offsetting_order_by = '%s.%s' % (qn(meta.db_table), qn(meta.pk.db_column or meta.pk.column))
                if do_offset_emulation:
                    if order_by:
                        ordering = []
                        for expr, (o_sql, o_params, _) in order_by:
                            # value_expression in OVER clause cannot refer to
                            # expressions or aliases in the select list. See:
                            # http://msdn.microsoft.com/en-us/library/ms189461.aspx
                            src = next(iter(expr.get_source_expressions()))
                            if isinstance(src, Ref):
                                src = next(iter(src.get_source_expressions()))
                                o_sql, _  = src.as_sql(self, self.connection)
                                odir = 'DESC' if expr.descending else 'ASC'
                                o_sql = '%s %s' % (o_sql, odir)
                            ordering.append(o_sql)
                            params.extend(o_params)
                        offsetting_order_by = ', '.join(ordering)
                        order_by = []
                    out_cols.append('ROW_NUMBER() OVER (ORDER BY %s) AS [rn]' % offsetting_order_by)
                elif not order_by:
                    order_by.append(((None, ('%s ASC' % offsetting_order_by, [], None))))

            result.append(', '.join(out_cols))

            result.append('FROM')
            result.extend(from_)
            params.extend(f_params)

            if self.query.select_for_update and self.connection.features.has_select_for_update:
                if self.connection.get_autocommit():
                    raise TransactionManagementError(
                        "select_for_update cannot be used outside of a transaction."
                    )

                # If we've been asked for a NOWAIT query but the backend does
                # not support it, raise a DatabaseError otherwise we could get
                # an unexpected deadlock.
                nowait = self.query.select_for_update_nowait
                if nowait and not self.connection.features.has_select_for_update_nowait:
                    raise DatabaseError('NOWAIT is not supported on this database backend.')
                result.append(self.connection.ops.for_update_sql(nowait=nowait))

            if where:
                result.append('WHERE %s' % where)
                params.extend(w_params)

            grouping = []
            for g_sql, g_params in group_by:
                grouping.append(g_sql)
                params.extend(g_params)
            if grouping:
                if distinct_fields:
                    raise NotImplementedError(
                        "annotate() + distinct(fields) is not implemented.")
                if not order_by:
                    order_by = self.connection.ops.force_no_ordering()
                result.append('GROUP BY %s' % ', '.join(grouping))

            if having:
                result.append('HAVING %s' % having)
                params.extend(h_params)

            if order_by:
                ordering = []
                for _, (o_sql, o_params, _) in order_by:
                    ordering.append(o_sql)
                    params.extend(o_params)
                result.append('ORDER BY %s' % ', '.join(ordering))

            # SQL Server requires the backend-specific emulation (2008 or earlier)
            # or an offset clause (2012 or newer) for offsetting
            if do_offset:
                if do_offset_emulation:
                    # Construct the final SQL clause, using the initial select SQL
                    # obtained above.
                    result = ['SELECT * FROM (%s) AS X WHERE X.rn' % ' '.join(result)]
                    # Place WHERE condition on `rn` for the desired range.
                    if do_limit:
                        result.append('BETWEEN %d AND %d' % (low_mark+1, high_mark))
                    else:
                        result.append('>= %d' % (low_mark+1))
                    if not subquery:
                        result.append('ORDER BY X.rn')
                else:
                    result.append('OFFSET %d ROWS' % low_mark)
                    if do_limit:
                        result.append('FETCH FIRST %d ROWS ONLY' % (high_mark - low_mark))

            return ' '.join(result), tuple(params)
        finally:
            # Finally do cleanup - get rid of the joins we created above.
            self.query.reset_refcounts(refcounts_before)
Example #26
    def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.

        If 'with_limits' is False, any limit/offset information is not included
        in the query.
        """
        if with_limits and self.query.low_mark == self.query.high_mark:
            return '', ()
        self.subquery = subquery
        refcounts_before = self.query.alias_refcount.copy()
        try:
            extra_select, order_by, group_by = self.pre_sql_setup()
            if with_limits and self.query.low_mark == self.query.high_mark:
                return '', ()
            distinct_fields = self.get_distinct()

            # This must come after 'select', 'ordering', and 'distinct' -- see
            # docstring of get_from_clause() for details.
            from_, f_params = self.get_from_clause()

            where, w_params = self.compile(
                self.where) if self.where is not None else ("", [])
            having, h_params = self.compile(
                self.having) if self.having is not None else ("", [])
            params = []
            result = ['SELECT']

            if self.query.distinct:
                result.append(
                    self.connection.ops.distinct_sql(distinct_fields))

            out_cols = []
            col_idx = 1
            for _, (s_sql, s_params), alias in self.select + extra_select:
                if alias:
                    s_sql = '%s AS %s' % (
                        s_sql, self.connection.ops.quote_name(alias))
                elif with_col_aliases:
                    s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
                    col_idx += 1
                params.extend(s_params)
                out_cols.append(s_sql)

            result.append(', '.join(out_cols))

            result.append('FROM')
            result.extend(from_)
            params.extend(f_params)

            if where:
                result.append('WHERE %s' % where)
                params.extend(w_params)

            grouping = []
            for g_sql, g_params in group_by:
                grouping.append(g_sql)
                params.extend(g_params)
            if grouping:
                if distinct_fields:
                    raise NotImplementedError(
                        "annotate() + distinct(fields) is not implemented.")
                if not order_by:
                    order_by = self.connection.ops.force_no_ordering()
                result.append('GROUP BY %s' % ', '.join(grouping))

            if having:
                result.append('HAVING %s' % having)
                params.extend(h_params)

            if order_by:
                ordering = []
                for _, (o_sql, o_params, _) in order_by:
                    ordering.append(o_sql)
                    params.extend(o_params)
                result.append('ORDER BY %s' % ', '.join(ordering))

            if with_limits:
                if self.query.high_mark is not None:
                    result.append('LIMIT %d' %
                                  (self.query.high_mark - self.query.low_mark))
                if self.query.low_mark:
                    if self.query.high_mark is None:
                        val = self.connection.ops.no_limit_value()
                        if val:
                            result.append('LIMIT %d' % val)
                    result.append('OFFSET %d' % self.query.low_mark)

            if self.query.select_for_update and self.connection.features.has_select_for_update:
                if self.connection.get_autocommit():
                    raise TransactionManagementError(
                        "select_for_update cannot be used outside of a transaction."
                    )

                # If we've been asked for a NOWAIT query but the backend does
                # not support it, raise a DatabaseError otherwise we could get
                # an unexpected deadlock.
                nowait = self.query.select_for_update_nowait
                if nowait and not self.connection.features.has_select_for_update_nowait:
                    raise DatabaseError(
                        'NOWAIT is not supported on this database backend.')
                result.append(
                    self.connection.ops.for_update_sql(nowait=nowait))

            return ' '.join(result), tuple(params)
        finally:
            # Finally do cleanup - get rid of the joins we created above.
            self.query.reset_refcounts(refcounts_before)
Example #27
        def side_effect(*args, **kwargs):
            # This could occur if a slightly earlier POST or PUT still had
            # the database locked during a DB transaction.
            from django.db.transaction import TransactionManagementError

            raise TransactionManagementError()
Example #28
 def __enter__(self):
     if get_connection(self.using).in_atomic_block:
         raise TransactionManagementError(
             "Cannot use DurableAtomic inside Atomic")
     super(DurableAtomic, self).__enter__()
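Example #28 anticipates what newer Django ships as atomic(durable=True): a guarantee that the block is the outermost transaction. A sketch, assuming DurableAtomic keeps Atomic's (using, savepoint) constructor:

from django.db import transaction
from django.db.transaction import TransactionManagementError

try:
    with transaction.atomic():
        # Nested use trips the __enter__ check from Example #28.
        with DurableAtomic(using=None, savepoint=True):
            pass
except TransactionManagementError as exc:
    print(exc)   # "Cannot use DurableAtomic inside Atomic"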
Example #29
    def run_sql(self, sql, capture=False, execute=False):
        """Run (execute and/or capture) a list of SQL statements.

        Args:
            sql (list):
                A list of SQL statements. Each entry might be a string, a
                tuple consisting of a format string and formatting arguments,
                or a subclass of :py:class:`BaseGroupedSQL`, or a callable
                that returns a list of the above.

            capture (bool, optional):
                Whether to capture any processed SQL statements.

            execute (bool, optional):
                Whether to execute any processed SQL statements and return them.

        Returns:
            list of unicode:
            The list of SQL statements executed, if passing
            ``capture=True``. Otherwise, this will just be an empty list.

        Raises:
            django.db.transaction.TransactionManagementError:
                Could not execute a batch of SQL statements inside of an
                existing transaction.
        """
        qp = self._evolver_backend.quote_sql_param
        cursor = self._cursor

        statement = None
        params = None
        out_sql = []

        try:
            batches = self._prepare_transaction_batches(self._prepare_sql(sql))

            if execute and self._connection.in_atomic_block:
                # Check if there are any statements that must run outside of
                # a transaction.
                batches = list(batches)

                for batch, use_transaction in batches:
                    if not use_transaction:
                        logging.error(
                            'Unable to execute the following SQL inside of a '
                            'transaction: %r', batch)

                        raise TransactionManagementError(
                            'Unable to execute SQL inside of an existing '
                            'transaction. See the logging for more '
                            'information.')

            for i, (batch, use_transaction) in enumerate(batches):
                if execute:
                    if use_transaction:
                        self.new_transaction()
                    else:
                        self.finish_transaction()

                if capture and i > 0:
                    if use_transaction:
                        out_sql.append('-- Start of a new transaction:')
                    else:
                        out_sql.append('-- Run outside of a transaction:')

                for statement, params in batch:
                    if capture:
                        if params:
                            out_sql.append(
                                statement %
                                tuple(qp(param) for param in params))
                        else:
                            out_sql.append(statement)

                    if execute:
                        cursor.execute(statement, params)
        except Exception as e:
            # Augment the exception so that callers can get the SQL statement
            # that failed.
            e.last_sql_statement = (statement, params)

            raise

        return out_sql
Example #30
    def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.

        If 'with_limits' is False, any limit/offset information is not included
        in the query.
        """
        self.subquery = subquery
        refcounts_before = self.query.alias_refcount.copy()

        try:
            extra_select, order_by, group_by = self.pre_sql_setup()
            distinct_fields = self.get_distinct()
            from_, f_params = self.get_from_clause()
            where, w_params = self.compile(
                self.where) if self.where is not None else ("", [])
            having, h_params = self.compile(
                self.having) if self.having is not None else ("", [])

            params = []
            result = ['FOR', ITEM_ALIAS, 'IN']
            result.extend(from_)

            # Append the FILTER (sql where).
            if where:
                result.append('FILTER')
                # quoted_params = quote_params(w_params)
                # where_partial = where % tuple(quoted_params)
                where_partial = where % tuple(w_params)
                result.append(where_partial)

                # result.append('FILTER %s ' % where % '"%s"' % w_params[0])  # FIXME: Looks like a hack right now...
                params.extend(w_params)

            result.append('RETURN')

            if self.query.distinct:
                result.append(
                    self.connection.ops.distinct_sql(distinct_fields))

            out_cols = {}
            col_idx = 1

            for _, (s_sql, s_params), alias in self.select + extra_select:
                if alias:
                    s_sql = '%s AS %s' % (
                        s_sql, self.connection.ops.quote_name(alias))
                elif with_col_aliases:
                    s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
                    col_idx += 1
                params.extend(s_params)

                # Prepare the dict output ITEM_ALIAS.
                field_name = s_sql.split('.')[
                    1]  # TODO: The alias is used here.
                prepared_s_sql = ITEM_ALIAS + '.' + field_name
                out_cols[field_name] = prepared_s_sql

            # Transform the dict into an AQL return object.
            out_cols = str(out_cols).replace("'", "")
            result.append(out_cols)
            params.extend(f_params)

            for_update_part = None
            if self.query.select_for_update and self.connection.features.has_select_for_update:
                if self.connection.get_autocommit():
                    raise TransactionManagementError(
                        "select_for_update cannot be used outside of a transaction."
                    )

                nowait = self.query.select_for_update_nowait
                skip_locked = self.query.select_for_update_skip_locked
                # If it's a NOWAIT/SKIP LOCKED query but the backend doesn't
                # support it, raise a DatabaseError to prevent a possible
                # deadlock.
                if nowait and not self.connection.features.has_select_for_update_nowait:
                    raise DatabaseError(
                        'NOWAIT is not supported on this database backend.')
                elif skip_locked and not self.connection.features.has_select_for_update_skip_locked:
                    raise DatabaseError(
                        'SKIP LOCKED is not supported on this database backend.'
                    )
                for_update_part = self.connection.ops.for_update_sql(
                    nowait=nowait, skip_locked=skip_locked)

            if for_update_part and self.connection.features.for_update_after_from:
                result.append(for_update_part)

            grouping = []
            for g_sql, g_params in group_by:
                grouping.append(g_sql)
                params.extend(g_params)
            if grouping:
                if distinct_fields:
                    raise NotImplementedError(
                        "annotate() + distinct(fields) is not implemented.")
                if not order_by:
                    order_by = self.connection.ops.force_no_ordering()
                result.append('GROUP BY %s' % ', '.join(grouping))

            if having:
                result.append('HAVING %s' % having)
                params.extend(h_params)

            if order_by:
                ordering = []
                for _, (o_sql, o_params, _) in order_by:
                    ordering.append(o_sql)
                    params.extend(o_params)
                result.append('ORDER BY %s' % ', '.join(ordering))

            if with_limits:
                if self.query.high_mark is not None:
                    result.append('LIMIT %d' %
                                  (self.query.high_mark - self.query.low_mark))
                if self.query.low_mark:
                    if self.query.high_mark is None:
                        val = self.connection.ops.no_limit_value()
                        if val:
                            result.append('LIMIT %d' % val)
                    result.append('OFFSET %d' % self.query.low_mark)

            if for_update_part and not self.connection.features.for_update_after_from:
                result.append(for_update_part)

            result = ' '.join(result), tuple(params)
            return result
        finally:
            # Finally do cleanup - get rid of the joins we created above.
            self.query.reset_refcounts(refcounts_before)