Example #1
def run_commit_hooks():
    """
    Fake transaction commit to run delayed on_commit functions
    https://medium.com/gitux/speed-up-django-transaction-hooks-tests-6de4a558ef96
    """
    with mock.patch(
            "django.db.backends.base.base.BaseDatabaseWrapper.validate_no_atomic_block",
            lambda a: False,
    ):
        transaction.get_connection().run_and_clear_commit_hooks()
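
A minimal usage sketch for the helper above (assumptions: run_commit_hooks is importable in the test module, and Invoice is a hypothetical model whose save() schedules a transaction.on_commit callback that sets notified=True):

from django.test import TestCase

class InvoiceOnCommitTests(TestCase):
    def test_on_commit_hook_fires(self):
        invoice = Invoice.objects.create(total=100)  # hypothetical model
        run_commit_hooks()  # fire the delayed hooks despite the test's wrapping transaction
        invoice.refresh_from_db()
        self.assertTrue(invoice.notified)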
Example #2
def run_commit_hooks(testcase):
    """
    Fake transaction commit to run delayed on_commit functions
    """
    import mock

    for db_name in reversed(testcase._databases_names()):
        with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.validate_no_atomic_block', lambda a: False):
            transaction.get_connection(using=db_name).run_and_clear_commit_hooks()
Example #3
 def run_commit_hooks(self):
     """
     Fake transaction commit to run delayed on_commit functions
     https://medium.com/gitux/speed-up-django-transaction-hooks-tests-6de4a558ef96
     """
     for db_name in reversed(self._databases_names()):
         with mock.patch(
                 'django.db.backends.base.base.BaseDatabaseWrapper.'
                 'validate_no_atomic_block', lambda a: False):
             transaction.get_connection(
                 using=db_name).run_and_clear_commit_hooks()
Example #4
    def run_commit_hooks(self):
        """
        This is to force on_commit and other commit hooks. Because, in testcases,
        those hooks are not run as whole test case is wrapped by outer transaction.

        Source: https://medium.com/gitux/speed-up-django-transaction-hooks-tests-6de4a558ef96
        """
        for db_name in reversed(self._databases_names()):
            with mock.patch(
                    'django.db.backends.base.base.BaseDatabaseWrapper.validate_no_atomic_block',
                    lambda a: False):
                transaction.get_connection(
                    using=db_name).run_and_clear_commit_hooks()
Example #5
    def __enter__(self):

        connection = transaction.get_connection(self.using)

        cache = get_cache(OUTER_ATOMIC_CACHE_NAME)

        # By default it is enabled.
        enable = True
        # If name is set it is only enabled if requested by calling enable_named_outer_atomic().
        if self.name:
            enable = cache.get(self.name, False)

        if enable:
            # TestCase setup nests tests in two atomics - one for the test class and one for the individual test.
            # The outermost atomic starts a transaction - so does not have a savepoint.
            # The inner atomic starts a savepoint around the test.
            # So, for tests only, there should be exactly one savepoint_id and two atomic_for_testcase_calls.
            # atomic_for_testcase_calls below is added in a monkey-patch for tests only.
            if self.ALLOW_NESTED and (self.atomic_for_testcase_calls - len(connection.savepoint_ids)) < 1:  # lint-amnesty, pylint: disable=no-member
                raise transaction.TransactionManagementError('Cannot be inside an atomic block.')

            # Otherwise, this shouldn't be nested in any atomic block.
            if not self.ALLOW_NESTED and connection.in_atomic_block:
                raise transaction.TransactionManagementError('Cannot be inside an atomic block.')

        super().__enter__()
Example #6
    def apply_async(self, *args, **kwargs):
        # Delay the task unless the client requested otherwise or transactions
        # aren't being managed (i.e. the signal handlers won't send the task).

        celery_eager = _get_celery_settings('CELERY_ALWAYS_EAGER')

        # New setting to run eager task post transaction
        # defaults to `not CELERY_ALWAYS_EAGER`
        eager_transaction = _get_celery_settings('CELERY_EAGER_TRANSACTION',
                                                 not celery_eager)

        if django.VERSION < (1, 6):

            if transaction.is_managed() and eager_transaction:
                if not transaction.is_dirty():
                    # Always mark the transaction as dirty
                    # because we push task in queue that must be fired or discarded
                    if 'using' in kwargs:
                        transaction.set_dirty(using=kwargs['using'])
                    else:
                        transaction.set_dirty()
                _get_task_queue().append((self, args, kwargs))
            else:
                apply_async_orig = super(PostTransactionTask, self).apply_async
                return apply_async_orig(*args, **kwargs)

        else:

            connection = get_connection()
            if connection.in_atomic_block and eager_transaction:
                _get_task_queue().append((self, args, kwargs))
            else:
                return self.original_apply_async(*args, **kwargs)
Example #7
    def test_always_rollback(self, _email_user):
        connection = transaction.get_connection()
        with patch.object(connection, 'rollback', wraps=connection.rollback) as mock_rollback:
            with self.assertRaises(TestException):
                confirm_email_change(self.request, self.key)

            mock_rollback.assert_called_with()
Example #8
 def __enter__(self, *args, **kwargs):
     connection = transaction.get_connection(self.using)
     connection._setlistspy_celery_atomic = True
     if not hasattr(connection, '_setlistspy_celery_on_commit'):
         connection._setlistspy_celery_on_commit = []
     connection._setlistspy_celery_on_commit.append([])
     super().__enter__(*args, **kwargs)
Example #9
def table(request, database, table, page, per_page):
    # TODO: unit test
    # NOTE: the table name is interpolated into the SQL unescaped, so this
    # view must only ever receive trusted table names.
    conn = get_connection(using=database)
    cursor = conn.cursor()

    cursor.execute("""SELECT COUNT('id') FROM {table};""".format(table=table))
    total_count = cursor.fetchone()[0]

    offset = (page - 1) * per_page
    to = min(offset + per_page, total_count)
    count = to - offset

    cursor.execute(
        """SELECT * FROM {table} OFFSET {offset} LIMIT {limit};""".format(
            table=table, offset=offset, limit=per_page))

    return JsonResponse(
        data={
            'page': page,
            'total_page': 1 + (total_count // per_page),
            'from': offset + 1,
            'to': to,
            'count': count,
            'total_count': total_count,
            'columns': [column.name for column in cursor.description],
            'results': [[str(column) for column in record]
                        for record in cursor.fetchall()],
        })
Example #10
    def __enter__(self):
        connection = transaction.get_connection(self.using)

        if not connection.in_atomic_block:
            # Reset state when entering an outermost atomic block.
            connection.commit_on_exit = True
            connection.needs_rollback = False
            if not connection.get_autocommit():
                # Pretend we're already in an atomic block to bypass the code
                # that disables autocommit to enter a transaction, and make a
                # note to deal with this case in __exit__.
                connection.in_atomic_block = True
                connection.commit_on_exit = False

        if connection.in_atomic_block:
            # We're already in a transaction; create a savepoint, unless we
            # were told not to or we're already waiting for a rollback. The
            # second condition avoids creating useless savepoints and prevents
            # overwriting needs_rollback until the rollback is performed.
            if self.savepoint and not connection.needs_rollback:
                sid = connection.savepoint()
                connection.savepoint_ids.append(sid)
            else:
                connection.savepoint_ids.append(None)
        else:
            if self.immediate:
                connection.set_autocommit(False)
                connection.cursor().execute('BEGIN IMMEDIATE')

            else:
                connection.set_autocommit(
                    False, force_begin_transaction_with_broken_autocommit=True)

            connection.in_atomic_block = True
Example #11
 def inner(self, exc_type, exc_value, traceback):
     needs_rollback = get_connection(self.using).needs_rollback
     try:
         original(self, exc_type, exc_value, traceback)
     finally:
         cachalot_caches.exit_atomic(
             self.using, exc_type is None and not needs_rollback)
Example #12
 def _execute_processing_script(self,
                                base_module,
                                script_file_path,
                                sql_params,
                                sql_renderer=DEFAULT_SQL_RENDERER_METHOD):
     conn = transaction.get_connection()
     sql = pkgutil.get_data(base_module, script_file_path).decode("utf-8")
     for sql_stmt in sql_split(sql):
         sql_stmt = sql_stmt.strip()
         if sql_stmt:
             sql_stmt, params = sql_renderer(sql_stmt, sql_params)
             with conn.cursor() as cur:
                 try:
                     cur.execute(sql_stmt, params)
                 except (ProgrammingError, IndexError) as exc:
                     if isinstance(sql_stmt, bytes):
                         sql_stmt = sql_stmt.decode("utf-8")
                     msg = [
                         f"ERROR in SQL statement: '{exc}'",
                         f"Script file {os.path.join(base_module.replace('.', os.path.sep), script_file_path)}",
                         f"STATEMENT: {sql_stmt}",
                         f"PARAMS: {params}",
                         f"INPUT_PARAMS: {sql_params}",
                     ]
                     LOG.error(os.linesep.join(msg))
                     raise exc
Example #13
 def _inner(*args, **kwargs):
     connection = transaction.get_connection(using=using)
     with connection.cursor() as c:
         c.execute('SET session_replication_role = replica')
         ret = func(*args, **kwargs)
         c.execute('SET session_replication_role = DEFAULT')
     return ret
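
Example #13 shows only the inner wrapper; here is a minimal sketch of the enclosing decorator it implies. The name without_db_triggers is an assumption, and the try/finally around the wrapped call is an addition so the session role is restored even if the function raises (PostgreSQL only):

import functools
from django.db import transaction

def without_db_triggers(using=None):  # hypothetical name
    def decorator(func):
        @functools.wraps(func)
        def _inner(*args, **kwargs):
            connection = transaction.get_connection(using=using)
            with connection.cursor() as c:
                # 'replica' makes PostgreSQL skip ordinary triggers
                c.execute('SET session_replication_role = replica')
                try:
                    ret = func(*args, **kwargs)
                finally:
                    c.execute('SET session_replication_role = DEFAULT')
            return ret
        return _inner
    return decorator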
Example #14
 def __exit__(self, exc_type, exc_value, traceback):
     connection = get_connection(self.using)
     output_logged_requests = connection.output_logged_requests.pop()
     if connection.output_logged_requests:
         connection.output_logged_requests[-1] += output_logged_requests
     else:
         for output_logged_request in output_logged_requests:
             output_logged_request.create()
Example #15
def check_select_for_update(request=None):
    if not settings.SELECT_FOR_UPDATE_ENABLED:
        return False
    atomic_transaction = transaction.get_connection().in_atomic_block
    if request:
        return request.method not in SAFE_METHODS and atomic_transaction
    return atomic_transaction
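
A usage sketch for check_select_for_update (the Order model and the DRF-style get_queryset are hypothetical; SAFE_METHODS is assumed to come from the same module as the function):

def get_queryset(self):
    qs = Order.objects.all()  # hypothetical model
    # Lock rows only for unsafe methods inside an atomic block;
    # select_for_update() raises outside a transaction.
    if check_select_for_update(self.request):
        qs = qs.select_for_update()
    return qs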
Example #16
    def _handle_related(self, instance) -> None:
        """
        Handle related instances changing by sending a group of tasks,
        assuming 'get_instances_from_related' document method always returns list of ids or None
        """

        if not DEDConfig.autosync_enabled():
            return

        sync_group = []
        for doc in registry._get_related_doc(instance):
            doc_instance = doc(related_instance_to_ignore=instance)
            related_model = doc_instance.Django.model
            if (not self._is_sync_allowed(related_model) or not isinstance(
                    instance, doc_instance.Django.related_models)):
                continue
            related = doc_instance.get_instances_from_related(instance)
            if related is None:
                continue
            task = related_model.get_sync_task()
            sync_group += [task.s(obj_id) for obj_id in related]
        if not sync_group:
            return
        sync_group = group(sync_group)
        connection = transaction.get_connection()
        if not connection.in_atomic_block:
            sync_group()
        else:
            transaction.on_commit(lambda: sync_group())
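
The dispatch decision at the end of Example #16 reduces to this reusable pattern (a sketch, not code from the project):

from django.db import transaction

def run_now_or_after_commit(fn):
    # Inside an atomic block, defer until the transaction commits;
    # otherwise run immediately.
    if transaction.get_connection().in_atomic_block:
        transaction.on_commit(fn)
    else:
        fn()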
Example #17
    def __enter__(self):
        connection = get_connection(self.using)

        if not hasattr(connection, 'transaction_signals_context_list'):
            connection.transaction_signals_context_list = []

        connection.transaction_signals_context_list.append(TransactionSignalsContext())
Example #18
def _rebuild_kb_chunk(data):
    """Re-render a chunk of documents.

    Note: Don't use host components when making redirects to wiki pages; those
    redirects won't be auto-pruned when they're 404s.

    """
    log.info('Rebuilding %s documents.' % len(data))

    pin_this_thread()  # Stick to master.

    messages = []
    start = time.time()
    for pk in data:
        message = None
        try:
            document = Document.objects.get(pk=pk)

            # If we know a redirect link to be broken (i.e. if it looks like a
            # link to a document but the document isn't there), log an error:
            url = document.redirect_url()
            if (url and points_to_document_view(url) and
                    not document.redirect_document()):
                log.warn('Invalid redirect document: %d' % pk)

            html = document.parse_and_calculate_links()
            if document.html != html:
                # We are calling update() here so that we only update the html
                # column instead of all of them. This bypasses post_save
                # signal handlers like the one that triggers reindexing.
                # See bug 797038 and bug 797352.
                Document.objects.filter(pk=pk).update(html=html)
                statsd.incr('wiki.rebuild_chunk.change')
            else:
                statsd.incr('wiki.rebuild_chunk.nochange')
        except Document.DoesNotExist:
            message = 'Missing document: %d' % pk
        except Revision.DoesNotExist:
            message = 'Missing revision for document: %d' % pk
        except ValidationError as e:
            message = 'ValidationError for %d: %s' % (pk, e.messages[0])
        except SlugCollision:
            message = 'SlugCollision: %d' % pk
        except TitleCollision:
            message = 'TitleCollision: %d' % pk

        if message:
            log.debug(message)
            messages.append(message)
    d = time.time() - start
    statsd.timing('wiki.rebuild_chunk', int(round(d * 1000)))

    if messages:
        subject = ('[%s] Exceptions raised in _rebuild_kb_chunk()' %
                   settings.PLATFORM_NAME)
        mail_admins(subject=subject, message='\n'.join(messages))
    if not transaction.get_connection().in_atomic_block:
        transaction.commit()

    unpin_this_thread()  # Not all tasks need to use the master.
Example #19
def auto_archive_old_questions():
    """Archive all questions that were created over 180 days ago"""
    # Set up logging so it doesn't send Ricky email.
    logging.basicConfig(level=logging.ERROR)

    # Get a list of ids of questions we're going to go change. We need
    # a list of ids so that we can feed it to the update, but then
    # also know what we need to update in the index.
    days_180 = datetime.now() - timedelta(days=180)
    q_ids = list(Question.objects.filter(is_archived=False)
                                 .filter(created__lte=days_180)
                                 .values_list('id', flat=True))

    if q_ids:
        log.info('Updating %d questions', len(q_ids))

        sql = """
            UPDATE questions_question
            SET is_archived = 1
            WHERE id IN (%s)
            """ % ','.join(map(str, q_ids))

        cursor = connection.cursor()
        cursor.execute(sql)
        if not transaction.get_connection().in_atomic_block:
            transaction.commit()

        if settings.ES_LIVE_INDEXING:
            try:
                # So... the first time this runs, it'll handle 160K
                # questions or so which stresses everything. Thus we
                # do it in chunks because otherwise this won't work.
                #
                # After we've done this for the first time, we can nix
                # the chunking code.

                from kitsune.search.utils import chunked
                for chunk in chunked(q_ids, 100):

                    # Fetch all the documents we need to update.
                    es_docs = get_documents(QuestionMappingType, chunk)

                    log.info('Updating %d index documents', len(es_docs))

                    documents = []

                    # For each document, update the data and stick it
                    # back in the index.
                    for doc in es_docs:
                        doc[u'question_is_archived'] = True
                        doc[u'indexed_on'] = int(time.time())
                        documents.append(doc)

                    QuestionMappingType.bulk_index(documents)

            except ES_EXCEPTIONS:
                # Something happened with ES, so let's push index
                # updating into an index_task which retries when it
                # fails because of ES issues.
                index_task.delay(QuestionMappingType, q_ids)
Example #20
    def save(self, **kwargs):
        obj = super().save(**kwargs)
        fields_changed_dict = self.tracker.changed()

        # not sending post_commit signal if fields are not changed
        date_updated_only = (
            len(fields_changed_dict) == 1 and 'date_updated' in fields_changed_dict)
        no_changes = not fields_changed_dict or date_updated_only
        if no_changes:
            return obj

        connection = transaction.get_connection()
        if connection.in_atomic_block:
            if model_tracker_cache.get(self.cache_key):
                # Maintain the oldest values, and adds any new ones.
                fields_changed_dict.update(model_tracker_cache[self.cache_key])
                model_tracker_cache[self.cache_key] = fields_changed_dict
            else:
                model_tracker_cache[self.cache_key] = fields_changed_dict

            transaction.on_commit(self.post_atomic_commit_handler)

        else:
            created = 'id' in fields_changed_dict
            self.post_commit_hook(fields_changed_dict.keys(), created,
                                  fields_changed_dict)

        return obj
Example #21
 def inner(self, exc_type, exc_value, traceback):
     needs_rollback = get_connection(self.using).needs_rollback
     try:
         original(self, exc_type, exc_value, traceback)
     finally:
         cachalot_caches.exit_atomic(
             self.using, exc_type is None and not needs_rollback)
Example #22
    def apply_async(self, *args, **kwargs):
        # Delay the task unless the client requested otherwise or transactions
        # aren't being managed (i.e. the signal handlers won't send the task).

        if django.VERSION < (1, 6):

            if transaction.is_managed() and not current_app.conf.CELERY_ALWAYS_EAGER:
                if not transaction.is_dirty():
                    # Always mark the transaction as dirty
                    # because we push task in queue that must be fired or discarded
                    if 'using' in kwargs:
                        transaction.set_dirty(using=kwargs['using'])
                    else:
                        transaction.set_dirty()
                _get_task_queue().append((self, args, kwargs))
            else:
                apply_async_orig = super(PostTransactionTask, self).apply_async
                return apply_async_orig(*args, **kwargs)

        else:

            connection = get_connection()
            if connection.in_atomic_block and not getattr(current_app.conf, 'CELERY_ALWAYS_EAGER', False):
                _get_task_queue().append((self, args, kwargs))
            else:
                return self.original_apply_async(*args, **kwargs)
Example #23
def _transactions_mark_order_dirty(order_id, using=None):
    if "PYTEST_CURRENT_TEST" in os.environ:
        # We don't care about Order.objects.create() calls in test code so let's try to figure out if this is test code
        # or not.
        for frame in inspect.stack():
            if 'pretix/base/models/orders' in frame.filename:
                continue
            elif 'test_' in frame.filename or 'conftest.py' in frame.filename:
                return
            elif 'pretix/' in frame.filename or 'pretix_' in frame.filename:
                # This went through non-test code, let's consider it non-test
                break

    if order_id is None:
        return

    conn = transaction.get_connection(using)
    if not conn.in_atomic_block:
        _fail(
            "You modified an Order, OrderPosition, or OrderFee object in a way that should create "
            "a new Transaction object within the same database transaction, however you are not "
            "doing it inside a database transaction!")

    if getattr(dirty_transactions, 'order_ids', None) is None:
        dirty_transactions.order_ids = set()

    if _check_for_dirty_orders not in [
            func for savepoint_id, func in conn.run_on_commit
    ]:
        transaction.on_commit(_check_for_dirty_orders, using)
        # Necessary to clean up after old threads whose transactions were rolled back
        dirty_transactions.order_ids.clear()

    dirty_transactions.order_ids.add(order_id)
Example #24
def import_communes_test_data():
    with get_connection().cursor() as cursor:
        cursor.execute(
            "ALTER TABLE data_france_commune DROP CONSTRAINT IF EXISTS commune_departement_constraint;"
        )

        with (DATA_DIR / "commune.csv").open() as f:
            cursor.copy_from(
                f,
                "data_france_commune",
                columns=[
                    "id",
                    "code",
                    "type",
                    "nom",
                    "type_nom",
                    "population_municipale",
                    "population_cap",
                    "geometry",
                    "search",
                    "commune_parent_id",
                ],
            )
        with (DATA_DIR / "codepostal.csv").open() as f:
            cursor.copy_from(
                f,
                "data_france_codepostal",
                columns=["id", "code"],
            )
        with (DATA_DIR / "codepostal_communes.csv").open() as f:
            cursor.copy_from(
                f,
                "data_france_codepostal_communes",
                columns=["id", "codepostal_id", "commune_id"],
            )
Example #25
 def prepare_transaction(self):
     # Django DB connections are thread safe
     connection = get_connection(self.using)
     self.connection = connection
     outermost_transaction = getattr(connection, "outermost_transaction",
                                     None)
     is_self_outermost_transaction = outermost_transaction is None
     if is_self_outermost_transaction:
         assert getattr(connection, "tran_stack", []) == []
         self.is_outermost = True
         connection.outermost_transaction = self
         connection.tran_stack = [self]
         self.key_gen = self.create_key_generator()
         self.id = self.key_gen()
         checkin_actor, login_user = self.checkin_actor, self.login_user
         if checkin_actor:
             assert checkin_actor.status in (Status.NORMAL, Status.NEW)
             self.set(checkin_actor)
         if login_user:
             assert login_user.status in (Status.NORMAL, Status.NEW)
             self.set(login_user)
     else:
         self.is_outermost = False
         tran_stack = connection.tran_stack
         parent = tran_stack[-1]
         if not self.is_readonly:
             assert not parent.is_readonly, "Only a ReadonlyTransaction may be nested inside a ReadonlyTransaction"
         tran_stack.append(self)
         self.key_gen = outermost_transaction.key_gen
         self.id = outermost_transaction.id
         if not self._checkin_actor:
             self._checkin_actor = parent.checkin_actor
         if not self._login_user:
             self._login_user = parent._login_user
         self.connect_storage(outermost_transaction)
Example #26
 def get_tenant(self, model, hostname, request):
     """Override the tenant selection logic."""
     connections["default"].set_schema_to_public()
     tenant_schema = create_schema_name(request.user.account)
     tenant = TENANTS.get_tenant(tenant_schema)
     if tenant is None:
         if request.user.system:
             try:
                 tenant = Tenant.objects.get(schema_name=tenant_schema)
             except Tenant.DoesNotExist:
                 raise Http404()
         else:
             with transaction.atomic():
                 try:
                     tenant = Tenant.objects.get(schema_name=tenant_schema)
                 except Tenant.DoesNotExist:
                     cursor = transaction.get_connection().cursor()
                     cursor.execute("LOCK TABLE public.api_tenant in SHARE ROW EXCLUSIVE MODE")
                     tenant, created = Tenant.objects.get_or_create(schema_name=tenant_schema)
                     if created:
                         seed_permissions(tenant=tenant)
                         seed_roles(tenant=tenant)
                         seed_group(tenant=tenant)
         TENANTS.save_tenant(tenant)
     return tenant
Example #27
    def test_always_rollback(self, _email_user):
        connection = transaction.get_connection()
        with patch.object(connection, 'rollback', wraps=connection.rollback) as mock_rollback:
            with self.assertRaises(TestException):
                confirm_email_change(self.request, self.key)

            mock_rollback.assert_called_with()
Example #28
def check_select_for_update(request=None):
    if not settings.SELECT_FOR_UPDATE_ENABLED:
        return False
    atomic_transaction = transaction.get_connection().in_atomic_block
    if request:
        return request.method not in SAFE_METHODS and atomic_transaction
    return atomic_transaction
Example #29
    def apply_async(self, *args, **kwargs):
        # Delay the task unless the client requested otherwise or transactions
        # aren't being managed (i.e. the signal handlers won't send the task).

        celery_eager = _get_celery_settings('CELERY_ALWAYS_EAGER')

        # New setting to run eager task post transaction
        # defaults to `not CELERY_ALWAYS_EAGER`
        eager_transaction = _get_celery_settings('CELERY_EAGER_TRANSACTION',
                                                 not celery_eager)

        if django.VERSION < (1, 6):

            if transaction.is_managed() and eager_transaction:
                if not transaction.is_dirty():
                    # Always mark the transaction as dirty
                    # because we push task in queue that must be fired or discarded
                    if 'using' in kwargs:
                        transaction.set_dirty(using=kwargs['using'])
                    else:
                        transaction.set_dirty()
                _get_task_queue().append((self, args, kwargs))
            else:
                apply_async_orig = super(PostTransactionTask, self).apply_async
                return apply_async_orig(*args, **kwargs)

        else:

            connection = get_connection()
            if connection.in_atomic_block and eager_transaction:
                _get_task_queue().append((self, args, kwargs))
            else:
                return self.original_apply_async(*args, **kwargs)
Example #30
    def handle(self, **options):
        # Set up logging so it doesn't send Ricky email.
        logging.basicConfig(level=logging.ERROR)

        # Get a list of ids of questions we're going to go change. We need
        # a list of ids so that we can feed it to the update, but then
        # also know what we need to update in the index.
        days_180 = datetime.now() - timedelta(days=180)
        q_ids = list(
            Question.objects.filter(is_archived=False).filter(
                created__lte=days_180).values_list("id", flat=True))

        if q_ids:
            log.info("Updating %d questions", len(q_ids))

            sql = """
                UPDATE questions_question
                SET is_archived = 1
                WHERE id IN (%s)
                """ % ",".join(map(str, q_ids))

            cursor = connection.cursor()
            cursor.execute(sql)
            if not transaction.get_connection().in_atomic_block:
                transaction.commit()

            if settings.ES_LIVE_INDEXING:
                # elastic v7 code:
                answer_ids = list(
                    Answer.objects.filter(question_id__in=q_ids).values_list(
                        "id", flat=True))
                index_objects_bulk.delay("QuestionDocument", q_ids)
                index_objects_bulk.delay("AnswerDocument", answer_ids)
Example #31
    def __enter__(self):

        if not self.ENABLED:
            return

        connection = transaction.get_connection(self.using)

        if connection.in_atomic_block:
            raise transaction.TransactionManagementError('Cannot be inside an atomic block.')

        if getattr(connection, 'commit_on_success_block_level', 0) == 0:
            connection.commit_on_success_block_level = 1

            # This will set the transaction isolation level to READ COMMITTED for the next transaction.
            if self.read_committed is True:
                if connection.vendor == 'mysql':
                    cursor = connection.cursor()
                    cursor.execute("SET TRANSACTION ISOLATION LEVEL READ COMMITTED")

            # We aren't in a transaction yet; create one.
            # The usual way to start a transaction is to turn autocommit off.
            # However, some database adapters (namely sqlite3) don't handle
            # transactions and savepoints properly when autocommit is off.
            # In such cases, start an explicit transaction instead, which has
            # the side-effect of disabling autocommit.
            if connection.features.autocommits_when_autocommit_is_off:
                connection._start_transaction_under_autocommit()  # pylint: disable=protected-access
                connection.autocommit = False
            else:
                connection.set_autocommit(False)
        else:
            if self.read_committed is True:
                raise transaction.TransactionManagementError('Cannot change isolation level when nested.')

            connection.commit_on_success_block_level += 1
Example #32
 def transaction_committed(self):
     conn = transaction.get_connection()
     if conn.in_atomic_block:
         # committed nested transaction - ensure hook is attached
         self._transaction_savepts = conn.savepoint_ids
         conn.on_commit(self.transaction_committed)
     else:
         for using, instances in self._transaction_removed.items():
             if instances:
                 LOGGER.debug(
                     "Committing %d deferred Solr delete(s) after transaction",
                     len(instances))
                 if self._backend_queue:
                     self._backend_queue.delete(self.__class__, using,
                                                list(instances.values()))
                 else:
                     backend = self.get_backend(using)
                     if backend is not None:
                         for instance in instances.values():
                             backend.remove(instance)
         for using, instances in self._transaction_added.items():
             if instances:
                 LOGGER.debug(
                     "Committing %d deferred Solr update(s) after transaction",
                     len(instances))
                 if self._backend_queue:
                     self._backend_queue.add(self.__class__, using,
                                             list(instances.values()))
                 else:
                     backend = self.get_backend(using)
                     if backend is not None:
                         backend.update(self, instances.values())
         self.reset()
Example #33
    def __enter__(self):
        connection = get_connection(self.using)
        if not connection.in_atomic_block:
            connection.begin_immediate = True
            self.set_begin_immediate = True

        super().__enter__()
Example #34
 def save(self, *args, **kwargs):
     # Wrap around a transaction only if we're not already in a transaction.
     connection = transaction.get_connection()
     if not connection.in_atomic_block:
         with transaction.atomic():
             return super().save(*args, **kwargs)
     return super().save(*args, **kwargs)
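
Since atomic() is re-entrant, a broadly equivalent and shorter idiom is to pass savepoint=False, which joins an already-open transaction without creating a savepoint (a sketch; on an exception the surrounding transaction is marked for rollback either way):

def save(self, *args, **kwargs):
    # Starts a transaction if none is open; otherwise joins the outer
    # atomic block without adding a savepoint.
    with transaction.atomic(savepoint=False):
        return super().save(*args, **kwargs)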
Example #35
    def test_reprepare_query_in_transaction(self):
        """
        Given an ORM query is prepared
        And   a database transaction is started
        And   the query has been deallocated in the database
        When  the query is executed
        Then  it should re-prepare without errors
        And   it should execute as expected
        """
        def all_species():
            return Species.prepare.all()

        PreparedStatementController().register_qs("all_species", all_species)
        PreparedStatementController().prepare_qs_stmt("all_species",
                                                      force=True)

        # deallocate the query to simulate changing database session
        PreparedStatementController().prepared_statements["all_species"].deallocate()

        with transaction.atomic():
            qs = execute_stmt("all_species")
            cxn = transaction.get_connection()
            self.assertTrue(cxn.in_atomic_block)

        self.assertEqual(len(qs), 3)
Example #36
def _send_tasks(**kwargs):
    """Sends all delayed Celery tasks.

    Called after a transaction is committed or we leave a transaction
    management block in which no changes were made (effectively a commit).
    """

    # Detect test mode through the CELERY_ALWAYS_EAGER setting.
    # We assume all celery transaction tests on Django 1.8+ run inside TestCase;
    # otherwise we'd get atomic exceptions.
    celery_eager = _get_celery_settings('CELERY_ALWAYS_EAGER')

    # If we detect higher up nested atomic block, continue
    connection = get_connection()
    if django.VERSION >= (1, 8):
        min_number_transactions = 1 if celery_eager else 0
        if (not celery_eager and connection.in_atomic_block) or len(
                connection.savepoint_ids) > min_number_transactions:
            return
    elif connection.in_atomic_block:
        return

    queue = _get_task_queue()
    while queue:
        tsk, args, kwargs = queue.pop(0)
        tsk.original_apply_async(*args, **kwargs)
Example #37
    def __enter__(self):

        connection = transaction.get_connection(self.using)

        # TestCase setup nests tests in two atomics - one for the test class and one for the individual test.
        # The outermost atomic starts a transaction - so does not have a savepoint.
        # The inner atomic starts a savepoint around the test.
        # So, for tests only, there should be exactly one savepoint_id and two atomic_for_testcase_calls.
        # atomic_for_testcase_calls below is added in a monkey-patch for tests only.
        if self.ALLOW_NESTED and (self.atomic_for_testcase_calls -
                                  len(connection.savepoint_ids)) < 1:
            raise transaction.TransactionManagementError(
                'Cannot be inside an atomic block.')

        # Otherwise, this shouldn't be nested in any atomic block.
        if not self.ALLOW_NESTED and connection.in_atomic_block:
            raise transaction.TransactionManagementError(
                'Cannot be inside an atomic block.')

        # This will set the transaction isolation level to READ COMMITTED for the next transaction.
        if self.read_committed is True:
            if connection.vendor == 'mysql':
                cursor = connection.cursor()
                cursor.execute(
                    "SET TRANSACTION ISOLATION LEVEL READ COMMITTED")

        super(OuterAtomic, self).__enter__()
Example #38
    def backup_instance(self, storage, instance, query_args=None):
        """ query_args is a dict in a form of
        {
            'model_backup_name': [id_list],
            'model_backup_name3': [], #no data
        }
        If there is no key in query_args, queryset is not filtered
        """
        with instance_context(instance):
            db = get_instance_db(instance)

            # get migrations
            targets = self.get_instance_migrations(instance)
            storage.start_model(self.MIGRATIONS_STORAGE)
            for target in targets:
                storage.append(target)
            storage.end_model()

            with transaction.atomic(using=db):
                cursor = transaction.get_connection(db).cursor()
                cursor.execute(
                    'SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;')
                for model in self.default_sorted:
                    options = self.get_options_for_model(model)
                    storage.start_model(options.get_name())
                    try:
                        options.backup(storage, query_args)
                    except Exception:
                        logger.warning('Exception for model %s',
                                       model,
                                       exc_info=1)
                        raise
                    storage.end_model()
Example #39
def get_df(lname):
    conn = get_connection('datastore')

    gc_df = pd.read_sql('select * from geometry_columns',
                        conn,
                        index_col='f_table_name')
    srid = gc_df.loc[lname].srid
    gtype = gc_df.loc[lname].type
    gname = gc_df.loc[lname].f_geometry_column

    if gtype in ('POLYGON', 'MULTIPOLYGON'):
        sql = "select *, st_buffer({}, 0) as the_geom_clean from {} where {} is not null".format(
            gname, lname, gname)
    else:
        sql = "select *, {} as the_geom_clean from {} where {} is not null".format(
            gname, lname, gname)
    # print sql
    gdf = gpd.GeoDataFrame.from_postgis(sql,
                                        conn,
                                        geom_col='the_geom_clean',
                                        crs={
                                            'init': 'epsg:{}'.format(srid),
                                            'no_defs': True
                                        })
    # print lname, srid
    gdf.to_crs(epsg="3035", inplace=True)
    return gdf
Example #40
    def __enter__(self):

        connection = transaction.get_connection(self.using)

        cache = get_cache(OUTER_ATOMIC_CACHE_NAME)

        # By default it is enabled.
        enable = True
        # If name is set it is only enabled if requested by calling enable_named_outer_atomic().
        if self.name:
            enable = cache.get(self.name, False)

        if enable:
            # TestCase setup nests tests in two atomics - one for the test class and one for the individual test.
            # The outermost atomic starts a transaction - so does not have a savepoint.
            # The inner atomic starts a savepoint around the test.
            # So, for tests only, there should be exactly one savepoint_id and two atomic_for_testcase_calls.
            # atomic_for_testcase_calls below is added in a monkey-patch for tests only.
            if self.ALLOW_NESTED and (self.atomic_for_testcase_calls - len(connection.savepoint_ids)) < 1:
                raise transaction.TransactionManagementError('Cannot be inside an atomic block.')

            # Otherwise, this shouldn't be nested in any atomic block.
            if not self.ALLOW_NESTED and connection.in_atomic_block:
                raise transaction.TransactionManagementError('Cannot be inside an atomic block.')

            # This will set the transaction isolation level to READ COMMITTED for the next transaction.
            if self.read_committed is True:
                if connection.vendor == 'mysql':
                    cursor = connection.cursor()
                    cursor.execute("SET TRANSACTION ISOLATION LEVEL READ COMMITTED")

        super(OuterAtomic, self).__enter__()
Example #41
 def apply_async(cls, *args, **kwargs):
     # Delay the task unless the client requested otherwise or transactions
     # aren't being managed (i.e. the signal handlers won't send the task).
     connection = get_connection()
     if connection.in_atomic_block and not getattr(current_app.conf, 'CELERY_ALWAYS_EAGER', False):
         _get_task_queue().append((cls, args, kwargs))
     else:
         return cls.original_apply_async(*args, **kwargs)
Example #42
 def __exit__(self, exc_type, exc_value, traceback):
     self._no_monkey.__exit__(self, exc_type, exc_value, traceback)
     connection = get_connection(self.using)
     if not connection.closed_in_transaction and exc_type is None and \
             not connection.needs_rollback:
         transaction_state.commit()
     else:
         transaction_state.rollback()
Example #43
 def __exit__(self, exc_type, exc_value, traceback):
     connection = get_connection(self.using)
     transaction_signals_context = connection.transaction_signals_context_list.pop()
     if not exc_value:
         if len(connection.transaction_signals_context_list) == 0:
             transaction_signals_context.handle_all()
         else:
             connection.transaction_signals_context_list[-1].join(transaction_signals_context)
Example #44
 @contextmanager  # assumed: the generator below must be wrapped as a context manager (from contextlib import contextmanager)
 def commit_on_success(using=None):  # noqa
     connection = transaction.get_connection(using)
     if connection.features.autocommits_when_autocommit_is_off:
         # ignore stupid warnings and errors
         yield
     else:
         with transaction.atomic(using):
             yield
Example #45
 def _on_exit_without_update(self, using):
     '''
     Clear signals on transaction exit, even if neither commit nor rollback
     happened.
     '''
     if self._has_signals():
         connection = get_connection(using)
         self._remove_signals(connection.savepoint_ids)
Example #46
def update_question_vote_chunk(data):
    """Update num_votes_past_week for a number of questions."""

    # First we recalculate num_votes_past_week in the db.
    log.info('Calculating past week votes for %s questions.' % len(data))

    ids = ','.join(map(str, data))
    sql = """
        UPDATE questions_question q
        SET num_votes_past_week = (
            SELECT COUNT(created)
            FROM questions_questionvote qv
            WHERE qv.question_id = q.id
            AND qv.created >= DATE(SUBDATE(NOW(), 7))
        )
        WHERE q.id IN (%s);
        """ % ids
    cursor = connection.cursor()
    cursor.execute(sql)
    if not transaction.get_connection().in_atomic_block:
        transaction.commit()

    # Next we update our index with the changes we made directly in
    # the db.
    if data and settings.ES_LIVE_INDEXING:
        # Get the data we just updated from the database.
        sql = """
            SELECT id, num_votes_past_week
            FROM questions_question
            WHERE id in (%s);
            """ % ids
        cursor = connection.cursor()
        cursor.execute(sql)

        # Since this returns (id, num_votes_past_week) tuples, we can
        # convert that directly to a dict.
        id_to_num = dict(cursor.fetchall())

        try:
            # Fetch all the documents we need to update.
            from kitsune.questions.models import QuestionMappingType
            from kitsune.search import es_utils
            es_docs = es_utils.get_documents(QuestionMappingType, data)

            # For each document, update the data and stick it back in the
            # index.
            for doc in es_docs:
                # Note: Need to keep this in sync with
                # Question.extract_document.
                num = id_to_num[int(doc[u'id'])]
                doc[u'question_num_votes_past_week'] = num

                QuestionMappingType.index(doc, id_=doc['id'])
        except ES_EXCEPTIONS:
            # Something happened with ES, so let's push index updating
            # into an index_task which retries when it fails because
            # of ES issues.
            index_task.delay(QuestionMappingType, id_to_num.keys())
Example #47
def __patched__exit__(self, exc_type, exc_value, traceback):
    connection = get_connection(self.using)

    if connection.savepoint_ids:
        sid = connection.savepoint_ids.pop()
    else:
        # Prematurely unset this flag to allow using commit or rollback.
        connection.in_atomic_block = False

    try:
        if exc_type is None and not connection.needs_rollback:
            if connection.in_atomic_block:
                # Release savepoint if there is one
                if sid is not None:
                    try:
                        connection.savepoint_commit(sid)
                        transaction.signals.post_commit.send(None)
                    except DatabaseError:
                        connection.savepoint_rollback(sid)
                        transaction.signals.post_rollback.send(None)
                        raise
            else:
                # Commit transaction
                try:
                    connection.commit()
                    transaction.signals.post_commit.send(None)
                except DatabaseError:
                    connection.rollback()
                    transaction.signals.post_rollback.send(None)
                    raise
        else:
            # This flag will be set to True again if there isn't a savepoint
            # allowing to perform the rollback at this level.
            connection.needs_rollback = False
            if connection.in_atomic_block:
                # Roll back to savepoint if there is one, mark for rollback
                # otherwise.
                if sid is None:
                    connection.needs_rollback = True
                else:
                    connection.savepoint_rollback(sid)
                    transaction.signals.post_rollback.send(None)
            else:
                # Roll back transaction
                connection.rollback()
                transaction.signals.post_rollback.send(None)

    finally:
        # Outermost block exit when autocommit was enabled.
        if not connection.in_atomic_block:
            if connection.features.autocommits_when_autocommit_is_off:
                connection.autocommit = True
            else:
                connection.set_autocommit(True)
            # Outermost block exit when autocommit was disabled.
        elif not connection.savepoint_ids and not connection.commit_on_exit:
            connection.in_atomic_block = False
Example #48
 def __enter__(self):
     assert self.cxn is None, "lock already acquired"
     self.cxn = transaction.get_connection(self.using)
     if not self.cxn.in_atomic_block:
         self.txn = transaction.atomic(using=self.using)
         self.txn.__enter__()
     self.cur = self.cxn.cursor()
     self.cur.execute("LOCK TABLE %s IN %s MODE" %(
         self.model._meta.db_table, self.mode,
     ))
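
A usage sketch, assuming the class above is exposed as a context manager constructed with the model, lock mode, and database alias that its attributes reference (all names here are hypothetical):

with TableLock(model=Invoice, mode="SHARE ROW EXCLUSIVE", using=None):
    # The lock is held for the duration of the transaction opened (or
    # joined) by __enter__.
    Invoice.objects.filter(paid=False).update(overdue=True)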
Example #49
    def test_mark_for_rollback_on_error_in_autocommit(self):
        self.assertTrue(transaction.get_autocommit())

        # Swallow the intentional error raised.
        with self.assertRaisesMessage(Exception, "Oops"):

            # Wrap in `mark_for_rollback_on_error` to check if the transaction is marked broken.
            with transaction.mark_for_rollback_on_error():

                # Ensure that we are still in a good state.
                self.assertFalse(transaction.get_connection().needs_rollback)

                raise Exception("Oops")

            # Ensure that `mark_for_rollback_on_error` did not mark the transaction
            # as broken, since we are in autocommit mode …
            self.assertFalse(transaction.get_connection().needs_rollback)

        # … and further queries work nicely.
        Reporter.objects.create()
Example #50
    def __enter__(self):
        """Context manager for locking table."""
        super().__enter__()

        connection = get_connection(self.using)
        with connection.cursor() as cursor:
            cursor.execute(
                'LOCK TABLE {db_table_name} IN {lock_mode} MODE'.format(
                    db_table_name=self.model._meta.db_table,
                    lock_mode=self.lock_mode
                )
            )
Example #51
def log_output_request(data, related_objects=None, using=None):
    """
    Helper for logging output requests
    :param data: dict of input attributes of OutputLoggedRequest model
    :param related_objects: objects that will be related to OutputLoggedRequest object
    :param using: database alias
    """
    if is_active_logged_requests(using):
        output_logged_requests = get_connection(using).output_logged_requests[-1]
        output_logged_requests.append(OutputLoggedRequestContext(data, related_objects))
    else:
        output_logged_request = OutputLoggedRequest.objects.create(**data)
        if related_objects:
            [output_logged_request.related_objects.create(content_object=obj) for obj in related_objects]
Example #52
    def schedule_changed(self):
        try:
            # If MySQL is running with transaction isolation level
            # REPEATABLE-READ (default), then we won't see changes done by
            # other transactions until the current transaction is
            # committed (Issue #41).
            try:
                transaction.commit()
            except transaction.TransactionManagementError:
                pass  # not in transaction management.

            last, ts = self._last_timestamp, self.Changes.last_change()
        except DATABASE_ERRORS as exc:
            # as per https://github.com/celery/django-celery/commit/f2ad0848e3f55ee9a4c5d3ffeff62f4d54036ed7
            transaction.get_connection().close_if_unusable_or_obsolete()
            error('Database gave error: %r', exc, exc_info=1)
            return False
        try:
            if ts and ts > (last if last else ts):
                return True
        finally:
            self._last_timestamp = ts
        return False
Example #53
 def process_view(self, request, view_func, view_args, view_kwargs):
     # This uses undocumented django APIs:
     # - transaction.get_connection() followed by in_atomic_block property,
     #   which we need to make sure we're not messing with a transaction
     #   that has already started (which happens in tests using the regular
     #   TestCase class)
     # - _non_atomic_requests(), which set the property to prevent the
     #   transaction on the view itself. We can't use non_atomic_requests
     #   (without the '_') as it returns a *new* view, and we can't do that
     #   in a middleware, we need to modify it in place and return None so
     #   that the rest of the middlewares are run.
     is_method_safe = request.method in ('HEAD', 'GET', 'OPTIONS', 'TRACE')
     if is_method_safe and not transaction.get_connection().in_atomic_block:
         transaction._non_atomic_requests(view_func, using='default')
     return None
Example #54
    def __enter__(self):
        super(LockedAtomicTransaction, self).__enter__()

        # Make sure not to lock, when sqlite is used, or you'll run into problems while running tests!!!
        if settings.DATABASES[self.using]["ENGINE"] != "django.db.backends.sqlite3":
            cursor = None
            try:
                cursor = get_connection(self.using).cursor()
                for model in self.models:
                    cursor.execute(
                        "LOCK TABLE {table_name}".format(table_name=model._meta.db_table)
                    )
            finally:
                if cursor and not cursor.closed:
                    cursor.close()
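
A usage sketch (assuming LockedAtomicTransaction is constructed with the models to lock and an optional database alias, mirroring the attributes __enter__ references):

with LockedAtomicTransaction(models=[Invoice]):  # signature is an assumption
    # check-then-create is safe here because the table is locked
    if not Invoice.objects.filter(number=number).exists():
        Invoice.objects.create(number=number)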
Example #55
def pg_version(using=None):
    """
    Return tuple with PostgreSQL version of a specific connection
    :type using: str
    :param using: Connection name
    :rtype: tuple
    :return: PostgreSQL version
    """
    connection = get_connection(using)
    cursor = connection.cursor()

    cursor.execute('SHOW server_version')
    row = cursor.fetchone()

    return tuple([int(i) for i in row[0].split('.')])
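
A usage sketch; note that some PostgreSQL builds return a server_version string with a suffix (e.g. '12.4 (Debian 12.4-1)'), which the int() parsing above would not handle:

if pg_version() >= (9, 5):
    # e.g. PostgreSQL 9.5+ supports INSERT ... ON CONFLICT upserts
    use_upsert = True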
Example #56
    def apply_async(self, *args, **kwargs):
        # Delay the task unless the client requested otherwise or transactions
        # aren't being managed (i.e. the signal handlers won't send the task).

        celery_eager = _get_celery_settings("CELERY_ALWAYS_EAGER")

        # New setting to run eager task post transaction
        # defaults to `not CELERY_ALWAYS_EAGER`
        eager_transaction = _get_celery_settings("CELERY_EAGER_TRANSACTION", not celery_eager)

        connection = get_connection()
        if connection.in_atomic_block and eager_transaction:
            _get_task_queue().append((self, args, kwargs))
        else:
            return self.original_apply_async(*args, **kwargs)
Example #57
    def apply_async(self, *args, **kwargs):
        # Delay the task unless the client requested otherwise or transactions
        # aren't being managed (i.e. the signal handlers won't send the task).
        using = kwargs['using'] if 'using' in kwargs else None
        con = transaction.get_connection(using)

        if con.get_autocommit() or con.in_atomic_block:
            if not transaction.is_dirty():
                # Always mark the transaction as dirty
                # because we push task in queue that must be fired or discarded
                transaction.set_dirty(using=using)

            task = lambda: self.original_apply_async(*args, **kwargs)
            con.on_commit(task)  # register on the connection for the requested alias
        else:
            return self.original_apply_async(*args, **kwargs)
Example #58
def defer(f, *args, **kwargs):
    '''
    Wrapper that defers a function's execution until the current transaction
    commits, if a transaction is active.  Otherwise, executes as usual. Note
    that a deferred function will NOT be called if the transaction completes
    without committing (e.g. when transaction.is_dirty() is False upon exiting
    the transaction).

    An implicit assumption is that a deferred function does not return an
    important value, since there is no way to retrieve the return value in
    the normal execution order.

    Before being connected to the 'post_commit' signal of an existing managed
    transaction, the deferred function is wrapped by the @commit_on_success
    decorator to ensure that it behaves properly by committing or rolling back
    any updates it makes to a current transaction.

    >>> from django.db import transaction
    >>> from django_transaction_signals import defer
    >>>
    >>> def log_success(msg):
    >>>     print 'logging success'
    >>>
    >>> @transaction.atomic
    >>> def transactional_update(value):
    >>>     print 'starting transaction'
    >>>     defer(log_success, 'The transaction was successful')
    >>>     print 'finishing transaction'
    >>>
    >>> transactional_update('foo')
    ... starting transaction
    ... finishing transaction
    ... logging success

    '''
    connection = get_connection(kwargs.pop('using', None))
    if not connection.get_autocommit() or connection.in_atomic_block:
        def f_deferred(*a, **kw):
            f(*args, **kwargs)
        if connection.savepoint_ids:
            savepoint_id = connection.savepoint_ids[-1]
        else:
            savepoint_id = None
        dispatch_uid = (savepoint_id, _make_id(f_deferred))
        transaction.signals.post_commit.connect(f_deferred, weak=False, dispatch_uid=dispatch_uid)
    else:
        return f(*args, **kwargs)
Example #59
def on_success(handler, using=None):
    """
    Register a handler or a function to be called after successful code pass.
    If transaction signals are not active the handler/function is called immediately.
    :param handler: handler or function that will be called.
    :param using: name of the database
    """

    connection = get_connection(using)
    if getattr(connection, 'transaction_signals_context_list', False):
        connection.transaction_signals_context_list[-1].register(handler)
    else:
        if settings.DEBUG:
            logger.warning(
                'on_success requires transaction signals to be activated via the '
                'transaction_signals decorator; the handler is being called immediately.'
            )
        handler()
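
A usage sketch for on_success (the transaction_signals decorator is the activation mechanism referenced in the warning above; notify_watchers is hypothetical):

@transaction_signals  # assumed: pushes a TransactionSignalsContext, see Example #17
def close_ticket(ticket):
    ticket.state = 'closed'
    ticket.save()
    on_success(lambda: notify_watchers(ticket))  # runs after the block succeeds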