Example No. 1
def test_sqlite_locking(self):
    try:
        with db_lock():
            # While the lock is held, a SQLiteLock row exists.
            self.assertTrue(SQLiteLock.objects.all().exists())
            raise Exception("An error")
    except Exception:
        pass
    # The lock row is removed even though the body raised.
    self.assertFalse(SQLiteLock.objects.all().exists())
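This test pins down db_lock's SQLite contract: a SQLiteLock row exists while the context manager is held, and it is gone afterwards even when the body raises. A minimal sketch of a context manager with that behaviour, assuming only what the test shows; the function name and everything in its body are assumptions, not the library's actual implementation:

from contextlib import contextmanager

from django.db import transaction


@contextmanager
def sqlite_db_lock_sketch():
    # Hypothetical sketch, not the real db_lock: create the lock row inside
    # a transaction so the body can observe it, and delete it on every exit
    # path (the rollback on error would remove it as well).
    with transaction.atomic():
        lock = SQLiteLock.objects.create()  # model assumed from the test above
        try:
            yield
        finally:
            lock.delete()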
Example No. 2
def delete_metadata(channel, node_ids, exclude_node_ids, force_delete):
    # Only delete all metadata if we are not doing selective deletion
    delete_all_metadata = not (node_ids or exclude_node_ids)

    if node_ids or exclude_node_ids:
        # If we have been passed node ids do not do a full deletion pass
        set_content_invisible(channel.id, node_ids, exclude_node_ids)
        # If everything has been made invisible, delete all the metadata
        delete_all_metadata = not channel.root.available

    if force_delete:
        # Do this before we delete all the metadata, as otherwise we lose
        # track of which local files were associated with the channel we
        # just deleted.
        _, unused_files, _ = get_import_export_data(
            channel.id,
            node_ids,
            exclude_node_ids,
            # Don't filter by availability as we have set nodes invisible
            # above, but the localfiles we are trying to delete are still
            # available
            None,
            renderable_only=False,
            topic_thumbnails=False,
        )

        with db_lock():
            propagate_forced_localfile_removal(unused_files)
        # Separate these operations, as running the SQLAlchemy code in the
        # latter seems to cause the Django ORM interactions in the former to
        # roll back. Not quite sure what is causing it, but presumably due to
        # transaction scopes.
        reannotate_all_channels()

    if delete_all_metadata:
        logger.info("Deleting all channel metadata")
        with db_lock():
            channel.delete_content_tree_and_files()

    # Clear any previously set channel availability stats for this channel
    clear_channel_stats(channel.id)

    return delete_all_metadata
Example No. 3
def test_postgres_locking(self):
    try:
        with db_lock():
            raise Exception("An error")
    except Exception:
        pass
    # After db_lock exits (even via an exception), the advisory lock on
    # key 1 must be free again, so a fresh try-lock succeeds.
    query = "SELECT pg_try_advisory_lock({key}) AS lock;".format(key=1)
    with connection.cursor() as c:
        c.execute(query)
        results = c.fetchone()
        self.assertTrue(results[0])
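After db_lock exits here, the test can still acquire pg_try_advisory_lock(1), so whatever advisory lock db_lock takes on key 1 must already be released, exception or not. A hedged sketch consistent with that observation and with Example No. 9 below (which shows the body runs inside an atomic block); the key constant and function name are assumptions:

from contextlib import contextmanager

from django.db import connection, transaction

DB_LOCK_KEY = 1  # assumed: the test probes advisory-lock key 1


@contextmanager
def postgres_db_lock_sketch():
    # Hypothetical sketch, not the real db_lock: a transaction-level advisory
    # lock is released automatically when the enclosing transaction commits
    # or rolls back, so no explicit unlock is needed on the error path.
    with transaction.atomic():
        with connection.cursor() as cursor:
            cursor.execute("SELECT pg_advisory_xact_lock(%s);", [DB_LOCK_KEY])
        yield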
Example No. 4
def set_channel_metadata_fields(channel_id, public=None):
    with db_lock():
        # Recompute the channel's denormalized metadata fields while holding
        # the database lock.
        channel = ChannelMetadata.objects.get(id=channel_id)
        calculate_published_size(channel)
        calculate_total_resource_count(channel)
        calculate_included_languages(channel)
        calculate_next_order(channel)

        if public is not None:
            channel.public = public
            channel.save()
Example No. 5
def create_and_update_notifications(data, source):
    # Keep only payload messages that carry an id.
    messages = [obj for obj in data.get("messages", []) if obj.get("msg_id")]
    excluded_ids = [obj.get("msg_id") for obj in messages]
    with db_lock():
        # Deactivate notifications from this source that no longer appear in
        # the incoming payload.
        PingbackNotification.objects.filter(source=source).exclude(
            id__in=excluded_ids).update(active=False)

    for msg in messages:
        new_msg = {
            "id": msg["msg_id"],
            "version_range": msg.get("version_range"),
            "link_url": msg.get("link_url"),
            "i18n": msg.get("i18n"),
            "timestamp": msg.get("timestamp"),
            "source": source,
            "active": True,
        }
        with db_lock():
            PingbackNotification.objects.update_or_create(id=new_msg["id"],
                                                          defaults=new_msg)
Example No. 6
    def _lock(self):
        cancellable = False
        # job can't be cancelled while locked
        if self.job:
            cancellable = self.job.cancellable
            self.job.save_as_cancellable(cancellable=False)

        try:
            with db_lock():
                yield
        finally:
            # Restore the previous cancellable flag even if the body raises
            # while the lock is held.
            if self.job:
                self.job.save_as_cancellable(cancellable=cancellable)
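Because _lock is a generator that yields exactly once, it only works as a context manager when decorated; presumably the full source applies contextlib.contextmanager. A hedged usage sketch in which the host class and the work done under the lock are hypothetical:

from contextlib import contextmanager


class ImportTask(object):  # hypothetical host class for the _lock method above
    @contextmanager
    def _lock(self):
        # body as in Example No. 6, elided here
        with db_lock():
            yield

    def run(self):
        # While the lock is held the job cannot be cancelled; the previous
        # cancellable flag is restored when the block exits.
        with self._lock():
            self.write_channel_metadata()  # hypothetical critical section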
Example No. 7
    def handle_async(self, *args, **options):
        channel_id = options["channel_id"]
        node_ids = options["node_ids"]
        exclude_node_ids = options["exclude_node_ids"]
        force_delete = options["force_delete"]

        try:
            channel = ChannelMetadata.objects.get(pk=channel_id)
        except ChannelMetadata.DoesNotExist:
            raise CommandError(
                "Channel matching id {id} does not exist".format(
                    id=channel_id))

        delete_all_metadata = delete_metadata(channel, node_ids,
                                              exclude_node_ids, force_delete)

        unused_files = LocalFile.objects.get_unused_files()

        # Get orphan files that are being deleted
        total_file_deletion_operations = unused_files.count()
        job = get_current_job()
        if job:
            # aggregate() always returns the key, with a value of None when
            # the queryset is empty, so fall back to 0 explicitly.
            total_file_deletion_size = unused_files.aggregate(
                Sum("file_size")).get("file_size__sum") or 0
            job.extra_metadata["file_size"] = total_file_deletion_size
            job.extra_metadata[
                "total_resources"] = total_file_deletion_operations
            job.save_meta()

        progress_extra_data = {"channel_id": channel_id}

        # One progress tick for the orphan-file cleanup below, plus one more
        # if the channel database file also gets deleted.
        additional_progress = 1 + int(delete_all_metadata)

        with self.start_progress(total=total_file_deletion_operations +
                                 additional_progress) as progress_update:

            # delete_unused_files() yields as each file is removed, so tick
            # the progress bar once per file.
            for _ in LocalFile.objects.delete_unused_files():
                progress_update(1, progress_extra_data)

            with db_lock():
                LocalFile.objects.delete_orphan_file_objects()

            progress_update(1, progress_extra_data)

            if delete_all_metadata:
                try:
                    os.remove(get_content_database_file_path(channel_id))
                except OSError:
                    pass

                progress_update(1, progress_extra_data)
Example No. 8
def perform_vacuum(database=db.DEFAULT_DB_ALIAS, full=False):
    connection = db.connections[database]
    if connection.vendor == "sqlite":
        try:
            with db_lock():
                # VACUUM cannot run inside an open transaction or alongside
                # active statements, so close every other connection first.
                db.close_old_connections()
                db.connections.close_all()
                cursor = connection.cursor()
                cursor.execute("vacuum;")
                connection.close()
        except Exception as e:
            logger.error(e)
            new_msg = (
                "Vacuum of database {db_name} couldn't be executed. Possible reasons:\n"
                "  * There is an open transaction in the db.\n"
                "  * There are one or more active SQL statements.\n"
                "The full error: {error_msg}").format(
                    db_name=db.connections[database].settings_dict["NAME"],
                    error_msg=e)
            logger.error(new_msg)
        else:
            logger.info("SQLite database vacuum finished.")
    elif connection.vendor == "postgresql":
        if full:
            # VACUUM FULL is limited to the two morango buffer tables,
            # referenced directly by table name.
            morango_models = ("morango_recordmaxcounterbuffer",
                              "morango_buffer")
        else:
            # Plain VACUUM ANALYZE runs over every morango model's table.
            morango_models = [
                m for m in apps.get_models(include_auto_created=True)
                if "morango.models" in str(m)
            ]
        cursor = connection.cursor()
        for m in morango_models:
            if full:
                cursor.execute("vacuum full analyze {};".format(m))
            else:
                cursor.execute("vacuum analyze {};".format(m._meta.db_table))
        connection.close()
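A hedged invocation sketch, grounded only in the signature above: on SQLite the function takes db_lock itself, so the caller just picks the database alias and whether to run the more aggressive VACUUM FULL:

from django import db

# Routine maintenance on the default database; full=True instead rewrites the
# two morango buffer tables with VACUUM FULL, which needs extra disk headroom.
perform_vacuum(database=db.DEFAULT_DB_ALIAS, full=False)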
Example No. 9
def test_atomic_transaction(self):
    with db_lock():
        # Holding db_lock implies an open database transaction.
        self.assertTrue(connection.in_atomic_block)
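Together with Examples No. 1 and No. 3, this shows that db_lock both serializes access across processes and opens a transaction, so writes made under it are atomic. A short usage sketch under that assumption; channel_id is hypothetical and clear_channel_stats is borrowed from Example No. 2:

with db_lock():
    # Both statements commit together, and no other process holding db_lock
    # can interleave between them.
    ChannelMetadata.objects.filter(id=channel_id).update(public=True)
    clear_channel_stats(channel_id)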