Example #1
def KfpMigrate(database: SqliteDatabase):
    tables = database.get_tables()
    migrator = SqliteMigrator(database)
    if "rpgcharacter" in tables:
        columns = database.get_columns("rpgcharacter")
        if not KfpMigrator.hasColumn("retired", columns):
            retiredField = BooleanField(default=False)
            migrate(
                migrator.add_column("rpgcharacter", "retired", retiredField)
            )
        if not KfpMigrator.hasColumn("last_attack", columns):
            lastAttackField = DateTimeField(default=datetime.now() + timedelta(days=-1))
            migrate(
                migrator.add_column("rpgcharacter", "last_attack", lastAttackField)
            )
    if "member" in tables:
        columns = database.get_columns("member")
        if not KfpMigrator.hasColumn("token", columns):
            tokenField = BigIntegerField(default=100)
            migrate(
                migrator.add_column("member", 'token', tokenField)
            )
    if "channel" in tables:
        columns = database.get_columns("channel")
        if not KfpMigrator.hasColumn("channel_id", columns):
            guildIdField = IntegerField(default=-1)
            migrate(
                migrator.add_column('channel', 'channel_guild_id', guildIdField),
                migrator.rename_column('channel', 'channel_discord_id', 'channel_id'),
            )
    if "item" in tables:
        columns = database.get_columns("item")
        if KfpMigrator.hasColumn("hidden", columns):
            migrate(
                migrator.drop_column('item', 'hidden'),
            )
        if KfpMigrator.hasColumn("buff_type", columns):
            migrate(
                migrator.drop_column('item', 'buff_type'),
            )
        if KfpMigrator.hasColumn("buff_value", columns):
            migrate(
                migrator.drop_column('item', 'buff_value'),
            )
        if not KfpMigrator.hasColumn("type", columns):
            typeField = CharField(default=ItemType.NONE)
            migrate(
                migrator.add_column('item', 'type', typeField),
            )
        if not KfpMigrator.hasColumn("buff", columns):
            buff = BuffField(default=Buff(BuffType.NONE, 0, -1))
            migrate(
                migrator.add_column('item', 'buff', buff),
            )
        if not KfpMigrator.hasColumn("description", columns):
            description = CharField(default="")
            migrate(
                migrator.add_column('item', 'description', description),
            )
    return True
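Example #1 relies on a KfpMigrator.hasColumn helper that is not shown. A minimal sketch of what it presumably does, assuming it simply matches names against the ColumnMetadata list that peewee's database.get_columns() returns:

class KfpMigrator:
    # Hypothetical reconstruction of the helper used above; get_columns()
    # yields ColumnMetadata tuples, so comparing .name is enough.
    @staticmethod
    def hasColumn(name, columns):
        return any(column.name == name for column in columns)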
Example #2
def apply_v3():
    db = SqliteDatabase(r"data\results.db")
    migrator = SqliteMigrator(db)

    migrate(
        migrator.rename_column("sounddistance", "char_1", "char1"),
        migrator.rename_column("sounddistance", "char_2", "char2")
    )
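Example #2 applies the renames unconditionally, so it will fail once char_1 and char_2 no longer exist. A hedged variant under the same assumptions (same database path and table) that checks the existing columns first, similar to Example #1:

from peewee import SqliteDatabase
from playhouse.migrate import SqliteMigrator, migrate


def apply_v3_idempotent():
    # Skip renames that were already applied so the script can be re-run.
    db = SqliteDatabase(r"data\results.db")
    migrator = SqliteMigrator(db)
    columns = {c.name for c in db.get_columns("sounddistance")}
    operations = []
    if "char_1" in columns:
        operations.append(migrator.rename_column("sounddistance", "char_1", "char1"))
    if "char_2" in columns:
        operations.append(migrator.rename_column("sounddistance", "char_2", "char2"))
    if operations:
        migrate(*operations)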
Example #3
def init_db(con):
    db.initialize(con)
    db.connect()
    db.create_tables([
        RepoModel, RepoPassword, BackupProfileModel, SourceDirModel,
        ArchiveModel, WifiSettingModel, EventLogModel, SchemaVersion
    ])

    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default Profile')
        default_profile.save()

    # Delete old log entries after 3 months.
    three_months_ago = datetime.now() - timedelta(days=90)
    EventLogModel.delete().where(EventLogModel.start_time < three_months_ago).execute()

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(
        id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()
    if created or current_schema.version == SCHEMA_VERSION:
        return
    else:
        migrator = SqliteMigrator(con)

    if current_schema.version < 4:  # version 3 to 4
        _apply_schema_update(
            current_schema, 4,
            migrator.add_column(ArchiveModel._meta.table_name, 'duration',
                                pw.FloatField(null=True)),
            migrator.add_column(ArchiveModel._meta.table_name, 'size',
                                pw.IntegerField(null=True)))
    if current_schema.version < 5:
        _apply_schema_update(
            current_schema,
            5,
            migrator.drop_not_null(WifiSettingModel._meta.table_name,
                                   'last_connected'),
        )

    if current_schema.version < 6:
        _apply_schema_update(
            current_schema, 6,
            migrator.add_column(EventLogModel._meta.table_name, 'repo_url',
                                pw.CharField(null=True)))

    if current_schema.version < 7:
        _apply_schema_update(
            current_schema, 7,
            migrator.rename_column(SourceDirModel._meta.table_name,
                                   'config_id', 'profile_id'),
            migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
            migrator.add_column(EventLogModel._meta.table_name, 'profile',
                                pw.CharField(null=True)))
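Examples #3 through #6, #10 and #11 all call an _apply_schema_update helper that is not shown. A minimal sketch of its likely behavior, assuming db is the module-level database used above: it runs the given operations and then records the new version on the SchemaVersion row.

from playhouse.migrate import migrate


def _apply_schema_update(current_schema, version_after, *operations):
    # Hypothetical reconstruction: apply the operations (if any) in one
    # transaction, then bump the stored schema version.
    with db.atomic():
        if operations:
            migrate(*operations)
        current_schema.version = version_after
        current_schema.save()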
Example #4
def init_db(con=None):
    if con is not None:
        os.umask(0o0077)
        db.initialize(con)
        db.connect()
    db.create_tables([
        RepoModel, RepoPassword, BackupProfileModel, SourceFileModel,
        SettingsModel, ArchiveModel, WifiSettingModel, EventLogModel,
        SchemaVersion
    ])

    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default')
        default_profile.save()

    # Delete old log entries after 3 months.
    three_months_ago = datetime.now() - timedelta(days=90)
    EventLogModel.delete().where(EventLogModel.start_time < three_months_ago).execute()

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(
        id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()
    if created or current_schema.version == SCHEMA_VERSION:
        pass
    else:
        migrator = SqliteMigrator(con)

    if current_schema.version < 4:  # version 3 to 4
        _apply_schema_update(
            current_schema, 4,
            migrator.add_column(ArchiveModel._meta.table_name, 'duration',
                                pw.FloatField(null=True)),
            migrator.add_column(ArchiveModel._meta.table_name, 'size',
                                pw.IntegerField(null=True)))
    if current_schema.version < 5:
        _apply_schema_update(
            current_schema,
            5,
            migrator.drop_not_null(WifiSettingModel._meta.table_name,
                                   'last_connected'),
        )

    if current_schema.version < 6:
        _apply_schema_update(
            current_schema, 6,
            migrator.add_column(EventLogModel._meta.table_name, 'repo_url',
                                pw.CharField(null=True)))

    if current_schema.version < 7:
        _apply_schema_update(
            current_schema, 7,
            migrator.rename_column(SourceFileModel._meta.table_name,
                                   'config_id', 'profile_id'),
            migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
            migrator.add_column(EventLogModel._meta.table_name, 'profile',
                                pw.CharField(null=True)))

    if current_schema.version < 8:
        _apply_schema_update(
            current_schema, 8,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'prune_keep_within', pw.CharField(null=True)))

    if current_schema.version < 9:
        _apply_schema_update(
            current_schema,
            9,
            migrator.add_column(
                BackupProfileModel._meta.table_name, 'new_archive_name',
                pw.CharField(
                    default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}"
                )),
            migrator.add_column(
                BackupProfileModel._meta.table_name, 'prune_prefix',
                pw.CharField(default="{hostname}-{profile_slug}-")),
        )

    if current_schema.version < 10:
        _apply_schema_update(
            current_schema,
            10,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'pre_backup_cmd', pw.CharField(default='')),
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'post_backup_cmd', pw.CharField(default='')),
        )

    if current_schema.version < 11:
        _apply_schema_update(current_schema, 11)
        for profile in BackupProfileModel:
            if profile.compression == 'zstd':
                profile.compression = 'zstd,3'
            if profile.compression == 'lzma,6':
                profile.compression = 'auto,lzma,6'
            profile.save()

    if current_schema.version < 12:
        _apply_schema_update(
            current_schema, 12,
            migrator.add_column(RepoModel._meta.table_name,
                                'extra_borg_arguments',
                                pw.CharField(default='')))

    if current_schema.version < 13:
        # Migrate ArchiveModel data to new table to remove unique constraint from snapshot_id column.
        tables = db.get_tables()
        if ArchiveModel.select().count() == 0 and 'snapshotmodel' in tables:
            cursor = db.execute_sql('select * from snapshotmodel;')
            fields = [
                ArchiveModel.id, ArchiveModel.snapshot_id, ArchiveModel.name,
                ArchiveModel.repo, ArchiveModel.time, ArchiveModel.duration,
                ArchiveModel.size
            ]
            data = [row for row in cursor.fetchall()]
            with db.atomic():
                size = 1000
                for i in range(0, len(data), size):
                    ArchiveModel.insert_many(data[i:i + size],
                                             fields=fields).execute()

        _apply_schema_update(current_schema, 13)

    if current_schema.version < 14:
        _apply_schema_update(
            current_schema, 14,
            migrator.add_column(SettingsModel._meta.table_name, 'str_value',
                                pw.CharField(default='')))

    if current_schema.version < 15:
        _apply_schema_update(
            current_schema, 15,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'dont_run_on_metered_networks',
                                pw.BooleanField(default=True)))

    if current_schema.version < 16:
        _apply_schema_update(
            current_schema, 16,
            migrator.add_column(SourceFileModel._meta.table_name, 'dir_size',
                                pw.BigIntegerField(default=-1)),
            migrator.add_column(SourceFileModel._meta.table_name,
                                'dir_files_count',
                                pw.BigIntegerField(default=-1)),
            migrator.add_column(SourceFileModel._meta.table_name, 'path_isdir',
                                pw.BooleanField(default=False)))

    # Create missing settings and update labels. Leave setting values untouched.
    for setting in get_misc_settings():
        s, created = SettingsModel.get_or_create(key=setting['key'],
                                                 defaults=setting)
        s.label = setting['label']
        s.save()

    # Delete old log entries after 3 months.
    three_months_ago = datetime.now() - rd(months=3)
    EventLogModel.delete().where(EventLogModel.start_time < three_months_ago).execute()
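The version-13 step above copies rows from the old snapshotmodel table in slices of 1000. SQLite caps the number of bound parameters per statement (999 in older releases, 32766 since 3.32), so bulk insert_many() calls are conventionally batched; a generic sketch of the same pattern with hypothetical names:

def insert_in_chunks(model, fields, rows, chunk_size=1000):
    # Insert rows in batches inside one transaction to stay under SQLite's
    # bound-parameter limit.
    with db.atomic():
        for i in range(0, len(rows), chunk_size):
            model.insert_many(rows[i:i + chunk_size], fields=fields).execute()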
Example #5
def init_db(con):
    db.initialize(con)
    db.connect()
    db.create_tables([RepoModel, RepoPassword, BackupProfileModel, SourceFileModel, SettingsModel,
                      ArchiveModel, WifiSettingModel, EventLogModel, SchemaVersion])

    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default')
        default_profile.save()

    # Create missing settings and update labels. Leave setting values untouched.
    for setting in get_misc_settings():
        s, created = SettingsModel.get_or_create(key=setting['key'], defaults=setting)
        if created and setting['key'] == "use_dark_theme":
            # Check if macOS with enabled dark mode
            s.value = bool(uses_dark_mode())
        if created and setting['key'] == "use_light_icon":
            # Check if macOS with enabled dark mode or Linux with GNOME DE
            s.value = bool(uses_dark_mode()) or os.environ.get('XDG_CURRENT_DESKTOP', '') == 'GNOME'
        s.label = setting['label']
        s.save()

    # Delete old log entries after 3 months.
    three_months_ago = datetime.now() - timedelta(days=90)
    EventLogModel.delete().where(EventLogModel.start_time < three_months_ago).execute()

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()
    if created or current_schema.version == SCHEMA_VERSION:
        pass
    else:
        migrator = SqliteMigrator(con)

    if current_schema.version < 4:  # version 3 to 4
        _apply_schema_update(
            current_schema, 4,
            migrator.add_column(ArchiveModel._meta.table_name, 'duration', pw.FloatField(null=True)),
            migrator.add_column(ArchiveModel._meta.table_name, 'size', pw.IntegerField(null=True))
        )
    if current_schema.version < 5:
        _apply_schema_update(
            current_schema, 5,
            migrator.drop_not_null(WifiSettingModel._meta.table_name, 'last_connected'),
        )

    if current_schema.version < 6:
        _apply_schema_update(
            current_schema, 6,
            migrator.add_column(EventLogModel._meta.table_name, 'repo_url', pw.CharField(null=True))
        )

    if current_schema.version < 7:
        _apply_schema_update(
            current_schema, 7,
            migrator.rename_column(SourceFileModel._meta.table_name, 'config_id', 'profile_id'),
            migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
            migrator.add_column(EventLogModel._meta.table_name, 'profile', pw.CharField(null=True))
        )

    if current_schema.version < 8:
        _apply_schema_update(
            current_schema, 8,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'prune_keep_within', pw.CharField(null=True)))

    if current_schema.version < 9:
        _apply_schema_update(
            current_schema, 9,
            migrator.add_column(BackupProfileModel._meta.table_name, 'new_archive_name',
                                pw.CharField(default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}")),
            migrator.add_column(BackupProfileModel._meta.table_name, 'prune_prefix',
                                pw.CharField(default="{hostname}-{profile_slug}-")),
        )

    if current_schema.version < 10:
        _apply_schema_update(
            current_schema, 10,
            migrator.add_column(BackupProfileModel._meta.table_name, 'pre_backup_cmd',
                                pw.CharField(default='')),
            migrator.add_column(BackupProfileModel._meta.table_name, 'post_backup_cmd',
                                pw.CharField(default='')),
        )

    if current_schema.version < 11:
        _apply_schema_update(current_schema, 11)
        for profile in BackupProfileModel:
            if profile.compression == 'zstd':
                profile.compression = 'zstd,3'
            if profile.compression == 'lzma,6':
                profile.compression = 'auto,lzma,6'
            profile.save()

    if current_schema.version < 12:
        _apply_schema_update(
            current_schema, 12,
            migrator.add_column(RepoModel._meta.table_name,
                                'extra_borg_arguments', pw.CharField(default='')))
Example #6
def init_db(con):
    db.initialize(con)
    db.connect()
    db.create_tables([
        RepoModel, RepoPassword, BackupProfileModel, SourceFileModel,
        SettingsModel, ArchiveModel, WifiSettingModel, EventLogModel,
        SchemaVersion
    ])

    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default')
        default_profile.save()

    # Default settings for all platforms.
    settings = [{
        'key': 'use_light_icon',
        'value': False,
        'type': 'checkbox',
        'label': 'Use light system tray icon (applies after restart, useful for dark themes).'
    }, {
        'key': 'enable_notifications',
        'value': True,
        'type': 'checkbox',
        'label': 'Display notifications when background tasks fail.'
    }, {
        'key': 'enable_notifications_success',
        'value': False,
        'type': 'checkbox',
        'label': 'Also notify about successful background tasks.'
    }]
    if sys.platform == 'darwin':
        settings += [
            {
                'key': 'autostart',
                'value': False,
                'type': 'checkbox',
                'label': 'Add Vorta to Login Items in Preferences > Users and Groups > Login Items.'
            },
            {
                'key': 'check_for_updates',
                'value': True,
                'type': 'checkbox',
                'label': 'Check for updates on startup.'
            },
            {
                'key': 'updates_include_beta',
                'value': False,
                'type': 'checkbox',
                'label': 'Include pre-release versions when checking for updates.'
            },
        ]

    # Create missing settings and update labels. Leave setting values untouched.
    for setting in settings:
        s, created = SettingsModel.get_or_create(key=setting['key'],
                                                 defaults=setting)
        s.label = setting['label']
        s.save()

    # Delete old log entries after 3 months.
    three_months_ago = datetime.now() - timedelta(days=90)
    EventLogModel.delete().where(EventLogModel.start_time < three_months_ago).execute()

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(
        id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()
    if created or current_schema.version == SCHEMA_VERSION:
        pass
    else:
        migrator = SqliteMigrator(con)

    if current_schema.version < 4:  # version 3 to 4
        _apply_schema_update(
            current_schema, 4,
            migrator.add_column(ArchiveModel._meta.table_name, 'duration',
                                pw.FloatField(null=True)),
            migrator.add_column(ArchiveModel._meta.table_name, 'size',
                                pw.IntegerField(null=True)))
    if current_schema.version < 5:
        _apply_schema_update(
            current_schema,
            5,
            migrator.drop_not_null(WifiSettingModel._meta.table_name,
                                   'last_connected'),
        )

    if current_schema.version < 6:
        _apply_schema_update(
            current_schema, 6,
            migrator.add_column(EventLogModel._meta.table_name, 'repo_url',
                                pw.CharField(null=True)))

    if current_schema.version < 7:
        _apply_schema_update(
            current_schema, 7,
            migrator.rename_column(SourceFileModel._meta.table_name,
                                   'config_id', 'profile_id'),
            migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
            migrator.add_column(EventLogModel._meta.table_name, 'profile',
                                pw.CharField(null=True)))

    if current_schema.version < 8:
        _apply_schema_update(
            current_schema, 8,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'prune_keep_within', pw.CharField(null=True)))

    if current_schema.version < 9:
        _apply_schema_update(
            current_schema,
            9,
            migrator.add_column(
                BackupProfileModel._meta.table_name, 'new_archive_name',
                pw.CharField(
                    default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}"
                )),
            migrator.add_column(
                BackupProfileModel._meta.table_name, 'prune_prefix',
                pw.CharField(default="{hostname}-{profile_slug}-")),
        )

    if current_schema.version < 10:
        _apply_schema_update(
            current_schema,
            10,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'pre_backup_cmd', pw.CharField(default='')),
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'post_backup_cmd', pw.CharField(default='')),
        )
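Examples #4, #5 and #11 obtain these defaults from a get_misc_settings() helper instead of the inline list shown here. A hedged sketch of the shape it presumably returns, modeled on the dicts above:

def get_misc_settings():
    # Hypothetical reconstruction: a list of dicts with 'key', 'value',
    # 'type' and 'label', as consumed by SettingsModel.get_or_create().
    return [
        {
            'key': 'enable_notifications',
            'value': True,
            'type': 'checkbox',
            'label': 'Display notifications when background tasks fail.'
        },
        # ... further entries analogous to the list in Example #6
    ]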
Example #7
from playhouse.migrate import SqliteMigrator, migrate
from zhlib import zh

if __name__ == '__main__':
    migrator = SqliteMigrator(zh.database)
    migrate(migrator.rename_column('sentence', 'chinese', 'sentence'))
Example #8
def migrate_up():
    migrator = SqliteMigrator(db.database)
    migrate(migrator.rename_column('article', 'author_id', 'author_name'))
    migrate(migrator.rename_column('article', 'tag_id', 'tag_name'))
Example #9
        {
            'name': 'zadowolenie'
        },
        {
            'name': 'combat'
        },
        {
            'name': 'amso'
        },
    ]).on_conflict_ignore().execute()
    migrator = SqliteMigrator(db)
    # TODO: improve migrations!
    migrations = [
        # Add date when promotion ends and add number of items in promotion
        migrator.add_column('promotion', 'end_date',
                            peewee.DateTimeField(default=None, null=True)),
        migrator.add_index('promotion', 'end_date'),
        migrator.add_column('promotion', 'number_of_items',
                            peewee.IntegerField(default=None, null=True)),
        # Redesign items counts
        migrator.rename_column('promotion', 'number_of_items',
                               'items_available'),
        migrator.add_column('promotion', 'items_sold',
                            peewee.IntegerField(default=None, null=True)),
    ]
    for migration in migrations:
        try:
            migrate(migration)
        except peewee.OperationalError:
            pass
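Example #9 keeps the migration list re-runnable by swallowing peewee.OperationalError, which is raised, for instance, when a column already exists. An alternative under the same assumptions is to inspect the current columns first, as Example #1 does; a sketch for the end_date column:

existing = {c.name for c in db.get_columns('promotion')}
if 'end_date' not in existing:
    # Only add the column when it is missing; no exception handling needed.
    migrate(migrator.add_column('promotion', 'end_date',
                                peewee.DateTimeField(default=None, null=True)))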
Example #10
def run_migrations(current_schema, db_connection):
    """
    Apply new schema versions to database.

    See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    """
    migrator = SqliteMigrator(db_connection)

    if current_schema.version < 4:  # version 3 to 4
        _apply_schema_update(
            current_schema, 4,
            migrator.add_column(ArchiveModel._meta.table_name, 'duration',
                                pw.FloatField(null=True)),
            migrator.add_column(ArchiveModel._meta.table_name, 'size',
                                pw.IntegerField(null=True)))
    if current_schema.version < 5:
        _apply_schema_update(
            current_schema,
            5,
            migrator.drop_not_null(WifiSettingModel._meta.table_name,
                                   'last_connected'),
        )

    if current_schema.version < 6:
        _apply_schema_update(
            current_schema, 6,
            migrator.add_column(EventLogModel._meta.table_name, 'repo_url',
                                pw.CharField(null=True)))

    if current_schema.version < 7:
        _apply_schema_update(
            current_schema, 7,
            migrator.rename_column(SourceFileModel._meta.table_name,
                                   'config_id', 'profile_id'),
            migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
            migrator.add_column(EventLogModel._meta.table_name, 'profile',
                                pw.CharField(null=True)))

    if current_schema.version < 8:
        _apply_schema_update(
            current_schema, 8,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'prune_keep_within', pw.CharField(null=True)))

    if current_schema.version < 9:
        _apply_schema_update(
            current_schema,
            9,
            migrator.add_column(
                BackupProfileModel._meta.table_name, 'new_archive_name',
                pw.CharField(
                    default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}"
                )),
            migrator.add_column(
                BackupProfileModel._meta.table_name, 'prune_prefix',
                pw.CharField(default="{hostname}-{profile_slug}-")),
        )

    if current_schema.version < 10:
        _apply_schema_update(
            current_schema,
            10,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'pre_backup_cmd', pw.CharField(default='')),
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'post_backup_cmd', pw.CharField(default='')),
        )

    if current_schema.version < 11:
        _apply_schema_update(current_schema, 11)
        for profile in BackupProfileModel:
            if profile.compression == 'zstd':
                profile.compression = 'zstd,3'
            if profile.compression == 'lzma,6':
                profile.compression = 'auto,lzma,6'
            profile.save()

    if current_schema.version < 12:
        _apply_schema_update(
            current_schema, 12,
            migrator.add_column(RepoModel._meta.table_name,
                                'extra_borg_arguments',
                                pw.CharField(default='')))

    if current_schema.version < 13:
        # Migrate ArchiveModel data to new table to remove unique constraint from snapshot_id column.
        tables = DB.get_tables()
        if ArchiveModel.select().count() == 0 and 'snapshotmodel' in tables:
            cursor = DB.execute_sql('select * from snapshotmodel;')
            fields = [
                ArchiveModel.id, ArchiveModel.snapshot_id, ArchiveModel.name,
                ArchiveModel.repo, ArchiveModel.time, ArchiveModel.duration,
                ArchiveModel.size
            ]
            data = [row for row in cursor.fetchall()]
            with DB.atomic():
                size = 1000
                for i in range(0, len(data), size):
                    ArchiveModel.insert_many(data[i:i + size],
                                             fields=fields).execute()

        _apply_schema_update(current_schema, 13)

    if current_schema.version < 14:
        _apply_schema_update(
            current_schema, 14,
            migrator.add_column(SettingsModel._meta.table_name, 'str_value',
                                pw.CharField(default='')))

    if current_schema.version < 15:
        _apply_schema_update(
            current_schema, 15,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'dont_run_on_metered_networks',
                                pw.BooleanField(default=True)))

    if current_schema.version < 16:
        _apply_schema_update(
            current_schema, 16,
            migrator.add_column(SourceFileModel._meta.table_name, 'dir_size',
                                pw.BigIntegerField(default=-1)),
            migrator.add_column(SourceFileModel._meta.table_name,
                                'dir_files_count',
                                pw.BigIntegerField(default=-1)),
            migrator.add_column(SourceFileModel._meta.table_name, 'path_isdir',
                                pw.BooleanField(default=False)))

    if current_schema.version < 17:
        _apply_schema_update(
            current_schema, 17,
            migrator.add_column(RepoModel._meta.table_name,
                                'create_backup_cmd', pw.CharField(default='')))

    if current_schema.version < 18:
        _apply_schema_update(
            current_schema, 18,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'schedule_interval_unit',
                                pw.CharField(default='hours')),
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'schedule_interval_count',
                                pw.IntegerField(default=3)),
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'schedule_make_up_missed',
                                pw.BooleanField(default=False)),
            migrator.add_column(EventLogModel._meta.table_name, 'end_time',
                                pw.DateTimeField(default=datetime.now)))
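Example #10 factors the version checks out of init_db. A hedged sketch of how a caller might wire it up, following the SchemaVersion pattern used in the other examples (table creation and the other models are assumed to be handled elsewhere):

def open_and_migrate(con):
    # Hypothetical caller: ensure the version row exists, then apply any
    # pending migrations against the same connection.
    db.initialize(con)
    db.connect()
    current_schema, created = SchemaVersion.get_or_create(
        id=1, defaults={'version': SCHEMA_VERSION})
    if not created and current_schema.version < SCHEMA_VERSION:
        run_migrations(current_schema, con)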
Example #11
def init_db(con):
    os.umask(0o0077)
    db.initialize(con)
    db.connect()
    db.create_tables([
        RepoModel, RepoPassword, BackupProfileModel, SourceFileModel,
        SettingsModel, ArchiveModel, WifiSettingModel, EventLogModel,
        SchemaVersion
    ])

    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default')
        default_profile.save()

    # Create missing settings and update labels. Leave setting values untouched.
    for setting in get_misc_settings():
        s, created = SettingsModel.get_or_create(key=setting['key'],
                                                 defaults=setting)
        if created and setting['key'] == "use_dark_theme":
            # Check if macOS with enabled dark mode
            s.value = bool(uses_dark_mode())
        if created and setting['key'] == "use_light_icon":
            # Check if macOS with enabled dark mode or Linux with GNOME DE
            s.value = bool(uses_dark_mode()) or 'GNOME' in os.environ.get(
                'XDG_CURRENT_DESKTOP', '')
        if created and setting['key'] == "foreground":
            s.value = not bool(is_system_tray_available())
        if created and setting['key'] == "enable_notifications_success":
            s.value = not bool(is_system_tray_available())
        s.label = setting['label']
        s.save()

    # Delete old log entries after 3 months.
    three_months_ago = datetime.now() - timedelta(days=90)
    EventLogModel.delete().where(EventLogModel.start_time < three_months_ago).execute()

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(
        id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()
    if created or current_schema.version == SCHEMA_VERSION:
        pass
    else:
        migrator = SqliteMigrator(con)

    if current_schema.version < 4:  # version 3 to 4
        _apply_schema_update(
            current_schema, 4,
            migrator.add_column(ArchiveModel._meta.table_name, 'duration',
                                pw.FloatField(null=True)),
            migrator.add_column(ArchiveModel._meta.table_name, 'size',
                                pw.IntegerField(null=True)))
    if current_schema.version < 5:
        _apply_schema_update(
            current_schema,
            5,
            migrator.drop_not_null(WifiSettingModel._meta.table_name,
                                   'last_connected'),
        )

    if current_schema.version < 6:
        _apply_schema_update(
            current_schema, 6,
            migrator.add_column(EventLogModel._meta.table_name, 'repo_url',
                                pw.CharField(null=True)))

    if current_schema.version < 7:
        _apply_schema_update(
            current_schema, 7,
            migrator.rename_column(SourceFileModel._meta.table_name,
                                   'config_id', 'profile_id'),
            migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
            migrator.add_column(EventLogModel._meta.table_name, 'profile',
                                pw.CharField(null=True)))

    if current_schema.version < 8:
        _apply_schema_update(
            current_schema, 8,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'prune_keep_within', pw.CharField(null=True)))

    if current_schema.version < 9:
        _apply_schema_update(
            current_schema,
            9,
            migrator.add_column(
                BackupProfileModel._meta.table_name, 'new_archive_name',
                pw.CharField(
                    default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}"
                )),
            migrator.add_column(
                BackupProfileModel._meta.table_name, 'prune_prefix',
                pw.CharField(default="{hostname}-{profile_slug}-")),
        )

    if current_schema.version < 10:
        _apply_schema_update(
            current_schema,
            10,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'pre_backup_cmd', pw.CharField(default='')),
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'post_backup_cmd', pw.CharField(default='')),
        )

    if current_schema.version < 11:
        _apply_schema_update(current_schema, 11)
        for profile in BackupProfileModel:
            if profile.compression == 'zstd':
                profile.compression = 'zstd,3'
            if profile.compression == 'lzma,6':
                profile.compression = 'auto,lzma,6'
            profile.save()

    if current_schema.version < 12:
        _apply_schema_update(
            current_schema, 12,
            migrator.add_column(RepoModel._meta.table_name,
                                'extra_borg_arguments',
                                pw.CharField(default='')))

    if current_schema.version < 13:
        """
        Migrate ArchiveModel data to new table to remove unique constraint from snapshot_id column.
        """
        tables = db.get_tables()
        if ArchiveModel.select().count() == 0 and 'snapshotmodel' in tables:
            cursor = db.execute_sql('select * from snapshotmodel;')
            fields = [
                ArchiveModel.id, ArchiveModel.snapshot_id, ArchiveModel.name,
                ArchiveModel.repo, ArchiveModel.time, ArchiveModel.duration,
                ArchiveModel.size
            ]
            data = [row for row in cursor.fetchall()]
            with db.atomic():
                size = 1000
                for i in range(0, len(data), size):
                    ArchiveModel.insert_many(data[i:i + size],
                                             fields=fields).execute()

        _apply_schema_update(current_schema, 13)
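A minimal, hypothetical way to exercise init_db from Example #11 against a throwaway database, assuming the models and the module-level db proxy are importable:

from peewee import SqliteDatabase

if __name__ == '__main__':
    # Smoke test: run the initialisation and migrations against an
    # in-memory SQLite database and print the resulting schema version.
    con = SqliteDatabase(':memory:')
    init_db(con)
    print(SchemaVersion.get(id=1).version)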