def Migrate(self):
    """Apply pending schema changes to the wrapped database.

    Makes ``Contract.Account_id`` nullable and adds a boolean
    ``Address.IsWatchOnly`` column defaulting to ``False``.
    """
    schema_migrator = SqliteMigrator(self._db)
    # Collect the operations first, then run them in one migrate() call.
    operations = [
        schema_migrator.drop_not_null('Contract', 'Account_id'),
        schema_migrator.add_column('Address', 'IsWatchOnly', BooleanField(default=False)),
    ]
    migrate(*operations)
def init_db(con):
    """Initialize the application database.

    Binds the models to *con*, creates missing tables, seeds a default
    backup profile, prunes old event-log rows and applies any pending
    schema migrations.

    :param con: an initialized peewee SQLite database connection.
    """
    db.initialize(con)
    db.connect()
    db.create_tables([
        RepoModel, RepoPassword, BackupProfileModel, SourceDirModel,
        ArchiveModel, WifiSettingModel, EventLogModel, SchemaVersion
    ])

    # Ensure at least one profile exists for the UI to work with.
    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default Profile')
        default_profile.save()

    # Prune old log entries. NOTE(review): the 180-day cutoff is ~6 months,
    # not the 3 months the original comment claimed — confirm intended window.
    # Fix: peewee DELETE queries are lazy; without .execute() the prune
    # was built but never actually run.
    cutoff = datetime.now() - timedelta(days=180)
    EventLogModel.delete().where(EventLogModel.start_time < cutoff).execute()

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(
        id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()
    if created or current_schema.version == SCHEMA_VERSION:
        # Fresh database or already up to date — nothing to migrate.
        return

    migrator = SqliteMigrator(con)
    if current_schema.version < 4:  # version 3 to 4
        _apply_schema_update(
            current_schema, 4,
            migrator.add_column(ArchiveModel._meta.table_name, 'duration', pw.FloatField(null=True)),
            migrator.add_column(ArchiveModel._meta.table_name, 'size', pw.IntegerField(null=True)))
    if current_schema.version < 5:
        _apply_schema_update(
            current_schema, 5,
            migrator.drop_not_null(WifiSettingModel._meta.table_name, 'last_connected'),
        )
    if current_schema.version < 6:
        _apply_schema_update(
            current_schema, 6,
            migrator.add_column(EventLogModel._meta.table_name, 'repo_url', pw.CharField(null=True)))
    if current_schema.version < 7:
        # Sources moved from per-config to per-profile; event log now stores
        # the profile name directly.
        _apply_schema_update(
            current_schema, 7,
            migrator.rename_column(SourceDirModel._meta.table_name, 'config_id', 'profile_id'),
            migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
            migrator.add_column(EventLogModel._meta.table_name, 'profile', pw.CharField(null=True)))
def init_db(con=None):
    """Initialize the application database.

    Binds the models to *con* (when given), creates missing tables, seeds a
    default profile, applies pending schema migrations, syncs misc settings
    and prunes old event-log rows.

    :param con: optional peewee SQLite database connection; when ``None``,
        the module-level ``db`` is assumed to be initialized already.
    """
    if con is not None:
        # Restrict the database file's permissions to the current user.
        os.umask(0o0077)
        db.initialize(con)
    db.connect()
    db.create_tables([
        RepoModel, RepoPassword, BackupProfileModel, SourceFileModel,
        SettingsModel, ArchiveModel, WifiSettingModel, EventLogModel, SchemaVersion
    ])

    # Ensure at least one profile exists for the UI to work with.
    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default')
        default_profile.save()

    # Early prune of old log entries (180-day cutoff). Fix: peewee DELETE
    # queries are lazy; without .execute() this was a no-op. A stricter
    # 3-month prune also runs at the end of this function.
    three_months_ago = datetime.now() - timedelta(days=180)
    EventLogModel.delete().where(EventLogModel.start_time < three_months_ago).execute()

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(
        id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()
    if not (created or current_schema.version == SCHEMA_VERSION):
        migrator = SqliteMigrator(con)
        if current_schema.version < 4:  # version 3 to 4
            _apply_schema_update(
                current_schema, 4,
                migrator.add_column(ArchiveModel._meta.table_name, 'duration', pw.FloatField(null=True)),
                migrator.add_column(ArchiveModel._meta.table_name, 'size', pw.IntegerField(null=True)))
        if current_schema.version < 5:
            _apply_schema_update(
                current_schema, 5,
                migrator.drop_not_null(WifiSettingModel._meta.table_name, 'last_connected'),
            )
        if current_schema.version < 6:
            _apply_schema_update(
                current_schema, 6,
                migrator.add_column(EventLogModel._meta.table_name, 'repo_url', pw.CharField(null=True)))
        if current_schema.version < 7:
            # Sources moved from per-config to per-profile; event log stores
            # the profile name directly.
            _apply_schema_update(
                current_schema, 7,
                migrator.rename_column(SourceFileModel._meta.table_name, 'config_id', 'profile_id'),
                migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
                migrator.add_column(EventLogModel._meta.table_name, 'profile', pw.CharField(null=True)))
        if current_schema.version < 8:
            _apply_schema_update(
                current_schema, 8,
                migrator.add_column(BackupProfileModel._meta.table_name, 'prune_keep_within',
                                    pw.CharField(null=True)))
        if current_schema.version < 9:
            _apply_schema_update(
                current_schema, 9,
                migrator.add_column(
                    BackupProfileModel._meta.table_name, 'new_archive_name',
                    pw.CharField(
                        default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}"
                    )),
                migrator.add_column(
                    BackupProfileModel._meta.table_name, 'prune_prefix',
                    pw.CharField(default="{hostname}-{profile_slug}-")),
            )
        if current_schema.version < 10:
            _apply_schema_update(
                current_schema, 10,
                migrator.add_column(BackupProfileModel._meta.table_name, 'pre_backup_cmd',
                                    pw.CharField(default='')),
                migrator.add_column(BackupProfileModel._meta.table_name, 'post_backup_cmd',
                                    pw.CharField(default='')),
            )
        if current_schema.version < 11:
            # Data-only migration: normalize stored compression identifiers.
            _apply_schema_update(current_schema, 11)
            for profile in BackupProfileModel:
                if profile.compression == 'zstd':
                    profile.compression = 'zstd,3'
                if profile.compression == 'lzma,6':
                    profile.compression = 'auto,lzma,6'
                profile.save()
        if current_schema.version < 12:
            _apply_schema_update(
                current_schema, 12,
                migrator.add_column(RepoModel._meta.table_name, 'extra_borg_arguments',
                                    pw.CharField(default='')))
        if current_schema.version < 13:
            # Migrate ArchiveModel data to new table to remove unique
            # constraint from snapshot_id column.
            tables = db.get_tables()
            if ArchiveModel.select().count() == 0 and 'snapshotmodel' in tables:
                cursor = db.execute_sql('select * from snapshotmodel;')
                fields = [
                    ArchiveModel.id, ArchiveModel.snapshot_id, ArchiveModel.name,
                    ArchiveModel.repo, ArchiveModel.time, ArchiveModel.duration,
                    ArchiveModel.size
                ]
                # fetchall() already returns a list; no copy needed.
                data = cursor.fetchall()
                with db.atomic():
                    # Insert in batches to stay within SQLite's variable limit.
                    size = 1000
                    for i in range(0, len(data), size):
                        ArchiveModel.insert_many(data[i:i + size], fields=fields).execute()
            _apply_schema_update(current_schema, 13)
        if current_schema.version < 14:
            _apply_schema_update(
                current_schema, 14,
                migrator.add_column(SettingsModel._meta.table_name, 'str_value',
                                    pw.CharField(default='')))
        if current_schema.version < 15:
            _apply_schema_update(
                current_schema, 15,
                migrator.add_column(BackupProfileModel._meta.table_name,
                                    'dont_run_on_metered_networks',
                                    pw.BooleanField(default=True)))
        if current_schema.version < 16:
            _apply_schema_update(
                current_schema, 16,
                migrator.add_column(SourceFileModel._meta.table_name, 'dir_size',
                                    pw.BigIntegerField(default=-1)),
                migrator.add_column(SourceFileModel._meta.table_name, 'dir_files_count',
                                    pw.BigIntegerField(default=-1)),
                migrator.add_column(SourceFileModel._meta.table_name, 'path_isdir',
                                    pw.BooleanField(default=False)))

    # Create missing settings and update labels. Leave setting values untouched.
    for setting in get_misc_settings():
        s, created = SettingsModel.get_or_create(key=setting['key'], defaults=setting)
        s.label = setting['label']
        s.save()

    # Delete log entries older than 3 months. Fix: added the missing
    # .execute() — peewee DELETE queries don't run until executed.
    three_months_ago = datetime.now() - rd(months=3)
    EventLogModel.delete().where(EventLogModel.start_time < three_months_ago).execute()
def init_db(con):
    """Initialize the application database.

    Binds the models to *con*, creates missing tables, seeds a default
    profile, syncs misc settings (with theme-aware defaults), prunes old
    event-log rows and applies pending schema migrations.

    :param con: an initialized peewee SQLite database connection.
    """
    db.initialize(con)
    db.connect()
    db.create_tables([RepoModel, RepoPassword, BackupProfileModel, SourceFileModel,
                      SettingsModel, ArchiveModel, WifiSettingModel, EventLogModel,
                      SchemaVersion])

    # Ensure at least one profile exists for the UI to work with.
    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default')
        default_profile.save()

    # Create missing settings and update labels. Leave setting values untouched.
    for setting in get_misc_settings():
        s, created = SettingsModel.get_or_create(key=setting['key'], defaults=setting)
        if created and setting['key'] == "use_dark_theme":
            # Check if macOS with enabled dark mode
            s.value = bool(uses_dark_mode())
        if created and setting['key'] == "use_light_icon":
            # Check if macOS with enabled dark mode or Linux with GNOME DE
            s.value = bool(uses_dark_mode()) or os.environ.get('XDG_CURRENT_DESKTOP', '') == 'GNOME'
        s.label = setting['label']
        s.save()

    # Prune old log entries. NOTE(review): cutoff is 180 days (~6 months),
    # not the 3 months the original comment claimed — confirm intended window.
    # Fix: peewee DELETE queries are lazy; without .execute() this was a no-op.
    three_months_ago = datetime.now() - timedelta(days=180)
    EventLogModel.delete().where(EventLogModel.start_time < three_months_ago).execute()

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()
    if not (created or current_schema.version == SCHEMA_VERSION):
        migrator = SqliteMigrator(con)
        if current_schema.version < 4:  # version 3 to 4
            _apply_schema_update(
                current_schema, 4,
                migrator.add_column(ArchiveModel._meta.table_name, 'duration', pw.FloatField(null=True)),
                migrator.add_column(ArchiveModel._meta.table_name, 'size', pw.IntegerField(null=True))
            )
        if current_schema.version < 5:
            _apply_schema_update(
                current_schema, 5,
                migrator.drop_not_null(WifiSettingModel._meta.table_name, 'last_connected'),
            )
        if current_schema.version < 6:
            _apply_schema_update(
                current_schema, 6,
                migrator.add_column(EventLogModel._meta.table_name, 'repo_url', pw.CharField(null=True))
            )
        if current_schema.version < 7:
            # Sources moved from per-config to per-profile; event log stores
            # the profile name directly.
            _apply_schema_update(
                current_schema, 7,
                migrator.rename_column(SourceFileModel._meta.table_name, 'config_id', 'profile_id'),
                migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
                migrator.add_column(EventLogModel._meta.table_name, 'profile', pw.CharField(null=True))
            )
        if current_schema.version < 8:
            _apply_schema_update(
                current_schema, 8,
                migrator.add_column(BackupProfileModel._meta.table_name, 'prune_keep_within',
                                    pw.CharField(null=True)))
        if current_schema.version < 9:
            _apply_schema_update(
                current_schema, 9,
                migrator.add_column(BackupProfileModel._meta.table_name, 'new_archive_name',
                                    pw.CharField(default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}")),
                migrator.add_column(BackupProfileModel._meta.table_name, 'prune_prefix',
                                    pw.CharField(default="{hostname}-{profile_slug}-")),
            )
        if current_schema.version < 10:
            _apply_schema_update(
                current_schema, 10,
                migrator.add_column(BackupProfileModel._meta.table_name, 'pre_backup_cmd',
                                    pw.CharField(default='')),
                migrator.add_column(BackupProfileModel._meta.table_name, 'post_backup_cmd',
                                    pw.CharField(default='')),
            )
        if current_schema.version < 11:
            # Data-only migration: normalize stored compression identifiers.
            _apply_schema_update(current_schema, 11)
            for profile in BackupProfileModel:
                if profile.compression == 'zstd':
                    profile.compression = 'zstd,3'
                if profile.compression == 'lzma,6':
                    profile.compression = 'auto,lzma,6'
                profile.save()
        if current_schema.version < 12:
            _apply_schema_update(
                current_schema, 12,
                migrator.add_column(RepoModel._meta.table_name, 'extra_borg_arguments',
                                    pw.CharField(default='')))
def init_db(con):
    """Initialize the application database.

    Binds the models to *con*, creates missing tables, seeds a default
    profile, syncs the platform-dependent settings catalog, prunes old
    event-log rows and applies pending schema migrations.

    :param con: an initialized peewee SQLite database connection.
    """
    db.initialize(con)
    db.connect()
    db.create_tables([
        RepoModel, RepoPassword, BackupProfileModel, SourceFileModel,
        SettingsModel, ArchiveModel, WifiSettingModel, EventLogModel, SchemaVersion
    ])

    # Ensure at least one profile exists for the UI to work with.
    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default')
        default_profile.save()

    # Default settings for all platforms.
    settings = [{
        'key': 'use_light_icon',
        'value': False,
        'type': 'checkbox',
        'label': 'Use light system tray icon (applies after restart, useful for dark themes).'
    }, {
        'key': 'enable_notifications',
        'value': True,
        'type': 'checkbox',
        'label': 'Display notifications when background tasks fail.'
    }, {
        'key': 'enable_notifications_success',
        'value': False,
        'type': 'checkbox',
        'label': 'Also notify about successful background tasks.'
    }]
    if sys.platform == 'darwin':
        # macOS-only settings (login item, Sparkle-style update checks).
        settings += [
            {
                'key': 'autostart',
                'value': False,
                'type': 'checkbox',
                'label': 'Add Vorta to Login Items in Preferences > Users and Groups > Login Items.'
            },
            {
                'key': 'check_for_updates',
                'value': True,
                'type': 'checkbox',
                'label': 'Check for updates on startup.'
            },
            {
                'key': 'updates_include_beta',
                'value': False,
                'type': 'checkbox',
                'label': 'Include pre-release versions when checking for updates.'
            },
        ]

    # Create missing settings and update labels. Leave setting values untouched.
    for setting in settings:
        s, created = SettingsModel.get_or_create(key=setting['key'], defaults=setting)
        s.label = setting['label']
        s.save()

    # Prune old log entries. NOTE(review): cutoff is 180 days (~6 months),
    # not the 3 months the original comment claimed — confirm intended window.
    # Fix: peewee DELETE queries are lazy; without .execute() this was a no-op.
    three_months_ago = datetime.now() - timedelta(days=180)
    EventLogModel.delete().where(EventLogModel.start_time < three_months_ago).execute()

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(
        id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()
    if not (created or current_schema.version == SCHEMA_VERSION):
        migrator = SqliteMigrator(con)
        if current_schema.version < 4:  # version 3 to 4
            _apply_schema_update(
                current_schema, 4,
                migrator.add_column(ArchiveModel._meta.table_name, 'duration', pw.FloatField(null=True)),
                migrator.add_column(ArchiveModel._meta.table_name, 'size', pw.IntegerField(null=True)))
        if current_schema.version < 5:
            _apply_schema_update(
                current_schema, 5,
                migrator.drop_not_null(WifiSettingModel._meta.table_name, 'last_connected'),
            )
        if current_schema.version < 6:
            _apply_schema_update(
                current_schema, 6,
                migrator.add_column(EventLogModel._meta.table_name, 'repo_url', pw.CharField(null=True)))
        if current_schema.version < 7:
            # Sources moved from per-config to per-profile; event log stores
            # the profile name directly.
            _apply_schema_update(
                current_schema, 7,
                migrator.rename_column(SourceFileModel._meta.table_name, 'config_id', 'profile_id'),
                migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
                migrator.add_column(EventLogModel._meta.table_name, 'profile', pw.CharField(null=True)))
        if current_schema.version < 8:
            _apply_schema_update(
                current_schema, 8,
                migrator.add_column(BackupProfileModel._meta.table_name, 'prune_keep_within',
                                    pw.CharField(null=True)))
        if current_schema.version < 9:
            _apply_schema_update(
                current_schema, 9,
                migrator.add_column(
                    BackupProfileModel._meta.table_name, 'new_archive_name',
                    pw.CharField(
                        default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}"
                    )),
                migrator.add_column(
                    BackupProfileModel._meta.table_name, 'prune_prefix',
                    pw.CharField(default="{hostname}-{profile_slug}-")),
            )
        if current_schema.version < 10:
            _apply_schema_update(
                current_schema, 10,
                migrator.add_column(BackupProfileModel._meta.table_name, 'pre_backup_cmd',
                                    pw.CharField(default='')),
                migrator.add_column(BackupProfileModel._meta.table_name, 'post_backup_cmd',
                                    pw.CharField(default='')),
            )
def run_migrations(current_schema, db_connection):
    """
    Apply new schema versions to database.

    Each block below upgrades the schema by one or more versions; blocks are
    strictly ordered and `_apply_schema_update` presumably bumps
    `current_schema.version` as it goes, so a database several versions
    behind walks through every intermediate step in sequence.

    See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    """
    migrator = SqliteMigrator(db_connection)
    if current_schema.version < 4:  # version 3 to 4
        # Archives gain duration and size statistics.
        _apply_schema_update(
            current_schema, 4,
            migrator.add_column(ArchiveModel._meta.table_name, 'duration', pw.FloatField(null=True)),
            migrator.add_column(ArchiveModel._meta.table_name, 'size', pw.IntegerField(null=True)))
    if current_schema.version < 5:
        # Allow Wifi entries that were never connected.
        _apply_schema_update(
            current_schema, 5,
            migrator.drop_not_null(WifiSettingModel._meta.table_name, 'last_connected'),
        )
    if current_schema.version < 6:
        _apply_schema_update(
            current_schema, 6,
            migrator.add_column(EventLogModel._meta.table_name, 'repo_url', pw.CharField(null=True)))
    if current_schema.version < 7:
        # Sources become per-profile; event log stores the profile name directly.
        _apply_schema_update(
            current_schema, 7,
            migrator.rename_column(SourceFileModel._meta.table_name, 'config_id', 'profile_id'),
            migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
            migrator.add_column(EventLogModel._meta.table_name, 'profile', pw.CharField(null=True)))
    if current_schema.version < 8:
        _apply_schema_update(
            current_schema, 8,
            migrator.add_column(BackupProfileModel._meta.table_name, 'prune_keep_within', pw.CharField(null=True)))
    if current_schema.version < 9:
        # Per-profile archive naming and prune prefix templates.
        _apply_schema_update(
            current_schema, 9,
            migrator.add_column(
                BackupProfileModel._meta.table_name, 'new_archive_name',
                pw.CharField(
                    default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}"
                )),
            migrator.add_column(
                BackupProfileModel._meta.table_name, 'prune_prefix',
                pw.CharField(default="{hostname}-{profile_slug}-")),
        )
    if current_schema.version < 10:
        # Optional shell hooks run before/after a backup.
        _apply_schema_update(
            current_schema, 10,
            migrator.add_column(BackupProfileModel._meta.table_name, 'pre_backup_cmd', pw.CharField(default='')),
            migrator.add_column(BackupProfileModel._meta.table_name, 'post_backup_cmd', pw.CharField(default='')),
        )
    if current_schema.version < 11:
        # Data-only migration: normalize stored compression identifiers.
        _apply_schema_update(current_schema, 11)
        for profile in BackupProfileModel:
            if profile.compression == 'zstd':
                profile.compression = 'zstd,3'
            if profile.compression == 'lzma,6':
                profile.compression = 'auto,lzma,6'
            profile.save()
    if current_schema.version < 12:
        _apply_schema_update(
            current_schema, 12,
            migrator.add_column(RepoModel._meta.table_name, 'extra_borg_arguments', pw.CharField(default='')))
    if current_schema.version < 13:
        # Migrate ArchiveModel data to new table to remove unique constraint from snapshot_id column.
        # NOTE(review): uses module-level `DB` here rather than the
        # `db_connection` parameter — presumably the same handle; confirm.
        tables = DB.get_tables()
        if ArchiveModel.select().count() == 0 and 'snapshotmodel' in tables:
            cursor = DB.execute_sql('select * from snapshotmodel;')
            fields = [
                ArchiveModel.id, ArchiveModel.snapshot_id, ArchiveModel.name,
                ArchiveModel.repo, ArchiveModel.time, ArchiveModel.duration,
                ArchiveModel.size
            ]
            data = [row for row in cursor.fetchall()]
            with DB.atomic():
                # Copy rows over in batches of 1000 to keep statements small.
                size = 1000
                for i in range(0, len(data), size):
                    ArchiveModel.insert_many(data[i:i + size], fields=fields).execute()
        _apply_schema_update(current_schema, 13)
    if current_schema.version < 14:
        _apply_schema_update(
            current_schema, 14,
            migrator.add_column(SettingsModel._meta.table_name, 'str_value', pw.CharField(default='')))
    if current_schema.version < 15:
        _apply_schema_update(
            current_schema, 15,
            migrator.add_column(BackupProfileModel._meta.table_name, 'dont_run_on_metered_networks',
                                pw.BooleanField(default=True)))
    if current_schema.version < 16:
        # Cached per-source directory statistics (-1 means "not yet computed").
        _apply_schema_update(
            current_schema, 16,
            migrator.add_column(SourceFileModel._meta.table_name, 'dir_size', pw.BigIntegerField(default=-1)),
            migrator.add_column(SourceFileModel._meta.table_name, 'dir_files_count', pw.BigIntegerField(default=-1)),
            migrator.add_column(SourceFileModel._meta.table_name, 'path_isdir', pw.BooleanField(default=False)))
    if current_schema.version < 17:
        _apply_schema_update(
            current_schema, 17,
            migrator.add_column(RepoModel._meta.table_name, 'create_backup_cmd', pw.CharField(default='')))
    if current_schema.version < 18:
        # Flexible scheduling (interval unit/count, missed-run catch-up) and
        # an end timestamp on log entries.
        _apply_schema_update(
            current_schema, 18,
            migrator.add_column(BackupProfileModel._meta.table_name, 'schedule_interval_unit',
                                pw.CharField(default='hours')),
            migrator.add_column(BackupProfileModel._meta.table_name, 'schedule_interval_count',
                                pw.IntegerField(default=3)),
            migrator.add_column(BackupProfileModel._meta.table_name, 'schedule_make_up_missed',
                                pw.BooleanField(default=False)),
            migrator.add_column(EventLogModel._meta.table_name, 'end_time',
                                pw.DateTimeField(default=datetime.now)))
def init_db(con):
    """Initialize the application database.

    Binds the models to *con* with user-only file permissions, creates
    missing tables, seeds a default profile, syncs misc settings (with
    platform-aware defaults), prunes old event-log rows and applies
    pending schema migrations.

    :param con: an initialized peewee SQLite database connection.
    """
    # Restrict the database file's permissions to the current user.
    os.umask(0o0077)
    db.initialize(con)
    db.connect()
    db.create_tables([
        RepoModel, RepoPassword, BackupProfileModel, SourceFileModel,
        SettingsModel, ArchiveModel, WifiSettingModel, EventLogModel, SchemaVersion
    ])

    # Ensure at least one profile exists for the UI to work with.
    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default')
        default_profile.save()

    # Create missing settings and update labels. Leave setting values untouched.
    for setting in get_misc_settings():
        s, created = SettingsModel.get_or_create(key=setting['key'], defaults=setting)
        if created and setting['key'] == "use_dark_theme":
            # Check if macOS with enabled dark mode
            s.value = bool(uses_dark_mode())
        if created and setting['key'] == "use_light_icon":
            # Check if macOS with enabled dark mode or Linux with GNOME DE
            s.value = bool(uses_dark_mode()) or 'GNOME' in os.environ.get(
                'XDG_CURRENT_DESKTOP', '')
        if created and setting['key'] == "foreground":
            # Run in foreground when no system tray is available.
            s.value = not bool(is_system_tray_available())
        if created and setting['key'] == "enable_notifications_success":
            s.value = not bool(is_system_tray_available())
        s.label = setting['label']
        s.save()

    # Prune old log entries. NOTE(review): cutoff is 180 days (~6 months),
    # not the 3 months the original comment claimed — confirm intended window.
    # Fix: peewee DELETE queries are lazy; without .execute() this was a no-op.
    three_months_ago = datetime.now() - timedelta(days=180)
    EventLogModel.delete().where(EventLogModel.start_time < three_months_ago).execute()

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(
        id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()
    if not (created or current_schema.version == SCHEMA_VERSION):
        migrator = SqliteMigrator(con)
        if current_schema.version < 4:  # version 3 to 4
            _apply_schema_update(
                current_schema, 4,
                migrator.add_column(ArchiveModel._meta.table_name, 'duration', pw.FloatField(null=True)),
                migrator.add_column(ArchiveModel._meta.table_name, 'size', pw.IntegerField(null=True)))
        if current_schema.version < 5:
            _apply_schema_update(
                current_schema, 5,
                migrator.drop_not_null(WifiSettingModel._meta.table_name, 'last_connected'),
            )
        if current_schema.version < 6:
            _apply_schema_update(
                current_schema, 6,
                migrator.add_column(EventLogModel._meta.table_name, 'repo_url', pw.CharField(null=True)))
        if current_schema.version < 7:
            # Sources moved from per-config to per-profile; event log stores
            # the profile name directly.
            _apply_schema_update(
                current_schema, 7,
                migrator.rename_column(SourceFileModel._meta.table_name, 'config_id', 'profile_id'),
                migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
                migrator.add_column(EventLogModel._meta.table_name, 'profile', pw.CharField(null=True)))
        if current_schema.version < 8:
            _apply_schema_update(
                current_schema, 8,
                migrator.add_column(BackupProfileModel._meta.table_name, 'prune_keep_within',
                                    pw.CharField(null=True)))
        if current_schema.version < 9:
            _apply_schema_update(
                current_schema, 9,
                migrator.add_column(
                    BackupProfileModel._meta.table_name, 'new_archive_name',
                    pw.CharField(
                        default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}"
                    )),
                migrator.add_column(
                    BackupProfileModel._meta.table_name, 'prune_prefix',
                    pw.CharField(default="{hostname}-{profile_slug}-")),
            )
        if current_schema.version < 10:
            _apply_schema_update(
                current_schema, 10,
                migrator.add_column(BackupProfileModel._meta.table_name, 'pre_backup_cmd',
                                    pw.CharField(default='')),
                migrator.add_column(BackupProfileModel._meta.table_name, 'post_backup_cmd',
                                    pw.CharField(default='')),
            )
        if current_schema.version < 11:
            # Data-only migration: normalize stored compression identifiers.
            _apply_schema_update(current_schema, 11)
            for profile in BackupProfileModel:
                if profile.compression == 'zstd':
                    profile.compression = 'zstd,3'
                if profile.compression == 'lzma,6':
                    profile.compression = 'auto,lzma,6'
                profile.save()
        if current_schema.version < 12:
            _apply_schema_update(
                current_schema, 12,
                migrator.add_column(RepoModel._meta.table_name, 'extra_borg_arguments',
                                    pw.CharField(default='')))
        if current_schema.version < 13:
            # Migrate ArchiveModel data to new table to remove unique
            # constraint from snapshot_id column. (Was a bare string
            # statement; converted to a real comment.)
            tables = db.get_tables()
            if ArchiveModel.select().count() == 0 and 'snapshotmodel' in tables:
                cursor = db.execute_sql('select * from snapshotmodel;')
                fields = [
                    ArchiveModel.id, ArchiveModel.snapshot_id, ArchiveModel.name,
                    ArchiveModel.repo, ArchiveModel.time, ArchiveModel.duration,
                    ArchiveModel.size
                ]
                # fetchall() already returns a list; no copy needed.
                data = cursor.fetchall()
                with db.atomic():
                    # Insert in batches to stay within SQLite's variable limit.
                    size = 1000
                    for i in range(0, len(data), size):
                        ArchiveModel.insert_many(data[i:i + size], fields=fields).execute()
            _apply_schema_update(current_schema, 13)
def upgrade(version):
    """Upgrade a save database from format *version* to *version* + 1.

    Each branch handles exactly one source version; the final statement of
    every branch bumps the stored save_version by one. Foreign keys are
    temporarily disabled around table rebuilds (SQLite cannot ALTER
    constraints in place, so tables are copied, dropped and recreated).
    Raises for unknown versions.
    """
    if version == 3:
        # Rebuild grid_layer against the current model definition,
        # preserving its rows via a temporary copy.
        from models import GridLayer
        db.execute_sql(
            "CREATE TEMPORARY TABLE _grid_layer AS SELECT * FROM grid_layer")
        db.drop_tables([GridLayer])
        db.create_tables([GridLayer])
        db.execute_sql("INSERT INTO grid_layer SELECT * FROM _grid_layer")
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 4:
        # Rebuild location the same way, with FK enforcement off.
        from models import Location
        db.foreign_keys = False
        db.execute_sql(
            "CREATE TEMPORARY TABLE _location AS SELECT * FROM location")
        db.execute_sql("DROP TABLE location")
        db.create_tables([Location])
        db.execute_sql("INSERT INTO location SELECT * FROM _location")
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 5:
        # Give each user an active layer: add the FK nullable, backfill it
        # with the location's "tokens" layer, then enforce NOT NULL.
        from models import Layer
        from peewee import ForeignKeyField
        migrator = SqliteMigrator(db)
        field = ForeignKeyField(Layer, Layer.id, backref="active_users", null=True)
        with db.atomic():
            migrate(
                migrator.add_column("location_user_option", "active_layer_id",
                                    field))
            from models import LocationUserOption
            LocationUserOption._meta.add_field("active_layer", field)
            for luo in LocationUserOption.select():
                luo.active_layer = luo.location.layers.select().where(
                    Layer.name == "tokens")[0]
                luo.save()
            migrate(
                migrator.add_not_null("location_user_option", "active_layer_id"))
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 6:
        # Relax the constraint added in version 5.
        migrator = SqliteMigrator(db)
        migrate(
            migrator.drop_not_null("location_user_option", "active_layer_id"))
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 7:
        # Remove shape index unique constraint
        from models import Shape
        db.foreign_keys = False
        db.execute_sql("CREATE TEMPORARY TABLE _shape AS SELECT * FROM shape")
        db.execute_sql("DROP TABLE shape")
        db.create_tables([Shape])
        db.execute_sql("INSERT INTO shape SELECT * FROM _shape")
        db.foreign_keys = True
        # Check all indices and reset to 0 index
        logger.info("Validating all shape indices")
        from models import Layer
        with db.atomic():
            for layer in Layer.select():
                # Re-number shapes per layer contiguously from 0, ordered by
                # absolute index value.
                shapes = layer.shapes.order_by(fn.ABS(Shape.index))
                for i, shape in enumerate(shapes):
                    shape.index = i
                    shape.save()
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 8:
        from models import Polygon
        db.create_tables([Polygon])
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 9:
        # Per-location vision settings.
        from models import Location
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        with db.atomic():
            migrate(
                migrator.add_column("location", "vision_mode",
                                    Location.vision_mode),
                migrator.add_column("location", "vision_min_range",
                                    Location.vision_min_range),
                migrator.add_column("location", "vision_max_range",
                                    Location.vision_max_range),
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 10:
        from models import Shape
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        with db.atomic():
            migrate(
                migrator.add_column("shape", "name_visible", Shape.name_visible))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 11:
        # Introduce labels and per-user label filters.
        from models import Label, LocationUserOption, ShapeLabel
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        with db.atomic():
            db.create_tables([Label, ShapeLabel])
            migrate(
                migrator.add_column(
                    "location_user_option",
                    "active_filters",
                    LocationUserOption.active_filters,
                ))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 12:
        # Split "category:name" label names into separate columns.
        from models import Label, LabelSelection
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        with db.atomic():
            try:
                migrate(
                    migrator.add_column("label", "category", Label.category))
            except OperationalError as e:
                # Tolerate re-running against a database that already has
                # the column; re-raise anything else.
                if e.args[0] != "duplicate column name: category":
                    raise e
            db.create_tables([LabelSelection])
        with db.atomic():
            for label in Label:
                if ":" not in label.name:
                    continue
                # Everything before the first ':' is the category; the rest
                # (re-joined) stays as the name.
                cat, *name = label.name.split(":")
                label.category = cat
                label.name = ":".join(name)
                label.save()
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 13:
        # Drop the per-user label filters added in version 11.
        from models import LocationUserOption, MultiLine, Polygon
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        migrate(migrator.drop_column("location_user_option", "active_filters"))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 14:
        # Move shape subtypes to tables keyed by shape uuid: copy each
        # subtype table aside, recreate it with a shape_id PK/FK, and copy
        # back only rows whose shape still exists.
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        from models import GridLayer, Layer
        db.execute_sql(
            'CREATE TABLE IF NOT EXISTS "base_rect" ("shape_id" TEXT NOT NULL PRIMARY KEY, "width" REAL NOT NULL, "height" REAL NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)'
        )
        db.execute_sql(
            'CREATE TABLE IF NOT EXISTS "shape_type" ("shape_id" TEXT NOT NULL PRIMARY KEY, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)'
        )
        shape_types = [
            "asset_rect",
            "circle",
            "circular_token",
            "line",
            "multi_line",
            "polygon",
            "rect",
            "text",
        ]
        with db.atomic():
            for table in shape_types:
                db.execute_sql(
                    f"CREATE TEMPORARY TABLE _{table} AS SELECT * FROM {table}"
                )
                db.execute_sql(f"DROP TABLE {table}")
            for query in [
                    'CREATE TABLE IF NOT EXISTS "asset_rect" ("shape_id" TEXT NOT NULL PRIMARY KEY, "width" REAL NOT NULL, "height" REAL NOT NULL, "src" TEXT NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                    'CREATE TABLE IF NOT EXISTS "circle" ("shape_id" TEXT NOT NULL PRIMARY KEY, "radius" REAL NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                    'CREATE TABLE IF NOT EXISTS "circular_token" ("shape_id" TEXT NOT NULL PRIMARY KEY, "radius" REAL NOT NULL, "text" TEXT NOT NULL, "font" TEXT NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                    'CREATE TABLE IF NOT EXISTS "line" ("shape_id" TEXT NOT NULL PRIMARY KEY, "x2" REAL NOT NULL, "y2" REAL NOT NULL, "line_width" INTEGER NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                    'CREATE TABLE IF NOT EXISTS "multi_line" ("shape_id" TEXT NOT NULL PRIMARY KEY, "line_width" INTEGER NOT NULL, "points" TEXT NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                    'CREATE TABLE IF NOT EXISTS "polygon" ("shape_id" TEXT NOT NULL PRIMARY KEY, "vertices" TEXT NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                    'CREATE TABLE IF NOT EXISTS "rect" ("shape_id" TEXT NOT NULL PRIMARY KEY, "width" REAL NOT NULL, "height" REAL NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                    'CREATE TABLE IF NOT EXISTS "text" ("shape_id" TEXT NOT NULL PRIMARY KEY, "text" TEXT NOT NULL, "font" TEXT NOT NULL, "angle" REAL NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
            ]:
                db.execute_sql(query)
            for table in shape_types:
                db.execute_sql(
                    f"INSERT INTO {table} SELECT _{table}.* FROM _{table} INNER JOIN shape ON shape.uuid = _{table}.uuid"
                )
        # Link grid_layer rows to their layer; drop orphans, then enforce
        # the new FK as NOT NULL.
        # NOTE(review): ForeignKeyField is only imported locally in other
        # branches — confirm it is in scope when this branch runs.
        field = ForeignKeyField(Layer, Layer.id, null=True)
        with db.atomic():
            migrate(migrator.add_column("grid_layer", "layer_id", field))
            for gl in GridLayer.select():
                l = Layer.get_or_none(id=gl.id)
                if l:
                    gl.layer = l
                    gl.save()
                else:
                    gl.delete_instance()
            migrate(migrator.add_not_null("grid_layer", "layer_id"))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 15:
        from peewee import BooleanField
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column("room", "is_locked",
                                    BooleanField(default=False)))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 16:
        from peewee import TextField
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column("location", "unit_size_unit",
                                    TextField(default="ft")))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 17:
        # Fold multi_line shapes into polygon as open polygons, then retire
        # the multi_line table and retag the shapes.
        from peewee import BooleanField, IntegerField
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column("polygon", "open_polygon",
                                    BooleanField(default=False)),
                migrator.add_column("polygon", "line_width",
                                    IntegerField(default=2)),
            )
            db.execute_sql(
                "INSERT INTO polygon (shape_id, line_width, vertices, open_polygon) SELECT shape_id, line_width, points, 1 FROM multi_line"
            )
            db.execute_sql("DROP TABLE multi_line")
            db.execute_sql(
                "UPDATE shape SET type_ = 'polygon' WHERE type_ = 'multiline'")
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 18:
        from peewee import TextField
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(migrator.add_column("user", "email", TextField(null=True)))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 19:
        # Introduce floors: every location gets a single "ground" floor at
        # index 0, and layers are re-parented from location to floor.
        from peewee import ForeignKeyField
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        db.execute_sql(
            'CREATE TABLE IF NOT EXISTS "floor" ("id" INTEGER NOT NULL PRIMARY KEY, "location_id" INTEGER NOT NULL, "name" TEXT, "index" INTEGER NOT NULL, FOREIGN KEY ("location_id") REFERENCES "location" ("id") ON DELETE CASCADE)'
        )
        db.execute_sql(
            'INSERT INTO floor (location_id, name, "index") SELECT id, "ground", 0 FROM location'
        )
        with db.atomic():
            db.execute_sql(
                "CREATE TEMPORARY TABLE _layer AS SELECT * FROM layer")
            db.execute_sql("DROP TABLE layer")
            db.execute_sql(
                'CREATE TABLE IF NOT EXISTS "layer" ("id" INTEGER NOT NULL PRIMARY KEY, "floor_id" INTEGER NOT NULL, "name" TEXT NOT NULL, "type_" TEXT NOT NULL, "player_visible" INTEGER NOT NULL, "player_editable" INTEGER NOT NULL, "selectable" INTEGER NOT NULL, "index" INTEGER NOT NULL, FOREIGN KEY ("floor_id") REFERENCES "floor" ("id") ON DELETE CASCADE)'
            )
            db.execute_sql(
                'INSERT INTO layer (id, floor_id, name, type_, player_visible, player_editable, selectable, "index") SELECT _layer.id, floor.id, _layer.name, type_, player_visible, player_editable, selectable, _layer."index" FROM _layer INNER JOIN floor ON floor.location_id = _layer.location_id'
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 20:
        # NOTE(review): BooleanField is imported twice here in the original.
        from peewee import BooleanField, BooleanField, IntegerField
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column("shape", "badge", IntegerField(default=1)),
                migrator.add_column("shape", "show_badge",
                                    BooleanField(default=False)),
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    else:
        raise Exception(
            f"No upgrade code for save format {version} was found.")