def upgrade():
    """Apply pending schema migrations based on the version string in Settings.

    NOTE(review): versions are compared as plain strings ('0.2' < '0.2.1'),
    which only works while all components stay single-digit — confirm before
    adding a version like '0.10'.
    """
    settings = db.Settings.get()
    version = settings.info['version']
    migrator = SqliteMigrator(db.database)

    if version < '0.2':
        # Every content table gains a JSON 'info' column; cards also get a
        # last-review timestamp.
        info_ops = [
            migrator.add_column(table, 'info', sqlite_ext.JSONField(default=dict))
            for table in ('deck', 'media', 'model', 'template', 'note', 'card')
        ]
        migrate(
            *info_ops,
            migrator.add_column('card', 'last_review', pv.TimestampField()),
        )
        settings.info['version'] = '0.2'
        settings.save()

    if version < '0.2.1':
        # Re-create the timestamp columns as DateTime fields defaulting to now.
        migrate(
            migrator.drop_column('card', 'last_review'),
            migrator.add_column('card', 'last_review', pv.DateTimeField(default=datetime.now)),
            migrator.drop_column('note', 'modified'),
            migrator.add_column('note', 'modified', pv.DateTimeField(default=datetime.now)),
        )
        settings.info['version'] = '0.2.1'
        settings.save()
def db_upgrade(old_version):
    """Bring the clan-battle database schema from *old_version* up to date.

    Each block applies only when the stored version predates it, so calling
    this against any older database is safe and idempotent per step.
    """
    migrator = SqliteMigrator(_db)

    if old_version < 2:
        User_login.create_table()
        migrate(
            migrator.add_column('clan_member', 'remaining_status', TextField(null=True)),
            migrator.add_column('clan_challenge', 'message', TextField(null=True)),
            migrator.add_column('clan_group', 'boss_lock_type', IntegerField(default=0)),
            migrator.drop_column('user', 'last_save_slot'),
        )

    if old_version < 3:
        # Auth cookies are no longer persisted server-side.
        migrate(
            migrator.drop_column('user', 'auth_cookie'),
            migrator.drop_column('user', 'auth_cookie_expire_time'),
        )

    if old_version < 4:
        migrate(migrator.add_column('user', 'deleted', BooleanField(default=False)))

    if old_version < 5:
        migrate(migrator.add_column('user', 'must_change_password', BooleanField(default=True)))

    if old_version < 7:
        migrate(
            migrator.drop_column('clan_challenge', 'comment'),
            migrator.add_column('clan_challenge', 'behalf', IntegerField(null=True)),
            migrator.drop_column('clan_subscribe', 'comment'),
            migrator.add_column('clan_subscribe', 'message', TextField(null=True)),
            migrator.add_column('clan_group', 'apikey', CharField(max_length=16, default=rand_string)),
        )

    if old_version < 8:
        migrate(
            migrator.add_column('clan_group', 'deleted', BooleanField(default=False)),
            migrator.add_column('clan_group', 'battle_id', IntegerField(default=0)),
            migrator.add_column('clan_challenge', 'bid', IntegerField(default=0)),
            migrator.add_index('clan_challenge', ('bid', 'gid'), False),
        )

    if old_version < 9:
        migrate(migrator.add_index('clan_member', ('qqid',), False))

    if old_version < 10:
        # Indexes for the most common lookup patterns.
        migrate(
            migrator.add_index('clan_member', ('group_id',), False),
            migrator.add_index('clan_subscribe', ('gid',), False),
            migrator.add_index('clan_challenge', ('qqid',), False),
            migrator.add_index('clan_challenge', ('qqid', 'challenge_pcrdate'), False),
            migrator.add_index('clan_challenge', ('bid', 'gid', 'challenge_pcrdate'), False),
        )

    # Record the schema version we just upgraded to.
    DB_schema.replace(key='version', value=str(_version)).execute()
def KfpMigrate(database: SqliteDatabase):
    """Apply in-place column migrations for whichever tables already exist.

    Each table is inspected with get_columns() so every step is idempotent:
    columns are only added/dropped/renamed when their presence check says so.
    Returns True when the pass completes.
    """
    tables = database.get_tables()
    migrator = SqliteMigrator(database)

    if "rpgcharacter" in tables:
        columns = database.get_columns("rpgcharacter")
        if not KfpMigrator.hasColumn("retired", columns):
            migrate(migrator.add_column("rpgcharacter", "retired", BooleanField(default=False)))
        if not KfpMigrator.hasColumn("last_attack", columns):
            # Default one day in the past so existing characters may attack right away.
            last_attack_default = DateTimeField(default=datetime.now() + timedelta(days=-1))
            migrate(migrator.add_column("rpgcharacter", "last_attack", last_attack_default))

    if "member" in tables:
        columns = database.get_columns("member")
        if not KfpMigrator.hasColumn("token", columns):
            migrate(migrator.add_column("member", 'token', BigIntegerField(default=100)))

    if "channel" in tables:
        columns = database.get_columns("channel")
        if not KfpMigrator.hasColumn("channel_id", columns):
            # Rename the discord-id column and add a guild id next to it.
            migrate(
                migrator.add_column('channel', 'channel_guild_id', IntegerField(default=-1)),
                migrator.rename_column('channel', 'channel_discord_id', 'channel_id'),
            )

    if "item" in tables:
        columns = database.get_columns("item")
        # These columns are superseded by the consolidated type/buff fields below.
        for obsolete in ("hidden", "buff_type", "buff_value"):
            if KfpMigrator.hasColumn(obsolete, columns):
                migrate(migrator.drop_column('item', obsolete))
        if not KfpMigrator.hasColumn("type", columns):
            migrate(migrator.add_column('item', 'type', CharField(default=ItemType.NONE)))
        if not KfpMigrator.hasColumn("buff", columns):
            migrate(migrator.add_column('item', 'buff', BuffField(default=Buff(BuffType.NONE, 0, -1))))
        if not KfpMigrator.hasColumn("description", columns):
            migrate(migrator.add_column('item', 'description', CharField(default="")))

    return True
def db_upgrade(old_version):
    """Migrate the database schema from *old_version* up to the current one.

    Version checks are cumulative: a very old database passes through every
    block in order.
    """
    migrator = SqliteMigrator(_db)
    if old_version < 2:
        User_login.create_table()
        migrate(
            migrator.add_column('clan_member', 'remaining_status', TextField(null=True)),
            migrator.add_column('clan_challenge', 'message', TextField(null=True)),
            migrator.add_column('clan_group', 'boss_lock_type', IntegerField(default=0)),
            migrator.drop_column('user', 'last_save_slot'),
        )
    if old_version < 3:
        # Login cookies are no longer kept in the database.
        migrate(
            migrator.drop_column('user', 'auth_cookie'),
            migrator.drop_column('user', 'auth_cookie_expire_time'),
        )
    if old_version < 4:
        migrate(migrator.add_column('user', 'deleted', BooleanField(default=False)))
    if old_version < 5:
        migrate(migrator.add_column('user', 'must_change_password', BooleanField(default=True)))
    # Persist the schema version we are now at.
    DB_schema.replace(key='version', value=str(_version)).execute()
def init_db(con):
    """Bind models to *con*, create tables, seed defaults, and run migrations.

    :param con: peewee database connection the models are initialized with.
    """
    db.initialize(con)
    db.connect()
    db.create_tables([
        RepoModel, RepoPassword, BackupProfileModel, SourceDirModel, ArchiveModel,
        WifiSettingModel, EventLogModel, SchemaVersion
    ])

    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default Profile')
        default_profile.save()

    # Delete log entries older than 180 days (~6 months; the old comment said
    # "3 months" but the code has always used 180 days).
    # Bug fix: the delete query was built but never run — peewee only issues
    # the DELETE when .execute() is called on the query.
    cutoff = datetime.now() - timedelta(days=180)
    EventLogModel.delete().where(EventLogModel.start_time < cutoff).execute()

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(
        id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()
    if created or current_schema.version == SCHEMA_VERSION:
        return
    else:
        migrator = SqliteMigrator(con)
        if current_schema.version < 4:  # version 3 to 4
            _apply_schema_update(
                current_schema, 4,
                migrator.add_column(ArchiveModel._meta.table_name, 'duration', pw.FloatField(null=True)),
                migrator.add_column(ArchiveModel._meta.table_name, 'size', pw.IntegerField(null=True)))
        if current_schema.version < 5:
            _apply_schema_update(
                current_schema, 5,
                migrator.drop_not_null(WifiSettingModel._meta.table_name, 'last_connected'),
            )
        if current_schema.version < 6:
            _apply_schema_update(
                current_schema, 6,
                migrator.add_column(EventLogModel._meta.table_name, 'repo_url', pw.CharField(null=True)))
        if current_schema.version < 7:
            _apply_schema_update(
                current_schema, 7,
                migrator.rename_column(SourceDirModel._meta.table_name, 'config_id', 'profile_id'),
                migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
                migrator.add_column(EventLogModel._meta.table_name, 'profile', pw.CharField(null=True)))
def apply_v2():
    """Migrate the results DB to schema v2: add point columns, drop bitmaps."""
    results_db = SqliteDatabase(r"data\results.db")
    migrator = SqliteMigrator(results_db)
    # Both point columns are nullable text fields capped at 100 characters.
    points1_field = CharField(max_length=100, null=True)
    points2_field = CharField(max_length=100, null=True)
    migrate(
        migrator.add_column("shapedistance", "points1", points1_field),
        migrator.add_column("shapedistance", "points2", points2_field),
        migrator.drop_column("shapedistance", "bitmap"),
    )
class TestMigrations(unittest.TestCase):
    """Exercise peewee schema migrations against the Food test table."""

    def setUp(self):
        Food.create_table()
        self.migrator = SqliteMigrator(database)

    def tearDown(self):
        Food.drop_table()

    def test_add_column(self):
        Food.another_column = CharField(null=True)
        add_op = self.migrator.add_column('food', 'another_column', Food.another_column)
        migrate(add_op)
        # TODO: How to test if another_column now actually exists in both the `food` table as the `foodversioned` table?

    def test_drop_column(self):
        del Food.is_tasty
        drop_op = self.migrator.drop_column('food', 'is_tasty')
        migrate(drop_op)
def run_migration():
    """Rebuild the recipe 'picture' column, rewriting each stored value.

    Existing pictures are snapshotted and transformed *before* the column is
    dropped, then written back after the new nullable column is in place.
    """
    recipes_db = SqliteDatabase(os.path.join(SCRIPT_DIR, '../recipes.db'))
    migrator = SqliteMigrator(recipes_db)
    new_picture_field = TextField(null=True)

    # Snapshot and transform all current picture values first.
    updated_pictures = []
    for recipe_id, picture in recipes_db.execute_sql('SELECT id, picture FROM recipe'):
        updated_pictures.append((recipe_id, update_picture(picture)))

    migrate(
        migrator.drop_column('recipe', 'picture'),
        migrator.add_column('recipe', 'picture', new_picture_field),
    )

    # Write the transformed values back; skip rows whose new value is empty.
    for recipe_id, picture in updated_pictures:
        if picture:
            recipes_db.execute_sql('UPDATE recipe SET picture=? WHERE id=?', (picture, recipe_id))
def init_db(con):
    """Bind models to *con*, seed defaults/settings, and run schema migrations.

    :param con: peewee database connection the models are initialized with.
    """
    db.initialize(con)
    db.connect()
    db.create_tables([RepoModel, RepoPassword, BackupProfileModel, SourceFileModel,
                      SettingsModel, ArchiveModel, WifiSettingModel, EventLogModel, SchemaVersion])

    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default')
        default_profile.save()

    # Create missing settings and update labels. Leave setting values untouched.
    for setting in get_misc_settings():
        s, created = SettingsModel.get_or_create(key=setting['key'], defaults=setting)
        if created and setting['key'] == "use_dark_theme":
            # Check if macOS with enabled dark mode
            s.value = bool(uses_dark_mode())
        if created and setting['key'] == "use_light_icon":
            # Check if macOS with enabled dark mode or Linux with GNOME DE
            s.value = bool(uses_dark_mode()) or os.environ.get('XDG_CURRENT_DESKTOP', '') == 'GNOME'
        s.label = setting['label']
        s.save()

    # Delete log entries older than 180 days (~6 months; the old comment said
    # "3 months" but the code has always used 180 days).
    # Bug fix: the delete query was built but never run — peewee only issues
    # the DELETE when .execute() is called on the query.
    cutoff = datetime.now() - timedelta(days=180)
    EventLogModel.delete().where(EventLogModel.start_time < cutoff).execute()

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()
    if created or current_schema.version == SCHEMA_VERSION:
        pass
    else:
        migrator = SqliteMigrator(con)
        if current_schema.version < 4:  # version 3 to 4
            _apply_schema_update(
                current_schema, 4,
                migrator.add_column(ArchiveModel._meta.table_name, 'duration', pw.FloatField(null=True)),
                migrator.add_column(ArchiveModel._meta.table_name, 'size', pw.IntegerField(null=True))
            )
        if current_schema.version < 5:
            _apply_schema_update(
                current_schema, 5,
                migrator.drop_not_null(WifiSettingModel._meta.table_name, 'last_connected'),
            )
        if current_schema.version < 6:
            _apply_schema_update(
                current_schema, 6,
                migrator.add_column(EventLogModel._meta.table_name, 'repo_url', pw.CharField(null=True))
            )
        if current_schema.version < 7:
            _apply_schema_update(
                current_schema, 7,
                migrator.rename_column(SourceFileModel._meta.table_name, 'config_id', 'profile_id'),
                migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
                migrator.add_column(EventLogModel._meta.table_name, 'profile', pw.CharField(null=True))
            )
        if current_schema.version < 8:
            _apply_schema_update(
                current_schema, 8,
                migrator.add_column(BackupProfileModel._meta.table_name, 'prune_keep_within',
                                    pw.CharField(null=True)))
        if current_schema.version < 9:
            _apply_schema_update(
                current_schema, 9,
                migrator.add_column(BackupProfileModel._meta.table_name, 'new_archive_name',
                                    pw.CharField(default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}")),
                migrator.add_column(BackupProfileModel._meta.table_name, 'prune_prefix',
                                    pw.CharField(default="{hostname}-{profile_slug}-")),
            )
        if current_schema.version < 10:
            _apply_schema_update(
                current_schema, 10,
                migrator.add_column(BackupProfileModel._meta.table_name, 'pre_backup_cmd', pw.CharField(default='')),
                migrator.add_column(BackupProfileModel._meta.table_name, 'post_backup_cmd', pw.CharField(default='')),
            )
        if current_schema.version < 11:
            _apply_schema_update(current_schema, 11)
            # Normalize compression identifiers to their newer explicit forms.
            for profile in BackupProfileModel:
                if profile.compression == 'zstd':
                    profile.compression = 'zstd,3'
                if profile.compression == 'lzma,6':
                    profile.compression = 'auto,lzma,6'
                profile.save()
        if current_schema.version < 12:
            _apply_schema_update(
                current_schema, 12,
                migrator.add_column(RepoModel._meta.table_name, 'extra_borg_arguments', pw.CharField(default='')))
def init_db(con):
    """Bind models to *con*, ensure default profile/settings, run migrations.

    :param con: peewee database connection the models are initialized with.
    """
    db.initialize(con)
    db.connect()
    db.create_tables([
        RepoModel, RepoPassword, BackupProfileModel, SourceFileModel, SettingsModel,
        ArchiveModel, WifiSettingModel, EventLogModel, SchemaVersion
    ])

    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default')
        default_profile.save()

    # Default settings for all platforms.
    settings = [{
        'key': 'use_light_icon',
        'value': False,
        'type': 'checkbox',
        'label': 'Use light system tray icon (applies after restart, useful for dark themes).'
    }, {
        'key': 'enable_notifications',
        'value': True,
        'type': 'checkbox',
        'label': 'Display notifications when background tasks fail.'
    }, {
        'key': 'enable_notifications_success',
        'value': False,
        'type': 'checkbox',
        'label': 'Also notify about successful background tasks.'
    }]
    if sys.platform == 'darwin':
        # macOS-only settings (login items and Sparkle-style update checks).
        settings += [
            {
                'key': 'autostart',
                'value': False,
                'type': 'checkbox',
                'label': 'Add Vorta to Login Items in Preferences > Users and Groups > Login Items.'
            },
            {
                'key': 'check_for_updates',
                'value': True,
                'type': 'checkbox',
                'label': 'Check for updates on startup.'
            },
            {
                'key': 'updates_include_beta',
                'value': False,
                'type': 'checkbox',
                'label': 'Include pre-release versions when checking for updates.'
            },
        ]

    # Create missing settings and update labels. Leave setting values untouched.
    for setting in settings:
        s, created = SettingsModel.get_or_create(key=setting['key'], defaults=setting)
        s.label = setting['label']
        s.save()

    # Delete log entries older than 180 days (~6 months; the old comment said
    # "3 months" but the code has always used 180 days).
    # Bug fix: the delete query was built but never run — peewee only issues
    # the DELETE when .execute() is called on the query.
    cutoff = datetime.now() - timedelta(days=180)
    EventLogModel.delete().where(EventLogModel.start_time < cutoff).execute()

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(
        id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()
    if created or current_schema.version == SCHEMA_VERSION:
        pass
    else:
        migrator = SqliteMigrator(con)
        if current_schema.version < 4:  # version 3 to 4
            _apply_schema_update(
                current_schema, 4,
                migrator.add_column(ArchiveModel._meta.table_name, 'duration', pw.FloatField(null=True)),
                migrator.add_column(ArchiveModel._meta.table_name, 'size', pw.IntegerField(null=True)))
        if current_schema.version < 5:
            _apply_schema_update(
                current_schema, 5,
                migrator.drop_not_null(WifiSettingModel._meta.table_name, 'last_connected'),
            )
        if current_schema.version < 6:
            _apply_schema_update(
                current_schema, 6,
                migrator.add_column(EventLogModel._meta.table_name, 'repo_url', pw.CharField(null=True)))
        if current_schema.version < 7:
            _apply_schema_update(
                current_schema, 7,
                migrator.rename_column(SourceFileModel._meta.table_name, 'config_id', 'profile_id'),
                migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
                migrator.add_column(EventLogModel._meta.table_name, 'profile', pw.CharField(null=True)))
        if current_schema.version < 8:
            _apply_schema_update(
                current_schema, 8,
                migrator.add_column(BackupProfileModel._meta.table_name, 'prune_keep_within',
                                    pw.CharField(null=True)))
        if current_schema.version < 9:
            _apply_schema_update(
                current_schema, 9,
                migrator.add_column(
                    BackupProfileModel._meta.table_name, 'new_archive_name',
                    pw.CharField(
                        default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}"
                    )),
                migrator.add_column(
                    BackupProfileModel._meta.table_name, 'prune_prefix',
                    pw.CharField(default="{hostname}-{profile_slug}-")),
            )
        if current_schema.version < 10:
            _apply_schema_update(
                current_schema, 10,
                migrator.add_column(BackupProfileModel._meta.table_name, 'pre_backup_cmd', pw.CharField(default='')),
                migrator.add_column(BackupProfileModel._meta.table_name, 'post_backup_cmd', pw.CharField(default='')),
            )
def upgrade(version): if version < 16: raise OldVersionException( f"Upgrade code for this version is >1 year old and is no longer in the active codebase to reduce clutter. You can still find this code on github, contact me for more info." ) elif version == 16: migrator = SqliteMigrator(db) db.foreign_keys = False with db.atomic(): migrate( migrator.add_column("location", "unit_size_unit", TextField(default="ft"))) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 17: migrator = SqliteMigrator(db) db.foreign_keys = False with db.atomic(): migrate( migrator.add_column("polygon", "open_polygon", BooleanField(default=False)), migrator.add_column("polygon", "line_width", IntegerField(default=2)), ) db.execute_sql( "INSERT INTO polygon (shape_id, line_width, vertices, open_polygon) SELECT shape_id, line_width, points, 1 FROM multi_line" ) db.execute_sql("DROP TABLE multi_line") db.execute_sql( "UPDATE shape SET type_ = 'polygon' WHERE type_ = 'multiline'") db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 18: migrator = SqliteMigrator(db) db.foreign_keys = False with db.atomic(): migrate(migrator.add_column("user", "email", TextField(null=True))) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 19: db.foreign_keys = False db.execute_sql( 'CREATE TABLE IF NOT EXISTS "floor" ("id" INTEGER NOT NULL PRIMARY KEY, "location_id" INTEGER NOT NULL, "name" TEXT, "index" INTEGER NOT NULL, FOREIGN KEY ("location_id") REFERENCES "location" ("id") ON DELETE CASCADE)' ) db.execute_sql( 'INSERT INTO floor (location_id, name, "index") SELECT id, "ground", 0 FROM location' ) with db.atomic(): db.execute_sql( "CREATE TEMPORARY TABLE _layer AS SELECT * FROM layer") db.execute_sql("DROP TABLE layer") db.execute_sql( 'CREATE TABLE IF NOT EXISTS "layer" ("id" INTEGER NOT NULL PRIMARY KEY, "floor_id" INTEGER 
NOT NULL, "name" TEXT NOT NULL, "type_" TEXT NOT NULL, "player_visible" INTEGER NOT NULL, "player_editable" INTEGER NOT NULL, "selectable" INTEGER NOT NULL, "index" INTEGER NOT NULL, FOREIGN KEY ("floor_id") REFERENCES "floor" ("id") ON DELETE CASCADE)' ) db.execute_sql( 'INSERT INTO layer (id, floor_id, name, type_, player_visible, player_editable, selectable, "index") SELECT _layer.id, floor.id, _layer.name, type_, player_visible, player_editable, selectable, _layer."index" FROM _layer INNER JOIN floor ON floor.location_id = _layer.location_id' ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 20: migrator = SqliteMigrator(db) db.foreign_keys = False with db.atomic(): migrate( migrator.add_column("shape", "badge", IntegerField(default=1)), migrator.add_column("shape", "show_badge", BooleanField(default=False)), ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 21: migrator = SqliteMigrator(db) db.foreign_keys = False with db.atomic(): migrate( migrator.add_column("user", "invert_alt", BooleanField(default=False))) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 22: db.foreign_keys = False with db.atomic(): db.execute_sql( 'CREATE TABLE IF NOT EXISTS "marker" ("id" INTEGER NOT NULL PRIMARY KEY, "shape_id" TEXT NOT NULL, "user_id" INTEGER NOT NULL, "location_id" INTEGER NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape"("uuid") ON DELETE CASCADE, FOREIGN KEY ("location_id") REFERENCES "location" ("id") ON DELETE CASCADE, FOREIGN KEY ("user_id") REFERENCES "user"("id") ON DELETE CASCADE)' ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 23: migrator = SqliteMigrator(db) db.foreign_keys = False with db.atomic(): migrate( migrator.add_column("shape_owner", "edit_access", BooleanField(default=True)), 
migrator.add_column("shape_owner", "vision_access", BooleanField(default=True)), migrator.add_column("shape", "default_edit_access", BooleanField(default=False)), migrator.add_column("shape", "default_vision_access", BooleanField(default=False)), ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 24: db.foreign_keys = False with db.atomic(): db.execute_sql( 'DELETE FROM "player_room" WHERE id IN (SELECT pr.id FROM "player_room" pr INNER JOIN "room" r ON r.id = pr.room_id WHERE r.creator_id = pr.player_id )' ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 25: # Move Room.dm_location and Room.player_location to PlayerRoom.active_location # Add PlayerRoom.role # Add order index on location from models import Location migrator = SqliteMigrator(db) db.foreign_keys = False with db.atomic(): migrate( migrator.add_column( "player_room", "active_location_id", ForeignKeyField( Location, Location.id, backref="players", on_delete="CASCADE", null=True, ), ), migrator.add_column("player_room", "role", IntegerField(default=0)), migrator.add_column("location", "index", IntegerField(default=0)), ) db.execute_sql( "UPDATE player_room SET active_location_id = (SELECT location.id FROM room INNER JOIN location ON room.id = location.room_id WHERE location.name = room.player_location AND room.id = player_room.room_id)" ) db.execute_sql( "INSERT INTO player_room (role, player_id, room_id, active_location_id) SELECT 1, u.id, r.id, l.id FROM room r INNER JOIN user u ON u.id = r.creator_id INNER JOIN location l ON l.name = r.dm_location AND l.room_id = r.id" ) db.execute_sql( "UPDATE location SET 'index' = (SELECT COUNT(*) + 1 FROM location l INNER JOIN room r WHERE location.room_id = r.id AND l.room_id = r.id AND l.'index' != 0) " ) migrate( migrator.drop_column("room", "player_location"), migrator.drop_column("room", "dm_location"), 
migrator.add_not_null("player_room", "active_location_id"), ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 26: # Move Location settings to a separate LocationSettings table # Add a default_settings field to Room that refers to such a LocationSettings row from models import LocationOptions migrator = SqliteMigrator(db) db.foreign_keys = False with db.atomic(): db.execute_sql( 'CREATE TABLE IF NOT EXISTS "location_options" ("id" INTEGER NOT NULL PRIMARY KEY, "unit_size" REAL DEFAULT 5, "unit_size_unit" TEXT DEFAULT "ft", "use_grid" INTEGER DEFAULT 1, "full_fow" INTEGER DEFAULT 0, "fow_opacity" REAL DEFAULT 0.3, "fow_los" INTEGER DEFAULT 0, "vision_mode" TEXT DEFAULT "triangle", "vision_min_range" REAL DEFAULT 1640, "vision_max_range" REAL DEFAULT 3281, "grid_size" INTEGER DEFAULT 50)' ) migrate( migrator.add_column( "location", "options_id", ForeignKeyField( LocationOptions, LocationOptions.id, on_delete="CASCADE", null=True, ), ), migrator.add_column( "room", "default_options_id", ForeignKeyField( LocationOptions, LocationOptions.id, on_delete="CASCADE", null=True, ), ), ) data = db.execute_sql( """SELECT l.id, r.id, l.unit_size, l.unit_size_unit, l.use_grid, l.full_fow, l.fow_opacity, l.fow_los, l.vision_mode, l.vision_min_range, l.vision_max_range, g.size AS grid_size FROM location l INNER JOIN room r INNER JOIN floor f ON f.id = (SELECT id FROM floor f2 WHERE f2.location_id = l.id LIMIT 1) INNER JOIN layer la INNER JOIN grid_layer g WHERE r.id = l.room_id AND la.floor_id = f.id AND la.name = 'grid' AND g.layer_id = la.id""" ) room_options = {} descr = data.description mapping = { "unit_size": 0, "unit_size_unit": 1, "use_grid": 2, "full_fow": 3, "fow_opacity": 4, "fow_los": 5, "vision_mode": 6, "vision_min_range": 7, "vision_max_range": 8, "grid_size": 9, } default_row = [ 5, "ft", True, False, 0.3, False, "triangle", 1640, 3281, 50 ] for row in data.fetchall(): new_row = [ None, None, None, 
None, None, None, None, None, None, None ] if row[1] not in room_options: room_options[row[1]] = db.execute_sql( "INSERT INTO location_options (unit_size, unit_size_unit, use_grid, full_fow, fow_opacity, fow_los, vision_mode, vision_min_range, vision_max_range, grid_size) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", default_row, ).lastrowid db.execute_sql( f"UPDATE room SET default_options_id = {room_options[row[1]]} WHERE id = {row[1]}" ) for col, val in zip(descr, row): if col[0] in ["id", "room_id"]: continue idx = mapping[col[0]] if val != default_row[idx]: new_row[idx] = val loc_id = db.execute_sql( "INSERT INTO location_options (unit_size, unit_size_unit, use_grid, full_fow, fow_opacity, fow_los, vision_mode, vision_min_range, vision_max_range, grid_size) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", new_row, ).lastrowid db.execute_sql( f"UPDATE location SET options_id = {loc_id} WHERE id = {row[0]}" ) migrate( migrator.add_not_null("room", "default_options_id"), migrator.drop_column("location", "unit_size"), migrator.drop_column("location", "unit_size_unit"), migrator.drop_column("location", "use_grid"), migrator.drop_column("location", "full_fow"), migrator.drop_column("location", "fow_opacity"), migrator.drop_column("location", "fow_los"), migrator.drop_column("location", "vision_mode"), migrator.drop_column("location", "vision_min_range"), migrator.drop_column("location", "vision_max_range"), migrator.drop_index("location", "location_room_id_name"), ) db.execute_sql("DROP TABLE 'grid_layer'") db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 27: # Fix broken schemas from older save upgrades db.foreign_keys = False with db.atomic(): db.execute_sql( "CREATE TEMPORARY TABLE _floor AS SELECT * FROM floor") db.execute_sql("DROP TABLE floor") db.execute_sql( 'CREATE TABLE "floor" ("id" INTEGER NOT NULL PRIMARY KEY, "location_id" INTEGER NOT NULL, "index" INTEGER NOT NULL, "name" TEXT NOT NULL, FOREIGN KEY 
("location_id") REFERENCES "location" ("id") ON DELETE CASCADE)' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "floor_location_id" ON "floor" ("location_id")' ) db.execute_sql( 'INSERT INTO floor (id, location_id, "index", name) SELECT id, location_id, "index", name FROM _floor' ) db.execute_sql( "CREATE TEMPORARY TABLE _label AS SELECT * FROM label") db.execute_sql("DROP TABLE label") db.execute_sql( 'CREATE TABLE "label" ("uuid" TEXT NOT NULL PRIMARY KEY, "user_id" INTEGER NOT NULL, "category" TEXT, "name" TEXT NOT NULL, "visible" INTEGER NOT NULL, FOREIGN KEY ("user_id") REFERENCES "user" ("id") ON DELETE CASCADE)' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "label_user_id" ON "label" ("user_id")' ) db.execute_sql( "INSERT INTO label (uuid, user_id, category, name, visible) SELECT uuid, user_id, category, name, visible FROM _label" ) db.execute_sql( "CREATE TEMPORARY TABLE _layer AS SELECT * FROM layer") db.execute_sql("DROP TABLE layer") db.execute_sql( 'CREATE TABLE "layer" ("id" INTEGER NOT NULL PRIMARY KEY, "floor_id" INTEGER NOT NULL, "name" TEXT NOT NULL, "type_" TEXT NOT NULL, "player_visible" INTEGER NOT NULL, "player_editable" INTEGER NOT NULL, "selectable" INTEGER NOT NULL, "index" INTEGER NOT NULL, FOREIGN KEY ("floor_id") REFERENCES "floor" ("id") ON DELETE CASCADE)' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "layer_floor_id" ON "layer" ("floor_id")' ) db.execute_sql( 'CREATE UNIQUE INDEX "layer_floor_id_index" ON "layer" ("floor_id", "index")' ) db.execute_sql( 'CREATE UNIQUE INDEX "layer_floor_id_name" ON "layer" ("floor_id", "name")' ) db.execute_sql( 'INSERT INTO layer (id, floor_id, name, type_, player_visible, player_editable, selectable, "index") SELECT id, floor_id, name, type_, player_visible, player_editable, selectable, "index" FROM _layer' ) db.execute_sql( "CREATE TEMPORARY TABLE _location AS SELECT * FROM location") db.execute_sql("DROP TABLE location") db.execute_sql( 'CREATE TABLE "location" ("id" INTEGER NOT NULL PRIMARY KEY, 
"room_id" INTEGER NOT NULL, "name" TEXT NOT NULL, "options_id" INTEGER, "index" INTEGER NOT NULL, FOREIGN KEY ("room_id") REFERENCES "room" ("id") ON DELETE CASCADE, FOREIGN KEY ("options_id") REFERENCES "location_options" ("id") ON DELETE CASCADE)' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "location_room_id" ON "location" ("room_id")' ) db.execute_sql( 'INSERT INTO location (id, room_id, name, options_id, "index") SELECT id, room_id, name, options_id, "index" FROM _location' ) db.execute_sql( "CREATE TEMPORARY TABLE _location_options AS SELECT * FROM location_options" ) db.execute_sql("DROP TABLE location_options") db.execute_sql( 'CREATE TABLE "location_options" ("id" INTEGER NOT NULL PRIMARY KEY, "unit_size" REAL, "unit_size_unit" TEXT, "use_grid" INTEGER, "full_fow" INTEGER, "fow_opacity" REAL, "fow_los" INTEGER, "vision_mode" TEXT, "grid_size" INTEGER, "vision_min_range" REAL, "vision_max_range" REAL)' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "location_options_id" ON "location" ("options_id")' ) db.execute_sql( "INSERT INTO location_options (id, unit_size, unit_size_unit, use_grid, full_fow, fow_opacity, fow_los, vision_mode, grid_size, vision_min_range, vision_max_range) SELECT id, unit_size, unit_size_unit, use_grid, full_fow, fow_opacity, fow_los, vision_mode, grid_size, vision_min_range, vision_max_range FROM _location_options" ) db.execute_sql( "CREATE TEMPORARY TABLE _location_user_option AS SELECT * FROM location_user_option" ) db.execute_sql("DROP TABLE location_user_option") db.execute_sql( 'CREATE TABLE "location_user_option" ("id" INTEGER NOT NULL PRIMARY KEY, "location_id" INTEGER NOT NULL, "user_id" INTEGER NOT NULL, "pan_x" INTEGER NOT NULL, "pan_y" INTEGER NOT NULL, "zoom_factor" REAL NOT NULL, "active_layer_id" INTEGER, FOREIGN KEY ("location_id") REFERENCES "location" ("id") ON DELETE CASCADE, FOREIGN KEY ("user_id") REFERENCES "user" ("id") ON DELETE CASCADE, FOREIGN KEY ("active_layer_id") REFERENCES "layer" ("id"))' ) 
db.execute_sql( 'CREATE INDEX IF NOT EXISTS "location_user_option_location_id" ON "location_user_option" ("location_id")' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "location_user_option_active_layer_id" ON "location_user_option" ("active_layer_id")' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "location_user_option_user_id" ON "location_user_option" ("user_id")' ) db.execute_sql( 'CREATE UNIQUE INDEX "location_user_option_location_id_user_id" ON "location_user_option" ("location_id", "user_id")' ) db.execute_sql( "INSERT INTO location_user_option (id, location_id, user_id, pan_x, pan_y, zoom_factor, active_layer_id) SELECT id, location_id, user_id, pan_x, pan_y, zoom_factor, active_layer_id FROM _location_user_option" ) db.execute_sql( "CREATE TEMPORARY TABLE _marker AS SELECT * FROM marker") db.execute_sql("DROP TABLE marker") db.execute_sql( 'CREATE TABLE "marker" ("id" INTEGER NOT NULL PRIMARY KEY, "shape_id" TEXT NOT NULL, "user_id" INTEGER NOT NULL, "location_id" INTEGER NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE, FOREIGN KEY ("user_id") REFERENCES "user" ("id") ON DELETE CASCADE, FOREIGN KEY ("location_id") REFERENCES "location" ("id") ON DELETE CASCADE)' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "marker_location_id" ON "marker" ("location_id")' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "marker_shape_id" ON "marker" ("shape_id")' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "marker_user_id" ON "marker" ("user_id")' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "marker_location_id" ON "marker" ("location_id")' ) db.execute_sql( "INSERT INTO marker (id, shape_id, user_id, location_id) SELECT id, shape_id, user_id, location_id FROM _marker" ) db.execute_sql( "CREATE TEMPORARY TABLE _player_room AS SELECT * FROM player_room" ) db.execute_sql("DROP TABLE player_room") db.execute_sql( 'CREATE TABLE "player_room" ("id" INTEGER NOT NULL PRIMARY KEY, "role" INTEGER NOT NULL, "player_id" INTEGER NOT NULL, "room_id" 
INTEGER NOT NULL, "active_location_id" INTEGER NOT NULL, FOREIGN KEY ("player_id") REFERENCES "user" ("id") ON DELETE CASCADE, FOREIGN KEY ("room_id") REFERENCES "room" ("id") ON DELETE CASCADE, FOREIGN KEY ("active_location_id") REFERENCES "location" ("id") ON DELETE CASCADE)' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "player_room_active_location_id" ON "player_room" ("active_location_id")' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "player_room_player_id" ON "player_room" ("player_id")' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "player_room_room_id" ON "player_room" ("room_id")' ) db.execute_sql( "INSERT INTO player_room (id, role, player_id, room_id, active_location_id) SELECT id, role, player_id, room_id, active_location_id FROM _player_room" ) db.execute_sql( "CREATE TEMPORARY TABLE _polygon AS SELECT * FROM polygon") db.execute_sql("DROP TABLE polygon") db.execute_sql( 'CREATE TABLE "polygon" ("shape_id" TEXT NOT NULL PRIMARY KEY, "vertices" TEXT NOT NULL, "line_width" INTEGER NOT NULL, "open_polygon" INTEGER NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)' ) db.execute_sql( "INSERT INTO polygon (shape_id,vertices, line_width, open_polygon) SELECT shape_id,vertices, line_width, open_polygon FROM _polygon" ) db.execute_sql( "CREATE TEMPORARY TABLE _room AS SELECT * FROM room") db.execute_sql("DROP TABLE room") db.execute_sql( 'CREATE TABLE "room" ("id" INTEGER NOT NULL PRIMARY KEY, "name" TEXT NOT NULL, "creator_id" INTEGER NOT NULL, "invitation_code" TEXT NOT NULL, "is_locked" INTEGER NOT NULL, "default_options_id" INTEGER NOT NULL, FOREIGN KEY ("creator_id") REFERENCES "user" ("id") ON DELETE CASCADE, FOREIGN KEY ("default_options_id") REFERENCES "location_options" ("id") ON DELETE CASCADE)' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "room_creator_id" ON "room" ("creator_id")' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "room_default_options_id" ON "room" ("default_options_id")' ) db.execute_sql( 'CREATE 
UNIQUE INDEX "room_invitation_code" ON "room" ("invitation_code")' ) db.execute_sql( 'CREATE UNIQUE INDEX "room_name_creator_id" ON "room" ("name", "creator_id")' ) db.execute_sql( "INSERT INTO room (id, name, creator_id, invitation_code, is_locked, default_options_id) SELECT id, name, creator_id, invitation_code, is_locked, default_options_id FROM _room" ) db.execute_sql( "CREATE TEMPORARY TABLE _shape AS SELECT * FROM shape") db.execute_sql("DROP TABLE shape") db.execute_sql( 'CREATE TABLE "shape" ("uuid" TEXT NOT NULL PRIMARY KEY, "layer_id" INTEGER NOT NULL, "type_" TEXT NOT NULL, "x" REAL NOT NULL, "y" REAL NOT NULL, "name" TEXT, "name_visible" INTEGER NOT NULL, "fill_colour" TEXT NOT NULL, "stroke_colour" TEXT NOT NULL, "vision_obstruction" INTEGER NOT NULL, "movement_obstruction" INTEGER NOT NULL, "is_token" INTEGER NOT NULL, "annotation" TEXT NOT NULL, "draw_operator" TEXT NOT NULL, "index" INTEGER NOT NULL, "options" TEXT, "badge" INTEGER NOT NULL, "show_badge" INTEGER NOT NULL, "default_edit_access" INTEGER NOT NULL, "default_vision_access" INTEGER NOT NULL, FOREIGN KEY ("layer_id") REFERENCES "layer" ("id") ON DELETE CASCADE)' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "shape_layer_id" ON "shape" ("layer_id")' ) db.execute_sql( 'INSERT INTO shape (uuid, layer_id, type_, x, y, name, name_visible, fill_colour, stroke_colour, vision_obstruction, movement_obstruction, is_token, annotation, draw_operator, "index", options, badge, show_badge, default_edit_access, default_vision_access) SELECT uuid, layer_id, type_, x, y, name, name_visible, fill_colour, stroke_colour, vision_obstruction, movement_obstruction, is_token, annotation, draw_operator, "index", options, badge, show_badge, default_edit_access, default_vision_access FROM _shape' ) db.execute_sql( "CREATE TEMPORARY TABLE _user AS SELECT * FROM user") db.execute_sql("DROP TABLE user") db.execute_sql( 'CREATE TABLE "user" ("id" INTEGER NOT NULL PRIMARY KEY, "name" TEXT NOT NULL, "email" TEXT, 
"password_hash" TEXT NOT NULL, "fow_colour" TEXT NOT NULL, "grid_colour" TEXT NOT NULL, "ruler_colour" TEXT NOT NULL, "invert_alt" INTEGER NOT NULL)' ) db.execute_sql( "INSERT INTO user (id, name, email, password_hash, fow_colour, grid_colour, ruler_colour, invert_alt) SELECT id, name, email, password_hash, fow_colour, grid_colour, ruler_colour, invert_alt FROM _user" ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 28: # Add invisibility toggle to shapes db.foreign_keys = False with db.atomic(): db.execute_sql( "ALTER TABLE shape ADD COLUMN is_invisible INTEGER NOT NULL DEFAULT 0" ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 29: # Add movement access permission migrator = SqliteMigrator(db) db.foreign_keys = False with db.atomic(): db.execute_sql( "ALTER TABLE shape ADD COLUMN default_movement_access INTEGER NOT NULL DEFAULT 0" ) db.execute_sql( "ALTER TABLE shape_owner ADD COLUMN movement_access INTEGER") db.execute_sql( "UPDATE shape_owner SET movement_access = CASE WHEN edit_access = 0 THEN 0 ELSE 1 END" ) migrate(migrator.add_not_null("shape_owner", "movement_access"), ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 30: # Add spawn locations db.foreign_keys = False with db.atomic(): db.execute_sql( 'ALTER TABLE location_options ADD COLUMN spawn_locations TEXT NOT NULL DEFAULT "[]"' ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 31: # Add shape movement lock db.foreign_keys = False with db.atomic(): db.execute_sql( "ALTER TABLE shape ADD COLUMN is_locked INTEGER NOT NULL DEFAULT 0" ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 32: # Add Shape.angle and Shape.stroke_width db.foreign_keys = False with db.atomic(): db.execute_sql( 
"ALTER TABLE shape ADD COLUMN angle INTEGER NOT NULL DEFAULT 0" ) db.execute_sql( "ALTER TABLE shape ADD COLUMN stroke_width INTEGER NOT NULL DEFAULT 2" ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 33: # Add Floor.player_visible db.foreign_keys = False with db.atomic(): db.execute_sql( "ALTER TABLE floor ADD COLUMN player_visible INTEGER NOT NULL DEFAULT 1" ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 34: # Fix Floor.index db.foreign_keys = False with db.atomic(): data = db.execute_sql("SELECT id FROM location") for location_id in data.fetchall(): db.execute_sql( f"UPDATE floor SET 'index' = (SELECT COUNT(*)-1 FROM floor f WHERE f.location_id = {location_id[0]} AND f.id <= floor.id ) WHERE location_id = {location_id[0]}" ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 35: # Move grid size to client options db.foreign_keys = False with db.atomic(): db.execute_sql( "CREATE TEMPORARY TABLE _location_options AS SELECT * FROM location_options" ) db.execute_sql("DROP TABLE location_options") db.execute_sql( 'CREATE TABLE "location_options" ("id" INTEGER NOT NULL PRIMARY KEY, "unit_size" REAL, "unit_size_unit" TEXT, "use_grid" INTEGER, "full_fow" INTEGER, "fow_opacity" REAL, "fow_los" INTEGER, "vision_mode" TEXT, "vision_min_range" REAL, "vision_max_range" REAL, "spawn_locations" TEXT NOT NULL DEFAULT "[]")' ) db.execute_sql( 'CREATE INDEX IF NOT EXISTS "location_options_id" ON "location" ("options_id")' ) db.execute_sql( "INSERT INTO location_options (id, unit_size, unit_size_unit, use_grid, full_fow, fow_opacity, fow_los, vision_mode, vision_min_range, vision_max_range, spawn_locations) SELECT id, unit_size, unit_size_unit, use_grid, full_fow, fow_opacity, fow_los, vision_mode, vision_min_range, vision_max_range, spawn_locations FROM _location_options" ) 
db.execute_sql( "ALTER TABLE user ADD COLUMN grid_size INTEGER NOT NULL DEFAULT 50" ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 36: # Change polygon vertices format from { x: number, y: number } to number[] db.foreign_keys = False with db.atomic(): data = db.execute_sql("SELECT shape_id, vertices FROM polygon") for row in data.fetchall(): try: vertices = json.loads(row[1]) if len(vertices) == 0 or isinstance(vertices[0], list): continue vertices = json.dumps([[v["x"], v["y"]] for v in vertices]) db.execute_sql( f"UPDATE 'polygon' SET 'vertices' = '{vertices}' WHERE 'shape_id' = '{row[0]}'" ) except json.decoder.JSONDecodeError: print(f"Failed to update polygon vertices! {row}") db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 37: # Change shape.angle from integer field to float field db.foreign_keys = False with db.atomic(): db.execute_sql( "CREATE TEMPORARY TABLE _shape AS SELECT * FROM shape") db.execute_sql("DROP TABLE shape") db.execute_sql( 'CREATE TABLE IF NOT EXISTS "shape" ("uuid" TEXT NOT NULL PRIMARY KEY, "layer_id" INTEGER NOT NULL, "type_" TEXT NOT NULL, "x" REAL NOT NULL, "y" REAL NOT NULL, "name" TEXT, "name_visible" INTEGER NOT NULL, "fill_colour" TEXT NOT NULL, "stroke_colour" TEXT NOT NULL, "vision_obstruction" INTEGER NOT NULL, "movement_obstruction" INTEGER NOT NULL, "is_token" INTEGER NOT NULL, "annotation" TEXT NOT NULL, "draw_operator" TEXT NOT NULL, "index" INTEGER NOT NULL, "options" TEXT, "badge" INTEGER NOT NULL, "show_badge" INTEGER NOT NULL, "default_edit_access" INTEGER NOT NULL, "default_vision_access" INTEGER NOT NULL, is_invisible INTEGER NOT NULL DEFAULT 0, default_movement_access INTEGER NOT NULL DEFAULT 0, is_locked INTEGER NOT NULL DEFAULT 0, angle REAL NOT NULL DEFAULT 0, stroke_width INTEGER NOT NULL DEFAULT 2, FOREIGN KEY ("layer_id") REFERENCES "layer" ("id") ON DELETE CASCADE)' ) 
db.execute_sql( 'CREATE INDEX "shape_layer_id" ON "shape" ("layer_id")') db.execute_sql( "INSERT INTO shape (uuid, layer_id, type_, x, y, name, name_visible, fill_colour, stroke_colour, vision_obstruction, movement_obstruction, is_token, annotation, draw_operator, 'index', options, badge, show_badge, default_edit_access, default_vision_access, is_invisible, default_movement_access, is_locked, angle, stroke_width) SELECT uuid, layer_id, type_, x, y, name, name_visible, fill_colour, stroke_colour, vision_obstruction, movement_obstruction, is_token, annotation, draw_operator, 'index', options, badge, show_badge, default_edit_access, default_vision_access, is_invisible, default_movement_access, is_locked, angle, stroke_width FROM _shape" ) db.execute_sql( "CREATE TEMPORARY TABLE _text AS SELECT * FROM text") db.execute_sql("DROP TABLE text") db.execute_sql( 'CREATE TABLE IF NOT EXISTS "text" ("shape_id" TEXT NOT NULL PRIMARY KEY, "text" TEXT NOT NULL, "font" TEXT NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE);' ) db.execute_sql( "INSERT INTO text (shape_id, text, font) SELECT shape_id, text, font FROM _text" ) db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 38: # Change polygon vertices format from { x: number, y: number } to number[] db.foreign_keys = False with db.atomic(): data = db.execute_sql("SELECT shape_id, vertices FROM polygon") for row in data.fetchall(): try: vertices = json.loads(row[1]) if len(vertices) == 0 or isinstance(vertices[0], list): continue vertices = json.dumps([[v["x"], v["y"]] for v in vertices]) db.execute_sql( f"UPDATE polygon SET vertices = '{vertices}' WHERE shape_id = '{row[0]}'" ) except json.decoder.JSONDecodeError: print(f"Failed to update polygon vertices! 
{row}") db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() elif version == 39: # Fix Shape.index being set to 'index' from models import Layer db.foreign_keys = False with db.atomic(): with db.atomic(): for layer in Layer.select(): shapes = layer.shapes.select() for i, shape in enumerate(shapes): shape.index = i shape.save() db.foreign_keys = True Constants.get().update(save_version=Constants.save_version + 1).execute() else: raise UnknownVersionException( f"No upgrade code for save format {version} was found.")
def run_migrations(current_schema, db_connection):
    """
    Apply new schema versions to database.

    Each ``if current_schema.version < N`` block performs one incremental
    upgrade step; ``_apply_schema_update`` runs the given migration
    operations and records ``N`` as the new version, so every pending step
    executes in order during a single call.

    See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations

    :param current_schema: SchemaVersion row holding the stored version.
    :param db_connection: open peewee database the migrator operates on.
    """
    migrator = SqliteMigrator(db_connection)

    if current_schema.version < 4:  # version 3 to 4
        # Track archive duration and size.
        _apply_schema_update(
            current_schema, 4,
            migrator.add_column(ArchiveModel._meta.table_name, 'duration',
                                pw.FloatField(null=True)),
            migrator.add_column(ArchiveModel._meta.table_name, 'size',
                                pw.IntegerField(null=True)))
    if current_schema.version < 5:
        # Allow wifi entries that were never connected.
        _apply_schema_update(
            current_schema, 5,
            migrator.drop_not_null(WifiSettingModel._meta.table_name,
                                   'last_connected'),
        )
    if current_schema.version < 6:
        _apply_schema_update(
            current_schema, 6,
            migrator.add_column(EventLogModel._meta.table_name, 'repo_url',
                                pw.CharField(null=True)))
    if current_schema.version < 7:
        # Rename config -> profile and store the profile name on log rows
        # instead of a foreign key.
        _apply_schema_update(
            current_schema, 7,
            migrator.rename_column(SourceFileModel._meta.table_name,
                                   'config_id', 'profile_id'),
            migrator.drop_column(EventLogModel._meta.table_name, 'profile_id'),
            migrator.add_column(EventLogModel._meta.table_name, 'profile',
                                pw.CharField(null=True)))
    if current_schema.version < 8:
        _apply_schema_update(
            current_schema, 8,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'prune_keep_within', pw.CharField(null=True)))
    if current_schema.version < 9:
        # Configurable archive naming and prune prefix templates.
        _apply_schema_update(
            current_schema, 9,
            migrator.add_column(
                BackupProfileModel._meta.table_name, 'new_archive_name',
                pw.CharField(
                    default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}"
                )),
            migrator.add_column(
                BackupProfileModel._meta.table_name, 'prune_prefix',
                pw.CharField(default="{hostname}-{profile_slug}-")),
        )
    if current_schema.version < 10:
        _apply_schema_update(
            current_schema, 10,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'pre_backup_cmd', pw.CharField(default='')),
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'post_backup_cmd', pw.CharField(default='')),
        )
    if current_schema.version < 11:
        # Data-only migration: normalize legacy compression identifiers.
        _apply_schema_update(current_schema, 11)
        for profile in BackupProfileModel:
            if profile.compression == 'zstd':
                profile.compression = 'zstd,3'
            if profile.compression == 'lzma,6':
                profile.compression = 'auto,lzma,6'
            profile.save()
    if current_schema.version < 12:
        _apply_schema_update(
            current_schema, 12,
            migrator.add_column(RepoModel._meta.table_name,
                                'extra_borg_arguments',
                                pw.CharField(default='')))
    if current_schema.version < 13:
        # Migrate ArchiveModel data to new table to remove unique constraint
        # from snapshot_id column. Only runs if the legacy table still exists
        # and the new one is empty.
        tables = DB.get_tables()
        if ArchiveModel.select().count() == 0 and 'snapshotmodel' in tables:
            cursor = DB.execute_sql('select * from snapshotmodel;')
            fields = [
                ArchiveModel.id, ArchiveModel.snapshot_id, ArchiveModel.name,
                ArchiveModel.repo, ArchiveModel.time, ArchiveModel.duration,
                ArchiveModel.size
            ]
            data = [row for row in cursor.fetchall()]
            # Insert in chunks of 1000 to stay within SQLite limits.
            with DB.atomic():
                size = 1000
                for i in range(0, len(data), size):
                    ArchiveModel.insert_many(data[i:i + size],
                                             fields=fields).execute()
        _apply_schema_update(current_schema, 13)
    if current_schema.version < 14:
        _apply_schema_update(
            current_schema, 14,
            migrator.add_column(SettingsModel._meta.table_name, 'str_value',
                                pw.CharField(default='')))
    if current_schema.version < 15:
        _apply_schema_update(
            current_schema, 15,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'dont_run_on_metered_networks',
                                pw.BooleanField(default=True)))
    if current_schema.version < 16:
        # Cached directory statistics for source files; -1 means "unknown".
        _apply_schema_update(
            current_schema, 16,
            migrator.add_column(SourceFileModel._meta.table_name, 'dir_size',
                                pw.BigIntegerField(default=-1)),
            migrator.add_column(SourceFileModel._meta.table_name,
                                'dir_files_count',
                                pw.BigIntegerField(default=-1)),
            migrator.add_column(SourceFileModel._meta.table_name,
                                'path_isdir', pw.BooleanField(default=False)))
    if current_schema.version < 17:
        _apply_schema_update(
            current_schema, 17,
            migrator.add_column(RepoModel._meta.table_name,
                                'create_backup_cmd', pw.CharField(default='')))
    if current_schema.version < 18:
        # Scheduling options and log end times.
        _apply_schema_update(
            current_schema, 18,
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'schedule_interval_unit',
                                pw.CharField(default='hours')),
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'schedule_interval_count',
                                pw.IntegerField(default=3)),
            migrator.add_column(BackupProfileModel._meta.table_name,
                                'schedule_make_up_missed',
                                pw.BooleanField(default=False)),
            migrator.add_column(EventLogModel._meta.table_name, 'end_time',
                                pw.DateTimeField(default=datetime.now)))
def init_db(con):
    """
    Initialize the application database.

    Binds the ``db`` proxy to *con*, creates all tables, seeds a default
    backup profile and the misc settings, prunes old event-log entries and
    finally applies any pending schema migrations.

    :param con: an open peewee SQLite database to bind ``db`` to.
    """
    # Restrict permissions on files we create (e.g. the SQLite database).
    os.umask(0o0077)
    db.initialize(con)
    db.connect()
    db.create_tables([
        RepoModel, RepoPassword, BackupProfileModel, SourceFileModel,
        SettingsModel, ArchiveModel, WifiSettingModel, EventLogModel,
        SchemaVersion
    ])

    # Ensure at least one backup profile always exists.
    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default')
        default_profile.save()

    # Create missing settings and update labels. Leave setting values untouched.
    for setting in get_misc_settings():
        s, created = SettingsModel.get_or_create(key=setting['key'],
                                                 defaults=setting)
        if created and setting['key'] == "use_dark_theme":
            # Check if macOS with enabled dark mode
            s.value = bool(uses_dark_mode())
        if created and setting['key'] == "use_light_icon":
            # Check if macOS with enabled dark mode or Linux with GNOME DE
            s.value = bool(uses_dark_mode()) or 'GNOME' in os.environ.get(
                'XDG_CURRENT_DESKTOP', '')
        if created and setting['key'] == "foreground":
            s.value = not bool(is_system_tray_available())
        if created and setting['key'] == "enable_notifications_success":
            s.value = not bool(is_system_tray_available())
        s.label = setting['label']
        s.save()

    # Delete log entries older than 180 days. (The old comment claimed
    # "3 months", but the cutoff has always been 180 days; retention kept.)
    cutoff = datetime.now() - timedelta(days=180)
    # BUGFIX: peewee queries are lazy -- without .execute() this DELETE
    # was built but never run, so old log entries were never pruned.
    EventLogModel.delete().where(EventLogModel.start_time < cutoff).execute()

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(
        id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()

    # A freshly created row is already at SCHEMA_VERSION; only existing,
    # out-of-date databases need the incremental migrations below.
    if not created and current_schema.version != SCHEMA_VERSION:
        migrator = SqliteMigrator(con)
        if current_schema.version < 4:  # version 3 to 4
            _apply_schema_update(
                current_schema, 4,
                migrator.add_column(ArchiveModel._meta.table_name, 'duration',
                                    pw.FloatField(null=True)),
                migrator.add_column(ArchiveModel._meta.table_name, 'size',
                                    pw.IntegerField(null=True)))
        if current_schema.version < 5:
            _apply_schema_update(
                current_schema, 5,
                migrator.drop_not_null(WifiSettingModel._meta.table_name,
                                       'last_connected'),
            )
        if current_schema.version < 6:
            _apply_schema_update(
                current_schema, 6,
                migrator.add_column(EventLogModel._meta.table_name, 'repo_url',
                                    pw.CharField(null=True)))
        if current_schema.version < 7:
            # Rename config -> profile; store the profile name on log rows
            # instead of a foreign key.
            _apply_schema_update(
                current_schema, 7,
                migrator.rename_column(SourceFileModel._meta.table_name,
                                       'config_id', 'profile_id'),
                migrator.drop_column(EventLogModel._meta.table_name,
                                     'profile_id'),
                migrator.add_column(EventLogModel._meta.table_name, 'profile',
                                    pw.CharField(null=True)))
        if current_schema.version < 8:
            _apply_schema_update(
                current_schema, 8,
                migrator.add_column(BackupProfileModel._meta.table_name,
                                    'prune_keep_within',
                                    pw.CharField(null=True)))
        if current_schema.version < 9:
            # Configurable archive naming and prune prefix templates.
            _apply_schema_update(
                current_schema, 9,
                migrator.add_column(
                    BackupProfileModel._meta.table_name, 'new_archive_name',
                    pw.CharField(
                        default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}"
                    )),
                migrator.add_column(
                    BackupProfileModel._meta.table_name, 'prune_prefix',
                    pw.CharField(default="{hostname}-{profile_slug}-")),
            )
        if current_schema.version < 10:
            _apply_schema_update(
                current_schema, 10,
                migrator.add_column(BackupProfileModel._meta.table_name,
                                    'pre_backup_cmd', pw.CharField(default='')),
                migrator.add_column(BackupProfileModel._meta.table_name,
                                    'post_backup_cmd',
                                    pw.CharField(default='')),
            )
        if current_schema.version < 11:
            # Data-only migration: normalize legacy compression identifiers.
            _apply_schema_update(current_schema, 11)
            for profile in BackupProfileModel:
                if profile.compression == 'zstd':
                    profile.compression = 'zstd,3'
                if profile.compression == 'lzma,6':
                    profile.compression = 'auto,lzma,6'
                profile.save()
        if current_schema.version < 12:
            _apply_schema_update(
                current_schema, 12,
                migrator.add_column(RepoModel._meta.table_name,
                                    'extra_borg_arguments',
                                    pw.CharField(default='')))
        if current_schema.version < 13:
            # Migrate ArchiveModel data to a new table to remove the unique
            # constraint from the snapshot_id column. Only runs if the legacy
            # table still exists and the new one is empty.
            tables = db.get_tables()
            if ArchiveModel.select().count() == 0 and 'snapshotmodel' in tables:
                cursor = db.execute_sql('select * from snapshotmodel;')
                fields = [
                    ArchiveModel.id, ArchiveModel.snapshot_id,
                    ArchiveModel.name, ArchiveModel.repo, ArchiveModel.time,
                    ArchiveModel.duration, ArchiveModel.size
                ]
                data = cursor.fetchall()  # fetchall() already returns a list
                # Insert in chunks of 1000 to stay within SQLite limits.
                with db.atomic():
                    size = 1000
                    for i in range(0, len(data), size):
                        ArchiveModel.insert_many(data[i:i + size],
                                                 fields=fields).execute()
            _apply_schema_update(current_schema, 13)
def upgrade(version):
    """
    Upgrade a save file one step, from save-format `version` to the next.

    Every branch migrates exactly one version and ends by incrementing the
    stored save version (``Constants.save_version + 1``). SQLite cannot
    alter constraints in place, so several branches use the
    copy-to-temp-table / drop / recreate / re-insert pattern, with foreign
    keys disabled for the duration.

    :param version: the save-format version currently stored in the file.
    :raises Exception: when no upgrade code exists for `version`.
    """
    if version == 3:
        # Rebuild grid_layer from the current model, preserving its rows
        # via a temporary copy.
        from models import GridLayer
        db.execute_sql(
            "CREATE TEMPORARY TABLE _grid_layer AS SELECT * FROM grid_layer")
        db.drop_tables([GridLayer])
        db.create_tables([GridLayer])
        db.execute_sql("INSERT INTO grid_layer SELECT * FROM _grid_layer")
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 4:
        # Rebuild the location table from the current model definition.
        from models import Location
        db.foreign_keys = False
        db.execute_sql(
            "CREATE TEMPORARY TABLE _location AS SELECT * FROM location")
        db.execute_sql("DROP TABLE location")
        db.create_tables([Location])
        db.execute_sql("INSERT INTO location SELECT * FROM _location")
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 5:
        # Add LocationUserOption.active_layer: default every user to the
        # "tokens" layer of their location, then make the column NOT NULL.
        from models import Layer
        from peewee import ForeignKeyField
        migrator = SqliteMigrator(db)
        field = ForeignKeyField(Layer, Layer.id, backref="active_users",
                                null=True)
        with db.atomic():
            migrate(
                migrator.add_column("location_user_option", "active_layer_id",
                                    field))
            from models import LocationUserOption
            LocationUserOption._meta.add_field("active_layer", field)
            for luo in LocationUserOption.select():
                luo.active_layer = luo.location.layers.select().where(
                    Layer.name == "tokens")[0]
                luo.save()
            migrate(
                migrator.add_not_null("location_user_option",
                                      "active_layer_id"))
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 6:
        # Relax active_layer_id back to nullable.
        migrator = SqliteMigrator(db)
        migrate(
            migrator.drop_not_null("location_user_option", "active_layer_id"))
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 7:
        # Remove shape index unique constraint
        from models import Shape
        db.foreign_keys = False
        db.execute_sql("CREATE TEMPORARY TABLE _shape AS SELECT * FROM shape")
        db.execute_sql("DROP TABLE shape")
        db.create_tables([Shape])
        db.execute_sql("INSERT INTO shape SELECT * FROM _shape")
        db.foreign_keys = True
        # Check all indices and reset to 0 index
        logger.info("Validating all shape indices")
        from models import Layer
        with db.atomic():
            for layer in Layer.select():
                # Renumber shapes per layer, ordered by |index|.
                shapes = layer.shapes.order_by(fn.ABS(Shape.index))
                for i, shape in enumerate(shapes):
                    shape.index = i
                    shape.save()
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 8:
        # New Polygon shape table.
        from models import Polygon
        db.create_tables([Polygon])
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 9:
        # Per-location vision settings.
        from models import Location
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        with db.atomic():
            migrate(
                migrator.add_column("location", "vision_mode",
                                    Location.vision_mode),
                migrator.add_column("location", "vision_min_range",
                                    Location.vision_min_range),
                migrator.add_column("location", "vision_max_range",
                                    Location.vision_max_range),
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 10:
        # Add Shape.name_visible.
        from models import Shape
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        with db.atomic():
            migrate(
                migrator.add_column("shape", "name_visible",
                                    Shape.name_visible))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 11:
        # Introduce labels plus per-user active label filters.
        from models import Label, LocationUserOption, ShapeLabel
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        with db.atomic():
            db.create_tables([Label, ShapeLabel])
            migrate(
                migrator.add_column(
                    "location_user_option",
                    "active_filters",
                    LocationUserOption.active_filters,
                ))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 12:
        # Add Label.category and split existing "cat:name" label names.
        from models import Label, LabelSelection
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        with db.atomic():
            try:
                migrate(
                    migrator.add_column("label", "category", Label.category))
            except OperationalError as e:
                # The column may already exist from a partially applied
                # upgrade; anything else is a real error.
                if e.args[0] != "duplicate column name: category":
                    raise e
            db.create_tables([LabelSelection])
        with db.atomic():
            for label in Label:
                if ":" not in label.name:
                    continue
                # First ":" separates category from name; the rest of the
                # name may itself contain ":" and is re-joined.
                cat, *name = label.name.split(":")
                label.category = cat
                label.name = ":".join(name)
                label.save()
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 13:
        # Drop the active_filters column again.
        # NOTE(review): MultiLine and Polygon imports appear unused in this
        # branch -- confirm before removing.
        from models import LocationUserOption, MultiLine, Polygon
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        migrate(migrator.drop_column("location_user_option", "active_filters"))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 14:
        # Rebuild every shape subtype table against the new schema and link
        # grid_layer rows to their layer via a proper foreign key.
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        from models import GridLayer, Layer
        db.execute_sql(
            'CREATE TABLE IF NOT EXISTS "base_rect" ("shape_id" TEXT NOT NULL PRIMARY KEY, "width" REAL NOT NULL, "height" REAL NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)'
        )
        db.execute_sql(
            'CREATE TABLE IF NOT EXISTS "shape_type" ("shape_id" TEXT NOT NULL PRIMARY KEY, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)'
        )
        shape_types = [
            "asset_rect",
            "circle",
            "circular_token",
            "line",
            "multi_line",
            "polygon",
            "rect",
            "text",
        ]
        with db.atomic():
            # Copy each subtype table aside, drop it, recreate it with the
            # new schema, then re-insert only rows whose shape still exists.
            for table in shape_types:
                db.execute_sql(
                    f"CREATE TEMPORARY TABLE _{table} AS SELECT * FROM {table}"
                )
                db.execute_sql(f"DROP TABLE {table}")
            for query in [
                    'CREATE TABLE IF NOT EXISTS "asset_rect" ("shape_id" TEXT NOT NULL PRIMARY KEY, "width" REAL NOT NULL, "height" REAL NOT NULL, "src" TEXT NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                    'CREATE TABLE IF NOT EXISTS "circle" ("shape_id" TEXT NOT NULL PRIMARY KEY, "radius" REAL NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                    'CREATE TABLE IF NOT EXISTS "circular_token" ("shape_id" TEXT NOT NULL PRIMARY KEY, "radius" REAL NOT NULL, "text" TEXT NOT NULL, "font" TEXT NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                    'CREATE TABLE IF NOT EXISTS "line" ("shape_id" TEXT NOT NULL PRIMARY KEY, "x2" REAL NOT NULL, "y2" REAL NOT NULL, "line_width" INTEGER NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                    'CREATE TABLE IF NOT EXISTS "multi_line" ("shape_id" TEXT NOT NULL PRIMARY KEY, "line_width" INTEGER NOT NULL, "points" TEXT NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                    'CREATE TABLE IF NOT EXISTS "polygon" ("shape_id" TEXT NOT NULL PRIMARY KEY, "vertices" TEXT NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                    'CREATE TABLE IF NOT EXISTS "rect" ("shape_id" TEXT NOT NULL PRIMARY KEY, "width" REAL NOT NULL, "height" REAL NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                    'CREATE TABLE IF NOT EXISTS "text" ("shape_id" TEXT NOT NULL PRIMARY KEY, "text" TEXT NOT NULL, "font" TEXT NOT NULL, "angle" REAL NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
            ]:
                db.execute_sql(query)
            for table in shape_types:
                db.execute_sql(
                    f"INSERT INTO {table} SELECT _{table}.* FROM _{table} INNER JOIN shape ON shape.uuid = _{table}.uuid"
                )
        field = ForeignKeyField(Layer, Layer.id, null=True)
        with db.atomic():
            migrate(migrator.add_column("grid_layer", "layer_id", field))
            for gl in GridLayer.select():
                # Each grid_layer row is matched to the layer with the same
                # id; rows without a matching layer are dropped.
                l = Layer.get_or_none(id=gl.id)
                if l:
                    gl.layer = l
                    gl.save()
                else:
                    gl.delete_instance()
            migrate(migrator.add_not_null("grid_layer", "layer_id"))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 15:
        # Add Room.is_locked.
        from peewee import BooleanField
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column("room", "is_locked",
                                    BooleanField(default=False)))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 16:
        # Add Location.unit_size_unit (defaults to "ft").
        from peewee import TextField
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column("location", "unit_size_unit",
                                    TextField(default="ft")))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 17:
        # Fold multi_line shapes into polygon as open polygons, then retire
        # the multi_line table and type.
        from peewee import BooleanField, IntegerField
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column("polygon", "open_polygon",
                                    BooleanField(default=False)),
                migrator.add_column("polygon", "line_width",
                                    IntegerField(default=2)),
            )
            db.execute_sql(
                "INSERT INTO polygon (shape_id, line_width, vertices, open_polygon) SELECT shape_id, line_width, points, 1 FROM multi_line"
            )
            db.execute_sql("DROP TABLE multi_line")
            db.execute_sql(
                "UPDATE shape SET type_ = 'polygon' WHERE type_ = 'multiline'")
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 18:
        # Add optional User.email.
        from peewee import TextField
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(migrator.add_column("user", "email", TextField(null=True)))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 19:
        # Introduce floors: create the floor table, give every location a
        # "ground" floor at index 0, and re-parent layers from location to
        # their new floor.
        # NOTE(review): ForeignKeyField import appears unused in this branch.
        from peewee import ForeignKeyField
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        db.execute_sql(
            'CREATE TABLE IF NOT EXISTS "floor" ("id" INTEGER NOT NULL PRIMARY KEY, "location_id" INTEGER NOT NULL, "name" TEXT, "index" INTEGER NOT NULL, FOREIGN KEY ("location_id") REFERENCES "location" ("id") ON DELETE CASCADE)'
        )
        db.execute_sql(
            'INSERT INTO floor (location_id, name, "index") SELECT id, "ground", 0 FROM location'
        )
        with db.atomic():
            db.execute_sql(
                "CREATE TEMPORARY TABLE _layer AS SELECT * FROM layer")
            db.execute_sql("DROP TABLE layer")
            db.execute_sql(
                'CREATE TABLE IF NOT EXISTS "layer" ("id" INTEGER NOT NULL PRIMARY KEY, "floor_id" INTEGER NOT NULL, "name" TEXT NOT NULL, "type_" TEXT NOT NULL, "player_visible" INTEGER NOT NULL, "player_editable" INTEGER NOT NULL, "selectable" INTEGER NOT NULL, "index" INTEGER NOT NULL, FOREIGN KEY ("floor_id") REFERENCES "floor" ("id") ON DELETE CASCADE)'
            )
            db.execute_sql(
                'INSERT INTO layer (id, floor_id, name, type_, player_visible, player_editable, selectable, "index") SELECT _layer.id, floor.id, _layer.name, type_, player_visible, player_editable, selectable, _layer."index" FROM _layer INNER JOIN floor ON floor.location_id = _layer.location_id'
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 20:
        # Add shape badges.
        # NOTE(review): BooleanField is imported twice on the next line.
        from peewee import BooleanField, BooleanField, IntegerField
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column("shape", "badge", IntegerField(default=1)),
                migrator.add_column("shape", "show_badge",
                                    BooleanField(default=False)),
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    else:
        raise Exception(
            f"No upgrade code for save format {version} was found.")
def upgrade(version):
    """Upgrade a save file by exactly one schema step, from `version` to `version + 1`.

    Each ``elif`` branch migrates a single save-format version and then bumps
    ``Constants.save_version``; the caller is expected to invoke this repeatedly
    until the save is current.  ``db.foreign_keys`` is switched off around each
    branch because the SQLite table rebuilds below would otherwise trip foreign
    key enforcement mid-migration.

    Raises:
        Exception: for versions < 13 (migration code pruned from the repo) and
            for any version with no matching branch.
    """
    if version < 13:
        # Migrations for ancient saves were deliberately removed from the codebase.
        raise Exception(
            f"Upgrade code for this version is >1 year old and is no longer in the active codebase to reduce clutter. You can still find this code on github, contact me for more info."
        )
    elif version == 13:
        # Drop the no-longer-used active_filters column.
        # NOTE(review): MultiLine and Polygon are imported but unused in this branch.
        from models import LocationUserOption, MultiLine, Polygon

        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        migrate(migrator.drop_column("location_user_option", "active_filters"))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 14:
        # Rebuild every per-shape sub-table (asset_rect, circle, ...) so each
        # keys on shape_id with an ON DELETE CASCADE FK to shape.uuid, and give
        # grid_layer an explicit layer_id foreign key.
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        from models import GridLayer, Layer

        db.execute_sql(
            'CREATE TABLE IF NOT EXISTS "base_rect" ("shape_id" TEXT NOT NULL PRIMARY KEY, "width" REAL NOT NULL, "height" REAL NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)'
        )
        db.execute_sql(
            'CREATE TABLE IF NOT EXISTS "shape_type" ("shape_id" TEXT NOT NULL PRIMARY KEY, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)'
        )
        shape_types = [
            "asset_rect",
            "circle",
            "circular_token",
            "line",
            "multi_line",
            "polygon",
            "rect",
            "text",
        ]
        with db.atomic():
            # Stash each existing table in a TEMPORARY copy, drop the original,
            # then recreate it with the new schema.
            for table in shape_types:
                db.execute_sql(
                    f"CREATE TEMPORARY TABLE _{table} AS SELECT * FROM {table}"
                )
                db.execute_sql(f"DROP TABLE {table}")
            for query in [
                'CREATE TABLE IF NOT EXISTS "asset_rect" ("shape_id" TEXT NOT NULL PRIMARY KEY, "width" REAL NOT NULL, "height" REAL NOT NULL, "src" TEXT NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                'CREATE TABLE IF NOT EXISTS "circle" ("shape_id" TEXT NOT NULL PRIMARY KEY, "radius" REAL NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                'CREATE TABLE IF NOT EXISTS "circular_token" ("shape_id" TEXT NOT NULL PRIMARY KEY, "radius" REAL NOT NULL, "text" TEXT NOT NULL, "font" TEXT NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                'CREATE TABLE IF NOT EXISTS "line" ("shape_id" TEXT NOT NULL PRIMARY KEY, "x2" REAL NOT NULL, "y2" REAL NOT NULL, "line_width" INTEGER NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                'CREATE TABLE IF NOT EXISTS "multi_line" ("shape_id" TEXT NOT NULL PRIMARY KEY, "line_width" INTEGER NOT NULL, "points" TEXT NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                'CREATE TABLE IF NOT EXISTS "polygon" ("shape_id" TEXT NOT NULL PRIMARY KEY, "vertices" TEXT NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                'CREATE TABLE IF NOT EXISTS "rect" ("shape_id" TEXT NOT NULL PRIMARY KEY, "width" REAL NOT NULL, "height" REAL NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
                'CREATE TABLE IF NOT EXISTS "text" ("shape_id" TEXT NOT NULL PRIMARY KEY, "text" TEXT NOT NULL, "font" TEXT NOT NULL, "angle" REAL NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape" ("uuid") ON DELETE CASCADE)',
            ]:
                db.execute_sql(query)
            # Copy rows back; the INNER JOIN on shape drops rows whose parent
            # shape no longer exists.
            for table in shape_types:
                db.execute_sql(
                    f"INSERT INTO {table} SELECT _{table}.* FROM _{table} INNER JOIN shape ON shape.uuid = _{table}.uuid"
                )
        # grid_layer rows historically shared their id with a layer row; add a
        # nullable FK, backfill it from that id, then enforce NOT NULL.
        field = ForeignKeyField(Layer, Layer.id, null=True)
        with db.atomic():
            migrate(migrator.add_column("grid_layer", "layer_id", field))
            for gl in GridLayer.select():
                l = Layer.get_or_none(id=gl.id)
                if l:
                    gl.layer = l
                    gl.save()
                else:
                    # Orphaned grid layer: no matching layer row, drop it.
                    gl.delete_instance()
            migrate(migrator.add_not_null("grid_layer", "layer_id"))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 15:
        # Add room locking support.
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column("room", "is_locked", BooleanField(default=False))
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 16:
        # Make the grid unit label configurable per location (default "ft").
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column(
                    "location", "unit_size_unit", TextField(default="ft")
                )
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 17:
        # Fold multi_line shapes into polygon: a multi_line becomes an "open"
        # polygon carrying its line width; then drop the multi_line table and
        # retag the generic shape rows.
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column(
                    "polygon", "open_polygon", BooleanField(default=False)
                ),
                migrator.add_column("polygon", "line_width", IntegerField(default=2)),
            )
            db.execute_sql(
                "INSERT INTO polygon (shape_id, line_width, vertices, open_polygon) SELECT shape_id, line_width, points, 1 FROM multi_line"
            )
            db.execute_sql("DROP TABLE multi_line")
            db.execute_sql(
                "UPDATE shape SET type_ = 'polygon' WHERE type_ = 'multiline'"
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 18:
        # Store an optional e-mail address per user.
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(migrator.add_column("user", "email", TextField(null=True)))
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 19:
        # Introduce floors: every location gets a default "ground" floor and
        # layers are re-parented from location to floor via a table rebuild.
        db.foreign_keys = False
        migrator = SqliteMigrator(db)
        db.execute_sql(
            'CREATE TABLE IF NOT EXISTS "floor" ("id" INTEGER NOT NULL PRIMARY KEY, "location_id" INTEGER NOT NULL, "name" TEXT, "index" INTEGER NOT NULL, FOREIGN KEY ("location_id") REFERENCES "location" ("id") ON DELETE CASCADE)'
        )
        db.execute_sql(
            'INSERT INTO floor (location_id, name, "index") SELECT id, "ground", 0 FROM location'
        )
        with db.atomic():
            db.execute_sql("CREATE TEMPORARY TABLE _layer AS SELECT * FROM layer")
            db.execute_sql("DROP TABLE layer")
            db.execute_sql(
                'CREATE TABLE IF NOT EXISTS "layer" ("id" INTEGER NOT NULL PRIMARY KEY, "floor_id" INTEGER NOT NULL, "name" TEXT NOT NULL, "type_" TEXT NOT NULL, "player_visible" INTEGER NOT NULL, "player_editable" INTEGER NOT NULL, "selectable" INTEGER NOT NULL, "index" INTEGER NOT NULL, FOREIGN KEY ("floor_id") REFERENCES "floor" ("id") ON DELETE CASCADE)'
            )
            db.execute_sql(
                'INSERT INTO layer (id, floor_id, name, type_, player_visible, player_editable, selectable, "index") SELECT _layer.id, floor.id, _layer.name, type_, player_visible, player_editable, selectable, _layer."index" FROM _layer INNER JOIN floor ON floor.location_id = _layer.location_id'
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 20:
        # Give shapes an ordering badge and a toggle to show it.
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column("shape", "badge", IntegerField(default=1)),
                migrator.add_column("shape", "show_badge", BooleanField(default=False)),
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 21:
        # Per-user setting to invert the ALT-key behaviour.
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column("user", "invert_alt", BooleanField(default=False))
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 22:
        # New marker table linking a user-placed marker to a shape/location.
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            db.execute_sql(
                'CREATE TABLE IF NOT EXISTS "marker" ("id" INTEGER NOT NULL PRIMARY KEY, "shape_id" TEXT NOT NULL, "user_id" INTEGER NOT NULL, "location_id" INTEGER NOT NULL, FOREIGN KEY ("shape_id") REFERENCES "shape"("uuid") ON DELETE CASCADE, FOREIGN KEY ("location_id") REFERENCES "location" ("id") ON DELETE CASCADE, FOREIGN KEY ("user_id") REFERENCES "user"("id") ON DELETE CASCADE)'
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 23:
        # Fine-grained shape permissions: per-owner edit/vision flags plus
        # shape-level defaults for non-owners.
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column(
                    "shape_owner", "edit_access", BooleanField(default=True)
                ),
                migrator.add_column(
                    "shape_owner", "vision_access", BooleanField(default=True)
                ),
                migrator.add_column(
                    "shape", "default_edit_access", BooleanField(default=False)
                ),
                migrator.add_column(
                    "shape", "default_vision_access", BooleanField(default=False)
                ),
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 24:
        # Remove player_room rows where the "player" is actually the room's
        # creator (the DM is tracked separately from here on).
        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            db.execute_sql(
                'DELETE FROM "player_room" WHERE id IN (SELECT pr.id FROM "player_room" pr INNER JOIN "room" r ON r.id = pr.room_id WHERE r.creator_id = pr.player_id )'
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 25:
        # Move Room.dm_location and Room.player_location to PlayerRoom.active_location
        # Add PlayerRoom.role
        # Add order index on location
        from models import Location

        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            migrate(
                migrator.add_column(
                    "player_room",
                    "active_location_id",
                    ForeignKeyField(
                        Location,
                        Location.id,
                        backref="players",
                        on_delete="CASCADE",
                        null=True,
                    ),
                ),
                migrator.add_column("player_room", "role", IntegerField(default=0)),
                migrator.add_column("location", "index", IntegerField(default=0)),
            )
            # Backfill each player's active location from the room's old
            # player_location name.
            db.execute_sql(
                "UPDATE player_room SET active_location_id = (SELECT location.id FROM room INNER JOIN location ON room.id = location.room_id WHERE location.name = room.player_location AND room.id = player_room.room_id)"
            )
            # Insert a DM row (role = 1) per room, pointing at the old dm_location.
            db.execute_sql(
                "INSERT INTO player_room (role, player_id, room_id, active_location_id) SELECT 1, u.id, r.id, l.id FROM room r INNER JOIN user u ON u.id = r.creator_id INNER JOIN location l ON l.name = r.dm_location AND l.room_id = r.id"
            )
            db.execute_sql(
                "UPDATE location SET 'index' = (SELECT COUNT(*) + 1 FROM location l INNER JOIN room r WHERE location.room_id = r.id AND l.room_id = r.id AND l.'index' != 0) "
            )
            migrate(
                migrator.drop_column("room", "player_location"),
                migrator.drop_column("room", "dm_location"),
                migrator.add_not_null("player_room", "active_location_id"),
            )
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    elif version == 26:
        # Move Location settings to a separate LocationSettings table
        # Add a default_settings field to Room that refers to such a LocationSettings row
        from models import LocationOptions

        migrator = SqliteMigrator(db)
        db.foreign_keys = False
        with db.atomic():
            db.execute_sql(
                'CREATE TABLE IF NOT EXISTS "location_options" ("id" INTEGER NOT NULL PRIMARY KEY, "unit_size" REAL DEFAULT 5, "unit_size_unit" TEXT DEFAULT "ft", "use_grid" INTEGER DEFAULT 1, "full_fow" INTEGER DEFAULT 0, "fow_opacity" REAL DEFAULT 0.3, "fow_los" INTEGER DEFAULT 0, "vision_mode" TEXT DEFAULT "triangle", "vision_min_range" REAL DEFAULT 1640, "vision_max_range" REAL DEFAULT 3281, "grid_size" INTEGER DEFAULT 50)'
            )
            migrate(
                migrator.add_column(
                    "location",
                    "options_id",
                    ForeignKeyField(
                        LocationOptions,
                        LocationOptions.id,
                        on_delete="CASCADE",
                        null=True,
                    ),
                ),
                migrator.add_column(
                    "room",
                    "default_options_id",
                    ForeignKeyField(
                        LocationOptions,
                        LocationOptions.id,
                        on_delete="CASCADE",
                        null=True,
                    ),
                ),
            )
            # One row per location: (location id, room id, old per-location
            # settings, and the room's grid size from its first floor's grid layer).
            data = db.execute_sql(
                """SELECT l.id, r.id, l.unit_size, l.unit_size_unit, l.use_grid, l.full_fow, l.fow_opacity, l.fow_los, l.vision_mode, l.vision_min_range, l.vision_max_range, g.size AS grid_size FROM location l INNER JOIN room r INNER JOIN floor f ON f.id = (SELECT id FROM floor f2 WHERE f2.location_id = l.id LIMIT 1) INNER JOIN layer la INNER JOIN grid_layer g WHERE r.id = l.room_id AND la.floor_id = f.id AND la.name = 'grid' AND g.layer_id = la.id"""
            )
            room_options = {}  # room id -> id of its default location_options row
            descr = data.description
            # Column name -> position in the location_options value tuple.
            mapping = {
                "unit_size": 0,
                "unit_size_unit": 1,
                "use_grid": 2,
                "full_fow": 3,
                "fow_opacity": 4,
                "fow_los": 5,
                "vision_mode": 6,
                "vision_min_range": 7,
                "vision_max_range": 8,
                "grid_size": 9,
            }
            default_row = [5, "ft", True, False, 0.3, False, "triangle", 1640, 3281, 50]
            for row in data.fetchall():
                new_row = [None, None, None, None, None, None, None, None, None, None]
                # First time we see this room: create its default options row.
                if row[1] not in room_options:
                    room_options[row[1]] = db.execute_sql(
                        "INSERT INTO location_options (unit_size, unit_size_unit, use_grid, full_fow, fow_opacity, fow_los, vision_mode, vision_min_range, vision_max_range, grid_size) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                        default_row,
                    ).lastrowid
                    db.execute_sql(
                        f"UPDATE room SET default_options_id = {room_options[row[1]]} WHERE id = {row[1]}"
                    )
                # Keep only values that differ from the defaults; the rest stay
                # None. NOTE(review): explicit NULLs do not fall back to the
                # column DEFAULTs in SQLite — presumably the app treats NULL as
                # "inherit from room defaults"; confirm against the callers.
                for col, val in zip(descr, row):
                    if col[0] in ["id", "room_id"]:
                        continue
                    idx = mapping[col[0]]
                    if val != default_row[idx]:
                        new_row[idx] = val
                loc_id = db.execute_sql(
                    "INSERT INTO location_options (unit_size, unit_size_unit, use_grid, full_fow, fow_opacity, fow_los, vision_mode, vision_min_range, vision_max_range, grid_size) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                    new_row,
                ).lastrowid
                db.execute_sql(
                    f"UPDATE location SET options_id = {loc_id} WHERE id = {row[0]}"
                )
            migrate(
                migrator.add_not_null("room", "default_options_id"),
                migrator.drop_column("location", "unit_size"),
                migrator.drop_column("location", "unit_size_unit"),
                migrator.drop_column("location", "use_grid"),
                migrator.drop_column("location", "full_fow"),
                migrator.drop_column("location", "fow_opacity"),
                migrator.drop_column("location", "fow_los"),
                migrator.drop_column("location", "vision_mode"),
                migrator.drop_column("location", "vision_min_range"),
                migrator.drop_column("location", "vision_max_range"),
                migrator.drop_index("location", "location_room_id_name"),
            )
            db.execute_sql("DROP TABLE 'grid_layer'")
        db.foreign_keys = True
        Constants.get().update(save_version=Constants.save_version + 1).execute()
    else:
        raise Exception(f"No upgrade code for save format {version} was found.")
def init_db(con=None):
    """Initialise the application database and bring its schema up to date.

    Parameters:
        con: optional pre-built peewee database connection. When given, the
            module-level ``db`` proxy is bound to it and the process umask is
            tightened first so the SQLite file is created without group/other
            permissions.

    Side effects: creates all tables, seeds a default backup profile on first
    run, applies any pending schema migrations, refreshes misc settings
    labels, and prunes old event-log rows.
    """
    if con is not None:
        # Restrict permissions on files we are about to create (the SQLite db).
        os.umask(0o0077)
        db.initialize(con)
    db.connect()
    db.create_tables([
        RepoModel, RepoPassword, BackupProfileModel, SourceFileModel,
        SettingsModel, ArchiveModel, WifiSettingModel, EventLogModel,
        SchemaVersion
    ])

    # Seed a default profile on first run.
    if BackupProfileModel.select().count() == 0:
        default_profile = BackupProfileModel(name='Default')
        default_profile.save()

    # NOTE: a duplicated log-pruning block used to live here with a stale
    # 180-day cutoff and no .execute(); it never ran and is superseded by the
    # 3-month prune at the end of this function, so it was removed.

    # Migrations
    # See http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations
    current_schema, created = SchemaVersion.get_or_create(
        id=1, defaults={'version': SCHEMA_VERSION})
    current_schema.save()
    # Freshly-created schema rows already carry SCHEMA_VERSION; only run the
    # migration chain for pre-existing, out-of-date databases.
    if not created and current_schema.version != SCHEMA_VERSION:
        migrator = SqliteMigrator(con)
        if current_schema.version < 4:  # version 3 to 4
            _apply_schema_update(
                current_schema, 4,
                migrator.add_column(ArchiveModel._meta.table_name,
                                    'duration', pw.FloatField(null=True)),
                migrator.add_column(ArchiveModel._meta.table_name,
                                    'size', pw.IntegerField(null=True)))
        if current_schema.version < 5:
            _apply_schema_update(
                current_schema, 5,
                migrator.drop_not_null(WifiSettingModel._meta.table_name,
                                       'last_connected'),
            )
        if current_schema.version < 6:
            _apply_schema_update(
                current_schema, 6,
                migrator.add_column(EventLogModel._meta.table_name,
                                    'repo_url', pw.CharField(null=True)))
        if current_schema.version < 7:
            # Rename config_id -> profile_id and switch the event log from a
            # numeric profile_id to a free-form profile string.
            _apply_schema_update(
                current_schema, 7,
                migrator.rename_column(SourceFileModel._meta.table_name,
                                       'config_id', 'profile_id'),
                migrator.drop_column(EventLogModel._meta.table_name,
                                     'profile_id'),
                migrator.add_column(EventLogModel._meta.table_name,
                                    'profile', pw.CharField(null=True)))
        if current_schema.version < 8:
            _apply_schema_update(
                current_schema, 8,
                migrator.add_column(BackupProfileModel._meta.table_name,
                                    'prune_keep_within', pw.CharField(null=True)))
        if current_schema.version < 9:
            _apply_schema_update(
                current_schema, 9,
                migrator.add_column(
                    BackupProfileModel._meta.table_name,
                    'new_archive_name',
                    pw.CharField(
                        default="{hostname}-{profile_slug}-{now:%Y-%m-%dT%H:%M:%S}"
                    )),
                migrator.add_column(
                    BackupProfileModel._meta.table_name,
                    'prune_prefix',
                    pw.CharField(default="{hostname}-{profile_slug}-")),
            )
        if current_schema.version < 10:
            _apply_schema_update(
                current_schema, 10,
                migrator.add_column(BackupProfileModel._meta.table_name,
                                    'pre_backup_cmd', pw.CharField(default='')),
                migrator.add_column(BackupProfileModel._meta.table_name,
                                    'post_backup_cmd', pw.CharField(default='')),
            )
        if current_schema.version < 11:
            _apply_schema_update(current_schema, 11)
            # Data migration: normalise legacy compression identifiers.
            for profile in BackupProfileModel:
                if profile.compression == 'zstd':
                    profile.compression = 'zstd,3'
                if profile.compression == 'lzma,6':
                    profile.compression = 'auto,lzma,6'
                profile.save()
        if current_schema.version < 12:
            _apply_schema_update(
                current_schema, 12,
                migrator.add_column(RepoModel._meta.table_name,
                                    'extra_borg_arguments', pw.CharField(default='')))
        if current_schema.version < 13:
            # Migrate ArchiveModel data to new table to remove unique
            # constraint from snapshot_id column. Only copy when the target is
            # empty and the legacy table still exists.
            tables = db.get_tables()
            if ArchiveModel.select().count() == 0 and 'snapshotmodel' in tables:
                cursor = db.execute_sql('select * from snapshotmodel;')
                fields = [
                    ArchiveModel.id, ArchiveModel.snapshot_id, ArchiveModel.name,
                    ArchiveModel.repo, ArchiveModel.time, ArchiveModel.duration,
                    ArchiveModel.size
                ]
                data = list(cursor.fetchall())
                with db.atomic():
                    # Insert in batches to stay under SQLite's bound-variable limit.
                    batch_size = 1000
                    for i in range(0, len(data), batch_size):
                        ArchiveModel.insert_many(
                            data[i:i + batch_size], fields=fields).execute()
            _apply_schema_update(current_schema, 13)
        if current_schema.version < 14:
            _apply_schema_update(
                current_schema, 14,
                migrator.add_column(SettingsModel._meta.table_name,
                                    'str_value', pw.CharField(default='')))
        if current_schema.version < 15:
            _apply_schema_update(
                current_schema, 15,
                migrator.add_column(BackupProfileModel._meta.table_name,
                                    'dont_run_on_metered_networks',
                                    pw.BooleanField(default=True)))
        if current_schema.version < 16:
            _apply_schema_update(
                current_schema, 16,
                migrator.add_column(SourceFileModel._meta.table_name,
                                    'dir_size', pw.BigIntegerField(default=-1)),
                migrator.add_column(SourceFileModel._meta.table_name,
                                    'dir_files_count', pw.BigIntegerField(default=-1)),
                migrator.add_column(SourceFileModel._meta.table_name,
                                    'path_isdir', pw.BooleanField(default=False)))

    # Create missing settings and update labels. Leave setting values untouched.
    for setting in get_misc_settings():
        s, _created = SettingsModel.get_or_create(key=setting['key'],
                                                  defaults=setting)
        s.label = setting['label']
        s.save()

    # Delete old log entries after 3 months.
    # BUG FIX: the delete query was previously built but never run — peewee
    # queries only hit the database when .execute() is called.
    three_months_ago = datetime.now() - rd(months=3)
    EventLogModel.delete().where(
        EventLogModel.start_time < three_months_ago).execute()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'ipetrash'

# One-shot schema migration: removes an obsolete column from the settings
# table. Pattern follows the peewee playhouse guide:
# http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#schema-migrations

from playhouse.migrate import SqliteDatabase, SqliteMigrator, migrate

from config import DB_FILE_NAME

# Open the application database and prepare a migrator bound to it.
db = SqliteDatabase(DB_FILE_NAME)
migrator = SqliteMigrator(db)

# Perform the drop inside a single transaction so a failure leaves the
# schema untouched.
with db.atomic():
    drop_limit_unique_quotes = migrator.drop_column('settings', 'limit_unique_quotes')
    migrate(drop_limit_unique_quotes)