def migrate_Context(self):
    """Copy Context rows into the new schema, column by column.

    Two columns get special treatment:
      * tip_timetolive — clamped: values above five years or below -1
        become -1 ("never expire");
      * enable_rc_to_wb_files — new column, initialized to False.
    Every other column is copied verbatim from the old row.
    """
    # Retention ceiling, in days; beyond five years we assume the intent
    # was to keep submissions forever.
    max_ttl = 5 * 365

    for src in self.store_old.find(self.model_from['Context']):
        dst = self.model_to['Context']()

        for _, column in dst._storm_columns.items():
            attr = column.name

            if attr == 'tip_timetolive':
                ttl = src.tip_timetolive
                if ttl > max_ttl:
                    GLSettings.print_msg('[WARNING] Found an expiration date longer than 5 years! Configuring tips to never expire.')
                    dst.tip_timetolive = -1
                elif ttl < -1:
                    GLSettings.print_msg('[WARNING] Found a negative tip expiration! Configuring tips to never expire.')
                    dst.tip_timetolive = -1
                else:
                    dst.tip_timetolive = ttl
            elif attr == 'enable_rc_to_wb_files':
                dst.enable_rc_to_wb_files = False
            else:
                setattr(dst, attr, getattr(src, attr))

        self.store_new.add(dst)
def migrate_Context(self):
    """Migrate Context rows into the new model, one column at a time.

    Special cases:
      * tip_timetolive is clamped: anything above five years or below -1
        becomes -1 ("never expire");
      * enable_rc_to_wb_files is a new column and defaults to False.
    All other columns are copied verbatim.
    """
    old_objs = self.store_old.find(self.model_from['Context'])
    for old_obj in old_objs:
        new_obj = self.model_to['Context']()
        # Use .items() instead of the Python-2-only .iteritems(), matching the
        # sibling implementation of this migration; behavior is identical.
        for _, v in new_obj._storm_columns.items():
            if v.name == 'tip_timetolive':
                # NOTE hardcoded policy: five years, expressed in days.
                tip_ttl = 5 * 365
                if old_obj.tip_timetolive > tip_ttl:
                    GLSettings.print_msg('[WARNING] Found an expiration date longer than 5 years! Configuring tips to never expire.')
                    # If data retention was larger than 5 years the intended goal was
                    # probably to keep the submission around forever.
                    new_obj.tip_timetolive = -1
                elif old_obj.tip_timetolive < -1:
                    GLSettings.print_msg('[WARNING] Found a negative tip expiration! Configuring tips to never expire.')
                    new_obj.tip_timetolive = -1
                else:
                    new_obj.tip_timetolive = old_obj.tip_timetolive

                continue
            elif v.name == 'enable_rc_to_wb_files':
                new_obj.enable_rc_to_wb_files = False
                continue

            setattr(new_obj, v.name, getattr(old_obj, v.name))

        self.store_new.add(new_obj)
def execute_query(self, query):
    """Execute *query* (with a ';' terminator appended) on the new store.

    On OperationalError the failing statement is logged before the
    exception is re-raised to the caller.
    """
    terminated = '%s;' % query
    try:
        self.store_new.execute(terminated)
    except OperationalError as excep:
        # Surface which statement failed before propagating the error.
        GLSettings.print_msg('OperationalError %s while executing query: %s' % (excep, query))
        raise excep
def __init__(self, migration_mapping, start_version, store_old, store_new):
    """Prepare a single migration step from start_version to start_version + 1.

    Builds the from/to model maps, records per-table row counts (used later
    by the integrity check), and initializes the new database: from the
    shipped schema file when this is the final step, otherwise from the
    versioned SQL statements.

    Raises TypeError when a table's history does not cover every supported
    version, and IOError when the schema file is unreadable.
    """
    self.appdata = load_appdata()
    self.migration_mapping = migration_mapping
    self.start_version = start_version
    self.store_old = store_old
    self.store_new = store_new
    self.model_from = {}
    self.model_to = {}
    self.entries_count = {}
    self.fail_on_count_mismatch = {}

    # Loop-invariant: every table's history must span all supported versions.
    expected_length = DATABASE_VERSION + 1 - FIRST_DATABASE_VERSION_SUPPORTED

    for model_name, model_history in migration_mapping.iteritems():
        if len(model_history) != expected_length:
            raise TypeError('Expecting a table with {} statuses ({})'.format(expected_length, model_name))

        self.fail_on_count_mismatch[model_name] = True

        self.model_from[model_name] = self.get_right_model(model_name, start_version)
        self.model_to[model_name] = self.get_right_model(model_name, start_version + 1)

        if self.model_from[model_name] is not None and self.model_to[model_name] is not None:
            self.entries_count[model_name] = self.store_old.find(self.model_from[model_name]).count()
        else:
            # Table absent at one end of this step: nothing to count.
            self.entries_count[model_name] = 0

    if self.start_version + 1 == DATABASE_VERSION:
        # Final step: build the target database straight from the schema file.
        if not os.access(GLSettings.db_schema, os.R_OK):
            # BUGFIX: the closing quote was misplaced, so the literal text
            # "%s ' % GLSettings.db_schema" was printed instead of the path.
            GLSettings.print_msg('Unable to access %s' % GLSettings.db_schema)
            raise IOError('Unable to access db schema file')

        with open(GLSettings.db_schema) as f:
            queries = ''.join(f).split(';')
            for query in queries:
                self.execute_query(query)
    else:
        # Intermediate step: create tables from the versioned SQL.
        for k, _ in self.migration_mapping.iteritems():
            query = self.get_right_sql_version(k, self.start_version + 1)
            if not query:
                # the table has been removed in the target version
                continue

            self.execute_query(query)

    self.store_new.commit()
def migrate_model(self, model_name):
    """Migrate one table: dispatch to migrate_<model_name> when such a
    method exists, otherwise fall back to the generic column copy."""
    count = self.store_old.find(self.model_from[model_name]).count()
    handler = getattr(self, 'migrate_%s' % model_name, None)

    if handler is None:
        GLSettings.print_msg(' * %s [#%d]' % (model_name, count))
        self.generic_migration_function(model_name)
    else:
        GLSettings.print_msg(' ł %s [#%d]' % (model_name, count))
        handler()
def __init__(self, migration_mapping, start_version, store_old, store_new):
    """Set up a one-version migration step (start_version -> start_version + 1).

    Resolves the old/new model class for every table, counts the rows to be
    migrated (consumed by the post-migration integrity check), and creates
    the tables in the new database — from the schema file on the last step,
    from the per-version SQL otherwise.

    Raises TypeError on a malformed migration mapping and IOError when the
    schema file cannot be read.
    """
    self.appdata = load_appdata()
    self.migration_mapping = migration_mapping
    self.start_version = start_version
    self.store_old = store_old
    self.store_new = store_new
    self.model_from = {}
    self.model_to = {}
    self.entries_count = {}
    self.fail_on_count_mismatch = {}

    # Each table history must have one entry per supported schema version.
    expected_length = DATABASE_VERSION + 1 - FIRST_DATABASE_VERSION_SUPPORTED

    for model_name, model_history in migration_mapping.iteritems():
        if len(model_history) != expected_length:
            raise TypeError('Expecting a table with {} statuses ({})'.format(expected_length, model_name))

        self.fail_on_count_mismatch[model_name] = True

        self.model_from[model_name] = self.get_right_model(model_name, start_version)
        self.model_to[model_name] = self.get_right_model(model_name, start_version + 1)

        if self.model_from[model_name] is not None and self.model_to[model_name] is not None:
            self.entries_count[model_name] = self.store_old.find(self.model_from[model_name]).count()
        else:
            # The table does not exist on one side of this step.
            self.entries_count[model_name] = 0

    if self.start_version + 1 == DATABASE_VERSION:
        # we are there! build the final database from the shipped schema.
        if not os.access(GLSettings.db_schema, os.R_OK):
            # BUGFIX: misplaced quote made this print the literal
            # "%s ' % GLSettings.db_schema" rather than the schema path.
            GLSettings.print_msg('Unable to access %s' % GLSettings.db_schema)
            raise IOError('Unable to access db schema file')

        with open(GLSettings.db_schema) as f:
            queries = ''.join(f).split(';')
            for query in queries:
                self.execute_query(query)
    else:
        # manage the migration here, table by table
        for k, _ in self.migration_mapping.iteritems():
            query = self.get_right_sql_version(k, self.start_version + 1)
            if not query:
                # the table has been removed
                continue

            self.execute_query(query)

    self.store_new.commit()
def perform_schema_migration(version):
    """
    Migrate the database schema from *version* up to DATABASE_VERSION.

    Works on copies inside a temporary directory, migrating one version at
    a time; on success the final db replaces the original, which is then
    securely removed. On any failure the original db file is left intact.

    @param version: the schema version of the existing database file
    @return: None; raises on any migration or integrity-check failure
    """
    # NOTE(review): these two lists are populated below but never read in
    # this function — presumably leftovers from an older cleanup strategy.
    to_delete_on_fail = []
    to_delete_on_success = []

    if version < FIRST_DATABASE_VERSION_SUPPORTED:
        GLSettings.print_msg("Migrations from DB version lower than %d are no longer supported!" % FIRST_DATABASE_VERSION_SUPPORTED)
        quit()

    # All work happens on copies in db_path/tmp; the original is untouched
    # until the whole chain succeeds.
    tmpdir = os.path.abspath(os.path.join(GLSettings.db_path, 'tmp'))
    orig_db_file = os.path.abspath(os.path.join(GLSettings.db_path, 'glbackend-%d.db' % version))
    final_db_file = os.path.abspath(os.path.join(GLSettings.db_path, 'glbackend-%d.db' % DATABASE_VERSION))

    shutil.rmtree(tmpdir, True)
    os.mkdir(tmpdir)
    shutil.copy2(orig_db_file, tmpdir)

    new_db_file = None

    try:
        # Migrate one version per iteration: glbackend-N.db -> glbackend-(N+1).db
        while version < DATABASE_VERSION:
            old_db_file = os.path.abspath(os.path.join(tmpdir, 'glbackend-%d.db' % version))
            new_db_file = os.path.abspath(os.path.join(tmpdir, 'glbackend-%d.db' % (version + 1)))

            GLSettings.db_file = new_db_file
            GLSettings.enable_input_length_checks = False

            to_delete_on_fail.append(new_db_file)
            to_delete_on_success.append(old_db_file)

            GLSettings.print_msg("Updating DB from version %d to version %d" % (version, version + 1))

            store_old = Store(create_database('sqlite:' + old_db_file))
            store_new = Store(create_database('sqlite:' + new_db_file))

            # Here is instanced the migration script for this specific step
            # (globaleaks.db.migrations.update_<N+1>).
            MigrationModule = importlib.import_module("globaleaks.db.migrations.update_%d" % (version + 1))
            migration_script = MigrationModule.MigrationScript(migration_mapping, version, store_old, store_new)

            GLSettings.print_msg("Migrating table:")

            try:
                try:
                    migration_script.prologue()
                except Exception as exception:
                    GLSettings.print_msg("Failure while executing migration prologue: %s" % exception)
                    raise exception

                for model_name, _ in migration_mapping.iteritems():
                    # Only migrate tables that exist on both sides of this step.
                    if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                        try:
                            migration_script.migrate_model(model_name)

                            # Commit at every table migration in order to be able to detect
                            # the precise migration that may fail.
                            migration_script.commit()
                        except Exception as exception:
                            GLSettings.print_msg("Failure while migrating table %s: %s " % (model_name, exception))
                            raise exception

                try:
                    migration_script.epilogue()
                    migration_script.commit()
                except Exception as exception:
                    GLSettings.print_msg("Failure while executing migration epilogue: %s " % exception)
                    raise exception

            finally:
                # the database should be always closed before leaving the application
                # in order to not keep leaking journal files.
                migration_script.close()

            GLSettings.print_msg("Migration stats:")

            # we open a new db in order to verify integrity of the generated file
            # NOTE(review): this uses GLSettings.make_db_uri while the stores above
            # build the URI by hand with 'sqlite:' + path — confirm they agree.
            store_verify = Store(create_database(GLSettings.make_db_uri(new_db_file)))

            for model_name, _ in migration_mapping.iteritems():
                if model_name == 'ApplicationData':
                    continue

                if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                    # Row counts must match unless the step explicitly opted out
                    # via fail_on_count_mismatch.
                    count = store_verify.find(migration_script.model_to[model_name]).count()
                    if migration_script.entries_count[model_name] != count:
                        if migration_script.fail_on_count_mismatch[model_name]:
                            raise AssertionError("Integrity check failed on count equality for table %s: %d != %d" % \
                                                 (model_name, count, migration_script.entries_count[model_name]))
                        else:
                            GLSettings.print_msg(" * %s table migrated (entries count changed from %d to %d)" % \
                                                 (model_name, migration_script.entries_count[model_name], count))
                else:
                    GLSettings.print_msg(" * %s table migrated (%d entry(s))" % \
                                         (model_name, migration_script.entries_count[model_name]))

            version += 1

            store_verify.close()

        # All steps succeeded: run the data-level updates on the final file.
        perform_data_update(new_db_file)
    except Exception as exception:
        # simply propagate the exception
        raise exception
    else:
        # in case of success first copy the new migrated db, then as last action delete the original db file
        shutil.copy(new_db_file, final_db_file)
        security.overwrite_and_remove(orig_db_file)
    finally:
        # Always cleanup the temporary directory used for the migration
        for f in os.listdir(tmpdir):
            tmp_db_file = os.path.join(tmpdir, f)
            security.overwrite_and_remove(tmp_db_file)

        shutil.rmtree(tmpdir)
def perform_version_update(version):
    """
    Migrate the database from *version* up to DATABASE_VERSION, one step
    at a time, working on copies in a temporary directory.

    NOTE(review): this looks like an older variant of perform_schema_migration
    kept in the same file — it uses plain os.remove instead of
    security.overwrite_and_remove and does not call perform_data_update.

    @param version: the schema version of the existing database file
    @return: None; raises on any migration or integrity-check failure
    """
    # NOTE(review): populated below but never consumed in this function.
    to_delete_on_fail = []
    to_delete_on_success = []

    if version < FIRST_DATABASE_VERSION_SUPPORTED:
        GLSettings.print_msg("Migrations from DB version lower than %d are no more supported!" % FIRST_DATABASE_VERSION_SUPPORTED)
        GLSettings.print_msg("If you can't create your Node from scratch, contact us asking for support.")
        quit()

    # Work on copies under db_path/tmp; the original db is only removed after
    # the whole chain has succeeded.
    tmpdir = os.path.abspath(os.path.join(GLSettings.db_path, 'tmp'))
    orig_db_file = os.path.abspath(os.path.join(GLSettings.db_path, 'glbackend-%d.db' % version))
    final_db_file = os.path.abspath(os.path.join(GLSettings.db_path, 'glbackend-%d.db' % DATABASE_VERSION))

    shutil.rmtree(tmpdir, True)
    os.mkdir(tmpdir)
    shutil.copy2(orig_db_file, tmpdir)

    try:
        # One schema version per iteration.
        while version < DATABASE_VERSION:
            old_db_file = os.path.abspath(os.path.join(tmpdir, 'glbackend-%d.db' % version))
            new_db_file = os.path.abspath(os.path.join(tmpdir, 'glbackend-%d.db' % (version + 1)))

            GLSettings.db_file = new_db_file
            GLSettings.enable_input_length_checks = False

            to_delete_on_fail.append(new_db_file)
            to_delete_on_success.append(old_db_file)

            GLSettings.print_msg("Updating DB from version %d to version %d" % (version, version + 1))

            store_old = Store(create_database('sqlite:' + old_db_file))
            store_new = Store(create_database('sqlite:' + new_db_file))

            # Here is instanced the migration script for this step
            # (globaleaks.db.migrations.update_<N+1>).
            MigrationModule = importlib.import_module("globaleaks.db.migrations.update_%d" % (version + 1))
            migration_script = MigrationModule.MigrationScript(migration_mapping, version, store_old, store_new)

            GLSettings.print_msg("Migrating table:")

            try:
                try:
                    migration_script.prologue()
                except Exception as exception:
                    GLSettings.print_msg("Failure while executing migration prologue: %s" % exception)
                    raise exception

                for model_name, _ in migration_mapping.iteritems():
                    # Only tables present on both sides of the step are migrated.
                    if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                        try:
                            migration_script.migrate_model(model_name)

                            # Commit at every table migration in order to be able to detect
                            # the precise migration that may fail.
                            migration_script.commit()
                        except Exception as exception:
                            GLSettings.print_msg("Failure while migrating table %s: %s " % (model_name, exception))
                            raise exception

                try:
                    migration_script.epilogue()
                    migration_script.commit()
                except Exception as exception:
                    GLSettings.print_msg("Failure while executing migration epilogue: %s " % exception)
                    raise exception

            finally:
                # the database should be always closed before leaving the application
                # in order to not keep leaking journal files.
                migration_script.close()

            GLSettings.print_msg("Migration stats:")

            # we open a new db in order to verify integrity of the generated file
            store_verify = Store(create_database('sqlite:' + new_db_file))

            for model_name, _ in migration_mapping.iteritems():
                if model_name == 'ApplicationData':
                    continue

                if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                    # Row counts must match unless the step opted out via
                    # fail_on_count_mismatch.
                    count = store_verify.find(migration_script.model_to[model_name]).count()
                    if migration_script.entries_count[model_name] != count:
                        if migration_script.fail_on_count_mismatch[model_name]:
                            raise AssertionError("Integrity check failed on count equality for table %s: %d != %d" % \
                                                 (model_name, count, migration_script.entries_count[model_name]))
                        else:
                            GLSettings.print_msg(" * %s table migrated (entries count changed from %d to %d)" % \
                                                 (model_name, migration_script.entries_count[model_name], count))
                else:
                    GLSettings.print_msg(" * %s table migrated (%d entry(s))" % \
                                         (model_name, migration_script.entries_count[model_name]))

            version += 1

            store_verify.close()

    except Exception as exception:
        # simply propagate the exception
        raise exception
    else:
        # in case of success first copy the new migrated db, then as last action delete the original db file
        shutil.copy(os.path.abspath(os.path.join(tmpdir, 'glbackend-%d.db' % DATABASE_VERSION)), final_db_file)
        os.remove(orig_db_file)
    finally:
        # always cleanup the temporary directory used for the migration
        shutil.rmtree(tmpdir, True)
def execute_query(self, query):
    """Run one SQL statement against the new store, appending the ';'
    terminator; OperationalError is logged and then re-raised."""
    statement = query + ';'
    try:
        self.store_new.execute(statement)
    except OperationalError as excep:
        # Report the failing query text, then let the caller handle it.
        GLSettings.print_msg('OperationalError %s while executing query: %s' % (excep, query))
        raise excep