def perform_secure_deletion_of_files(self):
    """
    Securely wipe files scheduled for deletion and purge expired
    temporary artifacts.

    Three passes:
      1. files explicitly marked for secure deletion (then committed);
      2. ``*.aes`` files in the attachments directory older than 1 day;
      3. backup files older than 15 days.
    """
    # Pass 1: files marked for secure deletion
    files_to_delete = yield self.get_files_to_secure_delete()

    for file_to_delete in files_to_delete:
        overwrite_and_remove(file_to_delete)

    if files_to_delete:
        yield self.commit_files_deletion(files_to_delete)

    # Pass 2: outdated AES files older than 1 day
    attachments_dir = self.state.settings.attachments_path
    for name in os.listdir(attachments_dir):
        if not fnmatch.fnmatch(name, '*.aes'):
            continue

        full_path = os.path.join(attachments_dir, name)
        mtime = datetime.datetime.fromtimestamp(os.path.getmtime(full_path))
        if is_expired(mtime, days=1):
            os.remove(full_path)

    # Pass 3: backups older than 15 days
    backups_dir = self.state.settings.backups_path
    for name in os.listdir(backups_dir):
        full_path = os.path.join(backups_dir, name)
        mtime = datetime.datetime.fromtimestamp(os.path.getmtime(full_path))
        if is_expired(mtime, days=15):
            os.remove(full_path)
def sync_clean_untracked_files(session):
    """
    Remove files in Settings.attachments_path that are not tracked
    by InternalFile/ReceiverFile.

    Removal failures are logged and skipped so one bad file does not
    abort the cleanup of the rest.

    @param session: the database session used to look up tracked files
    """
    tracked_files = db_get_tracked_files(session)

    for filesystem_file in os.listdir(Settings.attachments_path):
        if filesystem_file not in tracked_files:
            file_to_remove = os.path.join(Settings.attachments_path, filesystem_file)
            try:
                log.debug('Removing untracked file: %s', file_to_remove)
                security.overwrite_and_remove(file_to_remove)
            except OSError:
                # Fix: the path was passed as a stray positional argument with
                # no '%s' placeholder (unlike the log.debug call above), so it
                # never made it into the message.
                log.err('Failed to remove untracked file: %s', file_to_remove)
def perform_migration(version):
    """
    Incrementally migrate the database from `version` up to DATABASE_VERSION.

    The migration works on scratch copies (tmpdir/old.db -> tmpdir/new.db),
    one schema version per step; every step is integrity-checked by
    comparing migrated row counts. Only on full success is the result
    copied over the final database file and the original removed.

    @param version: the schema version of the existing database
    @return: None; exits the process if the version is too old to migrate
    """
    import sys  # local import: only needed for the unsupported-version exit

    if version < FIRST_DATABASE_VERSION_SUPPORTED:
        log.info("Migrations from DB version lower than %d are no longer supported!" % FIRST_DATABASE_VERSION_SUPPORTED)
        # Fix: quit() belongs to the interactive `site` module and is not
        # guaranteed to exist at runtime; sys.exit() is the reliable way out.
        sys.exit(1)

    tmpdir = os.path.abspath(os.path.join(Settings.tmp_path, 'tmp'))

    # Before version 41 the database file embedded its version in the name
    if version < 41:
        orig_db_file = os.path.abspath(os.path.join(Settings.working_path, 'db', 'glbackend-%d.db' % version))
    else:
        orig_db_file = os.path.abspath(os.path.join(Settings.working_path, 'globaleaks.db'))

    final_db_file = os.path.abspath(os.path.join(Settings.working_path, 'globaleaks.db'))

    # Work on a scratch copy so the original database stays untouched
    # until the whole migration has succeeded.
    shutil.rmtree(tmpdir, True)
    os.mkdir(tmpdir)
    shutil.copy(orig_db_file, os.path.join(tmpdir, 'old.db'))

    new_db_file = None

    try:
        while version < DATABASE_VERSION:
            old_db_file = os.path.abspath(os.path.join(tmpdir, 'old.db'))
            new_db_file = os.path.abspath(os.path.join(tmpdir, 'new.db'))

            # The output of the previous step becomes the input of this one
            if os.path.exists(new_db_file):
                shutil.move(new_db_file, old_db_file)

            Settings.db_file = new_db_file
            Settings.enable_input_length_checks = False

            log.info("Updating DB from version %d to version %d" % (version, version + 1))

            j = version - FIRST_DATABASE_VERSION_SUPPORTED

            session_old = get_session(make_db_uri(old_db_file))

            engine = get_engine(make_db_uri(new_db_file), foreign_keys=False)

            # The last step targets the current model; intermediate steps
            # use the historical model matching that schema version.
            if FIRST_DATABASE_VERSION_SUPPORTED + j + 1 == DATABASE_VERSION:
                Base.metadata.create_all(engine)
            else:
                Bases[j+1].metadata.create_all(engine)

            session_new = sessionmaker(bind=engine)()

            # Here is instanced the migration script
            MigrationModule = importlib.import_module("globaleaks.db.migrations.update_%d" % (version + 1))
            migration_script = MigrationModule.MigrationScript(migration_mapping, version, session_old, session_new)

            log.info("Migrating table:")

            try:
                try:
                    migration_script.prologue()
                except Exception as exception:
                    log.err("Failure while executing migration prologue: %s" % exception)
                    # Fix: bare `raise` preserves the original traceback
                    # (raise exception would rewrite it from here).
                    raise

                for model_name, _ in migration_mapping.items():
                    if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                        try:
                            migration_script.migrate_model(model_name)

                            # Commit at every table migration in order to be able
                            # to detect the precise migration that may fail.
                            migration_script.commit()
                        except Exception as exception:
                            log.err("Failure while migrating table %s: %s " % (model_name, exception))
                            raise

                try:
                    migration_script.epilogue()
                    migration_script.commit()
                except Exception as exception:
                    log.err("Failure while executing migration epilogue: %s " % exception)
                    raise
            finally:
                # the database should be always closed before leaving the
                # application in order to not keep leaking journal files.
                migration_script.close()

            log.info("Migration stats:")

            # we open a new db in order to verify integrity of the generated file
            session_verify = get_session(make_db_uri(new_db_file))

            for model_name, _ in migration_mapping.items():
                if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                    count = session_verify.query(migration_script.model_to[model_name]).count()
                    if migration_script.entries_count[model_name] != count:
                        if migration_script.fail_on_count_mismatch[model_name]:
                            raise AssertionError("Integrity check failed on count equality for table %s: %d != %d" % \
                                                 (model_name, count, migration_script.entries_count[model_name]))
                        else:
                            log.info(" * %s table migrated (entries count changed from %d to %d)" % \
                                     (model_name, migration_script.entries_count[model_name], count))
                    else:
                        log.info(" * %s table migrated (%d entry(s))" % \
                                 (model_name, migration_script.entries_count[model_name]))

            version += 1

            session_verify.close()

        perform_data_update(new_db_file)

        # in case of success first copy the new migrated db, then as last
        # action delete the original db file
        shutil.copy(new_db_file, final_db_file)

        if orig_db_file != final_db_file:
            overwrite_and_remove(orig_db_file)

        path = os.path.join(Settings.working_path, 'db')
        if os.path.exists(path):
            shutil.rmtree(path)
    finally:
        # Always cleanup the temporary directory used for the migration.
        # Note: the unused to_delete_on_fail/to_delete_on_success lists the
        # function used to build were dead code and have been removed.
        for f in os.listdir(tmpdir):
            overwrite_and_remove(os.path.join(tmpdir, f))

        shutil.rmtree(tmpdir)
def perform_migration(version):
    """
    Incrementally migrate the database from `version` up to DATABASE_VERSION.

    The migration works on scratch copies (tmpdir/old.db -> tmpdir/new.db),
    one schema version per step; every step is integrity-checked by
    comparing migrated row counts. Only on full success is the result
    copied over the final database file and the original removed.

    @param version: the schema version of the existing database
    @return: None; exits the process if the version is too old to migrate
    @raise: re-raises any migration failure after logging it
    """
    import sys  # local import: only needed for the unsupported-version exit

    if version < FIRST_DATABASE_VERSION_SUPPORTED:
        log.info("Migrations from DB version lower than %d are no longer supported!" % FIRST_DATABASE_VERSION_SUPPORTED)
        # Fix: quit() belongs to the interactive `site` module and is not
        # guaranteed to exist at runtime; sys.exit() is the reliable way out.
        sys.exit(1)

    tmpdir = os.path.abspath(os.path.join(Settings.tmp_path, 'tmp'))

    # Before version 41 the database file embedded its version in the name
    if version < 41:
        orig_db_file = os.path.abspath(os.path.join(Settings.working_path, 'db', 'glbackend-%d.db' % version))
    else:
        orig_db_file = os.path.abspath(os.path.join(Settings.working_path, 'globaleaks.db'))

    final_db_file = os.path.abspath(os.path.join(Settings.working_path, 'globaleaks.db'))

    # Work on a scratch copy so the original database stays untouched
    # until the whole migration has succeeded.
    shutil.rmtree(tmpdir, True)
    os.mkdir(tmpdir)
    shutil.copy(orig_db_file, os.path.join(tmpdir, 'old.db'))

    new_db_file = None

    try:
        while version < DATABASE_VERSION:
            old_db_file = os.path.abspath(os.path.join(tmpdir, 'old.db'))
            new_db_file = os.path.abspath(os.path.join(tmpdir, 'new.db'))

            # The output of the previous step becomes the input of this one
            if os.path.exists(new_db_file):
                shutil.move(new_db_file, old_db_file)

            Settings.db_file = new_db_file
            Settings.enable_input_length_checks = False

            log.info("Updating DB from version %d to version %d" % (version, version + 1))

            j = version - FIRST_DATABASE_VERSION_SUPPORTED

            session_old = get_session(make_db_uri(old_db_file))

            engine = get_engine(make_db_uri(new_db_file), foreign_keys=False)

            # The last step targets the current model; intermediate steps
            # use the historical model matching that schema version.
            if FIRST_DATABASE_VERSION_SUPPORTED + j + 1 == DATABASE_VERSION:
                Base.metadata.create_all(engine)
            else:
                Bases[j+1].metadata.create_all(engine)

            session_new = sessionmaker(bind=engine)()

            # Here is instanced the migration script
            MigrationModule = importlib.import_module("globaleaks.db.migrations.update_%d" % (version + 1))
            migration_script = MigrationModule.MigrationScript(migration_mapping, version, session_old, session_new)

            log.info("Migrating table:")

            try:
                try:
                    migration_script.prologue()
                except Exception as exception:
                    log.err("Failure while executing migration prologue: %s" % exception)
                    # Fix: bare `raise` preserves the original traceback
                    # (raise exception would rewrite it from here).
                    raise

                for model_name, _ in migration_mapping.items():
                    if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                        try:
                            migration_script.migrate_model(model_name)

                            # Commit at every table migration in order to be able
                            # to detect the precise migration that may fail.
                            migration_script.commit()
                        except Exception as exception:
                            log.err("Failure while migrating table %s: %s " % (model_name, exception))
                            raise

                try:
                    migration_script.epilogue()
                    migration_script.commit()
                except Exception as exception:
                    log.err("Failure while executing migration epilogue: %s " % exception)
                    raise
            finally:
                # the database should be always closed before leaving the
                # application in order to not keep leaking journal files.
                migration_script.close()

            log.info("Migration stats:")

            # we open a new db in order to verify integrity of the generated file
            session_verify = get_session(make_db_uri(new_db_file))

            for model_name, _ in migration_mapping.items():
                if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                    count = session_verify.query(migration_script.model_to[model_name]).count()
                    if migration_script.entries_count[model_name] != count:
                        if migration_script.fail_on_count_mismatch[model_name]:
                            raise AssertionError("Integrity check failed on count equality for table %s: %d != %d" %
                                                 (model_name, count, migration_script.entries_count[model_name]))
                        else:
                            log.info(" * %s table migrated (entries count changed from %d to %d)" %
                                     (model_name, migration_script.entries_count[model_name], count))
                    else:
                        log.info(" * %s table migrated (%d entry(s))" %
                                 (model_name, migration_script.entries_count[model_name]))

            version += 1

            session_verify.close()

        perform_data_update(new_db_file)

        # in case of success first copy the new migrated db, then as last
        # action delete the original db file
        shutil.copy(new_db_file, final_db_file)

        if orig_db_file != final_db_file:
            overwrite_and_remove(orig_db_file)

        path = os.path.join(Settings.working_path, 'db')
        if os.path.exists(path):
            shutil.rmtree(path)
    except Exception as e:
        # Fix: the previous `print(e)` silently swallowed any migration
        # failure, leaving callers to believe the migration succeeded while
        # the cleanup in `finally` destroyed the partially migrated files.
        # Log and re-raise so the failure is visible and propagated.
        log.err("Migration failure: %s" % e)
        raise
    finally:
        # Always cleanup the temporary directory used for the migration.
        # Note: the unused to_delete_on_fail/to_delete_on_success lists the
        # function used to build were dead code and have been removed.
        for f in os.listdir(tmpdir):
            overwrite_and_remove(os.path.join(tmpdir, f))

        shutil.rmtree(tmpdir)