class ManagementLock(object):
    """Context manager serializing management commands via a file lock."""

    def __init__(self):
        self.lock = None

    def acquire(self):
        """Obtain the inter-process lock, waiting up to five minutes.

        Raises RuntimeError when the lock cannot be obtained in time.
        """
        candidate = InterProcessLock(LOCK_PATH)
        # Attempt to obtain a lock, retry every 10 seconds. Wait at most
        # 5 minutes. The retrying is necessary so we can report on stderr
        # that we are waiting for a lock. Otherwise, a user trying to run
        # the command manually might get confused why the command
        # execution is delayed.
        if candidate.acquire(blocking=False):
            self.lock = candidate
            return
        print("Another management command is running, waiting for lock...",
              file=sys.stderr)
        if candidate.acquire(delay=10, max_delay=10, timeout=300):
            self.lock = candidate
            return
        raise RuntimeError("Failed to acquire lock.")

    def release(self):
        """Release the lock when held; a no-op otherwise."""
        if self.lock is not None:
            self.lock.release()

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, _exc_type, _exc_val, _exc_tb):
        self.release()
        # Never suppress exceptions raised inside the managed block.
        return False
def _update_db(self):
    """Write the primary platform database to disk under a file lock.

    Returns True when the database file was written, False when the lock
    could not be acquired within 60 seconds or the destination database
    is ambiguous.
    """
    if not self._prim_db:
        logger.error(
            "Can't update platform database: destination database is ambiguous"
        )
        return False
    lock = InterProcessLock("%s.lock" % self._prim_db)
    # Cheap non-blocking attempt first; only log when we actually wait.
    acquired = lock.acquire(blocking=False)
    if not acquired:
        logger.debug("Waiting 60 seconds for file lock")
        acquired = lock.acquire(blocking=True, timeout=60)
    if not acquired:
        logger.error(
            "Could not update platform database: "
            "Lock acquire failed after 60 seconds"
        )
        return False
    try:
        # Fix: json.dumps already returns str on Python 3; the original
        # wrapped it in the Python 2-only `unicode`, which raises
        # NameError under the Python 3 `open(..., encoding=...)` used here.
        with open(self._prim_db, "w", encoding="utf-8") as out:
            out.write(json.dumps(self._dbs[self._prim_db]))
        return True
    finally:
        lock.release()
def wrapper(self, *args):
    """Run the wrapped call while holding the mock file lock.

    Returns whatever the wrapped function returns (the original computed
    the value but never returned it; the sibling implementation does).
    """
    ret = None
    lock = InterProcessLock(self.lock_file)
    # Non-blocking attempt first so we only log when we must wait.
    acquired = lock.acquire(blocking=False)
    if not acquired:
        self.debug("timed_mbedls_lock",
                   "Waiting %d seconds for mock file lock." % timeout)
        acquired = lock.acquire(blocking=True, timeout=timeout)
    if acquired:
        try:
            ret = wrapper.original(self, *args)
        finally:
            # Fix: the original duplicated the release in an except clause
            # written with Python 2-only `except Exception, e` syntax
            # (a SyntaxError on Python 3); `finally` covers both paths.
            lock.release()
    return ret
def wrapper(self, *args):
    """Invoke the wrapped function under the mock file lock.

    Exits the process when the lock cannot be obtained within the timeout.
    """
    lock = InterProcessLock(self.lock_file)
    # Try cheaply first; fall back to a timed blocking wait.
    got_lock = lock.acquire(blocking=False)
    if not got_lock:
        self.debug("timed_mbedls_lock",
                   "Waiting %d seconds for mock file lock." % timeout)
        got_lock = lock.acquire(blocking=True, timeout=timeout)
    if not got_lock:
        self.err("timed_mbedls_lock",
                 "Failed to acquired mock file lock in %d seconds!" % timeout)
        sys.exit(1)
    try:
        ret = wrapper.original(self, *args)
    except Exception as e:
        lock.release()
        raise e
    lock.release()
    return ret
def _get_pid_lock(self, path_pidfile):
    """Acquire a non-blocking inter-process PID-file lock.

    :param path_pidfile: Path of the PID lock file to acquire.
    :returns: Tuple ``(pid_lock, got_pid_lock)``; ``got_pid_lock`` is True.
    :raises AlreadyCrawling: If another process already holds the lock.
    """
    pid_lock = InterProcessLock(path_pidfile)
    got_pid_lock = pid_lock.acquire(blocking=False)
    if not got_pid_lock:
        # Another crawler owns the PID file; bail out immediately.
        raise AlreadyCrawling(path_pidfile)
    # Fix: log the path that was actually locked (the original re-read
    # self.config["path_pidfile"], which may diverge from the argument)
    # and correct the "Aquired" spelling.
    self.logger.debug(f"Acquired PID lock {path_pidfile!r}")
    return pid_lock, got_pid_lock
class AdaptiveLock:
    """File lock that degrades from hard links to fcntl to no locking."""

    # Seconds to keep trying before a strategy is declared unusable.
    timeout = 600

    def __init__(self):
        self.method = "hard_links"
        self.lock_instance = None

    def lock(self, lock_file):
        """Acquire ``lock_file`` with the best strategy still available."""
        if self.method == "hard_links":
            # The hard-link lock is broken 60 seconds after acquisition.
            self.lock_instance = FluflLock(lock_file, lifetime=60)
            try:
                # Try for a long time before giving up on this strategy.
                self.lock_instance.lock(timeout=self.timeout)
                return
            except (TimeoutError, OSError, PermissionError):
                pass
            logger.warning(
                "Unable to use hard link-based file locks. "
                "Trying fcntl-based file locks",
                exc_info=True)
            self.method = "fcntl"
        if self.method == "fcntl":
            self.lock_instance = FcntlLock(lock_file)
            if self.lock_instance.acquire(timeout=self.timeout):
                return
            logger.warning(
                "Unable to use fcntl-based file locks. "
                "Disabling file locks",
                exc_info=True)
            self.method = None
            # With locking disabled, a large delay keeps write collisions
            # unlikely.
            self.delay = 10.0

    def unlock(self):
        """Release whatever lock :meth:`lock` obtained, if any."""
        if self.method == "hard_links":
            # unconditionally=True: do not raise errors in unlock.
            self.lock_instance.unlock(unconditionally=True)
            self.lock_instance = None
        elif self.method == "fcntl":
            self.lock_instance.release()
def acquire_inter_process_lock(lock_name):
    """Yield whether the named /tmp inter-process lock was obtained.

    The lock, if taken, is released when the context exits.
    """
    # Lock preventing simultaneous crawling processes
    lock = InterProcessLock('/tmp/%s' % _compute_lock_name(lock_name))
    got_it = lock.acquire(blocking=False)
    try:
        yield got_it
    finally:
        if got_it:
            lock.release()
def _update_db(self):
    """Persist the primary platform database to disk under a file lock.

    :returns: True on a successful write; False if the lock could not be
        acquired within 60 seconds or the destination is ambiguous.
    """
    if not self._prim_db:
        logger.error("Can't update platform database: "
                     "destination database is ambiguous")
        return False
    lock = InterProcessLock("%s.lock" % self._prim_db)
    acquired = lock.acquire(blocking=False)
    if not acquired:
        logger.debug("Waiting 60 seconds for file lock")
        acquired = lock.acquire(blocking=True, timeout=60)
    if not acquired:
        logger.error("Could not update platform database: "
                     "Lock acquire failed after 60 seconds")
        return False
    try:
        # Fix: drop the Python 2-only `unicode()` wrapper, which raises
        # NameError on Python 3 (where this `open(..., encoding=...)`
        # signature lives); json.dumps already returns str.
        with open(self._prim_db, "w", encoding="utf-8") as out:
            out.write(json.dumps(self._dbs[self._prim_db]))
        return True
    finally:
        lock.release()
class AdaptiveLock:
    """Lock that falls back through fcntl, hard links, then a random delay."""

    def __init__(self, timeout: int = 180):
        self.timeout = timeout
        # Strategies are tried front-to-back; failed ones are popped off.
        self.methods: List[str] = ["fcntl", "hard_links", "delay"]
        self.lock_instance = None

    def lock(self, lock_file):
        """Acquire ``lock_file`` using the first strategy that still works."""
        strategy = self.methods[0]
        if strategy == "hard_links":
            # lifetime: seconds after which the lock is broken.
            self.lock_instance = FluflLock(lock_file, lifetime=self.timeout)
            try:
                # Try for a long time.
                self.lock_instance.lock(timeout=self.timeout)
                return
            except (FluflLockError, TimeOutError, OSError):
                # Timeouts etc., plus OS errors such as PermissionError.
                pass
            logger.warning("Unable to use hard link-based file locks",
                           exc_info=True)
            self.methods.pop(0)
            self.lock(lock_file)
        elif strategy == "fcntl":
            self.lock_instance = FcntlLock(lock_file)
            if self.lock_instance.acquire(timeout=self.timeout, delay=1):
                return
            logger.warning("Unable to use fcntl-based file locks",
                           exc_info=True)
            self.methods.pop(0)
            self.lock(lock_file)
        else:
            # No locking available: a random delay makes write collisions
            # unlikely.
            pause = gauss(20, 5)
            if pause > 0:
                sleep(pause)

    def unlock(self):
        """Release whatever lock the current strategy holds."""
        strategy = self.methods[0]
        if strategy == "hard_links":
            assert isinstance(self.lock_instance, FluflLock)
            # unconditionally=True: do not raise errors in unlock.
            self.lock_instance.unlock(unconditionally=True)
            self.lock_instance = None
        elif strategy == "fcntl":
            assert isinstance(self.lock_instance, FcntlLock)
            self.lock_instance.release()
def run(self):
    """
    Run application.

    Acquires a PID-file lock, harvests routing/virtual-network data into
    the database, optionally truncates outdated rows, and always exits
    the process with an :class:`ExitCodes` value.
    """
    # configure SQLAlchemy logging
    # log_level = self.logger.getEffectiveLevel()
    # logging.getLogger('sqlalchemy.engine').setLevel(log_level)
    exit_code = ExitCodes.EXIT_SUCCESS
    self.logger.info(f"{self.PROG}: Version v{__version__}")
    self.logger.debug(f"Configuration: {dict(self.config)!r}")
    try:
        # Single-instance guard: a second harvester aborts immediately.
        path_pidfile = self.config["path_pidfile"]
        pid_lock = InterProcessLock(path_pidfile)
        pid_lock_gotten = pid_lock.acquire(blocking=False)
        if not pid_lock_gotten:
            raise AlreadyHarvesting(path_pidfile)
        self.logger.debug(
            f"Aquired PID lock {self.config['path_pidfile']!r}"
        )
        # Nothing requested at all -> bail out early.
        if (
            self.config["no_routes"]
            and self.config["no_vnetworks"]
            and not self.config["truncate"]
        ):
            raise NothingToDo()
        harvesting = not (
            self.config["no_routes"] and self.config["no_vnetworks"]
        )
        Session = db.ScopedSession()
        engine = create_engine(
            self.config["sqlalchemy_database_uri"], echo=False
        )
        Session.configure(bind=engine)
        if engine.name == "sqlite":
            db.configure_sqlite(self.DB_PRAGMAS)
        # TODO(damb): Implement multithreaded harvesting using a thread
        # pool.
        try:
            if harvesting:
                self.logger.info("Start harvesting.")
            if not self.config["no_routes"]:
                self._harvest_routes(Session)
            else:
                self.logger.info(
                    "Disabled processing <route></route> information."
                )
            if not self.config["no_vnetworks"]:
                self._harvest_vnetworks(Session)
            else:
                self.logger.info(
                    "Disabled processing <vnetwork></vnetwork> "
                    "information."
                )
            if harvesting:
                self.logger.info("Finished harvesting successfully.")
            # Truncation runs even when harvesting itself is disabled.
            if self.config["truncate"]:
                self.logger.warning("Removing outdated data.")
                session = Session()
                with db.session_guard(session) as _session:
                    num_removed_rows = db.clean(
                        _session,
                        self.config["truncate"],
                    )
                self.logger.info(
                    f"Number of rows removed: {num_removed_rows}"
                )
        except OperationalError as err:
            raise db.StationLiteDBEngineError(err)
    # TODO(damb): signal handling
    except Error as err:
        self.logger.error(err)
        exit_code = ExitCodes.EXIT_ERROR
    except Exception as err:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        self.logger.critical("Local Exception: %s" % err)
        self.logger.critical(
            "Traceback information: "
            + repr(
                traceback.format_exception(
                    exc_type, exc_value, exc_traceback
                )
            )
        )
        exit_code = ExitCodes.EXIT_ERROR
    finally:
        try:
            if pid_lock_gotten:
                pid_lock.release()
        # NameError guard: pid_lock_gotten is unbound when the lock
        # construction itself raised before assignment.
        except NameError:
            pass
        sys.exit(exit_code)
# Validate the requested account before doing anything on its behalf.
if not mailpile_user or mailpile_user == 'root':
    usage(2, "Please specify a (non-root) user to launch Mailpile.")
if not re.match(r'^[a-zA-Z0-9\._-]+$', mailpile_user):
    usage(2, "That is a strange looking username.")
mailpile_user = pwd.getpwnam(mailpile_user)
if not mailpile_user:
    usage(2, "Please specify a (non-root) user to launch Mailpile.")
if not re.match(r'^[a-zA-Z0-9\.:/]+$', url):
    usage(2, "That is a strange looking URL.")

# Refuse to bootstrap Mailpile for a user who has never run it.
mailpile_home = MAILPILE_HOME_PATH % mailpile_user.pw_dir
if not os.path.exists(mailpile_home):
    usage(3, "That user has never run Mailpile. Aborting.")

mp_lockfile = os.path.join(mailpile_home, MAILPILE_WORK_LOCK)
mp_lock = InterProcessLock(mp_lockfile)
if mp_lock.acquire(blocking=False):
    # We will release the lock on exec(), but make sure the user owns
    # the lockfile and will be able to take over.
    os.chown(mp_lockfile, mailpile_user.pw_uid, mailpile_user.pw_gid)
    command = ('screen -S mailpile -d -m '
               'mailpile --idlequit=%d --pid=%s/%s.pid --www=%s --interact'
               ) % (idlequit, MAILPILE_PIDS_PATH, mailpile_user.pw_name, url)
    os.execv('/bin/su',
             ['/bin/su', '-', mailpile_user.pw_name, '-c', command])
else:
    # We are happy with this result, don't raise an error.
    sys.stderr.write(
        "Mailpile is already running for that user. Doing Nothing.\n")
class Lock:
    """
    An inter-process and inter-thread lock.

    This reuses code from oslo.concurrency but provides non-blocking
    acquire. Use the :meth:`singleton` class method to retrieve an
    existing instance for thread-safe usage.
    """

    # Registry of named locks handed out by :meth:`singleton`.
    _instances: Dict[str, "Lock"] = dict()
    # Guards _instances against concurrent singleton() calls.
    _singleton_lock = threading.Lock()

    @classmethod
    def singleton(cls, name: str, lock_path: Optional[str] = None) -> "Lock":
        """
        Retrieve an existing lock object with a given 'name' or create a
        new one. Use this method for thread-safe locks.

        :param name: Name of lock file.
        :param lock_path: Directory for lock files. Defaults to the
            temporary directory returned by :func:`tempfile.gettempdir()`
            if not given.
        """
        with cls._singleton_lock:
            try:
                instance = cls._instances[name]
            except KeyError:
                instance = cls(name, lock_path)
                cls._instances[name] = instance
            return instance

    def __init__(self, name: str, lock_path: Optional[str] = None) -> None:
        self.name = name
        dirname = lock_path or tempfile.gettempdir()
        lock_path = os.path.join(dirname, name)
        # Semaphore used as the intra-process (thread) half of the lock.
        self._internal_lock = threading.Semaphore()
        # File-based lock covering the inter-process half.
        self._external_lock = InterProcessLock(lock_path)
        # Serializes this object's own methods across threads.
        self._lock = threading.RLock()

    def acquire(self) -> bool:
        """
        Attempts to acquire the given lock.

        :returns: Whether or not the acquisition succeeded.
        """
        with self._lock:
            # Take the thread-level semaphore first; if that fails,
            # another thread in this process already holds the lock.
            locked_internal = self._internal_lock.acquire(blocking=False)
            if not locked_internal:
                return False
            try:
                locked_external = self._external_lock.acquire(blocking=False)
            except Exception:
                # Roll back the internal half before propagating.
                self._internal_lock.release()
                raise
            else:
                if locked_external:
                    return True
                else:
                    self._internal_lock.release()
                    return False

    def release(self) -> None:
        """Release the previously acquired lock."""
        with self._lock:
            self._external_lock.release()
            self._internal_lock.release()

    def locked(self) -> bool:
        """Checks if the lock is currently held by any thread or process."""
        with self._lock:
            # Probe by acquiring; undo immediately if we succeeded.
            gotten = self.acquire()
            if gotten:
                self.release()
            return not gotten

    def locking_pid(self) -> Optional[int]:
        """
        Returns the PID of the process which currently holds the lock or
        ``None``. This should work on macOS, OpenBSD and Linux but may
        fail on some platforms. Always use :meth:`locked` to check if
        the lock is held by any process.

        :returns: The PID of the process which currently holds the lock
            or ``None``.
        """
        with self._lock:
            if self._external_lock.acquired:
                return os.getpid()
            try:
                # don't close again in case we are the locking process
                # NOTE(review): relies on InterProcessLock's private
                # _do_open()/lockfile attributes — verify against the
                # installed fasteners version.
                self._external_lock._do_open()
                lockdata, fmt, pid_index = _get_lockdata()
                # F_GETLK fills in the PID of the conflicting lock holder.
                lockdata = fcntl.fcntl(self._external_lock.lockfile,
                                       fcntl.F_GETLK, lockdata)
                lockdata_list = struct.unpack(fmt, lockdata)
                pid = lockdata_list[pid_index]
                if pid > 0:
                    return pid
            except OSError:
                pass
            return None
# Sanity-check the user and URL arguments up front.
if not mailpile_user or mailpile_user == 'root':
    usage(2, "Please specify a (non-root) user to launch Mailpile.")
if not re.match(r'^[a-zA-Z0-9\._-]+$', mailpile_user):
    usage(2, "That is a strange looking username.")
mailpile_user = pwd.getpwnam(mailpile_user)
if not mailpile_user:
    usage(2, "Please specify a (non-root) user to launch Mailpile.")
if not re.match(r'^[a-zA-Z0-9\.:/]+$', url):
    usage(2, "That is a strange looking URL.")

mailpile_home = MAILPILE_HOME_PATH % mailpile_user.pw_dir
if not os.path.exists(mailpile_home):
    usage(3, "That user has never run Mailpile. Aborting.")

mp_lockfile = os.path.join(mailpile_home, MAILPILE_WORK_LOCK)
mp_lock = InterProcessLock(mp_lockfile)
if not mp_lock.acquire(blocking=False):
    # We are happy with this result, don't raise an error.
    sys.stderr.write(
        "Mailpile is already running for that user. Doing Nothing.\n")
else:
    # We will release the lock on exec(), but make sure the user owns
    # the lockfile and will be able to take over.
    os.chown(mp_lockfile, mailpile_user.pw_uid, mailpile_user.pw_gid)
    screen_cmd = (
        'screen -S mailpile -d -m '
        'mailpile --idlequit=%d --pid=%s/%s.pid --www=%s --interact'
    ) % (idlequit, MAILPILE_PIDS_PATH, mailpile_user.pw_name, url)
    # Replace this process with `su` running Mailpile inside screen.
    os.execv('/bin/su', ['/bin/su', '-', mailpile_user.pw_name,
                         '-c', screen_cmd])
class ProjectManager(Iterable):
    """Manages Brightway projects: directories, the projects database,
    the per-project write lock, and switching between projects."""

    # Subdirectories created inside every project directory.
    _basic_directories = (
        "backups",
        "intermediate",
        "lci",
        "processed",
    )
    _is_temp_dir = False
    read_only = False

    def __init__(self):
        self._base_data_dir, self._base_logs_dir = self._get_base_directories()
        self._create_base_directories()
        self.db = SubstitutableDatabase(self._base_data_dir / "projects.db",
                                        [ProjectDataset])
        # One-off schema migration: older databases lack the `full_hash`
        # column, so add it (defaulting to 1) after taking a backup copy.
        columns = {
            o.name for o in self.db._database.get_columns("projectdataset")
        }
        if "full_hash" not in columns:
            src_filepath = self._base_data_dir / "projects.db"
            backup_filepath = self._base_data_dir / "projects.backup.db"
            shutil.copy(src_filepath, backup_filepath)
            MIGRATION_WARNING = """Adding a column to the projects database. A backup copy of this database '{}' was made at '{}'; if you have problems, file an issue, and restore the backup data to use the stable version of Brightway2."""
            print(MIGRATION_WARNING.format(src_filepath, backup_filepath))
            ADD_FULL_HASH_COLUMN = """ALTER TABLE projectdataset ADD COLUMN "full_hash" integer default 1"""
            self.db.execute_sql(ADD_FULL_HASH_COLUMN)
            # We don't do this, as the column added doesn't have a default
            # value, meaning that one would get error from using the
            # development branch alongside the stable branch.
            # from playhouse.migrate import SqliteMigrator, migrate
            # migrator = SqliteMigrator(self.db._database)
            # full_hash = BooleanField(default=True)
            # migrate(migrator.add_column("projectdataset", "full_hash", full_hash),)
        self.set_current("default", update=False)

    def __iter__(self):
        # Iterates ProjectDataset rows, not names.
        for project_ds in ProjectDataset.select():
            yield project_ds

    def __contains__(self, name):
        return ProjectDataset.select().where(
            ProjectDataset.name == name).count() > 0

    def __len__(self):
        return ProjectDataset.select().count()

    def __repr__(self):
        # Abbreviate the listing past 20 projects.
        if len(self) > 20:
            return ("Brightway2 projects manager with {} objects, including:"
                    "{}\nUse `sorted(projects)` to get full list, "
                    "`projects.report()` to get\n\ta report on all projects."
                    ).format(
                        len(self),
                        "".join([
                            "\n\t{}".format(x)
                            for x in sorted([x.name for x in self])[:10]
                        ]),
                    )
        else:
            return (
                "Brightway2 projects manager with {} objects:{}"
                "\nUse `projects.report()` to get a report on all projects."
            ).format(
                len(self),
                "".join([
                    "\n\t{}".format(x) for x in sorted([x.name for x in self])
                ]),
            )

    ### Internal functions for managing projects

    def _get_base_directories(self):
        """Resolve (data_dir, logs_dir), honoring BRIGHTWAY2_DIR."""
        envvar = maybe_path(os.getenv("BRIGHTWAY2_DIR"))
        if envvar:
            if not envvar.is_dir():
                raise OSError(("BRIGHTWAY2_DIR variable is {}, but this is not"
                               " a valid directory").format(envvar))
            else:
                print("Using environment variable BRIGHTWAY2_DIR for data "
                      "directory:\n{}".format(envvar))
            envvar = envvar.absolute()
            logs_dir = envvar / "logs"
            create_dir(logs_dir)
            return envvar, logs_dir
        LABEL = "Brightway3"
        data_dir = Path(appdirs.user_data_dir(LABEL, "pylca"))
        logs_dir = Path(appdirs.user_log_dir(LABEL, "pylca"))
        return data_dir, logs_dir

    def _create_base_directories(self):
        create_dir(self._base_data_dir)
        create_dir(self._base_logs_dir)

    @property
    def current(self):
        # Name of the active project.
        return self._project_name

    @property
    def twofive(self):
        # Whether the active project is flagged Brightway 2.5-compatible.
        return bool(self.dataset.data.get("25"))

    def set_current(self, name, writable=True, update=True):
        """Switch the active project, managing the write lock and
        read-only state; creates the project if it does not exist."""
        # Release the previous project's write lock, if we held one.
        if not self.read_only and lockable() and hasattr(self, "_lock"):
            try:
                self._lock.release()
            except (RuntimeError, ThreadError):
                pass
        self._project_name = str(name)
        # Need to allow writes when creating a new project
        # for new metadata stores
        self.read_only = False
        self.create_project(name)
        self.dataset = ProjectDataset.get(
            ProjectDataset.name == self._project_name)
        self._reset_meta()
        self._reset_sqlite3_databases()
        self.dataset = ProjectDataset.get(name=name)
        if not lockable():
            pass
        elif writable:
            # Become read-only when another process holds the write lock.
            self._lock = InterProcessLock(self.dir / "write-lock")
            self.read_only = not self._lock.acquire(timeout=0.05)
            if self.read_only:
                warnings.warn(READ_ONLY_PROJECT)
        else:
            self.read_only = True
        if not self.read_only and update:
            self._do_automatic_updates()

    def _do_automatic_updates(self):
        """Run any available automatic updates"""
        from .updates import Updates
        for update_name in Updates.check_automatic_updates():
            print("Applying automatic update: {}".format(update_name))
            Updates.do_update(update_name)

    def _reset_meta(self):
        # Re-initialize every registered metadata store for the new project.
        for obj in config.metadata:
            obj.__init__()

    def _reset_sqlite3_databases(self):
        # Repoint registered sqlite databases at the new project directory.
        for relative_path, substitutable_db in config.sqlite3_databases:
            substitutable_db.change_path(self.dir / relative_path)

    ### Public API

    @property
    def dir(self):
        # Project data directory; name hashing depends on full_hash.
        return Path(self._base_data_dir) / safe_filename(
            self.current, full=self.dataset.full_hash)

    @property
    def logs_dir(self):
        return Path(self._base_logs_dir) / safe_filename(
            self.current, full=self.dataset.full_hash)

    @property
    def output_dir(self):
        """Get directory for output files.

        Uses environment variable ``BRIGHTWAY2_OUTPUT_DIR``;
        ``preferences['output_dir']``; or directory ``output`` in current
        project.

        Returns output directory path.
        """
        ep, pp = (
            maybe_path(os.getenv("BRIGHTWAY2_OUTPUT_DIR")),
            maybe_path(config.p.get("output_dir")),
        )
        if ep and ep.is_dir():
            return ep
        elif pp and pp.is_dir():
            return pp
        else:
            return self.request_directory("output")

    def create_project(self, name=None, **kwargs):
        """Create the project dataset and directory tree if missing."""
        name = name or self.current
        # New projects are always marked Brightway 2.5-compatible.
        kwargs["25"] = True
        full_hash = kwargs.pop("full_hash", False)
        try:
            self.dataset = ProjectDataset.get(ProjectDataset.name == name)
        except DoesNotExist:
            self.dataset = ProjectDataset.create(data=kwargs,
                                                 name=name,
                                                 full_hash=full_hash)
        create_dir(self.dir)
        for dir_name in self._basic_directories:
            create_dir(self.dir / dir_name)
        create_dir(self.logs_dir)

    def copy_project(self, new_name, switch=True):
        """Copy current project to a new project named ``new_name``.

        If ``switch``, switch to new project."""
        if new_name in self:
            raise ValueError("Project {} already exists".format(new_name))
        fp = self._base_data_dir / safe_filename(new_name,
                                                 full=self.dataset.full_hash)
        if fp.exists():
            raise ValueError("Project directory already exists")
        project_data = ProjectDataset.get(
            ProjectDataset.name == self.current).data
        ProjectDataset.create(data=project_data,
                              name=new_name,
                              full_hash=self.dataset.full_hash)
        # Never copy the old project's write lock into the new tree.
        shutil.copytree(self.dir, fp, ignore=lambda x, y: ["write-lock"])
        create_dir(self._base_logs_dir / safe_filename(new_name))
        if switch:
            self.set_current(new_name)

    def request_directory(self, name):
        """Return the absolute path to the subdirectory ``dirname``,
        creating it if necessary.

        Returns ``False`` if directory can't be created."""
        fp = self.dir / str(name)
        create_dir(fp)
        if not fp.is_dir():
            return False
        return fp

    def _use_temp_directory(self):
        """Point the ProjectManager towards a temporary directory instead
        of `user_data_dir`. Used exclusively for tests."""
        # Only remember the originals the first time we switch.
        if not self._is_temp_dir:
            self._orig_base_data_dir = self._base_data_dir
            self._orig_base_logs_dir = self._base_logs_dir
        temp_dir = Path(tempfile.mkdtemp())
        self._base_data_dir = temp_dir / "data"
        self._base_logs_dir = temp_dir / "logs"
        self.db.change_path(":memory:")
        self.set_current("default", update=False)
        self._is_temp_dir = True
        return temp_dir

    def _restore_orig_directory(self):
        """Point the ProjectManager back to original directories. Used
        exclusively in tests."""
        if not self._is_temp_dir:
            return
        self._base_data_dir = self._orig_base_data_dir
        del self._orig_base_data_dir
        self._base_logs_dir = self._orig_base_logs_dir
        del self._orig_base_logs_dir
        self.db.change_path(self._base_data_dir / "projects.db")
        self.set_current("default", update=False)
        self._is_temp_dir = False

    def migrate_project_25(self):
        """Migrate project to Brightway 2.5.

        Reprocesses all databases and LCIA objects."""
        assert not self.twofive, "Project is already 2.5 compatible"
        from .updates import Updates
        Updates()._reprocess_all()
        self.dataset.data["25"] = True
        self.dataset.save()

    def delete_project(self, name=None, delete_dir=False):
        """Delete project ``name``, or the current project.

        ``name`` is the project to delete. If ``name`` is not provided,
        delete the current project.

        By default, the underlying project directory is not deleted; only
        the project name is removed from the list of active projects. If
        ``delete_dir`` is ``True``, then also delete the project directory.

        If deleting the current project, this function sets the current
        directory to ``default`` if it exists, or to a random project.

        Returns the current project."""
        victim = name or self.current
        if victim not in self:
            raise ValueError("{} is not a project".format(victim))
        if len(self) == 1:
            raise ValueError("Can't delete only remaining project")
        ProjectDataset.delete().where(ProjectDataset.name == victim).execute()
        if delete_dir:
            dir_path = self._base_data_dir / safe_filename(victim)
            assert dir_path.is_dir(), "Can't find project directory"
            shutil.rmtree(dir_path)
        # Keep the manager pointed at a live project.
        if name is None or name == self.current:
            if "default" in self:
                self.set_current("default")
            else:
                self.set_current(next(iter(self)).name)
        return self.current

    def purge_deleted_directories(self):
        """Delete project directories for projects which are no longer
        registered. Returns number of directories deleted."""
        registered = {safe_filename(obj.name) for obj in self}
        bad_directories = [
            self._base_data_dir / dirname
            for dirname in os.listdir(self._base_data_dir)
            if (self._base_data_dir / dirname).is_dir()
            and dirname not in registered
        ]
        for fp in bad_directories:
            shutil.rmtree(fp)
        return len(bad_directories)

    def report(self):
        """Give a report on current projects, including installed
        databases and file sizes. Returns tuples of ``(project name,
        number of databases, size of all databases (GB))``."""
        from . import databases
        _current = self.current
        data = []

        def get_dir_size(dirpath):
            """Modified from http://stackoverflow.com/questions/12480367/how-to-generate-directory-size-recursively-in-python-like-du-does.
            Does not follow symbolic links"""
            return sum(
                sum(os.path.getsize(root / name) for name in files)
                for root, dirs, files in os.walk(dirpath))

        names = sorted([x.name for x in self])
        for obj in names:
            # Visit each project read-only, then restore the original.
            self.set_current(obj, update=False, writable=False)
            data.append(
                (obj, len(databases), get_dir_size(projects.dir) / 1e9))
        self.set_current(_current)
        return data

    def use_short_hash(self):
        """Rename project directories from full- to short-hash naming."""
        if not self.dataset.full_hash:
            return
        try:
            old_dir, old_logs_dir = self.dir, self.logs_dir
            # Flip the flag first so self.dir/logs_dir yield target paths.
            self.dataset.full_hash = False
            if self.dir.exists():
                raise OSError("Target directory {} already exists".format(
                    self.dir))
            if self.logs_dir.exists():
                raise OSError("Target directory {} already exists".format(
                    self.logs_dir))
            old_dir.rename(self.dir)
            old_logs_dir.rename(self.logs_dir)
            self.dataset.save()
        except Exception as ex:
            # Roll the flag back on any failure.
            self.dataset.full_hash = True
            raise ex

    def use_full_hash(self):
        """Rename project directories from short- to full-hash naming."""
        if self.dataset.full_hash:
            return
        try:
            old_dir, old_logs_dir = self.dir, self.logs_dir
            # Flip the flag first so self.dir/logs_dir yield target paths.
            self.dataset.full_hash = True
            if self.dir.exists():
                raise OSError("Target directory {} already exists".format(
                    self.dir))
            if self.logs_dir.exists():
                raise OSError("Target directory {} already exists".format(
                    self.logs_dir))
            old_dir.rename(self.dir)
            old_logs_dir.rename(self.logs_dir)
            self.dataset.save()
        except Exception as ex:
            # Roll the flag back on any failure.
            self.dataset.full_hash = False
            raise ex
def run(self):
    """
    Run application.

    Takes a PID-file lock, harvests routing/virtual-network data,
    optionally truncates outdated rows, and always terminates the
    process with an :class:`ExitCodes` value.
    """
    # output work with
    # configure SQLAlchemy logging
    # log_level = self.logger.getEffectiveLevel()
    # logging.getLogger('sqlalchemy.engine').setLevel(log_level)
    exit_code = ExitCodes.EXIT_SUCCESS
    try:
        # Single-instance guard: a second harvester aborts immediately.
        pid_lock = InterProcessLock(self.args.path_pidfile)
        pid_lock_gotten = pid_lock.acquire(blocking=False)
        if not pid_lock_gotten:
            raise AlreadyHarvesting(self.args.path_pidfile)
        self.logger.debug('Aquired PID lock {0!r}'.format(
            self.args.path_pidfile))
        # Nothing requested at all -> bail out early.
        if (self.args.no_routes and self.args.no_vnetworks
                and not self.args.truncate):
            raise NothingToDo()
        harvesting = not (self.args.no_routes and self.args.no_vnetworks)
        Session = db.ScopedSession()
        Session.configure(bind=self.args.db_engine)
        db.configure_db(self.DB_PRAGMAS)
        # TODO(damb): Implement multithreaded harvesting using a thread
        # pool.
        try:
            if harvesting:
                self.logger.info('Start harvesting.')
            if not self.args.no_routes:
                self._harvest_routes(Session)
            else:
                self.logger.warn(
                    'Disabled processing <route></route> information.')
            if not self.args.no_vnetworks:
                self._harvest_vnetworks(Session)
            else:
                self.logger.warn(
                    'Disabled processing <vnetwork></vnetwork> '
                    'information.')
            if harvesting:
                self.logger.info('Finished harvesting successfully.')
            # Truncation runs even when harvesting itself is disabled.
            if self.args.truncate:
                self.logger.warning('Removing outdated data.')
                session = Session()
                with db.session_guard(session) as _session:
                    num_removed_rows = db.clean(_session,
                                                self.args.truncate)
                self.logger.info('Number of rows removed: {}'.format(
                    num_removed_rows))
        except OperationalError as err:
            raise db.StationLiteDBEngineError(err)
    # TODO(damb): signal handling
    except Error as err:
        self.logger.error(err)
        exit_code = ExitCodes.EXIT_ERROR
    except Exception as err:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        self.logger.critical('Local Exception: %s' % err)
        self.logger.critical('Traceback information: ' + repr(
            traceback.format_exception(exc_type, exc_value, exc_traceback))
        )
        exit_code = ExitCodes.EXIT_ERROR
    finally:
        try:
            if pid_lock_gotten:
                pid_lock.release()
        # NameError guard: pid_lock_gotten is unbound when the lock
        # construction itself raised before assignment.
        except NameError:
            pass
        sys.exit(exit_code)
# Declare the HANDLE return type so 64-bit hook handles are not truncated
# by ctypes' default c_int return conversion.
user32.SetWinEventHook.restype = ctypes.wintypes.HANDLE
hook = user32.SetWinEventHook(
    EVENT_SYSTEM_FOREGROUND, EVENT_SYSTEM_FOREGROUND, 0, WinEventProc, 0, 0,
    WINEVENT_OUTOFCONTEXT
)
if hook == 0:
    exit(1)

# Standard Win32 message pump; GetMessageW returns 0 on WM_QUIT.
msg = ctypes.wintypes.MSG()
while user32.GetMessageW(ctypes.byref(msg), 0, 0, 0) != 0:
    # Fix: the Win32 API exports TranslateMessage (there is no
    # W-suffixed variant, so the original attribute lookup would raise
    # AttributeError), and both pump calls take a *pointer* to the MSG
    # structure, not the structure by value.
    user32.TranslateMessage(ctypes.byref(msg))
    user32.DispatchMessageW(ctypes.byref(msg))
user32.UnhookWinEvent(hook)
ole32.CoUninitialize()

logger.info('Acquiring lock')
# Single-instance guard: only one copy may install the foreground hook.
lock = InterProcessLock(gettempdir() + '/bdbc_lock_file')
gotten = lock.acquire(timeout=10)
if gotten:
    logger.info('Lock acquired')
    foreground_window_hook()
else:
    logger.info('Lock failed')
def main():
    """Entry point: parse arguments, configure the process, and start the
    backup loop, guarding against concurrent instances with a file lock."""
    args = docopt.docopt(__doc__, version=__version__)

    # Load configuration
    defaults = {
        'main': {
            'warning_timeout': 15,
            'kill_timeout': 30,
            'max_spawn': 4,
            'min_wait': 15 * 60,
            'log_level': 'info',
            'runas_user': None,
            'runas_group': None,
        }
    }
    config = load_configuration(args['--config'], defaults)

    # Set procname
    try:
        setproctitle.setproctitle('dovebackupr %s' % " ".join(sys.argv[1:]))
    except Exception as e:
        print("Cant set process title: %s" % e)

    # Drop privileges (only when both user and group are configured)
    main_cfg = config['main']
    if main_cfg['runas_user'] is not None \
            and main_cfg['runas_group'] is not None:
        drop_privileges(main_cfg['runas_user'], main_cfg['runas_group'])

    # Setup logger
    logger = start_logger(config)

    # Acquire lock to prevent multiple instances running
    lock = InterProcessLock(config['main']['lock_file'])
    if not lock.acquire(blocking=False):
        print(
            "dovebackupr is already running.\nlock file: '%s'\nExiting now."
            % config['main']['lock_file'])
        logger.error(
            "dovebackupr is already running.\nlock file: '%s'\nExiting now."
            % config['main']['lock_file'])
        raise SystemExit(2)

    # Restrict to a single mailbox when invoked in `single` mode.
    mailboxes = config['mailbox']
    if args.get('single', None):
        try:
            selected = mailboxes[args['<mailbox>']]
        except KeyError:
            logger.error(
                "Given mailbox '%s' not found in configuration file '%s'"
                % (args['<mailbox>'], args['--config']))
            raise SystemExit(-1)
        mailboxes = {args['<mailbox>']: selected}

    # Register signal handlers
    signal.signal(signal.SIGINT, handle_sigint)
    signal.signal(signal.SIGTERM, handle_sigterm)

    logger.info(
        "dovebackupr started. Logging at '%s', with debug=%r"
        % (config['main']['log_file'],
           config['main']['log_level'] == 'debug'))

    # Run main loop
    main_loop(mailboxes, run_forever=args['run'], config=config)
class ProjectManager(collections.Iterable):
    # Manages Brightway2 project directories plus the project registry kept in
    # a ``projects.db`` SQLite database (peewee ``ProjectDataset`` model).
    # NOTE(review): ``collections.Iterable`` was removed in Python 3.10;
    # ``collections.abc.Iterable`` is the modern spelling. Left untouched here
    # because this file still special-cases Python 2 (see the
    # ``sys.version_info`` check in ``_get_base_directories``) — confirm the
    # supported interpreter range before changing.

    # Subdirectories created inside every project directory.
    _basic_directories = (
        "backups",
        "intermediate",
        "lci",
        "processed",
    )
    # True while pointed at a temporary directory (tests only; see
    # ``_use_temp_directory``).
    _is_temp_dir = False
    # True when the per-project write lock could not be acquired.
    read_only = False

    def __init__(self):
        """Resolve base data/log directories, open the registry database, and
        activate the ``default`` project (without running automatic updates)."""
        self._base_data_dir, self._base_logs_dir = self._get_base_directories()
        self._create_base_directories()
        self.db = SubstitutableDatabase(
            os.path.join(self._base_data_dir, "projects.db"),
            [ProjectDataset])
        self.set_current("default", update=False)

    def __iter__(self):
        # Yields ``ProjectDataset`` rows, not names.
        for project_ds in ProjectDataset.select():
            yield project_ds

    def __contains__(self, name):
        """True if a project named ``name`` is registered."""
        return ProjectDataset.select().where(
            ProjectDataset.name == name).count() > 0

    def __len__(self):
        return ProjectDataset.select().count()

    def __repr__(self):
        # Truncate the listing to the first 10 names for large registries.
        if len(self) > 20:
            return ("Brightway2 projects manager with {} objects, including:"
                    "{}\nUse `sorted(projects)` to get full list, "
                    "`projects.report()` to get\n\ta report on all projects."
                    ).format(
                        len(self),
                        "".join([
                            "\n\t{}".format(x)
                            for x in sorted([x.name for x in self])[:10]
                        ]))
        else:
            return (
                "Brightway2 projects manager with {} objects:{}"
                "\nUse `projects.report()` to get a report on all projects."
            ).format(
                len(self),
                "".join([
                    "\n\t{}".format(x)
                    for x in sorted([x.name for x in self])
                ]))

    ### Internal functions for managing projects

    def _get_base_directories(self):
        """Return ``(data_dir, logs_dir)``, honoring the ``BRIGHTWAY2_DIR``
        environment variable before falling back to platform app dirs.

        Raises OSError if ``BRIGHTWAY2_DIR`` is set but not a directory.
        """
        eight.wrap_os_environ_io()
        envvar = os.getenv("BRIGHTWAY2_DIR")
        if envvar:
            if not os.path.isdir(envvar):
                raise OSError(("BRIGHTWAY2_DIR variable is {}, but this is not"
                               " a valid directory").format(envvar))
            else:
                print("Using environment variable BRIGHTWAY2_DIR for data "
                      "directory:\n{}".format(envvar))
                envvar = os.path.abspath(envvar)
                logs_dir = os.path.join(envvar, "logs")
                create_dir(logs_dir)
                return envvar, logs_dir

        # Different label per major Python version keeps 2.x and 3.x data apart.
        LABEL = "Brightway2" if sys.version_info < (3, 0) else "Brightway3"
        data_dir = appdirs.user_data_dir(LABEL, "pylca")
        logs_dir = appdirs.user_log_dir(LABEL, "pylca")
        return data_dir, logs_dir

    def _create_base_directories(self):
        create_dir(self._base_data_dir)
        create_dir(self._base_logs_dir)

    @property
    def current(self):
        # Name of the currently active project.
        return self._project_name

    def set_current(self, name, writable=True, update=True):
        """Switch the active project to ``name``, creating it if necessary.

        ``writable`` controls whether a write lock is attempted (failure puts
        the manager into read-only mode); ``update`` controls whether
        automatic updates run after the switch.
        """
        # Release any write lock held for the previously active project.
        if not self.read_only and lockable() and hasattr(self, "_lock"):
            try:
                self._lock.release()
            except (RuntimeError, ThreadError):
                pass
        self._project_name = str(name)

        # Need to allow writes when creating a new project
        # for new metadata stores
        self.read_only = False
        self.create_project(name)
        self._reset_meta()
        self._reset_sqlite3_databases()

        if not lockable():
            pass
        elif writable:
            self._lock = InterProcessLock(os.path.join(self.dir, "write-lock"))
            # Very short timeout: another process holding the lock means we
            # immediately fall back to read-only mode.
            self.read_only = not self._lock.acquire(timeout=0.05)
            if self.read_only:
                warnings.warn(READ_ONLY_PROJECT)
        else:
            self.read_only = True

        if not self.read_only and update:
            self._do_automatic_updates()

    def _do_automatic_updates(self):
        """Run any available automatic updates"""
        from .updates import Updates
        for update_name in Updates.check_automatic_updates():
            print("Applying automatic update: {}".format(update_name))
            Updates.do_update(update_name)

    def _reset_meta(self):
        # Re-initialize every registered metadata store for the new project.
        for obj in config.metadata:
            obj.__init__()

    def _reset_sqlite3_databases(self):
        # Point each registered substitutable database at the new project dir.
        for relative_path, substitutable_db in config.sqlite3_databases:
            substitutable_db.change_path(os.path.join(self.dir, relative_path))

    ### Public API

    @property
    def dir(self):
        # Absolute path of the active project's data directory.
        return os.path.join(self._base_data_dir, safe_filename(self.current))

    @property
    def logs_dir(self):
        # Absolute path of the active project's log directory.
        return os.path.join(self._base_logs_dir, safe_filename(self.current))

    @property
    def output_dir(self):
        """Get directory for output files.

        Uses environment variable ``BRIGHTWAY2_OUTPUT_DIR``;
        ``preferences['output_dir']``; or directory ``output`` in current
        project.

        Returns output directory path.
        """
        eight.wrap_os_environ_io()
        ep, pp = os.getenv('BRIGHTWAY2_OUTPUT_DIR'), config.p.get('output_dir')
        if ep and os.path.isdir(ep):
            return ep
        elif pp and os.path.isdir(pp):
            return pp
        else:
            return self.request_directory('output')

    def create_project(self, name=None, **kwargs):
        """Register project ``name`` (default: current) if missing, and ensure
        its directory skeleton exists. ``kwargs`` become the project's data."""
        name = name or self.current
        if not ProjectDataset.select().where(
                ProjectDataset.name == name).count():
            ProjectDataset.create(data=kwargs, name=name)
        create_dir(self.dir)
        for dir_name in self._basic_directories:
            create_dir(os.path.join(self.dir, dir_name))
        create_dir(self.logs_dir)

    def copy_project(self, new_name, switch=True):
        """Copy current project to a new project named ``new_name``. If
        ``switch``, switch to new project."""
        if new_name in self:
            raise ValueError("Project {} already exists".format(new_name))
        fp = os.path.join(self._base_data_dir, safe_filename(new_name))
        if os.path.exists(fp):
            raise ValueError("Project directory already exists")
        # NOTE(review): ``select(expr)`` passes the comparison as a *column*
        # in peewee; the filtering idiom used elsewhere in this class is
        # ``select().where(expr)``. Verify this actually fetches the intended
        # row's data.
        project_data = ProjectDataset.select(
            ProjectDataset.name == self.current).get().data
        ProjectDataset.create(data=project_data, name=new_name)
        # Never copy the old project's write lock into the new directory.
        shutil.copytree(self.dir, fp, ignore=lambda x, y: ["write-lock"])
        create_dir(os.path.join(self._base_logs_dir, safe_filename(new_name)))
        if switch:
            self.set_current(new_name)

    def request_directory(self, name):
        """Return the absolute path to the subdirectory ``name``, creating it
        if necessary.

        Returns ``False`` if directory can't be created."""
        fp = os.path.join(self.dir, str(name))
        create_dir(fp)
        if not os.path.isdir(fp):
            return False
        return fp

    def _use_temp_directory(self):
        """Point the ProjectManager towards a temporary directory instead of
        `user_data_dir`. Used exclusively for tests."""
        if not self._is_temp_dir:
            # Remember the real directories so they can be restored later.
            self._orig_base_data_dir = self._base_data_dir
            self._orig_base_logs_dir = self._base_logs_dir
        temp_dir = tempfile.mkdtemp()
        self._base_data_dir = os.path.join(temp_dir, "data")
        self._base_logs_dir = os.path.join(temp_dir, "logs")
        self.db.change_path(':memory:')
        self.set_current("default", update=False)
        self._is_temp_dir = True
        return temp_dir

    def _restore_orig_directory(self):
        """Point the ProjectManager back to original directories.

        Used exclusively in tests."""
        if not self._is_temp_dir:
            return
        self._base_data_dir = self._orig_base_data_dir
        del self._orig_base_data_dir
        self._base_logs_dir = self._orig_base_logs_dir
        del self._orig_base_logs_dir
        self.db.change_path(os.path.join(self._base_data_dir, "projects.db"))
        self.set_current("default", update=False)
        self._is_temp_dir = False

    def delete_project(self, name=None, delete_dir=False):
        """Delete project ``name``, or the current project.

        ``name`` is the project to delete. If ``name`` is not provided,
        delete the current project.

        By default, the underlying project directory is not deleted; only the
        project name is removed from the list of active projects. If
        ``delete_dir`` is ``True``, then also delete the project directory.

        If deleting the current project, this function sets the current
        directory to ``default`` if it exists, or to a random project.

        Returns the current project."""
        victim = name or self.current
        if victim not in self:
            raise ValueError("{} is not a project".format(victim))
        if len(self) == 1:
            raise ValueError("Can't delete only remaining project")
        ProjectDataset.delete().where(ProjectDataset.name == victim).execute()
        if delete_dir:
            dir_path = os.path.join(self._base_data_dir, safe_filename(victim))
            assert os.path.isdir(dir_path), "Can't find project directory"
            shutil.rmtree(dir_path)
        # Re-point the manager if the active project was just deleted.
        if name is None or name == self.current:
            if "default" in self:
                self.set_current("default")
            else:
                self.set_current(next(iter(self)).name)
        return self.current

    def purge_deleted_directories(self):
        """Delete project directories for projects which are no longer
        registered.

        Returns number of directories deleted."""
        registered = {safe_filename(obj.name) for obj in self}
        bad_directories = [
            os.path.join(self._base_data_dir, dirname)
            for dirname in os.listdir(self._base_data_dir)
            if os.path.isdir(os.path.join(self._base_data_dir, dirname))
            and dirname not in registered
        ]
        for fp in bad_directories:
            shutil.rmtree(fp)
        return len(bad_directories)

    def report(self):
        """Give a report on current projects, including installed databases
        and file sizes.

        Returns tuples of ``(project name, number of databases, size of all
        databases (GB))``."""
        from . import databases
        # Remember the active project; it is restored at the end.
        _current = self.current
        data = []

        def get_dir_size(dirpath):
            """Modified from http://stackoverflow.com/questions/12480367/how-to-generate-directory-size-recursively-in-python-like-du-does.

            Does not follow symbolic links"""
            return sum(
                sum(
                    os.path.getsize(os.path.join(root, name))
                    for name in files)
                for root, dirs, files in os.walk(dirpath))

        names = sorted([x.name for x in self])
        for obj in names:
            # Switch projects read-only so no locks are taken while measuring.
            self.set_current(obj, update=False, writable=False)
            data.append(
                (obj, len(databases), get_dir_size(projects.dir) / 1e9))
        self.set_current(_current)
        return data
class DownloadDirectory:
    """Context manager for a resumable download's working directory.

    Incoming data is appended to ``<filepath>.dandidownload/file`` while an
    inter-process ``lock`` file guards against concurrent downloads of the
    same target. A ``checksum`` sidecar records the expected digests; when a
    later run presents matching digests, the download resumes from the
    existing partial file. On clean exit the finished file is moved into
    place at ``filepath`` and the working directory is removed.
    """

    def __init__(self, filepath, digests):
        #: The path to which to save the file after downloading
        self.filepath = Path(filepath)
        #: Expected hashes of the downloaded data, as a mapping from algorithm
        #: names to digests
        self.digests = digests
        #: The working directory in which downloaded data will be temporarily
        #: stored
        self.dirpath = self.filepath.with_name(self.filepath.name + ".dandidownload")
        #: The file in `dirpath` to which data will be written as it is
        #: received
        self.writefile = self.dirpath / "file"
        #: A `fasteners.InterProcessLock` on `dirpath`
        self.lock = None
        #: An open filehandle to `writefile`
        self.fp = None
        #: How much of the data has been downloaded so far
        self.offset = None

    def __enter__(self):
        from fasteners import InterProcessLock

        self.dirpath.mkdir(parents=True, exist_ok=True)
        self.lock = InterProcessLock(str(self.dirpath / "lock"))
        if not self.lock.acquire(blocking=False):
            # Fix: this message was missing the ``f`` prefix, so the literal
            # text "{self.filepath}" was raised instead of the actual path.
            raise RuntimeError(
                f"Could not acquire download lock for {self.filepath}"
            )
        try:
            chkpath = self.dirpath / "checksum"
            try:
                with chkpath.open() as fp:
                    digests = json.load(fp)
            except (FileNotFoundError, ValueError):
                digests = {}
            matching_algs = self.digests.keys() & digests.keys()
            if matching_algs and all(
                self.digests[alg] == digests[alg] for alg in matching_algs
            ):
                # Pick up where we left off, writing to the end of the file
                lgr.debug(
                    "Download directory exists and has matching checksum; resuming download"
                )
                self.fp = self.writefile.open("ab")
            else:
                # Delete the file (if it even exists) and start anew
                if not chkpath.exists():
                    lgr.debug("Starting new download in new download directory")
                else:
                    lgr.debug(
                        "Download directory found, but digests do not match; starting new download"
                    )
                try:
                    self.writefile.unlink()
                except FileNotFoundError:
                    pass
                self.fp = self.writefile.open("wb")
                with chkpath.open("w") as fp:
                    json.dump(self.digests, fp)
            self.offset = self.fp.tell()
        except BaseException:
            # Robustness fix: if setup fails after the lock was acquired,
            # release it (and close any open handle) instead of leaking it.
            if self.fp is not None:
                self.fp.close()
                self.fp = None
            self.lock.release()
            self.lock = None
            raise
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Close the working file; on success, move it into place and remove
        the working directory. Always releases the lock."""
        self.fp.close()
        try:
            if exc_type is None:
                self.writefile.replace(self.filepath)
        finally:
            self.lock.release()
            if exc_type is None:
                rmtree(self.dirpath, ignore_errors=True)
            self.lock = None
            self.fp = None
            self.offset = None
        return False

    def append(self, blob):
        """Append a chunk of downloaded bytes to the working file."""
        self.fp.write(blob)
class Lock:
    """An inter-process and inter-thread lock

    This internally uses :class:`fasteners.InterProcessLock` but provides
    non-blocking acquire. It also guarantees thread-safety when using the
    :meth:`singleton` class method to create / retrieve a lock instance.

    :param path: Path of the lock file to use / create.
    """

    # Registry of instances handed out by :meth:`singleton`, keyed by path.
    _instances: Dict[str, "Lock"] = {}
    # Guards ``_instances`` against concurrent creation from multiple threads.
    _singleton_lock = threading.Lock()

    @classmethod
    def singleton(cls, path: str) -> "Lock":
        """
        Retrieve an existing lock object with a given 'name' or create a new
        one. Use this method for thread-safe locks.

        :param path: Path of the lock file to use / create.
        """
        with cls._singleton_lock:
            try:
                instance = cls._instances[path]
            except KeyError:
                instance = cls(path)
                cls._instances[path] = instance
            return instance

    def __init__(self, path: str) -> None:
        #: Path of the underlying lock file.
        self.path = path
        # Semaphore(1) acting as a non-blocking lock between threads of this
        # process.
        self._internal_lock = threading.Semaphore()
        # File-based lock shared between processes.
        self._external_lock = InterProcessLock(self.path)
        # Serializes all operations on this instance.
        self._lock = threading.RLock()

    def acquire(self) -> bool:
        """
        Attempts to acquire the given lock.

        :returns: Whether or not the acquisition succeeded.
        """
        with self._lock:
            # Thread-level lock first: another thread in this process holding
            # it means we fail fast without touching the file lock.
            locked_internal = self._internal_lock.acquire(blocking=False)
            if not locked_internal:
                return False
            try:
                locked_external = self._external_lock.acquire(blocking=False)
            except Exception:
                # Undo the internal acquisition if the file lock errored out.
                self._internal_lock.release()
                raise
            else:
                if locked_external:
                    return True
                else:
                    # Another process holds the file lock; roll back.
                    self._internal_lock.release()
                    return False

    def release(self) -> None:
        """Release the previously acquired lock."""
        with self._lock:
            # Release in reverse acquisition order: file lock, then semaphore.
            self._external_lock.release()
            self._internal_lock.release()

    def locked(self) -> bool:
        """
        Checks if the lock is currently held by any thread or process.

        :returns: Whether the lock is acquired.
        """
        with self._lock:
            # Probe by attempting a real acquire and undoing it on success.
            gotten = self.acquire()
            if gotten:
                self.release()
            return not gotten

    def locking_pid(self) -> Optional[int]:
        """
        Returns the PID of the process which currently holds the lock or
        ``None``. This should work on macOS, OpenBSD and Linux but may fail
        on some platforms. Always use :meth:`locked` to check if the lock is
        held by any process.

        :returns: The PID of the process which currently holds the lock or
            ``None``.
        """
        with self._lock:
            if self._external_lock.acquired:
                return os.getpid()
            try:
                # Don't close again in case we are the locking process.
                self._external_lock._do_open()
                # Query the kernel's record lock table (F_GETLK) to learn
                # which PID holds the conflicting lock, if any.
                lockdata, fmt, pid_index = _get_lockdata()
                lockdata = fcntl.fcntl(self._external_lock.lockfile,
                                       fcntl.F_GETLK, lockdata)
                lockdata_list = struct.unpack(fmt, lockdata)
                pid = lockdata_list[pid_index]
                if pid > 0:
                    return pid
            except OSError:
                pass
            return None
def lock_if_check_fails(
    check,
    lock_path,
    operation=None,
    blocking=True,
    _return_acquired=False,
    **kwargs
):
    """A context manager to establish a lock conditionally on result of a check

    It is intended to be used as a lock for a specific file and/or operation,
    e.g. for `annex get`ing a file or extracting an archive, so only one
    process would be performing such an operation.

    If verification of the check fails, it tries to acquire the lock, but if
    that fails on the first try, it will rerun check before proceeding to func

    checker and lock_path_prefix could be a value, or callable, or
    a tuple composing callable and its args

    Unfortunately yoh did not find any way in Python 2 to have a context
    manager which just skips the entire block if some condition is met
    (in Python3 there is ExitStack which could potentially be used).
    So we would need still to check in the block body if the context manager
    return value is not None.

    Note also that the used type of the lock
    (fasteners.InterProcessLock) works only across processes and would not
    lock within the same (threads) process.

    Parameters
    ----------
    check: callable or (callable, args) or value
      If value (possibly after calling a callable) evaluates to True, no
      lock is acquired, and no context is executed
    lock_path: callable or (callable, args) or value
      Provides a path for the lock file, composed from that path + '.lck'
      extension
    operation: str, optional
      If provided, would be part of the locking extension
    blocking: bool, optional
      If blocking, process would be blocked until acquired and verified that
      it was acquired after it gets the lock
    _return_acquired: bool, optional
      Return also if lock was acquired.  For "private" use within DataLad
      (tests), do not rely on it in 3rd party solutions.
    **kwargs
      Passed to `.acquire` of the fasteners.InterProcessLock

    Returns
    -------
    result of check, lock[, acquired]
    """
    # First evaluation of the check: if it already passes, no lock is needed.
    check1 = _get(check)
    if check1:  # we are done - nothing to do
        yield check1, None
        return

    # acquire blocking lock
    # Lock file name is "<lock_path>.[<operation>-]lck".
    lock_filename = _get(lock_path)
    lock_filename += '.'
    if operation:
        lock_filename += operation + '-'
    lock_filename += 'lck'

    lock = InterProcessLock(lock_filename)
    acquired = False
    try:
        lgr.debug("Acquiring a lock %s", lock_filename)
        acquired = lock.acquire(blocking=blocking, **kwargs)
        lgr.debug("Acquired? lock %s: %s", lock_filename, acquired)
        if blocking:
            # A blocking acquire with no timeout kwarg should always succeed.
            assert acquired
        # Re-run the check: another process may have completed the work while
        # we were waiting for the lock.
        check2 = _get(check)
        # Hand the caller the lock only if the work still needs doing.
        ret_lock = None if check2 else lock
        if _return_acquired:
            yield check2, ret_lock, acquired
        else:
            yield check2, ret_lock
    finally:
        if acquired:
            lgr.debug("Releasing lock %s", lock_filename)
            lock.release()
            # Best-effort cleanup of the lock file itself.
            if exists(lock_filename):
                unlink(lock_filename)
from config import config
from fasteners import InterProcessLock

# Inter-process lock preventing two YeenBot instances from running at once.
ipl = InterProcessLock(config['lock_file'])


def lock_obtained():
    """Called once the lock is held: announce and run the bot."""
    print('Lock obtained, starting YeenBot...')
    from yeenbot import main
    main()


def lock_not_obtained():
    """Called when another instance already holds the lock."""
    print('Lock not obtained, exiting')


# Attempt to acquire the lock
acquired = ipl.acquire(blocking=False)
if acquired:
    # Release the lock no matter how the bot exits.
    try:
        lock_obtained()
    finally:
        ipl.release()
else:
    lock_not_obtained()
class Model:
    """The mod management model

    Tracks installed Witcher 3 mods and DLCs keyed by ``(filename, target)``,
    mirrors each entry to a ``.w3mm`` metadata file on disk, and keeps the
    game's ``mods.settings`` / ``user.settings`` / ``input.settings`` files in
    sync with enable/disable/rename operations.
    """

    def __init__(self, gamePath: Path, configPath: Path, cachePath: Path,
                 ignorelock: bool = False) -> None:
        """Verify all paths, take the single-instance lock (unless
        ``ignorelock``), and load the installed mods from disk.

        Raises the matching ``Invalid*Path`` error for a bad path and
        ``OtherInstanceError`` if another instance holds the lock.
        """
        self._gamePath: Path = Path()
        self._configPath: Path = Path()
        self._cachePath: Path = Path()
        self._modsPath: Path = Path()
        self._dlcsPath: Path = Path()
        self._modList: Dict[Tuple[str, str], Mod] = {}
        _cachePath = verifyCachePath(cachePath)
        if not _cachePath:
            raise InvalidCachePath(cachePath)
        self._cachePath = _cachePath
        self.updateCallbacks = CallbackList()
        # Serializes all mutating operations on the mod list.
        self.updateLock = asyncio.Lock()
        if not ignorelock:
            # Single-instance guard; held for the lifetime of the process.
            self._lock = InterProcessLock(self.lockfile)
            if not self._lock.acquire(False):
                raise OtherInstanceError(self.lockfile)
        self.setPaths(gamePath, configPath)
        # TODO: enhancement: watch mod directory for changes
        logger.debug('Initialized model')
        logger.debug(f'Game path: {self._gamePath}')
        logger.debug(f'Config path: {self._configPath}')
        logger.debug(f'Cache path: {self._cachePath}')
        logger.debug(f'Mods path: {self._modsPath}')

    # TODO: incomplete: implement mod installation management

    def setPaths(self, gamePath: Path, configPath: Path) -> None:
        """Validate and store game/config paths (plus derived Mods/DLC
        paths), then reload the installed mod list. No-op if unchanged."""
        _gamePath = verifyGamePath(gamePath)
        _configPath = verifyConfigPath(configPath)
        if self._gamePath == _gamePath and self._configPath == _configPath:
            return
        if not _gamePath:
            raise InvalidGamePath(gamePath)
        if not _configPath:
            raise InvalidConfigPath(configPath)
        modsPath = _gamePath.joinpath('Mods')
        _modsPath = verifyModsPath(modsPath)
        dlcsPath = _gamePath.joinpath('DLC')
        _dlcsPath = verifyDlcsPath(dlcsPath)
        if not _modsPath:
            raise InvalidModsPath(modsPath)
        if not _dlcsPath:
            raise InvalidDlcsPath(dlcsPath)
        self._gamePath = _gamePath
        self._configPath = _configPath
        self._modsPath = _modsPath
        self._dlcsPath = _dlcsPath
        self.lastUpdate = datetime.now(tz=timezone.utc)
        self.lastInitialization = datetime.now(tz=timezone.utc)
        self._modList = {}
        self.loadInstalled()
        self.updateCallbacks.fire(self)

    def loadInstalled(self) -> None:
        """Scan the Mods and DLC directories and populate ``_modList``.

        Entries with a ``.w3mm`` metadata file are restored from it;
        anything else is imported via ``Mod.fromDirectory`` and scheduled
        for a metadata write (``self.update``).
        """
        for path in self.modspath.iterdir():
            if path.joinpath('.w3mm').is_file():
                mod = Mod.from_json(path.joinpath('.w3mm').read_bytes())
                # A leading '~' on the directory marks a disabled mod.
                mod.enabled = not path.name.startswith('~')
                mod.filename = re.sub(r'^(~)', r'', path.name)
                if mod.enabled:
                    # mods.settings can override the directory-name state.
                    enabled = getSettingsValue(
                        mod.filename, 'Enabled',
                        self.configpath.joinpath('mods.settings'))
                    if enabled == '0':
                        mod.enabled = False
                self._modList[(mod.filename, mod.target)] = mod
            else:
                try:
                    for mod in Mod.fromDirectory(path, recursive=False):
                        mod.installdate = datetime.fromtimestamp(
                            path.stat().st_ctime, tz=timezone.utc)
                        mod.target = 'mods'
                        mod.datatype = 'mod'
                        mod.enabled = not path.name.startswith('~')
                        if mod.enabled:
                            enabled = getSettingsValue(
                                mod.filename, 'Enabled',
                                self.configpath.joinpath('mods.settings'))
                            if enabled == '0':
                                mod.enabled = False
                        self._modList[(mod.filename, mod.target)] = mod
                        # Persist freshly imported metadata in the background.
                        asyncio.create_task(self.update(mod))
                except InvalidPathError:
                    logger.bind(path=path).debug('Invalid MOD')
        for path in self.dlcspath.iterdir():
            if path.joinpath('.w3mm').is_file():
                mod = Mod.from_json(path.joinpath('.w3mm').read_bytes())
                # A DLC counts as enabled unless every file is '.disabled'.
                mod.enabled = not all(
                    file.name.endswith('.disabled')
                    for file in path.glob('**/*')
                    if file.is_file() and not file.name == '.w3mm')
                mod.filename = path.name
                self._modList[(mod.filename, mod.target)] = mod
            else:
                try:
                    for mod in Mod.fromDirectory(path, recursive=False):
                        mod.installdate = datetime.fromtimestamp(
                            path.stat().st_ctime, tz=timezone.utc)
                        mod.target = 'dlc'
                        mod.datatype = 'dlc'
                        mod.enabled = not all(
                            file.name.endswith('.disabled')
                            for file in path.glob('**/*')
                            if file.is_file() and not file.name == '.w3mm')
                        self._modList[(mod.filename, mod.target)] = mod
                        asyncio.create_task(self.update(mod))
                except InvalidPathError:
                    logger.bind(path=path).debug('Invalid DLC')

    def get(self, mod: ModelIndexType) -> Mod:
        """Alias for ``self[mod]``."""
        return self[mod]

    def keys(self) -> KeysView[Tuple[str, str]]:
        return self._modList.keys()

    def values(self) -> ValuesView[Mod]:
        return self._modList.values()

    def data(self) -> Dict[Tuple[str, str], Mod]:
        return self._modList

    async def add(self, mod: Mod) -> None:
        """Install ``mod`` by copying its files into the game directory.

        Rolls back the copied files and any settings changes if any step
        fails, then re-raises. Raises ``ModExistsError`` on duplicates and
        ``InvalidSourcePath`` if installing from within the mods directory.
        """
        # TODO: incomplete: always override compilation trigger mod
        if self.modspath in [mod.source, *mod.source.parents]:
            raise InvalidSourcePath(
                mod.source,
                'Invalid mod source: Mods cannot be installed from the mods directory'
            )
        async with self.updateLock:
            if (mod.filename, mod.target) in self._modList:
                raise ModExistsError(mod.filename, mod.target)
            target = self.getModPath(mod)
            if target.exists():
                # TODO: incomplete: make sure the mod is tracked by the model
                raise ModExistsError(mod.filename, mod.target)
            # Track which settings writes succeeded so rollback is precise.
            settings = 0
            inputs = 0
            try:
                target.mkdir(parents=True)
                # copy mod files
                for _file in mod.files:
                    sourceFile = mod.source.joinpath(_file.source)
                    targetFile = target.joinpath(_file.source)
                    targetFile.parent.mkdir(parents=True, exist_ok=True)
                    copyfile(sourceFile, targetFile)
                for _content in mod.contents:
                    sourceFile = mod.source.joinpath(_content.source)
                    targetFile = target.joinpath(_content.source)
                    targetFile.parent.mkdir(parents=True, exist_ok=True)
                    copyfile(sourceFile, targetFile)
                mod.installed = True
                settings = addSettings(
                    mod.settings, self.configpath.joinpath('user.settings'))
                inputs = addSettings(
                    mod.inputs, self.configpath.joinpath('input.settings'))
                setSettingsValue(mod.filename, 'Enabled', '1',
                                 self.configpath.joinpath('mods.settings'))
                await self.update(mod)
            except Exception as e:
                # Roll back everything done so far, then surface the error.
                removeDirectory(target)
                if settings:
                    removeSettings(mod.settings,
                                   self.configpath.joinpath('user.settings'))
                if inputs:
                    removeSettings(mod.inputs,
                                   self.configpath.joinpath('input.settings'))
                removeSettingsSection(
                    mod.filename, self.configpath.joinpath('mods.settings'))
                raise e
            self._modList[(mod.filename, mod.target)] = mod
            self.setLastUpdateTime(datetime.now(tz=timezone.utc))

    async def update(self, mod: Mod) -> None:
        """Persist ``mod``'s metadata to its ``.w3mm`` file (best effort)."""
        # serialize and store mod structure
        target = self.getModPath(mod, True)
        try:
            with target.joinpath('.w3mm').open(
                    'w', encoding='utf-8') as modInfoFile:
                modSerialized = mod.to_json()
                modInfoFile.write(modSerialized)
        except Exception as e:
            logger.exception(f'Could not update mod: {e}')

    async def replace(self, filename: str, target: str, mod: Mod) -> None:
        """Overwrite the model entry for ``(filename, target)`` with ``mod``."""
        # TODO: incomplete: handle possible conflict with existing mods
        async with self.updateLock:
            self._modList[(filename, target)] = mod
            self.setLastUpdateTime(datetime.now(tz=timezone.utc))

    async def remove(self, mod: ModelIndexType) -> None:
        """Disable then fully uninstall ``mod``: delete its directory and
        strip its entries from all settings files (each best effort)."""
        await self.disable(mod)
        async with self.updateLock:
            mod = self[mod]
            target = self.getModPath(mod, True)
            removeDirectory(target)
            try:
                removeSettings(mod.settings,
                               self.configpath.joinpath('user.settings'))
            except Exception as e:
                logger.bind(name=mod.filename).warning(
                    f'Could not remove settings from user.settings: {e}')
            try:
                removeSettings(mod.inputs,
                               self.configpath.joinpath('input.settings'))
            except Exception as e:
                logger.bind(name=mod.filename).warning(
                    f'Could not remove settings from input.settings: {e}')
            try:
                removeSettingsSection(
                    mod.filename, self.configpath.joinpath('mods.settings'))
            except Exception as e:
                logger.bind(name=mod.filename).warning(
                    f'Could not remove settings from mods.settings: {e}')
            del self._modList[(mod.filename, mod.target)]
            self.setLastUpdateTime(datetime.now(tz=timezone.utc))

    async def enable(self, mod: ModelIndexType) -> None:
        """Enable ``mod``: un-tilde its directory (mods) or strip
        ``.disabled`` suffixes (DLC), re-add its settings, and persist.

        Any failure triggers a best-effort undo of renames and settings.
        """
        async with self.updateLock:
            mod = self[mod]
            oldstat = mod.enabled
            oldpath = self.getModPath(mod, True)
            undo = False
            renames = []
            settings = 0
            inputs = 0
            try:
                mod.enabled = True
                if mod.target == 'mods':
                    newpath = self.getModPath(mod)
                    if oldpath != newpath:
                        # Drop the leading '~' by renaming the directory.
                        oldpath.rename(newpath)
                    setSettingsValue(mod.filename, 'Enabled', '1',
                                     self.configpath.joinpath('mods.settings'))
                if mod.target == 'dlc':
                    for file in oldpath.glob('**/*'):
                        # Strip (possibly stacked) '.disabled' suffixes.
                        while file.is_file() and file.suffix == '.disabled':
                            file = file.rename(file.with_suffix(''))
                            renames.append(file)
                settings = addSettings(
                    mod.settings, self.configpath.joinpath('user.settings'))
                inputs = addSettings(
                    mod.inputs, self.configpath.joinpath('input.settings'))
                await self.update(mod)
            except PermissionError:
                logger.bind(path=oldpath).exception(
                    'Could not enable mod, invalid permissions. Is it open in the explorer?'
                )
                undo = True
            except Exception as e:
                logger.exception(f'Could not enable mod: {e}')
                mod.enabled = oldstat
                undo = True
            if undo:
                # Best-effort rollback of directory/file renames and settings.
                newpath = self.getModPath(mod)
                mod.enabled = oldstat
                if newpath.is_dir() and newpath != oldpath:
                    newpath.rename(oldpath)
                for rename in reversed(renames):
                    rename.rename(
                        rename.with_suffix(rename.suffix + '.disabled'))
                if settings:
                    removeSettings(mod.settings,
                                   self.configpath.joinpath('user.settings'))
                if inputs:
                    removeSettings(mod.inputs,
                                   self.configpath.joinpath('input.settings'))
                if mod.target == 'mods':
                    setSettingsValue(mod.filename, 'Enabled', '0',
                                     self.configpath.joinpath('mods.settings'))
            # TODO: incomplete: handle xml and ini changes
            self.setLastUpdateTime(datetime.now(tz=timezone.utc))

    async def disable(self, mod: ModelIndexType) -> None:
        """Disable ``mod``: tilde-prefix its directory (mods) or append
        ``.disabled`` to its files (DLC), remove its settings, and persist.

        Any failure triggers a best-effort undo, mirroring :meth:`enable`.
        """
        async with self.updateLock:
            mod = self[mod]
            oldstat = mod.enabled
            oldpath = self.getModPath(mod, True)
            undo = False
            renames = []
            settings = 0
            inputs = 0
            try:
                mod.enabled = False
                if mod.target == 'mods':
                    newpath = self.getModPath(mod)
                    if oldpath != newpath:
                        oldpath.rename(newpath)
                    setSettingsValue(mod.filename, 'Enabled', '0',
                                     self.configpath.joinpath('mods.settings'))
                if mod.target == 'dlc':
                    for file in oldpath.glob('**/*'):
                        if file.is_file(
                        ) and not file.name == '.w3mm' and not file.suffix == '.disabled':
                            file = file.rename(
                                file.with_suffix(file.suffix + '.disabled'))
                            renames.append(file)
                settings = removeSettings(
                    mod.settings, self.configpath.joinpath('user.settings'))
                inputs = removeSettings(
                    mod.inputs, self.configpath.joinpath('input.settings'))
                await self.update(mod)
            except PermissionError:
                logger.bind(path=oldpath).exception(
                    'Could not disable mod, invalid permissions. Is it open in the explorer?'
                )
                undo = True
            except Exception as e:
                logger.exception(f'Could not disable mod: {e}')
                undo = True
            if undo:
                newpath = self.getModPath(mod)
                mod.enabled = oldstat
                if newpath.is_dir() and newpath != oldpath:
                    newpath.rename(oldpath)
                for rename in reversed(renames):
                    rename.rename(rename.with_suffix(''))
                if settings:
                    addSettings(mod.settings,
                                self.configpath.joinpath('user.settings'))
                if inputs:
                    addSettings(mod.inputs,
                                self.configpath.joinpath('input.settings'))
                if mod.target == 'mods':
                    setSettingsValue(mod.filename, 'Enabled', '1',
                                     self.configpath.joinpath('mods.settings'))
            # TODO: incomplete: handle xml and ini changes
            self.setLastUpdateTime(datetime.now(tz=timezone.utc))

    async def setFilename(self, mod: ModelIndexType, filename: str) -> None:
        """Rename ``mod`` on disk and in ``mods.settings``; undo on failure."""
        async with self.updateLock:
            mod = self[mod]
            oldname = mod.filename
            oldpath = self.getModPath(mod, True)
            mod.filename = filename
            newpath = self.getModPath(mod)
            renamed = False
            undo = False
            try:
                if oldpath != newpath:
                    oldpath.rename(newpath)
                    renamed = True
                renameSettingsSection(
                    oldname, filename,
                    self.configpath.joinpath('mods.settings'))
                await self.update(mod)
            except PermissionError:
                logger.bind(path=oldpath).exception(
                    'Could not rename mod, invalid permissions. Is it open in the explorer?'
                )
                undo = True
            except Exception as e:
                logger.exception(f'Could not rename mod: {e}')
                undo = True
            if undo:
                mod.filename = oldname
                if renamed:
                    newpath.rename(oldpath)
            self.setLastUpdateTime(datetime.now(tz=timezone.utc), False)

    async def setPackage(self, mod: ModelIndexType, package: str) -> None:
        """Set ``mod``'s package name and persist its metadata."""
        async with self.updateLock:
            mod = self[mod]
            mod.package = package
            await self.update(mod)
            self.setLastUpdateTime(datetime.now(tz=timezone.utc), False)

    async def setCategory(self, mod: ModelIndexType, category: str) -> None:
        """Set ``mod``'s category and persist its metadata."""
        async with self.updateLock:
            mod = self[mod]
            mod.category = category
            await self.update(mod)
            self.setLastUpdateTime(datetime.now(tz=timezone.utc), False)

    async def setPriority(self, mod: ModelIndexType, priority: int) -> None:
        """Set ``mod``'s load priority; mods also sync to mods.settings
        (empty string clears the value when priority is falsy)."""
        async with self.updateLock:
            mod = self[mod]
            mod.priority = priority
            if mod.target == 'mods':
                setSettingsValue(mod.filename, 'Priority',
                                 str(priority) if priority else '',
                                 self.configpath.joinpath('mods.settings'))
            await self.update(mod)
            self.setLastUpdateTime(datetime.now(tz=timezone.utc), False)

    def setLastUpdateTime(self, time: datetime,
                          fireUpdateCallbacks: bool = True) -> None:
        """Record the last-modified time and optionally notify observers."""
        self.lastUpdate = time
        if fireUpdateCallbacks:
            self.updateCallbacks.fire(self)

    def getModPath(self, mod: ModelIndexType, resolve: bool = False) -> Path:
        """Return the on-disk directory for ``mod``.

        Disabled mods live under a '~'-prefixed name. With ``resolve``, fall
        back to the un-tilded directory if the expected one is missing and
        raise ``ModNotFoundError`` when no directory exists at all.
        """
        if not isinstance(mod, Mod):
            mod = self[mod]
        basepath = self.gamepath.joinpath(mod.target).resolve()
        if not mod.enabled and mod.target == 'mods':
            target = basepath.joinpath(f'~{mod.filename}')
        else:
            target = basepath.joinpath(mod.filename)
        if resolve and not target.is_dir():
            if not mod.enabled and target.parent.joinpath(
                    re.sub(r'^~', r'', target.name)).is_dir():
                target = target.parent.joinpath(
                    re.sub(r'^~', r'', target.name))
            if not target.is_dir():
                raise ModNotFoundError(mod.filename, mod.target)
        return target

    def __len__(self) -> int:
        return len(self._modList)

    def __getitem__(self, mod: ModelIndexType) -> Mod:
        """Look up a mod by position (int), key tuple, or identity (Mod).

        Raises ``ModNotFoundError`` for unknown keys/instances and
        ``IndexError`` for unsupported index types.
        """
        if isinstance(mod, int):
            return list(self._modList.values())[mod]
        if isinstance(mod, tuple) and len(mod) == 2:
            if mod not in self._modList:
                raise ModNotFoundError(tuple(mod)[0], tuple(mod)[1])
            return self._modList[mod]
        if isinstance(mod, Mod):
            if mod not in self.values():
                raise ModNotFoundError(mod.filename, mod.target)
            return mod
        raise IndexError(f'invalid index type {type(mod)}')

    def __iter__(self) -> Iterator[Tuple[str, str]]:
        yield from self._modList

    @property
    def lockfile(self) -> Path:
        # Single-instance lock file kept inside the cache directory.
        return self._cachePath.joinpath('w3mm.lock')

    @property
    def gamepath(self) -> Path:
        return self._gamePath

    @property
    def configpath(self) -> Path:
        return self._configPath

    @property
    def cachepath(self) -> Path:
        return self._cachePath

    @property
    def modspath(self) -> Path:
        return self._modsPath

    @property
    def dlcspath(self) -> Path:
        return self._dlcsPath
class Model:
    '''Mod management model backed by an XML registry on disk.

    Holds the installed mods in memory (``self.modList``, keyed by mod
    name) and serializes them to/from ``installed.xml`` in the
    configuration directory.  An inter-process file lock prevents two
    instances from managing the same configuration concurrently.
    '''

    def __init__(self, ignorelock=False):
        if not ignorelock:
            # Non-blocking acquire: fail fast instead of waiting when
            # another instance already owns this configuration.
            self.lock = InterProcessLock(self.lockfile)
            if not self.lock.acquire(False):
                raise IOError('could not lock ' + self.lockfile)
        self.modList: Dict[str, Mod] = {}
        self.reload()

    def reload(self) -> None:
        '''Discard in-memory state and re-read the registry from disk.'''
        self.modList = {}
        if path.exists(self.xmlfile):
            tree = XML.parse(self.xmlfile)
            root = tree.getroot()
            for xmlmod in root.findall('mod'):
                mod = self.populateModFromXml(Mod(), xmlmod)
                self.modList[mod.name] = mod

    def write(self) -> None:
        '''Serialize every known mod back to the XML registry.'''
        root = XML.ElementTree(XML.Element('installed'))
        for mod in self.all():
            root = self.writeModToXml(mod, root)
        indent(root.getroot())
        root.write(self.xmlfile)

    def get(self, modname: str) -> Mod:
        '''Return the mod named *modname*; raises KeyError if unknown.'''
        return self.modList[modname]

    def list(self) -> KeysView[str]:
        '''All known mod names.'''
        return self.modList.keys()

    def all(self) -> ValuesView[Mod]:
        '''All known mods.'''
        return self.modList.values()

    def add(self, modname: str, mod: Mod):
        '''Register (or replace) *mod* under *modname*.'''
        self.modList[modname] = mod

    def remove(self, modname: str):
        '''Remove *modname* from the registry; unknown names are ignored
        so removal is idempotent.'''
        if modname in self.modList:
            del self.modList[modname]

    def rename(self, modname: str, newname: str) -> bool:
        '''Rename a registered mod.

        Returns False when *modname* is unknown, True on success.
        '''
        if modname not in self.modList:
            return False
        mod = self.modList[modname]
        del self.modList[modname]
        mod.name = newname
        self.modList[newname] = mod
        return True

    def explore(self, modname: str) -> None:
        '''Open each data folder of the mod in the system file browser.

        Disabled mods are stored under a '~'-prefixed directory.
        '''
        mod = self.modList[modname]
        for file in mod.files:
            moddir = data.config.mods + ('/~' if not mod.enabled else '/') + file
            openFolder(moddir)

    @property
    def xmlfile(self) -> str:
        return data.config.configuration + '/installed.xml'

    @property
    def lockfile(self) -> str:
        return data.config.configuration + '/installed.lock'

    @staticmethod
    def populateModFromXml(mod: Mod, root: XML.Element) -> Mod:
        '''Fill *mod* from its <mod> XML element and return it.'''
        mod.date = str(root.get('date'))
        # 'enabled' is stored as str(bool); anything but 'True' means disabled.
        mod.enabled = str(root.get('enabled')) == 'True'
        mod.name = str(root.get('name'))
        prt = str(root.get('priority'))
        if prt != 'Not Set':
            mod.priority = prt
        for elem in root.findall('data'):
            mod.files.append(str(elem.text))
        for elem in root.findall('dlc'):
            mod.dlcs.append(str(elem.text))
        for elem in root.findall('menu'):
            mod.menus.append(str(elem.text))
        for elem in root.findall('xmlkey'):
            mod.xmlkeys.append(str(elem.text))
        for elem in root.findall('hidden'):
            mod.hidden.append(str(elem.text))
        for elem in root.findall('key'):
            key = Key(elem.get('context'), str(elem.text))
            mod.inputsettings.append(key)
        for elem in root.findall('settings'):
            # legacy usersetting storage format
            settings = fetchUserSettings(str(elem.text))
            for setting in iter(settings):
                mod.usersettings.append(setting)
        for elem in root.findall('setting'):
            usersetting = Usersetting(str(elem.get('context')), str(elem.text))
            mod.usersettings.append(usersetting)
        mod.checkPriority()
        return mod

    @staticmethod
    def writeModToXml(mod: Mod, root: XML.ElementTree) -> XML.ElementTree:
        '''Append *mod* as a <mod> element under *root* and return *root*.'''
        elem = XML.SubElement(root.getroot(), 'mod')
        elem.set('name', mod.name)
        elem.set('enabled', str(mod.enabled))
        elem.set('date', mod.date)
        elem.set('priority', mod.priority)
        # Empty lists simply produce no child elements, so no guards needed.
        for file in mod.files:
            XML.SubElement(elem, 'data').text = file
        for dlc in mod.dlcs:
            XML.SubElement(elem, 'dlc').text = dlc
        for menu in mod.menus:
            XML.SubElement(elem, 'menu').text = menu
        for xml in mod.xmlkeys:
            XML.SubElement(elem, 'xmlkey').text = xml
        for xml in mod.hidden:
            XML.SubElement(elem, 'hidden').text = xml
        for key in mod.inputsettings:
            ky = XML.SubElement(elem, 'key')
            ky.text = str(key)
            ky.set('context', key.context)
        for usersetting in mod.usersettings:
            us = XML.SubElement(elem, 'setting')
            us.text = str(usersetting)
            us.set('context', usersetting.context)
        return root
def main(argv=None):
    '''Command line options.'''

    # setup argparser
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', action='version', version='%s v%s (%s)' %
                        (os.path.basename(__file__), __version__, __updated__))

    # Actions
    action_group = parser.add_argument_group(
        "Actions", "A single action must be selected.")
    actions = action_group.add_mutually_exclusive_group(required=True)
    actions.add_argument("--report", dest="report", type=str,
                         help="Submit the given textual report", metavar="TEXT")
    actions.add_argument("--report-from-file", dest="report_file", type=str,
                         help="Submit the given file as textual report", metavar="FILE")
    actions.add_argument("--cycle", dest="cycle", type=str,
                         help="Cycle the pool with the given ID", metavar="ID")

    # Options
    parser.add_argument(
        "--keep-reporting", dest="keep_reporting", default=0, type=int,
        help="Keep reporting from the specified file with specified interval",
        metavar="SECONDS")
    parser.add_argument("--random-offset", dest="random_offset", default=0, type=int,
                        help="Random offset for the reporting interval (+/-)",
                        metavar="SECONDS")

    # Settings
    parser.add_argument("--serverhost", dest="serverhost",
                        help="Server hostname for remote signature management",
                        metavar="HOST")
    parser.add_argument("--serverport", dest="serverport", type=int,
                        help="Server port to use", metavar="PORT")
    parser.add_argument("--serverproto", dest="serverproto",
                        help="Server protocol to use (default is https)",
                        metavar="PROTO")
    parser.add_argument("--serverauthtokenfile", dest="serverauthtokenfile",
                        help="File containing the server authentication token",
                        metavar="FILE")
    parser.add_argument("--clientid", dest="clientid",
                        help="Client ID to use when submitting issues", metavar="ID")

    # process options
    opts = parser.parse_args(argv)

    # --keep-reporting only makes sense when reports come from a file.
    if opts.keep_reporting and not opts.report_file:
        print("Error: --keep-reporting is only valid with --report-from-file", file=sys.stderr)
        return 2

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    reporter = EC2Reporter(opts.serverhost, opts.serverport, opts.serverproto,
                           serverauthtoken, opts.clientid)
    report = None

    if opts.cycle:
        reporter.cycle(opts.cycle)
        return 0
    elif opts.report_file:
        if opts.keep_reporting:
            if opts.random_offset > 0:
                # Seed per client so offsets differ between clients but are
                # reproducible for a given client.
                random.seed(reporter.clientId)
            # The writer of the report file holds this lock while updating it.
            lock = InterProcessLock(opts.report_file + ".lock")
            while True:
                if os.path.exists(opts.report_file):
                    # Couldn't get the lock within one interval: retry.
                    if not lock.acquire(timeout=opts.keep_reporting):
                        continue
                    try:
                        with open(opts.report_file) as f:
                            report = f.read()
                        try:
                            reporter.report(report)
                        except RuntimeError as e:
                            # Ignore errors if the server is temporarily unavailable
                            print("Failed to contact server: %s" % e, file=sys.stderr)
                    finally:
                        lock.release()
                # NOTE(review): sleep placement reconstructed from mangled
                # source — assumed to run on every iteration (including when
                # the file is absent) so the loop never busy-spins; verify.
                random_offset = 0
                if opts.random_offset:
                    random_offset = random.randint(-opts.random_offset, opts.random_offset)
                time.sleep(opts.keep_reporting + random_offset)
        else:
            with open(opts.report_file) as f:
                report = f.read()
    else:
        report = opts.report

    reporter.report(report)
    return 0
def main(argv=None):
    '''Command line options.'''

    # setup argparser
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', action='version', version='%s v%s (%s)' %
                        (os.path.basename(__file__), __version__, __updated__))

    # Actions
    action_group = parser.add_argument_group("Actions", "A single action must be selected.")
    actions = action_group.add_mutually_exclusive_group(required=True)
    actions.add_argument("--report", dest="report", type=str,
                         help="Submit the given textual report", metavar="TEXT")
    actions.add_argument("--report-from-file", dest="report_file", type=str,
                         help="Submit the given file as textual report", metavar="FILE")
    actions.add_argument("--cycle", dest="cycle", type=str,
                         help="Cycle the pool with the given ID", metavar="ID")
    actions.add_argument("--disable", dest="disable", type=str,
                         help="Disable the pool with the given ID", metavar="ID")
    actions.add_argument("--enable", dest="enable", type=str,
                         help="Enable the pool with the given ID", metavar="ID")

    # Options
    parser.add_argument("--keep-reporting", dest="keep_reporting", default=0, type=int,
                        help="Keep reporting from the specified file with specified interval",
                        metavar="SECONDS")
    parser.add_argument("--random-offset", dest="random_offset", default=0, type=int,
                        help="Random offset for the reporting interval (+/-)",
                        metavar="SECONDS")

    # Settings
    parser.add_argument("--serverhost", dest="serverhost",
                        help="Server hostname for remote signature management",
                        metavar="HOST")
    parser.add_argument("--serverport", dest="serverport", type=int,
                        help="Server port to use", metavar="PORT")
    parser.add_argument("--serverproto", dest="serverproto",
                        help="Server protocol to use (default is https)",
                        metavar="PROTO")
    parser.add_argument("--serverauthtokenfile", dest="serverauthtokenfile",
                        help="File containing the server authentication token",
                        metavar="FILE")
    parser.add_argument("--clientid", dest="clientid",
                        help="Client ID to use when submitting issues", metavar="ID")

    # process options
    opts = parser.parse_args(argv)

    # --keep-reporting only makes sense when reports come from a file.
    if opts.keep_reporting and not opts.report_file:
        print("Error: --keep-reporting is only valid with --report-from-file", file=sys.stderr)
        return 2

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    reporter = EC2Reporter(opts.serverhost, opts.serverport, opts.serverproto,
                           serverauthtoken, opts.clientid)
    report = None

    if opts.cycle:
        reporter.cycle(opts.cycle)
        return 0
    elif opts.enable:
        reporter.enable(opts.enable)
        return 0
    elif opts.disable:
        reporter.disable(opts.disable)
        return 0
    elif opts.report_file:
        if opts.keep_reporting:
            if opts.random_offset > 0:
                # Seed per client so offsets differ between clients but are
                # reproducible for a given client.
                random.seed(reporter.clientId)
            # The writer of the report file holds this lock while updating it.
            lock = InterProcessLock(opts.report_file + ".lock")
            while True:
                if os.path.exists(opts.report_file):
                    # Couldn't get the lock within one interval: retry.
                    if not lock.acquire(timeout=opts.keep_reporting):
                        continue
                    try:
                        with open(opts.report_file) as f:
                            report = f.read()
                        try:
                            reporter.report(report)
                        except RuntimeError as e:
                            # Ignore errors if the server is temporarily unavailable
                            print("Failed to contact server: %s" % e, file=sys.stderr)
                    finally:
                        lock.release()
                # NOTE(review): sleep placement reconstructed from mangled
                # source — assumed to run on every iteration (including when
                # the file is absent) so the loop never busy-spins; verify.
                random_offset = 0
                if opts.random_offset:
                    random_offset = random.randint(-opts.random_offset, opts.random_offset)
                time.sleep(opts.keep_reporting + random_offset)
        else:
            with open(opts.report_file) as f:
                report = f.read()
    else:
        report = opts.report

    reporter.report(report)
    return 0