Example #1
class ManagementLock(object):

    def __init__(self):
        self.lock = None

    def acquire(self):
        self.lock = InterProcessLock(LOCK_PATH)

        # Attempt to obtain a lock, retry every 10 seconds. Wait at most 5 minutes.
        # The retrying is necessary so we can report on stderr that we are waiting
        # for a lock. Otherwise, a user trying to run the command manually might
        # get confused why the command execution is delayed.
        if self.lock.acquire(blocking=False):
            return
        print("Another management command is running, waiting for lock...", file=sys.stderr)
        if self.lock.acquire(delay=10, max_delay=10, timeout=300):
            return

        self.lock = None
        raise RuntimeError("Failed to acquire lock.")

    def release(self):
        if self.lock is not None:
            self.lock.release()

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, _exc_type, _exc_val, _exc_tb):
        self.release()
        return False
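
A minimal usage sketch (not part of the original; the imports, LOCK_PATH value, and command body are assumptions for illustration):

import sys
from fasteners import InterProcessLock

LOCK_PATH = '/tmp/management.lock'  # assumed value, not from the original

with ManagementLock():
    run_management_command()  # hypothetical placeholder for the guarded work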
Example #2
File: lock.py Project: BenoitEchernier/web
@contextmanager  # assumed (from contextlib): the function yields once, context-manager style
def acquire_inter_process_lock(lock_name):
    # Lock preventing simultaneous crawling processes
    lock_name = _compute_lock_name(lock_name)
    lock = InterProcessLock('/tmp/%s' % lock_name)
    acquired_lock = lock.acquire(blocking=False)

    try:
        yield acquired_lock
    finally:
        if acquired_lock:
            lock.release()
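
A usage sketch under the decorator assumption above (the lock name and crawl body are illustrative):

with acquire_inter_process_lock('crawler') as acquired:
    if acquired:
        crawl()  # hypothetical placeholder; skipped when another process holds the lock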
Example #3
 def _update_db(self):
     if self._prim_db:
         lock = InterProcessLock("%s.lock" % self._prim_db)
         acquired = lock.acquire(blocking=False)
         if not acquired:
             logger.debug("Waiting 60 seconds for file lock")
             acquired = lock.acquire(blocking=True, timeout=60)
         if acquired:
             try:
                 with open(self._prim_db, "w", encoding="utf-8") as out:
                     out.write(json.dumps(self._dbs[self._prim_db]))  # Python 2 unicode() wrapper dropped; json.dumps returns str
                 return True
             finally:
                 lock.release()
         else:
             logger.error("Could not update platform database: "
                          "Lock acquire failed after 60 seconds")
             return False
     else:
         logger.error("Can't update platform database: "
                      "destination database is ambiguous")
         return False
Example #4
 def wrapper(self, *args):
     ret = None
     lock = InterProcessLock(self.lock_file)
     acquired = lock.acquire(blocking=False)
     if not acquired:
         self.debug("timed_mbedls_lock", "Waiting %d seconds for mock file lock." % timeout)
         acquired = lock.acquire(blocking=True, timeout=timeout)
     if acquired:
          try:
              ret = wrapper.original(self, *args)
          finally:
              # always release, preserving any raised exception and its traceback
              lock.release()
     else:
          self.err("timed_mbedls_lock", "Failed to acquire mock file lock in %d seconds!" % timeout)
         sys.exit(1)
     return ret
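
The body references `timeout` and `wrapper.original` from an enclosing scope. A hedged sketch of what the surrounding decorator factory might look like (all names here are assumptions, not the project's actual code):

def timed_mbedls_lock_decorator(timeout):  # hypothetical factory supplying `timeout`
    def decorator(original):
        def wrapper(self, *args):
            ...  # lock-guarded body as shown above
        wrapper.original = original  # the wrapped function the body calls
        return wrapper
    return decorator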
Example #5
    def run(self):
        """
        Run application.
        """
        # configure SQLAlchemy logging
        # log_level = self.logger.getEffectiveLevel()
        # logging.getLogger('sqlalchemy.engine').setLevel(log_level)

        exit_code = ExitCodes.EXIT_SUCCESS

        try:
            pid_lock = InterProcessLock(self.args.path_pidfile)
            pid_lock_gotten = pid_lock.acquire(blocking=False)
            if not pid_lock_gotten:
                raise AlreadyHarvesting(self.args.path_pidfile)
            self.logger.debug('Acquired PID lock {0!r}'.format(
                self.args.path_pidfile))

            if (self.args.no_routes and self.args.no_vnetworks
                    and not self.args.truncate):
                raise NothingToDo()

            harvesting = not (self.args.no_routes and self.args.no_vnetworks)

            Session = db.ScopedSession()
            Session.configure(bind=self.args.db_engine)

            db.configure_db(self.DB_PRAGMAS)

            # TODO(damb): Implement multithreaded harvesting using a thread
            # pool.
            try:
                if harvesting:
                    self.logger.info('Start harvesting.')

                if not self.args.no_routes:
                    self._harvest_routes(Session)
                else:
                    self.logger.warning(
                        'Disabled processing <route></route> information.')

                if not self.args.no_vnetworks:
                    self._harvest_vnetworks(Session)
                else:
                    self.logger.warning(
                        'Disabled processing <vnetwork></vnetwork> '
                        'information.')

                if harvesting:
                    self.logger.info('Finished harvesting successfully.')

                if self.args.truncate:
                    self.logger.warning('Removing outdated data.')
                    session = Session()
                    with db.session_guard(session) as _session:
                        num_removed_rows = db.clean(_session,
                                                    self.args.truncate)
                        self.logger.info('Number of rows removed: {}'.format(
                            num_removed_rows))

            except OperationalError as err:
                raise db.StationLiteDBEngineError(err)

        # TODO(damb): signal handling
        except Error as err:
            self.logger.error(err)
            exit_code = ExitCodes.EXIT_ERROR
        except Exception as err:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.logger.critical('Local Exception: %s' % err)
            self.logger.critical('Traceback information: ' + repr(
                traceback.format_exception(exc_type, exc_value, exc_traceback))
                                 )
            exit_code = ExitCodes.EXIT_ERROR
        finally:
            try:
                if pid_lock_gotten:
                    pid_lock.release()
            except NameError:
                pass

        sys.exit(exit_code)
Example #6
def main(argv=None):
    '''Command line options.'''

    # setup argparser
    parser = argparse.ArgumentParser()

    parser.add_argument('--version', action='version', version='%s v%s (%s)' %
                        (os.path.basename(__file__), __version__, __updated__))

    # Actions
    action_group = parser.add_argument_group("Actions", "A single action must be selected.")
    actions = action_group.add_mutually_exclusive_group(required=True)
    actions.add_argument("--report", dest="report", type=str, help="Submit the given textual report", metavar="TEXT")
    actions.add_argument("--report-from-file", dest="report_file", type=str,
                         help="Submit the given file as textual report", metavar="FILE")
    actions.add_argument("--cycle", dest="cycle", type=str, help="Cycle the pool with the given ID", metavar="ID")
    actions.add_argument("--disable", dest="disable", type=str, help="Disable the pool with the given ID", metavar="ID")
    actions.add_argument("--enable", dest="enable", type=str, help="Enable the pool with the given ID", metavar="ID")

    # Options
    parser.add_argument("--keep-reporting", dest="keep_reporting", default=0, type=int,
                        help="Keep reporting from the specified file with specified interval", metavar="SECONDS")
    parser.add_argument("--random-offset", dest="random_offset", default=0, type=int,
                        help="Random offset for the reporting interval (+/-)", metavar="SECONDS")

    # Settings
    parser.add_argument("--serverhost", dest="serverhost",
                        help="Server hostname for remote signature management", metavar="HOST")
    parser.add_argument("--serverport", dest="serverport", type=int, help="Server port to use", metavar="PORT")
    parser.add_argument("--serverproto", dest="serverproto",
                        help="Server protocol to use (default is https)", metavar="PROTO")
    parser.add_argument("--serverauthtokenfile", dest="serverauthtokenfile",
                        help="File containing the server authentication token", metavar="FILE")
    parser.add_argument("--clientid", dest="clientid", help="Client ID to use when submitting issues", metavar="ID")

    # process options
    opts = parser.parse_args(argv)

    if opts.keep_reporting and not opts.report_file:
        print("Error: --keep-reporting is only valid with --report-from-file", file=sys.stderr)
        return 2

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    reporter = EC2Reporter(opts.serverhost, opts.serverport, opts.serverproto, serverauthtoken, opts.clientid)
    report = None

    if opts.cycle:
        reporter.cycle(opts.cycle)
        return 0
    elif opts.enable:
        reporter.enable(opts.enable)
        return 0
    elif opts.disable:
        reporter.disable(opts.disable)
        return 0
    elif opts.report_file:
        if opts.keep_reporting:
            if opts.random_offset > 0:
                random.seed(reporter.clientId)

            lock = InterProcessLock(opts.report_file + ".lock")
            while True:
                if os.path.exists(opts.report_file):
                    if not lock.acquire(timeout=opts.keep_reporting):
                        continue
                    try:
                        with open(opts.report_file) as f:
                            report = f.read()
                        try:
                            reporter.report(report)
                        except RuntimeError as e:
                            # Ignore errors if the server is temporarily unavailable
                            print("Failed to contact server: %s" % e, file=sys.stderr)
                    finally:
                        lock.release()

                random_offset = 0
                if opts.random_offset:
                    random_offset = random.randint(-opts.random_offset, opts.random_offset)
                time.sleep(opts.keep_reporting + random_offset)
        else:
            with open(opts.report_file) as f:
                report = f.read()
    else:
        report = opts.report

    reporter.report(report)
    return 0
Example #7
def _cluster_settings_lock(daemon_name):
    return InterProcessLock(
        os.path.join(
            tempfile.gettempdir(),
            'cluster-{0}-{1}.lock'.format(getpass.getuser(), daemon_name)))
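
fasteners' InterProcessLock implements the context-manager protocol (blocking acquire on entry, release on exit), so the returned lock can be used directly in a with statement. A usage sketch (the daemon name and body are illustrative):

with _cluster_settings_lock('scheduler'):
    update_cluster_settings()  # hypothetical placeholder for the critical section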
Example #8
 @contextmanager  # assumed (from contextlib): the method yields once, context-manager style
 def dataframe(self, modify=True):
     with InterProcessLock(self.storagepath / 'lockfile'):
         df = self._load_dataframe()
         yield df
         if modify:
             self._save_dataframe(df)
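
A usage sketch under the decorator assumption above (`store` and the column name are hypothetical):

with store.dataframe() as df:
    df['count'] += 1  # written back on exit because modify defaults to True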
Example #9
class Model:
    '''Mod management model'''
    def __init__(self, ignorelock=False):
        if not ignorelock:
            self.lock = InterProcessLock(self.lockfile)
            if not self.lock.acquire(False):
                raise IOError('could not lock ' + self.lockfile)
        self.modList: Dict[str, Mod] = {}
        self.reload()

    def reload(self) -> None:
        self.modList = {}
        if path.exists(self.xmlfile):
            tree = XML.parse(self.xmlfile)
            root = tree.getroot()
            for xmlmod in root.findall('mod'):
                mod = self.populateModFromXml(Mod(), xmlmod)
                self.modList[mod.name] = mod

    def write(self) -> None:
        root = XML.ElementTree(XML.Element('installed'))
        for mod in self.all():
            root = self.writeModToXml(mod, root)
        indent(root.getroot())
        root.write(self.xmlfile)

    def get(self, modname: str) -> Mod:
        return self.modList[modname]

    def list(self) -> KeysView[str]:
        return self.modList.keys()

    def all(self) -> ValuesView[Mod]:
        return self.modList.values()

    def add(self, modname: str, mod: Mod):
        self.modList[modname] = mod

    def remove(self, modname: str):
        if modname in self.modList:
            del self.modList[modname]

    def rename(self, modname: str, newname: str) -> bool:
        if modname not in self.modList:
            return False
        mod = self.modList[modname]
        del self.modList[modname]
        mod.name = newname
        self.modList[newname] = mod
        return True

    def explore(self, modname: str) -> None:
        mod = self.modList[modname]
        for file in mod.files:
            moddir = data.config.mods + ('/~'
                                         if not mod.enabled else '/') + file
            openFolder(moddir)

    @property
    def xmlfile(self) -> str:
        return data.config.configuration + '/installed.xml'

    @property
    def lockfile(self) -> str:
        return data.config.configuration + '/installed.lock'

    @staticmethod
    def populateModFromXml(mod: Mod, root: XML.Element) -> Mod:
        mod.date = str(root.get('date'))
        enabled = str(root.get('enabled'))
        if enabled == 'True':
            mod.enabled = True
        else:
            mod.enabled = False
        mod.name = str(root.get('name'))
        prt = str(root.get('priority'))
        if prt != 'Not Set':
            mod.priority = prt
        for elem in root.findall('data'):
            mod.files.append(str(elem.text))
        for elem in root.findall('dlc'):
            mod.dlcs.append(str(elem.text))
        for elem in root.findall('menu'):
            mod.menus.append(str(elem.text))
        for elem in root.findall('xmlkey'):
            mod.xmlkeys.append(str(elem.text))
        for elem in root.findall('hidden'):
            mod.hidden.append(str(elem.text))
        for elem in root.findall('key'):
            key = Key(elem.get('context'), str(elem.text))
            mod.inputsettings.append(key)
        for elem in root.findall('settings'):
            # legacy usersetting storage format
            settings = fetchUserSettings(str(elem.text))
            for setting in iter(settings):
                mod.usersettings.append(setting)
        for elem in root.findall('setting'):
            usersetting = Usersetting(str(elem.get('context')), str(elem.text))
            mod.usersettings.append(usersetting)

        mod.checkPriority()
        return mod

    @staticmethod
    def writeModToXml(mod: Mod, root: XML.ElementTree) -> XML.ElementTree:
        elem = XML.SubElement(root.getroot(), 'mod')
        elem.set('name', mod.name)
        elem.set('enabled', str(mod.enabled))
        elem.set('date', mod.date)
        elem.set('priority', mod.priority)
        if mod.files:
            for file in mod.files:
                XML.SubElement(elem, 'data').text = file
        if mod.dlcs:
            for dlc in mod.dlcs:
                XML.SubElement(elem, 'dlc').text = dlc
        if mod.menus:
            for menu in mod.menus:
                XML.SubElement(elem, 'menu').text = menu
        if mod.xmlkeys:
            for xml in mod.xmlkeys:
                XML.SubElement(elem, 'xmlkey').text = xml
        if mod.hidden:
            for xml in mod.hidden:
                XML.SubElement(elem, 'hidden').text = xml
        if mod.inputsettings:
            for key in mod.inputsettings:
                ky = XML.SubElement(elem, 'key')
                ky.text = str(key)
                ky.set('context', key.context)
        if mod.usersettings:
            for usersetting in mod.usersettings:
                us = XML.SubElement(elem, 'setting')
                us.text = str(usersetting)
                us.set('context', usersetting.context)
        return root
Example #10
    def run(self):
        """
        Run application.
        """
        # configure SQLAlchemy logging
        # log_level = self.logger.getEffectiveLevel()
        # logging.getLogger('sqlalchemy.engine').setLevel(log_level)

        exit_code = ExitCodes.EXIT_SUCCESS

        self.logger.info(f"{self.PROG}: Version v{__version__}")
        self.logger.debug(f"Configuration: {dict(self.config)!r}")

        try:
            path_pidfile = self.config["path_pidfile"]
            pid_lock = InterProcessLock(path_pidfile)
            pid_lock_gotten = pid_lock.acquire(blocking=False)
            if not pid_lock_gotten:
                raise AlreadyHarvesting(path_pidfile)
            self.logger.debug(
                f"Aquired PID lock {self.config['path_pidfile']!r}"
            )

            if (
                self.config["no_routes"]
                and self.config["no_vnetworks"]
                and not self.config["truncate"]
            ):
                raise NothingToDo()

            harvesting = not (
                self.config["no_routes"] and self.config["no_vnetworks"]
            )

            Session = db.ScopedSession()
            engine = create_engine(
                self.config["sqlalchemy_database_uri"], echo=False
            )
            Session.configure(bind=engine)

            if engine.name == "sqlite":
                db.configure_sqlite(self.DB_PRAGMAS)

            # TODO(damb): Implement multithreaded harvesting using a thread
            # pool.
            try:
                if harvesting:
                    self.logger.info("Start harvesting.")

                if not self.config["no_routes"]:
                    self._harvest_routes(Session)
                else:
                    self.logger.info(
                        "Disabled processing <route></route> information."
                    )

                if not self.config["no_vnetworks"]:
                    self._harvest_vnetworks(Session)
                else:
                    self.logger.info(
                        "Disabled processing <vnetwork></vnetwork> "
                        "information."
                    )

                if harvesting:
                    self.logger.info("Finished harvesting successfully.")

                if self.config["truncate"]:
                    self.logger.warning("Removing outdated data.")
                    session = Session()
                    with db.session_guard(session) as _session:
                        num_removed_rows = db.clean(
                            _session,
                            self.config["truncate"],
                        )
                        self.logger.info(
                            f"Number of rows removed: {num_removed_rows}"
                        )

            except OperationalError as err:
                raise db.StationLiteDBEngineError(err)

        # TODO(damb): signal handling
        except Error as err:
            self.logger.error(err)
            exit_code = ExitCodes.EXIT_ERROR
        except Exception as err:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.logger.critical("Local Exception: %s" % err)
            self.logger.critical(
                "Traceback information: "
                + repr(
                    traceback.format_exception(
                        exc_type, exc_value, exc_traceback
                    )
                )
            )
            exit_code = ExitCodes.EXIT_ERROR
        finally:
            try:
                if pid_lock_gotten:
                    pid_lock.release()
            except NameError:
                pass

        sys.exit(exit_code)
Example #11
 def _list_outputs(self):
     """Execute this module.
     """
     # Initiate outputs
     outputs = self._base_outputs()
     out_files = []
     missing_files = []
     # Get output dir from base ArchiveSink class (will change depending on
     # whether it is per session/subject/visit/project)
     out_path = self._get_output_path()
     out_dir = os.path.abspath(os.path.join(*out_path))
     # Make session dir
     if not os.path.exists(out_dir):
         os.makedirs(out_dir, stat.S_IRWXU | stat.S_IRWXG)
     # Loop through datasets connected to the sink and copy them to archive
     # directory
     for spec in self.datasets:
         assert spec.derived, (
             "Should only be sinking derived datasets, not '{}'".format(
                 spec.name))
         filename = getattr(self.inputs, spec.name + PATH_SUFFIX)
         ext = spec.format.extension
         if not isdefined(filename):
             missing_files.append(spec.name)
             continue  # skip the upload for this file
         if lower(split_extension(filename)[1]) != lower(ext):
             raise NiAnalysisError(
                 "Mismatching extension '{}' for format '{}' ('{}')".format(
                     split_extension(filename)[1], spec.format, ext))
         assert spec.frequency == self.frequency
         # Copy to local system
         src_path = os.path.abspath(filename)
         out_fname = spec.fname()
         dst_path = os.path.join(out_dir, out_fname)
         out_files.append(dst_path)
         if os.path.isfile(src_path):
             shutil.copyfile(src_path, dst_path)
         elif os.path.isdir(src_path):
             shutil.copytree(src_path, dst_path)
         else:
             assert False
     if missing_files:
         # FIXME: Not sure if this should be an exception or not,
         #        indicates a problem but stopping now would throw
         #        away the datasets that were created
         logger.warning("Missing input datasets '{}' in LocalSink".format(
             "', '".join(missing_files)))
     # Return cache file paths
     outputs['out_files'] = out_files
     # Loop through fields connected to the sink and save them in the
     # fields JSON file
     out_fields = []
     fpath = self.fields_path(self.frequency)
     # Open fields JSON, locking to prevent other processes
     # reading or writing
     if self.fields:
         with InterProcessLock(fpath + LOCK, logger=logger):
             try:
                 with open(fpath, 'rb') as f:
                     fields = json.load(f)
             except IOError as e:
                 if e.errno == errno.ENOENT:
                     fields = {}
                 else:
                     raise
             # Update fields JSON and write back to file.
             for spec in self.fields:
                 value = getattr(self.inputs, spec.name + FIELD_SUFFIX)
                 qual_name = self.prefix_study_name(spec.name)
                 if spec.dtype is str:
                     assert isinstance(value, basestring)
                 else:
                     assert isinstance(value, spec.dtype)
                 fields[qual_name] = value
                 out_fields.append((qual_name, value))
             with open(fpath, 'wb') as f:
                 json.dump(fields, f)
     outputs['out_fields'] = out_fields
     return outputs
Example #12
@contextmanager  # assumed (from contextlib): the docstring describes a context manager
def lock_if_check_fails(check,
                        lock_path,
                        operation=None,
                        blocking=True,
                        _return_acquired=False,
                        **kwargs):
    """A context manager to establish a lock conditionally on result of a check

    It is intended to be used as a lock for a specific file and/or operation,
    e.g. for `annex get`ing a file or extracting an archive, so only one process
    would be performing such an operation.

    If the check fails, it tries to acquire the lock; if acquisition fails on
    the first try, the check is rerun before yielding to the managed block.

    `check` and `lock_path` could be a value, or a callable, or a tuple
    composing a callable and its args.

    Unfortunately yoh did not find any way in Python 2 to have a context manager
    which just skips the entire block if some condition is met (in Python3 there
    is ExitStack which could potentially be used).  So we would need still to
    check in the block body if the context manager return value is not None.

    Note also that the type of lock used (fasteners.InterProcessLock) works
    only across processes and does not lock between threads of the same
    process.

    Parameters
    ----------
    check: callable or (callable, args) or value
      If value (possibly after calling a callable) evaluates to True, no
      lock is acquired, and no context is executed
    lock_path: callable or (callable, args) or value
      Provides a path for the lock file, composed from that path + '.lck'
      extension
    operation: str, optional
      If provided, would be part of the locking extension
    blocking: bool, optional
      If blocking, process would be blocked until acquired and verified that it
      was acquired after it gets the lock
    _return_acquired: bool, optional
      Return also if lock was acquired.  For "private" use within DataLad (tests),
      do not rely on it in 3rd party solutions.
    **kwargs
      Passed to `.acquire` of the fasteners.InterProcessLock

    Returns
    -------
    result of check, lock[, acquired]
    """
    check1 = _get(check)
    if check1:  # we are done - nothing to do
        yield check1, None
        return
    # acquire blocking lock
    lock_filename = _get(lock_path)

    lock_filename += '.'
    if operation:
        lock_filename += operation + '-'
    lock_filename += 'lck'

    lock = InterProcessLock(lock_filename)
    acquired = False
    try:
        lgr.debug("Acquiring a lock %s", lock_filename)
        acquired = lock.acquire(blocking=blocking, **kwargs)
        lgr.debug("Acquired? lock %s: %s", lock_filename, acquired)
        if blocking:
            assert acquired
        check2 = _get(check)
        ret_lock = None if check2 else lock
        if _return_acquired:
            yield check2, ret_lock, acquired
        else:
            yield check2, ret_lock
    finally:
        if acquired:
            lgr.debug("Releasing lock %s", lock_filename)
            lock.release()
            if exists(lock_filename):
                unlink(lock_filename)
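
A usage sketch (the check, path, and extraction helper are hypothetical). If the check already passes, no lock is taken and the yielded lock is None; otherwise only the process that wins the lock performs the work:

import os.path as op

with lock_if_check_fails(lambda: op.exists('extracted'), 'extracted') as (done, lock):
    if lock:  # check failed and this process holds the lock
        extract_archive('extracted')  # hypothetical helper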
Example #13
class Lock:
    """A inter-process and inter-thread lock

    This internally uses :class:`fasteners.InterProcessLock` but provides non-blocking
    acquire. It also guarantees thread-safety when using the :meth:`singleton` class
    method to create / retrieve a lock instance.

    :param path: Path of the lock file to use / create.
    """

    _instances: Dict[str, "Lock"] = {}
    _singleton_lock = threading.Lock()

    @classmethod
    def singleton(cls, path: str) -> "Lock":
        """
        Retrieve an existing lock object with a given 'name' or create a new one. Use
        this method for thread-safe locks.

        :param path: Path of the lock file to use / create.
        """

        with cls._singleton_lock:
            try:
                instance = cls._instances[path]
            except KeyError:
                instance = cls(path)
                cls._instances[path] = instance

            return instance

    def __init__(self, path: str) -> None:

        self.path = path

        self._internal_lock = threading.Semaphore()
        self._external_lock = InterProcessLock(self.path)

        self._lock = threading.RLock()

    def acquire(self) -> bool:
        """
        Attempts to acquire the given lock.

        :returns: Whether or not the acquisition succeeded.
        """

        with self._lock:
            locked_internal = self._internal_lock.acquire(blocking=False)

            if not locked_internal:
                return False

            try:
                locked_external = self._external_lock.acquire(blocking=False)
            except Exception:
                self._internal_lock.release()
                raise
            else:

                if locked_external:
                    return True
                else:
                    self._internal_lock.release()
                    return False

    def release(self) -> None:
        """Release the previously acquired lock."""
        with self._lock:
            self._external_lock.release()
            self._internal_lock.release()

    def locked(self) -> bool:
        """
        Checks if the lock is currently held by any thread or process.

        :returns: Whether the lock is acquired.
        """
        with self._lock:
            gotten = self.acquire()
            if gotten:
                self.release()
            return not gotten

    def locking_pid(self) -> Optional[int]:
        """
        Returns the PID of the process which currently holds the lock or ``None``. This
        should work on macOS, OpenBSD and Linux but may fail on some platforms. Always
        use :meth:`locked` to check if the lock is held by any process.

        :returns: The PID of the process which currently holds the lock or ``None``.
        """

        with self._lock:

            if self._external_lock.acquired:
                return os.getpid()

            try:
                # Don't close again in case we are the locking process.
                self._external_lock._do_open()
                lockdata, fmt, pid_index = _get_lockdata()
                lockdata = fcntl.fcntl(self._external_lock.lockfile,
                                       fcntl.F_GETLK, lockdata)

                lockdata_list = struct.unpack(fmt, lockdata)
                pid = lockdata_list[pid_index]

                if pid > 0:
                    return pid

            except OSError:
                pass

            return None
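
A usage sketch (the lock path and work body are illustrative):

lock = Lock.singleton('/tmp/myapp.lock')  # hypothetical path
if lock.acquire():
    try:
        do_work()  # hypothetical placeholder
    finally:
        lock.release()
else:
    print('lock held by PID', lock.locking_pid())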
Example #14
    user32.SetWinEventHook.restype = ctypes.wintypes.HANDLE
    hook = user32.SetWinEventHook(
        EVENT_SYSTEM_FOREGROUND,
        EVENT_SYSTEM_FOREGROUND,
        0,
        WinEventProc,
        0,
        0,
        WINEVENT_OUTOFCONTEXT
    )
    if hook == 0:
        exit(1)

    msg = ctypes.wintypes.MSG()
    while user32.GetMessageW(ctypes.byref(msg), 0, 0, 0) != 0:
        user32.TranslateMessage(ctypes.byref(msg))  # TranslateMessage has no W variant and takes an LPMSG
        user32.DispatchMessageW(ctypes.byref(msg))  # pass the MSG by reference, not by value

    user32.UnhookWinEvent(hook)
    ole32.CoUninitialize()

logger.info('Acquiring lock')
lock = InterProcessLock(gettempdir() + '/bdbc_lock_file')
gotten = lock.acquire(timeout=10)

if gotten:
    logger.info('Lock acquired')
    foreground_window_hook()
else:
    logger.info('Lock failed')
Example #15
class ProjectManager(collections.Iterable):
    _basic_directories = (
        "backups",
        "intermediate",
        "lci",
        "processed",
    )
    _is_temp_dir = False
    read_only = False

    def __init__(self):
        self._base_data_dir, self._base_logs_dir = self._get_base_directories()
        self._create_base_directories()
        self.db = SubstitutableDatabase(
            os.path.join(self._base_data_dir, "projects.db"), [ProjectDataset])
        self.set_current("default", update=False)

    def __iter__(self):
        for project_ds in ProjectDataset.select():
            yield project_ds

    def __contains__(self, name):
        return ProjectDataset.select().where(
            ProjectDataset.name == name).count() > 0

    def __len__(self):
        return ProjectDataset.select().count()

    def __repr__(self):
        if len(self) > 20:
            return ("Brightway2 projects manager with {} objects, including:"
                    "{}\nUse `sorted(projects)` to get full list, "
                    "`projects.report()` to get\n\ta report on all projects."
                    ).format(
                        len(self), "".join([
                            "\n\t{}".format(x)
                            for x in sorted([x.name for x in self])[:10]
                        ]))
        else:
            return (
                "Brightway2 projects manager with {} objects:{}"
                "\nUse `projects.report()` to get a report on all projects."
            ).format(
                len(self), "".join([
                    "\n\t{}".format(x) for x in sorted([x.name for x in self])
                ]))

    ### Internal functions for managing projects

    def _get_base_directories(self):
        eight.wrap_os_environ_io()
        envvar = os.getenv("BRIGHTWAY2_DIR")
        if envvar:
            if not os.path.isdir(envvar):
                raise OSError(("BRIGHTWAY2_DIR variable is {}, but this is not"
                               " a valid directory").format(envvar))
            else:
                print("Using environment variable BRIGHTWAY2_DIR for data "
                      "directory:\n{}".format(envvar))
                envvar = os.path.abspath(envvar)
                logs_dir = os.path.join(envvar, "logs")
                create_dir(logs_dir)
                return envvar, logs_dir

        LABEL = "Brightway2" if sys.version_info < (3, 0) else "Brightway3"
        data_dir = appdirs.user_data_dir(LABEL, "pylca")
        logs_dir = appdirs.user_log_dir(LABEL, "pylca")
        return data_dir, logs_dir

    def _create_base_directories(self):
        create_dir(self._base_data_dir)
        create_dir(self._base_logs_dir)

    @property
    def current(self):
        return self._project_name

    def set_current(self, name, writable=True, update=True):
        if not self.read_only and lockable() and hasattr(self, "_lock"):
            try:
                self._lock.release()
            except (RuntimeError, ThreadError):
                pass
        self._project_name = str(name)

        # Need to allow writes when creating a new project
        # for new metadata stores
        self.read_only = False
        self.create_project(name)
        self._reset_meta()
        self._reset_sqlite3_databases()

        if not lockable():
            pass
        elif writable:
            self._lock = InterProcessLock(os.path.join(self.dir, "write-lock"))
            self.read_only = not self._lock.acquire(timeout=0.05)
            if self.read_only:
                warnings.warn(READ_ONLY_PROJECT)
        else:
            self.read_only = True

        if not self.read_only and update:
            self._do_automatic_updates()

    def _do_automatic_updates(self):
        """Run any available automatic updates"""
        from .updates import Updates
        for update_name in Updates.check_automatic_updates():
            print("Applying automatic update: {}".format(update_name))
            Updates.do_update(update_name)

    def _reset_meta(self):
        for obj in config.metadata:
            obj.__init__()

    def _reset_sqlite3_databases(self):
        for relative_path, substitutable_db in config.sqlite3_databases:
            substitutable_db.change_path(os.path.join(self.dir, relative_path))

    ### Public API
    @property
    def dir(self):
        return os.path.join(self._base_data_dir, safe_filename(self.current))

    @property
    def logs_dir(self):
        return os.path.join(self._base_logs_dir, safe_filename(self.current))

    @property
    def output_dir(self):
        """Get directory for output files.

        Uses environment variable ``BRIGHTWAY2_OUTPUT_DIR``; ``preferences['output_dir']``; or directory ``output`` in current project.

        Returns output directory path.

        """
        eight.wrap_os_environ_io()
        ep, pp = os.getenv('BRIGHTWAY2_OUTPUT_DIR'), config.p.get('output_dir')
        if ep and os.path.isdir(ep):
            return ep
        elif pp and os.path.isdir(pp):
            return pp
        else:
            return self.request_directory('output')

    def create_project(self, name=None, **kwargs):
        name = name or self.current
        if not ProjectDataset.select().where(
                ProjectDataset.name == name).count():
            ProjectDataset.create(data=kwargs, name=name)
        create_dir(self.dir)
        for dir_name in self._basic_directories:
            create_dir(os.path.join(self.dir, dir_name))
        create_dir(self.logs_dir)

    def copy_project(self, new_name, switch=True):
        """Copy current project to a new project named ``new_name``. If ``switch``, switch to new project."""
        if new_name in self:
            raise ValueError("Project {} already exists".format(new_name))
        fp = os.path.join(self._base_data_dir, safe_filename(new_name))
        if os.path.exists(fp):
            raise ValueError("Project directory already exists")
        project_data = ProjectDataset.select(
            ProjectDataset.name == self.current).get().data
        ProjectDataset.create(data=project_data, name=new_name)
        shutil.copytree(self.dir, fp, ignore=lambda x, y: ["write-lock"])
        create_dir(os.path.join(self._base_logs_dir, safe_filename(new_name)))
        if switch:
            self.set_current(new_name)

    def request_directory(self, name):
        """Return the absolute path to the subdirectory ``dirname``, creating it if necessary.

        Returns ``False`` if directory can't be created."""
        fp = os.path.join(self.dir, str(name))
        create_dir(fp)
        if not os.path.isdir(fp):
            return False
        return fp

    def _use_temp_directory(self):
        """Point the ProjectManager towards a temporary directory instead of `user_data_dir`.

        Used exclusively for tests."""
        if not self._is_temp_dir:
            self._orig_base_data_dir = self._base_data_dir
            self._orig_base_logs_dir = self._base_logs_dir
        temp_dir = tempfile.mkdtemp()
        self._base_data_dir = os.path.join(temp_dir, "data")
        self._base_logs_dir = os.path.join(temp_dir, "logs")
        self.db.change_path(':memory:')
        self.set_current("default", update=False)
        self._is_temp_dir = True
        return temp_dir

    def _restore_orig_directory(self):
        """Point the ProjectManager back to original directories.

        Used exclusively in tests."""
        if not self._is_temp_dir:
            return
        self._base_data_dir = self._orig_base_data_dir
        del self._orig_base_data_dir
        self._base_logs_dir = self._orig_base_logs_dir
        del self._orig_base_logs_dir
        self.db.change_path(os.path.join(self._base_data_dir, "projects.db"))
        self.set_current("default", update=False)
        self._is_temp_dir = False

    def delete_project(self, name=None, delete_dir=False):
        """Delete project ``name``, or the current project.

        ``name`` is the project to delete. If ``name`` is not provided, delete the current project.

        By default, the underlying project directory is not deleted; only the project name is removed from the list of active projects. If ``delete_dir`` is ``True``, then also delete the project directory.

        If deleting the current project, this function sets the current directory to ``default`` if it exists, or to a random project.

        Returns the current project."""
        victim = name or self.current
        if victim not in self:
            raise ValueError("{} is not a project".format(victim))
        if len(self) == 1:
            raise ValueError("Can't delete only remaining project")
        ProjectDataset.delete().where(ProjectDataset.name == victim).execute()

        if delete_dir:
            dir_path = os.path.join(self._base_data_dir, safe_filename(victim))
            assert os.path.isdir(dir_path), "Can't find project directory"
            shutil.rmtree(dir_path)

        if name is None or name == self.current:
            if "default" in self:
                self.set_current("default")
            else:
                self.set_current(next(iter(self)).name)
        return self.current

    def purge_deleted_directories(self):
        """Delete project directories for projects which are no longer registered.

        Returns number of directories deleted."""
        registered = {safe_filename(obj.name) for obj in self}
        bad_directories = [
            os.path.join(self._base_data_dir, dirname)
            for dirname in os.listdir(self._base_data_dir)
            if os.path.isdir(os.path.join(self._base_data_dir, dirname))
            and dirname not in registered
        ]

        for fp in bad_directories:
            shutil.rmtree(fp)

        return len(bad_directories)

    def report(self):
        """Give a report on current projects, including installed databases and file sizes.

        Returns tuples of ``(project name, number of databases, size of all databases (GB))``."""
        from . import databases
        _current = self.current
        data = []

        def get_dir_size(dirpath):
            """Modified from http://stackoverflow.com/questions/12480367/how-to-generate-directory-size-recursively-in-python-like-du-does.

            Does not follow symbolic links"""
            return sum(
                sum(
                    os.path.getsize(os.path.join(root, name))
                    for name in files)
                for root, dirs, files in os.walk(dirpath))

        names = sorted([x.name for x in self])
        for obj in names:
            self.set_current(obj, update=False, writable=False)
            data.append(
                (obj, len(databases), get_dir_size(projects.dir) / 1e9))
        self.set_current(_current)
        return data
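
A usage sketch (the project name is illustrative; `projects` is assumed to be the module-level ProjectManager instance, as referenced in report() above):

projects.set_current('my-analysis')  # creates the project if it does not already exist
if projects.read_only:
    print('Another process holds the write-lock; continuing read-only.')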
Example #16
class ProjectManager(Iterable):
    _basic_directories = (
        "backups",
        "intermediate",
        "lci",
        "processed",
    )
    _is_temp_dir = False
    read_only = False

    def __init__(self):
        self._base_data_dir, self._base_logs_dir = self._get_base_directories()
        self._create_base_directories()
        self.db = SubstitutableDatabase(self._base_data_dir / "projects.db",
                                        [ProjectDataset])

        columns = {
            o.name
            for o in self.db._database.get_columns("projectdataset")
        }
        if "full_hash" not in columns:
            src_filepath = self._base_data_dir / "projects.db"
            backup_filepath = self._base_data_dir / "projects.backup.db"
            shutil.copy(src_filepath, backup_filepath)

            MIGRATION_WARNING = """Adding a column to the projects database. A backup copy of this database '{}' was made at '{}'; if you have problems, file an issue, and restore the backup data to use the stable version of Brightway2."""

            print(MIGRATION_WARNING.format(src_filepath, backup_filepath))

            ADD_FULL_HASH_COLUMN = """ALTER TABLE projectdataset ADD COLUMN "full_hash" integer default 1"""
            self.db.execute_sql(ADD_FULL_HASH_COLUMN)

            # We don't do this, as the column added doesn't have a default
            # value, meaning that one would get error from using the
            # development branch alongside the stable branch.

            # from playhouse.migrate import SqliteMigrator, migrate
            # migrator = SqliteMigrator(self.db._database)
            # full_hash = BooleanField(default=True)
            # migrate(migrator.add_column("projectdataset", "full_hash", full_hash),)
        self.set_current("default", update=False)

    def __iter__(self):
        for project_ds in ProjectDataset.select():
            yield project_ds

    def __contains__(self, name):
        return ProjectDataset.select().where(
            ProjectDataset.name == name).count() > 0

    def __len__(self):
        return ProjectDataset.select().count()

    def __repr__(self):
        if len(self) > 20:
            return ("Brightway2 projects manager with {} objects, including:"
                    "{}\nUse `sorted(projects)` to get full list, "
                    "`projects.report()` to get\n\ta report on all projects."
                    ).format(
                        len(self),
                        "".join([
                            "\n\t{}".format(x)
                            for x in sorted([x.name for x in self])[:10]
                        ]),
                    )
        else:
            return (
                "Brightway2 projects manager with {} objects:{}"
                "\nUse `projects.report()` to get a report on all projects."
            ).format(
                len(self),
                "".join([
                    "\n\t{}".format(x) for x in sorted([x.name for x in self])
                ]),
            )

    ### Internal functions for managing projects

    def _get_base_directories(self):
        envvar = maybe_path(os.getenv("BRIGHTWAY2_DIR"))
        if envvar:
            if not envvar.is_dir():
                raise OSError(("BRIGHTWAY2_DIR variable is {}, but this is not"
                               " a valid directory").format(envvar))
            else:
                print("Using environment variable BRIGHTWAY2_DIR for data "
                      "directory:\n{}".format(envvar))
                envvar = envvar.absolute()
                logs_dir = envvar / "logs"
                create_dir(logs_dir)
                return envvar, logs_dir

        LABEL = "Brightway3"
        data_dir = Path(appdirs.user_data_dir(LABEL, "pylca"))
        logs_dir = Path(appdirs.user_log_dir(LABEL, "pylca"))
        return data_dir, logs_dir

    def _create_base_directories(self):
        create_dir(self._base_data_dir)
        create_dir(self._base_logs_dir)

    @property
    def current(self):
        return self._project_name

    @property
    def twofive(self):
        return bool(self.dataset.data.get("25"))

    def set_current(self, name, writable=True, update=True):
        if not self.read_only and lockable() and hasattr(self, "_lock"):
            try:
                self._lock.release()
            except (RuntimeError, ThreadError):
                pass
        self._project_name = str(name)

        # Need to allow writes when creating a new project
        # for new metadata stores
        self.read_only = False
        self.create_project(name)
        self.dataset = ProjectDataset.get(
            ProjectDataset.name == self._project_name)
        self._reset_meta()
        self._reset_sqlite3_databases()

        self.dataset = ProjectDataset.get(name=name)

        if not lockable():
            pass
        elif writable:
            self._lock = InterProcessLock(self.dir / "write-lock")
            self.read_only = not self._lock.acquire(timeout=0.05)
            if self.read_only:
                warnings.warn(READ_ONLY_PROJECT)
        else:
            self.read_only = True

        if not self.read_only and update:
            self._do_automatic_updates()

    def _do_automatic_updates(self):
        """Run any available automatic updates"""
        from .updates import Updates

        for update_name in Updates.check_automatic_updates():
            print("Applying automatic update: {}".format(update_name))
            Updates.do_update(update_name)

    def _reset_meta(self):
        for obj in config.metadata:
            obj.__init__()

    def _reset_sqlite3_databases(self):
        for relative_path, substitutable_db in config.sqlite3_databases:
            substitutable_db.change_path(self.dir / relative_path)

    ### Public API
    @property
    def dir(self):
        return Path(self._base_data_dir) / safe_filename(
            self.current, full=self.dataset.full_hash)

    @property
    def logs_dir(self):
        return Path(self._base_logs_dir) / safe_filename(
            self.current, full=self.dataset.full_hash)

    @property
    def output_dir(self):
        """Get directory for output files.

        Uses environment variable ``BRIGHTWAY2_OUTPUT_DIR``; ``preferences['output_dir']``; or directory ``output`` in current project.

        Returns output directory path.

        """
        ep, pp = (
            maybe_path(os.getenv("BRIGHTWAY2_OUTPUT_DIR")),
            maybe_path(config.p.get("output_dir")),
        )
        if ep and ep.is_dir():
            return ep
        elif pp and pp.is_dir():
            return pp
        else:
            return self.request_directory("output")

    def create_project(self, name=None, **kwargs):
        name = name or self.current

        kwargs["25"] = True
        full_hash = kwargs.pop("full_hash", False)
        try:
            self.dataset = ProjectDataset.get(ProjectDataset.name == name)
        except DoesNotExist:
            self.dataset = ProjectDataset.create(data=kwargs,
                                                 name=name,
                                                 full_hash=full_hash)
        create_dir(self.dir)
        for dir_name in self._basic_directories:
            create_dir(self.dir / dir_name)
        create_dir(self.logs_dir)

    def copy_project(self, new_name, switch=True):
        """Copy current project to a new project named ``new_name``. If ``switch``, switch to new project."""
        if new_name in self:
            raise ValueError("Project {} already exists".format(new_name))
        fp = self._base_data_dir / safe_filename(new_name,
                                                 full=self.dataset.full_hash)
        if fp.exists():
            raise ValueError("Project directory already exists")
        project_data = ProjectDataset.get(
            ProjectDataset.name == self.current).data
        ProjectDataset.create(data=project_data,
                              name=new_name,
                              full_hash=self.dataset.full_hash)
        shutil.copytree(self.dir, fp, ignore=lambda x, y: ["write-lock"])
        create_dir(self._base_logs_dir / safe_filename(new_name))
        if switch:
            self.set_current(new_name)

    def request_directory(self, name):
        """Return the absolute path to the subdirectory ``dirname``, creating it if necessary.

        Returns ``False`` if directory can't be created."""
        fp = self.dir / str(name)
        create_dir(fp)
        if not fp.is_dir():
            return False
        return fp

    def _use_temp_directory(self):
        """Point the ProjectManager towards a temporary directory instead of `user_data_dir`.

        Used exclusively for tests."""
        if not self._is_temp_dir:
            self._orig_base_data_dir = self._base_data_dir
            self._orig_base_logs_dir = self._base_logs_dir
        temp_dir = Path(tempfile.mkdtemp())
        self._base_data_dir = temp_dir / "data"
        self._base_logs_dir = temp_dir / "logs"
        self.db.change_path(":memory:")
        self.set_current("default", update=False)
        self._is_temp_dir = True
        return temp_dir

    def _restore_orig_directory(self):
        """Point the ProjectManager back to original directories.

        Used exclusively in tests."""
        if not self._is_temp_dir:
            return
        self._base_data_dir = self._orig_base_data_dir
        del self._orig_base_data_dir
        self._base_logs_dir = self._orig_base_logs_dir
        del self._orig_base_logs_dir
        self.db.change_path(self._base_data_dir / "projects.db")
        self.set_current("default", update=False)
        self._is_temp_dir = False

    def migrate_project_25(self):
        """Migrate project to Brightway 2.5.

        Reprocesses all databases and LCIA objects."""
        assert not self.twofive, "Project is already 2.5 compatible"

        from .updates import Updates
        Updates()._reprocess_all()

        self.dataset.data["25"] = True
        self.dataset.save()

    def delete_project(self, name=None, delete_dir=False):
        """Delete project ``name``, or the current project.

        ``name`` is the project to delete. If ``name`` is not provided, delete the current project.

        By default, the underlying project directory is not deleted; only the project name is removed from the list of active projects. If ``delete_dir`` is ``True``, then also delete the project directory.

        If deleting the current project, this function sets the current directory to ``default`` if it exists, or to a random project.

        Returns the current project."""
        victim = name or self.current
        if victim not in self:
            raise ValueError("{} is not a project".format(victim))

        if len(self) == 1:
            raise ValueError("Can't delete only remaining project")

        ProjectDataset.delete().where(ProjectDataset.name == victim).execute()

        if delete_dir:
            dir_path = self._base_data_dir / safe_filename(victim)
            assert dir_path.is_dir(), "Can't find project directory"
            shutil.rmtree(dir_path)

        if name is None or name == self.current:
            if "default" in self:
                self.set_current("default")
            else:
                self.set_current(next(iter(self)).name)
        return self.current

    def purge_deleted_directories(self):
        """Delete project directories for projects which are no longer registered.

        Returns number of directories deleted."""
        registered = {safe_filename(obj.name) for obj in self}
        bad_directories = [
            self._base_data_dir / dirname
            for dirname in os.listdir(self._base_data_dir)
            if (self._base_data_dir /
                dirname).is_dir() and dirname not in registered
        ]

        for fp in bad_directories:
            shutil.rmtree(fp)

        return len(bad_directories)

    def report(self):
        """Give a report on current projects, including installed databases and file sizes.

        Returns tuples of ``(project name, number of databases, size of all databases (GB))``."""
        from . import databases

        _current = self.current
        data = []

        def get_dir_size(dirpath):
            """Modified from http://stackoverflow.com/questions/12480367/how-to-generate-directory-size-recursively-in-python-like-du-does.

            Does not follow symbolic links"""
            return sum(
                sum(os.path.getsize(os.path.join(root, name))  # os.walk yields str paths, so join rather than /
                    for name in files)
                for root, dirs, files in os.walk(dirpath))

        names = sorted([x.name for x in self])
        for obj in names:
            self.set_current(obj, update=False, writable=False)
            data.append(
                (obj, len(databases), get_dir_size(projects.dir) / 1e9))
        self.set_current(_current)
        return data

    def use_short_hash(self):
        if not self.dataset.full_hash:
            return
        try:
            old_dir, old_logs_dir = self.dir, self.logs_dir
            self.dataset.full_hash = False
            if self.dir.exists():
                raise OSError("Target directory {} already exists".format(
                    self.dir))
            if self.logs_dir.exists():
                raise OSError("Target directory {} already exists".format(
                    self.logs_dir))
            old_dir.rename(self.dir)
            old_logs_dir.rename(self.logs_dir)
            self.dataset.save()
        except Exception as ex:
            self.dataset.full_hash = True
            raise ex

    def use_full_hash(self):
        if self.dataset.full_hash:
            return
        try:
            old_dir, old_logs_dir = self.dir, self.logs_dir
            self.dataset.full_hash = True
            if self.dir.exists():
                raise OSError("Target directory {} already exists".format(
                    self.dir))
            if self.logs_dir.exists():
                raise OSError("Target directory {} already exists".format(
                    self.logs_dir))
            old_dir.rename(self.dir)
            old_logs_dir.rename(self.logs_dir)
            self.dataset.save()
        except Exception as ex:
            self.dataset.full_hash = False
            raise ex
Example #17
    if not mailpile_user or mailpile_user == 'root':
        usage(2, "Please specify a (non-root) user to launch Mailpile.")
    if not re.match(r'^[a-zA-Z0-9\._-]+$', mailpile_user):
        usage(2, "That is a strange looking username.")
    mailpile_user = pwd.getpwnam(mailpile_user)
    if not mailpile_user:
        usage(2, "Please specify a (non-root) user to launch Mailpile.")
    if not re.match(r'^[a-zA-Z0-9\.:/]+$', url):
        usage(2, "That is a strange looking URL.")

    mailpile_home = MAILPILE_HOME_PATH % mailpile_user.pw_dir
    if not os.path.exists(mailpile_home):
        usage(3, "That user has never run Mailpile. Aborting.")

    mp_lockfile = os.path.join(mailpile_home, MAILPILE_WORK_LOCK)
    mp_lock = InterProcessLock(mp_lockfile)
    if not mp_lock.acquire(blocking=False):
        # We are happy with this result, don't raise an error.
        sys.stderr.write(
            "Mailpile is already running for that user. Doing Nothing.\n")
    else:
        # We will release the lock on exec(), but make sure the user owns
        # the lockfile and will be able to take over.
        os.chown(mp_lockfile, mailpile_user.pw_uid, mailpile_user.pw_gid)
        os.execv('/bin/su',
            ['/bin/su', '-', mailpile_user.pw_name, '-c', (
                'screen -S mailpile -d -m '
                'mailpile --idlequit=%d --pid=%s/%s.pid --www=%s --interact'
                ) % (idlequit, MAILPILE_PIDS_PATH, mailpile_user.pw_name, url)])
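
Distilled, the snippet is a single-instance guard: a non-blocking acquire either succeeds and the program proceeds (here by exec'ing into screen), or it fails and the program reports and does nothing. A minimal sketch of the same guard (the lock path and entry point are hypothetical):

import sys
from fasteners import InterProcessLock

guard = InterProcessLock('/tmp/myapp.lock')
if not guard.acquire(blocking=False):
    sys.stderr.write("Another instance is already running. Doing nothing.\n")
    sys.exit(0)
try:
    run_application()  # hypothetical entry point
finally:
    guard.release()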
Example #18
class Lock:
    """
    A inter-process and inter-thread lock. This reuses uses code from oslo.concurrency
    but provides non-blocking acquire. Use the :meth:`singleton` class method to
    retrieve an existing instance for thread-safe usage.
    """

    _instances: Dict[str, "Lock"] = dict()
    _singleton_lock = threading.Lock()

    @classmethod
    def singleton(cls, name: str, lock_path: Optional[str] = None) -> "Lock":
        """
        Retrieve an existing lock object with a given 'name' or create a new one. Use
        this method for thread-safe locks.

        :param name: Name of lock file.
        :param lock_path: Directory for lock files. Defaults to the temporary directory
            returned by :func:`tempfile.gettempdir()` if not given.
        """

        with cls._singleton_lock:
            try:
                instance = cls._instances[name]
            except KeyError:
                instance = cls(name, lock_path)
                cls._instances[name] = instance

            return instance

    def __init__(self, name: str, lock_path: Optional[str] = None) -> None:

        self.name = name
        dirname = lock_path or tempfile.gettempdir()
        lock_path = os.path.join(dirname, name)

        self._internal_lock = threading.Semaphore()
        self._external_lock = InterProcessLock(lock_path)

        self._lock = threading.RLock()

    def acquire(self) -> bool:
        """
        Attempts to acquire the given lock.

        :returns: Whether or not the acquisition succeeded.
        """

        with self._lock:
            locked_internal = self._internal_lock.acquire(blocking=False)

            if not locked_internal:
                return False

            try:
                locked_external = self._external_lock.acquire(blocking=False)
            except Exception:
                self._internal_lock.release()
                raise
            else:

                if locked_external:
                    return True
                else:
                    self._internal_lock.release()
                    return False

    def release(self) -> None:
        """Release the previously acquired lock."""
        with self._lock:
            self._external_lock.release()
            self._internal_lock.release()

    def locked(self) -> bool:
        """Checks if the lock is currently held by any thread or process."""
        with self._lock:
            gotten = self.acquire()
            if gotten:
                self.release()
            return not gotten

    def locking_pid(self) -> Optional[int]:
        """
        Returns the PID of the process which currently holds the lock or ``None``. This
        should work on macOS, OpenBSD and Linux but may fail on some platforms. Always
        use :meth:`locked` to check if the lock is held by any process.

        :returns: The PID of the process which currently holds the lock or ``None``.
        """

        with self._lock:

            if self._external_lock.acquired:
                return os.getpid()

            try:
                # don't close again in case we are the locking process
                self._external_lock._do_open()
                lockdata, fmt, pid_index = _get_lockdata()
                lockdata = fcntl.fcntl(self._external_lock.lockfile,
                                       fcntl.F_GETLK, lockdata)

                lockdata_list = struct.unpack(fmt, lockdata)
                pid = lockdata_list[pid_index]

                if pid > 0:
                    return pid

            except OSError:
                pass

            return None
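
Because instances are cached in _instances, threads that ask for the same name share one Lock object, so both the internal semaphore and the external file lock are consulted on every acquire. A usage sketch under that assumption (the lock name is hypothetical):

lock = Lock.singleton("my-task.lock")
if lock.acquire():
    try:
        pass  # critical section
    finally:
        lock.release()
else:
    print("Lock is held elsewhere, possibly by PID %s" % lock.locking_pid())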
Example #19
from apscheduler.schedulers import SchedulerNotRunningError  # type: ignore
from apscheduler.triggers.date import DateTrigger  # type: ignore
from fasteners import InterProcessLock  # type: ignore

from deck_chores import __version__
from deck_chores.config import cfg, generate_config
from deck_chores.exceptions import ConfigurationError
from deck_chores.indexes import locking_container_to_services_map
from deck_chores import jobs
import deck_chores.parsers as parse
from deck_chores.utils import from_json, generate_id, log, log_handler

####

lock = InterProcessLock('/tmp/deck-chores.lock')


def there_is_another_deck_chores_container() -> bool:
    matched_containers = 0
    for container in cfg.client.containers.list():
        if container.image.labels.get('org.label-schema.name',
                                      '') == 'deck-chores':
            matched_containers += 1
        if matched_containers > 1:
            return True
    return False


####
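
The module-level lock guards against a second deck-chores process on the same host, while the container check guards against a second container. A hedged sketch of how the two guards might be combined at startup (this control flow is an assumption, not shown in the snippet):

import sys

def exit_if_duplicate_instance() -> None:
    if not lock.acquire(blocking=False):
        log.error('Another deck-chores process is already running.')
        sys.exit(1)
    if there_is_another_deck_chores_container():
        log.error('Another deck-chores container is already running.')
        sys.exit(1)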
Example #20
def main(argv=None):
    '''Command line options.'''

    # setup argparser
    parser = argparse.ArgumentParser()

    parser.add_argument('--version',
                        action='version',
                        version='%s v%s (%s)' %
                        (os.path.basename(__file__), __version__, __updated__))

    # Actions
    action_group = parser.add_argument_group(
        "Actions", "A single action must be selected.")
    actions = action_group.add_mutually_exclusive_group(required=True)
    actions.add_argument("--report",
                         dest="report",
                         type=str,
                         help="Submit the given textual report",
                         metavar="TEXT")
    actions.add_argument("--report-from-file",
                         dest="report_file",
                         type=str,
                         help="Submit the given file as textual report",
                         metavar="FILE")
    actions.add_argument("--cycle",
                         dest="cycle",
                         type=str,
                         help="Cycle the pool with the given ID",
                         metavar="ID")
    actions.add_argument("--disable",
                         dest="disable",
                         type=str,
                         help="Disable the pool with the given ID",
                         metavar="ID")
    actions.add_argument("--enable",
                         dest="enable",
                         type=str,
                         help="Enable the pool with the given ID",
                         metavar="ID")

    # Options
    parser.add_argument(
        "--keep-reporting",
        dest="keep_reporting",
        default=0,
        type=int,
        help="Keep reporting from the specified file with specified interval",
        metavar="SECONDS")
    parser.add_argument("--random-offset",
                        dest="random_offset",
                        default=0,
                        type=int,
                        help="Random offset for the reporting interval (+/-)",
                        metavar="SECONDS")

    # Settings
    parser.add_argument("--serverhost",
                        dest="serverhost",
                        help="Server hostname for remote signature management",
                        metavar="HOST")
    parser.add_argument("--serverport",
                        dest="serverport",
                        type=int,
                        help="Server port to use",
                        metavar="PORT")
    parser.add_argument("--serverproto",
                        dest="serverproto",
                        help="Server protocol to use (default is https)",
                        metavar="PROTO")
    parser.add_argument("--serverauthtokenfile",
                        dest="serverauthtokenfile",
                        help="File containing the server authentication token",
                        metavar="FILE")
    parser.add_argument("--clientid",
                        dest="clientid",
                        help="Client ID to use when submitting issues",
                        metavar="ID")

    # process options
    opts = parser.parse_args(argv)

    if opts.keep_reporting and not opts.report_file:
        print("Error: --keep-reporting is only valid with --report-from-file",
              file=sys.stderr)
        return 2

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    reporter = EC2Reporter(opts.serverhost, opts.serverport, opts.serverproto,
                           serverauthtoken, opts.clientid)
    report = None

    if opts.cycle:
        reporter.cycle(opts.cycle)
        return 0
    elif opts.enable:
        reporter.enable(opts.enable)
        return 0
    elif opts.disable:
        reporter.disable(opts.disable)
        return 0
    elif opts.report_file:
        if opts.keep_reporting:
            if opts.random_offset > 0:
                random.seed(reporter.clientId)

            lock = InterProcessLock(opts.report_file + ".lock")
            while True:
                if os.path.exists(opts.report_file):
                    if not lock.acquire(timeout=opts.keep_reporting):
                        continue
                    try:
                        with open(opts.report_file) as f:
                            report = f.read()
                        try:
                            reporter.report(report)
                        except RuntimeError as e:
                            # Ignore errors if the server is temporarily unavailable
                            print("Failed to contact server: %s" % e,
                                  file=sys.stderr)
                    finally:
                        lock.release()

                random_offset = 0
                if opts.random_offset:
                    random_offset = random.randint(-opts.random_offset,
                                                   opts.random_offset)
                time.sleep(opts.keep_reporting + random_offset)
        else:
            with open(opts.report_file) as f:
                report = f.read()
    else:
        report = opts.report

    reporter.report(report)
    return 0
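
In the --keep-reporting loop, the reader takes <report_file>.lock before reading, so whatever process produces the report file should take the same lock while writing, to avoid the reporter seeing a half-written file. A writer-side sketch under that assumption (the path and content are hypothetical):

from fasteners import InterProcessLock

report_path = "/var/run/fuzzer/report.txt"  # hypothetical
with InterProcessLock(report_path + ".lock"):
    with open(report_path, "w") as f:
        f.write("current status report\n")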
Example #21
def write_aggregated_stats(base_dirs, outfile):
    '''
    Generate aggregated statistics from the given base directories
    and write them to the specified output file.
    
    @type base_dirs: list
    @param base_dirs: List of AFL base directories

    @type outfile: str
    @param outfile: Output file for aggregated statistics
    '''

    # Which fields to add
    wanted_fields_total = [
        'execs_done', 'execs_per_sec', 'pending_favs', 'pending_total',
        'variable_paths', 'unique_crashes', 'unique_hangs'
    ]

    # Which fields to aggregate by mean
    wanted_fields_mean = ['exec_timeout']

    # Which fields should be displayed per fuzzer instance
    wanted_fields_all = ['cycles_done', 'bitmap_cvg']

    # Which fields should be aggregated by max
    wanted_fields_max = ['last_path']

    aggregated_stats = {}

    for field in wanted_fields_total:
        aggregated_stats[field] = 0

    for field in wanted_fields_mean:
        aggregated_stats[field] = (0, 0)

    for field in wanted_fields_all:
        aggregated_stats[field] = []

    def convert_num(num):
        if '.' in num:
            return float(num)
        return int(num)

    for base_dir in base_dirs:
        stats_path = os.path.join(base_dir, "fuzzer_stats")

        if os.path.exists(stats_path):
            with open(stats_path, 'r') as stats_file:
                stats = stats_file.read()

            for line in stats.splitlines():
                (field_name, field_val) = line.split(':', 1)
                field_name = field_name.strip()
                field_val = field_val.strip()

                if field_name in wanted_fields_total:
                    aggregated_stats[field_name] += convert_num(field_val)
                elif field_name in wanted_fields_mean:
                    (val, cnt) = aggregated_stats[field_name]
                    aggregated_stats[field_name] = (val +
                                                    convert_num(field_val),
                                                    cnt + 1)
                elif field_name in wanted_fields_all:
                    aggregated_stats[field_name].append(field_val)
                elif field_name in wanted_fields_max:
                    num_val = convert_num(field_val)
                    if (field_name not in aggregated_stats
                            or aggregated_stats[field_name] < num_val):
                        aggregated_stats[field_name] = num_val

    # If we don't have any data here, then the fuzzers haven't written any statistics yet
    if not aggregated_stats:
        return

    # Mean conversion
    for field_name in wanted_fields_mean:
        (val, cnt) = aggregated_stats[field_name]
        if cnt:
            aggregated_stats[field_name] = float(val) / float(cnt)
        else:
            aggregated_stats[field_name] = val

    # Write out data
    fields = []
    fields.extend(wanted_fields_total)
    fields.extend(wanted_fields_mean)
    fields.extend(wanted_fields_all)
    fields.extend(wanted_fields_max)

    max_keylen = max([len(x) for x in fields])

    with InterProcessLock(outfile + ".lock"), open(outfile, 'w') as f:
        for field in fields:
            if field not in aggregated_stats:
                continue

            val = aggregated_stats[field]

            if isinstance(val, list):
                val = " ".join(val)

            f.write("%s%s: %s\n" % (field, " " *
                                    (max_keylen + 1 - len(field)), val))

    return
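
The writer holds <outfile>.lock while rewriting the stats file, so a consumer should take the same lock to get a consistent snapshot. A minimal reader sketch under that assumption:

from fasteners import InterProcessLock

def read_aggregated_stats(outfile):
    with InterProcessLock(outfile + ".lock"), open(outfile, "r") as f:
        return f.read()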
Example #22
def test_try_lock_informatively(tempfile=None):
    lock = InterProcessLock(tempfile + '.lck')
    lock_path = ensure_unicode(lock.path)  # can be bytes, complicates string formatting etc.
    t0 = time()
    with try_lock_informatively(lock, purpose="happy life") as acquired:
        assert_true(lock.acquired)
        assert_true(acquired)
        assert_greater(
            2,
            time() -
            t0)  # should not take any notable time, we cannot be blocking
        """
        # InterProcessLock is not re-entrant so nesting should not be used, will result
        # in exception on release
        with try_lock_informatively(lock, timeouts=[dt, dt*2], proceed_unlocked=True) as acquired:
            assert_true(lock.acquired)  # due to outer cm
            assert_true(acquired)       # lock is reentrant apparently
        """
        # Let's try in a completely different subprocess
        runner = WitlessRunner(env=dict(
            os.environ, DATALAD_LOG_LEVEL='info', DATALAD_LOG_TARGET='stderr'))

        script1 = Path(tempfile + "-script1.py")
        script1_fmt = f"""
from fasteners import InterProcessLock
from time import time

from datalad.support.locking import try_lock_informatively

lock = InterProcessLock({lock_path!r})

with try_lock_informatively(lock, timeouts=[0.05, 0.15], proceed_unlocked={{proceed_unlocked}}) as acquired:
    print("Lock acquired=%s" % acquired)
"""
        script1.write_text(script1_fmt.format(proceed_unlocked=True))
        t0 = time()
        res = runner.run([sys.executable, str(script1)],
                         protocol=StdOutErrCapture)
        assert_in('Lock acquired=False', res['stdout'])
        assert_in(f'Failed to acquire lock at {lock_path} in 0.05',
                  res['stderr'])
        assert_in(f'Failed to acquire lock at {lock_path} in 0.15',
                  res['stderr'])
        assert_in('proceed without locking', res['stderr'])
        assert_greater(time() - t0, 0.19999)  # should wait for at least 0.2
        try:
            import psutil

            # PID does not correspond
            assert_in('Check following process: PID=', res['stderr'])
            assert_in(f'CWD={os.getcwd()} CMDLINE=', res['stderr'])
        except ImportError:
            pass  # psutil was not installed, cannot get list of files
        except AssertionError:
            # we must have had the other one then
            assert_in('failed to determine one', res['stderr'])
            if not on_osx:
                # so far we had only OSX reporting failing to get PIDs information
                # but if it is something else -- re-raise original exception
                raise

        # in 2nd case, lets try without proceeding unlocked
        script1.write_text(script1_fmt.format(proceed_unlocked=False))
        t0 = time()
        with assert_raises(CommandError) as cme:
            runner.run([sys.executable, str(script1)],
                       protocol=StdOutErrCapture)
        assert_in(f"Failed to acquire lock at {lock_path} in 2 attempts.",
                  str(cme.value))
        assert_in(f"RuntimeError", str(cme.value))
        assert_false(
            cme.value.stdout)  # nothing there since print should not happen
        assert_in(f'Failed to acquire lock at {lock_path} in 0.05',
                  cme.value.stderr)
        assert_in(f'Failed to acquire lock at {lock_path} in 0.15',
                  cme.value.stderr)
        assert_greater(time() - t0, 0.19999)  # should wait for at least 0.2

    # now that we left context, should work out just fine
    res = runner.run([sys.executable, str(script1)], protocol=StdOutErrCapture)
    assert_in('Lock acquired=True', res['stdout'])
    assert_not_in('Failed to acquire lock', res['stderr'])
    assert_not_in('PID', res['stderr'])
Example #24
def write_aggregated_stats(base_dirs, outfile, cmdline_path=None):
    '''
    Generate aggregated statistics from the given base directories
    and write them to the specified output file.

    @type base_dirs: list
    @param base_dirs: List of AFL base directories

    @type outfile: str
    @param outfile: Output file for aggregated statistics

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.
    '''

    # Which fields to add
    wanted_fields_total = [
        'execs_done',
        'execs_per_sec',
        'pending_favs',
        'pending_total',
        'variable_paths',
        'unique_crashes',
        'unique_hangs']

    # Which fields to aggregate by mean
    wanted_fields_mean = ['exec_timeout']

    # Which fields should be displayed per fuzzer instance
    wanted_fields_all = ['cycles_done', 'bitmap_cvg']

    # Which fields should be aggregated by max
    wanted_fields_max = ['last_path']

    # Warnings to include
    warnings = list()

    aggregated_stats = {}

    for field in wanted_fields_total:
        aggregated_stats[field] = 0

    for field in wanted_fields_mean:
        aggregated_stats[field] = (0, 0)

    for field in wanted_fields_all:
        aggregated_stats[field] = []

    def convert_num(num):
        if '.' in num:
            return float(num)
        return int(num)

    for base_dir in base_dirs:
        stats_path = os.path.join(base_dir, "fuzzer_stats")

        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        if os.path.exists(stats_path):
            with open(stats_path, 'r') as stats_file:
                stats = stats_file.read()

            for line in stats.splitlines():
                (field_name, field_val) = line.split(':', 1)
                field_name = field_name.strip()
                field_val = field_val.strip()

                if field_name in wanted_fields_total:
                    aggregated_stats[field_name] += convert_num(field_val)
                elif field_name in wanted_fields_mean:
                    (val, cnt) = aggregated_stats[field_name]
                    aggregated_stats[field_name] = (val + convert_num(field_val), cnt + 1)
                elif field_name in wanted_fields_all:
                    aggregated_stats[field_name].append(field_val)
                elif field_name in wanted_fields_max:
                    num_val = convert_num(field_val)
                    if (field_name not in aggregated_stats) or aggregated_stats[field_name] < num_val:
                        aggregated_stats[field_name] = num_val

    # If we don't have any data here, then the fuzzers haven't written any statistics yet
    if not aggregated_stats:
        return

    # Mean conversion
    for field_name in wanted_fields_mean:
        (val, cnt) = aggregated_stats[field_name]
        if cnt:
            aggregated_stats[field_name] = float(val) / float(cnt)
        else:
            aggregated_stats[field_name] = val

    # Verify fuzzmanagerconf exists and can be parsed
    _, cmdline = command_file_to_list(cmdline_path)
    target_binary = cmdline[0] if cmdline else None

    if target_binary is not None:
        if not os.path.isfile("%s.fuzzmanagerconf" % target_binary):
            warnings.append("WARNING: Missing %s.fuzzmanagerconf\n" % target_binary)
        elif ProgramConfiguration.fromBinary(target_binary) is None:
            warnings.append("WARNING: Invalid %s.fuzzmanagerconf\n" % target_binary)

    # Look for unreported crashes
    failed_reports = 0
    for base_dir in base_dirs:
        crashes_dir = os.path.join(base_dir, "crashes")
        if not os.path.isdir(crashes_dir):
            continue
        for crash_file in os.listdir(crashes_dir):
            if crash_file.endswith(".failed"):
                failed_reports += 1
    if failed_reports:
        warnings.append("WARNING: Unreported crashes detected (%d)\n" % failed_reports)

    # Write out data
    fields = []
    fields.extend(wanted_fields_total)
    fields.extend(wanted_fields_mean)
    fields.extend(wanted_fields_all)
    fields.extend(wanted_fields_max)

    max_keylen = max([len(x) for x in fields])

    with InterProcessLock(outfile + ".lock"), open(outfile, 'w') as f:
        for field in fields:
            if field not in aggregated_stats:
                continue

            val = aggregated_stats[field]

            if isinstance(val, list):
                val = " ".join(val)

            f.write("%s%s: %s\n" % (field, " " * (max_keylen + 1 - len(field)), val))

        for warning in warnings:
            f.write(warning)

    return
Example #25
File: lock.py Project: HALFpipe/HALFpipe
class AdaptiveLock:
    def __init__(self, timeout: int = 180):
        self.timeout = timeout

        self.methods: List[str] = ["fcntl", "hard_links", "delay"]

        self.lock_instance = None

    def lock(self, lock_file):
        if self.methods[0] == "hard_links":
            self.lock_instance = FluflLock(
                lock_file, lifetime=self.timeout
            )  # seconds after which the lock is broken

            try:
                self.lock_instance.lock(
                    timeout=self.timeout)  # try for a long time
                return
            except (FluflLockError, TimeOutError):  # timeouts etc.
                pass
            except OSError:  # such as PermissionError
                pass

            logger.warning("Unable to use hard link-based file locks",
                           exc_info=True)

            self.methods.pop(0)
            self.lock(lock_file)

        elif self.methods[0] == "fcntl":
            self.lock_instance = FcntlLock(lock_file)

            acquired = self.lock_instance.acquire(timeout=self.timeout,
                                                  delay=1)

            if acquired:
                return

            logger.warning("Unable to use fcntl-based file locks",
                           exc_info=True)

            self.methods.pop(0)
            self.lock(lock_file)

        else:
            # use a random delay to make write collisions unlikely
            delay = gauss(20, 5)
            if delay > 0:
                sleep(delay)

    def unlock(self):
        if self.methods[0] == "hard_links":
            assert isinstance(self.lock_instance, FluflLock)

            self.lock_instance.unlock(
                unconditionally=True)  # do not raise errors in unlock
            self.lock_instance = None

        elif self.methods[0] == "fcntl":
            assert isinstance(self.lock_instance, FcntlLock)

            self.lock_instance.release()
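
A usage sketch for the fallback chain above: lock() tries fcntl first, falls back to hard links, and finally to a randomized delay, while unlock() releases whichever mechanism is current. The lock path is hypothetical:

adaptive = AdaptiveLock(timeout=180)
adaptive.lock("/tmp/shared-output.lock")
try:
    pass  # write the shared output here
finally:
    adaptive.unlock()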
Example #26
File: __init__.py Project: Zetison/badger
    @contextmanager  # from contextlib; required for the bare yield to work as a context manager
    def acquire_lock(self):
        with InterProcessLock(self.storagepath / 'lockfile'):
            yield
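
With the decorator restored, the method can guard storage mutations in a with block; a usage sketch (the surrounding object is assumed to expose storagepath):

with obj.acquire_lock():
    pass  # read or write files under obj.storagepath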
Example #27
class DownloadDirectory:
    def __init__(self, filepath, digests):
        #: The path to which to save the file after downloading
        self.filepath = Path(filepath)
        #: Expected hashes of the downloaded data, as a mapping from algorithm
        #: names to digests
        self.digests = digests
        #: The working directory in which downloaded data will be temporarily
        #: stored
        self.dirpath = self.filepath.with_name(self.filepath.name +
                                               ".dandidownload")
        #: The file in `dirpath` to which data will be written as it is
        #: received
        self.writefile = self.dirpath / "file"
        #: A `fasteners.InterProcessLock` on `dirpath`
        self.lock = None
        #: An open filehandle to `writefile`
        self.fp = None
        #: How much of the data has been downloaded so far
        self.offset = None

    def __enter__(self):
        from fasteners import InterProcessLock

        self.dirpath.mkdir(parents=True, exist_ok=True)
        self.lock = InterProcessLock(str(self.dirpath / "lock"))
        if not self.lock.acquire(blocking=False):
            raise RuntimeError(
                f"Could not acquire download lock for {self.filepath}")
        chkpath = self.dirpath / "checksum"
        try:
            with chkpath.open() as fp:
                digests = json.load(fp)
        except (FileNotFoundError, ValueError):
            digests = {}
        matching_algs = self.digests.keys() & digests.keys()
        if matching_algs and all(self.digests[alg] == digests[alg]
                                 for alg in matching_algs):
            # Pick up where we left off, writing to the end of the file
            lgr.debug(
                "Download directory exists and has matching checksum; resuming download"
            )
            self.fp = self.writefile.open("ab")
        else:
            # Delete the file (if it even exists) and start anew
            if not chkpath.exists():
                lgr.debug("Starting new download in new download directory")
            else:
                lgr.debug(
                    "Download directory found, but digests do not match; starting new download"
                )
            try:
                self.writefile.unlink()
            except FileNotFoundError:
                pass
            self.fp = self.writefile.open("wb")
        with chkpath.open("w") as fp:
            json.dump(self.digests, fp)
        self.offset = self.fp.tell()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.fp.close()
        try:
            if exc_type is None:
                self.writefile.replace(self.filepath)
        finally:
            self.lock.release()
            if exc_type is None:
                rmtree(self.dirpath, ignore_errors=True)
            self.lock = None
            self.fp = None
            self.offset = None
        return False

    def append(self, blob):
        self.fp.write(blob)
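
A usage sketch for the context manager above; only filepath, digests, offset, and append() come from the class, while the data source is hypothetical:

dd = DownloadDirectory("/data/file.bin", digests={"sha256": "abc123"})
with dd:
    for chunk in fetch_chunks(start=dd.offset):  # hypothetical resumable source
        dd.append(chunk)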
Example #28
from config import config
from fasteners import InterProcessLock

ipl = InterProcessLock(config['lock_file'])

def lock_obtained():
	print('Lock obtained, starting YeenBot...')
	from yeenbot import main
	main()

def lock_not_obtained():
	print('Lock not obtained, exiting')

# Attempt to acquire the lock
acquired = ipl.acquire(blocking=False)
try:
	if acquired:
		lock_obtained()
	else:
		lock_not_obtained()
finally:
	if acquired:
		ipl.release()
Example #29
File: lock.py Project: fossabot/Halfpipe-1
class AdaptiveLock:
    timeout = 600

    def __init__(self):
        self.method = "hard_links"

        self.lock_instance = None

        self.delay = 0.0

    def lock(self, lock_file):
        if self.method == "hard_links":
            self.lock_instance = FluflLock(
                lock_file,
                lifetime=60)  # seconds after which the lock is broken

            try:
                self.lock_instance.lock(
                    timeout=self.timeout)  # try for a long time
                return
            except TimeoutError:
                pass
            except OSError:  # PermissionError is a subclass of OSError
                pass

            logger.warning(
                "Unable to use hard link-based file locks. "
                "Trying fcntl-based file locks",
                exc_info=True)

            self.method = "fcntl"

        if self.method == "fcntl":
            self.lock_instance = FcntlLock(lock_file)

            acquired = self.lock_instance.acquire(timeout=self.timeout)

            if acquired:
                return

            logger.warning(
                "Unable to use fcntl-based file locks. "
                "Disabling file locks",
                exc_info=True)

            self.method = None

        if self.method is None:
            # use a random delay to make write collisions unlikely
            delay = gauss(10.0, 2.5)
            if delay > 0:
                sleep(delay)

    def unlock(self):
        if self.method == "hard_links":
            assert isinstance(self.lock_instance, FluflLock)

            self.lock_instance.unlock(
                unconditionally=True)  # do not raise errors in unlock
            self.lock_instance = None

        elif self.method == "fcntl":
            assert isinstance(self.lock_instance, FcntlLock)

            self.lock_instance.release()
Example #30
class Model:
    """The mod management model"""
    def __init__(self,
                 gamePath: Path,
                 configPath: Path,
                 cachePath: Path,
                 ignorelock: bool = False) -> None:
        self._gamePath: Path = Path()
        self._configPath: Path = Path()
        self._cachePath: Path = Path()
        self._modsPath: Path = Path()
        self._dlcsPath: Path = Path()

        self._modList: Dict[Tuple[str, str], Mod] = {}

        _cachePath = verifyCachePath(cachePath)
        if not _cachePath:
            raise InvalidCachePath(cachePath)
        self._cachePath = _cachePath

        self.updateCallbacks = CallbackList()
        self.updateLock = asyncio.Lock()

        self.conflicts = ModelConflicts()
        self._pool = ProcessPoolExecutor()
        self._iteration = 0

        if not ignorelock:
            self._lock = InterProcessLock(self.lockfile)
            if not self._lock.acquire(False):
                raise OtherInstanceError(self.lockfile)

        self.setPaths(gamePath, configPath)

        # TODO: enhancement: watch mod directory for changes

        logger.debug('Initialized model')
        logger.debug(f'Game path: {self._gamePath}')
        logger.debug(f'Config path: {self._configPath}')
        logger.debug(f'Cache path: {self._cachePath}')
        logger.debug(f'Mods path: {self._modsPath}')

        # TODO: incomplete: implement mod installation management

    def setPaths(self, gamePath: Path, configPath: Path) -> None:
        _gamePath = verifyGamePath(gamePath)
        _configPath = verifyConfigPath(configPath)

        if self._gamePath == _gamePath and self._configPath == _configPath:
            return

        if not _gamePath:
            raise InvalidGamePath(gamePath)
        if not _configPath:
            raise InvalidConfigPath(configPath)

        modsPath = _gamePath.joinpath('Mods')
        _modsPath = verifyModsPath(modsPath)
        dlcsPath = _gamePath.joinpath('DLC')
        _dlcsPath = verifyDlcsPath(dlcsPath)

        if not _modsPath:
            raise InvalidModsPath(modsPath)
        if not _dlcsPath:
            raise InvalidDlcsPath(dlcsPath)

        self._gamePath = _gamePath
        self._configPath = _configPath
        self._modsPath = _modsPath
        self._dlcsPath = _dlcsPath

        self.lastUpdate = datetime.now(tz=timezone.utc)
        self.lastInitialization = datetime.now(tz=timezone.utc)

        self._modList = {}

    @debounce(25)
    async def updateBundledContentsConflicts(self) -> None:
        self._iteration += 1
        conflicts = await asyncio.get_running_loop().run_in_executor(
            self._pool,
            partial(ModelConflicts.fromModList, self._modList,
                    self._iteration))
        if conflicts.iteration == self._iteration:
            self.conflicts = conflicts
            self.updateCallbacks.fire(self)

    async def loadInstalledMod(self, path: Path) -> None:
        if path.joinpath('.w3mm').is_file():
            mod = Mod.from_json(path.joinpath('.w3mm').read_bytes())
            mod.enabled = not path.name.startswith('~')
            mod.filename = re.sub(r'^(~)', r'', path.name)
            if mod.enabled:
                enabled = getSettingsValue(
                    mod.filename, 'Enabled',
                    self.configpath.joinpath('mods.settings'))
                if enabled == '0':
                    mod.enabled = False
            if (mod.filename, mod.target) in self._modList:
                logger.bind(path=path).error('Ignoring duplicate MOD')
                if not self._modList[(mod.filename, mod.target)].enabled:
                    self._modList[(mod.filename, mod.target)] = mod
            else:
                self._modList[(mod.filename, mod.target)] = mod
        else:
            try:
                for mod in await Mod.fromDirectory(path, recursive=False):
                    mod.installdate = datetime.fromtimestamp(
                        path.stat().st_ctime, tz=timezone.utc)
                    mod.target = 'mods'
                    mod.datatype = 'mod'
                    mod.enabled = not path.name.startswith('~')
                    mod.filename = re.sub(r'^(~)', r'', path.name)
                    if mod.enabled:
                        enabled = getSettingsValue(
                            mod.filename, 'Enabled',
                            self.configpath.joinpath('mods.settings'))
                        if enabled == '0':
                            mod.enabled = False
                    if (mod.filename, mod.target) in self._modList:
                        logger.bind(path=path).error('Ignoring duplicate MOD')
                        if not self._modList[(mod.filename,
                                              mod.target)].enabled:
                            self._modList[(mod.filename, mod.target)] = mod
                    else:
                        self._modList[(mod.filename, mod.target)] = mod
                        await self.update(mod)
            except InvalidPathError:
                logger.bind(path=path).debug('Invalid MOD')

    async def loadInstalledDlc(self, path: Path) -> None:
        if path.joinpath('.w3mm').is_file():
            mod = Mod.from_json(path.joinpath('.w3mm').read_bytes())
            mod.enabled = not all(
                file.name.endswith('.disabled') for file in path.glob('**/*')
                if file.is_file() and not file.name == '.w3mm')
            mod.filename = path.name
            self._modList[(mod.filename, mod.target)] = mod
        else:
            try:
                for mod in await Mod.fromDirectory(path, recursive=False):
                    mod.installdate = datetime.fromtimestamp(
                        path.stat().st_ctime, tz=timezone.utc)
                    mod.target = 'dlc'
                    mod.datatype = 'dlc'
                    mod.enabled = not all(
                        file.name.endswith('.disabled')
                        for file in path.glob('**/*')
                        if file.is_file() and not file.name == '.w3mm')
                    mod.filename = path.name
                    self._modList[(mod.filename, mod.target)] = mod
                    await self.update(mod)
            except InvalidPathError:
                logger.bind(path=path).debug('Invalid DLC')

    async def loadInstalled(self) -> None:
        await asyncio.gather(
            *[self.loadInstalledMod(path) for path in self.modspath.iterdir()])
        await asyncio.gather(
            *[self.loadInstalledDlc(path) for path in self.dlcspath.iterdir()])
        self.updateBundledContentsConflicts()
        self.updateCallbacks.fire(self)

    def get(self, mod: ModelIndexType) -> Mod:
        return self[mod]

    def keys(self) -> KeysView[Tuple[str, str]]:
        return self._modList.keys()

    def values(self) -> ValuesView[Mod]:
        return self._modList.values()

    def data(self) -> Dict[Tuple[str, str], Mod]:
        return self._modList

    async def add(self, mod: Mod) -> None:
        # TODO: incomplete: always override compilation trigger mod
        if self.modspath in [mod.source, *mod.source.parents]:
            raise InvalidSourcePath(
                mod.source,
                'Invalid mod source: Mods cannot be installed from the mods directory'
            )
        async with self.updateLock:
            if (mod.filename, mod.target) in self._modList:
                raise ModExistsError(mod.filename, mod.target)
            target = self.getModPath(mod)
            if target.exists():
                # TODO: incomplete: make sure the mod is tracked by the model
                raise ModExistsError(mod.filename, mod.target)
            settings = 0
            inputs = 0
            try:
                event_loop = asyncio.get_running_loop()
                target.mkdir(parents=True)
                # copy mod files
                copies = []
                logger.bind(name=mod.filename,
                            path=target).debug('Copying binary files')
                for _file in mod.files:
                    sourceFile = mod.source.joinpath(_file.source)
                    targetFile = target.joinpath(_file.source)
                    targetFile.parent.mkdir(parents=True, exist_ok=True)
                    copies.append((sourceFile, targetFile))
                await asyncio.gather(*[
                    event_loop.run_in_executor(
                        None, partial(copyfile, _copy[0], _copy[1]))
                    for _copy in copies
                ])
                copies = []
                logger.bind(name=mod.filename,
                            path=target).debug('Copying content files')
                for _content in mod.contents:
                    sourceFile = mod.source.joinpath(_content.source)
                    targetFile = target.joinpath(_content.source)
                    targetFile.parent.mkdir(parents=True, exist_ok=True)
                    copies.append((sourceFile, targetFile))
                await asyncio.gather(*[
                    event_loop.run_in_executor(
                        None, partial(copyfile, _copy[0], _copy[1]))
                    for _copy in copies
                ])
                mod.installed = True
                # update settings
                logger.bind(name=mod.filename,
                            path=target).debug('Updating settings')
                settings = addSettings(
                    mod.settings, self.configpath.joinpath('user.settings'))
                inputs = addSettings(
                    mod.inputs, self.configpath.joinpath('input.settings'))
                setSettingsValue(mod.filename, 'Enabled', '1',
                                 self.configpath.joinpath('mods.settings'))
                await self.update(mod)
            except Exception as e:
                removeDirectory(target)
                if settings:
                    removeSettings(mod.settings,
                                   self.configpath.joinpath('user.settings'))
                if inputs:
                    removeSettings(mod.inputs,
                                   self.configpath.joinpath('input.settings'))
                removeSettingsSection(
                    mod.filename, self.configpath.joinpath('mods.settings'))
                raise e
            self._modList[(mod.filename, mod.target)] = mod
        self.updateBundledContentsConflicts()
        self.setLastUpdateTime(datetime.now(tz=timezone.utc))

    async def update(self, mod: Mod) -> None:
        # serialize and store mod structure
        target = self.getModPath(mod, True)
        try:
            with target.joinpath('.w3mm').open(
                    'w', encoding='utf-8') as modInfoFile:
                modSerialized = mod.to_json()
                modInfoFile.write(modSerialized)
        except Exception as e:
            logger.exception(f'Could not update mod: {e}')

    async def replace(self, filename: str, target: str, mod: Mod) -> None:
        # TODO: incomplete: handle possible conflict with existing mods
        async with self.updateLock:
            self._modList[(filename, target)] = mod
        self.updateBundledContentsConflicts()
        self.setLastUpdateTime(datetime.now(tz=timezone.utc))

    async def remove(self, mod: ModelIndexType) -> None:
        if await self.disable(mod):
            async with self.updateLock:
                mod = self[mod]
                target = self.getModPath(mod, True)
                removeDirectory(target)
                try:
                    removeSettings(mod.settings,
                                   self.configpath.joinpath('user.settings'))
                except Exception as e:
                    logger.bind(name=mod.filename).warning(
                        f'Could not remove settings from user.settings: {e}')
                try:
                    removeSettings(mod.inputs,
                                   self.configpath.joinpath('input.settings'))
                except Exception as e:
                    logger.bind(name=mod.filename).warning(
                        f'Could not remove settings from input.settings: {e}')
                try:
                    removeSettingsSection(
                        mod.filename,
                        self.configpath.joinpath('mods.settings'))
                except Exception as e:
                    logger.bind(name=mod.filename).warning(
                        f'Could not remove settings from mods.settings: {e}')
                del self._modList[(mod.filename, mod.target)]
            self.updateBundledContentsConflicts()
            self.setLastUpdateTime(datetime.now(tz=timezone.utc))

    async def enable(self, mod: ModelIndexType) -> bool:
        async with self.updateLock:
            mod = self[mod]
            oldstat = mod.enabled
            oldpath = self.getModPath(mod, True)
            renamed = False
            undo = False
            renames = []
            settings = 0
            inputs = 0
            try:
                mod.enabled = True
                if mod.target == 'mods':
                    newpath = self.getModPath(mod)
                    if oldpath != newpath:
                        oldpath.rename(newpath)
                        renamed = True
                    setSettingsValue(mod.filename, 'Enabled', '1',
                                     self.configpath.joinpath('mods.settings'))
                if mod.target == 'dlc':
                    for file in oldpath.glob('**/*'):
                        while file.is_file() and file.suffix == '.disabled':
                            file = file.rename(file.with_suffix(''))
                            renames.append(file)
                settings = addSettings(
                    mod.settings, self.configpath.joinpath('user.settings'))
                inputs = addSettings(
                    mod.inputs, self.configpath.joinpath('input.settings'))
                await self.update(mod)
            except PermissionError:
                logger.bind(path=oldpath).exception(
                    'Could not enable mod, invalid permissions. Is it open in the explorer?'
                )
                undo = True
            except Exception as e:
                logger.exception(f'Could not enable mod: {e}')
                mod.enabled = oldstat
                undo = True
            if undo:
                newpath = self.getModPath(mod)
                mod.enabled = oldstat
                if newpath.is_dir() and newpath != oldpath and renamed:
                    newpath.rename(oldpath)
                for rename in reversed(renames):
                    rename.rename(
                        rename.with_suffix(rename.suffix + '.disabled'))
                if settings:
                    removeSettings(mod.settings,
                                   self.configpath.joinpath('user.settings'))
                if inputs:
                    removeSettings(mod.inputs,
                                   self.configpath.joinpath('input.settings'))
                if mod.target == 'mods':
                    setSettingsValue(mod.filename, 'Enabled', '0',
                                     self.configpath.joinpath('mods.settings'))
        # TODO: incomplete: handle xml and ini changes
        if not undo:
            self.updateBundledContentsConflicts()
            self.setLastUpdateTime(datetime.now(tz=timezone.utc))
            return True
        return False

    async def disable(self, mod: ModelIndexType) -> bool:
        async with self.updateLock:
            mod = self[mod]
            oldstat = mod.enabled
            oldpath = self.getModPath(mod, True)
            renamed = False
            undo = False
            renames = []
            settings = 0
            inputs = 0
            try:
                mod.enabled = False
                if mod.target == 'mods':
                    newpath = self.getModPath(mod)
                    if oldpath != newpath:
                        oldpath.rename(newpath)
                        renamed = True
                    setSettingsValue(mod.filename, 'Enabled', '0',
                                     self.configpath.joinpath('mods.settings'))
                if mod.target == 'dlc':
                    for file in oldpath.glob('**/*'):
                        if (file.is_file() and file.name != '.w3mm'
                                and file.suffix != '.disabled'):
                            file = file.rename(
                                file.with_suffix(file.suffix + '.disabled'))
                            renames.append(file)
                settings = removeSettings(
                    mod.settings, self.configpath.joinpath('user.settings'))
                inputs = removeSettings(
                    mod.inputs, self.configpath.joinpath('input.settings'))
                await self.update(mod)
            except PermissionError:
                logger.bind(path=oldpath).exception(
                    'Could not disable mod, invalid permissions. Is it open in the explorer?'
                )
                undo = True
            except Exception as e:
                logger.exception(f'Could not disable mod: {e}')
                undo = True
            if undo:
                newpath = self.getModPath(mod)
                mod.enabled = oldstat
                if newpath.is_dir() and newpath != oldpath and renamed:
                    newpath.rename(oldpath)
                for rename in reversed(renames):
                    rename.rename(rename.with_suffix(''))
                if settings:
                    addSettings(mod.settings,
                                self.configpath.joinpath('user.settings'))
                if inputs:
                    addSettings(mod.inputs,
                                self.configpath.joinpath('input.settings'))
                if mod.target == 'mods':
                    setSettingsValue(mod.filename, 'Enabled', '1',
                                     self.configpath.joinpath('mods.settings'))
        # TODO: incomplete: handle xml and ini changes
        if not undo:
            self.updateBundledContentsConflicts()
            self.setLastUpdateTime(datetime.now(tz=timezone.utc))
            return True
        return False

    async def setFilename(self, mod: ModelIndexType, filename: str) -> None:
        async with self.updateLock:
            mod = self[mod]
            oldname = mod.filename
            oldpath = self.getModPath(mod, True)
            mod.filename = filename
            newpath = self.getModPath(mod)
            renamed = False
            undo = False
            try:
                if oldpath != newpath:
                    oldpath.rename(newpath)
                    renamed = True
                renameSettingsSection(
                    oldname, filename,
                    self.configpath.joinpath('mods.settings'))
                await self.update(mod)
            except PermissionError:
                logger.bind(path=oldpath).exception(
                    'Could not rename mod, invalid permissions. Is it open in the explorer?'
                )
                undo = True
            except Exception as e:
                logger.exception(f'Could not rename mod: {e}')
                undo = True
            if undo:
                mod.filename = oldname
                if renamed:
                    newpath.rename(oldpath)
        self.updateBundledContentsConflicts()
        self.setLastUpdateTime(datetime.now(tz=timezone.utc), False)

    async def setPackage(self, mod: ModelIndexType, package: str) -> None:
        async with self.updateLock:
            mod = self[mod]
            mod.package = package
            await self.update(mod)
        self.setLastUpdateTime(datetime.now(tz=timezone.utc), False)

    async def setCategory(self, mod: ModelIndexType, category: str) -> None:
        async with self.updateLock:
            mod = self[mod]
            mod.category = category
            await self.update(mod)
        self.setLastUpdateTime(datetime.now(tz=timezone.utc), False)

    async def setPriority(self, mod: ModelIndexType, priority: int) -> None:
        async with self.updateLock:
            mod = self[mod]
            mod.priority = priority
            if mod.target == 'mods':
                setSettingsValue(mod.filename, 'Priority',
                                 str(priority) if priority >= 0 else '',
                                 self.configpath.joinpath('mods.settings'))
            await self.update(mod)
        self.updateBundledContentsConflicts()
        self.setLastUpdateTime(datetime.now(tz=timezone.utc), False)

    def setLastUpdateTime(self,
                          time: datetime,
                          fireUpdateCallbacks: bool = True) -> None:
        self.lastUpdate = time
        if fireUpdateCallbacks:
            self.updateCallbacks.fire(self)

    def getModPath(self, mod: ModelIndexType, resolve: bool = False) -> Path:
        if not isinstance(mod, Mod):
            mod = self[mod]
        basepath = self.gamepath.joinpath(mod.target).resolve()
        if not mod.enabled and mod.target == 'mods':
            target = basepath.joinpath(f'~{mod.filename}')
        else:
            target = basepath.joinpath(mod.filename)
        if resolve:
            if not mod.enabled and target.is_dir() \
            and target.parent.joinpath(re.sub(r'^~', r'', target.name)).is_dir():
                # if the mod is disabled but there are two directories with each enabled and disabled names,
                # resolve to the non-disabled directory
                target = target.parent.joinpath(re.sub(r'^~', r'',
                                                       target.name))
            if not target.is_dir():
                if not mod.enabled and target.parent.joinpath(
                        re.sub(r'^~', r'', target.name)).is_dir():
                    target = target.parent.joinpath(
                        re.sub(r'^~', r'', target.name))
                if not target.is_dir():
                    raise ModNotFoundError(mod.filename, mod.target)
        return target

    def __len__(self) -> int:
        return len(self._modList)

    def __getitem__(self, mod: ModelIndexType) -> Mod:
        if isinstance(mod, int):
            return list(self._modList.values())[mod]
        if isinstance(mod, tuple) and len(mod) == 2:
            if mod not in self._modList:
                raise ModNotFoundError(tuple(mod)[0], tuple(mod)[1])
            return self._modList[mod]
        if isinstance(mod, Mod):
            if mod not in self.values():
                raise ModNotFoundError(mod.filename, mod.target)
            return mod
        raise IndexError(f'invalid index type {type(mod)}')

    def __iter__(self) -> Iterator[Tuple[str, str]]:
        yield from self._modList

    @property
    def lockfile(self) -> Path:
        return self._cachePath.joinpath('w3mm.lock')

    @property
    def gamepath(self) -> Path:
        return self._gamePath

    @property
    def configpath(self) -> Path:
        return self._configPath

    @property
    def cachepath(self) -> Path:
        return self._cachePath

    @property
    def modspath(self) -> Path:
        return self._modsPath

    @property
    def dlcspath(self) -> Path:
        return self._dlcsPath
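
A construction sketch for the model above: the InterProcessLock on w3mm.lock in the cache directory makes a second instance raise OtherInstanceError unless ignorelock is set. The paths are hypothetical:

from pathlib import Path

try:
    model = Model(gamePath=Path('C:/Games/The Witcher 3'),
                  configPath=Path('C:/Users/me/Documents/The Witcher 3'),
                  cachePath=Path('C:/Users/me/AppData/Local/w3mm'))
except OtherInstanceError:
    print('Another instance of the mod manager is already running.')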