Example #1
    def get_all(self):
        """Get the whole database.

        Returns:
            Returns the whole database.
        """
        with portalocker.Lock(GITCACHE_DB_LOCK):
            self._load()

        return self.database
Example #2
    def _load(self):
        with portalocker.Lock(self._file, mode="rb") as f:
            data = pickle.load(f)
            self._pokemon_hist = data.get('pokemon_hist', {})
            self._pokestop_hist = data.get('pokestop_hist', {})
            self._gym_team = data.get('gym_team', {})
            self._gym_info = data.get('gym_info', {})
            self._egg_hist = data.get('egg_hist', {})
            self._raid_hist = data.get('raid_hist', {})
            log.debug("LOADED: \n {}".format(data))
Example #3
    def _lock(self):
        self.file_locker = portalocker.Lock(
            self.file_name,
            timeout=0,
            flags=portalocker.LOCK_EX | portalocker.LOCK_NB,
        )
        try:
            self.file_locker.acquire()
        except portalocker.exceptions.LockException:
            raise PermissionError()
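The matching release step is implied but not shown; a minimal sketch, assuming the lock is held in self.file_locker as above (the method name _unlock is hypothetical):

    def _unlock(self):
        # Hypothetical counterpart to _lock(): release the exclusive lock.
        if self.file_locker is not None:
            self.file_locker.release()
            self.file_locker = None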
Example #4
    def load(self):
        """Load logs from file."""
        self._lockfile = portalocker.Lock(self._lockfile, timeout=10)
        try:
            with lzma.open(self.pickle_file, 'rb') as infile:
                self.logs = pickle.load(infile)
        except FileNotFoundError:
            pass
Example #5
@contextlib.contextmanager  # needed because filelock() yields while holding the lock (requires: import contextlib)
def filelock(filename=None):
    if filename is None:
        filename = os.path.join(main_file_path(), '.bd.lock')
    print('Acquiring lock...')
    with portalocker.Lock(filename, 'w', timeout=60) as lockfile:
        lockfile.flush()
        os.fsync(lockfile.fileno())
        yield
        if os.path.exists(filename):
            os.remove(filename)
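A minimal usage sketch for the context manager above (do_work() is a hypothetical stand-in for the guarded critical section):

with filelock():
    do_work()  # hypothetical work performed while the lock file is held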
Example #6
	def lock_or_open(self, path, mode):
		if self._lock_strategy == "FILE":
			return portalocker.Lock(
				path,
				mode,
				flags=portalocker.LOCK_EX,
				timeout=1,
				fail_when_locked=True)
		else:
			return open(path, mode)
Example #7
    def remove(self, path):
        """Remove an entry from the database.

        Args:
            path (str): The path of the repository mirror.
        """
        with portalocker.Lock(GITCACHE_DB_LOCK):
            self._load()
            del self.database[path]
            self._save()
Example #8
def loop(
    processor: Processor,
    interval: float,
    run_for: Optional[int],
    woke_up_file: str,
    lock_file: str,
    lock_timeout: float,
) -> None:
    """Run the main loop of the daemon.

    Args:
        processor:
            the processor to use for handling the suspension computations
        interval:
            the length of one iteration of the main loop in seconds
        run_for:
            if specified, run the main loop for the specified amount of seconds
            before terminating (approximately)
        woke_up_file:
            path of a file that marks that the system was sleeping since the
            last processing iteration
        lock_file:
            path of a file used for locking modifications to the `woke_up_file`
            to ensure consistency
        lock_timeout:
            time in seconds to wait for acquiring the lock file
    """

    start_time = datetime.datetime.now(datetime.timezone.utc)
    while run_for is None or (
            datetime.datetime.now(datetime.timezone.utc)
            < start_time + datetime.timedelta(seconds=run_for)):

        try:
            _logger.debug("New iteration, trying to acquire lock")
            with portalocker.Lock(lock_file, timeout=lock_timeout):
                _logger.debug("Acquired lock")

                just_woke_up = os.path.isfile(woke_up_file)
                if just_woke_up:
                    _logger.debug("Removing woke up file at %s", woke_up_file)
                    try:
                        os.remove(woke_up_file)
                    except FileNotFoundError:
                        _logger.warning("Just woke up file disappeared",
                                        exc_info=True)

                processor.iteration(
                    datetime.datetime.now(datetime.timezone.utc), just_woke_up)

        except portalocker.LockException:
            _logger.warning("Failed to acquire lock, skipping iteration",
                            exc_info=True)

        time.sleep(interval)
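The loop only consumes the wake-up marker; a sketch of how a resume hook might create it under the same lock protocol (notify_wakeup is hypothetical, not part of the original daemon):

def notify_wakeup(woke_up_file: str, lock_file: str, lock_timeout: float) -> None:
    # Hypothetical resume hook: create the marker under the same lock so the
    # main loop observes a consistent state on its next iteration.
    with portalocker.Lock(lock_file, timeout=lock_timeout):
        with open(woke_up_file, "a"):
            pass  # an empty marker file is enough; only its existence matters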
Example #9
    def clear_counters(self, path):
        """Clear all counters of a mirror.

        Args:
            path (str):    The path of the repository mirror.
        """
        with portalocker.Lock(GITCACHE_DB_LOCK):
            self._load()
            for counter in ['mirror-updates', 'clones', 'updates']:
                self.database[path][counter] = 0
            self._save()
Example #10
    def purge_credential_cache(self):
        logger.info('Attempting to purge database credential cache. path=[%s]',
                    self.cache_filename)
        with portalocker.Lock(self.lock_filename, timeout=10):
            try:
                os.unlink(self.cache_filename)
            except FileNotFoundError:
                logger.info(
                    'Failed to purge Database credential cache because cache file was not found. path=[%s]',
                    self.cache_filename)
Example #11
    def handle_submit(self, sender):
        import portalocker
        with portalocker.Lock(self.results_filename, "a+") as g:
            g.write("%s::%s::%s::%s\n" %
                    (self.id, getpass.getuser(), datetime.datetime.today(),
                     sender.description))
            g.flush()
            os.fsync(g.fileno())
        self.output.clear_output()
        with self.output:
            print("Received: " + sender.description)
Example #12
    def __init__(self, path):
        self.lock = None
        self.path = path

        self.lock = portalocker.Lock(path,
                                     "w",
                                     timeout=1,
                                     fail_when_locked=True,
                                     flags=portalocker.LOCK_EX
                                     | portalocker.LOCK_NB)
        self.file = None
Example #13
    def next_stage(self, id_, h):
        with portalocker.Lock(self.filepath, mode='r+b',
                              timeout=self.timeout) as handle:
            hcomb_list = self._read_hcomb_list(handle)

            h = h.__dict__

            h['STAGE'] += 1

            self._replace_at_id(hcomb_list, id_, h)
            self._write_hcomb_list(hcomb_list, handle)
Example #14
    def __init__(self, filename):
        self.lock_file = filename + ".lock"
        self.filename = filename
        self.f = None
        self.lock = portalocker.Lock(self.lock_file,
                                     "r",
                                     flags=portalocker.LOCK_SH,
                                     timeout=1)
        if not os.path.exists(self.lock_file):
            f = open(self.lock_file, "w+")
            f.close()
Example #15
def lock(obj):
    if isinstance(obj, basestring):  # Python 2 API; use str on Python 3
        lock_name = obj
    else:
        lock_name = "{0}-{1}".format(obj["type"], obj["id"])

    lock_dir = Config.lock_dir()
    if not os.path.exists(lock_dir):
        os.mkdir(lock_dir)
    return LockWrapper(lock_name,
                       portalocker.Lock(os.path.join(lock_dir, lock_name)))
Example #16
def write_results_file(output_file_path: str, results: pd.DataFrame):
    """ Write results into a file either as json or csv """

    file_extension = output_file_path.split('.')[-1]

    print(results)

    results = pd.DataFrame(results, index=[0])

    if file_extension == 'json':
        import portalocker

        # For JSON we have to deal with concurrent file access, therefore we
        # use portalocker to lock the file while reading, constructing the
        # new JSON, and writing
        with portalocker.Lock(output_file_path, mode='a+', timeout=120) as f:

            f.seek(0)

            # Read the old results file if it exists
            if f.read(1) != '':
                f.seek(0)
                old_results = pd.read_json(f)

                # Delete old content
                f.seek(0)
                f.truncate()

                # Combine old and new results (even if they have different columns)
                results = pd.concat([old_results, results], axis=0, ignore_index=True)

            # Write the combined results back to the file
            results.to_json(f)
            f.flush()
            os.fsync(f.fileno())

    elif file_extension == 'csv':

        # The initial write has to write the column headers if the file doesn't
        # exist yet
        initial_write = not os.path.isfile(output_file_path)

        # Write result to file and retry indefinitely if it failed
        while True:
            try:
                results.to_csv(
                    output_file_path, mode='a', header=initial_write, index=False
                )
            except:
                continue
            break

    else:
        print('Invalid file extension: ', file_extension)
Example #17
    def _reload_cache(self):
        with self.lock:
            fpath = self._cache_fpath()
            try:
                with portalocker.Lock(fpath, mode='rb') as cache_file:
                    try:
                        self.cache = pickle.load(cache_file)
                    except EOFError:
                        self.cache = {}
            except FileNotFoundError:
                self.cache = {}
Example #18
    def list(self, bot, update):
        """Send a message when the command /list is issued."""
        with portalocker.Lock(config.FUNKO_POP_LIST, "r",
                              timeout=1) as data_file:
            funkopop_links = json.load(data_file)

        if not funkopop_links:
            update.message.reply_text('No entries in search.')

        for elem in funkopop_links:
            update.message.reply_text(elem["url"])
Example #19
def setAccount(account_id):
    try:
        account_cache_semaphore.acquire(1)
        config.logger.debug("account:setAccount(%s)", str(account_id))
        with portalocker.Lock(account_cache_lock_path, timeout=0.5) as fh:
            return setAccountShelve(account_id, fh)
    except portalocker.exceptions.LockException as e:
        _, _, exc_tb = sys.exc_info()
        config.logger.info(
            "account: LockException in setAccount(%s) line: %s %s",
            str(account_id), exc_tb.tb_lineno, e)
        # we couldn't fetch this lock. It seems to be blocked forever (from a crash?)
        # we remove the lock file and retry with a shorter timeout
        try:
            config.logger.info("acount: clean lock %s",
                               str(account_cache_lock_path))
            Path(account_cache_lock_path).unlink()
            config.logger.debug("retry account:setAccount(%s)",
                                str(account_id))
            with portalocker.Lock(account_cache_lock_path, timeout=0.3) as fh:
                return setAccountShelve(account_id, fh)
        except portalocker.exceptions.LockException as e:
            _, _, exc_tb = sys.exc_info()
            config.logger.error(
                "account: LockException in setAccount(%s) line: %s %s",
                str(account_id), exc_tb.tb_lineno, e)
            return None
        except Exception as e:
            _, _, exc_tb = sys.exc_info()
            config.logger.error(
                "account: Exception in setAccount(%s) line: %s %s",
                str(account_id), exc_tb.tb_lineno, e)
            return None
    except Exception as e:
        _, _, exc_tb = sys.exc_info()
        config.logger.error("account: Exception in setAccount(%s) line %s: %s",
                            str(account_id), exc_tb.tb_lineno, e)
        return None
    finally:
        if account_cache_semaphore.available() < 1:
            account_cache_semaphore.release(1)
Example #20
    def save_update_time(self, path):
        """Save the current time as the last-update-time of the mirror.

        Args:
            path (str): The path of the repository mirror.
        """
        with portalocker.Lock(GITCACHE_DB_LOCK):
            self._load()
            self.database[path]["last-update-time"] = time.time()
            self.database[path]["mirror-updates"] += 1
            self._save()
Example #21
    def _save_cache(self, cache, key=None, hash=None):
        with self.lock:
            self.cache = cache
            fpath = self._cache_fpath()
            if key is not None:
                fpath += f'_{hashlib.sha256(pickle.dumps(key)).hexdigest()}'
            elif hash is not None:
                fpath += f'_{hash}'
            with portalocker.Lock(fpath, mode='wb') as cache_file:
                pickle.dump(cache, cache_file, protocol=4)
            if key is None:
                self._reload_cache()
Example #22
    def __init__(self, name):
        """ Initializes a new cache object for storing data between events. """
        super(FileCache, self).__init__()
        self._name = name
        self._file = get_path(os.path.join("cache", "{}.cache".format(name)))

        log.debug("Checking for previous cache at {}".format(self._file))
        if os.path.isfile(self._file):
            self._load()
        else:
            with portalocker.Lock(self._file, mode="wb+") as f:
                pickle.dump({}, f, protocol=pickle.HIGHEST_PROTOCOL)
Example #23
def read_file(filename):
    lock = portalocker.Lock(filename, mode='a+b', flags=portalocker.LOCK_EX)
    lock.acquire()
    fh = lock.fh
    fh.seek(0)
    if len(fh.read()) == 0:
        file_data = None
    else:
        fh.seek(0)
        file_data = pickle.load(fh)
    lock.release()
    return file_data
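A complementary locked write, sketched in the same style (write_file is hypothetical; the original project may handle writes differently):

def write_file(filename, file_data):
    # Rewrite the file under an exclusive lock, mirroring read_file() above.
    lock = portalocker.Lock(filename, mode='a+b', flags=portalocker.LOCK_EX)
    lock.acquire()
    fh = lock.fh
    fh.seek(0)
    fh.truncate()
    pickle.dump(file_data, fh)
    fh.flush()
    lock.release()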
Example #24
    def __init__(self, name):
        """ Initialize a new cache object, retrieving any previously saved results if possible. """
        super(FileCache, self).__init__()
        self._name = name
        self._file = get_path(os.path.join("cache", "{}.cache".format(name)))

        log.debug("Checking for previous cache at {}".format(self._file))
        if os.path.isfile(self._file):
            self._load()
        else:
            with portalocker.Lock(self._file, mode="wb+") as f:
                pickle.dump({}, f, protocol=pickle.HIGHEST_PROTOCOL)
Example #25
    def _load_checkpoint(self, pid):
        """
        Load a checkpoint from a pickle. Note that this will not properly check for
        locks and should not be called outside of this class
        """
        for check_dir in [self._running_directory, self._failed_directory, self._finished_directory]:
            filepath = path.join(check_dir, self.pickle_filename(pid))
            if path.isfile(filepath):
                with portalocker.Lock(filepath, 'r', timeout=1) as file:
                    return self.load_checkpoint_from_file_object(file)

        raise ValueError("checkpoint with pid '{}' does not exist".format(pid))
Example #26
    def increment_counter(self, path, counter):
        """Increment a counter of a mirror.

        Args:
            path (str):    The path of the repository mirror.
            counter (str): The counter to increment. Use one of 'mirror-updates', 'clones' or
                           'updates'.
        """
        with portalocker.Lock(GITCACHE_DB_LOCK):
            self._load()
            self.database[path][counter] = self.database[path][counter] + 1
            self._save()
Example #27
def thaw(file="artifacts.pickle", timeout=10):
    with portalocker.Lock(file, "rb+", timeout=timeout) as fh:
        pickles = pickle.load(fh)
        fh.seek(0)
        # Scramble the file before deleting it so that anything still trying to unpickle
        # it errors out, rather than silently creating a new file that will be
        # ignored.
        fh.write(b"all your frozen data melted, move along.")
    # After a brief delay, delete the scrambled file.
    time.sleep(0.1)
    os.remove(file)
    return pickles
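thaw() implies a writer that froze the artifacts in the first place; a minimal sketch of such a counterpart (freeze is hypothetical and does not appear in the source):

def freeze(artifacts, file="artifacts.pickle", timeout=10):
    # Hypothetical counterpart to thaw(): pickle the artifacts under an
    # exclusive lock, creating the file if necessary.
    with portalocker.Lock(file, "wb", timeout=timeout) as fh:
        pickle.dump(artifacts, fh)
        fh.flush()
        os.fsync(fh.fileno())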
Example #28
def import_try_install(package, extern_url=None):
    """Try import the specified package.
    If the package not installed, try use pip to install and import if success.

    Parameters
    ----------
    package : str
        The name of the package trying to import.
    extern_url : str or None, optional
        The external url if package is not hosted on PyPI.
        For example, you can install a package using:
         "pip install git+http://github.com/user/repo/tarball/master/egginfo=xxx".
        In this case, you can pass the url as extern_url.

    Returns
    -------
    <class 'Module'>
        The imported python module.

    """
    import tempfile
    import portalocker
    lockfile = os.path.join(tempfile.gettempdir(), package + '_install.lck')
    with portalocker.Lock(lockfile):
        try:
            return __import__(package)
        except ImportError:
            try:
                from pip import main as pipmain
            except ImportError:
                from pip._internal import main as pipmain
                from types import ModuleType
                # fix for pip 19.3
                if isinstance(pipmain, ModuleType):
                    from pip._internal.main import main as pipmain

            # trying to install package
            url = package if extern_url is None else extern_url
            pipmain(['install', '--user',
                     url])  # will raise SystemExit Error if fails

            # trying to load again
            try:
                return __import__(package)
            except ImportError:
                import sys
                import site
                user_site = site.getusersitepackages()
                if user_site not in sys.path:
                    sys.path.append(user_site)
                return __import__(package)
    return __import__(package)
Example #29
def cache_all_hed_xml_versions(hed_base_urls=DEFAULT_URL_LIST,
                               skip_folders=DEFAULT_SKIP_FOLDERS,
                               cache_folder=None):
    """Cache a file from a URL.

    Parameters
    ----------
    hed_base_urls: str or [str]
        Path to a directory on GitHub API, or a list of them.
        eg: https://api.github.com/repos/hed-standard/hed-specification/contents/hedxml
    skip_folders: [str]
        A list of subfolders to skip over when downloading schemas.  Default 'deprecated'
    cache_folder: str
        hed cache folder: Defaults to HED_CACHE_DIRECTORY
    Returns
    -------
    time_since_update: float
        -1 if cache failed
        positive number meaning time in seconds since last update if it didn't cache
        0 if it cached successfully this time
    """
    if not cache_folder:
        cache_folder = HED_CACHE_DIRECTORY
    if not isinstance(hed_base_urls, (list, tuple)):
        hed_base_urls = [hed_base_urls]
    os.makedirs(cache_folder, exist_ok=True)
    last_timestamp = _read_last_cached_time(cache_folder)
    current_timestamp = time.time()
    time_since_update = current_timestamp - last_timestamp
    if time_since_update < CACHE_TIME_THRESHOLD:
        return time_since_update

    try:
        cache_lock_filename = os.path.join(cache_folder, "cache_lock.lock")
        with portalocker.Lock(cache_lock_filename, timeout=1):
            for hed_base_url in hed_base_urls:
                all_hed_versions = _get_hed_xml_versions_from_url(
                    hed_base_url,
                    skip_folders=skip_folders,
                    get_all_libraries=True)
                for library_name, hed_versions in all_hed_versions.items():
                    for version, version_info in hed_versions.items():
                        _cache_hed_version(version,
                                           library_name,
                                           version_info,
                                           cache_folder=cache_folder)

            _write_last_cached_time(current_timestamp, cache_folder)
    except portalocker.exceptions.LockException:
        return -1

    return 0
Example #30
def addSync(uuid, modified_at):
    try:
        config.logger.debug("sync:addSync(" + str(uuid) + "," +
                            str(modified_at) + ")")
        sync_cache_semaphore.acquire(1)
        with portalocker.Lock(getSyncPath(lock=True), timeout=1) as _:
            with shelve.open(getSyncPath()) as db:
                db[uuid] = modified_at
    except Exception as e:
        config.logger.error("sync: Exception in addSync() %s", e)
    finally:
        if sync_cache_semaphore.available() < 1:
            sync_cache_semaphore.release(1)
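A read-side counterpart under the same lock and semaphore discipline might look like this (getSync is hypothetical; only addSync appears in the source):

def getSync(uuid):
    # Hypothetical lookup: fetch the stored timestamp for a uuid, or None.
    try:
        sync_cache_semaphore.acquire(1)
        with portalocker.Lock(getSyncPath(lock=True), timeout=1) as _:
            with shelve.open(getSyncPath()) as db:
                return db.get(uuid)
    except Exception as e:
        config.logger.error("sync: Exception in getSync() %s", e)
        return None
    finally:
        if sync_cache_semaphore.available() < 1:
            sync_cache_semaphore.release(1)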