Example #1
0
 def __call__(
     self,
     key,
     f,
     time_expire = DEFAULT_TIME_EXPIRE,
     ):
     """Return the cached value for `key`, recomputing it via f() when stale.

     If `f` is None the cached item (if any) is deleted and None is
     returned.  Otherwise a cached value younger than `time_expire`
     seconds is returned as-is; anything older (or missing) is replaced
     by a fresh f() result.  `time_expire=None` means "never expires".
     """
     dt = time_expire
     # Exclusive file lock while the shelve storage is touched.
     locker = open(self.locker_name,'a')
     portalocker.lock(locker, portalocker.LOCK_EX)
     storage = shelve.open(self.shelve_name)
     item = storage.get(key, None)
     # f is None means "forget this key".
     if item and f is None:
         del storage[key]
     portalocker.unlock(locker)
     locker.close()
     if f is None:
         storage.close()  # fix: shelve handle was leaked on this path
         return None
     if item and (dt is None or item[0] > time.time() - dt):
         storage.close()  # fix: shelve handle was leaked on this path
         return item[1]
     value = f()
     # Re-acquire the lock only for the write; f() may be slow and must
     # not run while holding the lock.
     locker = open(self.locker_name,'a')
     portalocker.lock(locker, portalocker.LOCK_EX)
     storage[key] = (time.time(), value)
     storage.sync()
     storage.close()  # fix: shelve handle was leaked here too
     portalocker.unlock(locker)
     locker.close()
     return value
Example #2
0
 def _open_shelve_and_lock(self):
     """Open and return a shelf object, obtaining an exclusive lock
     on self.locker first. Replaces the close method of the
     returned shelf instance with one that releases the lock upon
     closing.

     NOTE(review): Python 2 syntax (`except Exception, e`,
     `raise RuntimeError, ...`); this snippet appears truncated -- the
     success path that returns the shelf is not visible here."""
     
     storage = None
     locker = None
     locked = False
     try:
         # NOTE(review): duplicated assignment, probably a merge artifact
         locker = locker = open(self.locker_name, 'a')
         portalocker.lock(locker, portalocker.LOCK_EX)
         locked = True
         try:
             storage = shelve.open(self.shelve_name)
         except:
             # any open failure is treated as a corrupted cache file
             logger.error('corrupted cache file %s, will try rebuild it' \
                              % (self.shelve_name))
             storage = None
         # rebuild from scratch when the existing file could not be opened
         if not storage and os.path.exists(self.shelve_name):
             os.unlink(self.shelve_name)
             storage = shelve.open(self.shelve_name)
         # seed the hit/miss statistics record on first use
         if not CacheAbstract.cache_stats_name in storage.keys():
             storage[CacheAbstract.cache_stats_name] = {'hit_total':0, 'misses': 0}
         storage.sync()
     except Exception, e:
         # release everything acquired so far before propagating
         if storage:
             storage.close()
             storage = None
         if locked:
             portalocker.unlock(locker)
             locker.close()
         locked = False
         raise RuntimeError, 'unable to create/re-create cache file %s' % self.shelve_name
Example #3
0
def upgradeConfig(server):  # upgrade the configuration file
    """Migrate an on-disk config.yaml to the current config_version.

    Reads the old file under a shared lock, merges it over the defaults,
    applies per-version migrations and, when anything changed, writes the
    result back under an exclusive lock.

    :param server: plugin server instance (used for logging)
    """
    _processed = False
    config_path = f"config/{PLUGIN_METADATA['name']}/config.yaml"
    with open(config_path, 'r', encoding='utf8') as f:
        portalocker.lock(f, portalocker.LOCK_SH)
        old_data = yaml.safe_load(f)
    # Start from the defaults, then overlay whatever the user had set.
    data = yaml.safe_load(default_config)
    data['config'].update(old_data['config'])
    if 'permission' in old_data:
        data['permission'].update(old_data['permission'])
    from_config_version = old_data['config_version']
    # v1-v3: introduce the player_id_type key.
    if from_config_version in range(1, 3 + 1):
        data['config']['player_id_type'] = 'name'
        _processed = True
    # v4: detect_player_by was replaced by player_id_type.
    if from_config_version == 4:
        if 'detect_player_by' in old_data['config']:
            del data['config']['detect_player_by']
        data['config']['player_id_type'] = 'name'
        _processed = True
    if from_config_version == 5:
        _processed = True
    if _processed:
        data['config_version'] = config_version
        server.logger.info(
            f"Upgrading Configuration ({from_config_version} -> {config_version})"
        )
        with open(config_path, 'w', encoding='utf8') as f:
            portalocker.lock(f, portalocker.LOCK_EX)
            yaml.dump(data, f, indent=4, sort_keys=False)
Example #4
0
def test_simple():
    """Exclusively lock the fixture file, patch three bytes at offset 12.

    Uses a context manager so the handle is closed (and the lock released)
    even if the write raises -- the original leaked the handle on error.
    """
    with open('tests/test_file.txt', 'r+') as fh:
        portalocker.lock(fh, portalocker.LOCK_EX)
        fh.seek(12)
        fh.write('foo')
        portalocker.unlock(fh)
Example #5
0
def getPermissionList(userlevel=None,
                      usergroup=None,
                      recursion=True):
    """Return the list of permissions available to a user group.

    :param userlevel: when given, resolves the group via valid_usergroups
    :param usergroup: group name to look up (ignored if userlevel is set)
    :param recursion: resolve inherited groups listed inside the permissions
    :return: de-duplicated list of permissions, or 'invalid_usergroup'
    """
    if userlevel is not None:
        usergroup = valid_usergroups[userlevel]
    with open(f"config/{PLUGIN_METADATA['name']}/config.yaml",
              'r',
              encoding='utf8') as f:
        portalocker.lock(f, portalocker.LOCK_SH)
        data = yaml.safe_load(f)
    if usergroup not in data['permission']:
        return 'invalid_usergroup'
    permission_list = data['permission'][usergroup]
    if recursion is True:
        # 'all' short-circuits to every known permission.
        if 'all' in data['permission'][usergroup]:
            return valid_permissions
        # Group names appearing inside the list denote inheritance:
        # replace each one by that group's (recursively resolved) permissions.
        inheritance_usergroups = list(
            set(data['permission'].keys()).intersection(set(permission_list)))
        for inheritance_usergroup in inheritance_usergroups:
            permission_list.remove(inheritance_usergroup)
            permission_list.extend(
                getPermissionList(usergroup=inheritance_usergroup))
    return list(set(permission_list))
Example #6
0
def readHomeList():
    """Load and return the home teleport points from homes.json."""
    home_path = f"config/{PLUGIN_METADATA['name']}/homes.json"
    with open(home_path, 'r', encoding='utf8') as home_file:
        # shared lock: concurrent readers are fine, writers must wait
        portalocker.lock(home_file, portalocker.LOCK_SH)
        return json.load(home_file)
    def _try_store_on_disk(self, request, response):
        """Persist this session dict to its file, unless nothing needs saving.

        Skips the write when sessions are database-backed, when the md5
        content hash shows no change since load, or when there is no
        session id / the session was explicitly forgotten.
        """
        # don't save if sessions are not file-based
        if response.session_db:
            return

        # don't save if no change to session: compare the digest captured
        # at load time against a fresh digest of the current content
        __hash = self.__hash
        if __hash is not None:
            del self.__hash
            if __hash == hashlib.md5(str(self)).digest():
                self._close(response)
                return

        # nothing to store without an id, or the caller asked to forget
        if not response.session_id or self._forget:
            self._close(response)
            return

        if response.session_new:
            # Tests if the session sub-folder exists, if not, create it
            session_folder = os.path.dirname(response.session_filename)
            if not os.path.exists(session_folder):
                os.mkdir(session_folder)
            # exclusive lock held for the lifetime of the response
            response.session_file = open(response.session_filename, 'wb')
            portalocker.lock(response.session_file, portalocker.LOCK_EX)
            response.session_locked = True

        if response.session_file:
            # NOTE(review): Python 2 code (cPickle); truncate() drops stale
            # trailing bytes left from a longer previous session blob
            cPickle.dump(dict(self), response.session_file)
            response.session_file.truncate()
            self._close(response)
Example #8
0
    def from_conf(cls, path=None, **overrides):
        '''Initialize instance from YAML configuration file,
            writing updates (only to keys, specified by "conf_update_keys") back to it.

            NOTE(review): Python 2 code (dict.viewitems).'''
        import yaml

        if path is None:
            path = cls.conf_path_default
            log.debug('Using default state-file path: {}'.format(path))
        path = os.path.expanduser(path)
        # Read the whole file under a shared lock, released right after.
        with open(path, 'r') as src:
            portalocker.lock(src, portalocker.LOCK_SH)
            # fcntl.lockf(src, fcntl.LOCK_SH)
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary tags -- unsafe if the config file is untrusted.
            conf = yaml.load(src.read())
            portalocker.unlock(src)
        conf.setdefault('conf_save', path)

        # Flatten whitelisted "<section>.<key>" values into constructor
        # keyword arguments named "<section>_<key>".
        conf_cls = dict()
        for ns, keys in cls.conf_update_keys.viewitems():
            for k in keys:
                try:
                    v = conf.get(ns, dict()).get(k)
                except AttributeError:
                    # section exists but is not a mapping
                    if not cls.conf_raise_structure_errors: raise
                    raise KeyError('Unable to get value for configuration parameter'
                                   ' "{k}" in section "{ns}", check configuration file (path: {path}) syntax'
                                   ' near the aforementioned section/value.'.format(ns=ns, k=k, path=path))
                if v is not None:
                    conf_cls['{}_{}'.format(ns, k)] = conf[ns][k]
        # Explicit overrides win over file values.
        conf_cls.update(overrides)

        self = cls(**conf_cls)
        self.conf_save = conf['conf_save']
        return self
Example #9
0
def test():
    """Hold a shared lock on the sample movie for 20 seconds, then release.

    Closing the file (end of the `with` block) drops the lock.
    """
    with open('/Users/patrickcusack/Documents/Rebuild.MOV', 'r') as handle:
        print('Hasher: locking file...')
        portalocker.lock(handle, portalocker.LOCK_SH)
        sleep(20)
        print('Hasher: unlocking file...')
Example #10
0
def load_json_or_empty(path, default=None, kind=None, lock=False):
    """
    Load the contents of the given file as JSON and return its value,
    or `default` if the file can't be loaded.

    :param path: file to read
    :param default: value returned on any failure
    :param kind: human-readable file kind used in log messages
    :param lock: when True, hold a shared portalocker lock while reading
    """

    ret = default
    try:
        with open(path, 'r', encoding='utf-8', errors='ignore') as handle:
            if lock:
                portalocker.lock(handle, portalocker.LOCK_SH)

            ret = json.loads(handle.read())

            if lock:
                portalocker.unlock(handle)
    except OSError as ex:
        # OSError also covers IOError (aliases on Python 3), so the two
        # previously duplicated handlers are merged here.
        LOG.warning("Failed to open %s file: %s", kind if kind else 'json',
                    path)
        LOG.warning(ex)
    except ValueError as ex:
        # fix: the arguments were swapped -- the path belongs in the first
        # placeholder, the kind in the second.
        LOG.warning("'%s' is not a valid %s file.", path,
                    kind if kind else 'json')
        LOG.warning(ex)
    except TypeError as ex:
        LOG.warning('Failed to process json file: %s', path)
        LOG.warning(ex)

    return ret
Example #11
0
def preprocess(dbPath):
    '''
    Preprocess daemon loop.

    Polls the DataStore at dbPath every 5 seconds for records that need
    hashing, marks records whose file vanished as missing, and stamps each
    remaining record with a (currently no-op) hash plus start/end
    timestamps. Runs forever.
    '''
    logging = DefaultLogger()

    if not os.path.exists(dbPath):
        logging.debug('PreProcess: can\'t find database at path')
        return

    datastore = DataStore(dbPath)
    loopcount = 0

    while True:
        sleep(5)

        # heartbeat roughly every 50 seconds (10 iterations x 5 s sleep)
        if loopcount % 10 == 0:
            logging.debug('PreProcess is alive')
        loopcount += 1

        data = datastore.recordsForHashing()
        for record in data:
            logging.debug(record)

            key_id = record.id
            filePath = record.fileName

            if not os.path.exists(filePath):
                logging.debug(
                    'PreProcess: Will update record status as the file no longer exists'
                )
                datastore.updateRecordAsMissingWithID(key_id)
                continue

            try:
                logging.debug('PreProcess: locking file to calculate hash...')
                ##UPDATE HASH OPERATION START HERE
                startTime = datetime.datetime.now().strftime(
                    "%Y-%m-%d %H:%M:%S")
                datastore.updateRecordWithHashStart(startTime, key_id)

                # shared lock while "hashing"; actual hashing is disabled
                fileToHash = open(filePath, 'rb')
                portalocker.lock(fileToHash, portalocker.LOCK_SH)
                hashString = "NO_OP"  #hashForFile(fileToHash)
                endTime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                fileToHash.close()

                logging.debug('PreProcess: unlocking file...')
                logging.debug(
                    'PreProcess: Will update record status with Hash string and times'
                )

                datastore.updateRecordWithHashForStartTimeAndEndTime(
                    hashString, startTime, endTime, key_id)

            except Exception as e:
                # NOTE(review): e.message is Python 2 only; on Python 3 this
                # handler itself would raise AttributeError
                info = 'PreProcess: There was an error when calculating the hash for file: ' + os.path.basename(
                    filePath) + ' ' + e.message
                sendFailureEmail(info)
                logging.error(e.message)
Example #12
0
    def acquire(self) -> None:
        """
        Claim ownership of the PID file.

        :raises ~portalocker.LockException:
            If locking failed.
            For example, if already :meth:`locked`.
        """
        # The parent directory must exist before the file can be created.
        self._lock_path.parent.mkdir(parents=True, exist_ok=True)
        # "a" creates the file when missing without truncating existing content.
        handle = open(  # pylint: disable=consider-using-with
            self._lock_path, "a")
        try:
            portalocker.lock(handle,
                             portalocker.LOCK_EX | portalocker.LOCK_NB)
        except portalocker.LockException:
            handle.close()
            raise
        # A second acquisition by the same process would be a logic bug;
        # the file lock itself should already make this impossible.
        assert self._file_handle is None
        # Keep the handle around -- closing it would release the lock.
        self._file_handle = handle
        # Replace whatever the file held before with our PID.
        handle.seek(0)
        handle.truncate()
        handle.writelines([str(os.getpid())])
        handle.flush()
Example #13
0
def check_sh_is_running(pidfile):
    """
    This method checks whether another smarthome process is already running.

    A running instance keeps an exclusive lock on its pidfile, so failing
    to take a non-blocking exclusive lock means SmartHomeNG is alive.

    :param pidfile: Name of the pidfile to check
    :type pidfile: str

    :return: True: if SmartHomeNG is running, False: if SmartHomeNG is not running
    :rtype: bool
    """

    pid = read_pidfile(pidfile)
    isRunning = False
    if pid > 0 and psutil.pid_exists(pid):
        # fix: 'fh' must exist before the try block, otherwise the finally
        # clause raises NameError when open() itself fails
        fh = None
        try:
            fh = open(pidfile, 'r')
            # LOCK_EX - acquire an exclusive lock
            # LOCK_NB - non blocking
            portalocker.lock(fh, portalocker.LOCK_EX | portalocker.LOCK_NB)
            print("daemon.check_sh_is_running: portalocker.lock erfolgreich")
            # pidfile not locked, so sh is terminated
        except portalocker.LockException:
            isRunning = True
        finally:
            if fh:
                fh.close()
    return isRunning
Example #14
0
	def __getCachedData(self,key):
		"""Load and unpickle the cached value stored for `key`.

		Returns the unpickled object, or False when the file is empty or
		cannot be unpickled. Reuses an already-locked handle from
		self.locked when present, otherwise opens the file under a shared
		lock. NOTE(review): Python 2 code (print statements); the local
		name `str` shadows the builtin.
		"""
		fname = self.__fname(key)

		# Pre-existing locked read
		if(self.locked.get(key)):
			self.locked[key].seek(0)
			try: str = pickle.load(self.locked[key])
			except: str = False
			self.locked[key].seek(0)
			return str
		

		fp=open(fname, "r")
		self.open[key] = fp
		portalocker.lock(fp,portalocker.LOCK_SH)
		
		# The following 2 lines handle cases where open (above) was called
		# on an empty file that was created by cache::lock()
		fp.seek(0)
		
		try: str = pickle.load(fp)
		except: str = False
		
		# best-effort cleanup: log but never raise from unlock/close
		try: portalocker.unlock(fp)
		except: print "Cache error unlocking file with key " + key
		try: fp.close()
		except: print "Cache error closing file with key " + key
			
		del self.open[key]
		return str
Example #15
0
 def get(self, count: int = 1, readOnly: bool = False, recurlvl=0):
     """Dispense `count` available port numbers from the shared counter file.

     The file holds the last dispensed port; access is serialized with an
     exclusive lock. With readOnly=True the current value is returned
     without advancing. The counter wraps around at self.maxPort; ports
     that fail the availability check are retried up to
     self.maxportretries times.
     """
     with open(self.FILE, "r+") as file:
         portalocker.lock(file, portalocker.LOCK_EX)
         ports = []
         while len(ports) < count:
             file.seek(0)
             port = int(file.readline())
             if readOnly:
                 return port
             port += 1
             if port > self.maxPort:
                 port = self.minPort
             file.seek(0)
             file.write(str(port))
             try:
                 checkPortAvailable(("", port))
                 ports.append(port)
                 self.logger.debug("new port dispensed: {}".format(port))
             except Exception:
                 # fix: was a bare `except:` -- keep retrying on ordinary
                 # errors without swallowing SystemExit/KeyboardInterrupt
                 if recurlvl < self.maxportretries:
                     self.logger.debug(
                         "port {} unavailable, trying again...".format(
                             port))
                     recurlvl += 1
                 else:
                     self.logger.debug(
                         "port {} unavailable, max retries {} "
                         "reached".format(port, self.maxportretries))
                     raise
         return ports
Example #16
0
    def _write_to_file(self, params, write_callback):
        """Write one record via `write_callback` into the output file for
        `params`' directory key.

        Reuses an open handle from self.fh_cache when available, otherwise
        opens (and later caches) a new file. On success the freshly written
        span is offered to the dedup index. The handle is closed -- and
        dropped from the cache -- on write error or when the file grows past
        max_size (rollover); a brand new handle is locked and cached instead.

        Returns True on success, False when writing failed or a new file
        was not allowed.
        """
        full_dir = res_template(self.dir_template, params)
        dir_key = self.get_dir_key(params)

        result = self.fh_cache.get(dir_key)

        close_file = False

        new_size = start = 0

        if result:
            # cached, already-open handle from a previous write
            out, filename = result
            is_new = False
        else:
            filename = self.get_new_filename(full_dir, params)

            if not self.allow_new_file(filename, params):
                return False

            out = self._open_file(filename, params)

            is_new = True

        try:
            start = out.tell()

            write_callback(out, filename)

            out.flush()

            new_size = out.tell()

            # rewind so the dedup index can re-read what was just written
            out.seek(start)

            if self.dedup_index:
                self.dedup_index.add_urls_to_index(out, params,
                                                   filename,
                                                   new_size - start)

            return True

        except Exception as e:
            traceback.print_exc()
            close_file = True
            return False

        finally:
            # check for rollover
            if self.max_size and new_size > self.max_size:
                close_file = True

            if close_file:
                self._close_file(out)
                if not is_new:
                    self.fh_cache.pop(dir_key, None)

            elif is_new:
                # keep an exclusive, non-blocking lock on the cached handle
                # (skipped on Windows, where locking would block readers)
                if os.name != 'nt':
                    portalocker.lock(out, portalocker.LOCK_EX | portalocker.LOCK_NB)
                self.fh_cache[dir_key] = (out, filename)
    def read_delete_preprocess(self, key, db_path):
        """Validate `key` against the on-disk datastore and return its data.

        :param key: key to look up
        :param db_path: directory containing the datastore file
        :return: (True, data) when the key exists and is still live,
                 otherwise (False, <reason string>)
        """
        datastore = path.join(db_path, DEFAULT_DB_NAME)

        # Check for datastore existance.
        if not path.isfile(datastore):
            return False, "Empty DataStore. Data not found for the key."

        # Read previous datastore data if exists.
        with open(datastore) as f:
            # Make sure single process only allowed to access the file at a time.
            # Locking file.
            portalocker.lock(f, portalocker.LOCK_EX)
            data = json.load(f)
            # fix: release via the documented unlock() API instead of
            # lock(f, LOCK_UN), which is not a portable locking flag.
            portalocker.unlock(f)

        # Check for the input key available in data.
        if key not in data:
            return False, "No data found for the key provided."

        # Check whether the data for the key is active or inactive.
        target = data[key]
        target_active = self.check_time_to_live(target)
        if not target_active:
            return False, "Requested data is expired for the key."

        return True, data
Example #18
0
def getUsergroups():
    """Return the names of all user groups defined in config.yaml."""
    config_path = f"config/{PLUGIN_METADATA['name']}/config.yaml"
    with open(config_path, 'r', encoding='utf8') as config_file:
        # shared lock: reading only
        portalocker.lock(config_file, portalocker.LOCK_SH)
        permission_data = yaml.safe_load(config_file)
    # iterating a dict yields its keys
    return list(permission_data['permission'])
Example #19
0
def __rewrite_instance_file(append, remove, folder=None):
    """
    This helper method reads the user's instance descriptor and manages it
    eliminating dead records, appending new ones and re-serialising the file.
    """
    __make_instance_descriptor_file(folder)

    # PIDs about to be (re-)registered must not appear twice in the file.
    new_pids = {entry['pid'] for entry in append}

    # Keep only records that do not collide with the new registrations and
    # were not explicitly requested for removal.
    kept = []
    for record in get_instances(folder):
        if record['pid'] in new_pids:
            continue
        if record['hostname'] + ":" + str(record['pid']) in remove:
            continue
        kept.append(record)

    with open(__get_instance_descriptor_path(folder), 'w') as descriptor:
        portalocker.lock(descriptor, portalocker.LOCK_EX)

        descriptor.seek(0)
        descriptor.truncate()
        json.dump(kept + append, descriptor, indent=2)
        portalocker.unlock(descriptor)
Example #20
0
def readReqList():
    """Return the pending teleport request queue from requests.json."""
    request_path = f"config/{PLUGIN_METADATA['name']}/requests.json"
    with open(request_path, 'r', encoding='utf8') as request_file:
        # shared lock while reading
        portalocker.lock(request_file, portalocker.LOCK_SH)
        queue = json.load(request_file)
    return queue
Example #21
0
    def _try_store_in_file(self, request, response):
        """Serialize this session into its file.

        Returns False without writing when there is no session id, the
        session was forgotten, or its content is unchanged; otherwise
        pickles the session into response.session_file (held under an
        exclusive lock) and returns True. Always refreshes the session id
        cookie and closes the session file state via self._close().
        """
        try:
            if (not response.session_id or self._forget
                    or self._unchanged(response)):
                # self.clear_session_cookies()
                self.save_session_id_cookie()
                return False
            if response.session_new or not response.session_file:
                # Tests if the session sub-folder exists, if not, create it
                session_folder = os.path.dirname(response.session_filename)
                if not os.path.exists(session_folder):
                    os.mkdir(session_folder)
                response.session_file = recfile.open(response.session_filename,
                                                     'wb')
                # exclusive lock held until _close releases it
                portalocker.lock(response.session_file, portalocker.LOCK_EX)
                response.session_locked = True
            if response.session_file:
                session_pickled = response.session_pickled or pickle.dumps(
                    self, pickle.HIGHEST_PROTOCOL)
                response.session_file.write(session_pickled)
                # drop stale trailing bytes from a longer previous session
                response.session_file.truncate()
        finally:
            self._close(response)

        self.save_session_id_cookie()
        return True
Example #22
0
def readLastTpPosList():
    """Return the back-teleport (last position) queue from lastPos.json."""
    pos_path = f"config/{PLUGIN_METADATA['name']}/lastPos.json"
    with open(pos_path, 'r', encoding='utf8') as pos_file:
        # shared lock while reading
        portalocker.lock(pos_file, portalocker.LOCK_SH)
        return json.load(pos_file)
Example #23
0
 def lock(self):
     """Take the exclusive file lock once.

     Returns True when this call acquired the lock, False when the
     object was already locked.
     """
     if self.locked:
         return False
     portalocker.lock(self._f, portalocker.LOCK_EX)
     self.locked = True
     return True
Example #24
0
def __rewriteInstanceFile(append, remove, folder=None):
    """This helper method reads the user's instance descriptor and manages it
    eliminating dead records, appending new ones and reserialising the file."""

    __makeInstanceDescriptorFile(folder)
    with open(__getInstanceDescriptorPath(folder), 'r+') as f:
        portalocker.lock(f, portalocker.LOCK_EX)

        # PIDs about to be registered must not be duplicated in the file.
        fresh_pids = {entry['pid'] for entry in append}

        kept = []
        for entry in json.load(f):
            # Drop records colliding with new registrations or explicitly
            # requested for removal...
            if entry['pid'] in fresh_pids:
                continue
            if entry['hostname'] + ":" + str(entry['pid']) in remove:
                continue
            # ...and records whose process is no longer alive.
            if not __checkInstance(entry['hostname'], entry['pid']):
                continue
            kept.append(entry)

        f.seek(0)
        f.truncate()
        json.dump(kept + append, f, indent=2)
        portalocker.unlock(f)
Example #25
0
def findT(path, language='en-us'):
    """
    must be run by the admin app

    Scan the application's models/controllers/views for T(...) translation
    strings and merge any new messages into languages/<language>.py.
    """
    filename = os.path.join(path, 'languages', '%s.py' % language)
    sentences = read_dict(filename)
    mp = os.path.join(path, 'models')
    cp = os.path.join(path, 'controllers')
    vp = os.path.join(path, 'views')
    # raw strings for the regex patterns (avoids invalid-escape warnings)
    for file in listdir(mp, r'.+\.py', 0) + listdir(cp, r'.+\.py', 0)\
         + listdir(vp, r'.+\.html', 0):
        # read each source file under a shared lock
        fp = open(file, 'r')
        portalocker.lock(fp, portalocker.LOCK_SH)
        data = fp.read()
        portalocker.unlock(fp)
        fp.close()
        items = regex_translate.findall(data)
        for item in items:
            try:
                # SECURITY: eval() of source-derived text; tolerable only
                # because the admin app runs over trusted application code.
                message = eval(item)
                if not message.startswith('#') and '\n' not in message:
                    tokens = message.rsplit('##', 1)
                else:
                    # this allows markmin syntax in translations
                    tokens = [message]
                if len(tokens) == 2:
                    message = tokens[0].strip() + '##' + tokens[1].strip()
                if message and message not in sentences:
                    sentences[message] = message
            except Exception:
                # skip anything that does not evaluate to a plain string
                pass
    write_dict(filename, sentences)
def parseNewWorm(wormID, name):
	"""Register a newly joined worm: assign a team, enforce name auth and bans."""
	global worms

	# Tabs would corrupt the tab-separated ranking text-file database.
	name = name.replace("\t", " ").strip()
	exists = False
	try:
		worm = worms[wormID]
		exists = True
	except KeyError:  # Worm doesn't exist.
		worm = Worm()
	worm.Name = name
	worm.iID = wormID
	worm.Ping = []

	worms[wormID] = worm

	if io.getGameType() == "Hide and Seek":
		# at least one seeker, two once there are four or more players
		minSeekers = 1
		if len(worms.values()) >= 4: minSeekers = 2
		if io.getNumberWormsInTeam(1) < minSeekers:
			io.setWormTeam(wormID, 1) # Seeker
		else:
			io.setWormTeam(wormID, 0) # Hider
	else:
		# Balance teams: put the new worm into the smallest team.
		teams = [0,0,0,0]
		for w in worms.keys():
			teams[worms[w].Team] += 1
		minTeam = 0
		minTeamCount = teams[0]
		for f in range(cfg.MAX_TEAMS):
			if minTeamCount > teams[f]:
				minTeamCount = teams[f]
				minTeam = f

		io.setWormTeam(wormID, minTeam)

	if cfg.RANKING_AUTHENTICATION:
		if name not in ranking.auth:
			ranking.auth[name] = getWormSkin(wormID)
			try:
				# FIX: the file was opened with "r", which made the write
				# below fail; append mode is required to record the entry.
				f = open(io.getFullFileName("pwn0meter_auth.txt"),"a")
				try:
					# best-effort lock; proceed unlocked if it fails
					portalocker.lock(f, portalocker.LOCK_EX)
				except Exception:
					pass
				f.write( name + "\t" + str(ranking.auth[name][0]) + " " + ranking.auth[name][1] + "\n" )
				f.close()
			except IOError:
				msg("ERROR: Unable to open pwn0meter_auth.txt")
		else:
			if ranking.auth[name] != getWormSkin(wormID):
				io.kickWorm(wormID, "Player with name %s already registered" % name)

	# Re-kick players whose temporary ban has not yet expired.
	wormIP = io.getWormIP(wormID).split(":")[0]
	if wormIP in cmds.kickedUsers and cmds.kickedUsers[ wormIP ] > time.time():
		io.kickWorm( wormID, "You can join in " + str(int(cmds.kickedUsers[ wormIP ] - time.time())/60 + 1) + " minutes" )
		return
	cmds.recheckVote()
Example #27
0
def updateparallelprogress(added_value):
    """Atomically add `added_value` to the shared progress counter file and
    log whenever the overall percentage crosses a logging threshold.

    The counter file (glob.tmpfile) holds "<total> <log-every-%> <current>".
    The whole read-modify-write is retried until it succeeds.
    """
    Done = False
    while (not Done):
        try:
            Done = True
            with open(glob.tmpfile, mode='r+') as f:
                portalocker.lock(f, portalocker.LOCK_EX)
                data = f.read().split(" ")
                parallel_total = float(data[0])
                log_every_percentage = float(data[1])
                current_parallel_progress = float(data[2])

                # threshold already logged before this update...
                last_log = math.floor(
                    100 * current_parallel_progress / parallel_total /
                    log_every_percentage) * log_every_percentage
                current_parallel_progress += added_value
                # ...and the threshold reached after it
                new_log = math.floor(
                    100 * current_parallel_progress / parallel_total /
                    log_every_percentage) * log_every_percentage

                if new_log > last_log:
                    log('{0}%'.format(int(new_log)))

                f.seek(0)
                f.write('{0} {1} {2}'.format(parallel_total,
                                             log_every_percentage,
                                             current_parallel_progress))
                f.truncate()
        except Exception:
            # fix: was a bare `except:` -- retry on ordinary errors (lock
            # contention, torn read) without trapping KeyboardInterrupt
            Done = False
Example #28
0
def portalock_open(myfilename):
    """Open `myfilename` read/write and return the handle holding an
    exclusive lock.

    Both LOCK_EX and LOCK_SH block indefinitely: if another process holds
    the lock, this call waits until that process releases it.
    """
    handle = open(myfilename, 'r+')  # rw mode, never write mode (would truncate)
    portalocker.lock(handle, portalocker.LOCK_EX)  # try to acquire lock
    return handle
Example #29
0
def getConfigKeyList():
    """Return the configuration key names from config.yaml.

    Returns a plain list (not a dict view) for consistency with the
    sibling getUsergroups() helper.
    """
    with open(f"config/{PLUGIN_METADATA['name']}/config.yaml",
              'r',
              encoding='utf8') as f:
        portalocker.lock(f, portalocker.LOCK_SH)
        data = yaml.safe_load(f)
    return list(data['config'].keys())
Example #30
0
def write_pidfile(pid, pidfile):
    """
    This method writes the PID to the pidfile and locks it while the process is running.

    :param pid: PID of SmartHomeNG
    :param pidfile: Name of the pidfile to write to
    :type pid: int
    :type pidfile: str
    """
    # Windows locking semantics differ; skip the lock there.
    if os.name == 'nt':
        return

    with open(pidfile, 'w+') as fh:
        fh.write("%s" % pid)

    global _pidfile_handle
    try:
        # The handle is kept in a module-level global on purpose:
        # closing it would release the lock, which must persist for the
        # whole process lifetime.
        _pidfile_handle = open(pidfile, 'r')
        # LOCK_EX - acquire an exclusive lock
        # LOCK_NB - non blocking
        portalocker.lock(_pidfile_handle, portalocker.LOCK_EX | portalocker.LOCK_NB)
    # don't close _pidfile_handle or lock is gone!!!
    except portalocker.AlreadyLocked as e:
        # fix: AlreadyLocked is not guaranteed to carry errno/strerror like
        # OSError does -- formatting them with %d could raise a secondary error
        print("Could not lock pid file: %s" % e, file=sys.stderr)
Example #31
0
def updateConfigKey(keyname, value):
    """Update one configuration key, coercing the string `value` to the
    type of the key's default (int, bool or str).

    :return: 'succeed', 'type_error' (value does not match the key's
             type) or 'unknown_key'
    """
    def isDigit(meow):
        # True when the string parses as an int (handles signs, unlike str.isdigit)
        try:
            int(meow)
            return True
        except ValueError:
            return False

    with open(f"config/{PLUGIN_METADATA['name']}/config.yaml",
              'r',
              encoding='utf8') as f:
        portalocker.lock(f, portalocker.LOCK_SH)
        data = yaml.safe_load(f)
    default_data = yaml.safe_load(default_config)
    if keyname in default_data['config']:
        default_value = default_data['config'][keyname]
        # fix: bool is a subclass of int, so it must be excluded here --
        # otherwise "1" for a boolean key would be stored as the int 1.
        if isDigit(value.lower()) and isinstance(default_value, int) \
                and not isinstance(default_value, bool):
            data['config'][keyname] = int(value)
        elif value.lower() == 'true' and isinstance(default_value, bool):
            data['config'][keyname] = True
        elif value.lower() == 'false' and isinstance(default_value, bool):
            data['config'][keyname] = False
        elif isinstance(default_value, str):
            data['config'][keyname] = value
        else:
            return 'type_error'
        with open(f"config/{PLUGIN_METADATA['name']}/config.yaml",
                  'w',
                  encoding='utf8') as f:
            portalocker.lock(f, portalocker.LOCK_EX)
            yaml.dump(data, f, indent=4, sort_keys=False)
        return 'succeed'
    return 'unknown_key'
Example #32
0
	def lock(self,key):
		"""Acquire an exclusive lock on key's cache file for writing.

		Returns True when the lock was taken and the caller should write,
		or None when another process wrote the file while we were blocked
		on the lock (the lock is released again in that case).
		NOTE(review): Python 2 code (`0600` octal literal).
		"""
		fname = self.__fname(key)

		# mtime before blocking on the lock; used to detect concurrent writes
		prelock = int(self.__filemtime(fname))

		# append-binary creates the file without truncating existing content
		fp = open(fname, "ab")
		
		self.open[key] = fp

		os.chmod(fname, 0600)

		portalocker.lock(fp,portalocker.LOCK_EX)


		# Written while blocking ?
		if(prelock > 0 and self.__filemtime(fname) > prelock):
			if(self.logfile): self.__log(key+" prelock: " + str(prelock) + " postlock: "+str(self.__filemtime(fname))+" NOT writing.")
			self.unlock(key)
			del self.open[key]
			return None
		

		if(self.logfile):
			 self.__log(key+" prelock: " + str(prelock) + " postlock: "+ str(self.__filemtime(fname)) +" writing.")

		# remember the locked handle so __getCachedData can reuse it
		self.locked[key] = fp

		return True
Example #33
0
def updatePermissionList(usergroup,
                         permission_list,
                         add=False,
                         remove=False):  # update the permission list
    """Add to, remove from, or replace the permission list of *usergroup*.

    Returns 'succeed' on success or 'invalid_permissions' when the
    requested permissions are not valid for the operation.
    """
    with open(f"config/{PLUGIN_METADATA['name']}/config.yaml",
              'r',
              encoding='utf8') as f:
        portalocker.lock(f, portalocker.LOCK_EX)
        data = yaml.safe_load(f)
    # Grantable permissions: the built-in ones, every group already in
    # the config, plus the wildcard 'all'.
    local_valid_permissions = valid_permissions + list(
        data['permission'].keys()) + ['all']
    if remove is True and not set(permission_list).issubset(
            set(data['permission'][usergroup])):
        return 'invalid_permissions'
    elif remove is not True and not set(permission_list).issubset(
            set(local_valid_permissions)):
        return 'invalid_permissions'
    if remove is True and set(permission_list).issubset(
            set(data['permission'][usergroup])):
        permission_list = list(
            set(data['permission'][usergroup]) - set(permission_list))
    if usergroup not in data['permission'].keys():
        # BUG FIX: was {}; the add-branch concatenation below then raised
        # TypeError (dict + list).  A brand-new group starts as a list.
        data['permission'][usergroup] = []
    if add is True:
        permission_list = data['permission'][usergroup] + permission_list
    data['permission'][usergroup] = list(set(permission_list))
    with open(f"config/{PLUGIN_METADATA['name']}/config.yaml",
              'w',
              encoding='utf8') as f:
        # Exclusive lock on the write path too, matching the read path.
        portalocker.lock(f, portalocker.LOCK_EX)
        yaml.dump(data, f, indent=4, sort_keys=False)
    return 'succeed'
Example #34
0
    def acquire(self):
        """ Acquire thread and file locks.  Re-opening log for 'degraded' mode.
        """
        # Thread-level lock first.  Under some tests Handler ends up
        # being null due to instantiation order, hence the guard.
        if Handler:
            Handler.acquire(self)

        # File-level lock.  (Inefficient with many active threads in a
        # single process, but this handler is not aimed at performance.)
        if not self.stream_lock:
            # stream_lock=None means close() was called or something else
            # weird happened; skip all file-level locking.
            return
        if self.stream_lock.closed:
            # Daemonization can close all open file descriptors, see
            # https://bugzilla.redhat.com/show_bug.cgi?id=952929
            # Try opening the lock file again.  Should we warn() here?!?
            try:
                self._open_lockfile()
            except Exception:
                self.handleError(NullLogRecord())
                # Don't try to open the stream lock again
                self.stream_lock = None
                return
        lock(self.stream_lock, LOCK_EX)
Example #35
0
def load_module(name, code=None, name_path=""):
    """Write *code* to file *name* (when given) under an exclusive lock,
    then import and return the module.

    The create-then-lock dance lets several processes race safely: only
    the process that creates the file writes it; readers block on the
    lock until the writer has finished.
    """
    # http://stackoverflow.com/a/30407477/5288758
    try:
        import importlib
    except ImportError:
        import imp as importlib

    if code is not None:
        try:
            # Try and create/open the file only if it doesn't exist.
            fd = os.open(name, os.O_CREAT | os.O_EXCL | os.O_WRONLY)

            # Lock the file exclusively to notify other processes we're still writing
            # NOTE(review): portalocker.lock() is handed a raw fd here, not
            # a file object — confirm the installed portalocker accepts it.
            portalocker.lock(fd, portalocker.LOCK_EX)  # fcntl.flock(fd, fcntl.LOCK_EX)
            with os.fdopen(fd, 'w') as f:
                f.write(code)

        except OSError as e:
            # If the error wasn't EEXIST we should raise it.
            if e.errno != errno.EEXIST:
                raise

    # The file existed, so let's open it for reading and then try and
    # lock it. This will block on the LOCK_EX above if it's held by
    # the writing process.
    with open(name, "r") as f:
        portalocker.lock(f, portalocker.LOCK_EX)  # fcntl.flock(f, fcntl.LOCK_EX)

    # NOTE(review): the lock is released when the 'with' block closes the
    # file, so the import below runs unlocked — presumably acceptable
    # because the write has completed by then.
    return importlib.import_module(name, name_path)
Example #36
0
def findT(path, language='en-us'):
    """
    must be run by the admin app

    Scans the app's models, controllers and views for translation
    strings and merges new ones into the language dictionary file.
    """
    filename = os.path.join(path, 'languages', '%s.py' % language)
    sentences = read_dict(filename)
    mp = os.path.join(path, 'models')
    cp = os.path.join(path, 'controllers')
    vp = os.path.join(path, 'views')
    # raw strings: '.+\.py' used an invalid escape sequence that Python 3
    # deprecates; r'' literals have the identical string value.
    for filepath in listdir(mp, r'.+\.py', 0) + listdir(cp, r'.+\.py', 0)\
         + listdir(vp, r'.+\.html', 0):
        # with-statement guarantees the handle is closed (and the shared
        # lock released) even if the read raises; the original leaked fp.
        with open(filepath, 'r') as fp:
            portalocker.lock(fp, portalocker.LOCK_SH)
            data = fp.read()
            portalocker.unlock(fp)
        items = regex_translate.findall(data)
        for item in items:
            try:
                # item is source text like "'hello'"; eval() yields the
                # actual string.  Only ever run on trusted app source.
                msg = eval(item)
                if msg and msg not in sentences:
                    sentences[msg] = msg
            except:
                # best-effort: skip anything that does not evaluate cleanly
                pass
    write_dict(filename, sentences)
Example #37
0
def test(path='/Users/patrickcusack/Documents/Rebuild.MOV', holdSeconds=20):
	"""Hold a shared lock on *path* for *holdSeconds* seconds.

	Generalized: the previously hard-coded file path and 20-second hold
	are now parameters whose defaults preserve the old behavior.
	"""
	fileToHash = open(path, 'r')
	print('Hasher: locking file...')
	portalocker.lock(fileToHash, portalocker.LOCK_SH)
	sleep(holdSeconds)
	print('Hasher: unlocking file...')
	# close() releases the shared lock; no explicit unlock is needed
	fileToHash.close()
Example #38
0
    def acquire( self, wait = 5 * 60, waitInterval = 5, expire = 10 * 60 ):
        """Acquire the lock file, waiting up to *wait* seconds.

        Polls checkValidLock() every *waitInterval* seconds.  Raises when
        the lock is busy (wait is None) or the wait budget is exceeded.
        The lock expires *expire* seconds from now (never, when None).
        The pid, expiry and lockinfo are pickled into the lock file.
        """
        waitTotal = 0
        while ( self.checkValidLock() ):
            if ( wait is None ):
                raise Exception( 'lock is busy' )
            else:
                if ( wait != 0 ):
                    waitTotal += waitInterval
                    if ( self.debug ):
                        print( 'waitTotal: %d wait: %d waitInterval: %d' % ( waitTotal, wait, waitInterval ) )
                    if ( waitTotal > wait ):
                        raise Exception( 'exceeded max wait time on the lock' )
                    time.sleep( waitInterval )

        # don't want blocking on acquired locks - even with the loop, there is still a possibility of stolen lock and exception here
        # BUG FIX: file() was removed in Python 3, and pickle requires a
        # binary-mode handle there; open(..., 'wb') behaves the same on 2.x.
        self.handle = open( self.lockfile, 'wb' )
        portalocker.lock( self.handle, portalocker.LOCK_EX | portalocker.LOCK_NB )
        if ( self.debug ):
            print( 'acquired lock %s' % self.lockfile )
        pickle.dump( os.getpid(), self.handle )
        if ( expire is None ):
            expire_time = None
        else:
            expire_time = datetime.datetime.now()
            expire_time += datetime.timedelta( seconds = expire )
        pickle.dump( expire_time, self.handle )
        pickle.dump( self.lockinfo, self.handle )
        self.handle.flush()
Example #39
0
def findT(path, language='en-us'):
    """
    must be run by the admin app
    """
    filename = os.path.join(path, 'languages', '%s.py' % language)
    sentences = read_dict(filename)
    scan_dirs = [(os.path.join(path, 'models'), r'.+\.py'),
                 (os.path.join(path, 'controllers'), r'.+\.py'),
                 (os.path.join(path, 'views'), r'.+\.html')]
    sources = []
    for dirpath, pattern in scan_dirs:
        sources += listdir(dirpath, pattern, 0)
    for source in sources:
        handle = open(source, 'r')
        portalocker.lock(handle, portalocker.LOCK_SH)
        contents = handle.read()
        portalocker.unlock(handle)
        handle.close()
        for item in regex_translate.findall(contents):
            try:
                message = eval(item)
                # markmin-style ('#'-prefixed or multi-line) messages are
                # kept whole; otherwise split off a '##' translator comment.
                if message.startswith('#') or '\n' in message:
                    tokens = [message]
                else:
                    tokens = message.rsplit('##', 1)
                if len(tokens) == 2:
                    message = tokens[0].strip() + '##' + tokens[1].strip()
                if message and message not in sentences:
                    sentences[message] = message
            except:
                pass
    write_dict(filename, sentences)
Example #40
0
def findT(path, language="en-us"):
    """
    must be run by the admin app
    """

    def _read_locked(fname):
        # Shared-lock the file while its contents are read.
        fp = open(fname, "r")
        portalocker.lock(fp, portalocker.LOCK_SH)
        text = fp.read()
        portalocker.unlock(fp)
        fp.close()
        return text

    filename = os.path.join(path, "languages", "%s.py" % language)
    sentences = read_dict(filename)
    mp = os.path.join(path, "models")
    cp = os.path.join(path, "controllers")
    vp = os.path.join(path, "views")
    candidates = (listdir(mp, ".+\.py", 0) +
                  listdir(cp, ".+\.py", 0) +
                  listdir(vp, ".+\.html", 0))
    for candidate in candidates:
        data = _read_locked(candidate)
        for item in regex_translate.findall(data):
            try:
                message = eval(item)
                is_markmin = message.startswith("#") or "\n" in message
                # markmin / multi-line messages stay whole; otherwise an
                # optional '##' translator comment is split off.
                tokens = [message] if is_markmin else message.rsplit("##", 1)
                if len(tokens) == 2:
                    message = tokens[0].strip() + "##" + tokens[1].strip()
                if message and message not in sentences:
                    sentences[message] = message
            except:
                pass
    write_dict(filename, sentences)
Example #41
0
    def __init__(self, request, folder=None):
        """Set up the disk cache: ensure the cache folder exists and the
        shelve file carries a stats record, under an exclusive lock."""
        self.request = request

        # Lets test if the cache folder exists, if not
        # we are going to create it
        folder = folder or os.path.join(request.folder, "cache")

        if not os.path.exists(folder):
            os.mkdir(folder)

        ### we need this because of a possible bug in shelve that may
        ### or may not lock
        self.locker_name = os.path.join(request.folder, "cache/cache.lock")
        self.shelve_name = os.path.join(request.folder, "cache/cache.shelve")

        locker, locker_locked = None, False
        try:
            locker = open(self.locker_name, "a")
            portalocker.lock(locker, portalocker.LOCK_EX)
            locker_locked = True
            storage = shelve.open(self.shelve_name)

            # BUG FIX: has_key() was removed in Python 3; 'in' works in both.
            if CacheAbstract.cache_stats_name not in storage:
                storage[CacheAbstract.cache_stats_name] = {"hit_total": 0, "misses": 0}
                storage.sync()
        except ImportError:
            pass  # no module _bsddb, ignoring exception now so it makes a ticket only if used
        except:
            logger.error("corrupted file: %s" % self.shelve_name)
        if locker_locked:
            portalocker.unlock(locker)
        if locker:
            locker.close()
Example #42
0
 def lock(self):
     """Take an exclusive lock on the wrapped file.

     Returns True when the lock was newly acquired, False when this
     instance already held it.
     """
     if self.locked:
         return False
     portalocker.lock(self._f, portalocker.LOCK_EX)
     self.locked = True
     return True
Example #43
0
    def __init__(self, prefix = "omero"):
        """
        Initializes a TempFileManager instance with a userDir containing
        the given prefix value, or "omero" by default. Also registers
        an atexit callback to call self.cleanup() on exit.
        """
        self.logger = logging.getLogger("omero.util.TempFileManager")
        self.is_win32 = ( sys.platform == "win32" )
        self.prefix = prefix

        self.userdir = self.tmpdir() / ("%s_%s" % (self.prefix, self.username()))
        """
        User-accessible directory of the form $TMPDIR/omero_$USERNAME.
        If the given directory is not writable, an attempt is made
        to use an alternative
        """
        if not self.create(self.userdir) and not self.access(self.userdir):
            # Try numbered fallback directories.  BUG FIX: the original
            # while-loop never incremented its counter (infinite loop on
            # persistent failure) and raised unconditionally even after a
            # successful break; for/else raises only when no dir worked.
            for i in range(10):
                t = path("%s_%s" % (self.userdir, i))
                if self.create(t) or self.access(t):
                    self.userdir = t
                    break
            else:
                raise Exception("Failed to create temporary directory: %s" % self.userdir)
        self.dir = self.userdir / self.pid()
        """
        Directory under which all temporary files and folders will be created.
        An attempt to remove a path not in this directory will lead to an
        exception.
        """

        # Now create the directory. If a later step throws an
        # exception, we should try to rollback this change.
        if not self.dir.exists():
            self.dir.makedirs()
        self.logger.debug("Using temp dir: %s" % self.dir)

        self.lock = None
        try:
            self.lock = open(str(self.dir / ".lock"), "a+")
            """
            .lock file under self.dir which is used to prevent other
            TempFileManager instances (also in other languages) from
            cleaning up this directory.
            """
            try:
                portalocker.lock(self.lock, portalocker.LOCK_EX|portalocker.LOCK_NB)
                atexit.register(self.cleanup)
            except:
                lock = self.lock
                self.lock = None
                if lock:
                    self.lock.close()
                raise
        finally:
            try:
                if not self.lock:
                    self.cleanup()
            except:
                self.logger.warn("Error on cleanup after error", exc_info = True)
Example #44
0
 def acquire(self):
     """ Acquire thread and file locks.  Re-opening log for 'degraded' mode.
     """
     # thread-level lock first
     Handler.acquire(self)
     # File-level lock.  (Inefficient with many active threads in one
     # process, but this handler is not built for high performance.)
     if not self.stream_lock:
         # stream_lock=None: close() was called or something else weird
         # happened; ignore all file-level locks.
         return
     if self.stream_lock.closed:
         # Daemonization can close all open file descriptors, see
         # https://bugzilla.redhat.com/show_bug.cgi?id=952929
         # Try opening the lock file again.  Should we warn() here?!?
         try:
             self._open_lockfile()
         except Exception:
             self.handleError(NullLogRecord())
             # Don't try to open the stream lock again
             self.stream_lock = None
             return
     lock(self.stream_lock, LOCK_EX)
     # 0.9.1a: Here we got file lock
     if not os.path.isfile(self.baseFilename):
         # the log file vanished while we waited: reopen the stream
         self._close()
         self.stream = self._open()
Example #45
0
def _acquire_file_lock(lock_file_path):
    """Acquires an exclusive lock on the supplied file.

    :param lock_file_path: Path to the lock file
    :type lock_file_path: str
    :returns: Lock file
    :rtype: File
    """

    try:
        lock_file = open(lock_file_path, 'w')
    except IOError as e:
        logger.exception('Failed to open lock file: %s', lock_file_path)
        raise util.io_exception(lock_file_path, e.errno)

    # Exclusive + non-blocking: fail fast rather than wait on the lock.
    try:
        portalocker.lock(lock_file, portalocker.LOCK_EX | portalocker.LOCK_NB)
        return lock_file
    except portalocker.LockException:
        logger.exception(
            'Failure while tring to aquire file lock: %s',
            lock_file_path)
        lock_file.close()
        raise DCOSException('Unable to acquire the package cache lock')
Example #46
0
def set_psk(server_ip_address):
    """Generate a fresh random IPSEC PSK for *server_ip_address*, rewrite
    the ipsec secrets file (replacing any existing entry for that IP) and
    ask ipsec to reread its secrets.  Returns the hex-encoded PSK."""
    psk = binascii.hexlify(os.urandom(psi_config.IPSEC_PSK_LENGTH))
    # BUG FIX: initialise before try — if open() raised, the finally
    # clause hit an unbound name (NameError) and masked the real error.
    # Also renamed from 'file', which shadowed the builtin.
    f = None
    try:
        f = open(psi_config.IPSEC_SECRETS_FILENAME, 'r+')
        portalocker.lock(f, portalocker.LOCK_EX)
        lines = f.readlines()
        newline = '%s : PSK "%s"\n' % (server_ip_address, psk)
        newlines = []
        found = False
        for line in lines:
            # Replace an existing entry for this server; keep the rest.
            if line.find(server_ip_address) == 0:
                newlines.append(newline)
                found = True
            else:
                newlines.append(line)
        if not found:
            newlines.append(newline)
        f.seek(0)
        f.truncate()
        f.writelines(newlines)
        f.flush()
        call (['sudo', 'ipsec', 'auto', '--rereadsecrets'])
    finally:
        if f:
            f.close()  # closing also releases the lock
    return psk
Example #47
0
    def __init__(self, request):
        """Prepare the disk cache: create the cache folder when missing
        and initialise the shelve stats record under an exclusive lock."""
        self.request = request

        # Lets test if the cache folder exists, if not
        # we are going to create it
        folder = os.path.join(request.folder, 'cache')

        if not os.path.exists(folder):
            os.mkdir(folder)

        ### we need this because of a possible bug in shelve that may
        ### or may not lock
        self.locker_name = os.path.join(request.folder,
                                        'cache/cache.lock')
        self.shelve_name = os.path.join(request.folder,
                'cache/cache.shelve')

        locker, locker_locked = None, False
        try:
            locker = open(self.locker_name, 'a')
            portalocker.lock(locker, portalocker.LOCK_EX)
            locker_locked = True

            storage = shelve.open(self.shelve_name)

            # BUG FIX: has_key() and "except ImportError, e" are
            # Python-2-only forms; replaced with 'in' / bare except-as-less.
            if CacheAbstract.cache_stats_name not in storage:
                storage[CacheAbstract.cache_stats_name] = {
                    'hit_total': 0,
                    'misses': 0,
                    }
                storage.sync()
        except ImportError:
            pass # no module _bsddb, ignoring exception now so it makes a ticket only if used
        finally:
            # BUG FIX: the original never released the lock, leaving the
            # locker file exclusively locked for the process lifetime
            # (the sibling cache __init__ releases it).
            if locker_locked:
                portalocker.unlock(locker)
            if locker:
                locker.close()
Example #48
0
 def get_logs(self, path, last_clock):
     """Return messages newer than *last_clock* plus the current clock."""
     import portalocker
     import tailer
     import time
     log_path = self.get_path(path)
     with open(log_path, "r+") as log:
         # Retry until the exclusive lock is obtained.
         while True:
             try:
                 portalocker.lock(log, portalocker.LOCK_EX)
             except:
                 dbg.dbg("lock failed")
                 time.sleep(0.1)
             else:
                 break
         curtime = int(util.current_sec())
         lines = tailer.tail(log, 20)
     entries = []
     if last_clock is None: last_clock = 0
     for entry in lines:
         fields = entry.strip().split("\t")
         # Each line is "<clock>\t<message>"; drop entries older than
         # the caller's clock.
         if int(fields[0]) < last_clock: continue
         entries.append(fields[1])
     return entries, curtime
Example #49
0
    def _try_store_on_disk(self, request, response):
        """Persist this session to its file unless nothing needs saving.

        Skips the write when the session is database-backed, when its
        content hash is unchanged since load, or when there is no
        session id / the session was explicitly forgotten.
        """

        # don't save if sessions not not file-based
        if response.session_db:
            return

        # don't save if no change to session
        # __hash holds the md5 of the session as it was loaded; matching
        # the md5 of the current contents means "nothing changed".
        __hash = self.__hash
        if __hash is not None:
            del self.__hash
            if __hash == hashlib.md5(str(self)).digest():
                self._close(response)
                return

        if not response.session_id or self._forget:
            self._close(response)
            return

        if response.session_new:
            # Tests if the session sub-folder exists, if not, create it
            session_folder = os.path.dirname(response.session_filename)
            if not os.path.exists(session_folder):
                os.mkdir(session_folder)
            response.session_file = open(response.session_filename, 'wb')
            portalocker.lock(response.session_file, portalocker.LOCK_EX)
            response.session_locked = True

        if response.session_file:
            # truncate() discards leftover bytes from a previously
            # longer session pickle.
            cPickle.dump(dict(self), response.session_file)
            response.session_file.truncate()
            self._close(response)
Example #50
0
def planet():
    """Controller: serve the planet RSS feed, caching entries on disk.

    The first request fetches the live feed and pickles it into
    cache/planet.rss; later requests load the pickle instead (the cache
    is never refreshed).  Returns raw RSS XML for .rss requests, else
    the rendered view.
    """
    #return ""
    import gluon.contrib.rss2 as rss2

    # store planet rss entries in disk (forever...)
    import portalocker
    import os, cPickle as pickle
    path = os.path.join(request.folder,'cache', "planet.rss")
    if not os.path.exists(path):
        # first hit: create the cache file and fetch the live feed
        f = open(path, "w+")
        rss = get_planet_rss(None)
        rss = [{'title': item.title, 'author': item.author, 'pubDate': item.pubDate, 'link': item.link, 'description': item.description} for item in rss.items]
    else:
        # cache exists: read it below, under the lock
        f = open(path, "r+")
        rss = None
    portalocker.lock(f, portalocker.LOCK_EX)
    if not rss:
        rss = pickle.load(f)
    else:
        f.seek(0)
        pickle.dump(rss, f)
    portalocker.unlock(f)
    f.close()

    # .rss requests
    if request.extension == "rss":
        # return new rss feed xml
        response.headers['Content-Type']='application/rss+xml'
        return rss2.dumps(rss)

    # else send the rss object to be processed by
    # the view

    return response.render(dict(rss = rss, rss2 = rss2))
 def get(self, count: int=1, readOnly: bool=False, recurlvl=0):
     """Dispense *count* free ports from the shared counter file.

     The current counter lives in self.FILE and is advanced under an
     exclusive lock; ports wrap from self.maxPort back to self.minPort.
     With readOnly=True the current counter value is returned unchanged.
     Raises once a port remains unavailable after self.maxportretries
     retries.
     """
     with open(self.FILE, "r+") as file:
         portalocker.lock(file, portalocker.LOCK_EX)
         ports = []
         while len(ports) < count:
             file.seek(0)
             port = int(file.readline())
             if readOnly:
                 # NOTE(review): returns a bare int here, not a list —
                 # callers must handle both shapes.
                 return port
             port += 1
             if port > self.maxPort:
                 port = self.minPort
             # persist the advanced counter before probing the port
             file.seek(0)
             file.write(str(port))
             try:
                 checkPortAvailable(("", port))
                 ports.append(port)
                 self.logger.debug("new port dispensed: {}".format(port))
             except Exception:
                 if recurlvl < self.maxportretries:
                     self.logger.debug("port {} unavailable, trying again...".
                                       format(port))
                     recurlvl += 1
                 else:
                     self.logger.debug("port {} unavailable, max retries {} "
                                       "reached".
                                       format(port, self.maxportretries))
                     raise
         return ports
Example #52
0
def load_storage(filename):
    """Unpickle a dict from *filename* (read under an exclusive lock)
    and wrap it in a Storage object.

    Mirrors save_storage: the handle is now closed even when unpickling
    fails, where the original leaked the locked file object on error.
    """
    fp = open(filename, 'rb')
    try:
        portalocker.lock(fp, portalocker.LOCK_EX)
        storage = cPickle.load(fp)
        portalocker.unlock(fp)
    finally:
        fp.close()
    return Storage(storage)
Example #53
0
def save_storage(storage, filename):
    """Pickle *storage* (as a plain dict) to *filename* under an
    exclusive lock; the file is always closed afterwards."""
    with open(filename, 'wb') as fp:
        portalocker.lock(fp, portalocker.LOCK_EX)
        cPickle.dump(dict(storage), fp)
        portalocker.unlock(fp)
 def acquire(self):
     """ Acquire thread and file locks. Also re-opening log file when running
     in 'degraded' mode. """
     # handle thread lock
     Handler.acquire(self)
     # take the cross-process file lock before touching the stream
     lock(self.stream_lock, LOCK_EX)
     if self.stream.closed:
         # 'degraded' mode: the log stream was closed; reopen it
         self._openFile(self.mode)
Example #55
0
 def __exit__(self, exc_type, exc_val, exc_tb):
     """Release the GPU lock file (if any) on context exit."""
     if self.gpu_id is not None:
         logger.info("Releasing GPU {}.".format(self.gpu_id))
     if self.lock_file is None:
         return
     if self._acquired_lock:
         # LOCK_UN drops the lock explicitly before closing the handle
         portalocker.lock(self.lock_file, portalocker.LOCK_UN)
     self.lock_file.close()
     os.remove(self.lockfile_path)
Example #56
0
File: fs.py Project: pyblub/pyload
def lopen(*args, **kwargs):
    """open() wrapper that locks the file with portalocker.

    Keyword option:
        blocking (bool, default True): wait for the lock; when False,
        fail immediately (LOCK_NB) if the lock is held elsewhere.
    All other arguments are forwarded to io.open().
    """
    # BUG FIX: pop() instead of get() — 'blocking' is our option, not
    # io.open's; forwarding it made io.open raise TypeError.
    if kwargs.pop('blocking', True):
        flags = portalocker.LOCK_EX
    else:
        flags = portalocker.LOCK_EX | portalocker.LOCK_NB
    fp = io.open(*args, **kwargs)
    portalocker.lock(fp, flags)
    return fp
Example #57
0
 def lock(self):
     """Lock the wrapped file for writing when not already locked.

     Returns True on a fresh acquisition, False when already held.
     """
     if self.locked:
         return False
     portalocker.lock(self._f, portalocker.LOCK_EX)
     self.locked = True
     return True
Example #58
0
def lopen(*args, **kwargs):
    """open() wrapper that locks the file with portalocker.

    Keyword option:
        blocking (bool, default True): wait for the lock; when False,
        fail immediately (LOCK_NB) if the lock is held elsewhere.
    All other arguments are forwarded to io.open().
    """
    # BUG FIX: pop() instead of get() — 'blocking' is our option, not
    # io.open's; forwarding it made io.open raise TypeError.
    if kwargs.pop('blocking', True):
        flags = portalocker.LOCK_EX
    else:
        flags = portalocker.LOCK_EX | portalocker.LOCK_NB
    fp = io.open(*args, **kwargs)
    portalocker.lock(fp, flags)
    return fp
Example #59
0
def save_storage(storage, filename):
    """Write *storage* as a pickled dict to *filename*, holding an
    exclusive lock for the duration of the dump."""
    handle = open(filename, 'wb')
    try:
        portalocker.lock(handle, portalocker.LOCK_EX)
        cPickle.dump(dict(storage), handle)
        portalocker.unlock(handle)
    finally:
        handle.close()
Example #60
0
 def acquire(self):
     """ Acquire thread and file locks. Also re-opening log file when running
     in 'degraded' mode. """
     # handle thread lock
     Handler.acquire(self)
     # take the cross-process file lock before touching the stream
     lock(self.stream_lock, LOCK_EX)
     if self.stream.closed:
         # 'degraded' mode: the log stream was closed; reopen it
         self._openFile(self.mode)