def get_block_locations(self, path, start=0, length=0):
    """ Fetch physical locations of blocks """
    if not self._handle:
        raise IOError("Filesystem not connected")
    start = int(start) or 0
    length = int(length) or self.info(path)['size']
    nblocks = ctypes.c_int(0)
    with lock_file(lock_name):
        out = _lib.hdfsGetFileBlockLocations(self._handle, ensure_bytes(path),
                                             ctypes.c_int64(start),
                                             ctypes.c_int64(length),
                                             ctypes.byref(nblocks))
    locs = []
    for i in range(nblocks.value):
        block = out[i]
        hosts = [block.hosts[j] for j in range(block.numOfNodes)]
        locs.append({'hosts': hosts, 'length': block.length,
                     'offset': block.offset})
    with lock_file(lock_name):
        _lib.hdfsFreeFileBlockLocations(out, nblocks)
    return locs
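# Note on the hdfs3-style methods in this section: every libhdfs3 call is
# serialized through one file-based lock. `lock_name` is not defined in any
# snippet shown here; it is assumed to be a module-level lock path, along
# these purely illustrative lines:
from locket import lock_file

lock_name = '/tmp/hdfs3.lock'  # assumption: the real path is module-defined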
def the_same_lock_file_object_is_used_for_the_same_path_with_different_arguments(
        lock_path):
    # We explicitly check the same lock is used to ensure that the lock isn't
    # re-entrant, even if the underlying platform lock is re-entrant.
    first_lock = locket.lock_file(lock_path, timeout=None)
    second_lock = locket.lock_file(lock_path, timeout=0)
    assert first_lock._lock is second_lock._lock
def read(self, length=None):
    """ Read bytes from open file """
    with lock_file(lock_name):
        if not _lib.hdfsFileIsOpenForRead(self._handle):
            raise IOError('File not read mode')
    buffers = []

    if length is None:
        # Read the whole file in 64 kB chunks via recursive calls
        out = 1
        while out:
            out = self.read(2**16)
            buffers.append(out)
    else:
        with lock_file(lock_name):
            while length:
                bufsize = min(2**16, length)
                p = ctypes.create_string_buffer(bufsize)
                ret = _lib.hdfsRead(self._fs, self._handle, p,
                                    ctypes.c_int32(bufsize))
                if ret == 0:  # EOF
                    break
                if ret > 0:
                    if ret < bufsize:
                        buffers.append(p.raw[:ret])
                    elif ret == bufsize:
                        buffers.append(p.raw)
                    length -= ret
                else:
                    raise IOError('Read failed on file %s: %s'
                                  % (self.path, -ret))

    return b''.join(buffers)
def thread_cannot_obtain_lock_using_same_path_twice_without_release(lock_path):
    with locket.lock_file(lock_path, timeout=0):
        lock = locket.lock_file(lock_path, timeout=0)
        try:
            lock.acquire()
            assert False, "Expected LockError"
        except locket.LockError:
            pass
def thread_cannot_obtain_lock_using_same_path_with_different_arguments_without_release(
        lock_path):
    lock1 = locket.lock_file(lock_path, timeout=None)
    lock2 = locket.lock_file(lock_path, timeout=0)
    lock1.acquire()
    try:
        lock2.acquire()
        assert False, "Expected LockError"
    except locket.LockError:
        pass
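# A minimal cross-process sketch of the same guarantee, using only the
# documented locket API (lock_file(path, timeout=...), acquire(), release());
# the lock path is illustrative. The child gets LockError while the parent
# holds the lock.
import multiprocessing

import locket


def _try_lock(path, result):
    try:
        locket.lock_file(path, timeout=0).acquire()
        result.put("acquired")
    except locket.LockError:
        result.put("LockError")


if __name__ == "__main__":
    result = multiprocessing.Queue()
    with locket.lock_file("/tmp/example.lock"):
        child = multiprocessing.Process(target=_try_lock,
                                        args=("/tmp/example.lock", result))
        child.start()
        child.join()
    print(result.get())  # -> "LockError"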
def on_disk(
    cls,
    path,
    capacity=None,
    *,
    scorer=None,
):
    """
    An on-disk cache of data from the server

    This is useful to ensure that data is not downloaded repeatedly
    unless it has been updated since the last download.

    This uses file-based locking to ensure consistency when the cache
    is shared by multiple processes.

    Parameters
    ----------
    path : Path or str
        A directory will be created at this path if it does not yet
        exist. It is safe to reuse an existing cache directory and to
        share a cache directory between multiple processes.
    capacity : integer, optional
        e.g. 2e9 to use up to 2 GB of disk space. If None, this will
        consume up to (X - 1 GB) where X is the free space remaining
        on the volume containing `path`.
    scorer : Scorer
        Determines which items to evict from the cache when it grows full.
        See tiled.client.cache.Scorer for example.
    """
    import locket

    path = Path(path)
    path.mkdir(parents=True, exist_ok=True)
    if capacity is None:
        # By default, use (X - 1 GB) where X is the current free space
        # on the volume containing `path`.
        import shutil

        capacity = shutil.disk_usage(path).free - 1e9
    etag_to_content_cache = FileBasedCache(path / "etag_to_content_cache")
    instance = cls(
        capacity,
        url_to_headers_cache=FileBasedUrlCache(path / "url_to_headers_cache"),
        etag_to_content_cache=etag_to_content_cache,
        global_lock=locket.lock_file(path / "global.lock"),
        lock_factory=lambda etag: locket.lock_file(
            path / "etag_to_content_cache" / f"{etag}.lock"
        ),
        state=OnDiskState(path),
        scorer=scorer,
    )
    return instance
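# Hedged usage sketch for the classmethod above ("Cache" is an assumed name
# for `cls`; the path and capacity are illustrative). Each cached entry then
# gets its own "<etag>.lock" file via lock_factory, so concurrent processes
# contend per entry rather than on the single global lock.
cache = Cache.on_disk("/tmp/tiled-cache", capacity=2e9)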
def lock_is_released_by_context_manager_exit(lock_path):
    has_run = False
    # Keep a reference to first_lock so it holds onto the lock
    first_lock = locket.lock_file(lock_path, timeout=0)
    with first_lock:
        pass
    with locket.lock_file(lock_path, timeout=0):
        has_run = True
    assert has_run
def __init__(self,
             repo,
             remote=None,
             git_ssh=None,
             pkey=None,
             cache=None,  # pylint: disable=W0613
             path_for_study_fn=None):
    """Create a GitAction object to interact with a Git repository

    Example:
    gd = GitAction(repo="/home/user/git/foo")

    Note that this requires write access to the git repository
    directory, so it can create a lockfile in the .git directory.
    """
    self.repo = repo
    self.git_dir = os.path.join(repo, '.git')
    self._lock_file = os.path.join(self.git_dir, "API_WRITE_LOCK")
    self._lock_timeout = 30
    self._lock = locket.lock_file(self._lock_file, timeout=self._lock_timeout)
    self.repo_remote = remote
    self.git_ssh = git_ssh
    self.pkey = pkey
    if os.path.isdir("{}/.git".format(self.repo)):
        self.gitdir = "--git-dir={}/.git".format(self.repo)
        self.gitwd = "--work-tree={}".format(self.repo)
    else:  # EJM needs a test?
        raise ValueError('Repo "{repo}" is not a git repo'.format(repo=self.repo))
    if path_for_study_fn is None:
        self.path_for_study_fn = get_filepath_for_simple_id
    else:
        self.path_for_study_fn = path_for_study_fn
def get_measurement(self):
    """ Gets the K30's CO2 concentration in ppmv via UART """
    if not self.serial_device:  # Don't measure if device isn't validated
        return None

    self._co2 = None
    co2 = None
    lock_acquired = False

    # Set up lock
    lock = locket.lock_file(self.k30_lock_file, timeout=120)
    try:
        lock.acquire()
        lock_acquired = True
    except locket.LockError:
        self.logger.error("Could not acquire lock. Breaking for future locking.")
        os.remove(self.k30_lock_file)

    if lock_acquired:
        self.ser.flushInput()
        time.sleep(1)
        self.ser.write(bytearray([0xfe, 0x44, 0x00, 0x08, 0x02, 0x9f, 0x25]))
        time.sleep(.01)
        resp = self.ser.read(7)
        if len(resp) != 0:
            high = resp[3]
            low = resp[4]
            co2 = (high * 256) + low
        lock.release()

    return co2
def __init__(self,
             doc_type,
             repo,
             remote=None,
             git_ssh=None,
             pkey=None,
             cache=None,  # pylint: disable=W0613
             path_for_doc_fn=None,
             max_file_size=None,
             path_for_doc_id_fn=None):
    self.repo = repo
    self.doc_type = doc_type
    self.git_dir = os.path.join(repo, '.git')
    self._lock_file = os.path.join(self.git_dir, "API_WRITE_LOCK")
    self._lock_timeout = 30  # in seconds
    self._lock = locket.lock_file(self._lock_file, timeout=self._lock_timeout)
    self.repo_remote = remote
    self.git_ssh = git_ssh
    self.pkey = pkey
    self.max_file_size = max_file_size
    self.path_for_doc_fn = path_for_doc_fn
    self.path_for_doc_id_fn = path_for_doc_id_fn
    if os.path.isdir("{}/.git".format(self.repo)):
        self.gitdir = "--git-dir={}/.git".format(self.repo)
        self.gitwd = "--work-tree={}".format(self.repo)
    else:  # EJM needs a test?
        raise ValueError('Repo "{repo}" is not a git repo'.format(repo=self.repo))
def chmod(self, path, mode):
    """Change access control of given path

    Exactly what permissions the file will get depends on HDFS
    configurations.

    Parameters
    ----------
    path : string
        file/directory to change
    mode : integer
        As with the POSIX standard, each octal digit refers to
        user-group-all, in that order, with read-write-execute as the
        bits of each group.

    Examples
    --------
    >>> hdfs.chmod('/path/to/file', 0o777)  # make read/writeable to all  # doctest: +SKIP
    >>> hdfs.chmod('/path/to/file', 0o700)  # make read/writeable only to user  # doctest: +SKIP
    >>> hdfs.chmod('/path/to/file', 0o100)  # make read-only to user  # doctest: +SKIP
    """
    if not self.exists(path):
        raise FileNotFoundError(path)
    with lock_file(lock_name):
        out = _lib.hdfsChmod(self._handle, ensure_bytes(path),
                             ctypes.c_short(mode))
    if out != 0:
        msg = ensure_string(_lib.hdfsGetLastError())
        raise IOError("chmod failed on %s %s" % (path, msg))
def get_measurement(self):
    """ Gets the MH-Z19's CO2 concentration in ppmv via UART """
    lock_acquired = False
    self._co2 = None
    co2 = None

    if not self.serial_device:  # Don't measure if device isn't validated
        return None

    # Set up lock
    lock = locket.lock_file(self.mhz19_lock_file, timeout=120)
    try:
        lock.acquire()
        lock_acquired = True
    except locket.LockError:
        self.logger.error(
            "Could not acquire lock. Breaking for future locking.")
        os.remove(self.mhz19_lock_file)

    if lock_acquired:
        self.ser.flushInput()
        time.sleep(1)
        self.ser.write("\xff\x01\x86\x00\x00\x00\x00\x00\x79".encode())
        time.sleep(.01)
        resp = self.ser.read(9)
        if len(resp) != 0:
            high = resp[2]
            low = resp[3]
            co2 = (high * 256) + low
        lock.release()
    else:
        self.logger.error("Could not acquire MHZ19 lock")

    return co2
def read(self):
    """ Take a measurement """
    self._voltage = None
    lock_acquired = False

    try:
        # Set up lock
        lock = locket.lock_file(self.lock_file, timeout=120)
        try:
            lock.acquire()
            lock_acquired = True
        except locket.LockError:
            self.logger.error(
                "Could not acquire lock. Breaking for future locking.")
            os.remove(self.lock_file)

        if lock_acquired:
            sleep(0.1)
            self._voltage = (self.adc.read_adc(self.adc_channel) /
                             1023.0) * self.adc_volts_max
            lock.release()
    except Exception as e:
        self.logger.exception("{cls} raised exception during read(): "
                              "{err}".format(cls=type(self).__name__, err=e))
        return 1  # signal an error to the caller
def seek(self, offset, from_what=0):
    """ Set file read position. Read mode only.

    Attempt to move out of file bounds raises an exception. Note that,
    by the convention in python file seek, offset should be <=0 if
    from_what is 2.

    Parameters
    ----------
    offset : int
        byte location in the file.
    from_what : int 0, 1, 2
        if 0 (default), relative to file start; if 1, relative to current
        location; if 2, relative to file end.

    Returns
    -------
    new position
    """
    if from_what not in {0, 1, 2}:
        raise ValueError('seek mode must be 0, 1 or 2')
    info = self.info()
    if from_what == 1:
        offset = offset + self.tell()
    elif from_what == 2:
        offset = info['size'] + offset
    if offset < 0 or offset > info['size']:
        raise ValueError('Attempt to seek outside file')
    with lock_file(lock_name):
        out = _lib.hdfsSeek(self._fs, self._handle, ctypes.c_int64(offset))
        if out == -1:  # pragma: no cover
            msg = ensure_string(_lib.hdfsGetLastError())
            raise IOError('Seek failed on file %s %s' % (self.path, msg))
    return self.tell()
def query(self, query_str):
    """ Send command and return reply """
    lock_acquired = False
    lock_file_amend = '{lf}.{dev}'.format(
        lf=ATLAS_PH_LOCK_FILE,
        dev=self.serial_device.replace("/", "-"))

    try:
        # Set up lock
        lock = locket.lock_file(lock_file_amend, timeout=120)
        try:
            lock.acquire()
            lock_acquired = True
        except locket.LockError:
            self.logger.error(
                "Could not acquire lock. Breaking for future locking.")
            os.remove(lock_file_amend)

        if lock_acquired:
            self.send_cmd(query_str)
            time.sleep(1.3)
            response = self.read_lines()
            lock.release()
            return response
    except Exception as err:
        self.logger.exception(
            "{cls} raised an exception when taking a reading: "
            "{err}".format(cls=type(self).__name__, err=err))
    return None
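# The K30, MH-Z19, ADC, and Atlas snippets above all share one pattern: try
# to acquire a file lock with a generous timeout, and if that fails, assume
# the previous holder died and remove the stale lock file so the next read
# can proceed. A minimal, hedged sketch of that pattern (the helper name and
# lock path are illustrative, not from any of the projects above):
import os

import locket


def with_serial_lock(lock_path, action, timeout=120):
    """Run `action()` while holding a locket file lock; clear a stale lock."""
    lock = locket.lock_file(lock_path, timeout=timeout)
    try:
        lock.acquire()
    except locket.LockError:
        os.remove(lock_path)  # presume the holder died; clear for next time
        return None
    try:
        return action()
    finally:
        lock.release()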
def mkdir(self, path):
    """ Make directory at path """
    with lock_file(lock_name):
        out = _lib.hdfsCreateDirectory(self._handle, ensure_bytes(path))
    if out != 0:
        msg = ensure_string(_lib.hdfsGetLastError())
        raise IOError('Create directory failed: {}'.format(msg))
def read_block_from_hdfs(filename, offset, length, host=None, port=None,
                         delimiter=None):
    from locket import lock_file
    with lock_file('.lock'):
        hdfs = HDFileSystem(host=host, port=port)
        bytes = hdfs.read_block(filename, offset, length, delimiter)
    return bytes
def ls(self, path, detail=True):
    """ List files at path

    Parameters
    ----------
    path : string/bytes
        location at which to list files
    detail : bool (=True)
        if True, each list item is a dict of file properties;
        otherwise, returns list of filenames
    """
    if not self.exists(path):
        raise FileNotFoundError(path)
    num = ctypes.c_int(0)
    with lock_file(lock_name):
        fi = _lib.hdfsListDirectory(self._handle, ensure_bytes(path),
                                    ctypes.byref(num))
        out = [ensure_string(info_to_dict(fi[i])) for i in range(num.value)]
        _lib.hdfsFreeFileInfo(fi, num.value)
    if detail:
        return out
    else:
        return [o['name'] for o in out]
def tell(self):
    """ Get current byte location in a file """
    with lock_file(lock_name):
        out = _lib.hdfsTell(self._fs, self._handle)
        if out == -1:
            msg = ensure_string(_lib.hdfsGetLastError())
            raise IOError('Tell Failed on file %s %s' % (self.path, msg))
    return out
def mv(self, path1, path2):
    """ Move file at path1 to path2 """
    if not self.exists(path1):
        raise FileNotFoundError(path1)
    with lock_file(lock_name):
        out = _lib.hdfsRename(self._handle, ensure_bytes(path1),
                              ensure_bytes(path2))
    return out == 0
def disconnect(self):
    """ Disconnect from name node """
    if self._handle:
        logger.debug("Disconnect from handle %d",
                     self._handle.contents.filesystem)
        with lock_file(lock_name):
            _lib.hdfsDisconnect(self._handle)
    self._handle = None
def can_use_acquire_and_release_to_control_lock(lock_path):
    has_run = False
    lock = locket.lock_file(lock_path)
    lock.acquire()
    try:
        has_run = True
    finally:
        lock.release()
    assert has_run
def rm(self, path, recursive=True):
    "Use recursive for `rm -r`, i.e., delete directory and contents"
    if not self.exists(path):
        raise FileNotFoundError(path)
    with lock_file(lock_name):
        out = _lib.hdfsDelete(self._handle, ensure_bytes(path),
                              bool(recursive))
    if out != 0:
        msg = ensure_string(_lib.hdfsGetLastError())
        raise IOError('Remove failed on %s %s' % (path, msg))
def info(self, path):
    """ File information (as a dict) """
    if not self.exists(path):
        raise FileNotFoundError(path)
    with lock_file(lock_name):
        fi = _lib.hdfsGetPathInfo(self._handle, ensure_bytes(path)).contents
        out = info_to_dict(fi)
        _lib.hdfsFreeFileInfo(ctypes.byref(fi), 1)
    return ensure_string(out)
def write(self, data):
    """ Write bytes to open file (which must be in w or a mode) """
    data = ensure_bytes(data)
    if not data:
        return
    with lock_file(lock_name):
        if not _lib.hdfsFileIsOpenForWrite(self._handle):
            msg = ensure_string(_lib.hdfsGetLastError())
            raise IOError('File not write mode: {}'.format(msg))
    write_block = 64 * 2**20
    with lock_file(lock_name):
        for offset in range(0, len(data), write_block):
            d = ensure_bytes(data[offset:offset + write_block])
            if not _lib.hdfsWrite(self._fs, self._handle, d, len(d)) == len(d):
                msg = ensure_string(_lib.hdfsGetLastError())
                raise IOError('Write failed on file %s, %s' % (self.path, msg))
    return len(data)
def chown(self, path, owner, group):
    """ Change owner/group """
    if not self.exists(path):
        raise FileNotFoundError(path)
    with lock_file(lock_name):
        out = _lib.hdfsChown(self._handle, ensure_bytes(path),
                             ensure_bytes(owner), ensure_bytes(group))
    if out != 0:
        msg = ensure_string(_lib.hdfsGetLastError())
        raise IOError("chown failed on %s %s" % (path, msg))
def df(self):
    """ Used/free disc space on the HDFS system """
    with lock_file(lock_name):
        cap = _lib.hdfsGetCapacity(self._handle)
        used = _lib.hdfsGetUsed(self._handle)
    return {'capacity': cap,
            'used': used,
            'percent-free': 100 * (cap - used) / cap}
def read_block_from_hdfs(host, port, filename, offset, length, delimiter):
    from hdfs3 import HDFileSystem
    if sys.version_info[0] == 2:
        from locket import lock_file
        with lock_file('.lock'):
            hdfs = HDFileSystem(host=host, port=port)
            bytes = hdfs.read_block(filename, offset, length, delimiter)
    else:
        hdfs = HDFileSystem(host=host, port=port)
        bytes = hdfs.read_block(filename, offset, length, delimiter)
    return bytes
def _set_handle(self):
    with lock_file(lock_name):
        out = _lib.hdfsOpenFile(self._fs, ensure_bytes(self.path),
                                mode_numbers[self.mode], self.buff,
                                ctypes.c_short(self.replication),
                                ctypes.c_int64(self.block_size))
        if not out:
            msg = ensure_string(_lib.hdfsGetLastError())
            raise IOError("Could not open file: %s, mode: %s %s" %
                          (self.path, self.mode, msg))
    self._handle = out
def lock_setup(self):
    # Set up lock
    self.input_lock = locket.lock_file(self.lock_file, timeout=30)
    try:
        self.input_lock.acquire()
        self.pre_output_locked = True
    except locket.LockError:
        self.logger.error("Could not acquire input lock. "
                          "Breaking for future locking.")
        try:
            os.remove(self.lock_file)
        except OSError:
            self.logger.error("Can't delete lock file: "
                              "Lock file doesn't exist.")
def __init__(self, path=None):
    if not path:
        path = tempfile.mkdtemp('.partd')
        self._explicitly_given_path = False
    else:
        self._explicitly_given_path = True
    self.path = path
    if not os.path.exists(path):
        with ignoring(OSError):
            os.makedirs(path)
    self.lock = locket.lock_file(self.filename('.lock'))
    Interface.__init__(self)
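# Hedged sketch of how the instance lock above is typically used: hold the
# file lock around directory mutations so concurrent processes sharing the
# same partd path do not interleave writes. The method body is illustrative,
# not partd's actual implementation:
def append(self, data, **kwargs):
    with self.lock:  # file-based lock: safe across processes
        for key, value in data.items():
            with open(self.filename(key), 'ab') as f:
                f.write(value)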
def __init__(self, repo):
    """Create a GitData object to interact with a Git repository

    Example:
    gd = GitData(repo="/home/user/git/foo")

    Note that this requires write access to the git repository
    directory, so it can create a lockfile in the .git directory.
    """
    self.repo = repo
    self.lock_file = "%s/.git/API_WRITE_LOCK" % self.repo
    self.lock_timeout = 30
    self.lock = locket.lock_file(self.lock_file, timeout=self.lock_timeout)
def run(self):
    try:
        self.running = True
        self.logger.info("Activated in {:.1f} ms".format(
            (timeit.default_timer() - self.thread_startup_timer) * 1000))
        self.ready.set()

        # Set up edge detection
        if self.device == 'EDGE':
            GPIO.setmode(GPIO.BCM)
            GPIO.setup(int(self.gpio_location), GPIO.IN)
            GPIO.add_event_detect(int(self.gpio_location),
                                  self.switch_edge_gpio,
                                  callback=self.edge_detected,
                                  bouncetime=self.switch_bouncetime)

        while self.running:
            # Pause loop to modify conditional statements.
            # Prevents execution of conditional while variables are
            # being modified.
            if self.pause_loop:
                self.verify_pause_loop = True
                while self.pause_loop:
                    time.sleep(0.1)

            if self.force_measurements_trigger:
                self.acquire_measurements_now()
                self.force_measurements_trigger = False

            if self.device not in ['EDGE']:
                now = time.time()
                # Signal that a measurement needs to be obtained
                if now > self.next_measurement and not self.get_new_measurement:
                    self.get_new_measurement = True
                    self.trigger_cond = True
                    while self.next_measurement < now:
                        self.next_measurement += self.period

                # if signaled and a pre output is set up correctly, turn the
                # output on or on for the set duration
                if (self.get_new_measurement and
                        self.pre_output_setup and
                        not self.pre_output_activated):

                    # Set up lock
                    self.input_lock = locket.lock_file(self.lock_file,
                                                       timeout=30)
                    try:
                        self.input_lock.acquire()
                        self.pre_output_locked = True
                    except locket.LockError:
                        self.logger.error("Could not acquire input lock. "
                                          "Breaking for future locking.")
                        try:
                            os.remove(self.lock_file)
                        except OSError:
                            self.logger.error("Can't delete lock file: "
                                              "Lock file doesn't exist.")

                    self.pre_output_timer = time.time() + self.pre_output_duration
                    self.pre_output_activated = True

                    # Only run the pre-output before measurement
                    # Turn on for a duration, measure after it turns off
                    if not self.pre_output_during_measure:
                        output_on = threading.Thread(
                            target=self.control.output_on,
                            args=(self.pre_output_id,
                                  self.pre_output_duration,))
                        output_on.start()

                    # Run the pre-output during the measurement
                    # Just turn on, then off after the measurement
                    else:
                        output_on = threading.Thread(
                            target=self.control.output_on,
                            args=(self.pre_output_id,))
                        output_on.start()

                # If using a pre output, wait for it to complete before
                # querying the input for a measurement
                if self.get_new_measurement:
                    if (self.pre_output_setup and
                            self.pre_output_activated and
                            now > self.pre_output_timer):

                        if self.pre_output_during_measure:
                            # Measure then turn off pre-output
                            self.update_measure()
                            output_off = threading.Thread(
                                target=self.control.output_off,
                                args=(self.pre_output_id,))
                            output_off.start()
                        else:
                            # Pre-output has turned off, now measure
                            self.update_measure()

                        self.pre_output_activated = False
                        self.get_new_measurement = False

                        # release pre-output lock
                        try:
                            if self.pre_output_locked:
                                self.input_lock.release()
                                self.pre_output_locked = False
                        except AttributeError:
                            self.logger.error("Can't release lock: "
                                              "Lock file not present.")
                    elif not self.pre_output_setup:
                        # Pre-output not enabled, just measure
                        self.update_measure()
                        self.get_new_measurement = False

                    # Add measurement(s) to influxdb
                    if self.measurement_success:
                        add_measurements_influxdb(
                            self.unique_id, self.create_measurements_dict())
                        self.measurement_success = False

                self.trigger_cond = False

            time.sleep(self.sample_rate)

        self.running = False

        if self.device == 'EDGE':
            GPIO.setmode(GPIO.BCM)
            GPIO.cleanup(int(self.gpio_location))

        self.logger.info("Deactivated in {:.1f} ms".format(
            (timeit.default_timer() - self.thread_shutdown_timer) * 1000))
    except requests.ConnectionError:
        self.logger.error("Could not connect to influxdb. Check that it "
                          "is running and accepting connections")
    except Exception as except_msg:
        self.logger.exception("Error: {err}".format(err=except_msg))
def __init__(self, path, **kwargs):
    base = os.path.splitext(path)[0]
    self._path = ".".join((base, "lock"))
    self._lock = lock_file(self._path, **kwargs)
def average():
    # look if there is data for this source in the cache
    cachedir = self.config['cachedir'] or gettempdir()
    cachefile = os.path.join(cachedir, 'avg{}.h5'.format(hashargs(s)))
    cachefile = os.path.abspath(cachefile)
    log.debug('cachefile %s', cachefile)

    def average_cached():
        if not self.config['cachedir']:
            raise  # always fail if cache is disabled
        with tables.openFile(cachefile) as cacheh5:
            cachetable = cacheh5.getNode('/data')
            progr_factor = 1.0 / cachetable.nrows / len(expr_data)
            log.info('reading averaged data from cache')
            for row in cachetable.iterrows():
                self.progress = progr_prev + row.nrow * progr_factor
                yield row

    def average_computed():
        try:
            log.debug('creating averaged data cachefile')
            cacheh5 = tables.openFile(cachefile, 'w')
        except:
            log.exception('failed opening %s', cachefile)
            raise RuntimeError('cache for {} in use or corrupt, '
                               'try again in a few seconds'.format(s))
        with cacheh5:
            # use tables col descriptor and append fields rate and count
            log.debug('caching averaged data')
            coldesc = OrderedDict()  # keep the order
            for k in table.colnames:
                d = table.coldescrs[k]
                if isinstance(d, tables.BoolCol):
                    # make bool to float for averaging
                    coldesc[k] = tables.FloatCol(pos=len(coldesc))
                else:
                    coldesc[k] = d
            coldesc['count'] = tables.IntCol(pos=len(coldesc))
            coldesc['weight'] = tables.FloatCol(pos=len(coldesc))
            coldesc['rate'] = tables.FloatCol(pos=len(coldesc))
            cachetable = cacheh5.createTable('/', 'data', coldesc,
                                             'cached data')
            cachetable.attrs.source = s
            cacherow = cachetable.row

            assert 0 < shift <= 1
            it = table.colnames.index('time')  # index of time column
            ta = table[0][it]  # window left edge
            tb = ta + window   # window right edge
            wd = []            # window data
            cols = table.colnames
            wdlen = len(cols) + 1
            fweight = compile_function(weight)

            def append(r):
                wd.append(np.fromiter(chain(r[:], [fweight(r)]),
                                      dtype=np.float, count=wdlen))

            progr_factor = 1.0 / table.nrows / len(expr_data)

            for row in table.iterrows():
                if row[it] < tb:  # add row if in window
                    append(row)
                else:  # calculate av and shift window
                    n = len(wd)
                    if n > 0:
                        wdsum = reduce(lambda a, b: a + b, wd)
                        for i, c in enumerate(cols):
                            cacherow[c] = wdsum[i] / n
                        cacherow['time'] = (ta + tb) * 0.5  # overwrite with interval center
                        cacherow['count'] = n
                        cacherow['weight'] = wdsum[-1] / n
                        cacherow['rate'] = n / window
                        self.progress = progr_prev + row.nrow * progr_factor
                        yield cacherow
                        cacherow.append()

                    ta += shift * window  # shift window
                    tb = ta + window
                    if row[it] >= tb:
                        ta = row[it]  # shift window
                        tb = ta + window
                    if shift == 1:  # windows must be empty
                        wd = []
                    else:  # remove data outside new window
                        wd = filter(lambda x: ta <= x[it] < tb, wd)
                    append(row)

        if not self.config['cachedir']:
            log.debug('removing averaged data cachefile')
            os.remove(cachefile)

    try:
        # try using data from cache
        for x in average_cached():
            yield x
    except:
        # if cache fails
        with lock_file(cachefile + '.lock'):
            try:
                # try cache again (maybe it was populated while waiting
                # for the lock)
                for x in average_cached():
                    yield x
            except:
                # if it fails again, compute the data
                for x in average_computed():
                    yield x
def the_same_lock_file_object_is_used_for_the_same_path(lock_path):
    first_lock = locket.lock_file(lock_path, timeout=0)
    second_lock = locket.lock_file(lock_path, timeout=0)
    assert first_lock is second_lock
def _lock(self):
    _mkdir_p(os.path.dirname(self._path))
    lock_path = "{0}.lock".format(self._path)
    # raise immediately if the lock already exists
    return locket.lock_file(lock_path, timeout=0)
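# Hedged sketch of using _lock above (the surrounding class is assumed, so a
# `store` instance is passed in). With timeout=0, acquisition fails fast with
# LockError instead of blocking:
import locket


def write_safely(store, mutate):
    """Run `mutate()` only if no other process holds the store's lock."""
    try:
        with store._lock():
            mutate()  # mutate files under store._path here
    except locket.LockError:
        raise RuntimeError("another process is writing to this store")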
import os
import sys

import locket

try:
    repo = sys.argv[1]
    git_dir = os.path.join(repo, '.git')
    assert os.path.isdir(git_dir)
except:
    sys.exit('''Expecting a phylesystem shard (or one for collections, amendments, etc.) as the only argument.
This script looks for the .git dir in the first argument, and locks that .git dir
to prevent simultaneous operations by the phylesystem-api. An example usage would be:

    $ ssh api
    $ source venv/bin/activate
    $ cd repo/phylesystem-1_par/phylesystem-1
    $ python ~/repo/peyotl/extras/lock-phylesystem.py .
    ... <Ctrl-D> to release the lock.
''')
lf = os.path.join(git_dir, "API_WRITE_LOCK")
try:
    with locket.lock_file(lf, timeout=10):
        print('Lock acquired. Use Control-D to release')
        try:
            x = sys.stdin.read()
        finally:
            print('Lock released')
        sys.exit(0)
except locket.LockError:
    sys.exit('timeout waiting for lock\n')
def lock_can_be_acquired_with_timeout_of_zero(lock_path):
    has_run = False
    with locket.lock_file(lock_path, timeout=0):
        has_run = True
    assert has_run
def single_process_can_obtain_uncontested_lock(lock_path):
    has_run = False
    with locket.lock_file(lock_path):
        has_run = True
    assert has_run
def different_file_objects_are_used_for_different_paths(lock_path):
    first_lock = locket.lock_file(lock_path, timeout=0)
    second_lock = locket.lock_file(lock_path + "-2", timeout=0)
    assert first_lock is not second_lock
import os
import sys

import locket


def _print(output):
    print(output)
    sys.stdout.flush()


if __name__ == "__main__":
    print(os.getpid())
    lock_path = sys.argv[1]
    if sys.argv[2] == "None":
        timeout = None
    else:
        timeout = float(sys.argv[2])
    lock = locket.lock_file(lock_path, timeout=timeout)
    _print("Send newline to stdin to acquire")
    sys.stdin.readline()
    try:
        lock.acquire()
    except locket.LockError:
        _print("LockError")
        exit(1)
    _print("Acquired")
    _print("Send newline to stdin to release")
    sys.stdin.readline()
    lock.release()
    _print("Released")
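# Hedged sketch of a parent process driving the helper script above over
# stdin/stdout (the "lock_helper.py" filename and lock path are assumptions):
import subprocess
import sys

proc = subprocess.Popen(
    [sys.executable, "lock_helper.py", "/tmp/example.lock", "0"],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
print(proc.stdout.readline(), end="")  # the child's PID
print(proc.stdout.readline(), end="")  # "Send newline to stdin to acquire"
proc.stdin.write("\n")
proc.stdin.flush()
print(proc.stdout.readline(), end="")  # "Acquired" (or "LockError" if held)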
            orig_args[h] = fa_dict[p]
    except:
        raise HTTP(400, T('firstAvailable***ID args must be non-negative integers'))

    INPUT_FILENAME = 'in.nex'
    PROV_FILENAME = 'provenance.json'
    RETURN_ATT_FILENAME = 'bundle_properties.json'
    NEXML_FILENAME = 'out.xml'
    ERR_FILENAME = 'err.txt'
    INPUT_FILEPATH = os.path.join(working_dir, INPUT_FILENAME)
    INP_LOCKFILEPATH = os.path.join(working_dir, INPUT_FILENAME + '.lock')
    RETURN_ATT_FILEPATH = os.path.join(working_dir, RETURN_ATT_FILENAME)
    inpfp = os.path.join(working_dir, INPUT_FILENAME)
    if is_upload:
        with locket.lock_file(INP_LOCKFILEPATH):
            if not os.path.exists(INPUT_FILEPATH):
                if request.vars.file is not None:
                    upf = request.vars.file
                    upload_stream = upf.file
                    filename = upf.filename
                elif request.vars.content is not None:
                    upload_stream = request.vars.content
                    # stream is a bad name, but write_input_files does the write thing.
                    file_extensions = {'nexson': '.json',
                                       'nexus': '.nex',
                                       'nexml': '.nexml',
                                       'newick': '.tre',
                                       'relaxedphyliptree': '.tre'}
                    filename = 'PASTED%s' % (file_extensions.get(inp_format),)
                else:
                    raise HTTP(400, 'Expecting a "file" argument with an input file or a "content" argument with the contents of an input file')
                write_input_files(request, working_dir,
                                  [(INPUT_FILENAME, upload_stream)])
                prov_info = {
                    'filename': filename,
                    'dateTranslated': datetime.datetime.utcnow().isoformat(),