Example #1
0
def next_sequence_index(shot_basedir, dt, increment=True):
    """Return the next sequence index for sequences in the given base directory (i.e.
    <experiment_shot_storage>/<script_basename>) and the date of the given datetime
    object, and increment the sequence index atomically on disk if increment=True. If
    not setting increment=True, then the result is indicative only and may be used by
    other code at any time. One must increment the sequence index prior to use."""
    from labscript_utils.ls_zprocess import Lock
    from labscript_utils.shared_drive import path_to_agnostic

    DATE_FORMAT = '%Y-%m-%d'
    # Hoist the invariant date string; it is needed both for the comparison and
    # (when incrementing) for the rewrite of the file:
    datestr_today = dt.strftime(DATE_FORMAT)
    # The file where we store the next sequence index on disk:
    sequence_index_file = os.path.join(shot_basedir, '.next_sequence_index')
    # Open with zlock to prevent race conditions with other code:
    with Lock(path_to_agnostic(sequence_index_file), read_only=not increment):
        try:
            with open(sequence_index_file) as f:
                # File contents are a JSON list: [date string, next index]
                datestr, sequence_index = json.load(f)
            if datestr != datestr_today:
                # New day, start from zero again:
                sequence_index = 0
        except FileNotFoundError:
            # File doesn't exist yet, start from zero. FileNotFoundError is the
            # idiomatic equivalent of catching OSError and testing errno.ENOENT.
            sequence_index = 0
        if increment:
            # Write the new file with the incremented sequence index. makedirs
            # with exist_ok=True is the stdlib equivalent of mkdir_p:
            os.makedirs(os.path.dirname(sequence_index_file), exist_ok=True)
            with open(sequence_index_file, 'w') as f:
                json.dump([datestr_today, sequence_index + 1], f)
        return sequence_index
Example #2
0
File: __init__.py  Project: synqs/blacs
 def mainloop(self):
     """Consume events from self.event_queue until a 'close' event arrives.

     On each 'shot complete' event, pop and delete the oldest files from
     self.delete_queue until at most self.n_shots_to_keep remain. Runs in a
     separate thread so the queue isn't slowed down waiting on network
     communication to acquire the file lock.
     """
     while True:
         try:
             event = self.event_queue.get()
             if event == 'close':
                 break
             elif event == 'shot complete':
                 while True:
                     # Check the queue length and pop it under the same lock:
                     # checking outside the lock (as before) races with other
                     # threads and can make pop(0) raise IndexError.
                     with self.delete_queue_lock:
                         if len(self.delete_queue) <= self.n_shots_to_keep:
                             break
                         h5_filepath = self.delete_queue.pop(0)
                     # Acquire a lock on the file so that we don't
                     # delete it whilst someone else has it open:
                     with Lock(path_to_agnostic(h5_filepath)):
                         try:
                             os.unlink(h5_filepath)
                             # Lazy %-args: only formatted if the level is enabled.
                             logger.info("Deleted repeated shot file %s",
                                         h5_filepath)
                         except OSError:
                             logger.exception(
                                 "Couldn't delete shot file %s",
                                 h5_filepath)
             else:
                 raise ValueError(event)
         except Exception:
             # Top-level boundary of the worker thread: log and keep running.
             logger.exception(
                 "Exception in repeated shot deletion loop, ignoring.")
 def __init__(self, name, mode=None, driver=None, libver=None, **kwds):
     """Open the HDF5 file, holding the kill lock (suppresses SIGTERM) and a
     zlock on the file's network-agnostic path for as long as it is open.

     An ObjectID `name` means we're wrapping an already-open HDF5 object, in
     which case no locks are taken."""
     if not isinstance(name, h5py._objects.ObjectID):
         kwargs = {}
         if mode == 'r':
             # Read-only file access only needs a shared (read-only) zlock:
             kwargs['read_only'] = True
         # Do not terminate upon SIGTERM while the file is open:
         self.kill_lock = kill_lock
         self.kill_lock.acquire()
         try:
             # Ask other zlock users not to open the file while we have it open:
             self.zlock = Lock(path_to_agnostic(name), **kwargs)
             self.zlock.acquire()
         except BaseException:
             # Bugfix: if the zlock can't be created or acquired, release the
             # kill lock rather than leaving SIGTERM suppressed forever.
             self.kill_lock.release()
             raise
     try:
         _File.__init__(self, name, mode, driver, libver, **kwds)
     except BaseException:
         # Opening failed: release whichever locks we took above.
         # (hasattr guards for the ObjectID case, where none were taken.)
         if hasattr(self, 'zlock'):
             self.zlock.release()
         if hasattr(self, 'kill_lock'):
             self.kill_lock.release()
         raise
class File(_File):
    """h5py File subclass that holds the kill lock (suppressing SIGTERM) and a
    zlock on the file's network-agnostic path for as long as the file is open."""

    def __init__(self, name, mode=None, driver=None, libver=None, **kwds):
        if not isinstance(name, h5py._objects.ObjectID):
            kwargs = {}
            if mode == 'r':
                # Read-only file access only needs a shared (read-only) zlock:
                kwargs['read_only'] = True
            # Do not terminate upon SIGTERM while the file is open:
            self.kill_lock = kill_lock
            self.kill_lock.acquire()
            try:
                # Ask other zlock users not to open the file while we have it open:
                self.zlock = Lock(path_to_agnostic(name), **kwargs)
                self.zlock.acquire()
            except BaseException:
                # Bugfix: if the zlock can't be created or acquired, release
                # the kill lock rather than leaving SIGTERM suppressed forever.
                self.kill_lock.release()
                raise
        try:
            _File.__init__(self, name, mode, driver, libver, **kwds)
        except BaseException:
            # Opening failed: release whichever locks we took above.
            # (hasattr guards for the ObjectID case, where none were taken.)
            if hasattr(self, 'zlock'):
                self.zlock.release()
            if hasattr(self, 'kill_lock'):
                self.kill_lock.release()
            raise

    def close(self):
        """Close the file and release the locks, even if closing raises."""
        try:
            _File.close(self)
        finally:
            # Delete the attributes after releasing so that a second call to
            # close() does not attempt to release the locks again:
            if hasattr(self, 'zlock'):
                self.zlock.release()
                del self.zlock
            if hasattr(self, 'kill_lock'):
                self.kill_lock.release()
                del self.kill_lock

    # Overriding __exit__ is crucial. Since h5py.File.__exit__() holds h5py's
    # library-wide lock "phil", it calls close() whilst holding that lock. Our close()
    # method does not need the lock (h5py.File.close() does, but it acquires it itself
    # as needed), but this means that we're holding phil when we call
    # kill_lock.release(), which, in order to be thread-safe, attempts to acquire
    # kill_lock._lock, briefly. If another thread holds kill_lock._lock at the time, and
    # whilst holding it, Python garbage collection runs in that thread, and there are
    # HDF5 objects waiting to be deallocated, then Python deadlocks. This is because the
    # deallocation in h5py is done while holding phil. But phil is held by our close()
    # method, so deallocation must wait for our close method to return. However our
    # close method is waiting to acquire kill_lock._lock, which will not be released by
    # the other thread until garbage collection is complete. Python hangs.
    #
    # The solution is simply not to hold phil in __exit__. It does not appear to be
    # necessary. I will report this as an issue in h5py, and will remove this workaround
    # if it is fixed in a future version.
    def __exit__(self, *args):
        self.close()