Example #1
def _AttemptPseudoLockRelease(pseudo_lock_fd):
    """Try to release the pseudo lock and return a boolean indicating whether
  the release was succesful.

  This whole operation is guarded with the global cloud storage lock, which
  prevents race conditions that might otherwise cause multiple processes to
  believe they hold the same pseudo lock (see _FileLock for more details).
  """
    pseudo_lock_path = pseudo_lock_fd.name
    try:
        with open(_CLOUD_STORAGE_GLOBAL_LOCK) as global_file:
            with lock.FileLock(global_file, lock.LOCK_EX | lock.LOCK_NB):
                lock.ReleaseFileLock(pseudo_lock_fd)
                pseudo_lock_fd.close()
                try:
                    os.remove(pseudo_lock_path)
                except OSError:
                    # We don't care if the pseudo lock gets removed elsewhere before
                    # we have a chance to do so.
                    pass
                return True
    except (lock.LockException, IOError):
        # We failed to acquire the global cloud storage lock and are thus unable to
        # release the pseudo lock.
        return False
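
In catapult these attempt-style helpers are polled until they succeed. A minimal sketch of such a retry, assuming py_utils.WaitFor (also used by the timeout tests below) and a pseudo_lock_fd obtained from a prior successful _AttemptPseudoLockAcquisition (Example #3):

import py_utils

# Poll the release helper until the global lock is free; WaitFor raises
# py_utils.TimeoutException if this takes longer than 60 seconds.
py_utils.WaitFor(lambda: _AttemptPseudoLockRelease(pseudo_lock_fd),
                 timeout=60)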
Example #2
    def testContextualLock(self):
        tf = tempfile.NamedTemporaryFile(delete=False)
        tf.close()
        temp_status_file = tf.name
        try:
            with open(self.temp_file_path, 'r') as f:
                with lock.FileLock(f, lock.LOCK_EX):
                    # Within this block, accessing self.temp_file_path from
                    # another process should raise an exception.
                    p = multiprocessing.Process(
                        target=_ReadFileWithExclusiveLockNonBlocking,
                        args=(self.temp_file_path, temp_status_file))
                    p.start()
                    p.join()
                    with open(temp_status_file, 'r') as status_f:
                        self.assertEqual('LockException raised', status_f.read())

                # Accessing self.temp_file_path here should not raise an exception.
                p = multiprocessing.Process(
                    target=_ReadFileWithExclusiveLockNonBlocking,
                    args=(self.temp_file_path, temp_status_file))
                p.start()
                p.join()
            with open(temp_status_file, 'r') as f:
                self.assertEqual('LockException was not raised', f.read())
        finally:
            os.remove(temp_status_file)
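
The test relies on a module-level helper that this listing does not show. A plausible sketch, writing the exact status strings the assertions above expect:

def _ReadFileWithExclusiveLockNonBlocking(target_file_path, status_file_path):
    # Child-process helper: try a non-blocking exclusive lock on
    # |target_file_path| and record the outcome for the parent to assert on.
    try:
        with open(target_file_path, 'r') as f:
            with lock.FileLock(f, lock.LOCK_EX | lock.LOCK_NB):
                f.read()
        status = 'LockException was not raised'
    except lock.LockException:
        status = 'LockException raised'
    with open(status_file_path, 'w') as status_f:
        status_f.write(status)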
Example #3
def _AttemptPseudoLockAcquisition(pseudo_lock_path, pseudo_lock_fd_return):
    """Try to acquire the lock and return a boolean indicating whether the attempt
  was successful. If the attempt was successful, pseudo_lock_fd_return, which
  should be an empty array, will be modified to contain a single entry: the file
  descriptor of the (now acquired) lock file.

  This whole operation is guarded with the global cloud storage lock, which
  prevents race conditions that might otherwise cause multiple processes to
  believe they hold the same pseudo lock (see _FileLock for more details).
  """
    pseudo_lock_fd = None
    try:
        with open(_CLOUD_STORAGE_GLOBAL_LOCK) as global_file:
            with lock.FileLock(global_file, lock.LOCK_EX | lock.LOCK_NB):
                # Attempt to acquire the lock in a non-blocking manner. If we block,
                # then we'll cause deadlock because another process will be unable to
                # acquire the cloud storage global lock in order to release the pseudo
                # lock.
                pseudo_lock_fd = open(pseudo_lock_path, 'w')
                lock.AcquireFileLock(pseudo_lock_fd,
                                     lock.LOCK_EX | lock.LOCK_NB)
                pseudo_lock_fd_return.append(pseudo_lock_fd)
                return True
    except (lock.LockException, IOError):
        # We failed to acquire either the global cloud storage lock or the pseudo
        # lock.
        if pseudo_lock_fd:
            pseudo_lock_fd.close()
        return False
Example #4
@contextlib.contextmanager  # needed so the 'yield' below works in a with-statement
def _FileLock(base_path):
    pseudo_lock_path = '%s.pseudo_lock' % base_path
    _CreateDirectoryIfNecessary(os.path.dirname(pseudo_lock_path))

    # We need to make sure no other process is still holding the lock on
    # |base_path| before proceeding to create |pseudo_lock_path|. Otherwise,
    # |pseudo_lock_path| may be deleted by that other process after we create
    # it in this process.
    while os.path.exists(pseudo_lock_path):
        time.sleep(0.1)

    # Guard creating and locking |pseudo_lock_path| with the global lock to
    # make sure there is no race condition on creating the file.
    with open(_CLOUD_STORAGE_GLOBAL_LOCK) as global_file:
        with lock.FileLock(global_file, lock.LOCK_EX):
            fd = open(pseudo_lock_path, 'w')
            lock.AcquireFileLock(fd, lock.LOCK_EX)
    try:
        yield
    finally:
        lock.ReleaseFileLock(fd)
        try:
            fd.close()
            os.remove(pseudo_lock_path)
        except OSError:
            # We don't care if the pseudo-lock gets removed elsewhere before we have
            # a chance to do so.
            pass
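
With the contextlib.contextmanager decorator shown above, _FileLock is used as an ordinary with-statement guard. A minimal usage sketch (the path is hypothetical):

# Serialize cross-process access to a download target; only one process at
# a time runs the body for this base path.
with _FileLock('/tmp/example_download'):
    pass  # e.g. check freshness and rewrite '/tmp/example_download'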
Example #5
    def testGetPseudoLockUnavailableCausesTimeout(self):
        with tempfile.NamedTemporaryFile(suffix='.pseudo_lock') as pseudo_lock_fd:
            with lock.FileLock(pseudo_lock_fd, lock.LOCK_EX | lock.LOCK_NB):
                with self.assertRaises(py_utils.TimeoutException):
                    file_path = pseudo_lock_fd.name.replace('.pseudo_lock', '')
                    cloud_storage.GetIfChanged(file_path,
                                               cloud_storage.PUBLIC_BUCKET)
Example #6
    def testGetGlobalLockUnavailableCausesTimeout(self):
        with open(_CLOUD_STORAGE_GLOBAL_LOCK_PATH) as global_lock_fd:
            with lock.FileLock(global_lock_fd, lock.LOCK_EX | lock.LOCK_NB):
                tmp_dir = tempfile.mkdtemp()
                try:
                    file_path = os.path.join(tmp_dir, 'foo')
                    with self.assertRaises(py_utils.TimeoutException):
                        cloud_storage.GetIfChanged(file_path,
                                                   cloud_storage.PUBLIC_BUCKET)
                finally:
                    shutil.rmtree(tmp_dir)
Example #7
def _trace_enable(log_file=None):
    global _enabled
    if _enabled:
        raise TraceException("Already enabled")
    if not _control_allowed:
        raise TraceException("Tracing control not allowed in child processes.")
    _enabled = True
    global _log_file
    if log_file is None:
        if sys.argv[0] == '':
            n = 'trace_event'
        else:
            n = sys.argv[0]
        log_file = open("%s.json" % n, "ab", False)
        _note("trace_event: tracelog name is %s.json" % n)
    elif isinstance(log_file, basestring):
        _note("trace_event: tracelog name is %s" % log_file)
        log_file = open("%s" % log_file, "ab", False)
    elif not hasattr(log_file, 'fileno'):
        raise TraceException(
            "Log file must be None, a string, or file-like object with a fileno()"
        )

    _log_file = log_file
    with lock.FileLock(_log_file, lock.LOCK_EX):
        _log_file.seek(0, os.SEEK_END)

        lastpos = _log_file.tell()
        creator = lastpos == 0
        if creator:
            _note("trace_event: Opened new tracelog, lastpos=%i", lastpos)
            _log_file.write('[')

            # Fall back to the process id when the thread has no ident.
            tid = threading.current_thread().ident
            if not tid:
                tid = os.getpid()
            x = {
                "ph": "M",
                "category": "process_argv",
                "pid": os.getpid(),
                "tid": tid,
                "ts": trace_time.Now(),
                "name": "process_argv",
                "args": {
                    "argv": sys.argv
                }
            }
            _log_file.write("%s\n" % json.dumps(x))
        else:
            _note("trace_event: Opened existing tracelog")
        _log_file.flush()
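
Distilled, _trace_enable's locking idiom is: take an exclusive lock on the shared log, seek to the end, and treat position zero as "this process created the file". A condensed sketch of that multi-writer append pattern, using the same lock module as the examples above (the function name is hypothetical):

import json
import os

def _append_record(log_file, record):
    # The exclusive lock makes the seek-to-end and write atomic with
    # respect to other processes sharing this log file.
    with lock.FileLock(log_file, lock.LOCK_EX):
        log_file.seek(0, os.SEEK_END)
        if log_file.tell() == 0:
            log_file.write('[')  # first writer opens the JSON array
        else:
            log_file.write(',\n')
        log_file.write(json.dumps(record))
        log_file.flush()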
Example #8
File: log.py Project: nick331999/catapult
def _flush(close=False):
    global _log_file
    with lock.FileLock(_log_file, lock.LOCK_EX):
        _log_file.seek(0, os.SEEK_END)
        if _cur_events:
            _write_cur_events()
        if close:
            _write_footer()
        _log_file.flush()

    if close:
        _note("trace_event: Closed")
        _log_file.close()
        _log_file = None
    else:
        _note("trace_event: Flushed")
Example #9
File: log.py Project: sretineni/catapult
def _trace_enable(log_file=None, format=None):
    global _format
    _format = format
    global _enabled
    if _enabled:
        raise TraceException("Already enabled")
    if not _control_allowed:
        raise TraceException("Tracing control not allowed in child processes.")
    _enabled = True
    global _log_file
    if log_file is None:
        if sys.argv[0] == '':
            n = 'trace_event'
        else:
            n = sys.argv[0]
        if _format == PROTOBUF:
            log_file = open("%s.pb" % n, "ab", False)
        else:
            log_file = open("%s.json" % n, "ab", False)
    elif isinstance(log_file, basestring):
        log_file = open("%s" % log_file, "ab", False)
    elif not hasattr(log_file, 'fileno'):
        raise TraceException(
            "Log file must be None, a string, or file-like object with a fileno()"
        )

    _note("trace_event: tracelog name is %s" % log_file)

    _log_file = log_file
    with lock.FileLock(_log_file, lock.LOCK_EX):
        _log_file.seek(0, os.SEEK_END)

        lastpos = _log_file.tell()
        creator = lastpos == 0
        if creator:
            _note("trace_event: Opened new tracelog, lastpos=%i", lastpos)
            _write_header()
        else:
            _note("trace_event: Opened existing tracelog")
        _log_file.flush()
    # Monkeypatch in our process replacement for the multiprocessing.Process class
    if multiprocessing.Process != multiprocessing_shim.ProcessShim:
        multiprocessing.Process = multiprocessing_shim.ProcessShim
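
The shim swap is what makes the "child processes" guard at the top meaningful: children created via multiprocessing go through a wrapper that knows about tracing. The real multiprocessing_shim is not shown in this listing; the sketch below is only the general shape of such a shim, with a hypothetical child-side setup helper:

import multiprocessing

class ProcessShim(multiprocessing.Process):
    # Hypothetical sketch only, not catapult's implementation.
    def run(self):
        _trace_enable_in_child()  # assumed helper; not part of this listing
        multiprocessing.Process.run(self)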
def _flush(close=False):
    global _log_file
    with lock.FileLock(_log_file, lock.LOCK_EX):
        _log_file.seek(0, os.SEEK_END)
        if _cur_events:
            _log_file.write(",\n")
            _log_file.write(",\n".join([json.dumps(e) for e in _cur_events]))
            del _cur_events[:]

        if close:
            # We might not be the only process writing to this logfile, so
            # simply close the file rather than writing the trailing ] that
            # the format technically requires. The trace viewer understands
            # that this may happen and will insert a trailing ] during
            # loading.
            pass
        _log_file.flush()

    if close:
        _note("trace_event: Closed")
        _log_file.close()
        _log_file = None
    else:
        _note("trace_event: Flushed")