def __init__(self, conf=None):
    """Initialize the in-memory backend.

    :param conf: optional configuration mapping; the ``deep_copy`` key
        (default ``True``) controls whether stored objects are deep copied
        by the fake filesystem.
    """
    super(MemoryBackend, self).__init__(conf)
    # Base class may leave the path unset; default to the filesystem root.
    if self._path is None:
        self._path = os.sep
    self.memory = FakeFilesystem(deep_copy=self._conf.get('deep_copy', True))
    # Guards concurrent access to the in-memory filesystem.
    self.lock = lock_utils.ReaderWriterLock()
def test_no_double_writers(self):
    """Hammer the write lock from many threads; no two may hold it at once."""
    lock = lock_utils.ReaderWriterLock()
    watch = timing.StopWatch(duration=5)
    watch.start()
    overlaps = collections.deque()
    holders = collections.deque()

    def try_hold(who):
        with lock.write_lock():
            # Anyone already recorded as active means two writers overlapped;
            # capture both so the final assertion can report them.
            if holders:
                overlaps.append(who)
                overlaps.extend(holders)
            holders.append(who)
            try:
                time.sleep(random.random() / 100)
            finally:
                holders.remove(who)

    def runner():
        me = threading.current_thread()
        while not watch.expired():
            try_hold(me)

    workers = [threading_utils.daemon_thread(runner)
               for _ in range(0, self.THREAD_COUNT)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    self.assertEqual([], list(overlaps))
    self.assertEqual([], list(holders))
def _spawn_variation(readers, writers, max_workers=None):
    """Run the given numbers of reader/writer tasks and collect time windows.

    Returns a ``(writer_times, reader_times)`` tuple of ``(start, stop)``
    timestamp pairs, one per task that ran.
    """
    timeline = collections.deque()
    lock = lock_utils.ReaderWriterLock()

    def reader_task():
        with lock.read_lock():
            timeline.append(('r', time.time(), time.time()))

    def writer_task():
        with lock.write_lock():
            timeline.append(('w', time.time(), time.time()))

    if max_workers is None:
        # Negative counts contribute zero workers.
        max_workers = max(0, readers) + max(0, writers)
    if max_workers > 0:
        with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            for _ in range(0, readers):
                executor.submit(reader_task)
            for _ in range(0, writers):
                executor.submit(writer_task)

    snapshot = list(timeline)
    writer_times = [(begin, end)
                    for (kind, begin, end) in snapshot if kind == 'w']
    reader_times = [(begin, end)
                    for (kind, begin, end) in snapshot if kind != 'w']
    return (writer_times, reader_times)
def test_writer_chaotic(self):
    """Writers that raise must still release the lock for later acquirers."""
    lock = lock_utils.ReaderWriterLock()
    activated = collections.deque()

    def chaotic_writer(blow_up):
        with lock.write_lock():
            if blow_up:
                raise RuntimeError("Broken")
            else:
                activated.append(lock.owner)

    def happy_reader():
        with lock.read_lock():
            activated.append(lock.owner)

    with futures.ThreadPoolExecutor(max_workers=20) as executor:
        for idx in range(0, 20):
            if idx % 2:
                executor.submit(happy_reader)
            else:
                # Half of the ten writers (those divisible by four) explode.
                executor.submit(chaotic_writer, blow_up=bool(idx % 4 == 0))

    self.assertEqual(5, sum(1 for owner in activated if owner == 'w'))
    self.assertEqual(10, sum(1 for owner in activated if owner == 'r'))
def test_double_reader_writer(self):
    """A nested read lock still works while a writer is pending."""
    lock = lock_utils.ReaderWriterLock()
    activated = collections.deque()
    active = threading.Event()

    def double_reader():
        with lock.read_lock():
            active.set()
            # Spin until a writer has queued up behind us.
            while not lock.has_pending_writers:
                time.sleep(0.001)
            with lock.read_lock():
                activated.append(lock.owner)

    def happy_writer():
        with lock.write_lock():
            activated.append(lock.owner)

    reader = threading.Thread(target=double_reader)
    reader.start()
    # Don't start the writer until the reader definitely holds the lock.
    active.wait()

    writer = threading.Thread(target=happy_writer)
    writer.start()

    for worker in (reader, writer):
        worker.join()

    self.assertEqual(2, len(activated))
    self.assertEqual(['r', 'w'], list(activated))
def __init__(self, conf):
    """Initialize the directory backend.

    :param conf: configuration mapping; the ``encoding`` key (default
        ``'utf-8'``) selects the file encoding used by this backend.
    :raises ValueError: if the configured path is empty/falsy.
    """
    super(DirBackend, self).__init__(conf)
    # Cache of file contents, keyed by path (populated elsewhere).
    self.file_cache = {}
    self.encoding = self._conf.get('encoding', 'utf-8')
    if not self._path:
        raise ValueError("Empty path is disallowed")
    # Normalize to an absolute path so later joins/compares are stable.
    self._path = os.path.abspath(self._path)
    # Guards concurrent access to the backing directory.
    self.lock = lock_utils.ReaderWriterLock()
def test_reader_abort(self):
    """An exception raised under a read lock must release ownership."""
    lock = lock_utils.ReaderWriterLock()
    self.assertFalse(lock.owner)

    def blow_up():
        with lock.read_lock():
            self.assertEqual(lock.READER, lock.owner)
            raise RuntimeError("Broken")

    self.assertRaises(RuntimeError, blow_up)
    # Ownership must have been dropped despite the exception.
    self.assertFalse(lock.owner)
def test_double_reader(self):
    """Read locks are reentrant and state resets once all are released."""
    lock = lock_utils.ReaderWriterLock()
    with lock.read_lock():
        self.assertTrue(lock.is_reader())
        self.assertFalse(lock.is_writer())
        with lock.read_lock():
            self.assertTrue(lock.is_reader())
        # The outer read lock is still held after the inner one exits.
        self.assertTrue(lock.is_reader())
    self.assertFalse(lock.is_reader())
    self.assertFalse(lock.is_writer())
def test_writer_to_reader(self):
    """Acquiring a read lock while holding the write lock is rejected."""
    lock = lock_utils.ReaderWriterLock()

    def grab_read():
        with lock.read_lock():
            pass

    with lock.write_lock():
        self.assertRaises(RuntimeError, grab_read)
        self.assertFalse(lock.is_reader())

    self.assertFalse(lock.is_reader())
    self.assertFalse(lock.is_writer())
def test_single_reader_writer(self):
    """A single thread can read, then write, then read its own write."""
    results = []
    lock = lock_utils.ReaderWriterLock()
    with lock.read_lock():
        self.assertTrue(lock.is_reader())
        self.assertEqual(0, len(results))
    with lock.write_lock():
        results.append(1)
        self.assertTrue(lock.is_writer())
        with lock.read_lock():
            # Downgraded read still sees the write made above.
            self.assertTrue(lock.is_reader())
            self.assertEqual(1, len(results))
    self.assertFalse(lock.is_reader())
    self.assertFalse(lock.is_writer())
def test_writer_to_reader(self):
    """A writer may also take a read lock; it stays the writer throughout."""
    lock = lock_utils.ReaderWriterLock()

    def nested_reader():
        with lock.read_lock():
            # While nested we appear as both writer and reader.
            self.assertTrue(lock.is_writer())
            self.assertTrue(lock.is_reader())

    with lock.write_lock():
        self.assertIsNone(nested_reader())
        self.assertFalse(lock.is_reader())

    self.assertFalse(lock.is_reader())
    self.assertFalse(lock.is_writer())
def _spawn_variation(readers, writers, max_workers=None):
    """Spawn timed reader/writer workloads and return their time windows.

    Each task sleeps for a work period while holding the lock, records a
    ``(kind, start, stop)`` entry, then naps briefly. Returns the tuple
    ``(writer_times, reader_times)`` of ``(start, stop)`` pairs.
    """
    timeline = collections.deque()
    lock = lock_utils.ReaderWriterLock()

    def read_func(ident):
        with lock.read_lock():
            # TODO(harlowja): sometime in the future use a monotonic clock
            # here to avoid problems that can be caused by ntpd resyncing
            # the clock while we are actively running.
            begin = now()
            time.sleep(WORK_TIMES[ident % len(WORK_TIMES)])
            finish = now()
            timeline.append((lock.READER, begin, finish))
            time.sleep(NAPPY_TIME)

    def write_func(ident):
        with lock.write_lock():
            begin = now()
            time.sleep(WORK_TIMES[ident % len(WORK_TIMES)])
            finish = now()
            timeline.append((lock.WRITER, begin, finish))
            time.sleep(NAPPY_TIME)

    if max_workers is None:
        # Negative counts contribute zero workers.
        max_workers = max(0, readers) + max(0, writers)
    if max_workers > 0:
        with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            ident = 0
            for _ in range(0, readers):
                executor.submit(read_func, ident)
                ident += 1
            for _ in range(0, writers):
                executor.submit(write_func, ident)
                ident += 1

    snapshot = list(timeline)
    writer_times = [(begin, end)
                    for (kind, begin, end) in snapshot
                    if kind == lock.WRITER]
    reader_times = [(begin, end)
                    for (kind, begin, end) in snapshot
                    if kind != lock.WRITER]
    return (writer_times, reader_times)
def __init__(self, flow_detail, backend=None, scope_fetcher=None):
    """Initialize storage bound to the given flow detail.

    :param flow_detail: flow detail object whose atom details are indexed
        and whose failures are cached locally.
    :param backend: optional persistence backend; when omitted an in-memory
        backend (with deep copying disabled) is created and the flow detail
        is saved into it immediately.
    :param scope_fetcher: optional callable mapping an atom name to its
        visibility scope; defaults to a callable returning ``None``.
    """
    self._result_mappings = {}
    self._reverse_mapping = {}
    if backend is None:
        # Err on the likely-hood that most people don't make their
        # objects able to be deepcopyable (resources, locks and such
        # can't be deepcopied)...
        backend = impl_memory.MemoryBackend({'deep_copy': False})
        with contextlib.closing(backend.get_connection()) as conn:
            conn.update_flow_details(flow_detail, ignore_missing=True)
    self._backend = backend
    self._flowdetail = flow_detail
    # Transient (non-persisted) values and externally injected arguments.
    self._transients = {}
    self._injected_args = {}
    self._lock = lock_utils.ReaderWriterLock()
    # Pairs of (type tuple, ensure-callable) used to dispatch on atom type.
    self._ensure_matchers = [
        ((task.BaseTask, ), self._ensure_task),
        ((retry.Retry, ), self._ensure_retry),
    ]
    if scope_fetcher is None:
        scope_fetcher = lambda atom_name: None
    self._scope_fetcher = scope_fetcher
    # NOTE(imelnikov): failure serialization loses information,
    # so we cache failures here, in an atom name -> failure mapping.
    self._failures = {}
    for ad in self._flowdetail:
        if ad.failure is not None:
            self._failures[ad.name] = ad.failure
    self._atom_name_to_uuid = dict(
        (ad.name, ad.uuid) for ad in self._flowdetail)
    try:
        # Pick up result mappings previously produced by the injector task,
        # if one already exists in this flow detail.
        injector_td = self._atomdetail_by_name(
            self.injector_name, expected_type=logbook.TaskDetail)
    except exceptions.NotFound:
        pass
    else:
        names = six.iterkeys(injector_td.results)
        self._set_result_mapping(injector_td.name,
                                 dict((name, name) for name in names))
def test_no_concurrent_readers_writers(self):
    """Random mix of readers/writers: a writer never overlaps anyone."""
    lock = lock_utils.ReaderWriterLock()
    watch = timing.StopWatch(duration=5)
    watch.start()
    overlaps = collections.deque()
    holders = collections.deque()

    def acquire_check(who, as_reader):
        lock_func = lock.read_lock if as_reader else lock.write_lock
        with lock_func():
            if not as_reader:
                # A writer must be alone; record anyone else present so
                # that the final assertions can blow up the test.
                if holders:
                    overlaps.append(who)
                    overlaps.extend(holders)
            holders.append(who)
            try:
                time.sleep(random.random() / 100)
            finally:
                holders.remove(who)

    def runner():
        me = threading.current_thread()
        while not watch.expired():
            acquire_check(me, random.choice([True, False]))

    workers = [threading_utils.daemon_thread(runner)
               for _ in range(0, self.THREAD_COUNT)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    self.assertEqual([], list(overlaps))
    self.assertEqual([], list(holders))
def test_double_reader_abort(self):
    """Nested readers that explode must not block subsequent writers."""
    lock = lock_utils.ReaderWriterLock()
    activated = collections.deque()

    def double_bad_reader():
        with lock.read_lock():
            with lock.read_lock():
                raise RuntimeError("Broken")

    def happy_writer():
        with lock.write_lock():
            activated.append(lock.owner)

    with futures.ThreadPoolExecutor(max_workers=20) as executor:
        for idx in range(0, 20):
            if idx % 2:
                executor.submit(happy_writer)
            else:
                executor.submit(double_bad_reader)

    # All ten writers must have gotten through despite the broken readers.
    self.assertEqual(10, sum(1 for owner in activated if owner == 'w'))
def __init__(self):
    """Initialize an empty data mapping guarded by a reader/writer lock."""
    self._data, self._lock = {}, lu.ReaderWriterLock()
def __init__(self, conf=None):
    """Initialize the in-memory backend.

    :param conf: optional configuration mapping passed to the base class.
    """
    super(MemoryBackend, self).__init__(conf)
    self._memory = _Memory()
    # Helper wraps the memory store; must be built after it.
    self._helper = _MemoryHelper(self._memory)
    # Guards concurrent access to the in-memory store.
    self._lock = lock_utils.ReaderWriterLock()