def test_deferred_flushing(self, adaptive, level):
    # Time logging of 1000 messages with a slow handler. This should take
    # at least 10 seconds with standard logging handlers, but only a
    # fraction of the time with deferred flushing.
    target = Handler(0.01)
    with threaded_handler(1000, target, adaptive=adaptive) as (handler, logger):
        handler.start()

        def worker(n):
            for i in range(100):
                logger.log(level, "thread %02d:%03d", n, i)

        start = time.time()
        concurrent.tmap(worker, range(10))
        elapsed = time.time() - start

        # All messages should be logged.
        self.assertEqual(len(target.messages), 1000)

        # This takes 0.09 seconds on my laptop. Use more time to avoid
        # random failures on an overloaded slave.
        self.assertLess(elapsed, 1.0)
        print("Logged %d messages in %.2f seconds"
              % (len(target.messages), elapsed))
def test_deferred_flushing(self, adaptive, level):
    # Time logging of 1000 messages with a slow handler. This should take
    # at least 10 seconds with standard logging handlers, but only a
    # fraction of the time with deferred flushing.
    target = Handler(0.01)
    with threaded_handler(
            1000, target, adaptive=adaptive) as (handler, logger):
        handler.start()

        def worker(n):
            for i in range(100):
                logger.log(level, "thread %02d:%03d", n, i)

        start = time.time()
        # pylint: disable=range-builtin-not-iterating
        concurrent.tmap(worker, range(10))
        elapsed = time.time() - start

        # All messages should be logged.
        self.assertEqual(len(target.messages), 1000)

        # This takes 0.09 seconds on my laptop. Use more time to avoid
        # random failures on an overloaded slave.
        self.assertLess(elapsed, 1.0)
        print("Logged %d messages in %.2f seconds" % (
            len(target.messages), elapsed))
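Both variants above measure the same idea: with deferred flushing, callers enqueue records and return immediately while a single background thread feeds the slow handler. Here is a minimal sketch of that idea using only the standard library's QueueHandler/QueueListener; SlowHandler is a hypothetical stand-in for the test's Handler(delay) fixture, not vdsm code.

# A minimal deferred-flushing sketch: records are enqueued by QueueHandler
# and drained by a QueueListener thread, so callers never block on the
# slow target handler.
import logging
import logging.handlers
import queue
import time

class SlowHandler(logging.Handler):
    # Hypothetical stand-in for the test's Handler(delay) fixture.
    def __init__(self, delay):
        logging.Handler.__init__(self)
        self.delay = delay
        self.messages = []

    def emit(self, record):
        time.sleep(self.delay)  # simulate a slow target (disk, syslog, ...)
        self.messages.append(record.getMessage())

log_queue = queue.Queue()
logger = logging.getLogger("deferred-example")
logger.addHandler(logging.handlers.QueueHandler(log_queue))

target = SlowHandler(0.01)
listener = logging.handlers.QueueListener(log_queue, target)
listener.start()

logger.warning("logged without waiting for the slow handler")
listener.stop()  # drains the queue before returning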
def test_slow_handler(self, adaptive, level):
    # Test that logging threads are not delayed by a slow handler.
    target = Handler(0.1)
    with threaded_handler(
            10, target, adaptive=adaptive) as (handler, logger):
        handler.start()

        def worker(n):
            start = time.time()
            logger.log(level, "thread %02d", n)
            return time.time() - start

        results = concurrent.tmap(worker, iter(range(10)))
        workers_time = [r.value for r in results]

        # All messages should be logged.
        self.assertEqual(len(target.messages), 10)

        # No thread should be delayed.
        # Here is a typical (sorted) result:
        # [0.000039, 0.000071, 0.000076, 0.000086, 0.000112, 0.000191,
        #  0.000276, 0.000285, 0.000413, 0.000590]
        print("workers_time %s" % workers_time)
        self.assertLess(max(workers_time), 0.1)
def test_slow_handler(self, adaptive, level):
    # Test that logging threads are not delayed by a slow handler.
    target = Handler(0.1)
    with threaded_handler(
            10, target, adaptive=adaptive) as (handler, logger):
        handler.start()

        def worker(n):
            start = time.time()
            logger.log(level, "thread %02d", n)
            return time.time() - start

        # pylint: disable=range-builtin-not-iterating
        results = concurrent.tmap(worker, range(10))
        workers_time = [r.value for r in results]

        # All messages should be logged.
        self.assertEqual(len(target.messages), 10)

        # No thread should be delayed.
        # Here is a typical (sorted) result:
        # [0.000039, 0.000071, 0.000076, 0.000086, 0.000112, 0.000191,
        #  0.000276, 0.000285, 0.000413, 0.000590]
        print("workers_time %s" % workers_time)
        self.assertLess(max(workers_time), 0.1)
def test_results_order(self):
    def func(x):
        time.sleep(x)
        return x

    values = tuple(random.random() * 0.1 for x in range(10))
    results = concurrent.tmap(func, values)
    expected = [concurrent.Result(True, x) for x in values]
    self.assertEqual(results, expected)
def test_error(self):
    error = RuntimeError("No result for you!")

    def func(x):
        raise error

    results = concurrent.tmap(func, range(10))
    expected = [concurrent.Result(False, error)] * 10
    self.assertEqual(results, expected)
def connect_all(cls, prep_cons):
    results = []
    logins = []

    # Prepare connections and set up iSCSI nodes serially. These
    # operations happen on the host and are very fast, so there is no
    # need to run them in parallel. Also, running them concurrently
    # could cause locking issues when multiple threads try to access
    # the local iSCSI database.
    for con in prep_cons:
        try:
            con.setup_node()
        except Exception as err:
            log.error(
                "Could not configure connection to %s and iface %s: %s",
                con.target, con.iface, err)
            status, _ = cls.translate_error(err)
            results.append((con, status))
        else:
            logins.append(con)

    if not logins:
        return results

    # Log in to the nodes in parallel. These operations happen on the
    # remote iSCSI server, and if some targets are not available, the
    # operation can take quite some time (120 seconds by default) and
    # can cause the engine command to time out. Logging in to the
    # targets in parallel should mitigate this issue.
    max_workers = cls.max_workers(logins)

    log.info("Log in to %s targets using %s workers",
             len(logins), max_workers)

    def iscsi_login(con):
        try:
            iscsi.loginToIscsiNode(con.iface, con.target)
            return con, 0
        except Exception as e:
            log.exception("Could not login to target")
            status, _ = cls.translate_error(e)
            return con, status

    login_results = concurrent.tmap(
        iscsi_login, logins, max_workers=max_workers, name="iscsi-login")

    for res in login_results:
        results.append(res.value)

    # Wait for all new devices to be settled.
    cls.settle_devices()

    return results
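connect_all follows a pattern worth naming: run cheap local steps serially, fan the slow remote step out through concurrent.tmap, and have each worker return an (item, status) pair instead of raising, so every Result.value is usable as-is. A minimal sketch under those assumptions; prepare() and login() are hypothetical placeholders, not vdsm APIs, and only the tmap semantics shown in this listing are assumed.

# Sketch of the serial-prepare / parallel-remote pattern used above.
# Assumes the concurrent module from these snippets is importable.
import time

def prepare(item):
    # Hypothetical fast local setup; fails for invalid items.
    if item < 0:
        raise ValueError("bad item %s" % item)

def login(item):
    # Hypothetical slow remote call; returns (item, status) instead of
    # raising so tmap results never carry exceptions.
    time.sleep(0.1)
    return item, 0

def connect_pattern(items):
    ready, results = [], []

    # Serial phase: fast and touching local state, not worth parallelizing.
    for item in items:
        try:
            prepare(item)
            ready.append(item)
        except Exception:
            results.append((item, 1))

    # Parallel phase: slow remote I/O, bounded by max_workers.
    for res in concurrent.tmap(login, ready, max_workers=4, name="login"):
        results.append(res.value)

    return results

print(connect_pattern([-1, 1, 2, 3]))  # [(-1, 1), (1, 0), (2, 0), (3, 0)]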
def test_error(self):
    error = RuntimeError("No result for you!")

    def func(x):
        raise error

    # pylint: disable=range-builtin-not-iterating
    results = concurrent.tmap(func, range(10))
    expected = [concurrent.Result(False, error)] * 10
    self.assertEqual(results, expected)
def test_errors(self):
    error = RuntimeError("No result for you!")

    def func(x):
        raise error

    results = list(concurrent.tmap(func, iter(range(10))))
    expected = [concurrent.Result(False, error)] * 10
    assert results == expected
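The error tests above pin down tmap's failure contract: a worker's exception is captured and returned as Result(False, error) rather than propagating out of tmap. A hedged sketch of a consumer relying only on that Result(succeeded, value) contract; func here raises for odd values just to produce both kinds of results.

# Consuming tmap results by branching on Result.succeeded.
# Assumes the concurrent module from these snippets is importable.
def func(x):
    if x % 2:
        raise RuntimeError("odd value: %d" % x)
    return x * 10

for res in concurrent.tmap(func, range(4)):
    if res.succeeded:
        print("value:", res.value)   # the worker's return value
    else:
        print("failed:", res.value)  # the exception instance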
def test_thread_name(self):
    thread_names = set()
    barrier = concurrent.Barrier(4)

    def func(x):
        # Ensure that all threads are used.
        barrier.wait(1)
        thread_names.add(threading.current_thread().name)

    list(concurrent.tmap(func, [1, 2, 3, 4], name="test"))

    assert thread_names == {"test/0", "test/1", "test/2", "test/3"}
def scanDomains(pattern="*"):
    log = logging.getLogger("storage.scanDomains")

    mntList = _getMountsList(pattern)

    def collectMetaFiles(mountPoint):
        try:
            # Remove the path to the data center's mount directory from
            # the mount point.
            if mountPoint.startswith(sc.REPO_MOUNT_DIR):
                client_name = mountPoint[len(sc.REPO_MOUNT_DIR):]

            # Since glob treats values between brackets as character
            # ranges, and since IPv6 addresses contain brackets, we should
            # escape the mountPoint that we pass to glob.
            # <data-center>/mnt/mountpoint/<uuid>/dom_md
            mdPattern = os.path.join(
                glob_escape(mountPoint),
                UUID_GLOB_PATTERN,
                sd.DOMAIN_META_DATA)
            metaFiles = oop.getProcessPool(client_name).glob.glob(mdPattern)

            for metaFile in metaFiles:
                if (os.path.basename(os.path.dirname(metaFile)) !=
                        sd.MASTER_FS_DIR):
                    sdUUID = os.path.basename(os.path.dirname(metaFile))
                    return (sdUUID, os.path.dirname(metaFile))
        except Exception:
            log.warn("Could not collect metadata file for domain path %s",
                     mountPoint, exc_info=True)

    # Run collectMetaFiles in external processes.
    # The number of processes that can be started at the same time is the
    # number of stuck domains we are willing to handle + 1.
    # We use 30% of the available slots.
    # TODO: calculate it right; for now we use the same value as max
    # processes per domain.
    for res in concurrent.tmap(
            collectMetaFiles, mntList, max_workers=oop.HELPERS_PER_DOMAIN):
        if res.value is None:
            continue
        yield res.value
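The glob escaping in scanDomains matters because glob treats "[...]" as a character set, so an unescaped IPv6 mount point never matches its literal path. A small stdlib illustration using glob.escape, which plays the same role as vdsm's glob_escape; the mount point below is a made-up example.

# Why IPv6 mount points must be escaped before globbing.
import glob
import os

mount_point = "/rhev/data-center/mnt/[fe80::1]:_export"

raw = os.path.join(mount_point, "*", "dom_md")
safe = os.path.join(glob.escape(mount_point), "*", "dom_md")

print(raw)   # "[fe80::1]" is parsed as a character set: wrong matches
print(safe)  # "[" escaped as "[[]", so the path matches literally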
def test_max_workers(self, values, max_workers, actual_workers):
    workers = set()
    done = threading.Event()
    barrier = concurrent.Barrier(actual_workers)

    def func(x):
        # Ensure that all threads are used.
        if not done.is_set():
            barrier.wait(1)
            done.set()
        workers.add(threading.current_thread().ident)

    list(concurrent.tmap(
        func, iter(range(values)), max_workers=max_workers))

    assert len(workers) == actual_workers
def test_results_iter(self):
    for res in concurrent.tmap(lambda x: x, [1, 2, 3, 4]):
        assert res.succeeded
def test_results(self):
    values = tuple(range(10))
    results = concurrent.tmap(lambda x: x, values)
    expected = [concurrent.Result(True, x) for x in values]
    self.assertEqual(results, expected)
def test_concurrency(self):
    start = monotonic_time()
    list(concurrent.tmap(time.sleep, [0.5] * 10))
    elapsed = monotonic_time() - start
    assert 0.5 <= elapsed < 1.0
def test_concurrency(self):
    start = time.time()
    concurrent.tmap(time.sleep, [0.5] * 10)
    elapsed = time.time() - start
    self.assertGreater(elapsed, 0.5)
    self.assertLess(elapsed, 1.0)
def test_invalid_max_workers(self):
    with pytest.raises(ValueError):
        list(concurrent.tmap(lambda x: x, [1], max_workers=0))
def test_no_values(self):
    results = list(concurrent.tmap(lambda x: x, []))
    assert results == []
def test_concurrency(self):
    start = monotonic_time()
    concurrent.tmap(time.sleep, [0.5] * 10)
    elapsed = monotonic_time() - start
    self.assertGreaterEqual(elapsed, 0.5)
    self.assertLess(elapsed, 1.0)
def test_results(self):
    values = tuple(range(10))
    results = set(concurrent.tmap(lambda x: x, values))
    expected = set(concurrent.Result(True, x) for x in values)
    assert results == expected
def test_many_values(self, max_workers):
    results = concurrent.tmap(
        lambda x: x,
        itertools.repeat(True, 1000),
        max_workers=max_workers)
    assert all(r.value for r in results)