def test_exception(self):
    CobaConfig.Logger = BasicLogger(MemorySink())

    list(MultiprocessFilter([ExceptionFilter()], 2, 1).filter(range(4)))

    for item in CobaConfig.Logger.sink.items:
        self.assertIn("Unexpected exception:", item)
def test_multiprocess_sleeping_task(self):
    start_time = time.time()
    list(MultiprocessFilter([SleepingFilter()], 2, 1).filter([2, 2, 0.25, 0.25]))
    end_time = time.time()

    # With two worker processes the two 2-second sleeps run in parallel,
    # followed by the two 0.25-second sleeps, for roughly 2.25 seconds of
    # wall clock versus 4.5 seconds serially, so under 4 proves parallelism.
    self.assertLess(end_time - start_time, 4)
def test_not_picklable_sans_reduce(self):
    CobaConfig.Logger = BasicLogger(MemorySink())

    list(MultiprocessFilter([ProcessNameFilter()], 2, 1).filter([NotPicklableFilter()]))

    self.assertEqual(1, len(CobaConfig.Logger.sink.items))
    self.assertIn("pickle", CobaConfig.Logger.sink.items[0])
def test_logging(self):
    # This is an important example: even if we set the main logger's
    # with_stamp to False, the setting doesn't propagate to the child processes.
    logger_sink = MemorySink()
    logger      = IndentLogger(logger_sink, with_stamp=False, with_name=True)

    CobaConfig.Logger = logger

    items = list(MultiprocessFilter([ProcessNameFilter()], 2, 1).filter(range(4)))

    self.assertEqual(len(logger_sink.items), 4)
    self.assertEqual(items, [l.split(' ')[3] for l in logger_sink.items])
    self.assertEqual(items, [l.split(' ')[-1] for l in logger_sink.items])
def evaluate(self, learners: Sequence[Learner], result_file: str = None, seed: int = 1) -> Result:
    """Collect observations of a Learner playing the benchmark's simulations to calculate Results.

    Args:
        learners: The collection of learners that we'd like to evaluate.
        result_file: The file we'd like to use for writing/restoring results for the requested evaluation.
        seed: The random seed we'd like to use when choosing which action to take from the learner's predictions.

    Returns:
        See the base class for more information.
    """
    restored = Result.from_file(result_file) if result_file and Path(result_file).exists() else Result()

    n_given_learners    = len(learners)
    n_given_simulations = len(self._simulations)

    if len(restored.benchmark) != 0:
        assert n_given_learners    == restored.benchmark['n_learners'   ], "The currently evaluating benchmark doesn't match the given transaction log"
        assert n_given_simulations == restored.benchmark['n_simulations'], "The currently evaluating benchmark doesn't match the given transaction log"

    preamble = []
    preamble.append(Transaction.version())
    preamble.append(Transaction.benchmark(n_given_learners, n_given_simulations))
    preamble.extend(Transaction.learners(learners))
    preamble.extend(Transaction.simulations(self._simulations))

    cb = self._chunk_by         if self._chunk_by             else CobaConfig.Benchmark['chunk_by']
    mp = self._processes        if self._processes            else CobaConfig.Benchmark['processes']
    mt = self._maxtasksperchild if self._maxtasksperchild_set else CobaConfig.Benchmark['maxtasksperchild']

    tasks            = Tasks(self._simulations, learners, seed)
    unfinished       = Unfinished(restored)
    chunked          = ChunkByTask() if cb == 'task' else ChunkByNone() if cb == 'none' else ChunkBySource()
    process          = Transactions()
    transaction_sink = TransactionSink(result_file, restored)

    if mp > 1 or mt is not None:
        process = MultiprocessFilter([process], mp, mt)  # type: ignore

    try:
        Pipe.join(MemorySource(preamble), [], transaction_sink).run()
        Pipe.join(tasks, [unfinished, chunked, process], transaction_sink).run()
    except KeyboardInterrupt:
        CobaConfig.Logger.log("Benchmark evaluation was manually aborted via Ctrl-C")
    except CobaFatal:
        raise
    except Exception as ex:
        CobaConfig.Logger.log_exception(ex)

    return transaction_sink.result
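# A minimal usage sketch for evaluate() above, assuming a Benchmark built
# from a sequence of simulations and a learner implementing the Learner
# interface (the construction shown is illustrative, not taken from this
# file):
#
#   benchmark = Benchmark(simulations)
#   result    = benchmark.evaluate([my_learner], result_file="eval.log", seed=1)
#
# Because evaluate() restores from result_file whenever that file exists, and
# filters already-finished work through Unfinished, re-running the same call
# resumes an interrupted evaluation instead of starting over.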
def test_multiprocess_singletask(self):
    items = list(MultiprocessFilter([ProcessNameFilter()], 2, 1).filter(range(4)))

    # With maxtasksperchild=1 every item is handled by a fresh worker process,
    # so all four reported process names should be distinct.
    self.assertEqual(len(set(items)), 4)
def test_function(self):
    # Smoke test: picklable items should pass through the worker processes
    # without raising.
    list(MultiprocessFilter([ProcessNameFilter()], 2, 1).filter([Test()] * 2))
def test_empty_list(self):
    items = list(MultiprocessFilter([ProcessNameFilter()], 1, 1).filter([]))
    self.assertEqual(len(items), 0)
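# The helper filters exercised by the tests above are not defined in this
# section. Below is a minimal sketch of what they might look like, inferred
# from how the tests use them; the names match the tests, but the bodies are
# illustrative guesses rather than the library's actual implementations.

import time
import multiprocessing

class ProcessNameFilter:
    # Yields the name of the worker process handling each item, letting the
    # tests count how many distinct processes did the work.
    def filter(self, items):
        for _ in items:
            yield multiprocessing.current_process().name

class SleepingFilter:
    # Sleeps for each item's value (in seconds) so tests can verify that the
    # work is actually spread across processes.
    def filter(self, items):
        for item in items:
            time.sleep(item)
            yield item

class ExceptionFilter:
    # Raises on every item; the "Unexpected exception:" text asserted in
    # test_exception is assumed to be prepended by the logger's exception
    # handling, not produced by this filter itself.
    def filter(self, items):
        raise Exception("Test exception")

class NotPicklableFilter:
    # Holds a lambda, which the pickle module cannot serialize, and defines
    # no __reduce__ to work around that, so sending it to a worker fails.
    def __init__(self):
        self._f = lambda: None

class Test:
    # A plain picklable object passed through the pipe as an item in
    # test_function.
    pass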