def test_children(self):
    mon = PerformanceMonitor('test')
    mon1 = mon('child1')
    mon2 = mon('child2')
    with mon1:
        time.sleep(0.1)
    with mon2:
        time.sleep(0.1)
    mon.flush()
    data = mon.collect_performance()
    total_time = data['time_sec'].sum()
    self.assertGreaterEqual(total_time, 0.2)
def test_children(self):
    mon = PerformanceMonitor('test', tempfile.mkdtemp())
    mon1 = mon('child1')
    mon2 = mon('child2')
    with mon1:
        time.sleep(0.1)
    with mon2:
        time.sleep(0.1)
    mon.flush()
    data = mon.collect_performance()
    total_time = data['time_sec'].sum()
    self.assertGreaterEqual(total_time, 0.2)
    shutil.rmtree(mon.monitor_dir)
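# The pattern exercised by the two tests above, as a standalone sketch:
# calling a monitor returns a named child usable as a context manager, and
# the parent aggregates the timings. The import path below is an assumption;
# it varies across engine versions (in later ones the class is renamed
# Monitor).
import time
from openquake.baselib.performance import PerformanceMonitor  # assumed path

mon = PerformanceMonitor('demo')
with mon('slow-step'):   # time a named child block
    time.sleep(0.1)
with mon('fast-step'):
    time.sleep(0.01)
mon.flush()  # persist the measurements collected so far
data = mon.collect_performance()  # table with a 'time_sec' column per child
print(data['time_sec'].sum())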
def get_data_transfer(dstore):
    """
    Determine the amount of data transferred from the controller node
    to the workers and back in a classical calculation.

    :param dstore: a :class:`openquake.commonlib.datastore.DataStore` instance
    :returns: (block_info, to_send_forward, to_send_back)
    """
    oqparam = OqParam.from_(dstore.attrs)
    sitecol = dstore['sitecol']
    rlzs_assoc = dstore['rlzs_assoc']
    info = dstore['job_info']
    sources = dstore['composite_source_model'].get_sources()
    num_gsims_by_trt = groupby(rlzs_assoc, operator.itemgetter(0),
                               lambda group: sum(1 for row in group))
    gsims_assoc = rlzs_assoc.gsims_by_trt_id
    to_send_forward = 0
    to_send_back = 0
    block_info = []
    for block in split_in_blocks(sources, oqparam.concurrent_tasks or 1,
                                 operator.attrgetter('weight'),
                                 operator.attrgetter('trt_model_id')):
        # each task sends back n_sites * n_levels * n_imts floats per GSIM
        # of the block's tectonic region type
        num_gsims = num_gsims_by_trt.get(block[0].trt_model_id, 0)
        back = info['n_sites'] * info['n_levels'] * info['n_imts'] * num_gsims
        to_send_back += back * 8  # 8 bytes per float
        # the forward transfer is estimated from the pickled task arguments
        args = (block, sitecol, gsims_assoc, PerformanceMonitor(''))
        to_send_forward += sum(len(p) for p in parallel.pickle_sequence(args))
        block_info.append((len(block), block.weight))
    return numpy.array(block_info, block_dt), to_send_forward, to_send_back
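# A worked instance of the back-transfer estimate above, with made-up sizes:
n_sites, n_levels, n_imts, num_gsims = 1000, 20, 2, 4  # hypothetical values
floats_per_block = n_sites * n_levels * n_imts * num_gsims  # 160,000 floats
print(floats_per_block * 8)  # 1,280,000 bytes, i.e. ~1.3 MB back per block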
def info(name, filtersources=False, weightsources=False, report=False):
    """
    Give information. You can pass the name of an available calculator,
    a job.ini file, or a zip archive with the input files.
    """
    logging.basicConfig(level=logging.INFO)
    with PerformanceMonitor('info', measuremem=True) as mon:
        if report:
            print('Generated', reportwriter.build_report(name))
        else:
            _info(name, filtersources, weightsources)
    if mon.duration > 1:
        print(mon)
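# The function above is presumably wired to a command-line entry point, but
# it can also be called directly from Python; a sketch with hypothetical
# argument values:
info('classical')             # describe an available calculator by name
info('job.ini', report=True)  # build a report for a job file and print its path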
def get_calc(self, testfile, job_ini, **kw):
    """
    Return a calculator instance for the given testfile and job_ini
    """
    self.testdir = (os.path.dirname(testfile) if os.path.isfile(testfile)
                    else testfile)
    inis = [os.path.join(self.testdir, ini) for ini in job_ini.split(',')]
    params = readinput.get_params(inis)
    params.update(kw)
    oq = oqvalidation.OqParam(**params)
    oq.validate()
    # change this when debugging the test
    monitor = PerformanceMonitor(self.testdir)
    return base.calculators(oq, monitor)
def filter_sources(sources, sitecol, maxdist):
    """
    Filter a list of hazardlib sources according to the maximum distance.

    :param sources: the original sources
    :param sitecol: a SiteCollection instance
    :param maxdist: maximum distance
    :returns: the filtered sources ordered by source_id
    """
    mon = PerformanceMonitor('filter sources')
    if len(sources) * len(sitecol) > LOTS_OF_SOURCES_SITES:
        # filter in parallel on all available cores
        sources = parallel.TaskManager.apply_reduce(
            _filter_sources, (sources, sitecol, maxdist, mon),
            operator.add, [])
    else:
        # few sources and sites, filter sequentially on a single core
        sources = _filter_sources.task_func(sources, sitecol, maxdist, mon)
    return sorted(sources, key=operator.attrgetter('source_id'))
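# Usage sketch: `srcs` and `sites` are assumed to come from the usual source
# model and site collection readers (both names hypothetical); the maximum
# distance is in km. Whether the filtering runs in parallel or sequentially
# is decided by the len(srcs) * len(sites) product against the threshold.
close = filter_sources(srcs, sites, 200.0)
print('%d of %d sources kept' % (len(close), len(srcs)))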
@classmethod
def setUpClass(cls):
    tmpdir = tempfile.mkdtemp()
    cls.mon = PerformanceMonitor('test', tmpdir)
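# A matching cleanup hook, as a sketch: it assumes the monitor keeps the
# directory passed to its constructor as `monitor_dir`, which is what the
# test_children variant above relies on when calling shutil.rmtree.
@classmethod
def tearDownClass(cls):
    shutil.rmtree(cls.mon.monitor_dir)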