def execute(self):
    """
    Build the loss maps (per realization and, when there is more than
    one realization, per statistic) from the stored loss ratios, then
    build the aggregate loss curves if an aggregate loss table is present.

    NOTE(review): this method carefully closes and reopens the parent
    datastore around the parallel phase; do not reorder these calls.
    """
    # build loss maps only when both the loss ratios and the
    # conditional loss PoEs are available
    if ('all_loss_ratios' in self.datastore
            and self.oqparam.conditional_loss_poes):
        assetcol = self.assetcol
        rlzs = self.rlzs_assoc.realizations
        stats = self.oqparam.risk_stats()
        builder = self.riskmodel.curve_builder
        A = len(assetcol)  # number of assets
        R = len(self.datastore['realizations'])  # number of realizations
        # create loss_maps datasets
        self.datastore.create_dset(
            'loss_maps-rlzs', builder.loss_maps_dt, (A, R), fillvalue=None)
        if R > 1:
            # statistics are meaningful only with more than one realization
            self.datastore.create_dset(
                'loss_maps-stats', builder.loss_maps_dt, (A, len(stats)),
                fillvalue=None)
        mon = self.monitor('loss maps')
        if self.oqparam.hazard_calculation_id and (
                'asset_loss_table' in self.datastore.parent):
            Starmap = parallel.Starmap  # we can parallelize fully
            lrgetter = riskinput.LossRatiosGetter(self.datastore.parent)
            # avoid OSError: Can't read data (Wrong b-tree signature):
            # the parent must be closed before the workers read from it
            self.datastore.parent.close()
        else:  # there is a single datastore
            # we cannot read from it in parallel while writing
            Starmap = parallel.Sequential
            lrgetter = riskinput.LossRatiosGetter(self.datastore)
        Starmap.apply(build_loss_maps,
                      (assetcol, builder, lrgetter, rlzs, stats, mon),
                      self.oqparam.concurrent_tasks).reduce(
                          self.save_loss_maps)
        if self.oqparam.hazard_calculation_id:
            # reopen the parent that was closed before the parallel phase
            self.datastore.parent.open()
    # build an aggregate loss curve per realization
    if 'agg_loss_table' in self.datastore:
        self.build_agg_curve()
def execute(self):
    """
    Parallelize on the riskinputs and return a dictionary of results.
    Require a `.core_task` to be defined with signature
    (riskinputs, riskmodel, rlzs_assoc, monitor).
    """
    if not hasattr(self, 'riskinputs'):  # in the reportwriter
        return
    task_args = (self.riskinputs, self.riskmodel, self.param,
                 self.monitor())
    smap = Starmap.apply(
        self.core_task.__func__, task_args,
        concurrent_tasks=self.oqparam.concurrent_tasks or 1,
        weight=get_weight)
    return smap.reduce(self.combine)
def execute(self):
    """
    Parallelize on the riskinputs and return a dictionary of results.
    Require a `.core_task` to be defined with signature
    (riskinputs, riskmodel, rlzs_assoc, monitor).
    """
    if not hasattr(self, 'riskinputs'):  # in the reportwriter
        return
    num_tasks = self.oqparam.concurrent_tasks or 1
    allargs = (self.riskinputs, self.riskmodel, self.param, self.monitor())
    acc = Starmap.apply(
        self.core_task.__func__, allargs,
        concurrent_tasks=num_tasks,
        weight=get_weight,
    ).reduce(self.combine)
    return acc
def pfilter(self, sources, monitor):
    """
    Filter the sources in parallel by using Starmap.apply.

    :param sources: a sequence of sources
    :param monitor: a Monitor instance
    :returns: a dictionary src_group_id -> sources
    """
    sources_by_grp = Starmap.apply(
        prefilter, (sources, self, monitor),
        distribute=self.distribute,
        name=self.__class__.__name__).reduce()
    Starmap.shutdown()  # close the processpool
    Starmap.init()  # reopen it when necessary
    # sort each group to avoid task ordering issues; the loop variable
    # no longer shadows the `sources` parameter (previous bug-prone code)
    for srcs in sources_by_grp.values():
        srcs.sort(key=operator.attrgetter('source_id'))
    return sources_by_grp