Example #1
0
    def compute_risk(self):
        """
        Generate the GMFs and optionally the hazard curves too, then
        compute the risk.

        One GetterBuilder is created per risk model, each inside its own
        'job_init' transaction; taxonomies whose assets cannot be
        associated to sites are logged and skipped.  Work is then split
        by sites and distributed via tasks.map_reduce.
        """
        getter_builders = []
        risk_models = []
        with self.monitor('associating assets<->sites'):
            for risk_model in self.risk_models.itervalues():
                logs.LOG.info('associating assets<->sites for taxonomy %s',
                              risk_model.taxonomy)
                try:
                    with db.transaction.commit_on_success(using='job_init'):
                        gbuilder = GetterBuilder(self.rc, risk_model.taxonomy)
                        getter_builders.append(gbuilder)
                        risk_models.append(risk_model)
                except AssetSiteAssociationError as e:
                    # no asset<->site association for this taxonomy:
                    # warn and move on to the next risk model
                    logs.LOG.warn(str(e))
                    continue

        # notice that here the commit is really needed, since
        # combine_builders save the loss curve containers on the db
        with db.transaction.commit_on_success(using='job_init'):
            outputdict = writers.combine_builders(
                [ob(self) for ob in self.output_builders])

        args = []
        # compute the risk by splitting by sites
        for sites in split_site_collection(
                self.hc.site_collection, self.concurrent_tasks):
            args.append((self.job.id, sites, self.rc,
                         risk_models, getter_builders, outputdict,
                         self.calculator_parameters))
        self.acc = tasks.map_reduce(event_based_fr, args, self.agg_result, {})
Example #2
0
 def task_arg_gen(self):
     """
     Generate the arguments to be submitted to run_subtasks, one tuple
     per taxonomy, ordered so that taxonomies with fewer assets are
     submitted first.
     """
     containers = writers.combine_builders(
         [builder(self) for builder in self.output_builders])
     pairs = [(n_assets, tax) for tax, n_assets
              in self.taxonomies_asset_count.iteritems()]
     pairs.sort()
     for n_assets, tax in pairs:
         yield self.job.id, self, tax, n_assets, containers
Example #3
0
 def execute(self):
     """
     Drive the distribution strategy: build the output containers,
     fan build_getters out over the (counts, taxonomy) pairs (smallest
     taxonomies first) and reduce the partial results into self.acc.
     """
     builders = [ob(self) for ob in self.output_builders]
     self.outputdict = writers.combine_builders(builders)
     pairs = [(n_assets, tax) for tax, n_assets
              in self.taxonomies_asset_count.iteritems()]
     pairs.sort()
     self.acc = tasks.apply_reduce(
         build_getters, (self.job.id, pairs, self),
         lambda acc, otm: otm.aggregate_results(self.agg_result, acc),
         self.acc, self.concurrent_tasks)
Example #4
0
    def prepare_risk(self):
        """
        Associate assets and sites.
        """
        builders = [ob(self) for ob in self.output_builders]
        self.outputdict = writers.combine_builders(builders)

        # build the initializers hazard -> risk, ordering the
        # (counts, taxonomy) pairs so smaller taxonomies go first
        pairs = [(n_assets, tax) for tax, n_assets
                 in self.taxonomies_asset_count.iteritems()]
        pairs.sort()
        tasks.apply_reduce(prepare_risk, (pairs, self, self.monitor),
                           concurrent_tasks=self.concurrent_tasks)
Example #5
0
    def prepare_risk(self):
        """
        Associate assets and sites and for some calculator generate the
        epsilons.
        """
        builders = [ob(self) for ob in self.output_builders]
        self.outputdict = writers.combine_builders(builders)

        # build the initializers hazard -> risk; sort the
        # (counts, taxonomy) pairs so smaller taxonomies come first
        pairs = [(n_assets, tax) for tax, n_assets
                 in self.taxonomies_asset_count.iteritems()]
        pairs.sort()
        tasks.apply_reduce(prepare_risk, (self.job.id, pairs, self.rc),
                           concurrent_tasks=self.concurrent_tasks)
Example #6
0
    def task_arg_gen(self, block_size):
        """
        Generator function for creating the arguments for each task.

        It is responsible for the distribution strategy: the considered
        exposure is divided into chunks of homogeneous assets (i.e.
        having the same taxonomy), each chunk holding at most
        `block_size` assets.

        :param int block_size:
            The number of work items per task (sources, sites, etc.).

        :returns:
            An iterator over argument lists, each one containing the job
            id, the risklib calculation units for the asset chunk, the
            output containers to be populated and the calculator
            parameter set.

        :raises RuntimeError:
            If the number of generated tasks disagrees with
            `self.expected_tasks(block_size)`.
        """
        containers = writers.combine_builders(
            [ob(self) for ob in self.output_builders])

        generated = 0
        for taxonomy, n_assets in self.taxonomies_asset_count.items():
            for offset in range(0, n_assets, block_size):
                with logs.tracing("getting assets"):
                    chunk = models.ExposureData.objects.get_asset_chunk(
                        self.rc, taxonomy, offset, block_size)

                units = [self.calculation_unit(loss_type, chunk)
                         for loss_type in models.loss_types(self.risk_models)]

                generated += 1
                yield [self.job.id, units, containers,
                       self.calculator_parameters]

        # sanity check to protect against future changes of the distribution
        # logic
        expected = self.expected_tasks(block_size)
        if generated != expected:
            raise RuntimeError('Expected %d tasks, generated %d!' %
                               (expected, generated))
Example #7
0
    def prepare_risk(self):
        """
        Associate assets and sites and for some calculator generate the
        epsilons.
        """
        self.outputdict = writers.combine_builders(
            [builder(self) for builder in self.output_builders])

        # build the initializers hazard -> risk; taxonomies with fewer
        # assets are processed first
        by_count = sorted(
            (n_assets, tax)
            for tax, n_assets in self.taxonomies_asset_count.iteritems())
        tasks.apply_reduce(prepare_risk, (by_count, self, self.monitor),
                           concurrent_tasks=self.concurrent_tasks)
Example #8
0
    def task_arg_gen(self, block_size):
        """
        Generator function for creating the arguments for each task.

        Implements the distribution strategy: the exposure is split into
        chunks of homogeneous assets (same taxonomy), at most
        `block_size` assets per chunk, and one argument list is yielded
        per chunk.

        :param int block_size:
            The number of work items per task (sources, sites, etc.).

        :returns:
            An iterator over argument lists, each containing the job id,
            the risklib calculation units for the chunk, the output
            containers to be populated and the calculator parameter set.

        :raises RuntimeError:
            If the number of generated tasks differs from
            `self.expected_tasks(block_size)`.
        """
        containers = writers.combine_builders(
            [ob(self) for ob in self.output_builders])

        n_generated = 0
        for taxonomy, n_assets in self.taxonomies_asset_count.items():
            for offset in range(0, n_assets, block_size):
                with logs.tracing("getting assets"):
                    chunk = models.ExposureData.objects.get_asset_chunk(
                        self.rc, taxonomy, offset, block_size)

                units = [self.calculation_unit(loss_type, chunk)
                         for loss_type in models.loss_types(self.risk_models)]

                n_generated += 1
                yield [self.job.id, units, containers,
                       self.calculator_parameters]

        # sanity check to protect against future changes of the distribution
        # logic
        n_expected = self.expected_tasks(block_size)
        if n_generated != n_expected:
            raise RuntimeError('Expected %d tasks, generated %d!' % (
                               n_expected, n_generated))