Exemplo n.º 1
0
    def get_error(self):
        """
        Validate the exposure model against every configured loss type.

        :returns:
            an error message string for the first loss type the exposure
            model does not support, or None when all loss types are valid.
        """
        exposure = self.calc.rc.exposure_model
        unsupported = next(
            (lt for lt in models.loss_types(self.calc.risk_models)
             if not exposure.supports_loss_type(lt)),
            None)
        if unsupported is not None:
            return ("Invalid exposure "
                    "for computing loss type %s. " % unsupported)
Exemplo n.º 2
0
 def task_completed_hook(self, message):
     """
     Fold one task's event loss tables into the calculator-wide totals.
     """
     incoming = message['event_loss_tables']
     for lt in models.loss_types(self.risk_models):
         self.event_loss_tables[lt] += incoming[lt]
Exemplo n.º 3
0
 def task_completed_hook(self, message):
     """
     Accumulate the per-task event loss tables into the running totals,
     one table per configured loss type.
     """
     per_task_tables = message['event_loss_tables']
     for loss_type in models.loss_types(self.risk_models):
         self.event_loss_tables[loss_type] += per_task_tables[loss_type]
Exemplo n.º 4
0
    def get_error(self):
        """
        Check that the exposure model supports every configured loss type.

        :returns:
            an error message for the first unsupported loss type, or
            None if the exposure is valid for all of them.
        """
        for loss_type in models.loss_types(self.calc.risk_models):
            supported = self.calc.rc.exposure_model.supports_loss_type(
                loss_type)
            if not supported:
                return ("Invalid exposure "
                        "for computing loss type %s. " % loss_type)
Exemplo n.º 5
0
 def task_completed(self, event_loss_tables):
     """
     Log progress, then merge one task's event loss tables into the
     calculator-wide totals.
     """
     self.log_percent(event_loss_tables)
     loss_types = models.loss_types(self.risk_models)
     for lt in loss_types:
         self.event_loss_tables[lt] += event_loss_tables[lt]
Exemplo n.º 6
0
    def task_completed_hook(self, message):
        """
        Merge one task's aggregate (and, when enabled, insured) losses
        into the calculator-wide running totals.

        :param dict message:
            task payload holding an 'aggregate_losses' dict and — when
            ``self.rc.insured_losses`` is set — an 'insured_losses'
            dict, both keyed by loss type.  Values are presumably numpy
            arrays or None (TODO confirm against the task producer).
        """
        def accumulate(totals, losses_dict):
            # Add each per-loss-type array into `totals`, lazily
            # creating a zero-filled accumulator of the matching shape
            # the first time a loss type is seen.
            for loss_type in models.loss_types(self.risk_models):
                losses = losses_dict.get(loss_type)
                if losses is not None:
                    if totals.get(loss_type) is None:
                        totals[loss_type] = numpy.zeros(losses.shape)
                    totals[loss_type] += losses

        accumulate(self.aggregate_losses, message.get('aggregate_losses'))

        if self.rc.insured_losses:
            accumulate(self.insured_losses, message.get('insured_losses'))
Exemplo n.º 7
0
    def task_completed_hook(self, message):
        """
        Update the running aggregate-loss totals (and the insured-loss
        totals when insured losses are computed) with one task's result.

        :param dict message:
            task payload with an 'aggregate_losses' dict and, when
            ``self.rc.insured_losses`` is set, an 'insured_losses'
            dict, both keyed by loss type.  Values are presumably numpy
            arrays or None (TODO confirm against the task producer).
        """
        def merge_into(totals, losses_by_type):
            # Sum each per-loss-type array into `totals`; the accumulator
            # for a loss type is zero-initialized on first use so its
            # shape always matches the incoming data.
            for loss_type in models.loss_types(self.risk_models):
                losses = losses_by_type.get(loss_type)
                if losses is not None:
                    if totals.get(loss_type) is None:
                        totals[loss_type] = numpy.zeros(losses.shape)
                    totals[loss_type] += losses

        merge_into(self.aggregate_losses, message.get('aggregate_losses'))

        if self.rc.insured_losses:
            merge_into(self.insured_losses, message.get('insured_losses'))
Exemplo n.º 8
0
    def task_completed(self, task_result):
        """
        Log progress and merge one task's losses into the running totals.

        :param task_result:
            a pair ``(aggregate_losses_dict, insured_losses_dict)``,
            each a dict keyed by loss type.  Values are presumably numpy
            arrays or None (TODO confirm against the task producer).
            Insured losses are accumulated only when
            ``self.rc.insured_losses`` is set.
        """
        self.log_percent(task_result)
        aggregate_losses_dict, insured_losses_dict = task_result

        def accumulate(totals, losses_dict):
            # Add each per-loss-type array into `totals`, zero-initializing
            # the accumulator the first time a loss type is seen.
            for loss_type in models.loss_types(self.risk_models):
                losses = losses_dict.get(loss_type)
                if losses is not None:
                    if totals.get(loss_type) is None:
                        totals[loss_type] = numpy.zeros(losses.shape)
                    totals[loss_type] += losses

        accumulate(self.aggregate_losses, aggregate_losses_dict)

        if self.rc.insured_losses:
            accumulate(self.insured_losses, insured_losses_dict)
Exemplo n.º 9
0
    def task_arg_gen(self, block_size):
        """
        Yield the argument list for each risk calculation task.

        The exposure is partitioned into chunks of homogeneous assets
        (same taxonomy); each chunk holds at most `block_size` assets,
        as configured by the openquake `block_size` parameter.

        :param int block_size:
            The number of work items per task (sources, sites, etc.).

        :returns:
            An iterator over a list of arguments. Each contains:

            1. the job id
            2. a getter object needed to get the hazard data
            3. the needed risklib calculators
            4. the output containers to be populated
            5. the specific calculator parameter set
        """
        containers = writers.combine_builders(
            [builder(self) for builder in self.output_builders])

        generated = 0
        for taxonomy, asset_count in self.taxonomies_asset_count.items():
            for offset in range(0, asset_count, block_size):
                with logs.tracing("getting assets"):
                    asset_block = models.ExposureData.objects.get_asset_chunk(
                        self.rc, taxonomy, offset, block_size)

                units = [self.calculation_unit(loss_type, asset_block)
                         for loss_type in models.loss_types(self.risk_models)]

                generated += 1
                yield [self.job.id, units, containers,
                       self.calculator_parameters]

        # guard against the chunking logic above drifting out of sync
        # with expected_tasks()
        expected = self.expected_tasks(block_size)
        if generated != expected:
            raise RuntimeError('Expected %d tasks, generated %d!' %
                               (expected, generated))
Exemplo n.º 10
0
    def task_arg_gen(self, block_size):
        """
        Generate the per-task argument lists, one per asset chunk.

        Implements the distribution strategy: the exposure is split into
        chunks of assets sharing the same taxonomy, each holding at most
        `block_size` assets (the openquake `block_size` config value).

        :param int block_size:
            The number of work items per task (sources, sites, etc.).

        :returns:
            An iterator over a list of arguments. Each contains:

            1. the job id
            2. a getter object needed to get the hazard data
            3. the needed risklib calculators
            4. the output containers to be populated
            5. the specific calculator parameter set
        """
        containers = writers.combine_builders(
            [builder(self) for builder in self.output_builders])
        loss_types = models.loss_types(self.risk_models)

        task_count = 0
        for taxonomy, asset_total in self.taxonomies_asset_count.items():
            for start in range(0, asset_total, block_size):
                with logs.tracing("getting assets"):
                    chunk = models.ExposureData.objects.get_asset_chunk(
                        self.rc, taxonomy, start, block_size)

                units = [self.calculation_unit(lt, chunk)
                         for lt in loss_types]

                task_count += 1
                yield [self.job.id,
                       units,
                       containers,
                       self.calculator_parameters]

        # sanity check: the number of tasks produced must match the
        # count predicted by expected_tasks()
        expected = self.expected_tasks(block_size)
        if task_count != expected:
            raise RuntimeError('Expected %d tasks, generated %d!' % (
                               expected, task_count))
Exemplo n.º 11
0
def combine_builders(builders):
    """
    Merge the outputs produced by a list of output builders into a
    single OutputDict.

    For every builder and every configured loss type, the individual
    outputs for each hazard output are collected; statistical outputs
    are added as well when there is more than one hazard output.
    """
    outputs = OutputDict()

    if not builders:
        return outputs

    first = builders[0]
    loss_types = models.loss_types(first.calc.risk_models)
    hazard_outputs = first.calc.rc.hazard_outputs()
    want_statistics = len(hazard_outputs) > 1

    for builder in builders:
        for loss_type in loss_types:
            if want_statistics:
                outputs.extend(builder.statistical_outputs(loss_type))
            for hazard in hazard_outputs:
                outputs.extend(
                    builder.individual_outputs(loss_type, hazard))

    return outputs
Exemplo n.º 12
0
def combine_builders(builders):
    """
    Collect the outputs of all the given builders into one OutputDict.

    Returns an empty OutputDict when no builders are given; otherwise,
    for each (builder, loss type) pair the per-hazard individual
    outputs are gathered, plus the statistical outputs whenever more
    than one hazard output exists.
    """
    combined = OutputDict()

    if not builders:
        return combined

    reference = builders[0]
    loss_type_list = models.loss_types(reference.calc.risk_models)
    hazards = reference.calc.rc.hazard_outputs()

    for builder in builders:
        for lt in loss_type_list:
            if len(hazards) > 1:
                combined.extend(builder.statistical_outputs(lt))
            for hazard_output in hazards:
                combined.extend(
                    builder.individual_outputs(lt, hazard_output))

    return combined