Code example #1
    def test_scenario_loss_map_writer_creation(self):
        # XML writer
        writer = risk_output.create_loss_map_writer(None, ['xml'],
                                                    "fakepath.xml", True)
        self.assertEqual(type(writer), risk_output.LossMapXMLWriter)

        # database writer
        writer = risk_output.create_loss_map_writer(1, ['db'], "fakepath.xml",
                                                    True)
        self.assertEqual(type(writer), risk_output.LossMapDBWriter)
Code example #2
    def test_scenario_loss_map_writer_creation(self):
        # XML writer
        writer = risk_output.create_loss_map_writer(
            None, ['xml'], "fakepath.xml", True)
        self.assertEqual(type(writer), risk_output.LossMapXMLWriter)

        # database writer
        writer = risk_output.create_loss_map_writer(
            1, ['db'], "fakepath.xml", True)
        self.assertEqual(type(writer), risk_output.LossMapDBWriter)
Code example #3
    def test_nondeterministic_loss_map_writer_creation(self):
        # XML writer
        writer = risk_output.create_loss_map_writer(
            None, ['xml'], "fakepath.xml", False)
        self.assertEqual(writer, None)

        # database writer
        writer = risk_output.create_loss_map_writer(
            1, ['db'], "fakepath.xml", False)
        self.assertEqual(type(writer), risk_output.LossMapDBWriter)
Code example #4
    def test_nonscenario_loss_map_writer_creation(self):
        # XML writer
        writer = risk_output.create_loss_map_writer(None, ['xml'],
                                                    "fakepath.xml", False)
        self.assertEqual(type(writer), risk_output.LossMapNonScenarioXMLWriter)

        # database writer is the same for scenario and non-scenario
        writer = risk_output.create_loss_map_writer(1, ['db'], "fakepath.xml",
                                                    False)

        self.assertEqual(type(writer), risk_output.LossMapDBWriter)
Code example #5
    def test_nonscenario_loss_map_writer_creation(self):
        # XML writer
        writer = risk_output.create_loss_map_writer(
            None, ['xml'], "fakepath.xml", False)
        self.assertEqual(type(writer),
                         risk_output.LossMapNonScenarioXMLWriter)

        # database writer is the same for scenario and non-scenario
        writer = risk_output.create_loss_map_writer(
            1, ['db'], "fakepath.xml", False)

        self.assertEqual(type(writer), risk_output.LossMapDBWriter)
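Taken together, the tests above pin down the factory's dispatch rules: a 'db'
target yields a LossMapDBWriter; an 'xml' target yields a LossMapXMLWriter
when scenario is True and a LossMapNonScenarioXMLWriter when it is False (in
the older codebase exercised by example #3, the non-scenario XML case instead
returns None). A minimal sketch of a factory with that contract follows; the
writer constructor arguments are assumptions for illustration, not the real
openquake signatures.

    def create_loss_map_writer(job_id, serialize_to, path, scenario):
        # Hypothetical sketch of the dispatch the tests above assert;
        # constructor arguments are assumed, not openquake's real ones.
        if 'db' in serialize_to:
            return LossMapDBWriter(job_id, path)
        if 'xml' in serialize_to:
            if scenario:
                return LossMapXMLWriter(path)
            return LossMapNonScenarioXMLWriter(path)
        return None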
Code example #6
File: general.py  Project: kpanic/openquake
    def write_output(self):
        """Write the output of a block to db/xml.
        """
        calc_proxy = self.calc_proxy

        for block_id in calc_proxy.blocks_keys:
            #pylint: disable=W0212
            self._write_output_for_block(calc_proxy.job_id, block_id)

        for loss_poe in conditional_loss_poes(calc_proxy.params):
            path = os.path.join(calc_proxy.base_path,
                                calc_proxy.params['OUTPUT_DIR'],
                                "losses_at-%s.xml" % loss_poe)
            writer = risk_output.create_loss_map_writer(
                calc_proxy.job_id, calc_proxy.serialize_results_to, path,
                False)

            if writer:
                metadata = {
                    "scenario": False,
                    "timeSpan": calc_proxy.params["INVESTIGATION_TIME"],
                    "poE": loss_poe,
                }

                writer.serialize(
                    [metadata]
                    + self.asset_losses_per_site(
                        loss_poe,
                        self.grid_assets_iterator(
                            calc_proxy.region.grid)))
                LOG.info('Loss Map is at: %s' % path)
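write_output emits one loss map per requested probability of exceedance,
iterating over conditional_loss_poes(calc_proxy.params). That helper is not
shown in these excerpts; a plausible sketch, assuming the job config carries
the levels as a space-separated CONDITIONAL_LOSS_POE parameter (both the
parameter name and its format are assumptions):

    def conditional_loss_poes(params):
        # Hypothetical sketch: parse the requested PoE levels from the
        # job parameters; name and format are assumed for illustration.
        value = params.get('CONDITIONAL_LOSS_POE', '')
        return [float(poe) for poe in value.split()]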
Code example #7
File: core.py  Project: arbeit/openquake-packages
    def post_execute(self):
        loss_map_path = os.path.join(
            self.job_ctxt["BASE_PATH"],
            self.job_ctxt["OUTPUT_DIR"],
            self._output_filename % self.job_ctxt.job_id)

        loss_map_writer = risk_output.create_loss_map_writer(
            self.job_ctxt.job_id, self.job_ctxt.serialize_results_to,
            loss_map_path, True)

        if loss_map_writer:
            LOGGER.debug("Starting serialization of the loss map...")

            # Add a metadata dict in the first list position
            # Note: the metadata is still incomplete (see bug 809410)
            loss_map_metadata = {"scenario": True}
            self._loss_map_data.insert(0, loss_map_metadata)
            loss_map_writer.serialize(self._loss_map_data)

        # For now, just print these values.
        # These are not debug statements; please don't remove them!
        print "Mean region loss value: %s" % numpy.mean(
            self._sum_region_losses)

        print "Standard deviation region loss value: %s" % numpy.std(
            self._sum_region_losses, ddof=1)
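Note the ddof=1 in the numpy.std call: it requests the sample
(Bessel-corrected) standard deviation, dividing by n - 1 instead of n. A
quick check of the difference:

    import numpy

    losses = numpy.array([1.0, 2.0, 3.0])
    print numpy.std(losses)          # population std-dev: ~0.8165
    print numpy.std(losses, ddof=1)  # sample std-dev: 1.0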
Code example #8
    def write_output(self):
        """Write the output of a block to db/xml.
        """
        job_ctxt = self.job_ctxt

        for block_id in job_ctxt.blocks_keys:
            #pylint: disable=W0212
            self._write_output_for_block(job_ctxt.job_id, block_id)

        for loss_poe in conditional_loss_poes(job_ctxt.params):
            path = os.path.join(job_ctxt.base_path,
                                job_ctxt.params['OUTPUT_DIR'],
                                "losses_at-%s.xml" % loss_poe)
            writer = risk_output.create_loss_map_writer(
                job_ctxt.job_id, job_ctxt.serialize_results_to, path, False)

            if writer:
                metadata = {
                    "scenario": False,
                    "timeSpan": job_ctxt.params["INVESTIGATION_TIME"],
                    "poE": loss_poe,
                }

                writer.serialize([metadata] + self.asset_losses_per_site(
                    loss_poe, self.grid_assets_iterator(job_ctxt.region.grid)))
                LOG.info('Loss Map is at: %s' % path)
Code example #9
File: core.py  Project: bwyss/oq-engine
    def post_execute(self):
        loss_map_path = os.path.join(
            self.job_ctxt["BASE_PATH"], self.job_ctxt["OUTPUT_DIR"],
            self._output_filename % self.job_ctxt.job_id)

        loss_map_writer = risk_output.create_loss_map_writer(
            self.job_ctxt.job_id, self.job_ctxt.serialize_results_to,
            loss_map_path, True)

        if loss_map_writer:
            LOGGER.debug("Starting serialization of the loss map...")

            # Add a metadata dict in the first list position
            # Note: the metadata is still incomplete (see bug 809410)
            loss_map_metadata = {"scenario": True}
            self._loss_map_data.insert(0, loss_map_metadata)
            loss_map_writer.serialize(self._loss_map_data)

        # For now, just print these values.
        # These are not debug statements; please don't remove them!
        print "Mean region loss value: %s" % numpy.mean(
            self._sum_region_losses)

        print "Standard deviation region loss value: %s" % numpy.std(
            self._sum_region_losses, ddof=1)
Code example #10
File: core.py  Project: leoalvar/oq-engine
    def execute(self):
        """Entry point for triggering the computation."""
        LOGGER.debug("Executing scenario risk computation.")
        LOGGER.debug("This will calculate mean and standard deviation loss"
            "values for the region defined in the job config.")

        tasks = []

        vuln_model = vulnerability.load_vuln_model_from_kvs(
            self.job_ctxt.job_id)

        epsilon_provider = general.EpsilonProvider(self.job_ctxt.params)

        sum_per_gmf = SumPerGroundMotionField(vuln_model, epsilon_provider)

        region_loss_map_data = {}

        for block_id in self.job_ctxt.blocks_keys:
            LOGGER.debug("Dispatching task for block %s of %s"
                % (block_id, len(self.job_ctxt.blocks_keys)))
            a_task = general.compute_risk.delay(
                self.job_ctxt.job_id, block_id, vuln_model=vuln_model,
                epsilon_provider=epsilon_provider)
            tasks.append(a_task)

        for task in tasks:
            task.wait()
            if not task.successful():
                raise Exception(task.result)

            block_loss, block_loss_map_data = task.result

            # do some basic validation on our results
            assert block_loss is not None, "Expected a result != None"
            assert isinstance(block_loss, numpy.ndarray), \
                "Expected a numpy array"

            # our result should be a 1-dimensional numpy.array of loss values
            sum_per_gmf.sum_losses(block_loss)

            collect_region_data(
                block_loss_map_data, region_loss_map_data)

        loss_map_data = [(site, data)
                for site, data in region_loss_map_data.iteritems()]

        # serialize the loss map data to XML
        loss_map_path = os.path.join(
            self.job_ctxt['BASE_PATH'],
            self.job_ctxt['OUTPUT_DIR'],
            'loss-map-%s.xml' % self.job_ctxt.job_id)
        loss_map_writer = risk_output.create_loss_map_writer(
            self.job_ctxt.job_id, self.job_ctxt.serialize_results_to,
            loss_map_path, True)

        if loss_map_writer:
            LOGGER.debug("Starting serialization of the loss map...")

            # Add a metadata dict in the first list position
            # Note: the metadata is still incomplete (see bug 809410)
            loss_map_metadata = {'scenario': True}
            loss_map_data.insert(0, loss_map_metadata)
            loss_map_writer.serialize(loss_map_data)

        # For now, just print these values.
        # These are not debug statements; please don't remove them!
        print "Mean region loss value: %s" % sum_per_gmf.mean
        print "Standard deviation region loss value: %s" % sum_per_gmf.stddev
Code example #11
    def execute(self):
        """Entry point for triggering the computation."""
        LOGGER.debug("Executing scenario risk computation.")
        LOGGER.debug("This will calculate mean and standard deviation loss"
                     "values for the region defined in the job config.")

        tasks = []

        vuln_model = vulnerability.load_vuln_model_from_kvs(
            self.job_ctxt.job_id)

        epsilon_provider = general.EpsilonProvider(self.job_ctxt.params)

        sum_per_gmf = SumPerGroundMotionField(vuln_model, epsilon_provider)

        region_loss_map_data = {}

        for block_id in self.job_ctxt.blocks_keys:
            LOGGER.debug("Dispatching task for block %s of %s" %
                         (block_id, len(self.job_ctxt.blocks_keys)))
            a_task = general.compute_risk.delay(self.job_ctxt.job_id,
                                                block_id,
                                                vuln_model=vuln_model)
            tasks.append(a_task)

        for task in tasks:
            task.wait()
            if not task.successful():
                raise Exception(task.result)

            block_loss, block_loss_map_data = task.result

            # do some basic validation on our results
            assert block_loss is not None, "Expected a result != None"
            assert isinstance(block_loss, numpy.ndarray), \
                "Expected a numpy array"

            # our result should be a 1-dimensional numpy.array of loss values
            sum_per_gmf.sum_losses(block_loss)

            collect_region_data(block_loss_map_data, region_loss_map_data)

        loss_map_data = [(site, data)
                         for site, data in region_loss_map_data.iteritems()]

        # serialize the loss map data to XML
        loss_map_path = os.path.join(self.job_ctxt['BASE_PATH'],
                                     self.job_ctxt['OUTPUT_DIR'],
                                     'loss-map-%s.xml' % self.job_ctxt.job_id)
        loss_map_writer = risk_output.create_loss_map_writer(
            self.job_ctxt.job_id, self.job_ctxt.serialize_results_to,
            loss_map_path, True)

        if loss_map_writer:
            LOGGER.debug("Starting serialization of the loss map...")

            # Add a metadata dict in the first list position
            # Note: the metadata is still incomplete (see bug 809410)
            loss_map_metadata = {'scenario': True}
            loss_map_data.insert(0, loss_map_metadata)
            loss_map_writer.serialize(loss_map_data)

        # For now, just print these values.
        # These are not debug statements; please don't remove them!
        print "Mean region loss value: %s" % sum_per_gmf.mean
        print "Standard deviation region loss value: %s" % sum_per_gmf.stddev