Example #1
    def test(self):
        result_dir = tempfile.mkdtemp()

        cfg = os.path.join(os.path.dirname(case_7.__file__), 'job.ini')
        expected_curve_poes_b1 = [0.86466, 0.82460, 0.36525]
        expected_curve_poes_b2 = [0.63212, 0.61186, 0.25110]
        expected_mean_poes = [0.794898, 0.760778, 0.331005]

        job = self.run_hazard(cfg)

        # Test the poe values for the two curves.
        actual_curve_b1, actual_curve_b2 = (
            models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id,
                hazard_curve__lt_realization__isnull=False).order_by(
                    'hazard_curve__lt_realization__lt_model__sm_lt_path'))

        # Sanity check, to make sure we have the curves ordered correctly:
        self.assertEqual(
            ['b1'], actual_curve_b1.hazard_curve.lt_realization.sm_lt_path)
        self.assertEqual(
            ['b2'], actual_curve_b2.hazard_curve.lt_realization.sm_lt_path)

        numpy.testing.assert_array_almost_equal(expected_curve_poes_b1,
                                                actual_curve_b1.poes,
                                                decimal=3)

        numpy.testing.assert_array_almost_equal(expected_curve_poes_b2,
                                                actual_curve_b2.poes,
                                                decimal=3)

        # Test the mean curve:
        [mean_curve] = models.HazardCurveData.objects\
            .filter(hazard_curve__output__oq_job=job.id,
                    hazard_curve__statistics='mean')
        numpy.testing.assert_array_almost_equal(expected_mean_poes,
                                                mean_curve.poes,
                                                decimal=3)

        # Test the exports as well:
        exported_file_b1 = hazard_export.export(
            actual_curve_b1.hazard_curve.output.id, result_dir)
        check_equal(case_7.__file__, 'expected_b1.xml', exported_file_b1)

        exported_file_b2 = hazard_export.export(
            actual_curve_b2.hazard_curve.output.id, result_dir)
        check_equal(case_7.__file__, 'expected_b2.xml', exported_file_b2)

        # mean:
        exported_file_mean = hazard_export.export(
            mean_curve.hazard_curve.output.id, result_dir)
        check_equal(case_7.__file__, 'expected_mean.xml', exported_file_mean)

        shutil.rmtree(result_dir)
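
Most of the examples on this page create a scratch directory with tempfile.mkdtemp() and only remove it with shutil.rmtree() on the last line, so a failing assertion leaves the directory behind (a few examples below do that deliberately via try/except/else to keep the output around for debugging). A minimal sketch of the same export-and-compare step with guaranteed cleanup; it assumes the hazard_export and check_equal helpers and the case_7 package imported by the example above, and the helper name itself is made up:

import shutil
import tempfile

def export_and_compare(output_id, expected_xml):
    # Hypothetical helper: export one hazard output, compare it against the
    # expected XML, and remove the scratch directory even if the check fails.
    result_dir = tempfile.mkdtemp()
    try:
        exported_file = hazard_export.export(output_id, result_dir)
        check_equal(case_7.__file__, expected_xml, exported_file)
    finally:
        shutil.rmtree(result_dir)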
Example #2
    def test(self):
        result_dir = tempfile.mkdtemp()

        try:
            cfg = os.path.join(os.path.dirname(__file__), 'job.ini')
            expected_curve_poes_b1_b2 = [0.095163, 0.012362, 0.002262, 0.0]
            expected_curve_poes_b1_b3 = [0.009950, 0.00076, 9.99995E-6, 0.0]
            expected_curve_poes_b1_b4 = [0.0009995, 4.5489E-5, 4.07365E-6, 0.0]

            job = self.run_hazard(cfg)

            # Test the poe values for the three curves:
            curve_b1_b2, curve_b1_b3, curve_b1_b4 = (
                models.HazardCurveData.objects
                .filter(hazard_curve__output__oq_job=job.id)
                .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path')
            )

            # Sanity check, to make sure we have the curves ordered correctly:
            self.assertEqual(
                ['b1', 'b2'],
                curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
            self.assertEqual(
                ['b1', 'b3'],
                curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)
            self.assertEqual(
                ['b1', 'b4'],
                curve_b1_b4.hazard_curve.lt_realization.sm_lt_path)

            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=3)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=3)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b4, curve_b1_b4.poes, decimal=3)

            # Test the exports as well:
            exported_file_b1_b2 = hazard_export.export(
                curve_b1_b2.hazard_curve.output.id, result_dir)
            check_equal(__file__, 'expected_b1_b2.xml', exported_file_b1_b2)

            exported_file_b1_b3 = hazard_export.export(
                curve_b1_b3.hazard_curve.output.id, result_dir)
            check_equal(__file__, 'expected_b1_b3.xml', exported_file_b1_b3)

            exported_file_b1_b4 = hazard_export.export(
                curve_b1_b4.hazard_curve.output.id, result_dir)
            check_equal(__file__, 'expected_b1_b4.xml', exported_file_b1_b4)
        except:
            raise
        else:
            shutil.rmtree(result_dir)
Example #3
    def test(self):
        result_dir = tempfile.mkdtemp()

        cfg = os.path.join(os.path.dirname(case_7.__file__), 'job.ini')
        expected_curve_poes_b1 = [0.86466, 0.82460, 0.36525]
        expected_curve_poes_b2 = [0.63212, 0.61186, 0.25110]
        expected_mean_poes = [0.794898, 0.760778, 0.331005]

        job = self.run_hazard(cfg)

        # Test the poe values for the two curves.
        actual_curve_b1, actual_curve_b2 = (
            models.HazardCurveData.objects
            .filter(hazard_curve__output__oq_job=job.id,
                    hazard_curve__lt_realization__isnull=False)
            .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path')
        )

        # Sanity check, to make sure we have the curves ordered correctly:
        self.assertEqual(
            ['b1'], actual_curve_b1.hazard_curve.lt_realization.sm_lt_path)
        self.assertEqual(
            ['b2'], actual_curve_b2.hazard_curve.lt_realization.sm_lt_path)

        numpy.testing.assert_array_almost_equal(
            expected_curve_poes_b1, actual_curve_b1.poes, decimal=3)

        numpy.testing.assert_array_almost_equal(
            expected_curve_poes_b2, actual_curve_b2.poes, decimal=3)

        # Test the mean curve:
        [mean_curve] = models.HazardCurveData.objects\
            .filter(hazard_curve__output__oq_job=job.id,
                    hazard_curve__statistics='mean')
        numpy.testing.assert_array_almost_equal(
            expected_mean_poes, mean_curve.poes, decimal=3)

        # Test the exports as well:
        exported_file_b1 = hazard_export.export(
            actual_curve_b1.hazard_curve.output.id, result_dir)
        check_equal(case_7.__file__, 'expected_b1.xml', exported_file_b1)

        exported_file_b2 = hazard_export.export(
            actual_curve_b2.hazard_curve.output.id, result_dir)
        check_equal(case_7.__file__, 'expected_b2.xml', exported_file_b2)

        # mean:
        exported_file_mean = hazard_export.export(
            mean_curve.hazard_curve.output.id, result_dir)
        check_equal(case_7.__file__, 'expected_mean.xml', exported_file_mean)

        shutil.rmtree(result_dir)
Example #4
    def test(self):
        result_dir = tempfile.mkdtemp()

        cfg = os.path.join(os.path.dirname(case_8.__file__), 'job.ini')
        expected_curve_poes_b1_b2 = [0.095163, 0.012362, 0.002262, 0.0]
        expected_curve_poes_b1_b3 = [0.009950, 0.00076, 9.99995E-6, 0.0]
        expected_curve_poes_b1_b4 = [0.0009995, 4.5489E-5, 4.07365E-6, 0.0]

        job = self.run_hazard(cfg)

        # Test the poe values for the three curves:
        curve_b1_b2, curve_b1_b3, curve_b1_b4 = (
            models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id).order_by(
                    'hazard_curve__lt_realization__lt_model__sm_lt_path'))

        # Sanity check, to make sure we have the curves ordered correctly:
        self.assertEqual(['b1', 'b2'],
                         curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
        self.assertEqual(['b1', 'b3'],
                         curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)
        self.assertEqual(['b1', 'b4'],
                         curve_b1_b4.hazard_curve.lt_realization.sm_lt_path)

        numpy.testing.assert_array_almost_equal(expected_curve_poes_b1_b2,
                                                curve_b1_b2.poes,
                                                decimal=3)
        numpy.testing.assert_array_almost_equal(expected_curve_poes_b1_b3,
                                                curve_b1_b3.poes,
                                                decimal=3)
        numpy.testing.assert_array_almost_equal(expected_curve_poes_b1_b4,
                                                curve_b1_b4.poes,
                                                decimal=3)

        # Test the exports as well:
        exported_file_b1_b2 = hazard_export.export(
            curve_b1_b2.hazard_curve.output.id, result_dir)
        check_equal(case_8.__file__, 'expected_b1_b2.xml', exported_file_b1_b2)

        exported_file_b1_b3 = hazard_export.export(
            curve_b1_b3.hazard_curve.output.id, result_dir)
        check_equal(case_8.__file__, 'expected_b1_b3.xml', exported_file_b1_b3)

        exported_file_b1_b4 = hazard_export.export(
            curve_b1_b4.hazard_curve.output.id, result_dir)
        check_equal(case_8.__file__, 'expected_b1_b4.xml', exported_file_b1_b4)

        shutil.rmtree(result_dir)
Example #5
    def test(self):
        result_dir = tempfile.mkdtemp()

        cfg = os.path.join(os.path.dirname(case_19.__file__), 'job.ini')
        job = self.run_hazard(cfg)

        curves = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id,
            hazard_curve__statistics='mean')
        [pga_curve] = curves.filter(hazard_curve__imt='PGA')

        exported_file = hazard_export.export(pga_curve.hazard_curve.output.id,
                                             result_dir, 'csv')
        # NB: the format of the exported file is 'lon lat poe1 ... poeN'
        # we discard lon and lat and extract the poes
        actual = [
            ' '.join(line.split(' ')[2:]).strip()
            for line in open(exported_file)
        ]
        fname = os.path.join(os.path.dirname(case_19.__file__), 'expected',
                             'hazard_curve-mean.csv')
        # NB: the format of the expected file is lon lat, poe1 ... poeN, ...
        # we extract the poes
        # TODO: unify the engine and oq-lite export formats
        expected = [line.split(',')[1] for line in open(fname)]
        self.assertEqual(actual, expected)

        shutil.rmtree(result_dir)
Example #6
def check_export(output_id, target):
    """
    Call core.export and check that the exported file is valid NRML
    """
    out_file = core.export(output_id, target, 'xml')
    nrml.read(out_file)
    return out_file
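
check_export validates the exported file by round-tripping it through nrml.read. A hypothetical sketch of using it to validate every output of a finished job; the models module and the job object are the same ones used in the tests above, while the helper name and target directory are assumptions for illustration:

import tempfile

def check_all_outputs(job):
    # Export every output of the given job as XML; nrml.read inside
    # check_export raises if any exported file is not valid NRML.
    target = tempfile.mkdtemp()
    return [check_export(output.id, target)
            for output in models.Output.objects.filter(oq_job=job)]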
Example #7
def export_output(output_id, target_dir, export_type):
    """
    Simple UI wrapper around
    :func:`openquake.engine.export.core.export` which prints a summary
    of files exported, if any.
    """
    queryset = models.Output.objects.filter(pk=output_id)
    if not queryset.exists():
        print 'No output found for OUTPUT_ID %s' % output_id
        return

    if queryset.all()[0].oq_job.status != "complete":
        print(
            "Exporting output produced by a job which did not run "
            "successfully. Results might be uncomplete")

    the_file = core.export(output_id, target_dir, export_type)
    if the_file.endswith('.zip'):
        dname = os.path.dirname(the_file)
        fnames = zipfile.ZipFile(the_file).namelist()
        print('Files exported:')
        for fname in fnames:
            print(os.path.join(dname, fname))
    else:
        print('File exported: %s' % the_file)
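
When core.export returns a .zip archive, the wrapper above prints the would-be paths of the archive members rather than the archive itself. That branch relies only on the standard library; a standalone sketch of the same idea (the file name passed in is made up):

import os
import zipfile

def exported_names(the_file):
    # Mirror the zip branch above: map each archive member to the path it
    # would occupy next to the archive; otherwise return the file itself.
    if the_file.endswith('.zip'):
        dname = os.path.dirname(the_file)
        return [os.path.join(dname, f)
                for f in zipfile.ZipFile(the_file).namelist()]
    return [the_file]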
Example #8
    def export(self, *args, **kwargs):
        """
        If requested by the user, automatically export all result artifacts to
        the specified format. (NOTE: The only export format supported at the
        moment is NRML XML.)

        :param exports:
            Keyword arg. List of export types.
        :returns:
            A list of the export filenames, including the absolute path to each
            file.
        """
        exported_files = []

        with logs.tracing('exports'):
            export_dir = self.job.get_param('export_dir')
            export_type = kwargs['exports']
            if export_type:
                outputs = self._get_outputs_for_export()
                for output in outputs:
                    with self.monitor('exporting %s to %s'
                                      % (output.output_type, export_type)):
                        fname = core.export(output.id, export_dir, export_type)
                        if fname:
                            logs.LOG.info('exported %s', fname)
                            exported_files.append(fname)

        return exported_files
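
This export hook reads export_dir from the job parameters and, when the exports keyword is non-empty, runs core.export over every output returned by _get_outputs_for_export. A hypothetical call site, where calc stands for a calculator instance exposing the method above:

# Export all results as NRML XML and report where they ended up.
exported = calc.export(exports='xml')
for path in exported:
    print(path)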
Example #9
    def test(self):
        result_dir = tempfile.mkdtemp()
        aaae = numpy.testing.assert_array_almost_equal

        try:
            cfg = os.path.join(os.path.dirname(__file__), 'job.ini')
            expected_curve_poes = [0.75421006, 0.08098179, 0.00686616]

            job = self.run_hazard(cfg)

            # Test the poe values of the single curve:
            [curve] = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id)

            aaae(expected_curve_poes, curve.poes, decimal=2)

            # Test the exports as well:
            exported_file = hazard_export.export(
                curve.hazard_curve.output.id, result_dir)
            check_equal(__file__, 'expected_hazard_curves.xml',
                        exported_file)
        except:
            raise
        else:
            shutil.rmtree(result_dir)
Example #10
    def test(self):
        result_dir = tempfile.mkdtemp()

        cfg = os.path.join(os.path.dirname(case_2.__file__), 'job.ini')
        expected_gmf = os.path.join(os.path.dirname(case_2.__file__),
                                    'expected', '0-SadighEtAl1997.csv')

        expected_curve_poes = [0.00853479861, 0., 0., 0.]

        job = self.run_hazard(cfg)

        # Test the GMF exported values
        gmf_output = models.Output.objects.get(
            output_type='gmf', oq_job=job)

        fname = core.export(gmf_output.id, result_dir, 'csv')
        gotlines = sorted(open(fname).readlines())
        expected = sorted(open(expected_gmf).readlines())
        self.assertEqual(gotlines, expected)

        # Test the poe values of the single curve:
        [actual_curve] = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id,
            hazard_curve__imt__isnull=False)

        self.assert_equals_var_tolerance(
            expected_curve_poes, actual_curve.poes
        )
        shutil.rmtree(result_dir)
Example #11
    def test(self):
        result_dir = tempfile.mkdtemp()

        cfg = os.path.join(os.path.dirname(case_2.__file__), 'job.ini')
        expected_gmf = os.path.join(os.path.dirname(case_2.__file__),
                                    'expected', '0-SadighEtAl1997.csv')

        expected_curve_poes = [0.00853479861, 0., 0., 0.]

        job = self.run_hazard(cfg)

        # Test the GMF exported values
        gmf_output = models.Output.objects.get(output_type='gmf', oq_job=job)

        fname = core.export(gmf_output.id, result_dir, 'csv')
        gotlines = sorted(open(fname).readlines())
        expected = sorted(open(expected_gmf).readlines())
        self.assertEqual(gotlines, expected)

        # Test the poe values of the single curve:
        [actual_curve] = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id,
            hazard_curve__imt__isnull=False)

        self.assert_equals_var_tolerance(expected_curve_poes,
                                         actual_curve.poes)
        shutil.rmtree(result_dir)
Example #12
    def test(self):
        result_dir = tempfile.mkdtemp()

        try:
            cfg = os.path.join(os.path.dirname(__file__), 'job.ini')
            expected_curve_poes = [0.632120, 0.54811, 0.15241]

            job = self.run_hazard(cfg)

            # Test the poe values of the single curve:
            [actual_curve] = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id)

            numpy.testing.assert_array_almost_equal(
                expected_curve_poes, actual_curve.poes, decimal=3)

            # Test the export as well:
            exported_file = hazard_export.export(
                actual_curve.hazard_curve.output.id, result_dir)
            check_equal(__file__, 'expected_hazard_curves.xml',
                        exported_file)
        except:
            raise
        else:
            shutil.rmtree(result_dir)
Example #13
def check_export(output_id, target):
    """
    Call core.export and check that the exported file is valid NRML
    """
    out_file = core.export(output_id, target, 'xml')
    nrml.read(out_file)
    return out_file
Example #14
    def export(self, *args, **kwargs):
        """
        If requested by the user, automatically export all result artifacts to
        the specified format. (NOTE: The only export format supported at the
        moment is NRML XML.)

        :param exports:
            Keyword arg. List of export types.
        :returns:
            A list of the export filenames, including the absolute path to each
            file.
        """
        exported_files = []

        with logs.tracing('exports'):
            export_dir = self.job.get_param('export_dir')
            export_type = kwargs['exports']
            if export_type:
                outputs = self._get_outputs_for_export()
                for output in outputs:
                    with self.monitor('exporting %s to %s' %
                                      (output.output_type, export_type)):
                        fname = core.export(output.id, export_dir, export_type)
                        if fname:
                            logs.LOG.info('exported %s', fname)
                            exported_files.append(fname)

        return exported_files
Example #15
    def test(self):
        result_dir = tempfile.mkdtemp()

        cfg = os.path.join(os.path.dirname(case_19.__file__), 'job.ini')
        job = self.run_hazard(cfg)

        curves = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id,
            hazard_curve__statistics='mean'
        )
        [pga_curve] = curves.filter(hazard_curve__imt='PGA')

        exported_file = hazard_export.export(
            pga_curve.hazard_curve.output.id, result_dir, 'csv')
        # NB: the format of the exported file is 'lon lat poe1 ... poeN'
        # we discard lon and lat and extract the poes
        actual = [' '.join(line.split(' ')[2:]).strip()
                  for line in open(exported_file)]
        fname = os.path.join(os.path.dirname(case_19.__file__), 'expected',
                             'hazard_curve-mean.csv')
        # NB: the format of the expected file is lon lat, poe1 ... poeN, ...
        # we extract the poes
        # TODO: unify the engine and oq-lite export formats
        expected = [line.split(',')[1] for line in open(fname)]
        self.assertEqual(actual, expected)

        shutil.rmtree(result_dir)
Example #16
def get_result(request, result_id):
    """
    Download a specific result, by ``result_id``.

    The common abstracted functionality for getting hazard or risk results.

    :param request:
        `django.http.HttpRequest` object. Can contain an `export_type` GET
        param (the default is 'xml' if no param is specified).
    :param result_id:
        The id of the requested artifact.
    :returns:
        A 404 (`HttpResponseNotFound`) if the requested ``result_id`` is not
        available in the format designated by the `export_type`.

        Otherwise, return a `django.http.HttpResponse` containing the content
        of the requested artifact.

    Parameters for the GET request can include an `export_type`, such as 'xml',
    'geojson', 'csv', etc.
    """
    # If the result for the requested ID doesn't exist, OR
    # the job to which it is related is not complete,
    # throw back a 404.
    try:
        output = oqe_models.Output.objects.get(id=result_id)
        job = output.oq_job
        if not job.status == 'complete':
            return HttpResponseNotFound()
    except ObjectDoesNotExist:
        return HttpResponseNotFound()

    etype = request.GET.get('export_type')
    export_type = etype or DEFAULT_EXPORT_TYPE

    tmpdir = tempfile.mkdtemp()
    try:
        exported = core.export(result_id, tmpdir, export_type=export_type)
    except DataStoreExportError as exc:
        # TODO: there should be a better error page
        return HttpResponse(content='%s: %s' % (exc.__class__.__name__, exc),
                            content_type='text/plain',
                            status=500)
    if exported is None:
        # Throw back a 404 if the exact export parameters are not supported
        return HttpResponseNotFound('export_type=%s is not supported for %s' %
                                    (export_type, output.ds_key))

    content_type = EXPORT_CONTENT_TYPE_MAP.get(export_type,
                                               DEFAULT_CONTENT_TYPE)
    try:
        fname = 'output-%s-%s' % (result_id, os.path.basename(exported))
        data = open(exported).read()
        response = HttpResponse(data, content_type=content_type)
        response['Content-Length'] = len(data)
        response['Content-Disposition'] = 'attachment; filename=%s' % fname
        return response
    finally:
        shutil.rmtree(tmpdir)
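
From a client's point of view the view above is just an HTTP download with an optional export_type query parameter. A hypothetical client-side sketch using the third-party requests library; the host, route and result id are assumptions for illustration, not the engine's documented API:

import requests

# Fetch result 37 as CSV from a hypothetical local server and save it.
resp = requests.get('http://localhost:8800/v1/calc/result/37',
                    params={'export_type': 'csv'})
resp.raise_for_status()
with open('result-37.csv', 'wb') as f:
    f.write(resp.content)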
Example #17
def get_result(request, result_id):
    """
    Download a specific result, by ``result_id``.

    The common abstracted functionality for getting hazard or risk results.

    :param request:
        `django.http.HttpRequest` object. Can contain an `export_type` GET
        param (the default is 'xml' if no param is specified).
    :param result_id:
        The id of the requested artifact.
    :returns:
        A 404 (`HttpResponseNotFound`) if the requested ``result_id`` is not
        available in the format designated by the `export_type`.

        Otherwise, return a `django.http.HttpResponse` containing the content
        of the requested artifact.

    Parameters for the GET request can include an `export_type`, such as 'xml',
    'geojson', 'csv', etc.
    """
    # If the result for the requested ID doesn't exist, OR
    # the job to which it is related is not complete,
    # throw back a 404.
    try:
        output = oqe_models.Output.objects.get(id=result_id)
        job = output.oq_job
        if not job.status == 'complete':
            return HttpResponseNotFound()
    except ObjectDoesNotExist:
        return HttpResponseNotFound()

    etype = request.GET.get('export_type')
    export_type = etype or DEFAULT_EXPORT_TYPE

    tmpdir = tempfile.mkdtemp()
    try:
        exported = core.export(result_id, tmpdir, export_type=export_type)
    except DataStoreExportError as exc:
        # TODO: there should be a better error page
        return HttpResponse(content='%s: %s' % (exc.__class__.__name__, exc),
                            content_type='text/plain', status=500)
    if exported is None:
        # Throw back a 404 if the exact export parameters are not supported
        return HttpResponseNotFound(
            'export_type=%s is not supported for %s' %
            (export_type, output.ds_key))

    content_type = EXPORT_CONTENT_TYPE_MAP.get(
        export_type, DEFAULT_CONTENT_TYPE)
    try:
        fname = 'output-%s-%s' % (result_id, os.path.basename(exported))
        data = open(exported).read()
        response = HttpResponse(data, content_type=content_type)
        response['Content-Length'] = len(data)
        response['Content-Disposition'] = 'attachment; filename=%s' % fname
        return response
    finally:
        shutil.rmtree(tmpdir)
Example #18
    def test(self):
        result_dir = tempfile.mkdtemp()

        try:
            cfg = os.path.join(os.path.dirname(__file__), 'job.ini')
            expected_curve_pga = [0.4570, 0.0587, 0.0069]
            expected_curve_sa = [
                0.608675003748, 0.330831513139, 0.201472214825
            ]

            job = self.run_hazard(cfg)

            # Test the poe values of the single curve:
            curves = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id
            )

            [pga_curve] = curves.filter(hazard_curve__imt='PGA')
            numpy.testing.assert_array_almost_equal(
                expected_curve_pga, pga_curve.poes, decimal=4
            )

            [sa_curve] = curves.filter(
                hazard_curve__imt='SA', hazard_curve__sa_period=0.1
            )
            numpy.testing.assert_array_almost_equal(
                expected_curve_sa, sa_curve.poes, decimal=4
            )

            # Test the exports as well:
            exported_file = hazard_export.export(
                pga_curve.hazard_curve.output.id, result_dir)
            self.assert_xml_equal(
                StringIO.StringIO(self.EXPECTED_PGA_XML), exported_file)

            exported_file = hazard_export.export(
                sa_curve.hazard_curve.output.id, result_dir)
            self.assert_xml_equal(
                StringIO.StringIO(self.EXPECTED_SA_XML), exported_file)
        finally:
            shutil.rmtree(result_dir)
Example #19
    def test(self):
        result_dir = tempfile.mkdtemp()

        cfg = os.path.join(os.path.dirname(case_10.__file__), 'job.ini')
        expected_curve_poes_b1_b2 = [0.00995, 0.00076, 9.7E-5, 0.0]
        expected_curve_poes_b1_b3 = [0.043, 0.0012, 7.394E-5, 0.0]

        job = self.run_hazard(cfg)

        # Test the poe values for the two curves:
        curve_b1_b2, curve_b1_b3 = models.HazardCurveData.objects\
            .filter(hazard_curve__output__oq_job=job.id)\
            .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path')

        # Sanity check, to make sure we have the curves ordered correctly:
        self.assertEqual(
            ['b1', 'b2'],
            curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
        self.assertEqual(
            ['b1', 'b3'],
            curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)

        numpy.testing.assert_array_almost_equal(
            expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=4)
        numpy.testing.assert_array_almost_equal(
            expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=4)

        # Test the exports as well:
        exported_file_b1_b2 = hazard_export.export(
            curve_b1_b2.hazard_curve.output.id, result_dir)
        check_equal(case_10.__file__, 'expected_b1_b2.xml',
                    exported_file_b1_b2)

        exported_file_b1_b3 = hazard_export.export(
            curve_b1_b3.hazard_curve.output.id, result_dir)
        check_equal(case_10.__file__, 'expected_b1_b3.xml',
                    exported_file_b1_b3)

        shutil.rmtree(result_dir)
Example #20
    def test(self):
        result_dir = tempfile.mkdtemp()

        cfg = os.path.join(os.path.dirname(case_1.__file__), 'job.ini')
        expected_curve_pga = [0.4570, 0.0587, 0.0069]
        expected_curve_sa = [0.608675003748, 0.330831513139, 0.201472214825]

        job = self.run_hazard(cfg)

        # Test the poe values of the single curve:
        curves = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id)

        [pga_curve] = curves.filter(hazard_curve__imt='PGA')
        numpy.testing.assert_array_almost_equal(expected_curve_pga,
                                                pga_curve.poes,
                                                decimal=4)

        [sa_curve] = curves.filter(hazard_curve__imt='SA',
                                   hazard_curve__sa_period=0.1)
        numpy.testing.assert_array_almost_equal(expected_curve_sa,
                                                sa_curve.poes,
                                                decimal=4)

        # Test the exports as well:
        exported_file = hazard_export.export(pga_curve.hazard_curve.output.id,
                                             result_dir)
        self.assert_xml_equal(StringIO.StringIO(self.EXPECTED_PGA_XML),
                              exported_file)

        exported_file = hazard_export.export(sa_curve.hazard_curve.output.id,
                                             result_dir)
        self.assert_xml_equal(StringIO.StringIO(self.EXPECTED_SA_XML),
                              exported_file)

        shutil.rmtree(result_dir)
Example #21
    def test(self):
        result_dir = tempfile.mkdtemp()

        cfg = os.path.join(os.path.dirname(case_17.__file__), 'job.ini')
        expected_curves_pga = [[0.0, 0.0, 0.0],
                               [1.0, 1.0, 0.0],
                               [1.0, 1.0, 0.0],
                               [1.0, 1.0, 0.0],
                               [1.0, 1.0, 0.0]]

        job = self.run_hazard(cfg)
        j = job.id
        tags = models.SESRupture.objects.filter(
            rupture__ses_collection__trt_model__lt_model__hazard_calculation=j
        ).values_list('tag', flat=True)

        t1_tags = [t for t in tags if t.startswith('col=00')]
        t2_tags = [t for t in tags if t.startswith('col=01')]
        t3_tags = [t for t in tags if t.startswith('col=02')]
        t4_tags = [t for t in tags if t.startswith('col=03')]
        t5_tags = [t for t in tags if t.startswith('col=04')]

        self.assertEqual(len(t1_tags), 0)
        self.assertEqual(len(t2_tags), 2816)
        self.assertEqual(len(t3_tags), 2775)
        self.assertEqual(len(t4_tags), 2736)
        self.assertEqual(len(t5_tags), 2649)

        # check the total number of exported GMFs among the 4 realizations
        countlines = 0
        for gmf_output in models.Output.objects.filter(output_type='gmf',
                                                       oq_job=job):
            fname = core.export(gmf_output.id, result_dir, 'csv')
            countlines += len(open(fname).readlines())
        self.assertEqual(countlines, len(tags))

        curves = [
            c.poes for c in models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id,
                hazard_curve__imt='PGA').order_by('hazard_curve')
        ]
        numpy.testing.assert_array_almost_equal(expected_curves_pga,
                                                curves,
                                                decimal=7)

        shutil.rmtree(result_dir)
Example #22
    def test(self):
        result_dir = tempfile.mkdtemp()

        cfg = os.path.join(os.path.dirname(case_17.__file__), 'job.ini')
        expected_curves_pga = [[0.0, 0.0, 0.0],
                               [1.0, 1.0, 0.0],
                               [1.0, 1.0, 0.0],
                               [1.0, 1.0, 0.0],
                               [1.0, 1.0, 0.0]]

        job = self.run_hazard(cfg)
        j = job.id
        tags = models.SESRupture.objects.filter(
            rupture__ses_collection__trt_model__lt_model__hazard_calculation=j
        ).values_list('tag', flat=True)

        t1_tags = [t for t in tags if t.startswith('col=00')]
        t2_tags = [t for t in tags if t.startswith('col=01')]
        t3_tags = [t for t in tags if t.startswith('col=02')]
        t4_tags = [t for t in tags if t.startswith('col=03')]
        t5_tags = [t for t in tags if t.startswith('col=04')]

        self.assertEqual(len(t1_tags), 0)
        self.assertEqual(len(t2_tags), 2816)
        self.assertEqual(len(t3_tags), 2775)
        self.assertEqual(len(t4_tags), 2736)
        self.assertEqual(len(t5_tags), 2649)

        # check the total number of exported GMFs among the 4 realizations
        countlines = 0
        for gmf_output in models.Output.objects.filter(
                output_type='gmf', oq_job=job):
            fname = core.export(gmf_output.id, result_dir, 'csv')
            countlines += len(open(fname).readlines())
        self.assertEqual(countlines, len(tags))

        curves = [c.poes for c in models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id, hazard_curve__imt='PGA'
        ).order_by('hazard_curve')]
        numpy.testing.assert_array_almost_equal(
            expected_curves_pga, curves, decimal=7)

        shutil.rmtree(result_dir)
Example #23
    def test(self):
        result_dir = tempfile.mkdtemp()

        cfg = os.path.join(os.path.dirname(case_6.__file__), 'job.ini')
        expected_curve_poes = [0.86466, 0.82460, 0.36525]

        job = self.run_hazard(cfg)

        # Test the poe values of the single curve:
        [actual_curve] = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id)

        numpy.testing.assert_array_almost_equal(
            expected_curve_poes, actual_curve.poes, decimal=2)

        # Test the export as well:
        exported_file = hazard_export.export(
            actual_curve.hazard_curve.output.id, result_dir)
        check_equal(case_6.__file__, 'expected_hazard_curves.xml',
                    exported_file)

        shutil.rmtree(result_dir)
Example #24
def export(output_id, target_dir, export_type):
    """
    Simple UI wrapper around
    :func:`openquake.engine.export.core.export` which prints a summary
    of files exported, if any.
    """
    queryset = models.Output.objects.filter(pk=output_id)
    if not queryset.exists():
        print 'No output found for OUTPUT_ID %s' % output_id
        return

    if queryset.all()[0].oq_job.status != "complete":
        print ("Exporting output produced by a job which did not run "
               "successfully. Results might be uncomplete")

    try:
        the_file = core.export(output_id, target_dir, export_type)
        print 'File Exported:'
        print the_file
    except NotImplementedError as err:
        print err
        print 'This feature is probably not implemented yet'
Example #25
    def test(self):
        result_dir = tempfile.mkdtemp()
        aaae = numpy.testing.assert_array_almost_equal

        cfg = os.path.join(os.path.dirname(case_12.__file__), 'job.ini')
        expected_curve_poes = [0.75421006, 0.08098179, 0.00686616]

        job = self.run_hazard(cfg)

        # Test the poe values of the single curve:
        [curve] = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id)

        aaae(expected_curve_poes, curve.poes, decimal=2)

        # Test the exports as well:
        exported_file = hazard_export.export(curve.hazard_curve.output.id,
                                             result_dir)
        check_equal(case_12.__file__, 'expected_hazard_curves.xml',
                    exported_file)

        shutil.rmtree(result_dir)
Example #26
    def test(self):
        result_dir = tempfile.mkdtemp()

        cfg = os.path.join(os.path.dirname(case_2.__file__), 'job.ini')
        expected_curve_poes = [0.0095, 0.00076, 0.000097, 0.0]

        job = self.run_hazard(cfg)

        # Test the poe values of the single curve:
        [actual_curve] = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id)

        numpy.testing.assert_array_almost_equal(expected_curve_poes,
                                                actual_curve.poes,
                                                decimal=3)

        # Test the export as well:
        exported_file = hazard_export.export(
            actual_curve.hazard_curve.output.id, result_dir)
        check_equal(case_2.__file__, 'expected_hazard_curves.xml',
                    exported_file)
        shutil.rmtree(result_dir)
Example #27
def export(output_id, target_dir, export_type):
    """
    Simple UI wrapper around
    :func:`openquake.engine.export.core.export` which prints a summary
    of files exported, if any.
    """
    queryset = models.Output.objects.filter(pk=output_id)
    if not queryset.exists():
        print 'No output found for OUTPUT_ID %s' % output_id
        return

    if queryset.all()[0].oq_job.status != "complete":
        print(
            "Exporting output produced by a job which did not run "
            "successfully. Results might be uncomplete")

    try:
        the_file = core.export(output_id, target_dir, export_type)
        print 'File Exported:'
        print the_file
    except NotImplementedError as err:
        print err
        print 'This feature is probably not implemented yet'
Example #28
def export_output(output_id, target_dir, export_type):
    """
    Simple UI wrapper around
    :func:`openquake.engine.export.core.export` which prints a summary
    of files exported, if any.
    """
    queryset = models.Output.objects.filter(pk=output_id)
    if not queryset.exists():
        print 'No output found for OUTPUT_ID %s' % output_id
        return

    if queryset.all()[0].oq_job.status != "complete":
        print ("Exporting output produced by a job which did not run "
               "successfully. Results might be uncomplete")

    the_file = core.export(output_id, target_dir, export_type)
    if the_file.endswith('.zip'):
        dname = os.path.dirname(the_file)
        fnames = zipfile.ZipFile(the_file).namelist()
        print('Files exported:')
        for fname in fnames:
            print(os.path.join(dname, fname))
    else:
        print('File exported: %s' % the_file)
Example #29
    def test_event_based_risk_export(self):
        target_dir = tempfile.mkdtemp()
        try:
            haz_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_haz_event_based.ini')
            risk_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_risk_event_based.ini')

            haz_job = helpers.run_job(haz_cfg).job
            # Run the risk on all outputs produced by the haz calc:
            risk_job = helpers.run_job(risk_cfg,
                                       hazard_calculation_id=haz_job.id).job

            risk_outputs = models.Output.objects.filter(oq_job=risk_job)

            agg_loss_curve_outputs = risk_outputs.filter(
                output_type='agg_loss_curve')
            loss_curve_outputs = risk_outputs.filter(output_type='loss_curve')
            loss_map_outputs = risk_outputs.filter(output_type='loss_map')

            # (1 mean + 2 quantiles) * 2 (as there are also insured curves)
            self.assertEqual(6, loss_curve_outputs.count())

            # 16 rlzs + 16 (due to insured curves)
            event_loss_curve_outputs = risk_outputs.filter(
                output_type='event_loss_curve')
            self.assertEqual(32, event_loss_curve_outputs.count())
            self.assertEqual(16, agg_loss_curve_outputs.count())

            # make sure the mean and quantile curve sets got created correctly
            loss_curves = models.LossCurve.objects.filter(
                output__oq_job=risk_job)
            # sanity check (16 aggregate loss curves + 38 loss curves)
            self.assertEqual(54, loss_curves.count())
            # mean
            self.assertEqual(2, loss_curves.filter(statistics='mean').count())
            # quantiles
            self.assertEqual(4,
                             loss_curves.filter(statistics='quantile').count())

            # 16 logic tree realizations = 16 loss maps + 1 mean loss
            # map + 2 quantile loss maps
            self.assertEqual(19, loss_map_outputs.count())

            # 16 event loss tables (1 per rlz)
            event_loss_tables = risk_outputs.filter(output_type="event_loss")
            self.assertEqual(16, event_loss_tables.count())

            # 32 loss fractions
            loss_fraction_outputs = risk_outputs.filter(
                output_type="loss_fraction")
            self.assertEqual(32, loss_fraction_outputs.count())

            # Now try to export everything, just to do a "smoketest" of the
            # exporter code:
            loss_curve_files = []
            for o in loss_curve_outputs:
                loss_curve_files.append(core.export(o.id, target_dir, 'xml'))
            for o in loss_fraction_outputs:
                loss_curve_files.append(core.export(o.id, target_dir, 'xml'))
            for o in event_loss_curve_outputs:
                loss_curve_files.append(core.export(o.id, target_dir, 'xml'))

            agg_loss_curve_files = []
            for o in agg_loss_curve_outputs:
                agg_loss_curve_files.append(
                    core.export(o.id, target_dir, 'xml'))

            event_loss_table_files = []
            for o in event_loss_tables:
                event_loss_table_files.append(
                    core.export(o.id, target_dir, 'csv'))

            loss_map_files = []
            for o in loss_map_outputs:
                loss_map_files.append(core.export(o.id, target_dir, 'xml'))

            self.assertEqual(70, len(loss_curve_files))
            self.assertEqual(16, len(agg_loss_curve_files))
            self.assertEqual(16, len(event_loss_table_files))
            self.assertEqual(19, len(loss_map_files))

            for f in loss_curve_files:
                self._test_exported_file(f)
            for f in loss_map_files:
                self._test_exported_file(f)
        finally:
            shutil.rmtree(target_dir)
Example #30
    def test_classical_risk_export(self):
        target_dir = tempfile.mkdtemp()
        try:
            haz_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_haz_classical.ini')
            risk_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_risk_classical.ini')

            haz_job = helpers.run_job(haz_cfg).job
            # Run the risk on all outputs produced by the haz calc:
            risk_job = helpers.run_job(risk_cfg,
                                       hazard_calculation_id=haz_job.id).job

            risk_outputs = models.Output.objects.filter(oq_job=risk_job)

            loss_curve_outputs = risk_outputs.filter(output_type='loss_curve')
            loss_map_outputs = risk_outputs.filter(output_type='loss_map')

            # 16 logic tree realizations + 1 mean + 2 quantiles = 19
            # + 19 insured loss curves
            self.assertEqual(38, loss_curve_outputs.count())
            # make sure the mean and quantile curve sets got created correctly
            loss_curves = models.LossCurve.objects.filter(
                output__oq_job=risk_job, insured=False)
            # sanity check
            self.assertEqual(19, loss_curves.count())

            insured_curves = models.LossCurve.objects.filter(
                output__oq_job=risk_job, insured=True)
            # sanity check
            self.assertEqual(19, insured_curves.count())

            # mean
            self.assertEqual(1, loss_curves.filter(statistics='mean').count())
            # quantiles
            self.assertEqual(2,
                             loss_curves.filter(statistics='quantile').count())

            # mean
            self.assertEqual(1,
                             insured_curves.filter(statistics='mean').count())
            # quantiles
            self.assertEqual(
                2,
                insured_curves.filter(statistics='quantile').count())

            # 16 logic tree realizations = 16 loss maps + 1 mean loss
            # map + 2 quantile loss maps
            self.assertEqual(19, loss_map_outputs.count())

            # 19 loss fractions
            loss_fraction_outputs = risk_outputs.filter(
                output_type="loss_fraction")
            self.assertEqual(19, loss_fraction_outputs.count())

            # Now try to export everything, just to do a "smoketest" of the
            # exporter code:
            loss_curve_files = []
            for o in loss_curve_outputs:
                loss_curve_files.append(core.export(o.id, target_dir, 'xml'))

            loss_map_files = []
            for o in loss_map_outputs:
                loss_map_files.append(core.export(o.id, target_dir, 'xml'))

            self.assertEqual(38, len(loss_curve_files))
            self.assertEqual(19, len(loss_map_files))

            for f in loss_curve_files:
                self._test_exported_file(f)
            for f in loss_map_files:
                self._test_exported_file(f)
        finally:
            shutil.rmtree(target_dir)
Example #31
    def test(self):
        current = case_11.__file__
        result_dir = tempfile.mkdtemp()
        aaae = numpy.testing.assert_array_almost_equal

        cfg = os.path.join(os.path.dirname(current), 'job.ini')
        expected_curve_poes_b1_b2 = [0.0055, 0.00042, 5.77E-5, 0.0]
        expected_curve_poes_b1_b3 = [0.00995, 0.00076, 9.7E-5, 0.0]
        expected_curve_poes_b1_b4 = [0.018, 0.0013, 0.00014, 0.0]

        expected_mean_poes = [0.01067, 0.0008, 9.774E-5, 0.0]

        expected_q0_1_poes = [0.0055, 0.00042, 5.77E-5, 0.0]
        expected_q0_9_poes = [0.013975, 0.00103, 0.0001185, 0.0]

        job = self.run_hazard(cfg)

        # Test the poe values for the two curves:
        curve_b1_b2, curve_b1_b3, curve_b1_b4 = (
            models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id,
                hazard_curve__lt_realization__isnull=False).order_by(
                    'hazard_curve__lt_realization__lt_model__sm_lt_path'))

        # Sanity check, to make sure we have the curves ordered correctly:
        self.assertEqual(['b1', 'b2'],
                         curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
        self.assertEqual(['b1', 'b3'],
                         curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)
        self.assertEqual(['b1', 'b4'],
                         curve_b1_b4.hazard_curve.lt_realization.sm_lt_path)

        aaae(expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=4)
        aaae(expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=4)
        aaae(expected_curve_poes_b1_b4, curve_b1_b4.poes, decimal=4)

        # Test the mean curve:
        [mean_curve] = models.HazardCurveData.objects\
            .filter(hazard_curve__output__oq_job=job.id,
                    hazard_curve__statistics='mean')
        aaae(expected_mean_poes, mean_curve.poes, decimal=4)

        # Test the quantile curves:
        quantile_0_1_curve, quantile_0_9_curve = \
            models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id,
                hazard_curve__statistics='quantile'
            ).order_by('hazard_curve__quantile')
        aaae(expected_q0_1_poes, quantile_0_1_curve.poes, decimal=4)
        aaae(expected_q0_9_poes, quantile_0_9_curve.poes, decimal=4)

        # Test the exports as well:
        exported_file_b1_b2 = hazard_export.export(
            curve_b1_b2.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_b1_b2.xml', exported_file_b1_b2)

        exported_file_b1_b3 = hazard_export.export(
            curve_b1_b3.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_b1_b3.xml', exported_file_b1_b3)

        exported_file_b1_b4 = hazard_export.export(
            curve_b1_b4.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_b1_b4.xml', exported_file_b1_b4)

        exported_file_mean = hazard_export.export(
            mean_curve.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_mean.xml', exported_file_mean)

        q01_file = hazard_export.export(
            quantile_0_1_curve.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_quantile_0_1.xml', q01_file)

        q09_file = hazard_export.export(
            quantile_0_9_curve.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_quantile_0_9.xml', q09_file)

        shutil.rmtree(result_dir)
Example #32
    def test(self):
        current = case_11.__file__
        result_dir = tempfile.mkdtemp()
        aaae = numpy.testing.assert_array_almost_equal

        cfg = os.path.join(os.path.dirname(current), 'job.ini')
        expected_curve_poes_b1_b2 = [0.0055, 0.00042, 5.77E-5, 0.0]
        expected_curve_poes_b1_b3 = [0.00995, 0.00076, 9.7E-5, 0.0]
        expected_curve_poes_b1_b4 = [0.018, 0.0013, 0.00014, 0.0]

        expected_mean_poes = [0.01067, 0.0008, 9.774E-5, 0.0]

        expected_q0_1_poes = [0.0055, 0.00042, 5.77E-5, 0.0]
        expected_q0_9_poes = [0.013975, 0.00103, 0.0001185, 0.0]

        job = self.run_hazard(cfg)

        # Test the poe values for the two curves:
        curve_b1_b2, curve_b1_b3, curve_b1_b4 = (
            models.HazardCurveData.objects
            .filter(hazard_curve__output__oq_job=job.id,
                    hazard_curve__lt_realization__isnull=False)
            .order_by(
                'hazard_curve__lt_realization__lt_model__sm_lt_path'))

        # Sanity check, to make sure we have the curves ordered correctly:
        self.assertEqual(
            ['b1', 'b2'],
            curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
        self.assertEqual(
            ['b1', 'b3'],
            curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)
        self.assertEqual(
            ['b1', 'b4'],
            curve_b1_b4.hazard_curve.lt_realization.sm_lt_path)

        aaae(expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=4)
        aaae(expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=4)
        aaae(expected_curve_poes_b1_b4, curve_b1_b4.poes, decimal=4)

        # Test the mean curve:
        [mean_curve] = models.HazardCurveData.objects\
            .filter(hazard_curve__output__oq_job=job.id,
                    hazard_curve__statistics='mean')
        aaae(expected_mean_poes, mean_curve.poes, decimal=4)

        # Test the quantile curves:
        quantile_0_1_curve, quantile_0_9_curve = \
            models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id,
                hazard_curve__statistics='quantile'
            ).order_by('hazard_curve__quantile')
        aaae(expected_q0_1_poes, quantile_0_1_curve.poes, decimal=4)
        aaae(expected_q0_9_poes, quantile_0_9_curve.poes, decimal=4)

        # Test the exports as well:
        exported_file_b1_b2 = hazard_export.export(
            curve_b1_b2.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_b1_b2.xml', exported_file_b1_b2)

        exported_file_b1_b3 = hazard_export.export(
            curve_b1_b3.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_b1_b3.xml', exported_file_b1_b3)

        exported_file_b1_b4 = hazard_export.export(
            curve_b1_b4.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_b1_b4.xml', exported_file_b1_b4)

        exported_file_mean = hazard_export.export(
            mean_curve.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_mean.xml', exported_file_mean)

        q01_file = hazard_export.export(
            quantile_0_1_curve.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_quantile_0_1.xml', q01_file)

        q09_file = hazard_export.export(
            quantile_0_9_curve.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_quantile_0_9.xml', q09_file)

        shutil.rmtree(result_dir)
Example #33
    def test_classical_risk_export(self):
        target_dir = tempfile.mkdtemp()
        try:
            haz_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_haz_classical.ini'
            )
            risk_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_risk_classical.ini'
            )

            haz_job = helpers.run_job(haz_cfg).job
            # Run the risk on all outputs produced by the haz calc:
            risk_job = helpers.run_job(
                risk_cfg, hazard_calculation_id=haz_job.id).job

            risk_outputs = models.Output.objects.filter(oq_job=risk_job)

            loss_curve_outputs = risk_outputs.filter(output_type='loss_curve')
            loss_map_outputs = risk_outputs.filter(output_type='loss_map')

            # 16 logic tree realizations + 1 mean + 2 quantiles = 19
            # + 19 insured loss curves
            self.assertEqual(38, loss_curve_outputs.count())
            # make sure the mean and quantile curve sets got created correctly
            loss_curves = models.LossCurve.objects.filter(
                output__oq_job=risk_job,
                insured=False
            )
            # sanity check
            self.assertEqual(19, loss_curves.count())

            insured_curves = models.LossCurve.objects.filter(
                output__oq_job=risk_job,
                insured=True
            )
            # sanity check
            self.assertEqual(19, insured_curves.count())

            # mean
            self.assertEqual(1, loss_curves.filter(statistics='mean').count())
            # quantiles
            self.assertEqual(
                2, loss_curves.filter(statistics='quantile').count()
            )

            # mean
            self.assertEqual(
                1, insured_curves.filter(statistics='mean').count())
            # quantiles
            self.assertEqual(
                2, insured_curves.filter(statistics='quantile').count()
            )

            # 16 logic tree realizations = 16 loss maps + 1 mean loss
            # map + 2 quantile loss maps
            self.assertEqual(19, loss_map_outputs.count())

            # 19 loss fractions
            loss_fraction_outputs = risk_outputs.filter(
                output_type="loss_fraction")
            self.assertEqual(19, loss_fraction_outputs.count())

            # Now try to export everything, just to do a "smoketest" of the
            # exporter code:
            loss_curve_files = []
            for o in loss_curve_outputs:
                loss_curve_files.append(core.export(o.id, target_dir, 'xml'))

            loss_map_files = []
            for o in loss_map_outputs:
                loss_map_files.append(core.export(o.id, target_dir, 'xml'))

            self.assertEqual(38, len(loss_curve_files))
            self.assertEqual(19, len(loss_map_files))

            for f in loss_curve_files:
                self._test_exported_file(f)
            for f in loss_map_files:
                self._test_exported_file(f)
        finally:
            shutil.rmtree(target_dir)
Example #34
    def test_event_based_risk_export(self):
        target_dir = tempfile.mkdtemp()
        try:
            haz_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_haz_event_based.ini'
            )
            risk_cfg = helpers.get_data_path(
                'end-to-end-hazard-risk/job_risk_event_based.ini'
            )

            haz_job = helpers.run_job(haz_cfg).job
            # Run the risk on all outputs produced by the haz calc:
            risk_job = helpers.run_job(
                risk_cfg, hazard_calculation_id=haz_job.id).job

            risk_outputs = models.Output.objects.filter(oq_job=risk_job)

            agg_loss_curve_outputs = risk_outputs.filter(
                output_type='agg_loss_curve')
            loss_curve_outputs = risk_outputs.filter(output_type='loss_curve')
            loss_map_outputs = risk_outputs.filter(output_type='loss_map')

            # (1 mean + 2 quantiles) * 2 (as there are also insured curves)
            self.assertEqual(6, loss_curve_outputs.count())

            # 16 rlzs + 16 (due to insured curves)
            event_loss_curve_outputs = risk_outputs.filter(
                output_type='event_loss_curve')
            self.assertEqual(32, event_loss_curve_outputs.count())
            self.assertEqual(16, agg_loss_curve_outputs.count())

            # make sure the mean and quantile curve sets got created correctly
            loss_curves = models.LossCurve.objects.filter(
                output__oq_job=risk_job
            )
            # sanity check (16 aggregate loss curves + 38 loss curves)
            self.assertEqual(54, loss_curves.count())
            # mean
            self.assertEqual(2, loss_curves.filter(statistics='mean').count())
            # quantiles
            self.assertEqual(
                4, loss_curves.filter(statistics='quantile').count()
            )

            # 16 logic tree realizations = 16 loss maps + 1 mean loss
            # map + 2 quantile loss maps
            self.assertEqual(19, loss_map_outputs.count())

            # 16 event loss tables (1 per rlz)
            event_loss_tables = risk_outputs.filter(output_type="event_loss")
            self.assertEqual(16, event_loss_tables.count())

            # 32 loss fractions
            loss_fraction_outputs = risk_outputs.filter(
                output_type="loss_fraction")
            self.assertEqual(32, loss_fraction_outputs.count())

            # Now try to export everything, just to do a "smoketest" of the
            # exporter code:
            loss_curve_files = []
            for o in loss_curve_outputs:
                loss_curve_files.append(core.export(o.id, target_dir, 'xml'))
            for o in loss_fraction_outputs:
                loss_curve_files.append(core.export(o.id, target_dir, 'xml'))
            for o in event_loss_curve_outputs:
                loss_curve_files.append(core.export(o.id, target_dir, 'xml'))

            agg_loss_curve_files = []
            for o in agg_loss_curve_outputs:
                agg_loss_curve_files.append(
                    core.export(o.id, target_dir, 'xml')
                )

            event_loss_table_files = []
            for o in event_loss_tables:
                event_loss_table_files.append(
                    core.export(o.id, target_dir, 'csv')
                )

            loss_map_files = []
            for o in loss_map_outputs:
                loss_map_files.append(core.export(o.id, target_dir, 'xml'))

            self.assertEqual(70, len(loss_curve_files))
            self.assertEqual(16, len(agg_loss_curve_files))
            self.assertEqual(16, len(event_loss_table_files))
            self.assertEqual(19, len(loss_map_files))

            for f in loss_curve_files:
                self._test_exported_file(f)
            for f in loss_map_files:
                self._test_exported_file(f)
        finally:
            shutil.rmtree(target_dir)