Example #1
    def serialize(self, data):
        """
        :param data:

            A sequence of data where each datum has the following attributes:

            * matrix: N-dimensional numpy array containing the disaggregation
              histogram.
            * dim_labels: A list of strings which label the dimensions of a
              given histogram. For example, for a Magnitude-Distance-Epsilon
              histogram, we would expect `dim_labels` to be
              ``['Mag', 'Dist', 'Eps']``.
            * poe: The disaggregation Probability of Exceedance level for which
              these results were produced.
            * iml: Intensity measure level, interpolated from the source hazard
              curve at the given ``poe``.
        """

        with nrml.NRMLFile(self.dest, 'w') as fh:
            root = etree.Element('nrml',
                                 nsmap=nrml.SERIALIZE_NS_MAP)

            diss_matrices = etree.SubElement(root, 'disaggMatrices')

            _set_metadata(diss_matrices, self.metadata, _ATTR_MAP)

            transform = lambda val: ', '.join(map(scientificformat, val))
            _set_metadata(diss_matrices, self.metadata, self.BIN_EDGE_ATTR_MAP,
                          transform=transform)

            for result in data:
                diss_matrix = etree.SubElement(diss_matrices, 'disaggMatrix')

                # Check that we have bin edges defined for each dimension label
                # (mag, dist, lon, lat, eps, TRT)
                for label in result.dim_labels:
                    bin_edge_attr = self.DIM_LABEL_TO_BIN_EDGE_MAP.get(label)
                    assert self.metadata.get(bin_edge_attr) is not None, (
                        "Writer is missing '%s' metadata" % bin_edge_attr
                    )

                result_type = ','.join(result.dim_labels)
                diss_matrix.set('type', result_type)

                dims = ','.join(str(x) for x in result.matrix.shape)
                diss_matrix.set('dims', dims)

                diss_matrix.set('poE', scientificformat(result.poe))
                diss_matrix.set('iml', scientificformat(result.iml))

                for idxs, value in numpy.ndenumerate(result.matrix):
                    prob = etree.SubElement(diss_matrix, 'prob')

                    index = ','.join([str(x) for x in idxs])
                    prob.set('index', index)
                    prob.set('value', scientificformat(value))

            fh.write(etree.tostring(
                root, pretty_print=True, xml_declaration=True,
                encoding='UTF-8'))
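For context, the `data` argument is a sequence of result objects with the attributes listed in the docstring. A minimal sketch of one such datum, using a hypothetical `DisaggResult` namedtuple (not part of the original code):

import collections
import numpy

# Hypothetical container matching the attributes the docstring requires.
DisaggResult = collections.namedtuple(
    'DisaggResult', 'matrix dim_labels poe iml')

result = DisaggResult(
    matrix=numpy.zeros((3, 4, 5)),      # Mag x Dist x Eps histogram
    dim_labels=['Mag', 'Dist', 'Eps'],  # one label per matrix dimension
    poe=0.1,                            # probability of exceedance
    iml=0.25)                           # interpolated intensity level
# writer.serialize([result]) would then emit one <disaggMatrix> element
# per datum, with a <prob> element per matrix cell.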
Example #2
    def serialize(self, data):
        """
        :param data:

            A sequence of data where each datum has the following attributes:

            * matrix: N-dimensional numpy array containing the disaggregation
              histogram.
            * dim_labels: A list of strings which label the dimensions of a
              given histogram. For example, for a Magnitude-Distance-Epsilon
              histogram, we would expect `dim_labels` to be
              ``['Mag', 'Dist', 'Eps']``.
            * poe: The disaggregation Probability of Exceedance level for which
              these results were produced.
            * iml: Intensity measure level, interpolated from the source hazard
              curve at the given ``poe``.
        """

        with nrml.NRMLFile(self.dest, 'w') as fh:
            root = et.Element('nrml')

            diss_matrices = et.SubElement(root, 'disaggMatrices')

            _set_metadata(diss_matrices, self.metadata, _ATTR_MAP)

            transform = lambda val: ', '.join(map(scientificformat, val))
            _set_metadata(diss_matrices,
                          self.metadata,
                          self.BIN_EDGE_ATTR_MAP,
                          transform=transform)

            for result in data:
                diss_matrix = et.SubElement(diss_matrices, 'disaggMatrix')

                # Check that we have bin edges defined for each dimension label
                # (mag, dist, lon, lat, eps, TRT)
                for label in result.dim_labels:
                    bin_edge_attr = self.DIM_LABEL_TO_BIN_EDGE_MAP.get(label)
                    assert self.metadata.get(bin_edge_attr) is not None, (
                        "Writer is missing '%s' metadata" % bin_edge_attr)

                result_type = ','.join(result.dim_labels)
                diss_matrix.set('type', result_type)

                dims = ','.join(str(x) for x in result.matrix.shape)
                diss_matrix.set('dims', dims)

                diss_matrix.set('poE', scientificformat(result.poe))
                diss_matrix.set('iml', scientificformat(result.iml))

                for idxs, value in numpy.ndenumerate(result.matrix):
                    prob = et.SubElement(diss_matrix, 'prob')

                    index = ','.join([str(x) for x in idxs])
                    prob.set('index', index)
                    prob.set('value', scientificformat(value))

            nrml.write(list(root), fh)
Example #3
File: hazard.py Project: gem/oq-risklib
def export_hazard_curves_csv(key,
                             dest,
                             sitecol,
                             curves_by_imt,
                             imtls,
                             investigation_time=None):
    """
    Export the curves of the given realization into CSV.

    :param key: output_type and export_type
    :param dest: name of the exported file
    :param sitecol: site collection
    :param curves_by_imt: dictionary with the curves keyed by IMT
    :param dict imtls: intensity measure types and levels
    :param investigation_time: investigation time
    """
    nsites = len(sitecol)
    # build a matrix of strings with size nsites * (num_imts + 1)
    # the + 1 is needed since the 0-th column contains lon lat
    rows = numpy.empty((nsites, len(imtls) + 1), dtype=object)
    for sid, lon, lat in zip(range(nsites), sitecol.lons, sitecol.lats):
        rows[sid, 0] = '%.5f %.5f' % (lon, lat)
    for i, imt in enumerate(curves_by_imt.dtype.names, 1):
        for sid, curve in zip(range(nsites), curves_by_imt[imt]):
            rows[sid, i] = scientificformat(curve, fmt='%11.7E')
    write_csv(dest, rows, header=('lon lat', ) + curves_by_imt.dtype.names)
    return {dest: dest}
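The heavy lifting in these exporters is done by `scientificformat`, imported from openquake's writers module. A rough standalone sketch of its behaviour on a sequence (the real function also handles strings and nested sequences):

# Rough sketch of scientificformat's behaviour on a sequence; illustration
# only, not the real implementation.
def scientificformat_sketch(value, fmt='%13.9E', sep=' '):
    if hasattr(value, '__iter__'):
        return sep.join(fmt % x for x in value)
    return fmt % value

print(scientificformat_sketch([0.1, 0.02], fmt='%11.7E'))
# 1.0000000E-01 2.0000000E-02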
Example #4
def export_hazard_curves_csv(key,
                             export_dir,
                             fname,
                             sitecol,
                             curves_by_imt,
                             imtls,
                             investigation_time=None):
    """
    Export the curves of the given realization into XML.

    :param key: output_type and export_type
    :param export_dir: the directory where to export
    :param fname: name of the exported file
    :param sitecol: site collection
    :param curves_by_imt: dictionary with the curves keyed by IMT
    """
    dest = os.path.join(export_dir, fname)
    nsites = len(sitecol)
    # build a matrix of strings with size nsites * (num_imts + 1)
    # the + 1 is needed since the 0-th column contains lon lat
    rows = numpy.empty((nsites, len(imtls) + 1), dtype=object)
    for sid, lon, lat in zip(range(nsites), sitecol.lons, sitecol.lats):
        rows[sid, 0] = '%s %s' % (lon, lat)
    for i, imt in enumerate(sorted(curves_by_imt.dtype.fields), 1):
        for sid, curve in zip(range(nsites), curves_by_imt[imt]):
            rows[sid, i] = scientificformat(curve, fmt='%11.7E')
    write_csv(dest, rows)
    return {fname: dest}
Example #5
def get_actual_gmfs(job):
    """
    Returns the GMFs in the database as a list of pairs [(rlz_path, values)].
    """
    cursor = models.getcursor('job_init')
    cursor.execute(GET_GMF_OUTPUTS % job.id)
    actual_gmfs = [('_'.join(k), scientificformat(sorted(v), '%8.4E'))
                   for k, v in cursor.fetchall()]
    return actual_gmfs
Example #6
    def test_case_1(self):
        self.run_calc(case_1.__file__, 'job_risk.ini', exports='csv')

        # check loss ratios
        lrs = self.calc.datastore['vulnerability/VF/structural']
        got = scientificformat(lrs.mean_loss_ratios, '%.2f')
        self.assertEqual(got, '0.05 0.10 0.20 0.40 0.80')

        # check loss curves
        [fname] = export(('loss_curves/mean', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves.csv', fname)

        # check loss maps
        [fname] = export(('loss_maps-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_maps.csv', fname)
Example #7
    def test_case_1(self):
        self.run_calc(case_1.__file__, 'job_risk.ini', exports='csv')

        # check loss ratios
        lrs = self.calc.datastore['risk_model/VF/structural-vulnerability']
        got = scientificformat(lrs.mean_loss_ratios, '%.2f')
        self.assertEqual(got, '0.05 0.10 0.20 0.40 0.80')

        # check loss curves
        [fname] = export(('loss_curves/mean', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves.csv', fname)

        # check loss maps
        [fname] = export(('loss_maps-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_maps.csv', fname)
Example #8
    def serialize(self, data):
        """
        Write the hazard curves to the given destination as GeoJSON. The
        GeoJSON format is customized to contain various bits of metadata.

        See :meth:`HazardCurveXMLWriter.serialize` for expected input.
        """
        oqmetadata = {}
        for key, value in self.metadata.items():
            if value is not None:
                if key == 'imls':
                    oqmetadata['IMLs'] = value
                else:
                    oqmetadata[_ATTR_MAP.get(key)] = scientificformat(value)

        features = []
        feature_coll = {
            'type': 'FeatureCollection',
            'features': features,
            'oqtype': 'HazardCurve',
            'oqnrmlversion': '0.4',
            'oqmetadata': oqmetadata,
        }
        for hc in data:
            poes = list(hc.poes)
            lon = hc.location.x
            lat = hc.location.y

            feature = {
                'type': 'Feature',
                'geometry': {
                    'type': 'Point',
                    'coordinates': [float(lon), float(lat)],
                },
                'properties': {
                    'poEs': poes
                },
            }
            features.append(feature)

        with nrml.NRMLFile(self.dest, 'w') as fh:
            json.dump(feature_coll,
                      fh,
                      sort_keys=True,
                      indent=4,
                      separators=(',', ': '))
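The resulting file is a standard GeoJSON FeatureCollection extended with oq* keys. An illustrative instance of the structure built above, with made-up coordinates, IMLs and PoE values:

# Illustrative only: the shape of the GeoJSON emitted above.
sample = {
    'type': 'FeatureCollection',
    'oqtype': 'HazardCurve',
    'oqnrmlversion': '0.4',
    'oqmetadata': {'IMLs': [0.005, 0.007, 0.0098]},
    'features': [{
        'type': 'Feature',
        'geometry': {'type': 'Point', 'coordinates': [0.0, 0.0]},
        'properties': {'poEs': [0.9, 0.8, 0.7]},
    }],
}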
Example #9
    def test(self):
        job = self.run_hazard(
            os.path.join(os.path.dirname(case_7.__file__), 'job.ini'))

        mean_curves = models.HazardCurveData.objects \
            .filter(hazard_curve__output__oq_job=job.id,
                    hazard_curve__statistics='mean', hazard_curve__imt='PGA') \
            .order_by('location')
        actual = scientificformat(mean_curves[0].poes, '%11.7E')

        fname = os.path.join(os.path.dirname(case_7.__file__), 'expected',
                             'hazard_curve-mean.csv')
        # NB: the format of the expected file is lon lat, poe1 ... poeN, ...
        # we extract the first poes for the first point
        expected = [line.split(',')[1] for line in open(fname)][0]
        self.assertEqual(actual, expected)
Example #10
File: test.py Project: myutwo/oq-engine
    def check_event_loss_asset(self, job):
        el = models.EventLoss.objects.get(
            output__output_type='event_loss_asset', output__oq_job=job)
        path = self._test_path("expected/event_loss_asset.csv")
        expectedlines = open(path).read().split()
        gotlines = [
            scientificformat([row.rupture.tag, row.asset.asset_ref, row.loss],
                             fmt='%11.8E', sep=',')
            for row in el.eventlossasset_set.order_by(
                'rupture__tag', 'asset__asset_ref')]
        if gotlines != expectedlines:
            actual_dir = self._test_path("actual")
            if not os.path.exists(actual_dir):
                os.mkdir(actual_dir)
            open(os.path.join(actual_dir, "event_loss_asset.csv"), 'w').write(
                '\n'.join(gotlines))
        self.assertEqual(expectedlines, gotlines)
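Each comparison line mixes string fields with a formatted loss; a self-contained toy formatter approximating the `scientificformat(..., sep=',')` call above (assuming strings pass through unchanged, as they do here):

# Toy approximation of the row formatting above; not the real function.
def fmt_row(values, fmt='%11.8E', sep=','):
    return sep.join(v if isinstance(v, str) else fmt % v for v in values)

print(fmt_row(['rup-001', 'asset-1', 0.5]))
# rup-001,asset-1,5.00000000E-01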
Example #11
    def test_case_1(self):
        out = self.run_calc(case_1.__file__, 'job_risk.ini', exports='xml')

        # check loss ratios
        lrs = self.calc.datastore['composite_risk_model/VF/structural']
        got = scientificformat(lrs.mean_loss_ratios, '%.2f')
        self.assertEqual(got, '0.05 0.10 0.20 0.40 0.80')

        # check loss curves
        [fname] = out['loss_curves-rlzs', 'xml']
        self.assertEqualFiles('expected/loss_curves.xml', fname)

        # check loss maps
        clp = self.calc.oqparam.conditional_loss_poes
        fnames = out['loss_maps-rlzs', 'xml']
        self.assertEqual(len(fnames), 3)  # for 3 conditional loss poes
        for poe, fname in zip(clp, fnames):
            self.assertEqualFiles('expected/loss_map-poe-%s.xml' % poe, fname)
Example #12
def export_stats_csv(key, export_dir, fname, sitecol, data_by_imt):
    """
    Export the scalar outputs.

    :param key: output_type and export_type
    :param export_dir: the directory where to export
    :param fname: file name
    :param sitecol: site collection
    :param data_by_imt: dictionary of float arrays keyed by IMT
    """
    dest = os.path.join(export_dir, fname)
    rows = []
    for imt in sorted(data_by_imt):
        row = [imt]
        for col in data_by_imt[imt]:
            row.append(scientificformat(col))
        rows.append(row)
    write_csv(dest, numpy.array(rows).T)
    return {fname: dest}
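Note the transpose: rows are built one per IMT, so the written CSV has one column per IMT. A toy illustration with made-up values:

import numpy

# One row per IMT before the transpose...
rows = [['PGA', '1.0E-01', '2.0E-01'],
        ['SA(0.1)', '3.0E-01', '4.0E-01']]
# ...one column per IMT after it: the first CSV row becomes
# ('PGA', 'SA(0.1)'), the second ('1.0E-01', '3.0E-01'), and so on.
transposed = numpy.array(rows).T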
Example #13
File: views.py Project: rcgee/oq-risklib
def rst_table(data, header=None, fmt='%9.7E'):
    """
    Build a .rst table from a matrix.
    
    >>> tbl = [['a', 1], ['b', 2]]
    >>> print(rst_table(tbl, header=['Name', 'Value']))
    ==== =====
    Name Value
    ==== =====
    a    1    
    b    2    
    ==== =====
    """
    try:
        # see if data is a composite numpy array
        data.dtype.fields
    except AttributeError:
        # not a composite array
        header = header or ()
    else:
        if not header:
            header = [col.split(':')[0] for col in build_header(data.dtype)]
    if header:
        col_sizes = [len(col) for col in header]
    else:
        col_sizes = [len(str(col)) for col in data[0]]
    body = []
    for row in data:
        row = tuple(scientificformat(col, fmt) for col in row)
        for (i, col) in enumerate(row):
            col_sizes[i] = max(col_sizes[i], len(col))
        body.append(row)

    sepline = ' '.join(('=' * size for size in col_sizes))
    templ = ' '.join(('%-{}s'.format(size) for size in col_sizes))
    if header:
        lines = [sepline, templ % tuple(header), sepline]
    else:
        lines = [sepline]
    for row in body:
        lines.append(templ % row)
    lines.append(sepline)
    return '\n'.join(lines)
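Beyond the doctest, the same function accepts any matrix-like input; a small usage sketch with a plain list of numeric rows:

# Usage sketch: numeric cells are rendered through scientificformat with
# the given fmt, so each cell comes out in scientific notation.
rows = [[0.1, 0.02], [0.3, 0.04]]
table = rst_table(rows, header=['poe1', 'poe2'], fmt='%.2E')
# `table` is an .rst table string like the doctest output above.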
Example #14
    def serialize(self, data):
        """
        Write the hazard curves to the given destination as GeoJSON. The
        GeoJSON format is customized to contain various bits of metadata.

        See :meth:`HazardCurveXMLWriter.serialize` for expected input.
        """
        oqmetadata = {}
        for key, value in self.metadata.items():
            if value is not None:
                if key == 'imls':
                    oqmetadata['IMLs'] = list(value)
                else:
                    oqmetadata[_ATTR_MAP.get(key)] = scientificformat(value)

        features = []
        feature_coll = {
            'type': 'FeatureCollection',
            'features': features,
            'oqtype': 'HazardCurve',
            'oqnrmlversion': '0.4',
            'oqmetadata': oqmetadata,
        }
        for hc in data:
            poes = list(hc.poes)
            lon = hc.location.x
            lat = hc.location.y

            feature = {
                'type': 'Feature',
                'geometry': {
                    'type': 'Point',
                    'coordinates': [float(lon), float(lat)],
                },
                'properties': {'poEs': poes},
            }
            features.append(feature)

        with open(self.dest, 'w') as fh:
            json.dump(feature_coll, fh, sort_keys=True, indent=4,
                      separators=(',', ': '))
Example #15
    def test_case_1(self):
        out = self.run_calc(case_1.__file__, 'job_risk.ini', exports='xml')

        # check loss ratios
        lrs = self.calc.datastore['loss_ratios']
        self.assertEqual(lrs.dtype.names, ('structural', ))
        numpy.testing.assert_equal(lrs.attrs['imt_taxos'], [['PGA', 'VF']])
        got = scientificformat(lrs['structural'][0], '%.2f')
        self.assertEqual(
            got, '0.00 0.01 0.02 0.03 0.04 0.05 0.06 0.07 0.08 '
            '0.09 0.10 0.12 0.14 0.16 0.18 0.20 0.24 0.28 0.32 '
            '0.36 0.40 0.48 0.56 0.64 0.72 0.80 0.84 0.88 0.92 '
            '0.96 1.00')

        # check loss curves
        [fname] = out['loss_curves-rlzs', 'xml']
        self.assertEqualFiles('expected/loss_curves.xml', fname)

        # check loss maps
        clp = self.calc.oqparam.conditional_loss_poes
        fnames = out['loss_maps-rlzs', 'xml']
        self.assertEqual(len(fnames), 3)  # for 3 conditional loss poes
        for poe, fname in zip(clp, fnames):
            self.assertEqualFiles('expected/loss_map-poe-%s.xml' % poe, fname)