Exemple #1
0
def export_gmf_xml(key, output, target):
    """
    Export the GMF Collection specified by ``output`` to the ``target``.

    :param output:
        :class:`openquake.engine.db.models.Output` with an `output_type` of
        `gmf`.
    :param target:
        The same ``target`` as :func:`export`.

    :returns:
        The same return value as defined by :func:`export`.
    """
    gmf = models.Gmf.objects.get(output=output.id)
    job = output.oq_job
    if output.output_type == 'gmf':
        # event based: recover the logic tree paths from the realization
        rlz = gmf.lt_realization
        sm_path = core.LT_PATH_JOIN_TOKEN.join(rlz.sm_lt_path)
        gsim_path = core.LT_PATH_JOIN_TOKEN.join(rlz.gsim_lt_path)
    else:
        # gmf_scenario: scenario calculations carry no logic tree info
        sm_path = gsim_path = ''
    dest = _get_result_export_dest(job.id, target, output.gmf)
    xml_writer = hazard_writers.EventBasedGMFXMLWriter(
        dest, sm_path, gsim_path)
    with floatformat('%12.8E'):
        xml_writer.serialize(gmf)
    return dest
Exemple #2
0
def export_gmf_xml(key, export_dir, fname, sitecol, ruptures, gmfs, rlz,
                   investigation_time):
    """
    Export a GMF collection to XML.

    :param key: output_type and export_type
    :param export_dir: the directory where to export
    :param fname: name of the exported file
    :param sitecol: the full site collection
    :param ruptures: an ordered list of ruptures
    :param gmfs: a matrix of ground motion fields of shape (R, N)
    :param rlz: a realization object
    :param investigation_time: investigation time (None for scenario)
    """
    dest = os.path.join(export_dir, fname)
    if hasattr(rlz, 'gsim_rlz'):
        # event based: the realization carries the full logic tree info
        sm_path = '_'.join(rlz.sm_lt_path)
        gsim_path = rlz.gsim_rlz.uid
    else:
        # scenario: there is no source model logic tree
        sm_path, gsim_path = '', rlz.uid
    collection = GmfCollection(sitecol, ruptures, gmfs, investigation_time)
    writer = hazard_writers.EventBasedGMFXMLWriter(
        dest, sm_lt_path=sm_path, gsim_lt_path=gsim_path)
    with floatformat('%12.8E'):
        writer.serialize(collection)
    return {key: [dest]}
Exemple #3
0
    def test_case_9(self):
        # XML export must run under a fixed float format for reproducibility
        with writers.floatformat('%10.6E'):
            out = self.run_calc(case_9.__file__, 'job.ini', exports='xml')
        xml1, xml2 = out['gmf_data', 'xml']
        self.assertEqualFiles('LinLee2008SSlab_gmf.xml', xml1)
        self.assertEqualFiles('YoungsEtAl1997SSlab_gmf.xml', xml2)

        out = self.run_calc(case_9.__file__, 'job.ini', exports='txt,csv,hdf5')
        txt1, txt2 = out['gmf_data', 'txt']
        self.assertEqualFiles('LinLee2008SSlab_gmf.txt', txt1)
        self.assertEqualFiles('YoungsEtAl1997SSlab_gmf.txt', txt2)

        csv1, csv2 = out['gmf_data', 'csv']
        self.assertEqualFiles('gmf-LinLee2008SSlab-PGA.csv', csv1)
        self.assertEqualFiles('gmf-YoungsEtAl1997SSlab-PGA.csv', csv2)

        # test the HDF5 export: two datasets with identical layout
        [fname] = out['gmf_data', 'hdf5']
        with h5py.File(fname) as f:
            self.assertEqual(len(f), 2)  # there are only two datasets
            lin = f['LinLee2008SSlab()']
            youngs = f['YoungsEtAl1997SSlab()']
            expected_names = ('lon', 'lat') + tuple(
                'PGA-%03d' % i for i in range(10))
            self.assertEqual(lin.dtype.names, expected_names)
            self.assertEqual(lin.shape, (3,))
            self.assertEqual(lin.dtype.names, youngs.dtype.names)
            self.assertEqual(lin.shape, youngs.shape)
Exemple #4
0
    def test_case_9(self):
        # XML export must run under a fixed float format for reproducibility
        with writers.floatformat('%10.6E'):
            out = self.run_calc(case_9.__file__, 'job.ini', exports='xml')
        first, second = out['gmf_data', 'xml']
        self.assertEqualFiles('LinLee2008SSlab_gmf.xml', first)
        self.assertEqualFiles('YoungsEtAl1997SSlab_gmf.xml', second)

        out = self.run_calc(case_9.__file__, 'job.ini', exports='txt,csv,npz')
        first, second = out['gmf_data', 'txt']
        self.assertEqualFiles('LinLee2008SSlab_gmf.txt', first)
        self.assertEqualFiles('YoungsEtAl1997SSlab_gmf.txt', second)

        first, second = out['gmf_data', 'csv']
        self.assertEqualFiles('gmf-LinLee2008SSlab-PGA.csv', first)
        self.assertEqualFiles('gmf-YoungsEtAl1997SSlab-PGA.csv', second)

        # test the .npz export: two arrays with the same structure
        [fname] = out['gmf_data', 'npz']
        with numpy.load(fname) as arrays:
            self.assertEqual(len(arrays.keys()), 2)  # only two datasets
            lin = arrays['LinLee2008SSlab()']
            youngs = arrays['YoungsEtAl1997SSlab()']
            self.assertEqual(lin.dtype.names, ('lon', 'lat', 'PGA'))
            self.assertEqual(lin.shape, (3,))
            self.assertEqual(lin['PGA'].shape, (3, 10))
            self.assertEqual(lin.dtype.names, youngs.dtype.names)
            self.assertEqual(lin.shape, youngs.shape)
Exemple #5
0
def export_hazard_curves_xml(key, dest, sitecol, curves_by_imt, imtls,
                             investigation_time):
    """
    Export the curves of the given realization into XML.

    :param key: output_type and export_type
    :param dest: name of the exported file
    :param sitecol: site collection
    :param curves_by_imt: dictionary with the curves keyed by IMT
    :param imtls: dictionary with the intensity measure types and levels
    :param investigation_time: investigation time in years
    """
    metadata = []
    all_curves = []
    # iterate the IMTs in lexicographic order so the output is deterministic
    for imt_str, imls in sorted(imtls.items()):
        curves = [HazardCurve(site.location, poes)
                  for site, poes in zip(sitecol, curves_by_imt[imt_str])]
        all_curves.append(curves)
        imt = from_string(imt_str)
        metadata.append(dict(
            quantile_value=None,
            statistics=None,
            smlt_path='',
            gsimlt_path='',
            investigation_time=investigation_time,
            imt=imt[0],
            sa_period=imt[1],
            sa_damping=imt[2],
            imls=imls))
    writer = hazard_writers.MultiHazardCurveXMLWriter(dest, metadata)
    with floatformat('%12.8E'):
        writer.serialize(all_curves)
    return {dest: dest}
Exemple #6
0
def export_gmf_xml(key, output, target):
    """
    Export the GMF Collection specified by ``output`` to the ``target``.

    :param output:
        :class:`openquake.engine.db.models.Output` with an `output_type` of
        `gmf`.
    :param target:
        The same ``target`` as :func:`export`.

    :returns:
        The same return value as defined by :func:`export`.
    """
    gmf_coll = models.Gmf.objects.get(output=output.id)
    if output.output_type != 'gmf':
        # gmf_scenario: no logic tree realization is associated
        sm_lt_path = gsim_lt_path = ''
    else:
        realization = gmf_coll.lt_realization
        join = core.LT_PATH_JOIN_TOKEN.join
        sm_lt_path = join(realization.sm_lt_path)
        gsim_lt_path = join(realization.gsim_lt_path)
    dest = _get_result_export_dest(output.oq_job.id, target, output.gmf)
    writer = hazard_writers.EventBasedGMFXMLWriter(
        dest, sm_lt_path, gsim_lt_path)
    with floatformat('%12.8E'):
        writer.serialize(gmf_coll)
    return dest
Exemple #7
0
def export_hazard_curves_xml(key, dest, sitecol, curves_by_imt,
                             imtls, investigation_time):
    """
    Export the curves of the given realization into XML.

    :param key: output_type and export_type
    :param dest: name of the exported file
    :param sitecol: site collection
    :param curves_by_imt: dictionary with the curves keyed by IMT
    :param imtls: dictionary with the intensity measure types and levels
    :param investigation_time: investigation time in years
    """
    mdata = []
    hcurves = []
    # metadata fields shared by every IMT
    common = {'quantile_value': None, 'statistics': None,
              'smlt_path': '', 'gsimlt_path': '',
              'investigation_time': investigation_time}
    for imt_str, imls in sorted(imtls.items()):
        hcurves.append(
            [HazardCurve(site.location, poes)
             for site, poes in zip(sitecol, curves_by_imt[imt_str])])
        imt = from_string(imt_str)
        record = dict(common)
        record.update(imt=imt[0], sa_period=imt[1], sa_damping=imt[2],
                      imls=imls)
        mdata.append(record)
    writer = hazard_writers.MultiHazardCurveXMLWriter(dest, mdata)
    with floatformat('%12.8E'):
        writer.serialize(hcurves)
    return {dest: dest}
Exemple #8
0
    def serialize(self, data):
        """
        Write the disaggregation results in ``data`` to ``self.dest`` as a
        NRML ``disaggMatrices`` document.

        :param data:

            A sequence of data where each datum has the following attributes:

            * matrix: N-dimensional numpy array containing the disaggregation
              histogram.
            * dim_labels: A list of strings which label the dimensions of a
              given histogram. For example, for a Magnitude-Distance-Epsilon
              histogram, we would expect `dim_labels` to be
              ``['Mag', 'Dist', 'Eps']``.
            * poe: The disaggregation Probability of Exceedance level for which
              these results were produced.
            * iml: Intensity measure level, interpolated from the source hazard
              curve at the given ``poe``.
        """
        # plain function instead of a lambda assigned to a name (PEP 8 E731)
        def transform(val):
            # render a sequence of bin edges as a comma-separated string
            return ', '.join(map(scientificformat, val))

        with nrml.NRMLFile(self.dest, 'w') as fh, floatformat('%.6E'):
            root = et.Element('nrml')

            diss_matrices = et.SubElement(root, 'disaggMatrices')

            _set_metadata(diss_matrices, self.metadata, _ATTR_MAP)
            _set_metadata(diss_matrices,
                          self.metadata,
                          self.BIN_EDGE_ATTR_MAP,
                          transform=transform)

            for result in data:
                diss_matrix = et.SubElement(diss_matrices, 'disaggMatrix')

                # Check that we have bin edges defined for each dimension label
                # (mag, dist, lon, lat, eps, TRT)
                for label in result.dim_labels:
                    bin_edge_attr = self.DIM_LABEL_TO_BIN_EDGE_MAP.get(label)
                    assert self.metadata.get(bin_edge_attr) is not None, (
                        "Writer is missing '%s' metadata" % bin_edge_attr)

                diss_matrix.set('type', ','.join(result.dim_labels))
                diss_matrix.set(
                    'dims', ','.join(str(x) for x in result.matrix.shape))
                diss_matrix.set('poE', scientificformat(result.poe))
                diss_matrix.set('iml', scientificformat(result.iml))

                # one <prob> element per cell of the N-dimensional matrix
                for idxs, value in numpy.ndenumerate(result.matrix):
                    prob = et.SubElement(diss_matrix, 'prob')
                    prob.set('index', ','.join(str(x) for x in idxs))
                    prob.set('value', scientificformat(value))

            nrml.write(list(root), fh)
    def serialize(self, data):
        """
        Write the disaggregation results in ``data`` to ``self.dest`` as a
        NRML ``disaggMatrices`` document.

        :param data:

            A sequence of data where each datum has the following attributes:

            * matrix: N-dimensional numpy array containing the disaggregation
              histogram.
            * dim_labels: A list of strings which label the dimensions of a
              given histogram. For example, for a Magnitude-Distance-Epsilon
              histogram, we would expect `dim_labels` to be
              ``['Mag', 'Dist', 'Eps']``.
            * poe: The disaggregation Probability of Exceedance level for which
              these results were produced.
            * iml: Intensity measure level, interpolated from the source hazard
              curve at the given ``poe``.
        """
        # plain function instead of a lambda assigned to a name (PEP 8 E731)
        def transform(val):
            # render a sequence of bin edges as a comma-separated string
            return ', '.join(map(scientificformat, val))

        with open(self.dest, 'wb') as fh, floatformat('%.6E'):
            root = et.Element('nrml')

            diss_matrices = et.SubElement(root, 'disaggMatrices')

            _set_metadata(diss_matrices, self.metadata, _ATTR_MAP)
            _set_metadata(diss_matrices, self.metadata, self.BIN_EDGE_ATTR_MAP,
                          transform=transform)

            for result in data:
                diss_matrix = et.SubElement(diss_matrices, 'disaggMatrix')

                # Check that we have bin edges defined for each dimension label
                # (mag, dist, lon, lat, eps, TRT)
                for label in result.dim_labels:
                    bin_edge_attr = self.DIM_LABEL_TO_BIN_EDGE_MAP.get(label)
                    assert self.metadata.get(bin_edge_attr) is not None, (
                        "Writer is missing '%s' metadata" % bin_edge_attr)

                diss_matrix.set('type', ','.join(result.dim_labels))
                diss_matrix.set(
                    'dims', ','.join(str(x) for x in result.matrix.shape))
                diss_matrix.set('poE', scientificformat(result.poe))
                diss_matrix.set('iml', scientificformat(result.iml))

                # one <prob> element per cell of the N-dimensional matrix
                for idxs, value in numpy.ndenumerate(result.matrix):
                    prob = et.SubElement(diss_matrix, 'prob')
                    prob.set('index', ','.join(str(x) for x in idxs))
                    prob.set('value', scientificformat(value))

            nrml.write(list(root), fh)
Exemple #10
0
    def test_case_9(self):
        # run once exporting XML under a fixed float format
        with writers.floatformat('%10.6E'):
            out = self.run_calc(case_9.__file__, 'job.ini', exports='xml')
        lin_xml, youngs_xml = out['gmfs', 'xml']
        self.assertEqualFiles('LinLee2008SSlab_gmf.xml', lin_xml)
        self.assertEqualFiles('YoungsEtAl1997SSlab_gmf.xml', youngs_xml)

        # run again exporting CSV
        out = self.run_calc(case_9.__file__, 'job.ini', exports='csv')
        lin_csv, youngs_csv = out['gmfs', 'csv']
        self.assertEqualFiles('LinLee2008SSlab_gmf.csv', lin_csv)
        self.assertEqualFiles('YoungsEtAl1997SSlab_gmf.csv', youngs_csv)
Exemple #11
0
    def test_case_9(self):
        # XML export needs a fixed float format for stable comparisons
        with writers.floatformat('%10.6E'):
            out = self.run_calc(case_9.__file__, 'job.ini', exports='xml')
        a, b = out['gmfs', 'xml']
        self.assertEqualFiles('LinLee2008SSlab_gmf.xml', a)
        self.assertEqualFiles('YoungsEtAl1997SSlab_gmf.xml', b)

        # repeat the calculation, this time exporting CSV
        out = self.run_calc(case_9.__file__, 'job.ini', exports='csv')
        a, b = out['gmfs', 'csv']
        self.assertEqualFiles('LinLee2008SSlab_gmf.csv', a)
        self.assertEqualFiles('YoungsEtAl1997SSlab_gmf.csv', b)
Exemple #12
0
 def to_nrml(self, key, data, fname=None, fmt='%11.7E'):
     """
     Serialize ``data`` as a NRML file of the kind selected by ``key``.

     :param key:
      `dmg_dist_per_asset|dmg_dist_per_taxonomy|dmg_dist_total|collapse_map`
     :param data: sequence of rows to serialize
     :fname: the path name of the output file; if None, build a name
     :returns: path name of the saved file
     """
     if not fname:
         fname = writetmp()
     # dispatch to the node-builder method matching the key
     node = getattr(self, key + '_node')(data)
     with open(fname, 'w') as out, writers.floatformat(fmt):
         nrml.write([node], out)
     return fname
Exemple #13
0
def write(nodes, output=sys.stdout, fmt="%8.4E"):
    """
    Convert nodes into a NRML file. output must be a file
    object open in write mode. If you want to perform a
    consistency check, open it in read-write mode, then it will
    be read after creation and validated.

    :params nodes: an iterable over Node objects
    :params output: a file-like object in write or read-write mode
    """
    namespaces = {NRML05: "", GML_NAMESPACE: "gml:"}
    with writers.floatformat(fmt):
        node_to_xml(Node("nrml", nodes=nodes), output, namespaces)
    # in read-write mode, re-read the file to validate what was written
    if hasattr(output, "mode") and "+" in output.mode:
        output.seek(0)
        read(output)
Exemple #14
0
def write(nodes, output=sys.stdout, fmt='%8.4E'):
    """
    Convert nodes into a NRML file. output must be a file
    object open in write mode. If you want to perform a
    consistency check, open it in read-write mode, then it will
    be read after creation and validated.

    :params nodes: an iterable over Node objects
    :params output: a file-like object in write or read-write mode
    """
    root = Node('nrml', nodes=nodes)
    ns = {NRML05: '', GML_NAMESPACE: 'gml:'}
    with writers.floatformat(fmt):
        node_to_xml(root, output, ns)
    readwrite = hasattr(output, 'mode') and '+' in output.mode
    if readwrite:
        # go back to the beginning and validate the written file
        output.seek(0)
        read(output)
Exemple #15
0
 def test_case_1(self):
     # ROUNDING ERROR WARNING (MS): numbers such as 2.5 and 2.4999999999
     # are extremely close (up to 4E-11), yet they must round to a single
     # digit to compare equal in their string representation; hence the
     # precision here is reduced a lot even though the numbers are very
     # close. Comparing XMLs is a known-suboptimal choice, kept so the
     # test runs on both Ubuntu 12.04 and Ubuntu 14.04. When the
     # XML-comparison approach was adopted, supporting different library
     # versions at the same time was out of question, so checking the
     # XMLs made sense: the numbers had to be exactly identical.
     with writers.floatformat('%5.1E'):
         out = self.run_calc(case_1.__file__, 'job.ini', exports='xml')
     # skipped unconditionally because of the rounding errors above;
     # the comparison is kept for documentation purposes
     raise unittest.SkipTest
     self.assertEqualFiles('expected.xml', out['gmf_data', 'xml'][0])
Exemple #16
0
 def test_case_1(self):
     # ROUNDING ERROR WARNING (MS): values like 2.5 and 2.4999999999 are
     # extremely close (up to 4E-11) but must be rounded to one digit to
     # compare equal as strings, so precision is drastically reduced here.
     # Comparing XMLs is a known-suboptimal choice, made so this test runs
     # on both Ubuntu 12.04 and Ubuntu 14.04; when XML comparison was first
     # adopted, supporting several library versions at once was out of the
     # question, so exact-number XML checks made sense.
     with writers.floatformat('%5.1E'):
         out = self.run_calc(case_1.__file__, 'job.ini', exports='xml')
     # skipped unconditionally because of the rounding errors; the
     # comparison below is intentionally unreachable
     raise unittest.SkipTest
     self.assertEqualFiles('expected.xml', out['gmf_data', 'xml'][0])
Exemple #17
0
def export_hazard_map_csv(key, output, target):
    """
    General hazard map export code.
    """
    file_ext = key[1]
    rows = []
    # NOTE: `hazard_map` is reused after the loop; presumably the output
    # always contains at least one map
    for hazard_map in models.HazardMap.objects.filter(output=output):
        rows.extend(zip(hazard_map.lons, hazard_map.lats, hazard_map.imls))

    dest = _get_result_export_dest(
        output.oq_job.id, target, hazard_map, file_ext=file_ext)
    with open(dest, 'w') as f, floatformat('%12.8E'):
        writer = csv.writer(f, delimiter=' ')
        for row in sorted(rows):
            writer.writerow(map(scientificformat, row))
    return dest
Exemple #18
0
def export_hazard_curve_csv(key, output, target):
    """
    Save a hazard curve (of a given IMT) as a .csv file in the format
    (lon lat poe1 ... poeN), where the fields are space separated.
    """
    data = []
    # collect (lon, lat, poes) triples from every curve of this output
    for hc in models.HazardCurve.objects.filter(output=output.id):
        x_y_poes = models.HazardCurveData.objects.all_curves_simple(
            filter_args=dict(hazard_curve=hc.id))
        data.extend(x_y_poes)
    haz_calc_id = output.oq_job.id
    # NOTE(review): `hc` is the last loop variable; if the queryset above is
    # empty this raises NameError — presumably an output always has at least
    # one curve. TODO confirm
    dest = _get_result_export_dest(haz_calc_id, target, hc, file_ext='csv')
    # NOTE(review): binary mode with csv.writer is Python-2 style; Python 3
    # requires text mode (open(dest, 'w', newline='')) — confirm runtime
    with open(dest, 'wb') as f:
        writer = csv.writer(f, delimiter=' ')
        with floatformat('%11.7E'):
            # sort so the output rows are in a deterministic order
            for x, y, poes in sorted(data):
                writer.writerow(map(scientificformat, [x, y] + poes))
    return dest
Exemple #19
0
def export_hazard_curve_csv(key, output, target):
    """
    Save a hazard curve (of a given IMT) as a .csv file in the format
    (lon lat poe1 ... poeN), where the fields are space separated.
    """
    data = []
    # gather all (lon, lat, poes) triples belonging to this output
    for hc in models.HazardCurve.objects.filter(output=output.id):
        x_y_poes = models.HazardCurveData.objects.all_curves_simple(
            filter_args=dict(hazard_curve=hc.id))
        data.extend(x_y_poes)
    haz_calc_id = output.oq_job.id
    # NOTE(review): `hc` comes from the loop above and is undefined if the
    # queryset is empty — presumably never the case in practice. TODO confirm
    dest = _get_result_export_dest(haz_calc_id, target, hc, file_ext='csv')
    # NOTE(review): 'wb' with csv.writer only works on Python 2; Python 3
    # needs open(dest, 'w', newline='') — confirm the target runtime
    with open(dest, 'wb') as f:
        writer = csv.writer(f, delimiter=' ')
        with floatformat('%11.7E'):
            # sorting gives a deterministic row order
            for x, y, poes in sorted(data):
                writer.writerow(map(scientificformat, [x, y] + poes))
    return dest
Exemple #20
0
def write_source_model(dest, sources, name=None):
    """
    Writes a source model to XML.

    :param str dest:
        Destination path
    :param list sources:
        Source model as list of instance of the
        :class:`openquake.hazardlib.source.base.BaseSeismicSource`
    :param str name:
        Name of the source model (if missing, extracted from the filename)
    """
    # derive a model name from the file name when none is given
    name = name or os.path.splitext(os.path.basename(dest))[0]
    ordered = sorted(sources, key=lambda src: src.source_id)
    nodes = [obj_to_node(src) for src in ordered]
    source_model = LiteralNode("sourceModel", {"name": name}, nodes=nodes)
    with open(dest, 'w') as f, writers.floatformat('%s'):
        nrml.write([source_model], f)
    return dest
Exemple #21
0
def write(nodes, output=sys.stdout, fmt='%10.7E', gml=True):
    """
    Convert nodes into a NRML file. output must be a file
    object open in write mode. If you want to perform a
    consistency check, open it in read-write mode, then it will
    be read after creation and validated.

    :params nodes: an iterable over Node objects
    :params output: a file-like object in write or read-write mode
    """
    ns = {NRML05: ''}
    if gml:
        # register the gml: prefix only when requested
        ns[GML_NAMESPACE] = 'gml:'
    root = Node('nrml', nodes=nodes)
    with writers.floatformat(fmt):
        node_to_xml(root, output, ns)
    if hasattr(output, 'mode') and '+' in output.mode:
        # read-write mode: validate what was just written
        output.seek(0)
        read(output)
Exemple #22
0
def export_hazard_map_csv(key, output, target):
    """
    General hazard map export code.
    """
    data = []
    # NOTE: `hazard_map` is referenced after the loop; presumably the
    # output always contains at least one map
    for hazard_map in models.HazardMap.objects.filter(output=output):
        data.extend(zip(hazard_map.lons, hazard_map.lats, hazard_map.imls))

    dest = _get_result_export_dest(
        output.oq_job.id, target, hazard_map, file_ext=key[1])
    with open(dest, 'w') as f:
        csvwriter = csv.writer(f, delimiter=' ')
        with floatformat('%12.8E'):
            for row in sorted(data):
                csvwriter.writerow([scientificformat(v) for v in row])
    return dest
Exemple #23
0
def write(nodes, output=sys.stdout, fmt='%.7E', gml=True, xmlns=None):
    """
    Convert nodes into a NRML file. output must be a file
    object open in write mode. If you want to perform a
    consistency check, open it in read-write mode, then it will
    be read after creation and validated.

    :params nodes: an iterable over Node objects
    :params output: a file-like object in write or read-write mode
    :param fmt: format used for writing the floats (default '%.7E')
    :param gml: add the http://www.opengis.net/gml namespace
    :param xmlns: NRML namespace like http://openquake.org/xmlns/nrml/0.4
    """
    # fall back on the default NRML 0.5 namespace when none is supplied
    namespaces = {(xmlns or NRML05): ''}
    if gml:
        namespaces[GML_NAMESPACE] = 'gml:'
    root = Node('nrml', nodes=nodes)
    with writers.floatformat(fmt):
        node_to_xml(root, output, namespaces)
    if hasattr(output, 'mode') and '+' in output.mode:
        # read-write mode: rewind and validate the written file
        output.seek(0)
        read(output)