Example #1
def get_gmfs_from_txt(oqparam, fname):
    """
    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param fname:
        the full path of the CSV file
    :returns:
        a triple (sitecol, etags, gmf array of shape (N, R)) read from a
        CSV file with format `etag indices [gmv1 ... gmvN] * num_imts`
    """
    with open(fname) as csvfile:
        firstline = next(csvfile)
        try:
            coords = valid.coordinates(firstline)
        except ValueError:
            raise InvalidFile(
                'The first line of %s is expected to contain comma separated '
                'ordered coordinates, got %s instead' % (fname, firstline))
        sitecol = sitecol_from_coords(oqparam, coords)
        if not oqparam.imtls:
            oqparam.set_risk_imtls(get_risk_models(oqparam))
        imts = list(oqparam.imtls)
        imt_dt = numpy.dtype([(bytes(imt), F32) for imt in imts])
        num_gmfs = oqparam.number_of_ground_motion_fields
        gmf_by_imt = numpy.zeros((num_gmfs, len(sitecol)), imt_dt)
        etags = []

        for lineno, line in enumerate(csvfile, 2):
            row = line.split(',')
            try:
                indices = list(map(valid.positiveint, row[1].split()))
            except ValueError:
                raise InvalidFile(
                    'The second column in %s is expected to contain integer '
                    'indices, got %s' % (fname, row[1]))
            r_sites = (sitecol if not indices else site.FilteredSiteCollection(
                indices, sitecol))
            for i in range(len(imts)):
                try:
                    array = numpy.array(valid.positivefloats(row[i + 2]))
                    # NB: i + 2 because the first 2 fields are etag and indices
                except ValueError:
                    raise InvalidFile(
                        'The column #%d in %s is expected to contain positive '
                        'floats, got %s instead' % (i + 3, fname, row[i + 2]))
                gmf_by_imt[imts[i]][lineno - 2] = r_sites.expand(array, 0)
            etags.append(row[0])
    if lineno < num_gmfs + 1:
        raise InvalidFile('%s contains %d rows, expected %d' %
                          (fname, lineno, num_gmfs + 1))
    if etags != sorted(etags):
        raise InvalidFile('The etags in %s are not ordered: %s' %
                          (fname, etags))
    return sitecol, numpy.array(etags, '|S100'), gmf_by_imt.T
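
The parser above fixes a concrete text layout: a header line of comma separated `lon lat` pairs, then one row per GMF holding an etag, the space separated site indices, and one space separated group of ground motion values per IMT. A minimal hand-written sketch of such a file (the etag spelling and all values are invented for illustration):

# Hypothetical input for get_gmfs_from_txt: 2 sites, 1 IMT, 2 GMFs.
sample = '''\
0.0 0.0, 1.0 0.0
scenario-0000000000,0 1,0.105 0.212
scenario-0000000001,0 1,0.098 0.176
'''
with open('/tmp/gmfs.txt', 'w') as f:
    f.write(sample)
# sitecol, etags, gmfs = get_gmfs_from_txt(oqparam, '/tmp/gmfs.txt')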
Example #2
def ffconvert(fname, limit_states, ff):
    """
    Convert a fragility function into a numpy array plus a bunch
    of attributes.

    :param fname: path to the fragility model file
    :param limit_states: expected limit states
    :param ff: fragility function node
    :returns: a pair (array, dictionary)
    """
    with context(fname, ff):
        ffs = ff[1:]
        imls = ff.imls
    with context(fname, imls):
        attrs = dict(format=ff['format'],
                     imt=imls['imt'],
                     nodamage=imls.attrib.get('noDamageLimit'))

    LS = len(limit_states)
    if LS != len(ffs):
        with context(fname, ff):
            raise InvalidFile('expected %d limit states, found %d' %
                              (LS, len(ffs)))
    if ff['format'] == 'continuous':
        attrs['minIML'] = float(imls['minIML'])
        attrs['maxIML'] = float(imls['maxIML'])
        array = numpy.zeros(LS, [('mean', F64), ('stddev', F64)])
        for i, ls, node in zip(range(LS), limit_states, ff[1:]):
            if ls != node['ls']:
                with context(fname, node):
                    raise InvalidFile('expected %s, found %s' %
                                      (ls, node['ls']))
            array['mean'][i] = node['mean']
            array['stddev'][i] = node['stddev']
    elif ff['format'] == 'discrete':
        attrs['imls'] = valid.positivefloats(~imls)
        valid.check_levels(attrs['imls'], attrs['imt'])
        num_poes = len(attrs['imls'])
        array = numpy.zeros((LS, num_poes))
        for i, ls, node in zip(range(LS), limit_states, ff[1:]):
            with context(fname, node):
                if ls != node['ls']:
                    raise InvalidFile('expected %s, found %s' %
                                      (ls, node['ls']))
                poes = (~node if isinstance(~node, list)
                        else valid.probabilities(~node))
                if len(poes) != num_poes:
                    raise InvalidFile('expected %s, found %s' %
                                      (num_poes, len(poes)))
                array[i, :] = poes
    # NB: the format is constrained in nrml.FragilityNode to be either
    # discrete or continuous, there is no third option
    return array, attrs
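
For orientation, the two array layouts ffconvert builds can be sketched directly with numpy (all numbers invented; F64 is numpy.float64, as in the module):

import numpy
F64 = numpy.float64

LS = 2  # e.g. limit_states = ['slight', 'collapse']

# continuous format: one (mean, stddev) record per limit state
cont = numpy.zeros(LS, [('mean', F64), ('stddev', F64)])
cont['mean'] = [0.2, 0.8]
cont['stddev'] = [0.05, 0.10]

# discrete format: one row of poes per limit state, one column per IML
num_poes = 3  # e.g. imls = [0.1, 0.2, 0.3]
disc = numpy.zeros((LS, num_poes))
disc[0] = [0.05, 0.30, 0.70]  # poes for 'slight'
disc[1] = [0.00, 0.05, 0.25]  # poes for 'collapse'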
Example #3
def get_vulnerability_functions_05(node, fname):
    """
    :param node:
        a vulnerabilityModel node
    :param fname:
        path of the vulnerability file
    :returns:
        a dictionary imt, taxonomy -> vulnerability function
    """
    # NB: the IMTs can be duplicated and with different levels, each
    # vulnerability function in a set will get its own levels
    taxonomies = set()
    vmodel = scientific.VulnerabilityModel(**node.attrib)
    # imt, taxonomy -> vulnerability function
    for vfun in node.getnodes('vulnerabilityFunction'):
        with context(fname, vfun):
            imt = vfun.imls['imt']
            imls = numpy.array(~vfun.imls)
            taxonomy = vfun['id']
        if taxonomy in taxonomies:
            raise InvalidFile(
                'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                (taxonomy, fname, vfun.lineno))
        taxonomies.add(taxonomy)
        if vfun['dist'] == 'PM':
            loss_ratios, probs = [], []
            for probabilities in vfun[1:]:
                loss_ratios.append(probabilities['lr'])
                probs.append(valid.probabilities(~probabilities))
            probs = numpy.array(probs)
            assert probs.shape == (len(loss_ratios), len(imls))
            vmodel[imt, taxonomy] = (
                scientific.VulnerabilityFunctionWithPMF(
                    taxonomy, imt, imls, numpy.array(loss_ratios),
                    probs))  # the seed will be set by readinput.get_risk_model
        else:
            with context(fname, vfun):
                loss_ratios = ~vfun.meanLRs
                coefficients = ~vfun.covLRs
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.meanLRs.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, '
                    'line %d' % (len(coefficients), len(imls), fname,
                                 vfun.covLRs.lineno))
            with context(fname, vfun):
                vmodel[imt, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt, imls, loss_ratios, coefficients,
                    vfun['dist'])
    return vmodel
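
The names read above (`id`, `dist`, the `imt` attribute on `imls`, `probabilities` with their `lr`, `meanLRs`, `covLRs`) suggest inputs along the following lines; this is a hand-written sketch with invented values, not an excerpt from the NRML schema:

# <vulnerabilityFunction id="RC" dist="PM">
#   <imls imt="PGA">0.1 0.2 0.3</imls>
#   <probabilities lr="0.00">0.9 0.6 0.3</probabilities>
#   <probabilities lr="1.00">0.1 0.4 0.7</probabilities>
# </vulnerabilityFunction>
#
# For the non-PM branch, mean loss ratios and coefficients of variation
# replace the probability rows:
#
# <vulnerabilityFunction id="URM" dist="LN">
#   <imls imt="PGA">0.1 0.2 0.3</imls>
#   <meanLRs>0.01 0.10 0.30</meanLRs>
#   <covLRs>0.50 0.40 0.30</covLRs>
# </vulnerabilityFunction>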
Example #4
def get_vulnerability_functions_04(fname):
    """
    Parse the vulnerability model in NRML 0.4 format.

    :param fname:
        path of the vulnerability file
    :returns:
        a pair (vf_dict, categories) where vf_dict maps
        imt, taxonomy -> vulnerability function
    """
    categories = dict(assetCategory=set(),
                      lossCategory=set(),
                      vulnerabilitySetID=set())
    imts = set()
    taxonomies = set()
    vf_dict = {}  # imt, taxonomy -> vulnerability function
    for vset in read_nodes(fname, filter_vset,
                           nodefactory['vulnerabilityModel']):
        categories['assetCategory'].add(vset['assetCategory'])
        categories['lossCategory'].add(vset['lossCategory'])
        categories['vulnerabilitySetID'].add(vset['vulnerabilitySetID'])
        imt_str, imls, min_iml, max_iml, imlUnit = ~vset.IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    categories['id'] = '_'.join(sorted(categories['vulnerabilitySetID']))
    del categories['vulnerabilitySetID']
    return vf_dict, categories
Example #5
def get_scenario_from_nrml(oqparam, fname):
    """
    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param fname:
        the NRML file containing the GMFs
    :returns:
        a triple (sitecol, etags, gmf array)
    """
    if not oqparam.imtls:
        oqparam.set_risk_imtls(get_risk_models(oqparam))
    imts = list(oqparam.imtls)
    num_imts = len(imts)
    imt_dt = numpy.dtype([(bytes(imt), F32) for imt in imts])
    gmfset = nrml.read(fname).gmfCollection.gmfSet
    etags, sitecounts = _extract_etags_sitecounts(gmfset)
    oqparam.sites = sorted(sitecounts)
    site_idx = {lonlat: i for i, lonlat in enumerate(oqparam.sites)}
    oqparam.number_of_ground_motion_fields = num_events = len(etags)
    sitecol = get_site_collection(oqparam)
    num_sites = len(oqparam.sites)
    gmf_by_imt = numpy.zeros((num_events, num_sites), imt_dt)
    counts = collections.Counter()
    for i, gmf in enumerate(gmfset):
        if len(gmf) != num_sites:  # there must be one node per site
            raise InvalidFile(
                'Expected %d sites, got %d nodes in %s, line %d' %
                (num_sites, len(gmf), fname, gmf.lineno))
        counts[gmf['ruptureId']] += 1
        imt = gmf['IMT']
        if imt == 'SA':
            imt = 'SA(%s)' % gmf['saPeriod']
        for node in gmf:
            sid = site_idx[node['lon'], node['lat']]
            gmf_by_imt[imt][i % num_events, sid] = node['gmv']

    for etag, count in counts.items():
        if count < num_imts:
            raise InvalidFile('Found a missing etag %r in %s' % (etag, fname))
        elif count > num_imts:
            raise InvalidFile('Found a duplicated etag %r in %s' %
                              (etag, fname))
    expected_gmvs_per_site = num_imts * len(etags)
    for lonlat, count in sitecounts.items():
        if count != expected_gmvs_per_site:
            raise InvalidFile('%s: expected %d gmvs at location %s, found %d' %
                              (fname, expected_gmvs_per_site, lonlat, count))
    return sitecol, etags, gmf_by_imt.T
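
The row index `i % num_events` assumes the gmfSet lists, for each IMT in turn, one <gmf> node per event; under that assumption the mapping from node position to (IMT group, event row) is just divmod:

num_imts, num_events = 2, 3  # invented sizes
for i in range(num_imts * num_events):
    imt_group, event_row = divmod(i, num_events)
    assert event_row == i % num_events  # the row the parser writes into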
Example #6
def get_rlzs_assoc(oqparam):
    """
    Extract the GSIM realizations from the gsim_logic_tree file, if present,
    or build a single realization from the gsim attribute. It is only defined
    for the scenario calculators.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    """
    if 'gsim_logic_tree' in oqparam.inputs:
        gsim_lt = get_gsim_lt(oqparam, [])
        if len(gsim_lt.values) != 1:
            gsim_file = os.path.join(oqparam.base_path,
                                     oqparam.inputs['gsim_logic_tree'])
            raise InvalidFile(
                'The gsim logic tree file %s must contain a single tectonic '
                'region type, found %s instead' %
                (gsim_file, list(gsim_lt.values)))
        trt = gsim_lt.values
        rlzs = sorted(get_gsim_lt(oqparam, trt))
    else:
        rlzs = [
            logictree.Realization(value=(str(oqparam.gsim), ),
                                  weight=1,
                                  lt_path=('', ),
                                  ordinal=0,
                                  lt_uid=('@', ))
        ]
    return logictree.RlzsAssoc(rlzs)
Example #7
def get_scenario_from_nrml(oqparam, fname):
    """
    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param fname:
        the NRML file containing the GMFs
    :returns:
        a triple (sitecol, rupture_tags, gmf array)
    """
    if not oqparam.imtls:
        oqparam.set_risk_imtls(get_risk_models(oqparam))
    imts = list(oqparam.imtls)
    imt_dt = numpy.dtype([(imt, float) for imt in imts])
    gmfset = nrml.read(fname).gmfCollection.gmfSet
    tags, oqparam.sites = _extract_tags_sites(gmfset)
    oqparam.number_of_ground_motion_fields = num_events = len(tags)
    sitecol = get_site_collection(oqparam)
    num_sites = len(oqparam.sites)
    gmf_by_imt = numpy.zeros((num_events, num_sites), imt_dt)
    num_imts = len(imts)
    counts = collections.Counter()
    for i, gmf in enumerate(gmfset):
        if len(gmf) != num_sites:  # there must be one node per site
            raise InvalidFile('Expected %d sites, got %d in %s, line %d' % (
                num_sites, len(gmf), fname, gmf.lineno))
        counts[gmf['ruptureId']] += 1
        imt = gmf['IMT']
        if imt == 'SA':
            imt = 'SA(%s)' % gmf['saPeriod']
        for site_idx, lon, lat, node in zip(
                range(num_sites), sitecol.lons, sitecol.lats, gmf):
            if (node['lon'], node['lat']) != (lon, lat):
                raise InvalidFile('The site mesh is not ordered in %s, line %d'
                                  % (fname, node.lineno))
            try:
                gmf_by_imt[imt][i % num_events, site_idx] = node['gmv']
            except IndexError:
                raise InvalidFile('Something wrong in %s, line %d' %
                                  (fname, node.lineno))
    for tag, count in counts.items():
        if count < num_imts:
            raise InvalidFile('Found a missing tag %r in %s' %
                              (tag, fname))
        elif count > num_imts:
            raise InvalidFile('Found a duplicated tag %r in %s' %
                              (tag, fname))
    return sitecol, tags, gmf_by_imt.T
Example #8
def get_gmfs_from_csv(oqparam, sitecol, fname):
    """
    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param sitecol:
        a SiteCollection instance with sites consistent with the CSV file
    :param fname:
        the full path of the CSV file
    :returns:
        a triple (sitecol, tags, gmf array of shape (N, R)) read from a
        CSV file with format `tag indices [gmv1 ... gmvN] * num_imts`
    """
    imts = list(oqparam.imtls)
    imt_dt = numpy.dtype([(imt, float) for imt in imts])
    num_gmfs = oqparam.number_of_ground_motion_fields
    gmf_by_imt = numpy.zeros((num_gmfs, len(sitecol)), imt_dt)
    tags = []
    with open(fname) as csvfile:
        for lineno, line in enumerate(csvfile, 1):
            row = line.split(',')
            try:
                indices = list(map(valid.positiveint, row[1].split()))
            except ValueError:
                raise InvalidFile(
                    'The second column in %s is expected to contain integer '
                    'indices, got %s instead' % (fname, row[1]))
            r_sites = (sitecol if not indices else site.FilteredSiteCollection(
                indices, sitecol))
            for i in range(len(imts)):
                try:
                    array = numpy.array(valid.positivefloats(row[i + 2]))
                    # NB: i + 2 because the first 2 fields are tag and indices
                except ValueError:
                    raise InvalidFile(
                        'The column #%d in %s is expected to contain positive '
                        'floats, got %s instead' % (i + 3, fname, row[i + 2]))
                gmf_by_imt[imts[i]][lineno - 1] = r_sites.expand(array, 0)
            tags.append(row[0])
    if lineno < num_gmfs:
        raise InvalidFile('%s contains %d rows, expected %d' %
                          (fname, lineno, num_gmfs))
    if tags != sorted(tags):
        raise InvalidFile('The tags in %s are not ordered: %s' % (fname, tags))
    return sitecol, numpy.array(tags, '|S100'), gmf_by_imt.T
Example #9
def get_vulnerability_functions_04(node, fname):
    """
    :param node:
        a vulnerabilityModel node
    :param fname:
        path to the vulnerability file
    :returns:
        a dictionary imt, taxonomy -> vulnerability function
    """
    logging.warning('Please upgrade %s to NRML 0.5', fname)
    # NB: the IMTs can be duplicated and with different levels, each
    # vulnerability function in a set will get its own levels
    imts = set()
    taxonomies = set()
    # imt, taxonomy -> vulnerability function
    vmodel = scientific.VulnerabilityModel(**node.attrib)
    for vset in node:
        imt_str, imls, min_iml, max_iml, imlUnit = ~vset.IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vmodel[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    return vmodel
Example #10
def read_composite_array(fname, sep=','):
    r"""
    Convert a CSV file with header into a numpy array of records.

    >>> from openquake.baselib.general import writetmp
    >>> fname = writetmp('PGA:float64:3,PGV:float64:2,avg:float64:1\n'
    ...                  '.1 .2 .3,.4 .5,.6\n')
    >>> print(read_composite_array(fname))  # array of shape (1,)
    [([0.1, 0.2, 0.3], [0.4, 0.5], [0.6])]
    """
    with open(fname) as f:
        header = next(f)
        fields, dtype = parse_header(header.split(sep))
        ts_pairs = []  # [(type, shape), ...]
        for name in fields:
            dt = dtype.fields[name][0]
            ts_pairs.append((dt.subdtype[0].type if dt.subdtype else dt.type,
                             dt.shape))
        col_ids = list(range(1, len(ts_pairs) + 1))
        num_columns = len(col_ids)
        records = []
        col, col_id = '', 0
        for i, line in enumerate(f, 2):
            row = line.split(sep)
            if len(row) != num_columns:
                raise InvalidFile(
                    'expected %d columns, found %d in file %s, line %d' %
                    (num_columns, len(row), fname, i))
            try:
                record = []
                for (ntype, shape), col, col_id in zip(ts_pairs, row, col_ids):
                    record.append(_cast(col, ntype, shape, i, fname))
                records.append(tuple(record))
            except Exception as e:
                raise InvalidFile(
                    'Could not cast %r in file %s, line %d, column %d '
                    'using %s: %s' % (col, fname, i, col_id,
                                      (ntype.__name__,) + shape, e))
        return numpy.array(records, dtype)
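
`_cast` is not shown here; judging from the call `_cast(col, ntype, shape, i, fname)` and the doctest, one plausible reading is: split a space separated field when the dtype has a shape, otherwise convert the scalar. A hypothetical reconstruction, not the library's implementation:

def _cast(col, ntype, shape, lineno, fname):
    # vector fields such as 'PGA:float64:3' hold space separated values
    if shape:
        return [ntype(v) for v in col.split()]
    return ntype(col)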
Example #11
def get_gmfs(oqparam):
    """
    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :returns:
        sitecol, tags, gmf array
    """
    fname = oqparam.inputs['gmfs']
    if fname.endswith('.txt'):
        return get_gmfs_from_txt(oqparam, fname)
    elif fname.endswith('.xml'):
        return get_scenario_from_nrml(oqparam, fname)
    else:
        raise InvalidFile(fname)
Example #12
def get_gmfs(oqparam, sitecol=None):
    """
    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param sitecol:
        a SiteCollection instance with sites consistent with the data file
    :returns:
        sitecol, tags, gmf array
    """
    fname = oqparam.inputs['gmfs']
    if fname.endswith('.csv'):
        return get_gmfs_from_csv(oqparam, sitecol, fname)
    elif fname.endswith('.xml'):
        return get_scenario_from_nrml(oqparam, fname)
    else:
        raise InvalidFile(fname)
Example #13
def get_source_models(oqparam, gsim_lt, source_model_lt, in_memory=True):
    """
    Build all the source models generated by the logic tree.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param gsim_lt:
        a :class:`openquake.commonlib.logictree.GsimLogicTree` instance
    :param source_model_lt:
        a :class:`openquake.commonlib.logictree.SourceModelLogicTree` instance
    :param in_memory:
        if True, keep in memory the sources, else just collect the TRTs
    :returns:
        an iterator over :class:`openquake.commonlib.source.SourceModel`
        tuples
    """
    converter = sourceconverter.SourceConverter(
        oqparam.investigation_time, oqparam.rupture_mesh_spacing,
        oqparam.complex_fault_mesh_spacing, oqparam.width_of_mfd_bin,
        oqparam.area_source_discretization)
    parser = source.SourceModelParser(converter)

    # consider only the effective realizations
    rlzs = logictree.get_effective_rlzs(source_model_lt)
    samples_by_lt_path = source_model_lt.samples_by_lt_path()
    num_source_models = len(rlzs)
    for i, rlz in enumerate(rlzs):
        sm = rlz.value  # name of the source model
        smpath = rlz.lt_path
        num_samples = samples_by_lt_path[smpath]
        fname = possibly_gunzip(os.path.join(oqparam.base_path, sm))
        if in_memory:
            apply_unc = source_model_lt.make_apply_uncertainties(smpath)
            try:
                trt_models = parser.parse_trt_models(fname, apply_unc)
            except ValueError as e:
                if str(e) in ('Surface does not conform with Aki & '
                              'Richards convention',
                              'Edges points are not in the right order'):
                    raise InvalidFile('''\
    %s: %s. Probably you are using an obsolete model.
    In that case you can fix the file with the command
    python -m openquake.engine.tools.correct_complex_sources %s
    ''' % (fname, e, fname))
                else:
                    raise
        else:  # just collect the TRT models
            smodel = next(
                read_nodes(fname, lambda el: 'sourceModel' in el.tag,
                           source.nodefactory['sourceModel']))
            trt_models = source.TrtModel.collect(smodel)
        trts = [mod.trt for mod in trt_models]
        source_model_lt.tectonic_region_types.update(trts)

        gsim_file = oqparam.inputs.get('gsim_logic_tree')
        if gsim_file:  # check TRTs
            for trt_model in trt_models:
                if trt_model.trt not in gsim_lt.values:
                    raise ValueError(
                        "Found in %r a tectonic region type %r inconsistent "
                        "with the ones in %r" % (sm, trt_model.trt, gsim_file))
        else:
            gsim_lt = logictree.GsimLogicTree.from_(oqparam.gsim)
        weight = rlz.weight / num_samples
        num_gsim_paths = (num_samples if oqparam.number_of_logic_tree_samples
                          else gsim_lt.get_num_paths())
        logging.info('Processed source model %d/%d with %d gsim path(s)',
                     i + 1, num_source_models, num_gsim_paths)
        yield source.SourceModel(sm, weight, smpath, trt_models,
                                 num_gsim_paths, i, num_samples)

    # log if some source file is being used more than once
    for fname, hits in parser.fname_hits.items():
        if hits > 1:
            logging.info('%s has been considered %d times', fname, hits)
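
One way to read the `rlz.weight / num_samples` division: with logic tree sampling an effective realization stands for all the draws that hit the same source-model path, so its weight is spread back over those draws. A self-contained sketch under that assumption (paths invented):

import collections

sampled = [('b1',), ('b1',), ('b2',)]  # ('b1',) drawn twice, ('b2',) once
samples_by_lt_path = collections.Counter(sampled)
total = len(sampled)

for path, num_samples in samples_by_lt_path.items():
    rlz_weight = num_samples / float(total)  # weight of the effective rlz
    per_sample = rlz_weight / num_samples    # what the loop above computes
    # both paths end up with 1/3 per sampled copy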
Example #14
def get_fragility_functions(fname,
                            continuous_fragility_discretization,
                            steps_per_interval=None):
    """
    :param fname:
        path of the fragility file
    :param continuous_fragility_discretization:
        continuous_fragility_discretization parameter
    :param steps_per_interval:
        steps_per_interval parameter
    :returns:
        damage_states list and dictionary taxonomy -> functions
    """
    [fmodel] = read_nodes(fname, lambda el: el.tag.endswith('fragilityModel'),
                          nodefactory['fragilityModel'])
    # ~fmodel.description is ignored
    limit_states = ~fmodel.limitStates
    tag = 'ffc' if fmodel['format'] == 'continuous' else 'ffd'
    fragility_functions = AccumDict()  # taxonomy -> functions
    for ffs in fmodel.getnodes('ffs'):
        add_zero_value = False
        # NB: the noDamageLimit is only defined for discrete fragility
        # functions. It is a way to set the starting point of the functions:
        # if noDamageLimit is at the left of each IMLs, it means that the
        # function starts at zero at the given point, so we need to add
        # noDamageLimit to the list of IMLs and zero to the list of poes
        nodamage = ffs.attrib.get('noDamageLimit')
        taxonomy = ~ffs.taxonomy
        imt_str, imls, min_iml, max_iml, imlUnit = ~ffs.IML

        if fmodel['format'] == 'discrete':
            if nodamage is not None and nodamage < imls[0]:
                # discrete fragility
                imls = [nodamage] + imls
                add_zero_value = True
            if steps_per_interval:
                gen_imls = scientific.fine_graining(imls, steps_per_interval)
            else:
                gen_imls = imls
        else:  # continuous:
            if min_iml is None:
                raise InvalidFile('Missing attribute minIML, line %d' %
                                  ffs.IML.lineno)
            elif max_iml is None:
                raise InvalidFile('Missing attribute maxIML, line %d' %
                                  ffs.IML.lineno)
            gen_imls = numpy.linspace(min_iml, max_iml,
                                      continuous_fragility_discretization)
        fragility_functions[taxonomy] = scientific.FragilityFunctionList(
            [],
            imt=imt_str,
            imls=list(gen_imls),
            no_damage_limit=nodamage,
            continuous_fragility_discretization=(
                continuous_fragility_discretization),
            steps_per_interval=steps_per_interval)
        lstates = []
        for ff in ffs.getnodes(tag):
            ls = ff['ls']  # limit state
            lstates.append(ls)
            if tag == 'ffc':
                with context(fname, ff):
                    mean_stddev = ~ff.params
                fragility_functions[taxonomy].append(
                    scientific.FragilityFunctionContinuous(ls, *mean_stddev))
            else:  # discrete
                with context(fname, ff):
                    poes = ~ff.poEs
                if add_zero_value:
                    poes = [0.] + poes

                fragility_functions[taxonomy].append(
                    scientific.FragilityFunctionDiscrete(
                        ls, imls, poes, nodamage))

        if lstates != limit_states:
            raise InvalidFile("Expected limit states %s, got %s in %s" %
                              (limit_states, lstates, fname))

    fragility_functions.damage_states = ['no_damage'] + limit_states
    return fragility_functions
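
The noDamageLimit handling in the discrete branch is easiest to see with concrete numbers (invented): the limit is prepended to the IMLs and a matching zero poe is prepended to every function.

nodamage = 0.05
imls = [0.1, 0.2, 0.4]
poes = [0.10, 0.40, 0.90]

if nodamage is not None and nodamage < imls[0]:
    imls = [nodamage] + imls  # -> [0.05, 0.1, 0.2, 0.4]
    poes = [0.] + poes        # -> [0.0, 0.1, 0.4, 0.9]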
Example #15
def get_vulnerability_functions(fname):
    """
    :param fname:
        path of the vulnerability file
    :returns:
        a dictionary imt, taxonomy -> vulnerability function
    """
    # NB: the IMTs can be duplicated and with different levels, each
    # vulnerability function in a set will get its own levels
    imts = set()
    taxonomies = set()
    vf_dict = {}  # imt, taxonomy -> vulnerability function
    node = nrml.read(fname)
    if node['xmlns'] == nrml.NRML05:
        vmodel = node[0]
        for vfun in vmodel.getnodes('vulnerabilityFunction'):
            with context(fname, vfun):
                imt = vfun.imls['imt']
                imls = numpy.array(~vfun.imls)
                taxonomy = vfun['id']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            if vfun['dist'] == 'PM':
                loss_ratios, probs = [], []
                for probabilities in vfun[1:]:
                    loss_ratios.append(probabilities['lr'])
                    probs.append(valid.probabilities(~probabilities))
                probs = numpy.array(probs)
                assert probs.shape == (len(loss_ratios), len(imls))
                vf_dict[imt, taxonomy] = (
                    scientific.VulnerabilityFunctionWithPMF(
                        taxonomy, imt, imls, numpy.array(loss_ratios),
                        probs))
            else:
                with context(fname, vfun):
                    loss_ratios = ~vfun.meanLRs
                    coefficients = ~vfun.covLRs
                if len(loss_ratios) != len(imls):
                    raise InvalidFile(
                        'There are %d loss ratios, but %d imls: %s, line %d' %
                        (len(loss_ratios), len(imls), fname,
                         vfun.meanLRs.lineno))
                if len(coefficients) != len(imls):
                    raise InvalidFile(
                        'There are %d coefficients, but %d imls: %s, '
                        'line %d' % (len(coefficients), len(imls), fname,
                                     vfun.covLRs.lineno))
                with context(fname, vfun):
                    vf_dict[imt, taxonomy] = scientific.VulnerabilityFunction(
                        taxonomy, imt, imls, loss_ratios, coefficients,
                        vfun['dist'])
        return vf_dict
    # otherwise, read the old format (NRML 0.4)
    for vset in read_nodes(fname, filter_vset,
                           nodefactory['vulnerabilityModel']):
        imt_str, imls, min_iml, max_iml, imlUnit = ~vset.IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    return vf_dict