Example #1
 def __fromh5__(self, array, attrs):
     dd = collections.defaultdict(dict)
     for (name_, literal_) in array:
         name = decode(name_)
         literal = decode(literal_)
         if '.' in name:
             k1, k2 = name.split('.', 1)
             dd[k1][k2] = ast.literal_eval(literal)
         else:
             dd[name] = ast.literal_eval(literal)
     vars(self).update(dd)
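Every example on this page relies on a small decode helper (and, in a
couple of places, its encode counterpart) to normalize HDF5/h5py values
to str. A minimal sketch consistent with how the helpers are used here,
modelled on openquake.baselib.python3compat but an approximation rather
than the library's exact code:

def decode(val):
    """Decode bytes to str using UTF-8; pass anything else through str."""
    if isinstance(val, bytes):
        return val.decode('utf-8')
    return str(val)

def encode(val):
    """Encode a str (or a list/tuple of str) to UTF-8 bytes."""
    if isinstance(val, (list, tuple)):
        return [encode(v) for v in val]
    return val.encode('utf-8') if isinstance(val, str) else val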
Example #2
def get_serial(etag):
    """
    >>> print(get_serial("grp=00~ses=0007~src=1-3~rup=018-01"))
    18
    """
    try:
        trt, ses, src, rup = decode(etag).split('~')
    except ValueError:
        trt, ses, src, rup, sample = decode(etag).split('~')
    serial = rup.split('=')[1].split('-')[0]
    return int(serial)
Example #3
 def __fromh5__(self, dic, attrs):
     for name in ('loss_types', 'deduc', 'i_lim', 'retro'):
         setattr(self, name, [decode(x) for x in attrs[name].split()])
     self.occupancy_periods = attrs['occupancy_periods']
     self.time_event = attrs['time_event']
     self.tot_sites = attrs['tot_sites']
     self.nbytes = attrs['nbytes']
     self.array = dic['array'].value
     self.tagcol = dic['tagcol']
     self.cost_calculator = dic['cost_calculator']
     self.asset_refs = dic['asset_refs'].value
     self.cost_calculator.tagi = {
         decode(tagname): i for i, tagname in enumerate(self.tagnames)}
Example #4
def check_obsolete_version(calculation_mode='WebUI'):
    """
    Check if there is a newer version of the engine.

    :param calculation_mode:
         - the calculation mode when called from the engine
         - an empty string when called from the WebUI
    :returns:
        - a message if the running version of the engine is obsolete
        - the empty string if the engine is up to date
        - None if the check could not be performed (e.g. GitHub is down)
    """
    if os.environ.get('JENKINS_URL') or os.environ.get('TRAVIS'):
        # avoid flooding our API server with requests from CI systems
        return

    headers = {'User-Agent': 'OpenQuake Engine %s;%s;%s;%s' %
               (__version__, calculation_mode, platform.platform(),
                config.distribution.oq_distribute)}
    try:
        req = Request(OQ_API + '/engine/latest', headers=headers)
        # NB: a timeout < 1 does not work
        data = urlopen(req, timeout=1).read()  # bytes
        tag_name = json.loads(decode(data))['tag_name']
        current = version_triple(__version__)
        latest = version_triple(tag_name)
    except Exception:  # page not available or wrong version tag
        return
    if current < latest:
        return ('Version %s of the engine is available, but you are '
                'still using version %s' % (tag_name, __version__))
    else:
        return ''
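version_triple is referenced but not defined in the snippet above; here
is a plausible sketch inferred from the comparison current < latest
(treat the name and regex as assumptions, not the engine's actual code):

import re

def version_triple(tag):
    # '3.11.4' or 'v3.11.4' -> (3, 11, 4), so versions compare numerically
    groups = re.match(r'v?(\d+)\.(\d+)\.(\d+)', tag).groups()
    return tuple(int(n) for n in groups)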
Example #5
def git_suffix(fname):
    """
    :returns: `-git<short hash>` if a Git repository is found, else the empty string
    """
    # we assume that the .git folder is two levels above any package
    # i.e. openquake/engine/../../.git
    git_path = os.path.join(os.path.dirname(fname), '..', '..', '.git')

    # macOS complains if we try to execute git and it's not available.
    # Code will run, but a pop-up offering to install bloatware (Xcode)
    # is raised. This is annoying in end-users installations, so we check
    # if .git exists before trying to execute the git executable
    if os.path.isdir(git_path):
        try:
            gh = subprocess.check_output(
                ['git', 'rev-parse', '--short', 'HEAD'],
                stderr=open(os.devnull, 'w'),
                cwd=os.path.dirname(git_path)).strip()
            gh = "-git" + decode(gh) if gh else ''
            return gh
        except:
            # trapping everything on purpose; git may not be installed or it
            # may not work properly
            pass

    return ''
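A typical call site for git_suffix, sketched from how such version
suffixes are usually applied (the version string is a placeholder):

__version__ = '3.11.0'               # placeholder base version
__version__ += git_suffix(__file__)  # e.g. '3.11.0-gitab12cd3' in a checkout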
Example #6
    def __init__(self, amplification_group, magnitudes, distances):
        """
        Setup the amplification factors.

        :param amplification_group:
            Amplification model as instance of :class:`h5py.Group`
        :param magnitudes:
            Array of magnitudes
        :param distances:
            Array of distances
        """
        self.shape = None
        self.periods = None
        self.mean = None
        self.sigma = None
        self.magnitudes = magnitudes
        self.distances = distances
        self.parameter = decode(amplification_group.attrs["apply_to"])
        self.values = numpy.array([float(key) for key in amplification_group])
        self.argidx = numpy.argsort(self.values)
        self.values = self.values[self.argidx]
        if self.parameter in RuptureContext._slots_:
            self.element = "Rupture"
        elif self.parameter in site.site_param_dt:
            self.element = "Sites"
        else:
            raise ValueError("Amplification parameter %s not recognised!"
                             % self.parameter)
        self._build_data(amplification_group)
Example #7
 def _set_attrib(self, node, n, tn, v):
     val = self.validators[tn]
     try:
         node.attrib[n] = val(decode(v))
     except Exception as exc:
         raise ValueError(
             'Could not convert %s->%s: %s, line %s' %
             (tn, val.__name__, exc, node.lineno))
Example #8
def _display(node, indent, expandattrs, expandvals, output):
    """Core function to display a Node object"""
    attrs = _displayattrs(node.attrib, expandattrs)
    val = ' %s' % repr(node.text) \
        if expandvals and node.text is not None else ''
    output.write(decode((indent + striptag(node.tag) + attrs + val + '\n')))
    for sub_node in node:
        _display(sub_node, indent + '  ', expandattrs, expandvals, output)
Example #9
def view_job_info(token, dstore):
    """
    Determine the amount of data transferred from the controller node
    to the workers and back in a classical calculation.
    """
    job_info = h5py.File.__getitem__(dstore.hdf5, 'job_info')
    rows = [(k, ast.literal_eval(decode(v))) for k, v in job_info]
    return rst_table(rows)
Example #10
def _humansize(literal):
    dic = ast.literal_eval(decode(literal))
    if isinstance(dic, dict):
        items = sorted(dic.items(), key=operator.itemgetter(1), reverse=True)
        lst = ['%s %s' % (k, humansize(v)) for k, v in items]
        return ', '.join(lst)
    else:
        return str(dic)
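humansize is used above but not defined on this page; a hypothetical
minimal formatter compatible with that call, for illustration only:

def humansize(nbytes, suffixes=('B', 'KB', 'MB', 'GB', 'TB', 'PB')):
    i = 0
    while nbytes >= 1024 and i < len(suffixes) - 1:
        nbytes /= 1024.
        i += 1
    return '%.2f %s' % (nbytes, suffixes[i])

With such a helper, _humansize(b"{'gmf_data': 2048, 'sids': 64}") would
return 'gmf_data 2.00 KB, sids 64.00 B'.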
Example #11
def get_serial(etag):
    """
    >>> print(get_serial("trt=00~ses=0007~src=1-3~rup=018-01"))
    018
    """
    trt, ses, src, rup = decode(etag).split('~')
    serial = rup.split('=')[1].split('-')[0]
    return serial
Example #12
 def __init__(self, dstore):
     self.dstore = dstore
     self.oq = oq = dstore['oqparam']
     self.text = (decode(oq.description) + '\n' + '=' * len(oq.description))
     info = dstore['job_info']
     dpath = dstore.hdf5path
     mtime = os.path.getmtime(dpath)
     host = '%s:%s' % (info.hostname, decode(dpath))
     updated = str(time.ctime(mtime))
     versions = sorted(dstore['/'].attrs.items())
     self.text += '\n\n' + views.rst_table([[host, updated]] + versions)
     # NB: in the future, the sitecol could be transferred as
     # an array by leveraging the HDF5 serialization protocol;
     # for the moment however the size of the
     # data to transfer is given by the usual pickle
     sitecol_size = humansize(len(parallel.Pickled(dstore['sitecol'])))
     self.text += '\n\nnum_sites = %d, sitecol = %s' % (
         len(dstore['sitecol']), sitecol_size)
Example #13
    def to_str(self, expandattrs=True, expandvals=True):
        """
        Convert the node into a string, intended for testing/debugging purposes

        :param expandattrs:
          print the values of the attributes if True, else print only the names
        :param expandvals:
          print the values if True, else print only the tag names
        """
        out = io.BytesIO()
        node_display(self, expandattrs, expandvals, out)
        return decode(out.getvalue())
Example #14
 def _set_text(self, node, text, tag):
     if text is None:
         return
     try:
         val = self.validators[tag]
     except KeyError:
         return
     try:
         node.text = val(decode(text.strip()))
     except Exception as exc:
         raise ValueError('Could not convert %s->%s: %s' %
                          (tag, val.__name__, exc))
Example #15
 def __init__(self, dstore):
     self.dstore = dstore
     try:
         self.builder = get_loss_builder(dstore)
     except KeyError:  # no 'events' for non event_based_risk
         pass
     self.assetcol = dstore['assetcol']
     arefs = [decode(aref) for aref in self.assetcol.asset_refs]
     self.str2asset = dict(zip(arefs, self.assetcol))
     self.asset_refs = arefs
     self.loss_types = dstore.get_attr('risk_model', 'loss_types')
     self.R = dstore['csm_info'].get_num_rlzs()
Example #16
 def __init__(self, dstore):
     self.dstore = dstore
     self.oq = oq = dstore['oqparam']
     self.text = (decode(oq.description) + '\n' + '=' * len(oq.description))
     try:
         num_rlzs = dstore['csm_info'].get_num_rlzs()
     except KeyError:
         num_rlzs = '?'
     versions = sorted(dstore['/'].attrs.items())
     self.text += '\n\n' + views.rst_table(versions)
     self.text += '\n\nnum_sites = %d, num_levels = %d, num_rlzs = %s' % (
         len(dstore['sitecol']), len(oq.imtls.array), num_rlzs)
Example #17
 def __repr__(self):
     info_by_model = collections.OrderedDict()
     for sm in self.source_models:
         info_by_model[sm.path] = (
             '_'.join(map(decode, sm.path)),
             decode(sm.name),
             [sg.id for sg in sm.src_groups],
             sm.weight,
             self.get_num_rlzs(sm))
     summary = ['%s, %s, grp=%s, weight=%s: %d realization(s)' % ibm
                for ibm in info_by_model.values()]
     return '<%s\n%s>' % (
         self.__class__.__name__, '\n'.join(summary))
Example #18
 def __toh5__(self):
     # NB: the loss types do not contain spaces, so we can store them
     # together as a single space-separated string
     op = decode(self.occupancy_periods)
     attrs = {'time_event': self.time_event or 'None',
              'occupancy_periods': op,
              'loss_types': ' '.join(self.loss_types),
              'tot_sites': self.tot_sites,
              'fields': ' '.join(self.fields),
              'tagnames': encode(self.tagnames),
              'nbytes': self.array.nbytes}
     return dict(
         array=self.array, tagcol=self.tagcol), attrs
Example #19
 def __fromh5__(self, dic, attrs):
     sg_data = group_array(dic['sg_data'], 'sm_id')
     sm_data = dic['sm_data']
     vars(self).update(attrs)
     self.gsim_fname = decode(self.gsim_fname)
     if self.gsim_fname.endswith('.xml'):
         self.gsim_lt = logictree.GsimLogicTree(
             self.gsim_fname, sorted(self.trts))
     else:  # fake file with the name of the GSIM
         self.gsim_lt = logictree.GsimLogicTree.from_(self.gsim_fname)
     self.source_models = []
     for sm_id, rec in enumerate(sm_data):
         tdata = sg_data[sm_id]
         srcgroups = [
             sourceconverter.SourceGroup(
                 self.trts[trti], id=grp_id, eff_ruptures=effrup)
             for grp_id, trti, effrup, sm_id in tdata if effrup]
         path = tuple(str(decode(rec['path'])).split('_'))
         trts = set(sg.trt for sg in srcgroups)
         num_gsim_paths = self.gsim_lt.reduce(trts).get_num_paths()
         sm = SourceModel(rec['name'], rec['weight'], path, srcgroups,
                          num_gsim_paths, sm_id, rec['samples'])
         self.source_models.append(sm)
Example #20
def git_suffix(fname):
    """
    :returns: `-git<short hash>` if a Git repository is found, else the empty string
    """
    try:
        gh = subprocess.check_output(
            ['git', 'rev-parse', '--short', 'HEAD'],
            stderr=open(os.devnull, 'w'), cwd=os.path.dirname(fname)).strip()
        gh = "-git" + decode(gh) if gh else ''
        return gh
    except:
        # trapping everything on purpose; git may not be installed or it
        # may not work properly
        return ''
Example #21
 def __getitem__(self, path):
     h5obj = super(File, self).__getitem__(path)
     h5attrs = h5obj.attrs
     if '__pyclass__' in h5attrs:
         # NB: the `decode` below is needed for Python 3
         modname, clsname = decode(h5attrs['__pyclass__']).rsplit('.', 1)
         cls = getattr(importlib.import_module(modname), clsname)
         obj = cls.__new__(cls)
         if not hasattr(h5obj, 'shape'):  # is group
             h5obj = {unquote_plus(k): self['%s/%s' % (path, k)]
                      for k, v in h5obj.items()}
         obj.__fromh5__(h5obj, h5attrs)
         return obj
     else:
         return h5obj
Example #22
 def __toh5__(self):
     # NB: the loss types do not contain spaces, so we can store them
     # together as a single space-separated string
     op = decode(self.occupancy_periods)
     attrs = {'time_event': self.time_event or 'None',
              'occupancy_periods': op,
              'loss_types': ' '.join(self.loss_types),
              'deduc': ' '.join(self.deduc),
              'i_lim': ' '.join(self.i_lim),
              'retro': ' '.join(self.retro),
              'tot_sites': self.tot_sites,
              'tagnames': encode(self.tagnames),
              'nbytes': self.array.nbytes}
     return dict(
         array=self.array, cost_calculator=self.cost_calculator,
         tagcol=self.tagcol, asset_refs=self.asset_refs), attrs
Example #23
 def __getitem__(self, path):
     h5obj = super(File, self).__getitem__(path)
     h5attrs = h5obj.attrs
     if '__pyclass__' in h5attrs:
         # NB: the `decode` below is needed for Python 3
         cls = dotname2cls(decode(h5attrs['__pyclass__']))
         obj = cls.__new__(cls)
         if hasattr(h5obj, 'items'):  # is group
             h5obj = {unquote_plus(k): self['%s/%s' % (path, k)]
                      for k, v in h5obj.items()}
         elif hasattr(h5obj, 'value'):
             h5obj = h5obj.value
         obj.__fromh5__(h5obj, h5attrs)
         return obj
     else:
         return h5obj
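Examples #21 and #23 show the reading half of a simple serialization
protocol: on write, the File subclass stores the dotted class name in a
__pyclass__ attribute and calls __toh5__ (see Examples #18 and #22); on
read, __getitem__ rebuilds an empty instance and hands the stored data
to __fromh5__. A minimal class speaking this protocol might look as
follows (a sketch under those assumptions, not actual engine code):

class TableWrapper(object):
    def __init__(self, array, description):
        self.array = array
        self.description = description

    def __toh5__(self):
        # return the pair (data to store, attributes to store)
        return self.array, {'description': self.description}

    def __fromh5__(self, array, attrs):
        # called on an empty instance created with cls.__new__(cls)
        self.array = array
        self.description = decode(attrs['description'])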
Example #24
 def to_array(self, ebruptures):
     data = []
     for ebr in ebruptures:
         rup = ebr.rupture
         rc = self.cmaker.make_rupture_context(rup)
         ruptparams = tuple(getattr(rc, param) for param in self.params)
         point = rup.surface.get_middle_point()
         multi_lons, multi_lats = rup.surface.get_surface_boundaries()
         boundary = ','.join('((%s))' % ','.join(
             '%.5f %.5f' % (lon, lat) for lon, lat in zip(lons, lats))
                             for lons, lats in zip(multi_lons, multi_lats))
         try:
             rate = ebr.rupture.occurrence_rate
         except AttributeError:  # for nonparametric sources
             rate = numpy.nan
         data.append((ebr.serial, ebr.multiplicity, len(ebr.sids),
                      rate, rup.mag, point.x, point.y, point.z,
                      rup.surface.get_strike(), rup.surface.get_dip(),
                      rup.rake, decode(boundary)) + ruptparams)
     return numpy.array(data, self.dt)
Example #25
def form(value):
    """
    Format numbers in a nice way.

    >>> form(0)
    '0'
    >>> form(0.0)
    '0.0'
    >>> form(0.0001)
    '1.000E-04'
    >>> form(1003.4)
    '1,003'
    >>> form(103.4)
    '103'
    >>> form(9.3)
    '9.30000'
    >>> form(-1.2)
    '-1.2'
    """
    if isinstance(value, FLOAT + INT):
        if value <= 0:
            return str(value)
        elif value < .001:
            return '%.3E' % value
        elif value < 10 and isinstance(value, FLOAT):
            return '%.5f' % value
        elif value > 1000:
            return '{:,d}'.format(int(round(value)))
        elif numpy.isnan(value):
            return 'NaN'
        else:  # in the range 10-1000
            return str(int(value))
    elif isinstance(value, bytes):
        return decode(value)
    elif isinstance(value, str):
        return value
    elif isinstance(value, numpy.object_):
        return str(value)
    elif hasattr(value, '__len__') and len(value) > 1:
        return ' '.join(map(form, value))
    return str(value)
Example #26
def get_params(job_inis, **kw):
    """
    Parse one or more INI-style config files.

    :param job_inis:
        List of configuration files (or list containing a single zip archive)
    :param kw:
        Optionally override some parameters
    :returns:
        A dictionary of parameters
    """
    input_zip = None
    if len(job_inis) == 1 and job_inis[0].endswith('.zip'):
        input_zip = job_inis[0]
        job_inis = extract_from_zip(
            job_inis[0], ['job_hazard.ini', 'job_haz.ini',
                          'job.ini', 'job_risk.ini'])

    not_found = [ini for ini in job_inis if not os.path.exists(ini)]
    if not_found:  # something was not found
        raise IOError('File not found: %s' % not_found[0])

    cp = configparser.ConfigParser()
    cp.read(job_inis)

    # directory containing the config files we're parsing
    job_ini = os.path.abspath(job_inis[0])
    base_path = decode(os.path.dirname(job_ini))
    params = dict(base_path=base_path, inputs={'job_ini': job_ini})
    if input_zip:
        params['inputs']['input_zip'] = os.path.abspath(input_zip)

    for sect in cp.sections():
        _update(params, cp.items(sect), base_path)
    _update(params, kw.items(), base_path)  # override on demand

    if params['inputs'].get('reqv'):
        # using pointsource_distance=0 because of the reqv approximation
        params['pointsource_distance'] = '0'
    return params
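Hypothetical usage of get_params (the file name and the override are
made up for illustration):

params = get_params(['job.ini'], calculation_mode='classical')
print(params['base_path'], params['inputs']['job_ini'])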
Example #27
 def __fromh5__(self, dic, attrs):
     # TODO: this is called more times than needed, maybe we should cache it
     sg_data = group_array(dic['sg_data'], 'sm_id')
     sm_data = dic['sm_data']
     vars(self).update(attrs)
     self.gsim_lt = dic['gsim_lt']
     self.source_models = []
     for sm_id, rec in enumerate(sm_data):
         tdata = sg_data[sm_id]
         srcgroups = [
             sourceconverter.SourceGroup(
                 self.trts[data['trti']], id=data['grp_id'],
                 name=get_field(data, 'name', ''),
                 eff_ruptures=data['effrup'],
                 tot_ruptures=get_field(data, 'totrup', 0))
             for data in tdata]
         path = tuple(str(decode(rec['path'])).split('_'))
         sm = logictree.LtSourceModel(
             rec['name'], rec['weight'], path, srcgroups,
             rec['num_rlzs'], sm_id, rec['samples'])
         self.source_models.append(sm)
     self.init()
Example #28
 def init(self, fle=None):
     """
     Executes the preprocessing steps at the instantiation stage to read in
     the tables from hdf5 and hold them in memory.
     """
     if fle is None:
         fname = self.kwargs.get('gmpe_table', self.GMPE_TABLE)
         if fname is None:
             raise ValueError('You forgot to set GMPETable.GMPE_TABLE!')
         elif os.path.isabs(fname):
             self.GMPE_TABLE = fname
         else:
             # NB: (hackish) GMPE_DIR must be set externally
             self.GMPE_TABLE = os.path.abspath(
                 os.path.join(self.GMPE_DIR, fname))
         fle = h5py.File(self.GMPE_TABLE, "r")
     try:
         # this is the format inside the datastore
         self.distance_type = fle["distance_type"][()]
     except KeyError:
         # this is the original format outside the datastore
         self.distance_type = decode(fle["Distances"].attrs["metric"])
     self.REQUIRES_DISTANCES = set([self.distance_type])
     # Load in magnitude
     self.m_w = fle["Mw"][:]
     # Load in distances
     self.distances = fle["Distances"][:]
     # Load intensity measure types and levels
     self.imls = hdf_arrays_to_dict(fle["IMLs"])
     self.DEFINED_FOR_INTENSITY_MEASURE_TYPES = set(self._supported_imts())
     if "SA" in self.imls and "T" not in self.imls:
         raise ValueError("Spectral Acceleration must be accompanied by "
                          "periods")
     # Get the standard deviations
     self._setup_standard_deviations(fle)
     if "Amplification" in fle:
         self._setup_amplification(fle)
Example #29
def extract_agg_losses(dstore, what):
    """
    Aggregate losses of the given loss type and tags. Use it as
    /extract/agg_losses/structural?taxonomy=RC&zipcode=20126
    /extract/agg_losses/structural?taxonomy=RC&zipcode=*

    :returns:
        an array of shape (T, R) if one of the tag names has a `*` value
        an array of shape (R,), where R is the number of realizations
        an array of length 0 if there is no data for the given tags
    """
    loss_type, tags = get_loss_type_tags(what)
    if not loss_type:
        raise ValueError('loss_type not passed in agg_losses/<loss_type>')
    L = dstore['oqparam'].lti[loss_type]
    if 'avg_losses-stats' in dstore:
        stats = decode(dstore['avg_losses-stats'].attrs['stat'])
        losses = dstore['avg_losses-stats'][:, :, L]
    elif 'avg_losses-rlzs' in dstore:
        stats = ['mean']
        losses = dstore['avg_losses-rlzs'][:, :, L]
    else:
        raise KeyError('No losses found in %s' % dstore)
    return _filter_agg(dstore['assetcol'], losses, tags, stats)
Example #30
def export_damage(ekey, dstore):
    loss_types = dstore.get_attr('composite_risk_model', 'loss_types')
    damage_states = dstore.get_attr('composite_risk_model', 'damage_states')
    rlzs = dstore['csm_info'].get_rlzs_assoc().realizations
    dmg_by_asset = dstore['dmg_by_asset']  # composite array of shape (N, R)
    assetcol = dstore['assetcol/array'].value
    aref = dstore['asset_refs'].value
    dmg_states = [DmgState(s, i) for i, s in enumerate(damage_states)]
    D = len(dmg_states)
    N, R = dmg_by_asset.shape
    L = len(loss_types)
    fnames = []

    for l, r in itertools.product(range(L), range(R)):
        lt = loss_types[l]
        rlz = rlzs[r]
        dd_asset = []
        for n, ass in enumerate(assetcol):
            assref = decode(aref[ass['idx']])
            dist = dmg_by_asset[n, r][lt]
            site = Site(ass['lon'], ass['lat'])
            for ds in range(D):
                dd_asset.append(
                    DmgDistPerAsset(ExposureData(assref, site), dmg_states[ds],
                                    dist['mean'][ds], dist['stddev'][ds]))

        f1 = export_dmg_xml(('dmg_dist_per_asset', 'xml'), dstore, dmg_states,
                            dd_asset, lt, rlz)
        max_damage = dmg_states[-1]
        # the collapse map is extracted from the damage distribution per asset
        # (dda) by taking the value corresponding to the maximum damage
        collapse_map = [dda for dda in dd_asset if dda.dmg_state == max_damage]
        f2 = export_dmg_xml(('collapse_map', 'xml'), dstore, dmg_states,
                            collapse_map, lt, rlz)
        fnames.extend(sum((f1 + f2).values(), []))
    return sorted(fnames)
Example #31
 def to_array(self, ebruptures):
     """
      Convert a list of ebruptures into an array of dtype RuptureData.dt
     """
     data = []
     for ebr in ebruptures:
         rup = ebr.rupture
         self.cmaker.add_rup_params(rup)
         ruptparams = tuple(getattr(rup, param) for param in self.params)
         point = rup.surface.get_middle_point()
         multi_lons, multi_lats = rup.surface.get_surface_boundaries()
         bounds = ','.join('((%s))' % ','.join(
             '%.5f %.5f' % (lon, lat) for lon, lat in zip(lons, lats))
                           for lons, lats in zip(multi_lons, multi_lats))
         try:
             rate = ebr.rupture.occurrence_rate
         except AttributeError:  # for nonparametric sources
             rate = numpy.nan
         data.append(
             (ebr.serial, ebr.srcidx, ebr.n_occ, rate,
              rup.mag, point.x, point.y, point.z, rup.surface.get_strike(),
              rup.surface.get_dip(), rup.rake,
              'MULTIPOLYGON(%s)' % decode(bounds)) + ruptparams)
     return numpy.array(data, self.dt)
Example #32
 def to_array(self, ebruptures):
     """
      Convert a list of ebruptures into an array of dtype RuptureData.dt
     """
     data = []
     for ebr in ebruptures:
         rup = ebr.rupture
         self.cmaker.add_rup_params(rup)
         ruptparams = tuple(getattr(rup, param) for param in self.params)
         point = rup.surface.get_middle_point()
         multi_lons, multi_lats = rup.surface.get_surface_boundaries()
         bounds = ','.join('((%s))' % ','.join(
             '%.5f %.5f' % (lon, lat) for lon, lat in zip(lons, lats))
                           for lons, lats in zip(multi_lons, multi_lats))
         try:
             rate = ebr.rupture.occurrence_rate
         except AttributeError:  # for nonparametric sources
             rate = numpy.nan
         data.append(
             (ebr.serial, ebr.multiplicity, ebr.eidx1, rate,
              rup.mag, point.x, point.y, point.z, rup.surface.get_strike(),
              rup.surface.get_dip(), rup.rake,
              'MULTIPOLYGON(%s)' % decode(bounds)) + ruptparams)
     return numpy.array(data, self.dt)
Example #33
 def __getitem__(self, aid):
     a = self.array[aid]
     values = {
         lt: a['value-' + lt]
         for lt in self.loss_types if lt != 'occupants'
     }
     if 'occupants' in self.array.dtype.names:
         values['occupants_' + str(self.time_event)] = a['occupants']
     return Asset(
         a['idx'],
         [a[decode(name)] for name in self.tagnames],
         number=a['number'],
         location=(
             valid.longitude(a['lon']),  # round coordinates
             valid.latitude(a['lat'])),
         values=values,
         area=a['area'],
         deductibles={lt[self.D:]: a[lt]
                      for lt in self.deduc},
         insurance_limits={lt[self.I:]: a[lt]
                           for lt in self.i_lim},
         retrofitted=a['retrofitted'] if self.retro else None,
         calc=self.cost_calculator,
         ordinal=aid)
Example #34
def export_loss_maps_stats_xml_geojson(ekey, dstore):
    loss_maps = get_loss_maps(dstore, 'stats')
    N, S = loss_maps.shape
    assetcol = dstore['assetcol/array'].value
    aref = dstore['asset_refs'].value
    fnames = []
    export_type = ekey[1]
    writercls = (risk_writers.LossMapGeoJSONWriter if export_type == 'geojson'
                 else risk_writers.LossMapXMLWriter)
    for writer, (ltype, poe, s,
                 insflag) in _gen_writers(dstore, writercls, ekey[0]):
        ins = '_ins' if insflag else ''
        if ltype not in loss_maps.dtype.names:
            continue
        array = loss_maps[ltype + ins][:, s]
        curves = []
        poe_str = 'poe-%s' % poe
        for ass, val in zip(assetcol, array[poe_str]):
            loc = Location(ass['lon'], ass['lat'])
            curve = LossMap(loc, decode(aref[ass['idx']]), val, None)
            curves.append(curve)
        writer.serialize(curves)
        fnames.append(writer._dest)
    return sorted(fnames)
Example #35
def view_task_hazard(token, dstore):
    """
    Display info about a given task. Here are a few examples of usage::

     $ oq show task_hazard:0  # the fastest task
     $ oq show task_hazard:-1  # the slowest task
    """
    tasks = set(dstore['task_info'])
    if 'source_data' not in dstore:
        return 'Missing source_data'
    if 'classical_split_filter' in tasks:
        data = dstore['task_info/classical_split_filter'][()]
    else:
        data = dstore['task_info/compute_gmfs'][()]
    data.sort(order='duration')
    rec = data[int(token.split(':')[1])]
    taskno = rec['taskno']
    arr = get_array(dstore['source_data'][()], taskno=taskno)
    st = [stats('nsites', arr['nsites']), stats('weight', arr['weight'])]
    sources = dstore['task_sources'][taskno - 1].split()
    srcs = set(decode(s).split(':', 1)[0] for s in sources)
    res = 'taskno=%d, weight=%d, duration=%d s, sources="%s"\n\n' % (
        taskno, rec['weight'], rec['duration'], ' '.join(sorted(srcs)))
    return res + rst_table(st, header='variable mean stddev min max n'.split())
Example #36
def export_bcr_map(ekey, dstore):
    assetcol = dstore['assetcol/array'].value
    aref = dstore['asset_refs'].value
    bcr_data = dstore['bcr-rlzs']
    N, R = bcr_data.shape
    realizations = dstore['csm_info'].get_rlzs_assoc().realizations
    loss_types = dstore.get_attr('composite_risk_model', 'loss_types')
    fnames = []
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    for rlz in realizations:
        for l, loss_type in enumerate(loss_types):
            rlz_data = bcr_data[loss_type][:, rlz.ordinal]
            path = dstore.build_fname('bcr-%s' % loss_type, rlz, 'csv')
            data = [[
                'lon', 'lat', 'asset_ref', 'average_annual_loss_original',
                'average_annual_loss_retrofitted', 'bcr'
            ]]
            for ass, value in zip(assetcol, rlz_data):
                data.append((ass['lon'], ass['lat'], decode(aref[ass['idx']]),
                             value['annual_loss_orig'],
                             value['annual_loss_retro'], value['bcr']))
            writer.save(data, path)
            fnames.append(path)
    return writer.getsaved()
Example #37
 def __fromh5__(self, array, attrs):
     vars(self).update(attrs)
     self.cost_types = dict(zip(self.loss_types, array['cost_type']))
     self.area_types = dict(zip(self.loss_types, array['area_type']))
     self.units = dict(zip(self.loss_types, decode(array['unit'])))
Example #38
 def __fromh5__(self, dic, attrs):
     self.tagnames = [decode(name) for name in attrs['tagnames']]
     for tagname in dic:
         setattr(self, tagname + '_idx',
                 {tag: idx for idx, tag in enumerate(dic[tagname])})
         setattr(self, tagname, dic[tagname].value)
Example #39
def _get(dstore, name):
    try:
        dset = dstore[name + '-stats']
        return dset, decode(dset.attrs['stat'])
    except KeyError:  # single realization
        return dstore[name + '-rlzs'], ['mean']
Example #40
def capitalize(words):
    """
    Capitalize words separated by spaces.
    """
    return ' '.join(w.capitalize() for w in decode(words).split(' '))
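A quick usage example, in the doctest style used elsewhere on this page
(the tectonic region string is illustrative):

>>> capitalize(b'active shallow crust')
'Active Shallow Crust'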
Example #41
def export_disagg_csv(ekey, dstore):
    oq = dstore['oqparam']
    sitecol = dstore['sitecol']
    hmap4 = dstore['hmap4']
    rlzs = dstore['full_lt'].get_realizations()
    best_rlzs = dstore['best_rlzs'][:]
    N, M, P, Z = hmap4.shape
    imts = list(oq.imtls)
    fnames = []
    bins = {name: dset[:] for name, dset in dstore['disagg-bins'].items()}
    ex = 'disagg?kind=%s&imt=%s&site_id=%s&poe_id=%d'
    if ekey[0] == 'disagg_traditional':
        ex += '&traditional=1'
        trad = '-traditional'
    else:
        trad = ''
    skip_keys = ('Mag', 'Dist', 'Lon', 'Lat', 'Eps', 'TRT')
    metadata = dstore.metadata
    poes_disagg = ['nan'] * P
    for p in range(P):
        try:
            poes_disagg[p] = str(oq.poes_disagg[p])
        except IndexError:
            pass
    for s in range(N):
        rlzcols = ['rlz%d' % r for r in best_rlzs[s]]
        lon, lat = sitecol.lons[s], sitecol.lats[s]
        weights = numpy.array([rlzs[r].weight['weight'] for r in best_rlzs[s]])
        weights /= weights.sum()  # normalize to 1
        metadata.update(investigation_time=oq.investigation_time,
                        mag_bin_edges=bins['Mag'].tolist(),
                        dist_bin_edges=bins['Dist'].tolist(),
                        lon_bin_edges=bins['Lon'][s].tolist(),
                        lat_bin_edges=bins['Lat'][s].tolist(),
                        eps_bin_edges=bins['Eps'].tolist(),
                        tectonic_region_types=decode(bins['TRT'].tolist()),
                        rlz_ids=best_rlzs[s].tolist(),
                        weights=weights.tolist(),
                        lon=lon, lat=lat)
        for k in oq.disagg_outputs:
            splits = k.lower().split('_')
            header = ['imt', 'poe'] + splits + rlzcols
            values = []
            nonzeros = []
            for m, p in iproduct(M, P):
                imt = imts[m]
                aw = extract(dstore, ex % (k, imt, s, p))
                # for instance for Mag_Dist [(mag, dist, poe0, poe1), ...]
                poes = aw[:, len(splits):]
                if 'trt' in header:
                    nonzeros.append(True)
                else:
                    nonzeros.append(poes.any())  # nonzero poes
                for row in aw:
                    values.append([imt, poes_disagg[p]] + list(row))
            if any(nonzeros):
                com = {key: value for key, value in metadata.items()
                       if value is not None and key not in skip_keys}
                fname = dstore.export_path('%s%s-%d.csv' % (k, trad, s))
                writers.write_csv(fname, values, header=header,
                                  comment=com, fmt='%.5E')
                fnames.append(fname)
    return sorted(fnames)
Example #42
def export_disagg_csv_xml(ekey, dstore):
    oq = dstore['oqparam']
    sitecol = dstore['sitecol']
    hmap4 = dstore['hmap4']
    N, M, P, Z = hmap4.shape
    imts = list(oq.imtls)
    rlzs = dstore['full_lt'].get_realizations()
    fnames = []
    writercls = hazard_writers.DisaggXMLWriter
    bins = {name: dset[:] for name, dset in dstore['disagg-bins'].items()}
    ex = 'disagg?kind=%s&imt=%s&site_id=%s&poe_id=%d&z=%d'
    skip_keys = ('Mag', 'Dist', 'Lon', 'Lat', 'Eps', 'TRT')
    for s, m, p, z in iproduct(N, M, P, Z):
        dic = {
            k: dstore['disagg/' + k][s, m, p, ..., z]
            for k in oq.disagg_outputs
        }
        if sum(arr.sum() for arr in dic.values()) == 0:  # no data
            continue
        imt = from_string(imts[m])
        r = hmap4.rlzs[s, z]
        rlz = rlzs[r]
        iml = hmap4[s, m, p, z]
        poe_agg = dstore['poe4'][s, m, p, z]
        fname = dstore.export_path('rlz-%d-%s-sid-%d-poe-%d.xml' %
                                   (r, imt, s, p))
        lon, lat = sitecol.lons[s], sitecol.lats[s]
        metadata = dstore.metadata
        metadata.update(investigation_time=oq.investigation_time,
                        imt=imt.name,
                        smlt_path='_'.join(rlz.sm_lt_path),
                        gsimlt_path=rlz.gsim_rlz.pid,
                        lon=lon,
                        lat=lat,
                        mag_bin_edges=bins['Mag'].tolist(),
                        dist_bin_edges=bins['Dist'].tolist(),
                        lon_bin_edges=bins['Lon'][s].tolist(),
                        lat_bin_edges=bins['Lat'][s].tolist(),
                        eps_bin_edges=bins['Eps'].tolist(),
                        tectonic_region_types=decode(bins['TRT'].tolist()))
        if ekey[1] == 'xml':
            metadata['sa_period'] = getattr(imt, 'period', None) or None
            metadata['sa_damping'] = getattr(imt, 'damping', None)
            writer = writercls(fname, **metadata)
            data = []
            for k in oq.disagg_outputs:
                data.append(DisaggMatrix(poe_agg, iml, k.split('_'), dic[k]))
            writer.serialize(data)
            fnames.append(fname)
        else:  # csv
            metadata['poe'] = poe_agg
            for k in oq.disagg_outputs:
                header = k.lower().split('_') + ['poe']
                com = {
                    key: value
                    for key, value in metadata.items()
                    if value is not None and key not in skip_keys
                }
                fname = dstore.export_path('rlz-%d-%s-sid-%d-poe-%d_%s.csv' %
                                           (r, imt, s, p, k))
                values = extract(dstore, ex % (k, imt, s, p, z))
                writers.write_csv(fname,
                                  values,
                                  header=header,
                                  comment=com,
                                  fmt='%.5E')
                fnames.append(fname)
    return sorted(fnames)
Example #43
 def get_tag(self, tagname, tagidx):
     """
     :returns: the tag associated to the given tagname and tag index
     """
     return '%s=%s' % (tagname, decode(getattr(self, tagname)[tagidx]))
Example #44
 def gen_tags(self, tagname):
     """
     :yields: the tags associated to the given tagname
     """
     for tagvalue in getattr(self, tagname):
         yield '%s=%s' % (tagname, decode(tagvalue))
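Illustrative usage of the two tag helpers above, assuming a tag
collection tagcol with a 'taxonomy' tagname (hypothetical data):

print(tagcol.get_tag('taxonomy', 1))   # e.g. taxonomy=RC
for tag in tagcol.gen_tags('taxonomy'):
    print(tag)                         # taxonomy=RC, taxonomy=W, ...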