Example #1
    def initialize_sources(self):
        """
        Parse source models and validate source logic trees. It also
        filters the sources far away and apply uncertainties to the
        relevant ones. Notice that sources are automatically split.

        :returns:
            a list with the number of sources for each source model
        """
        logs.LOG.progress("initializing sources")
        self.source_model_lt = logictree.SourceModelLogicTree.from_hc(self.hc)
        sm_paths = distinct(self.source_model_lt)
        nrml_to_hazardlib = source.SourceConverter(
            self.hc.investigation_time,
            self.hc.rupture_mesh_spacing,
            self.hc.width_of_mfd_bin,
            self.hc.area_source_discretization,
        )
        # define an ordered dictionary trt_model_id -> SourceCollector
        self.source_collector = collections.OrderedDict()
        for i, (sm, weight, smpath) in enumerate(sm_paths):
            fname = os.path.join(self.hc.base_path, sm)
            apply_unc = self.source_model_lt.make_apply_uncertainties(smpath)
            try:
                source_collectors = source.parse_source_model(
                    fname, nrml_to_hazardlib, apply_unc)
            except ValueError as e:
                if str(e) == ('Surface does not conform with Aki & '
                              'Richards convention'):
                    raise InvalidFile('''\
%s: %s. Probably you are using an obsolete model.
In that case you can fix the file with the command
python -m openquake.engine.tools.correct_complex_sources %s
''' % (fname, e, fname))
                else:
                    raise
            trts = [sc.trt for sc in source_collectors]

            self.source_model_lt.tectonic_region_types.update(trts)
            lt_model = models.LtSourceModel.objects.create(
                hazard_calculation=self.hc, sm_lt_path=smpath, ordinal=i,
                sm_name=sm, weight=weight)
            if self.hc.inputs.get('gsim_logic_tree'):  # check TRTs
                gsims_by_trt = lt_model.make_gsim_lt(trts).values
            else:
                gsims_by_trt = {}

            # save TrtModels for each tectonic region type
            for sc in source_collectors:
                # NB: the source_collectors are ordered by number of sources
                # and lexicographically, so the models are in the right order
                trt_model_id = models.TrtModel.objects.create(
                    lt_model=lt_model,
                    tectonic_region_type=sc.trt,
                    num_sources=len(sc.sources),
                    num_ruptures=sc.num_ruptures,
                    min_mag=sc.min_mag,
                    max_mag=sc.max_mag,
                    gsims=gsims_by_trt.get(sc.trt, [])).id
                self.source_collector[trt_model_id] = sc
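
All of these examples use the `distinct` helper. Its implementation is not shown on this page; a minimal sketch, assuming it is an order-preserving de-duplication over hashable items (the real engine helper may be more general), would be:

def distinct(iterable):
    # keep the first occurrence of each element, preserving order;
    # a sketch only -- the actual engine helper may also accept
    # unhashable items
    seen = set()
    result = []
    for item in iterable:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result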
Example #2
    def pre_execute(self):
        """
        Do pre-execution work. At the moment, this work entails:
        parsing and initializing sources, parsing and initializing the
        site model (if there is one), parsing vulnerability and
        exposure files, and generating logic tree realizations. (The
        latter piece basically defines the work to be done in the
        `execute` phase.)
        """
        self.parse_risk_models()
        self.initialize_sources()
        self.initialize_site_model()
        self.create_ruptures()
        n_imts = len(distinct(from_string(imt)
                              for imt in self.hc.intensity_measure_types))
        n_sites = len(self.hc.site_collection)
        n_gmf = self.hc.number_of_ground_motion_fields
        output_weight = n_sites * n_imts * n_gmf
        logs.LOG.info('Expected output size=%s', output_weight)
        models.JobInfo.objects.create(
            oq_job=self.job,
            num_sites=n_sites,
            num_realizations=1,
            num_imts=n_imts,
            num_levels=0,
            input_weight=0,
            output_weight=output_weight)
        self.check_limits(input_weight=0, output_weight=output_weight)
        return 0, output_weight
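
Here output_weight is simply the product n_sites * n_imts * n_gmf: for instance, a run with 1000 sites, 3 distinct IMTs and 10 ground motion fields would have output_weight = 1000 * 3 * 10 = 30000 (illustrative numbers, not from a real calculation).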
Example #3
def check_levels(imls, imt):
    """
    Raise a ValueError if the given levels are invalid.

    :param imls: a list of intensity measure levels
    :param imt: the intensity measure type

    >>> check_levels([0.1, 0.2], 'PGA')  # ok
    >>> check_levels([0.1], 'PGA')
    Traceback (most recent call last):
       ...
    ValueError: Not enough imls for PGA: [0.1]
    >>> check_levels([0.2, 0.1], 'PGA')
    Traceback (most recent call last):
       ...
    ValueError: The imls for PGA are not sorted: [0.2, 0.1]
    >>> check_levels([0.2, 0.2], 'PGA')
    Traceback (most recent call last):
       ...
    ValueError: Found duplicated levels for PGA: [0.2, 0.2]
    """
    if len(imls) < 2:
        raise ValueError('Not enough imls for %s: %s' % (imt, imls))
    elif imls != sorted(imls):
        raise ValueError('The imls for %s are not sorted: %s' % (imt, imls))
    elif len(distinct(imls)) < len(imls):
        raise ValueError("Found duplicated levels for %s: %s" % (imt, imls))
Example #4
    def initialize_sources(self):
        """
        Parse source models and validate source logic trees. It also
        filters the sources far away and apply uncertainties to the
        relevant ones. Notice that sources are automatically split.

        :returns:
            a list with the number of sources for each source model
        """
        logs.LOG.progress("initializing sources")
        self.source_model_lt = logictree.SourceModelLogicTree.from_hc(self.hc)
        sm_paths = distinct(self.source_model_lt)
        nrml_to_hazardlib = source.NrmlHazardlibConverter(
            self.hc.investigation_time,
            self.hc.rupture_mesh_spacing,
            self.hc.width_of_mfd_bin,
            self.hc.area_source_discretization,
        )
        # define an ordered dictionary trt_model_id -> SourceCollector
        self.source_collector = collections.OrderedDict()
        for i, (sm, weight, smpath) in enumerate(sm_paths):
            fname = os.path.join(self.hc.base_path, sm)
            apply_unc = self.source_model_lt.make_apply_uncertainties(smpath)
            source_collectors = source.parse_source_model(
                fname, nrml_to_hazardlib, apply_unc)
            trts = [sc.trt for sc in source_collectors]

            self.source_model_lt.tectonic_region_types.update(trts)
            lt_model = models.LtSourceModel.objects.create(
                hazard_calculation=self.hc, sm_lt_path=smpath, ordinal=i,
                sm_name=sm, weight=weight)

            # save TrtModels for each tectonic region type
            gsims_by_trt = lt_model.make_gsim_lt(trts).values
            for sc in source_collectors:
                if sc.trt not in gsims_by_trt:
                    gsim_file = self.hc.inputs['gsim_logic_tree']
                    raise ValueError(
                        "Found in %r a tectonic region type %r inconsistent "
                        "with the ones in %r" % (sm, sc.trt, gsim_file))
                # NB: the source_collectors are ordered by number of sources
                # and lexicographically, so the models are in the right order
                trt_model_id = models.TrtModel.objects.create(
                    lt_model=lt_model,
                    tectonic_region_type=sc.trt,
                    num_sources=len(sc.sources),
                    num_ruptures=sc.num_ruptures,
                    min_mag=sc.min_mag,
                    max_mag=sc.max_mag,
                    gsims=gsims_by_trt[sc.trt]).id
                self.source_collector[trt_model_id] = sc
Example #5
def gmfs(job_id, ses_ruptures, sitecol, gmf_id):
    """
    :param int job_id: the current job ID
    :param ses_ruptures: a set of `SESRupture` instances
    :param sitecol: a `SiteCollection` instance
    :param int gmf_id: the ID of a `Gmf` instance
    """
    job = models.OqJob.objects.get(pk=job_id)
    hc = job.hazard_calculation
    # distinct is here to make sure that IMTs such as
    # SA(0.8) and SA(0.80) are considered the same
    imts = distinct(from_string(x) for x in sorted(hc.intensity_measure_types))
    gsim = AVAILABLE_GSIMS[hc.gsim]()  # instantiate the GSIM class
    correlation_model = models.get_correl_model(job)

    cache = collections.defaultdict(list)  # {(site_id, imt) -> [(gmv, rup_id)]}
    inserter = writer.CacheInserter(models.GmfData, 1000)
    # insert GmfData in blocks of 1000 sites

    # NB: ses_ruptures is a non-empty list produced by the block_splitter
    rupture = ses_ruptures[0].rupture  # ProbabilisticRupture instance
    with EnginePerformanceMonitor('computing gmfs', job_id, gmfs):
        gmf = GmfComputer(rupture, sitecol, imts, [gsim], hc.truncation_level,
                          correlation_model)
        gname = gsim.__class__.__name__
        for ses_rup in ses_ruptures:
            for (gname, imt), gmvs in gmf.compute(ses_rup.seed):
                for site_id, gmv in zip(sitecol.sids, gmvs):
                    # float may be needed below to convert 1x1 matrices
                    cache[site_id, imt].append((gmv, ses_rup.id))

    with EnginePerformanceMonitor('saving gmfs', job_id, gmfs):
        for (site_id, imt_str), data in cache.iteritems():
            imt = from_string(imt_str)
            gmvs, rup_ids = zip(*data)
            inserter.add(
                models.GmfData(
                    gmf_id=gmf_id,
                    task_no=0,
                    imt=imt[0],
                    sa_period=imt[1],
                    sa_damping=imt[2],
                    site_id=site_id,
                    rupture_ids=rup_ids,
                    gmvs=gmvs))
        inserter.flush()
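
A note on the save phase above: each cache value is a list of (gmv, rupture_id) pairs, and zip(*data) transposes it into two parallel tuples before the insert. A small illustration with made-up numbers:

data = [(0.12, 1), (0.34, 2), (0.56, 3)]  # (gmv, rupture_id) pairs
gmvs, rup_ids = zip(*data)
# gmvs == (0.12, 0.34, 0.56), rup_ids == (1, 2, 3)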
Example #6
def intensity_measure_types(value):
    """
    :param value: input string
    :returns: non-empty list of Intensity Measure Type objects

    >>> intensity_measure_types('PGA')
    ['PGA']
    >>> intensity_measure_types('PGA, SA(1.00)')
    ['PGA', 'SA(1.0)']
    >>> intensity_measure_types('SA(0.1), SA(0.10)')
    Traceback (most recent call last):
      ...
    ValueError: Duplicated IMTs in SA(0.1), SA(0.10)
    """
    imts = []
    for chunk in value.split(','):
        imts.append(str(imt.from_string(chunk.strip())))
    if len(distinct(imts)) < len(imts):
        raise ValueError('Duplicated IMTs in %s' % value)
    return imts
Example #7
    def pre_execute(self):
        """
        Do pre-execution work. At the moment, this work entails:
        parsing and initializing sources, parsing and initializing the
        site model (if there is one), parsing vulnerability and
        exposure files, and generating logic tree realizations. (The
        latter piece basically defines the work to be done in the
        `execute` phase.)
        """
        # NB: without an explicit transaction, errors will be masked;
        # the problem is that Django by default performs implicit
        # transactions without rollback, see
        # https://docs.djangoproject.com/en/1.3/topics/db/transactions/
        with transaction.commit_on_success(using='job_init'):
            self.parse_risk_models()
        with transaction.commit_on_success(using='job_init'):
            self.initialize_site_model()
        with transaction.commit_on_success(using='job_init'):
            self.initialize_sources()
        self.create_ruptures()
        n_imts = len(distinct(from_string(imt)
                              for imt in self.hc.intensity_measure_types))
        n_sites = len(self.hc.site_collection)
        n_gmf = self.hc.number_of_ground_motion_fields
        output_weight = n_sites * n_imts * n_gmf
        logs.LOG.info('Expected output size=%s', output_weight)
        models.JobInfo.objects.create(
            oq_job=self.job,
            num_sites=n_sites,
            num_realizations=1,
            num_imts=n_imts,
            num_levels=0,
            input_weight=0,
            output_weight=output_weight)
        self.check_limits(input_weight=0, output_weight=output_weight)
        return 0, output_weight
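
The repeated with-block could be factored into a helper; a minimal sketch, assuming Django 1.3-era transaction management where commit_on_success acts as a context manager (run_step is a hypothetical name, not part of the engine):

from django.db import transaction

def run_step(step, using='job_init'):
    # run one initialization step inside its own explicit transaction,
    # so a failure is rolled back instead of being masked by Django's
    # implicit transaction handling
    with transaction.commit_on_success(using=using):
        step()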
Example #8
    def parse_risk_models(self):
        """
        If any risk model is given in the hazard calculation, the
        computation will be driven by risk data. In this case the
        locations will be extracted from the exposure file (if there
        is one) and the imt (and levels) will be extracted from the
        vulnerability model (if there is one)
        """
        hc = self.hc
        if hc.vulnerability_models:
            logs.LOG.progress("parsing risk models")

            hc.intensity_measure_types_and_levels = dict()
            hc.intensity_measure_types = list()

            for vf in hc.vulnerability_models:
                intensity_measure_types_and_levels = dict(
                    (record['IMT'], record['IML']) for record in
                    parsers.VulnerabilityModelParser(vf))

                for imt, levels in \
                        intensity_measure_types_and_levels.items():
                    if (imt in hc.intensity_measure_types_and_levels and
                        (set(hc.intensity_measure_types_and_levels[imt]) -
                         set(levels))):
                        logs.LOG.warning(
                            "The same IMT %s is associated with "
                            "different levels" % imt)
                    else:
                        hc.intensity_measure_types_and_levels[imt] = levels

                hc.intensity_measure_types.extend(
                    intensity_measure_types_and_levels)

            # remove possible duplicates
            if hc.intensity_measure_types is not None:
                hc.intensity_measure_types = list(set(
                    hc.intensity_measure_types))
            hc.save()
            logs.LOG.info("Got IMT and levels "
                          "from vulnerability models: %s - %s" % (
                              hc.intensity_measure_types_and_levels,
                              hc.intensity_measure_types))

        if 'fragility' in hc.inputs:
            hc.intensity_measure_types_and_levels = dict()
            hc.intensity_measure_types = list()

            parser = iter(parsers.FragilityModelParser(
                hc.inputs['fragility']))

            fragility_format, _limit_states = parser.next()

            if (fragility_format == "continuous" and
                    hc.calculation_mode != "scenario"):
                raise NotImplementedError(
                    "Getting IMT and levels from "
                    "a continuous fragility model is not yet supported")

            hc.intensity_measure_types_and_levels = dict(
                (iml['IMT'], iml['imls'])
                for _taxonomy, iml, _params, _no_damage_limit in parser)
            hc.intensity_measure_types.extend(
                hc.intensity_measure_types_and_levels)
            hc.save()

        if 'exposure' in hc.inputs:
            with logs.tracing('storing exposure'):
                exposure.ExposureDBWriter(
                    self.job).serialize(
                    parsers.ExposureModelParser(hc.inputs['exposure']))

        # save IMTs
        imt_strings = self.hc.get_imts()
        imts = distinct(map(from_string, imt_strings))
        if len(imt_strings) > len(imts):
            logs.LOG.warn('Found duplicated IMTs: %s', imt_strings)
        models.Imt.save_new(imts)
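
The final check works because from_string maps equivalent spellings to the same IMT tuple, so distinct collapses them. A sketch of the mechanism, reusing the assumed distinct helper from above:

imt_strings = ['SA(0.1)', 'SA(0.10)', 'PGA']
imts = distinct(map(from_string, imt_strings))
# from_string('SA(0.1)') == from_string('SA(0.10)'), hence len(imts) == 2
# and len(imt_strings) > len(imts), which triggers the duplicate warning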