Example #1
def run_job(job_file, output_type):
    """Given a job_file, run the job."""

    a_job = Job.from_file(job_file, output_type)
    is_job_valid = a_job.is_valid()

    if is_job_valid[0]:
        a_job.set_status('running')

        try:
            a_job.launch()
        except sqlalchemy.exc.SQLAlchemyError:
            # Try to clean up the session status so we have a chance to
            # update the job record without further errors.
            session = get_db_session("reslt", "writer")
            if session.is_active:
                session.rollback()

            a_job.set_status('failed')

            raise
        except:
            a_job.set_status('failed')

            raise
        else:
            a_job.set_status('succeeded')
    else:
        a_job.set_status('failed')

        LOG.critical("The job configuration is inconsistent:")

        for error_message in is_job_valid[1]:
            LOG.critical("   >>> %s" % error_message)
Example #2
def compute_mean_hazard_maps(job):
    """Compute mean hazard maps using as input all the
    pre computed mean hazard curves.

    The POES_HAZARD_MAPS parameter in the configuration file specifies
    all the values used in the computation.
    """

    poes = _extract_values_from_config(job, POES_PARAM_NAME)

    LOG.debug("[MEAN_HAZARD_MAPS] List of POEs is %s" % poes)

    # get all the pre computed mean curves
    pattern = "%s*%s*" % (kvs.tokens.MEAN_HAZARD_CURVE_KEY_TOKEN, job.id)
    mean_curves = kvs.mget_decoded(pattern)

    LOG.debug("[MEAN_HAZARD_MAPS] Found %s pre computed mean curves"
            % len(mean_curves))

    keys = []
    for poe in poes:
        for mean_curve in mean_curves:
            site = shapes.Site(mean_curve["site_lon"],
                               mean_curve["site_lat"])

            key = kvs.tokens.mean_hazard_map_key(
                    job.id, site, poe)
            keys.append(key)

            _store_iml_for(mean_curve, key, job, poe)

    return keys
Example #3
File: java.py Project: hsberlin/openquake
def jvm(max_mem=None):
    """Return the jpype module, after guaranteeing the JVM is running and
    the classpath has been loaded properly."""
    jarpaths = (os.path.abspath(
                    os.path.join(os.path.dirname(__file__), "../lib")),
                os.path.abspath(
                    os.path.join(os.path.dirname(__file__), "../dist")))
    log4j_properties_path = os.path.abspath(
                                os.path.join(os.path.dirname(__file__),
                                "../log4j.properties"))
    if not jpype.isJVMStarted():
        max_mem = get_jvm_max_mem(max_mem)
        LOG.debug("Default JVM path is %s" % jpype.getDefaultJVMPath())
        jpype.startJVM(jpype.getDefaultJVMPath(),
            "-Djava.ext.dirs=%s:%s" % jarpaths,
            # "-Dlog4j.debug", # turn on log4j internal debugging
            "-Dlog4j.configuration=file://%s" % log4j_properties_path,
            "-Xmx%sM" % max_mem)

        # Override the log level set in the log4j configuration file; this
        # can't be done on the JVM command line (i.e. -Dlog4j.rootLogger= is
        # not supported by log4j).
        _set_java_log_level(FLAGS.debug.upper())

        if FLAGS.capture_java_debug:
            _setup_java_capture(sys.stdout, sys.stderr)

    return jpype
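For orientation, the guard-and-start pattern used above boils down to the sketch below; the jar directory and heap size are placeholder values, not the project's actual configuration.

import os
import jpype

def start_jvm(jar_dir="lib", max_mem_mb=1024):
    """Start the JVM once, pointing java.ext.dirs at jar_dir (placeholder)."""
    if not jpype.isJVMStarted():
        jpype.startJVM(
            jpype.getDefaultJVMPath(),
            "-Djava.ext.dirs=%s" % os.path.abspath(jar_dir),
            "-Xmx%dM" % max_mem_mb)
    return jpype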
Example #4
    def _partition(self):
        """Split the set of sites to compute in blocks and store
        the in the underlying kvs system.
        """

        sites = []
        self.blocks_keys = []
        region_constraint = self.region

        # we use the exposure, if specified,
        # otherwise we use the input region
        if self.has(EXPOSURE):
            sites = self._read_sites_from_exposure()
            LOG.debug("Loaded %s sites from exposure portfolio." % len(sites))
        elif self.region:
            sites = self.region.sites
        else:
            raise Exception("I don't know how to get the sites!")
        if self.partition:
            block_count = 0
            for block in BlockSplitter(sites, constraint=region_constraint):
                self.blocks_keys.append(block.id)
                block.to_kvs()
                block_count += 1
            LOG.debug("Job has partitioned %s sites into %s blocks" %
                      (len(sites), block_count))
        else:
            block = Block(sites)
            self.blocks_keys.append(block.id)
            block.to_kvs()
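BlockSplitter itself is not shown in this excerpt. As a rough stand-in only, a fixed-size splitter over a site list could look like this (the block size and the omitted region-constraint handling are assumptions):

def split_into_blocks(sites, block_size=1000):
    """Yield lists of at most block_size sites; a simplified stand-in for
    BlockSplitter, which additionally applies a region constraint."""
    for start in range(0, len(sites), block_size):
        yield sites[start:start + block_size]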
Example #5
File: core.py Project: bwyss/oq-engine
def compute_uhs_task(job_id, realization, site):
    """Compute Uniform Hazard Spectra for a given site of interest and 1 or
    more Probability of Exceedance values. The bulk of the computation will
    be done by utilizing the `UHSCalculator` class in the Java code.

    UHS results will be written directly to the database.

    :param int job_id:
        ID of the job record in the DB/KVS.
    :param realization:
        Logic tree sample number (from 1 to N, where N is the
        NUMBER_OF_LOGIC_TREE_SAMPLES param defined in the job config).
    :param site:
        The site of interest (a :class:`openquake.shapes.Site` object).
    """
    job_ctxt = utils_tasks.get_running_job(job_id)

    log_msg = (
        "Computing UHS for job_id=%s, site=%s, realization=%s."
        " UHS results will be serialized to the database.")
    log_msg %= (job_ctxt.job_id, site, realization)
    LOG.info(log_msg)

    uhs_results = compute_uhs(job_ctxt, site)

    write_uhs_spectrum_data(job_ctxt, realization, site, uhs_results)
Example #6
def read_sites_from_exposure(a_job):
    """
    Given the exposure model specified in the job config, read all sites which
    are located within the region of interest.

    :param a_job: a Job object with an EXPOSURE parameter defined
    :type a_job: :py:class:`openquake.job.Job`

    :returns: a list of :py:class:`openquake.shapes.Site` objects
    """

    sites = []
    path = os.path.join(a_job.base_path, a_job.params[conf.EXPOSURE])

    reader = exposure.ExposurePortfolioFile(path)
    constraint = a_job.region

    LOG.debug(
        "Constraining exposure parsing to %s" % constraint)

    for site, _asset_data in reader.filter(constraint):

        # we don't want duplicates (bug 812395):
        if site not in sites:
            sites.append(site)

    return sites
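The `site not in sites` membership test scans the whole list, so the de-duplication above is quadratic in the number of sites. If Site objects are hashable (an assumption here), an order-preserving set-based variant avoids that:

def unique_sites(site_iter):
    """Order-preserving de-duplication; assumes Site objects are hashable."""
    seen = set()
    result = []
    for site in site_iter:
        if site not in seen:
            seen.add(site)
            result.append(site)
    return result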
Example #7
def run_job(job_file, output_type):
    """
    Given a job_file, run the job.

    :param job_file: the path of the configuration file for the job
    :type job_file: string
    :param output_type: the desired format for the results, one of 'db', 'xml'
    :type output_type: string
    """
    a_job = Job.from_file(job_file, output_type)
    a_job.set_status('running')

    # Close all db connections to make sure they're not shared between the
    # supervisor and job executor processes. Otherwise, if one of them closes
    # the connection it immediately becomes unavailable for the other.
    close_connection()

    job_pid = os.fork()
    if not job_pid:
        # job executor process
        try:
            logs.init_logs_amqp_send(level=FLAGS.debug, job_id=a_job.job_id)
            a_job.launch()
        except Exception as ex:
            LOG.critical("Job failed with exception: '%s'" % str(ex))
            a_job.set_status('failed')
            raise
        else:
            a_job.set_status('succeeded')
        return
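The control flow around os.fork() can be easier to see in isolation. A bare sketch of the same pattern, with a placeholder work() callable standing in for a_job.launch() (Unix only):

import os

def fork_and_run(work):
    """Run work() in a child process and wait for it (sketch only)."""
    pid = os.fork()
    if pid == 0:  # child process
        status = 0
        try:
            work()
        except Exception:
            status = 1
        os._exit(status)  # never fall through into the parent's code path
    _, exit_info = os.waitpid(pid, 0)  # parent reaps the child
    return exit_info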
Example #8
def plot_aggregate_curve(job, aggregate_curve):
    """Plot an aggreate loss curve.

    This function is triggered only if the AGGREGATE_LOSS_CURVE
    parameter is specified in the configuration file.

    :param job: the job the engine is currently processing.
    :type job:
        :py:class:`openquake.risk.job.probabilistic.ProbabilisticEventMixin`
    :param aggregate_curve: the aggregate curve to plot.
    :type aggregate_curve: :py:class:`openquake.shapes.Curve`
    """

    if not job.has("AGGREGATE_LOSS_CURVE"):
        LOG.debug("AGGREGATE_LOSS_CURVE parameter not specified, " \
                "skipping aggregate loss curve computation...")

        return

    path = os.path.join(job.params["BASE_PATH"],
            job.params["OUTPUT_DIR"], _filename(job.job_id))

    plotter = curve.CurvePlot(path)
    plotter.write(_for_plotting(aggregate_curve,
            job.params["INVESTIGATION_TIME"]), autoscale_y=False)

    plotter.close()
    LOG.debug("Aggregate loss curve stored at %s" % path)
Example #9
def run_job(job_file, output_type):
    """
    Given a job_file, run the job.

    :param job_file: the path of the configuration file for the job
    :type job_file: string
    :param output_type: the desired format for the results, one of 'db', 'xml'
    :type output_type: string
    """

    a_job = Job.from_file(job_file, output_type)
    is_job_valid = a_job.is_valid()

    if is_job_valid[0]:
        a_job.set_status('running')

        spawn_job_supervisor(a_job.job_id, os.getpid())

        try:
            a_job.launch()
        except Exception as ex:
            LOG.critical("Job failed with exception: '%s'" % str(ex))
            a_job.set_status('failed')
            raise
        else:
            a_job.set_status('succeeded')
Example #10
def compute_aggregate_curve(job):
    """Compute and plot an aggreate loss curve.

    This function expects to find in kvs a set of pre computed
    GMFs and assets.

    This function is triggered only if the AGGREGATE_LOSS_CURVE
    parameter is specified in the configuration file.

    :param job: the job the engine is currently processing.
    :type job: openquake.risk.job.probabilistic.ProbabilisticEventMixin
    """
    if not job.has("AGGREGATE_LOSS_CURVE"):
        LOG.debug("AGGREGATE_LOSS_CURVE parameter not specified, " \
                "skipping aggregate loss curve computation...")

        return

    epsilon_provider = risk_job.EpsilonProvider(job.params)
    aggregate_loss_curve = \
        prob.AggregateLossCurve.from_kvs(job.id, epsilon_provider)

    path = os.path.join(job.params["BASE_PATH"],
            job.params["OUTPUT_DIR"], _filename(job.id))

    plotter = curve.CurvePlot(path)
    plotter.write(_for_plotting(
            aggregate_loss_curve.compute(),
            job.params["INVESTIGATION_TIME"]), autoscale_y=False)

    plotter.close()
    LOG.debug("Aggregate loss curve stored at %s" % path)
Example #11
File: java.py Project: VSilva/openquake
def jvm(max_mem=4000):
    """Return the jpype module, after guaranteeing the JVM is running and 
    the classpath has been loaded properly."""
    jarpaths = (os.path.abspath(
                    os.path.join(os.path.dirname(__file__), "../lib")),
                os.path.abspath(
                    os.path.join(os.path.dirname(__file__), "../dist")))
    # TODO(JMC): Make sure these directories exist
    # LOG.debug("Jarpath is %s", jarpaths)
    if not jpype.isJVMStarted():
        LOG.debug("Default JVM path is %s" % jpype.getDefaultJVMPath())
        jpype.startJVM(jpype.getDefaultJVMPath(), 
            "-Djava.ext.dirs=%s:%s" % jarpaths, 
        #"-Dnet.spy.log.LoggerImpl=net.spy.memcached.compat.log.Log4JLogger",
            # "-Dlog4j.debug",
            "-Dlog4j.configuration=log4j.properties",
            "-Dlog4j.rootLogger=%s, A1" % (FLAGS.debug.upper()),
            # "-Dlog4j.rootLogger=DEBUG, A1",
            "-Xmx%sM" % max_mem)
        
        if FLAGS.capture_java_debug:
            mystream = jpype.JProxy("org.gem.IPythonPipe", inst=sys.stdout)
            errstream = jpype.JProxy("org.gem.IPythonPipe", inst=sys.stderr)
            outputstream = jpype.JClass("org.gem.PythonOutputStream")()
            err_stream = jpype.JClass("org.gem.PythonOutputStream")()
            outputstream.setPythonStdout(mystream)
            err_stream.setPythonStdout(errstream)
        
            ps = jpype.JClass("java.io.PrintStream")
            jpype.java.lang.System.setOut(ps(outputstream))
            jpype.java.lang.System.setErr(ps(err_stream))
        
    return jpype
Example #12
File: core.py Project: kpanic/openquake
def compute_uhs_task(job_id, realization, site):
    """Compute Uniform Hazard Spectra for a given site of interest and 1 or
    more Probability of Exceedance values. The bulk of the computation will
    be done by utilizing the `UHSCalculator` class in the Java code.

    UHS results will be written directly to the database.

    :param int job_id:
        ID of the job record in the DB/KVS.
    :param realization:
        Logic tree sample number (from 1 to N, where N is the
        NUMBER_OF_LOGIC_TREE_SAMPLES param defined in the job config).
    :param site:
        The site of interest (a :class:`openquake.shapes.Site` object).
    """
    calc_proxy = utils_tasks.get_running_calculation(job_id)

    log_msg = (
        "Computing UHS for job_id=%s, site=%s, realization=%s."
        " UHS results will be serialized to the database.")
    log_msg %= (calc_proxy.job_id, site, realization)
    LOG.info(log_msg)

    uhs_results = compute_uhs(calc_proxy, site)

    write_uhs_spectrum_data(calc_proxy, realization, site, uhs_results)
Example #13
def compute_quantile_hazard_curves(job, sites):
    """Compute a quantile hazard curve for each site in the list
    using as input all the pre-computed curves for different realizations.

    The QUANTILE_LEVELS parameter in the configuration file specifies
    all the values used in the computation.
    """

    keys = []
    quantiles = _extract_values_from_config(job, QUANTILE_PARAM_NAME)

    LOG.debug("[QUANTILE_HAZARD_CURVES] List of quantiles is %s" % quantiles)

    for site in sites:
        for quantile in quantiles:
            hazard_curves = curves_at(job.id, site)

            poes = [_extract_y_values_from(curve) for curve in hazard_curves]
            quantile_poes = compute_quantile_curve(poes, quantile)

            quantile_curve = {"site_lat": site.latitude,
                "site_lon": site.longitude,
                "curve": _reconstruct_curve_list_from(quantile_poes)}

            key = kvs.tokens.quantile_hazard_curve_key(
                    job.id, site, quantile)
            keys.append(key)

            kvs.set_value_json_encoded(key, quantile_curve)

    return keys
Example #14
def jvm(max_mem=None):
    """Return the jpype module, after guaranteeing the JVM is running and 
    the classpath has been loaded properly."""
    jarpaths = (os.path.abspath(
        os.path.join(os.path.dirname(__file__), "../lib")),
                os.path.abspath(
                    os.path.join(os.path.dirname(__file__), "../dist")))
    # TODO(JMC): Make sure these directories exist
    # LOG.debug("Jarpath is %s", jarpaths)
    if not jpype.isJVMStarted():
        max_mem = get_jvm_max_mem(max_mem)
        LOG.debug("Default JVM path is %s" % jpype.getDefaultJVMPath())
        jpype.startJVM(
            jpype.getDefaultJVMPath(),
            "-Djava.ext.dirs=%s:%s" % jarpaths,
            #"-Dnet.spy.log.LoggerImpl=net.spy.memcached.compat.log.Log4JLogger",
            # "-Dlog4j.debug",
            "-Dlog4j.configuration=log4j.properties",
            "-Dlog4j.rootLogger=%s, A1" % (FLAGS.debug.upper()),
            # "-Dlog4j.rootLogger=DEBUG, A1",
            "-Xmx%sM" % max_mem)

        if FLAGS.capture_java_debug:
            mystream = jpype.JProxy("org.gem.IPythonPipe", inst=sys.stdout)
            errstream = jpype.JProxy("org.gem.IPythonPipe", inst=sys.stderr)
            outputstream = jpype.JClass("org.gem.PythonOutputStream")()
            err_stream = jpype.JClass("org.gem.PythonOutputStream")()
            outputstream.setPythonStdout(mystream)
            err_stream.setPythonStdout(errstream)

            ps = jpype.JClass("java.io.PrintStream")
            jpype.java.lang.System.setOut(ps(outputstream))
            jpype.java.lang.System.setErr(ps(err_stream))

    return jpype
Example #15
    def from_kvs(job_id, epsilon_provider):
        """Return an aggregate curve using the GMFs and assets
        stored in the underlying kvs system."""

        vuln_model = vulnerability.load_vuln_model_from_kvs(job_id)
        aggregate_curve = AggregateLossCurve(vuln_model, epsilon_provider)

        gmfs_keys = kvs.get_keys("%s*%s*" % (
                job_id, kvs.tokens.GMF_KEY_TOKEN))

        LOG.debug("Found %s stored GMFs..." % len(gmfs_keys))
        asset_counter = 0

        for gmfs_key in gmfs_keys:
            assets = _assets_keys_for_gmfs(job_id, gmfs_key)

            for asset in assets:
                asset_counter += 1
                gmfs = kvs.get_value_json_decoded(gmfs_key)

                aggregate_curve.append(gmfs,
                        json.JSONDecoder().decode(asset))

        LOG.debug("Found %s stored assets..." % asset_counter)
        return aggregate_curve
Example #16
def compute_aggregate_curve(job):
    """Compute and plot an aggreate loss curve.

    This function expects to find in kvs a set of pre computed
    GMFs and assets.

    This function is triggered only if the AGGREGATE_LOSS_CURVE
    parameter is specified in the configuration file.

    :param job: the job the engine is currently processing.
    :type job: openquake.risk.job.probabilistic.ProbabilisticEventMixin
    """
    if not job.has("AGGREGATE_LOSS_CURVE"):
        LOG.debug("AGGREGATE_LOSS_CURVE parameter not specified, " \
                "skipping aggregate loss curve computation...")

        return

    epsilon_provider = risk_job.EpsilonProvider(job.params)
    aggregate_loss_curve = \
        prob.AggregateLossCurve.from_kvs(job.id, epsilon_provider)

    path = os.path.join(job.params["BASE_PATH"], job.params["OUTPUT_DIR"],
                        _filename(job.id))

    plotter = curve.CurvePlot(path)
    plotter.write(_for_plotting(aggregate_loss_curve.compute(),
                                job.params["INVESTIGATION_TIME"]),
                  autoscale_y=False)

    plotter.close()
    LOG.debug("Aggregate loss curve stored at %s" % path)
Example #17
def compute_quantile_hazard_curves(job, sites):
    """Compute a quantile hazard curve for each site in the list
    using as input all the pre-computed curves for different realizations.
    
    The QUANTILE_LEVELS parameter in the configuration file specifies
    all the values used in the computation.
    """

    keys = []

    quantiles = _extract_quantiles_from_config(job)

    LOG.debug("List of QUANTILES is %s" % quantiles)

    for site in sites:
        for quantile in quantiles:

            quantile_curve = {
                "site_lat": site.latitude,
                "site_lon": site.longitude,
                "curve": _reconstruct_curve_list_from(compute_quantile_curve(curves_at(job.id, site), quantile)),
            }

            key = kvs.tokens.quantile_hazard_curve_key(job.id, site, quantile)
            keys.append(key)

            LOG.debug("QUANTILE curve at %s is %s" % (key, quantile_curve))

            kvs.set_value_json_encoded(key, quantile_curve)

    return keys
Example #18
def _get_iml_from(curve, job, poe):
    """Return the interpolated IML using the values defined in
    the INTENSITY_MEASURE_LEVELS parameter as the reference grid to
    interpolate in.

    IML from config is in ascending order (abscissa of hazard curve)
    PoE from curve is in descending order (ordinate of hazard curve)

    In our interpolation, PoE becomes the x axis, IML the y axis, therefore
    the arrays have to be reversed (x axis has to be monotonically
    increasing).
    """

    # reverse arrays
    poes = numpy.array(_extract_y_values_from(curve["curve"]))[::-1]
    imls = numpy.log(numpy.array(_extract_imls_from_config(job))[::-1])

    site = shapes.Site(curve["site_lon"], curve["site_lat"])

    if poe > poes[-1]:
        LOG.debug("[HAZARD_MAP] Interpolation out of bounds for PoE %s, "\
            "using maximum PoE value pair, PoE: %s, IML: %s, at site %s" % (
            poe, poes[-1], math.exp(imls[-1]), site))
        return math.exp(imls[-1])

    if poe < poes[0]:
        LOG.debug("[HAZARD_MAP] Interpolation out of bounds for PoE %s, "\
            "using minimum PoE value pair, PoE: %s, IML: %s, at site %s" % (
            poe, poes[0], math.exp(imls[0]), site))
        return math.exp(imls[0])

    return math.exp(interp1d(poes, imls, kind='linear')(poe))
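The reversal and log-space interpolation described in the docstring can be checked with a tiny synthetic curve; the numbers below are made up purely to show the mechanics (PoEs descend along the curve, IMLs ascend, and interpolation happens on log(IML)).

import math
import numpy
from scipy.interpolate import interp1d

# synthetic hazard curve: IMLs ascending, PoEs descending (invented values)
imls = numpy.array([0.1, 0.2, 0.4, 0.8])
curve_poes = numpy.array([0.9, 0.5, 0.2, 0.05])

# reverse both so the x axis (PoE) is monotonically increasing
poes = curve_poes[::-1]
log_imls = numpy.log(imls[::-1])

interpolate = interp1d(poes, log_imls, kind='linear')
print(math.exp(interpolate(0.3)))  # an IML between 0.2 and 0.4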
Example #19
    def _partition(self):
        """Split the set of sites to compute in blocks and store
        the in the underlying kvs system.
        """

        sites = []
        self.blocks_keys = []
        region_constraint = self.region
        
        # we use the exposure, if specified,
        # otherwise we use the input region
        if self.has(EXPOSURE):
            sites = self._read_sites_from_exposure()
            LOG.debug("Loaded %s sites from exposure portfolio." % len(sites))
        elif self.region:
            sites = self.region.sites
        else:
            raise Exception("I don't know how to get the sites!")
        if self.partition:
            block_count = 0
            for block in BlockSplitter(sites, constraint=region_constraint):
                self.blocks_keys.append(block.id)
                block.to_kvs()
                block_count += 1
            LOG.debug("Job has partitioned %s sites into %s blocks" % (
                    len(sites), block_count))
        else:
            block = Block(sites)
            self.blocks_keys.append(block.id)
            block.to_kvs()
Example #20
 def validator(self, *args):
     """Validate this job before running the decorated function."""
     try:
         # TODO(JMC): Add good stuff here
         assert self.has(EXPOSURE) or self.has(INPUT_REGION)
     except AssertionError as e:
         LOG.exception(e)
         return []
Example #21
 def validator(self, *args):
     """Validate this job before running the decorated function."""
     try:
         # TODO(JMC): Add good stuff here
         assert self.has(EXPOSURE) or self.has(INPUT_REGION)
     except AssertionError as e:
         LOG.exception(e)
         return []
Example #22
def guarantee_file(path, url):
    """Based on flag, download test data file or raise error."""
    if not os.path.isfile(path):
        if not FLAGS.download_test_data:
            raise Exception("Test data does not exist")
        LOG.info("Downloading test data for %s", path)
        retcode = subprocess.call(["curl", url, "-o", path])
        if retcode:
            raise Exception("Test data could not be downloaded from %s" % (url))
Example #23
def guarantee_file(path, url):
    """Based on flag, download test data file or raise error."""
    if not os.path.isfile(path):
        if not FLAGS.download_test_data:
            raise Exception("Test data does not exist")
        LOG.info("Downloading test data for %s", path)
        retcode = subprocess.call(["curl", url, "-o", path])
        if retcode:
            raise Exception("Test data could not be downloaded from %s" % (url))
Example #24
    def from_file(config_file, output_type):
        """
        Create a job from external configuration files.

        :param config_file: the external configuration file path
        :param output_type: where to store results:
            * 'db' database
            * 'xml' XML files *plus* database
        :param params: optional dictionary of default parameters, overridden by
            the ones read from the config file
        :type params: :py:class:`dict`
        """

        # output_type can be set, in addition to 'db' and 'xml', also to
        # 'xml_without_db', which has the effect of serializing only to xml
        # without requiring a database at all.
        # This allows tests to run without requiring a database.
        # This is not documented in the public interface because it is
        # essentially a detail of our current tests and ci infrastructure.
        assert output_type in ('db', 'xml', 'xml_without_db')

        config_file = os.path.abspath(config_file)
        LOG.debug("Loading Job from %s" % (config_file))

        base_path = os.path.abspath(os.path.dirname(config_file))

        params = {}

        sections = []
        for each_config_file in Job.default_configs() + [config_file]:
            new_sections, new_params = parse_config_file(each_config_file)
            sections.extend(new_sections)
            params.update(new_params)
        params['BASE_PATH'] = base_path

        if output_type == 'xml_without_db':
            # we are running a test
            job_id = 0
            serialize_results_to = ['xml']
        else:
            # openquake-server creates the job record in advance and stores the
            # job id in the config file
            job_id = params.get('OPENQUAKE_JOB_ID')
            if not job_id:
                # create the database record for this job
                job_id = prepare_job(params).id

            if output_type == 'db':
                serialize_results_to = ['db']
            else:
                serialize_results_to = ['db', 'xml']

        job = Job(params, job_id, sections=sections, base_path=base_path)
        job.serialize_results_to = serialize_results_to
        job.config_file = config_file  # pylint: disable=W0201
        return job
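The layered parameter handling above (defaults first, then the job's own file, with later values winning) is just successive dict updates. A stripped-down illustration with invented parameter names:

def merge_params(parsed_configs):
    """Merge parameter dicts in order; later configs override earlier ones,
    mirroring the defaults-then-job-config loop above."""
    params = {}
    for new_params in parsed_configs:
        params.update(new_params)
    return params

# hypothetical usage: defaults first, job config last
# merge_params([{"OUTPUT_DIR": "out", "DEPTH": "10"}, {"DEPTH": "5"}])
# -> {'OUTPUT_DIR': 'out', 'DEPTH': '5'}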
Example #25
def run_job(job_file):
    """ Given a job_file, run the job. If we don't get results log it """
    a_job = Job.from_file(job_file)
    # TODO(JMC): Expose a way to set whether jobs should be partitioned
    results = a_job.launch()
    if not results:
        LOG.critical("The job configuration is inconsistent, " "aborting computation.")
    else:
        for filepath in results:
            print(filepath)
Example #26
def run_job(job_file):
    """ Given a job_file, run the job. If we don't get results log it """
    a_job = Job.from_file(job_file)
    # TODO(JMC): Expose a way to set whether jobs should be partitioned
    results = a_job.launch()
    if not results:
        LOG.critical("The job configuration is inconsistent, "
                     "aborting computation.")
    else:
        for filepath in results:
            print(filepath)
Example #27
    def default_configs(cls):
        """
        Default job configuration files; writes a warning if they don't exist.
        """
        if not FLAGS.include_defaults:
            return []

        if not any([os.path.exists(cfg) for cfg in cls.__defaults]):
            LOG.warning("No default configuration! If your job config doesn't "
                        "define all of the expected properties things might "
                        "break.")
        return cls.__defaults
Example #28
def store_gmpe_map(job_id, seed, calc):
    """Generate a hash map of GMPEs (keyed by Tectonic Region Type) and store
    it in the KVS.

    :param int job_id: numeric ID of the job
    :param int seed: seed for random logic tree sampling
    :param calc: logic tree processor
    :type calc: :class:`openquake.input.logictree.LogicTreeProcessor` instance
    """
    LOG.info("Storing GMPE map from job config")
    key = kvs.tokens.gmpe_key(job_id)
    calc.sample_and_save_gmpe_logictree(kvs.get_client(), key, seed)
Example #29
    def default_configs(cls):
        """
        Default job configuration files; writes a warning if they don't exist.
        """
        if not FLAGS.include_defaults:
            return []

        if not any([os.path.exists(cfg) for cfg in cls.__defaults]):
            LOG.warning("No default configuration! If your job config doesn't "
                        "define all of the expected properties things might "
                        "break.")
        return cls.__defaults
Example #30
def serialize_mfd(mfd, parent_node):
    if mfd.__class__.__name__ == 'org.opensha.sha.magdist.SummedMagFreqDist':
        LOG.debug("Serializing a SummedMFD")
        mfd_list = mfd.getMagFreqDists()
        if mfd_list is None:
            mfd_list = [mfd]
        for sub_mfd in mfd_list:
            serialize_evenly_discretized_mfd(sub_mfd, parent_node)
    elif mfd.__class__.__name__ == 'org.opensha.sha.magdist.IncrementalMagFreqDist':
        LOG.debug("Serializing an IncrementalMFD")
        serialize_evenly_discretized_mfd(mfd, parent_node)
    else:
        raise Exception("Unhandled mfd class: %s" % mfd.__class__.__name__)
Example #31
    def cleanup(self):
        """
        Perform any necessary cleanup steps after the job completes.

        Currently, this method only clears KVS cache data for the job.
        """
        LOG.debug("Running KVS garbage collection for job %s" % self.job_id)

        gc_cmd = ['python', 'bin/cache_gc.py', '--job=%s' % self.job_id]

        # run KVS garbage collection aynchronously
        # stdout goes to /dev/null to silence any output from the GC
        subprocess.Popen(gc_cmd, env=os.environ, stdout=open('/dev/null', 'w'))
Example #32
File: general.py Project: kpanic/openquake
def store_source_model(job_id, seed, params, calc):
    """Generate source model from the source model logic tree and store it in
    the KVS.

    :param int job_id: numeric ID of the job
    :param int seed: seed for random logic tree sampling
    :param dict params: the config parameters as (dict)
    :param calc: logic tree processor
    :type calc: :class:`openquake.input.logictree.LogicTreeProcessor` instance
    """
    LOG.info("Storing source model from job config")
    key = kvs.tokens.source_model_key(job_id)
    mfd_bin_width = float(params.get("WIDTH_OF_MFD_BIN"))
    calc.sample_and_save_source_model_logictree(
        kvs.get_client(), key, seed, mfd_bin_width)
Example #33
def store_source_model(job_id, seed, params, calc):
    """Generate source model from the source model logic tree and store it in
    the KVS.

    :param int job_id: numeric ID of the job
    :param int seed: seed for random logic tree sampling
    :param dict params: the config parameters as (dict)
    :param calc: logic tree processor
    :type calc: :class:`openquake.input.logictree.LogicTreeProcessor` instance
    """
    LOG.info("Storing source model from job config")
    key = kvs.tokens.source_model_key(job_id)
    mfd_bin_width = float(params.get('WIDTH_OF_MFD_BIN'))
    calc.sample_and_save_source_model_logictree(
        kvs.get_client(), key, seed, mfd_bin_width)
Example #34
    def from_kvs(job_id):
        """Return an aggregate curve using the computed
        loss curves in the kvs system."""
        client = kvs.get_client(binary=False)
        keys = client.keys("%s*%s*" % (job_id,
                kvs.tokens.LOSS_CURVE_KEY_TOKEN))

        LOG.debug("Found %s stored loss curves..." % len(keys))

        aggregate_curve = AggregateLossCurve()

        for key in keys:
            aggregate_curve.append(shapes.Curve.from_json(kvs.get(key)))
        
        return aggregate_curve
Example #35
    def from_file(config_file):
        """ Create a job from external configuration files. """
        config_file = os.path.abspath(config_file)
        LOG.debug("Loading Job from %s" % (config_file))

        base_path = os.path.abspath(os.path.dirname(config_file))
        params = {}
        sections = []
        for each_config_file in Job.default_configs() + [config_file]:
            new_sections, new_params = parse_config_file(each_config_file)
            sections.extend(new_sections)
            params.update(new_params)
        params['BASE_PATH'] = base_path
        job = Job(params, sections=sections, base_path=base_path)
        job.config_file = config_file  # pylint: disable=W0201
        return job
Example #36
    def _read_sites_from_exposure(self):
        """Read the set of sites to compute from the exposure file specified
        in the job definition."""

        sites = []
        path = os.path.join(self.base_path, self[EXPOSURE])
        reader = exposure.ExposurePortfolioFile(path)
        constraint = self.region
        if not constraint:
            constraint = AlwaysTrueConstraint()
        else:
            LOG.debug("Constraining exposure parsing to %s" % constraint.polygon)
        for asset_data in reader.filter(constraint):
            sites.append(asset_data[0])

        return sites
Example #37
 def _slurp_files(self):
     """Read referenced files and write them into kvs, keyed on their
     sha1s."""
     kvs_client = kvs.get_client(binary=False)
     if self.base_path is None:
         LOG.debug("Can't slurp files without a base path, homie...")
         return
     for key, val in self.params.items():
         if key[-5:] == '_FILE':
             path = os.path.join(self.base_path, val)
             with open(path) as data_file:
                 LOG.debug("Slurping %s" % path)
                 sha1 = hashlib.sha1(data_file.read()).hexdigest()
                 data_file.seek(0)
                 kvs_client.set(sha1, data_file.read())
                 self.params[key] = sha1
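The effect of _slurp_files is content-addressed storage: each referenced file is written under its own SHA-1, so identical files share a key. A minimal in-memory sketch (a plain dict stands in for the KVS client):

import hashlib

def store_by_sha1(store, data):
    """Store bytes under their SHA-1 hex digest and return the key."""
    key = hashlib.sha1(data).hexdigest()
    store[key] = data
    return key

store = {}
key = store_by_sha1(store, b"example file contents")
assert store[key] == b"example file contents"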
Example #38
    def from_file(config_file):
        """ Create a job from external configuration files. """
        config_file = os.path.abspath(config_file)
        LOG.debug("Loading Job from %s" % (config_file))

        base_path = os.path.abspath(os.path.dirname(config_file))
        params = {}
        sections = []
        for each_config_file in Job.default_configs() + [config_file]:
            new_sections, new_params = parse_config_file(each_config_file)
            sections.extend(new_sections)
            params.update(new_params)
        params['BASE_PATH'] = base_path
        job = Job(params, sections=sections, base_path=base_path)
        job.config_file = config_file  # pylint: disable=W0201
        return job
Example #39
 def _slurp_files(self):
     """Read referenced files and write them into kvs, keyed on their
     sha1s."""
     kvs_client = kvs.get_client(binary=False)
     if self.base_path is None:
         LOG.debug("Can't slurp files without a base path, homie...")
         return
     for key, val in self.params.items():
         if key[-5:] == '_FILE':
             path = os.path.join(self.base_path, val)
             with open(path) as data_file:
                 LOG.debug("Slurping %s" % path)
                 sha1 = hashlib.sha1(data_file.read()).hexdigest()
                 data_file.seek(0)
                 kvs_client.set(sha1, data_file.read())
                 self.params[key] = sha1
Example #40
    def _read_sites_from_exposure(self):
        """Read the set of sites to compute from the exposure file specified
        in the job definition."""

        sites = []
        path = os.path.join(self.base_path, self[EXPOSURE])
        reader = exposure.ExposurePortfolioFile(path)
        constraint = self.region
        if not constraint:
            constraint = AlwaysTrueConstraint()
        else:
            LOG.debug("Constraining exposure parsing to %s" %
                      constraint.polygon)
        for asset_data in reader.filter(constraint):
            sites.append(asset_data[0])

        return sites
Example #41
    def from_kvs(job_id):
        """Return an aggregate curve using the GMFs and assets
        stored in the underlying kvs system."""
        
        vuln_model = vulnerability.load_vuln_model_from_kvs(job_id)
        aggregate_curve = AggregateLossCurve(vuln_model)
        
        client = kvs.get_client(binary=False)
        gmfs_keys = client.keys("%s*%s*" % (job_id, kvs.tokens.GMF_KEY_TOKEN))
        LOG.debug("Found %s stored GMFs..." % len(gmfs_keys))

        for gmfs_key in gmfs_keys: # O(2*n)
            asset = _asset_for_gmfs(job_id, gmfs_key)
            gmfs = kvs.get_value_json_decoded(gmfs_key)
            aggregate_curve.append(gmfs, asset)
        
        return aggregate_curve
Example #42
 def _slurp_files(self):
     """Read referenced files and write them into kvs, keyed on their
     sha1s."""
     kvs_client = kvs.get_client()
     if self.base_path is None:
         LOG.debug("Can't slurp files without a base path, homie...")
         return
     for key, val in self.params.items():
         if key[-5:] == '_FILE':
             path = os.path.join(self.base_path, val)
             with open(path) as data_file:
                 LOG.debug("Slurping %s" % path)
                 blob = data_file.read()
                 file_key = kvs.tokens.generate_blob_key(self.job_id, blob)
                 kvs_client.set(file_key, blob)
                 self.params[key] = file_key
                 self.params[key + "_PATH"] = path
Example #43
    def safe_interpolator(poe):
        """
        Return the interpolated IML, limiting the value between the minimum and
        maximum IMLs of the original points describing the curve.
        """
        if poe > poes[-1]:
            LOG.debug("[HAZARD_MAP] Interpolation out of bounds for PoE %s, "\
                "using maximum PoE value pair, PoE: %s, IML: %s, at site %s"
                % (poe, poes[-1], imls[-1], site))
            return imls[-1]

        if poe < poes[0]:
            LOG.debug("[HAZARD_MAP] Interpolation out of bounds for PoE %s, "\
                "using minimum PoE value pair, PoE: %s, IML: %s, at site %s"
                % (poe, poes[0], imls[0], site))
            return imls[0]

        return math.exp(interpolator(poe))
Example #44
def compute_mean_hazard_maps(job_id, sites, imls, poes):
    """Compute mean hazard maps using as input all the
    pre computed mean hazard curves.
    """

    LOG.debug("[MEAN_HAZARD_MAPS] List of POEs is %s" % poes)

    keys = []
    for site in sites:
        mean_poes = kvs.get_value_json_decoded(
            kvs.tokens.mean_hazard_curve_key(job_id, site))
        interpolate = build_interpolator(mean_poes, imls, site)

        for poe in poes:
            key = kvs.tokens.mean_hazard_map_key(job_id, site, poe)
            keys.append(key)

            kvs.set_value_json_encoded(key, interpolate(poe))

    return keys
Example #45
    def launch(self):
        """ Based on the behaviour specified in the configuration, mix in the
        correct behaviour for the tasks and then execute them.
        """
        output_dir = os.path.join(self.base_path, self['OUTPUT_DIR'])
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        results = []
        self._partition()
        for (key, mixin) in Mixin.ordered_mixins():
            if key.upper() not in self.sections:
                continue

            with Mixin(self, mixin, key=key):
                # The mixin defines a preload decorator to handle the needed
                # data for the tasks and decorates _execute(). the mixin's
                # _execute() method calls the expected tasks.
                LOG.debug("Job %s Launching %s for %s" % (self.id, mixin, key))
                results.extend(self.execute())

        return results
Example #46
def compute_quantile_hazard_curves(job_id, sites, realizations, quantiles):
    """Compute a quantile hazard curve for each site in the list
    using as input all the pre-computed curves for different realizations.
    """

    LOG.debug("[QUANTILE_HAZARD_CURVES] List of quantiles is %s" % quantiles)

    keys = []
    for site in sites:
        poes = poes_at(job_id, site, realizations)

        for quantile in quantiles:
            quantile_poes = compute_quantile_curve(poes, quantile)

            key = kvs.tokens.quantile_hazard_curve_key(
                    job_id, site, quantile)
            keys.append(key)

            kvs.set_value_json_encoded(key, quantile_poes)

    return keys
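compute_quantile_curve is not shown in this excerpt; conceptually it reduces, per intensity level, the PoE values from all realizations to the requested quantile. A hypothetical stand-in using numpy:

import numpy

def quantile_curve_sketch(poes_per_realization, quantile):
    """Per-IML quantile over realizations (hypothetical stand-in for
    compute_quantile_curve); rows are realizations, columns are IMLs."""
    return numpy.percentile(numpy.array(poes_per_realization),
                            quantile * 100.0, axis=0)

# e.g. the median curve over three realizations with four IMLs each:
# quantile_curve_sketch([[0.90, 0.50, 0.20, 0.05],
#                        [0.80, 0.40, 0.10, 0.02],
#                        [0.95, 0.60, 0.30, 0.08]], 0.5)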
Example #47
    def append(self, gmfs, asset):
        """Add the losses distribution identified by the given GMFs
        and asset to the set used to compute the aggregate curve."""

        if self.empty:
            self._initialize_parameters(gmfs)

        assert gmfs["TimeSpan"] == self._time_span
        assert gmfs["TSES"] == self._tses
        assert len(gmfs["IMLs"]) == self._gmfs_length

        if asset["vulnerabilityFunctionReference"] in self.vuln_model:
            loss_ratios = compute_loss_ratios(self.vuln_model[
                    asset["vulnerabilityFunctionReference"]], gmfs,
                    self.epsilon_provider, asset)

            self.distribution.append(loss_ratios * asset["assetValue"])
        else:
            LOG.debug("Unknown vulnerability function %s, asset %s will " \
                    "not be included in the aggregate computation"
                    % (asset["vulnerabilityFunctionReference"],
                    asset["assetID"]))
Example #48
    def add(self, ground_motion_field_set, asset):
        """Compute the losses for the given ground motion field set, and
        sum those to the current sum of the losses.

        If the asset refers to a vulnerability function that is not
        supported by the vulnerability model, the distribution
        of the losses is discarded.

        :param ground_motion_field_set: the set of ground motion
            fields used to compute the loss ratios.
        :type ground_motion_field_set: :py:class:`dict` with the following
            keys:
            **IMLs** - tuple of ground motion fields (float)
        :param asset: the asset used to compute the loss ratios and losses.
        :type asset: :py:class:`dict` as provided by
            :py:class:`openquake.parser.exposure.ExposurePortfolioFile`
        """

        if asset["vulnerabilityFunctionReference"] not in self.vuln_model:
            LOG.debug("Unknown vulnerability function %s, asset %s will " \
                      "not be included in the aggregate computation"
                      % (asset["vulnerabilityFunctionReference"],
                      asset["assetID"]))

            return

        vuln_function = self.vuln_model[
            asset["vulnerabilityFunctionReference"]]

        loss_ratios = self.lr_calculator(
            vuln_function, ground_motion_field_set,
            self.epsilon_provider, asset)

        losses = numpy.array(loss_ratios) * asset["assetValue"]

        self.sum_losses(losses)
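The per-asset arithmetic in add() is simply elementwise scaling of loss ratios by the asset value, with the result added to a running total. A toy numpy illustration with invented numbers:

import numpy

loss_ratios = numpy.array([0.01, 0.05, 0.00, 0.10])  # invented per-event ratios
asset_value = 250000.0
losses = loss_ratios * asset_value  # per-event losses for this asset
running_total = numpy.zeros(len(losses))
running_total += losses  # roughly what sum_losses() accumulates
print(running_total)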
Example #49
def compute_quantile_hazard_maps(job_id, sites, quantiles, imls, poes):
    """Compute quantile hazard maps using as input all the
    pre computed quantile hazard curves.
    """

    LOG.debug("[QUANTILE_HAZARD_MAPS] List of POEs is %s" % poes)
    LOG.debug("[QUANTILE_HAZARD_MAPS] List of quantiles is %s" % quantiles)

    keys = []
    for quantile in quantiles:
        for site in sites:
            quantile_poes = kvs.get_value_json_decoded(
                kvs.tokens.quantile_hazard_curve_key(job_id, site, quantile))

            interpolate = build_interpolator(quantile_poes, imls, site)

            for poe in poes:
                key = kvs.tokens.quantile_hazard_map_key(
                        job_id, site, poe, quantile)
                keys.append(key)

                kvs.set_value_json_encoded(key, interpolate(poe))

    return keys
Example #50
def compute_quantile_hazard_maps(job):
    """Compute quantile hazard maps using as input all the
    pre computed quantile hazard curves.

    The POES_HAZARD_MAPS parameter in the configuration file specifies
    all the values used in the computation.
    """

    quantiles = _extract_values_from_config(job, QUANTILE_PARAM_NAME)
    poes = _extract_values_from_config(job, POES_PARAM_NAME)

    LOG.debug("[QUANTILE_HAZARD_MAPS] List of POEs is %s" % poes)
    LOG.debug("[QUANTILE_HAZARD_MAPS] List of quantiles is %s" % quantiles)

    keys = []
    for quantile in quantiles:
        # get all the pre computed quantile curves
        pattern = "%s*%s*%s" % (kvs.tokens.QUANTILE_HAZARD_CURVE_KEY_TOKEN,
                job.id, quantile)

        quantile_curves = kvs.mget_decoded(pattern)

        LOG.debug("[QUANTILE_HAZARD_MAPS] Found %s pre computed " \
                "quantile curves for quantile %s"
                % (len(quantile_curves), quantile))

        for poe in poes:
            for quantile_curve in quantile_curves:
                site = shapes.Site(quantile_curve["site_lon"],
                                   quantile_curve["site_lat"])

                key = kvs.tokens.quantile_hazard_map_key(
                        job.id, site, poe, quantile)
                keys.append(key)

                _store_iml_for(quantile_curve, key, job, poe)

    return keys