Example #1
    def get_hazard_job(self):
        try:
            job = models.JobParam.objects.filter(
                name='description',
                value__contains=self.hazard_calculation_fixture,
                job__status="complete").latest('id').job
        except ObjectDoesNotExist:
            warnings.warn("Computing Hazard input from scratch")
            job = helpers.run_job(
                self._test_path('job_haz.ini'))
            self.assertEqual('complete', job.status)
        else:
            warnings.warn("Using existing Hazard input")

        if self.save_load:
            # Close the opened transactions
            saved_calculation = save_hazards.main(job.id)

            # FIXME From here on, to avoid deadlocks due to stale
            # transactions, we commit all the open transactions. We
            # should find out who is responsible for any transaction
            # left open
            connection = models.getcursor('job_init').connection
            if connection is not None:
                connection.commit()

            [load_calculation] = load_hazards.hazard_load(
                models.getcursor('admin').connection, saved_calculation)
            return models.OqJob.objects.get(pk=load_calculation)
        else:
            return job
Example #2
    def get_hazard_job(self):
        if not self._get_queryset().exists():
            warnings.warn("Computing Hazard input from scratch")
            job = helpers.run_job(
                self._test_path('job_haz.ini'))
            self.assertEqual('complete', job.status)
        else:
            warnings.warn("Using existing Hazard input")
            job = self._get_queryset().latest('oqjob__last_update').oqjob

        if self.save_load:
            # Close the opened transactions
            saved_calculation = save_hazards.main(job.hazard_calculation.id)

            # FIXME From here on, to avoid deadlocks due to stale
            # transactions, we commit all the open transactions. We
            # should find out who is responsible for any transaction
            # left open
            connection = models.getcursor('job_init').connection
            if connection is not None:
                connection.commit()

            [load_calculation] = load_hazards.hazard_load(
                models.getcursor('admin').connection, saved_calculation)
            return models.OqJob.objects.get(
                hazard_calculation__id=load_calculation)
        else:
            return job
Example #3
    def get_hazard_job(self):
        try:
            job = models.JobParam.objects.filter(
                name='description',
                value__contains=self.hazard_calculation_fixture,
                job__status="complete").latest('id').job
        except ObjectDoesNotExist:
            warnings.warn("Computing Hazard input from scratch")
            job = helpers.run_job(
                self._test_path('job_haz.ini')).job
            self.assertEqual('complete', job.status)
        else:
            warnings.warn("Using existing Hazard input")

        if self.save_load:
            # Close the opened transactions
            saved_calculation = save_hazards.main(job.id)

            # FIXME From here on, to avoid deadlocks due to stale
            # transactions, we commit all the open transactions. We
            # should find out who is responsible for any transaction
            # left open
            connection = models.getcursor('job_init').connection
            if connection is not None:
                connection.commit()

            [load_calculation] = load_hazards.hazard_load(
                models.getcursor('admin').connection, saved_calculation)
            return models.OqJob.objects.get(pk=load_calculation)
        else:
            return job
Example #4
    def get_by_site(self, site, hazard_id, imls):
        """
        :param site:
            An instance of :class:`django.contrib.gis.geos.point.Point`
            corresponding to the location of an asset.
        """
        if site.wkt in self._cache:
            return self._cache[site.wkt]

        cursor = models.getcursor('job_init')

        query = """
        SELECT
            hzrdr.hazard_curve_data.poes,
            min(ST_Distance(location::geography,
                            ST_GeographyFromText(%s), false))
                AS min_distance
        FROM hzrdr.hazard_curve_data
        WHERE hazard_curve_id = %s
        GROUP BY id
        ORDER BY min_distance
        LIMIT 1;"""

        args = (site.wkt, hazard_id)

        cursor.execute(query, args)
        poes, distance = cursor.fetchone()

        hazard = zip(imls, poes)

        self._cache[site.wkt] = (hazard, distance)

        return hazard, distance
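The _cache dictionary keyed on site.wkt memoizes the nearest-curve lookup, so repeated assets at the same location hit the database only once. A minimal sketch of that caching pattern, with a hypothetical expensive_lookup standing in for the SQL query above:

class CachedCurveGetter(object):
    def __init__(self):
        self._cache = {}  # wkt -> (hazard, distance)

    def get_by_site(self, wkt):
        if wkt in self._cache:
            return self._cache[wkt]  # cache hit: no database round trip
        result = self.expensive_lookup(wkt)
        self._cache[wkt] = result
        return result

    def expensive_lookup(self, wkt):
        # hypothetical stand-in for the hazard_curve_data query
        return ([(0.1, 0.9)], 0.0)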
Example #5
    def __init__(self, taxonomy, rc, epsilon_sampling=0):
        self.taxonomy = taxonomy
        self.rc = rc
        self.epsilon_sampling = epsilon_sampling
        self.hc = rc.get_hazard_calculation()
        max_dist = rc.best_maximum_distance * 1000  # km to meters
        cursor = models.getcursor('job_init')

        hazard_exposure = models.extract_from([self.hc.oqjob], 'exposuremodel')
        if self.rc.exposure_model is hazard_exposure:
            # no need for geospatial queries, just join on the location
            self.assoc_query = cursor.mogrify("""\
WITH assocs AS (
  SELECT %s, exp.id, hsite.id
  FROM riski.exposure_data AS exp
  JOIN hzrdi.hazard_site AS hsite
  ON exp.site::text = hsite.location::text
  WHERE hsite.hazard_calculation_id = %s
  AND exposure_model_id = %s AND taxonomy=%s
  AND ST_COVERS(ST_GeographyFromText(%s), exp.site)
)
INSERT INTO riskr.asset_site (job_id, asset_id, site_id)
SELECT * FROM assocs""", (rc.oqjob.id, self.hc.id,
                          rc.exposure_model.id, taxonomy,
                          rc.region_constraint.wkt))
        else:
            # associate each asset to the closest hazard site
            self.assoc_query = cursor.mogrify("""\
WITH assocs AS (
  SELECT DISTINCT ON (exp.id) %s, exp.id, hsite.id
  FROM riski.exposure_data AS exp
  JOIN hzrdi.hazard_site AS hsite
  ON ST_DWithin(exp.site, hsite.location, %s)
  WHERE hsite.hazard_calculation_id = %s
  AND exposure_model_id = %s AND taxonomy=%s
  AND ST_COVERS(ST_GeographyFromText(%s), exp.site)
  ORDER BY exp.id, ST_Distance(exp.site, hsite.location, false)
)
INSERT INTO riskr.asset_site (job_id, asset_id, site_id)
SELECT * FROM assocs""", (rc.oqjob.id, max_dist, self.hc.id,
                          rc.exposure_model.id, taxonomy,
                          rc.region_constraint.wkt))

        # insert the associations for the current taxonomy
        with transaction.commit_on_success(using='job_init'):
            cursor.execute(self.assoc_query)

        # now read the associations just inserted
        self.asset_sites = models.AssetSite.objects.filter(
            job=rc.oqjob, asset__taxonomy=taxonomy)
        if not self.asset_sites:
            raise AssetSiteAssociationError(
                'Could not associate any asset of taxonomy %s to '
                'hazard sites within the distance of %s km'
                % (taxonomy, self.rc.best_maximum_distance))

        self.asset_ids = [a.asset_id for a in self.asset_sites]
        self.site_ids = [a.site_id for a in self.asset_sites]
        self.rupture_ids = {}
        self.epsilons_shape = {}
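cursor.mogrify above is psycopg2's: it returns the query with the parameters escaped and interpolated, but does not execute it, which is why the INSERT can be built up front and executed later inside the commit_on_success block. A minimal sketch, assuming conn is an open psycopg2 connection:

cur = conn.cursor()
sql = cur.mogrify(
    'INSERT INTO riskr.asset_site (job_id, asset_id, site_id) '
    'VALUES (%s, %s, %s)', (42, 1, 7))
# sql is now a fully escaped statement, ready to run
cur.execute(sql)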
Example #6
def job_stats(job):
    """
    A context manager saving information such as the number of sites
    and the disk space occupation in the job_stats table. The information
    is saved at the end of the job, even if the job fails.
    """
    dbname = DATABASES['default']['NAME']
    curs = models.getcursor('job_init')
    curs.execute("select pg_database_size(%s)", (dbname, ))
    dbsize = curs.fetchall()[0][0]

    js = job.jobstats
    try:
        yield
    except:
        conn = django_db.connections['job_init']
        if conn.is_dirty():
            conn.rollback()
        raise
    finally:
        job.is_running = False
        job.save()

        # save job stats
        curs.execute("select pg_database_size(%s)", (dbname, ))
        new_dbsize = curs.fetchall()[0][0]
        js.disk_space = new_dbsize - dbsize
        js.stop_time = datetime.utcnow()
        js.save()

        cleanup_after_job(job, terminate=TERMINATE)
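The bare yield shows this is the body of a generator presumably decorated with contextlib.contextmanager (the decorator sits outside the snippet); callers wrap the whole calculation in a with block, and the finally clause saves the stats even when the job fails. The same structure on a trivial example:

import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    start = time.time()
    try:
        yield
    finally:
        # runs even if the body raises, like the job_stats finalization
        print '%s took %.2fs' % (label, time.time() - start)

with timed('calculation'):
    sum(xrange(10 ** 6))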
Example #7
    def _get_data(self, ho):
        # extract the poes for each site from the given hazard output
        imt_type, sa_period, sa_damping = from_string(self.imt)
        oc = ho.output_container
        if oc.output.output_type == 'hazard_curve_multi':
            oc = models.HazardCurve.objects.get(
                output__oq_job=oc.output.oq_job,
                output__output_type='hazard_curve',
                statistics=oc.statistics,
                lt_realization=oc.lt_realization,
                imt=imt_type,
                sa_period=sa_period,
                sa_damping=sa_damping)

        cursor = models.getcursor('job_init')
        query = """\
        SELECT hzrdr.hazard_curve_data.poes
        FROM hzrdr.hazard_curve_data
        WHERE hazard_curve_id = %s AND location = %s
        """
        all_curves = []
        for site_id in self.site_ids:
            location = models.HazardSite.objects.get(pk=site_id).location
            cursor.execute(query, (oc.id, 'SRID=4326; ' + location.wkt))
            poes = cursor.fetchall()[0][0]
            all_curves.append(poes)
        return all_curves
Example #8
File: engine.py Project: julgp/oq-engine
def job_stats(job):
    """
    A context manager saving information such as the number of sites
    and the disk space occupation in the job_stats table. The information
    is saved at the end of the job, even if the job fails.
    """
    dbname = DATABASES['default']['NAME']
    curs = models.getcursor('job_init')
    curs.execute("select pg_database_size(%s)", (dbname,))
    dbsize = curs.fetchall()[0][0]

    js = job.jobstats
    job.is_running = True
    job.save()
    try:
        yield
    except:
        conn = django_db.connections['job_init']
        if conn.is_dirty():
            conn.rollback()
        raise
    finally:
        job.is_running = False
        job.save()

        # save job stats
        curs.execute("select pg_database_size(%s)", (dbname,))
        new_dbsize = curs.fetchall()[0][0]
        js.disk_space = new_dbsize - dbsize
        js.stop_time = datetime.utcnow()
        js.save()

        cleanup_after_job(job, terminate=TERMINATE)
Example #9
def job_stats(job):
    """
    A context manager saving information such as the number of sites
    and the disk space occupation in the job_stats table. The information
    is saved at the end of the job, even if the job fails.
    """
    dbname = DATABASES["default"]["NAME"]
    curs = models.getcursor("job_init")
    curs.execute("select pg_database_size(%s)", (dbname,))
    dbsize = curs.fetchall()[0][0]

    # create job stats, which implicitly records the start time for the job
    js = models.JobStats.objects.create(oq_job=job)
    job.is_running = True
    job.save()
    try:
        yield
    finally:
        job.is_running = False
        job.save()

        # save job stats
        curs.execute("select pg_database_size(%s)", (dbname,))
        new_dbsize = curs.fetchall()[0][0]
        js.disk_space = new_dbsize - dbsize
        js.stop_time = datetime.utcnow()
        js.save()

        cleanup_after_job(job, terminate=TERMINATE)
Example #10
    def _get_data(self, ho):
        # extract the poes for each site from the given hazard output
        imt_type, sa_period, sa_damping = from_string(self.imt)
        oc = ho.output_container
        if oc.output.output_type == 'hazard_curve':
            imls = oc.imls
        elif oc.output.output_type == 'hazard_curve_multi':
            oc = models.HazardCurve.objects.get(
                output__oq_job=oc.output.oq_job,
                output__output_type='hazard_curve',
                statistics=oc.statistics,
                lt_realization=oc.lt_realization,
                imt=imt_type,
                sa_period=sa_period,
                sa_damping=sa_damping)
            imls = oc.imls

        cursor = models.getcursor('job_init')
        query = """\
        SELECT hzrdr.hazard_curve_data.poes
        FROM hzrdr.hazard_curve_data
        WHERE hazard_curve_id = %s AND location = %s
        """
        all_curves = []
        for site_id in self.site_ids:
            location = models.HazardSite.objects.get(pk=site_id).location
            cursor.execute(query, (oc.id, 'SRID=4326; ' + location.wkt))
            poes = cursor.fetchall()[0][0]
            all_curves.append(zip(imls, poes))
        return all_curves
Example #11
def get_actual_gmfs(job):
    """
    Returns the GMFs in the database as a list of pairs [(rlz_path, values)].
    """
    cursor = models.getcursor('job_init')
    cursor.execute(GET_GMF_OUTPUTS % job.id)
    actual_gmfs = [('_'.join(k), scientificformat(sorted(v), '%8.4E'))
                   for k, v in cursor.fetchall()]
    return actual_gmfs
Example #12
    def __init__(self, taxonomy, calc):
        self.exposure_model = calc.exposure_model
        self.hazard_outputs = calc.get_hazard_outputs()
        self.taxonomy = taxonomy
        self.calc = calc
        self.oqparam = models.OqJob.objects.get(
            pk=calc.oqparam.hazard_calculation_id)
        self.calculation_mode = self.calc.oqparam.calculation_mode
        self.number_of_ground_motion_fields = self.oqparam.get_param(
            'number_of_ground_motion_fields', 0)
        max_dist = calc.best_maximum_distance * 1000  # km to meters
        self.cursor = models.getcursor('job_init')

        hazard_exposure = models.extract_from([self.oqparam], 'exposuremodel')
        if self.exposure_model and hazard_exposure and \
           self.exposure_model.id == hazard_exposure.id:
            # no need for geospatial queries, just join on the location
            self.assoc_query = self.cursor.mogrify(
                """\
WITH assocs AS (
  SELECT %s, exp.id, hsite.id
  FROM riski.exposure_data AS exp
  JOIN hzrdi.hazard_site AS hsite
  ON exp.site::text = hsite.location::text
  WHERE hsite.hazard_calculation_id = %s
  AND exposure_model_id = %s AND taxonomy=%s
  AND ST_COVERS(ST_GeographyFromText(%s), exp.site)
)
INSERT INTO riskr.asset_site (job_id, asset_id, site_id)
SELECT * FROM assocs""",
                (self.calc.job.id, self.oqparam.id, self.exposure_model.id,
                 taxonomy, self.calc.oqparam.region_constraint))
        else:
            # associate each asset to the closest hazard site
            self.assoc_query = self.cursor.mogrify(
                """\
WITH assocs AS (
  SELECT DISTINCT ON (exp.id) %s, exp.id, hsite.id
  FROM riski.exposure_data AS exp
  JOIN hzrdi.hazard_site AS hsite
  ON ST_DWithin(exp.site, hsite.location, %s)
  WHERE hsite.hazard_calculation_id = %s
  AND exposure_model_id = %s AND taxonomy=%s
  AND ST_COVERS(ST_GeographyFromText(%s), exp.site)
  ORDER BY exp.id, ST_Distance(exp.site, hsite.location, false)
)
INSERT INTO riskr.asset_site (job_id, asset_id, site_id)
SELECT * FROM assocs""", (self.calc.job.id, max_dist, self.oqparam.id,
                          self.exposure_model.id, taxonomy,
                          self.calc.oqparam.region_constraint))
        self.num_assets = 0
        self._rupture_ids = {}
        self.epsilons_shape = {}
Example #13
    def __init__(self, taxonomy, calc):
        self.exposure_model = calc.exposure_model
        self.hazard_outputs = calc.get_hazard_outputs()
        self.taxonomy = taxonomy
        self.calc = calc
        self.oqparam = models.OqJob.objects.get(
            pk=calc.oqparam.hazard_calculation_id)
        self.calculation_mode = self.calc.oqparam.calculation_mode
        self.number_of_ground_motion_fields = self.oqparam.get_param(
            'number_of_ground_motion_fields', 0)
        max_dist = calc.best_maximum_distance * 1000  # km to meters
        self.cursor = models.getcursor('job_init')

        hazard_exposure = models.extract_from([self.oqparam], 'exposuremodel')
        if self.exposure_model and hazard_exposure and \
           self.exposure_model.id == hazard_exposure.id:
            # no need for geospatial queries, just join on the location
            self.assoc_query = self.cursor.mogrify("""\
WITH assocs AS (
  SELECT %s, exp.id, hsite.id
  FROM riski.exposure_data AS exp
  JOIN (SELECT id, lon, lat FROM hzrdi.hazard_site
  WHERE hazard_calculation_id = %s
  AND ST_Covers(ST_GeometryFromText(%s), ST_MakePoint(lon, lat))) AS hsite
  ON ST_X(exp.site::GEOMETRY) = hsite.lon
  AND ST_Y(exp.site::GEOMETRY) = hsite.lat
  WHERE exposure_model_id = %s AND taxonomy=%s
)
INSERT INTO riskr.asset_site (job_id, asset_id, site_id)
SELECT * FROM assocs""", (self.calc.job.id, self.oqparam.id,
                          self.calc.oqparam.region_constraint,
                          self.exposure_model.id, taxonomy))
        else:
            # associate each asset to the closest hazard site
            self.assoc_query = self.cursor.mogrify("""\
WITH assocs AS (
  SELECT DISTINCT ON (exp.id) %s, exp.id, hsite.id
  FROM riski.exposure_data AS exp
  JOIN (SELECT id, lon, lat FROM hzrdi.hazard_site
  WHERE hazard_calculation_id = %s
  AND ST_Covers(ST_GeometryFromText(%s), ST_MakePoint(lon, lat))) AS hsite
  ON ST_DWithin(exp.site, ST_MakePoint(hsite.lon, hsite.lat)::geography, %s)
  WHERE exposure_model_id = %s AND taxonomy=%s
  ORDER BY exp.id, ST_Distance(
  exp.site, ST_MakePoint(hsite.lon, hsite.lat)::geography, false)
)
INSERT INTO riskr.asset_site (job_id, asset_id, site_id)
SELECT * FROM assocs""", (self.calc.job.id, self.oqparam.id,
                          self.calc.oqparam.region_constraint, max_dist,
                          self.exposure_model.id, taxonomy))
        self.num_assets = 0
        self._rupture_ids = {}
        self.epsilons_shape = {}
Example #14
File: test.py Project: julgp/oq-engine
 def test(self):
     cfg = os.path.join(os.path.dirname(__file__), 'job.ini')
     job = self.run_hazard(cfg)
     cursor = models.getcursor('job_init')
     cursor.execute(GET_GMF_OUTPUTS % job.id)
     actual_gmfs = cursor.fetchall()
     self.assertEqual(len(actual_gmfs), len(EXPECTED_GMFS))
     for (actual_path, actual_gmf), (expected_path, expected_gmf) in zip(
             actual_gmfs, EXPECTED_GMFS):
         self.assertEqual(actual_path, expected_path)
         self.assertEqual(len(actual_gmf), len(expected_gmf))
         numpy.testing.assert_almost_equal(
             sorted(actual_gmf), sorted(expected_gmf))
Example #15
def main(hazard_calculation_id, outdir=None):
    """
    Dump a hazard_calculation and its related outputs
    """
    logging.basicConfig(level=logging.WARN)

    assert models.OqJob.objects.filter(
        pk=hazard_calculation_id).exists(), ("The provided hazard calculation "
                                             "does not exist")

    hc = HazardDumper(models.getcursor('admin').connection, outdir)
    hc.dump(hazard_calculation_id)
    log.info('Written %s' % hc.outdir)
    return hc.outdir
Example #16
def main(hazard_calculation_id, outdir=None):
    """
    Dump a hazard_calculation and its related outputs
    """
    logging.basicConfig(level=logging.WARN)

    assert models.HazardCalculation.objects.filter(
        pk=hazard_calculation_id).exists(), ("The provided hazard calculation "
                                             "does not exist")

    hc = HazardDumper(models.getcursor('admin').connection, outdir)
    hc.dump(hazard_calculation_id)
    log.info('Written %s' % hc.outdir)
    return hc.outdir
Example #17
    def __init__(self, taxonomy, rc):
        self.hazard_outputs = rc.hazard_outputs()
        self.taxonomy = taxonomy
        self.rc = rc
        self.hc = rc.hazard_calculation
        self.calculation_mode = self.rc.oqjob.get_param('calculation_mode')
        self.number_of_ground_motion_fields = self.hc.get_param(
            'number_of_ground_motion_fields', 0)
        max_dist = rc.best_maximum_distance * 1000  # km to meters
        self.cursor = models.getcursor('job_init')

        hazard_exposure = models.extract_from([self.hc], 'exposuremodel')
        if self.rc.exposure_model is hazard_exposure:
            # no need for geospatial queries, just join on the location
            self.assoc_query = self.cursor.mogrify("""\
WITH assocs AS (
  SELECT %s, exp.id, hsite.id
  FROM riski.exposure_data AS exp
  JOIN hzrdi.hazard_site AS hsite
  ON exp.site::text = hsite.location::text
  WHERE hsite.hazard_calculation_id = %s
  AND exposure_model_id = %s AND taxonomy=%s
  AND ST_COVERS(ST_GeographyFromText(%s), exp.site)
)
INSERT INTO riskr.asset_site (job_id, asset_id, site_id)
SELECT * FROM assocs""", (rc.oqjob.id, self.hc.id,
                          rc.exposure_model.id, taxonomy,
                          rc.region_constraint))
        else:
            # associate each asset to the closest hazard site
            self.assoc_query = self.cursor.mogrify("""\
WITH assocs AS (
  SELECT DISTINCT ON (exp.id) %s, exp.id, hsite.id
  FROM riski.exposure_data AS exp
  JOIN hzrdi.hazard_site AS hsite
  ON ST_DWithin(exp.site, hsite.location, %s)
  WHERE hsite.hazard_calculation_id = %s
  AND exposure_model_id = %s AND taxonomy=%s
  AND ST_COVERS(ST_GeographyFromText(%s), exp.site)
  ORDER BY exp.id, ST_Distance(exp.site, hsite.location, false)
)
INSERT INTO riskr.asset_site (job_id, asset_id, site_id)
SELECT * FROM assocs""", (rc.oqjob.id, max_dist, self.hc.id,
                          rc.exposure_model.id, taxonomy,
                          rc.region_constraint))

        self.num_assets = 0
        self._rupture_ids = {}
        self.epsilons_shape = {}
Example #18
    def get_by_site(self, site, hazard_id):
        """
        :param site:
            An instance of :class:`django.contrib.gis.geos.point.Point`
            corresponding to the location of an asset.
        """
        cursor = models.getcursor('job_init')

        query = """\
        SELECT hzrdr.hazard_curve_data.poes
        FROM hzrdr.hazard_curve_data
        WHERE hazard_curve_id = %s AND location = %s
        """
        cursor.execute(query, (hazard_id, 'SRID=4326; ' + site.location.wkt))
        return cursor.fetchone()
Example #19
 def ruptures():
     cursor = models.getcursor('job_init')
     # a rupture "consumes" 8Kb; this limit actually
     # controls the amount of memory used to store them
     limit = 10000
     offsets = range(0, count, limit)
     query = """
             SELECT rup.rupture FROM hzrdr.ses_rupture AS rup
             JOIN hzrdr.ses AS ses ON ses.id = rup.ses_id
             WHERE ses.ses_collection_id = %s
             ORDER BY rup.id LIMIT %s OFFSET %s"""
     for offset in offsets:
         cursor.execute(query, (hazard_output.id, limit, offset))
         for (rupture_data,) in cursor.fetchall():
             yield pickle.loads(str(rupture_data))
Example #20
 def ruptures():
     cursor = models.getcursor('job_init')
     # a rupture "consumes" 8Kb; this limit actually
     # controls the amount of memory used to store them
     limit = 10000
     offsets = range(0, count, limit)
     query = """
             SELECT rup.rupture FROM hzrdr.ses_rupture AS rup
             JOIN hzrdr.ses AS ses ON ses.id = rup.ses_id
             WHERE ses.ses_collection_id = %s
             ORDER BY rup.tag LIMIT %s OFFSET %s"""
     for offset in offsets:
         cursor.execute(query, (hazard_output.id, limit, offset))
         for (rupture_data, ) in cursor.fetchall():
             yield pickle.loads(str(rupture_data))
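Both variants page through the ruptures with LIMIT/OFFSET so that at most `limit` pickled rows are held in memory at once; the stable ORDER BY (rup.id in one version, rup.tag in the other) keeps the pages deterministic. The same pattern in isolation, over a hypothetical table t(id, data):

def fetch_in_pages(cursor, count, limit=10000):
    # sketch of LIMIT/OFFSET paging; count is the total number of rows
    for offset in range(0, count, limit):
        cursor.execute('SELECT data FROM t ORDER BY id '
                       'LIMIT %s OFFSET %s', (limit, offset))
        for (data,) in cursor.fetchall():
            yield data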
Example #21
def job_stats(job):
    """
    A context manager saving information such as the number of sites
    and the disk space occupation in the job_stats table. The information
    is saved at the end of the job, even if the job fails.
    """
    dbname = DATABASES['default']['NAME']
    curs = models.getcursor('job_init')
    curs.execute("select pg_database_size(%s)", (dbname,))
    dbsize = curs.fetchall()[0][0]
    try:
        yield
    finally:
        curs.execute("select pg_database_size(%s)", (dbname,))
        new_dbsize = curs.fetchall()[0][0]
        save_job_stats(job, new_dbsize - dbsize)
Example #22
def del_calc(job_id):
    """
    Delete a calculation and all associated outputs.

    :param job_id:
        ID of a :class:`~openquake.engine.db.models.OqJob`.
    """
    try:
        job = models.OqJob.objects.get(id=job_id)
    except exceptions.ObjectDoesNotExist:
        raise RuntimeError('Unable to delete hazard calculation: '
                           'ID=%s does not exist' % job_id)

    user = getpass.getuser()
    if job.user_name == user:
        # we are allowed to delete this

        # but first, check if any risk calculations are referencing any of our
        # outputs, or the hazard calculation itself
        msg = UNABLE_TO_DEL_HC_FMT % (
            'The following risk calculations are referencing this hazard'
            ' calculation: %s')

        assoc_outputs = models.OqJob.objects.filter(hazard_calculation=job)
        if assoc_outputs.count() > 0:
            raise RuntimeError(
                msg % ', '.join(str(x.id) for x in assoc_outputs))

        # No risk calculations are referencing what we want to delete.
        # Carry on with the deletion. Notice that we cannot use job.delete()
        # directly because Django is so stupid that it reads from the database
        # all the records to delete before deleting them: thus, it runs out
        # of memory for large calculations
        curs = models.getcursor('admin')
        curs.execute('DELETE FROM uiapi.oq_job WHERE id=%s', (job_id,))
    else:
        # this doesn't belong to the current user
        raise RuntimeError(UNABLE_TO_DEL_HC_FMT % 'Access denied')
    try:
        os.remove(job.ds_calc_dir + '.hdf5')
    except:
        pass
    else:
        print('Removed %s' % job.ds_calc_dir + '.hdf5')
Example #23
def job_stats(job):
    """
    A context manager saving information such as the number of sites
    and the disk space occupation in the job_stats table. The information
    is saved at the end of the job, even if the job fails.
    """
    dbname = DATABASES['default']['NAME']
    curs = models.getcursor('job_init')
    curs.execute("select pg_database_size(%s)", (dbname,))
    dbsize = curs.fetchall()[0][0]

    js = job.jobstats
    try:
        yield
    finally:
        tb = traceback.format_exc()  # get the traceback of the error, if any
        job.is_running = False
        if tb != 'None\n':
            # rollback the transactions; unfortunately, for mysterious reasons,
            # this is not enough and an OperationError may still show up in the
            # finalization phase when forks are involved
            for conn in django_db.connections.all():
                conn.rollback()
        # try to save the job stats on the database and then clean up;
        # if there was an error in the calculation, this part may fail;
        # in such a situation, we simply log the cleanup error without
        # taking further action, so that the real error can propagate
        try:
            job.save()
            curs.execute("select pg_database_size(%s)", (dbname,))
            new_dbsize = curs.fetchall()[0][0]
            js.disk_space = new_dbsize - dbsize
            js.stop_time = datetime.utcnow()
            js.save()
            cleanup_after_job(job, terminate=TERMINATE)
        except:
            # log the non-interesting error
            logs.LOG.error('finalizing', exc_info=True)

        # log the real error, if any
        if tb != 'None\n':
            logs.LOG.critical(tb)
Example #24
def check_script(upgrade, dry_run=True, debug=True):
    """
    A utility to debug upgrade scripts written in Python

    :param upgrade: upgrade procedure
    :param dry_run: if True, do not change the database
    :param debug: if True, print the queries which are executed
    """
    from openquake.engine.db.models import getcursor
    conn = WrappedConnection(getcursor('admin').connection, debug=debug)
    try:
        upgrade(conn)
    except:
        conn.rollback()
        raise
    else:
        if dry_run:
            conn.rollback()
        else:
            conn.commit()
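The try/except/else dance here implements a dry-run switch: a failing upgrade always rolls back and re-raises, while a successful one commits only when dry_run is false. The same control flow reduced to a sketch, for any DB-API connection:

def run_and_maybe_commit(conn, work, dry_run=True):
    try:
        work(conn)
    except:
        conn.rollback()  # never leave a half-applied upgrade behind
        raise
    else:
        if dry_run:
            conn.rollback()  # exercise the queries, change nothing
        else:
            conn.commit()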
Example #25
    def assets_gen(self, hazard_output):
        """
        Iterator yielding site_id, assets.
        """
        cursor = models.getcursor('job_init')
        # NB: the ``DISTINCT ON (exposure_data.id)`` combined with the
        # ``ORDER BY ST_Distance`` does the job of selecting the closest
        # site. The other ORDER BY clauses are there to help debugging:
        # it is always nice to have numbers coming in a fixed order. They
        # have an insignificant effect on the performance.
        query = """
SELECT site_id, array_agg(asset_id ORDER BY asset_id) AS asset_ids FROM (
  SELECT DISTINCT ON (exp.id) exp.id AS asset_id, hsite.id AS site_id
  FROM riski.exposure_data AS exp
  JOIN hzrdi.hazard_site AS hsite
  ON ST_DWithin(exp.site, hsite.location, %s)
  WHERE hsite.hazard_calculation_id = %s
  AND taxonomy = %s AND exposure_model_id = %s AND exp.site && %s
  ORDER BY exp.id, ST_Distance(exp.site, hsite.location, false)) AS x
GROUP BY site_id ORDER BY site_id;
   """
        args = (self.max_distance * KILOMETERS_TO_METERS,
                hazard_output.output.oq_job.hazard_calculation.id,
                self.assets[0].taxonomy, self.assets[0].exposure_model_id,
                self._assets_mesh.get_convex_hull().wkt)
        cursor.execute(query, args)
        sites_assets = cursor.fetchall()
        if not sites_assets:
            logs.LOG.warn('No close site found for %d assets of taxonomy %s',
                          len(self.assets), self.assets[0].taxonomy)
        for site_id, asset_ids in sites_assets:
            assets = [
                self.asset_dict[i] for i in asset_ids if i in self.asset_dict
            ]
            # notice the "if i in self.asset_dict": in principle, it should
            # not be necessary; in practice, the query may return spurious
            # assets not in the initial set; this is why we are filtering
            # out the spurious assets; it is a mysterious behaviour of PostGIS
            if assets:
                yield site_id, assets
Example #26
    def __iter__(self):
        """
        Iterator yielding site_id, assets.
        """
        cursor = models.getcursor('job_init')
        # NB: the ``DISTINCT ON (exposure_data.id)`` combined with the
        # ``ORDER BY ST_Distance`` does the job of selecting the closest
        # site. The other ORDER BY clauses are there to help debugging:
        # it is always nice to have numbers coming in a fixed order. They
        # have an insignificant effect on the performance.
        query = """
SELECT site_id, array_agg(asset_id ORDER BY asset_id) AS asset_ids FROM (
  SELECT DISTINCT ON (exp.id) exp.id AS asset_id, hsite.id AS site_id
  FROM riski.exposure_data AS exp
  JOIN hzrdi.hazard_site AS hsite
  ON ST_DWithin(exp.site, hsite.location, %s)
  WHERE hsite.hazard_calculation_id = %s
  AND taxonomy = %s AND exposure_model_id = %s AND exp.site && %s
  ORDER BY exp.id, ST_Distance(exp.site, hsite.location, false)) AS x
GROUP BY site_id ORDER BY site_id;
   """
        args = (self.max_distance * KILOMETERS_TO_METERS,
                self.hazard_output.oq_job.hazard_calculation.id,
                self.assets[0].taxonomy,
                self.assets[0].exposure_model_id,
                self._assets_mesh.get_convex_hull().wkt)
        cursor.execute(query, args)
        sites_assets = cursor.fetchall()
        if not sites_assets:
            logs.LOG.warn('No close site found for %d assets of taxonomy %s',
                          len(self.assets), self.assets[0].taxonomy)
        for site_id, asset_ids in sites_assets:
            assets = [self.asset_dict[i] for i in asset_ids
                      if i in self.asset_dict]
            # notice the "if i in self.asset_dict": in principle, it should
            # not be necessary; in practice, the query may return spurious
            # assets not in the initial set; this is why we are filtering
            # out the spurious assets; it is a mysterious behaviour of PostGIS
            if assets:
                yield site_id, assets
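The key trick in both methods is PostgreSQL's DISTINCT ON (exp.id) combined with ORDER BY exp.id, ST_Distance(...): for each asset only the first row survives, i.e. the nearest hazard site within the ST_DWithin radius. Stripped of the engine tables, the shape of the query is as below (hypothetical tables assets and sites):

CLOSEST_SITE = '''
SELECT DISTINCT ON (a.id) a.id AS asset_id, s.id AS site_id
FROM assets AS a
JOIN sites AS s ON ST_DWithin(a.geom, s.geom, %s)
ORDER BY a.id, ST_Distance(a.geom, s.geom)
'''
# cursor.execute(CLOSEST_SITE, (max_distance,)) would return, per asset,
# the single nearest site within max_distance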
Example #27
def del_calc(job_id):
    """
    Delete a calculation and all associated outputs.

    :param job_id:
        ID of a :class:`~openquake.engine.db.models.OqJob`.
    """
    try:
        job = models.OqJob.objects.get(id=job_id)
    except exceptions.ObjectDoesNotExist:
        raise RuntimeError('Unable to delete hazard calculation: '
                           'ID=%s does not exist' % job_id)

    user = getpass.getuser()
    if job.user_name == user:
        # we are allowed to delete this

        # but first, check if any risk calculations are referencing any of our
        # outputs, or the hazard calculation itself
        msg = UNABLE_TO_DEL_HC_FMT % (
            'The following risk calculations are referencing this hazard'
            ' calculation: %s')

        assoc_outputs = models.OqJob.objects.filter(hazard_calculation=job)
        if assoc_outputs.count() > 0:
            raise RuntimeError(msg %
                               ', '.join(str(x.id) for x in assoc_outputs))

        # No risk calculations are referencing what we want to delete.
        # Carry on with the deletion. Notice that we cannot use job.delete()
        # directly because Django is so stupid that it reads from the database
        # all the records to delete before deleting them: thus, it runs out
        # of memory for large calculations
        curs = models.getcursor('admin')
        curs.execute('DELETE FROM uiapi.oq_job WHERE id=%s', (job_id, ))
    else:
        # this doesn't belong to the current user
        raise RuntimeError(UNABLE_TO_DEL_HC_FMT % 'Access denied')
Example #28
    def get_by_site(self, site, hazard_id):
        """
        :param site:
            An instance of :class:`django.contrib.gis.geos.point.Point`
            corresponding to the location of an asset.
        """
        cursor = models.getcursor('job_init')

        query = """
        SELECT hzrdr.hazard_curve_data.poes
        FROM hzrdr.hazard_curve_data
        WHERE hazard_curve_id = %s
        AND ST_DWithin(ST_GeographyFromText(%s), location::geography, %s)
        ORDER BY
            ST_Distance(location::geography, ST_GeographyFromText(%s), false)
        LIMIT 1
        """

        args = (hazard_id, site.wkt, self.max_distance * KILOMETERS_TO_METERS,
                site.wkt)

        cursor.execute(query, args)
        return cursor.fetchone()
Example #29
 def __init__(self, rc, taxonomy):
     self.rc = rc
     self.taxonomy = taxonomy
     cursor = models.getcursor('job_init')
     query = '''\
 SELECT exp.id AS asset_id, hsite.id AS site_id
 FROM riski.exposure_data AS exp
 JOIN hzrdi.hazard_site AS hsite
 ON exp.site::TEXT=hsite.location::TEXT
 WHERE hsite.hazard_calculation_id = %s
 AND exposure_model_id = %s AND taxonomy=%s
 AND ST_COVERS(ST_GeographyFromText(%s), exp.site)'''
     args = (rc.hazard_calculation.id, rc.exposure_model.id, taxonomy,
             rc.region_constraint.wkt)
     # uncomment when debugging: print cursor.mogrify(query, args)
     cursor.execute(query, args)
     assets_sites = cursor.fetchall()
     if not assets_sites:
         raise AssetSiteAssociationError(
             'Could not associate any asset of taxonomy %s' % taxonomy)
     self.asset_ids, self.site_ids = zip(*assets_sites)
     self.assets = models.ExposureData.objects.get_asset_chunk(
         rc, taxonomy, asset_ids=self.asset_ids)
Example #30
 def _get_gmv_dict(self, ho):
     # return a nested dictionary site_id -> {rupture_id: gmv}
     imt_type, sa_period, sa_damping = from_string(self.imt)
     gmf_id = ho.output_container.id
     if sa_period:
         imt_query = 'imt=%s and sa_period=%s and sa_damping=%s'
     else:
         imt_query = 'imt=%s and sa_period is %s and sa_damping is %s'
     gmv_dict = {}  # dict site_id -> {rup_id: gmv}
     cursor = models.getcursor('job_init')
     cursor.execute('select site_id, rupture_ids, gmvs from '
                    'hzrdr.gmf_data where gmf_id=%s and site_id in %s '
                    'and {} order by site_id'.format(imt_query),
                    (gmf_id, tuple(set(self.site_ids)),
                     imt_type, sa_period, sa_damping))
     for sid, group in itertools.groupby(cursor, operator.itemgetter(0)):
         gmvs = []
         ruptures = []
         for site_id, rupture_ids, gmvs_chunk in group:
             gmvs.extend(gmvs_chunk)
             ruptures.extend(rupture_ids)
         gmv_dict[sid] = dict(itertools.izip(ruptures, gmvs))
     return gmv_dict
Example #31
    def get_data(self, imt):
        """
        Extracts the hazard curves for the given `imt` from the hazard output.

        :param str imt: Intensity Measure Type
        :returns: a list of N curves, each one being a list of pairs (iml, poe)
        """
        imt_type, sa_period, sa_damping = from_string(imt)

        oc = self.hazard_output.output_container
        if oc.output.output_type == 'hazard_curve':
            imls = oc.imls
        elif oc.output.output_type == 'hazard_curve_multi':
            oc = models.HazardCurve.objects.get(
                output__oq_job=oc.output.oq_job,
                output__output_type='hazard_curve',
                statistics=oc.statistics,
                lt_realization=oc.lt_realization,
                imt=imt_type,
                sa_period=sa_period,
                sa_damping=sa_damping)
            imls = oc.imls

        cursor = models.getcursor('job_init')
        query = """\
        SELECT hzrdr.hazard_curve_data.poes
        FROM hzrdr.hazard_curve_data
        WHERE hazard_curve_id = %s AND location = %s
        """
        all_curves = []
        for site_id in self.site_ids:
            location = models.HazardSite.objects.get(pk=site_id).location
            cursor.execute(query, (oc.id, 'SRID=4326; ' + location.wkt))
            poes = cursor.fetchall()[0][0]
            all_curves.append(zip(imls, poes))
        return all_curves
Example #32
 def _get_gmv_dict(self, imt_type, sa_period, sa_damping):
     """
     :returns: a dictionary {rupture_id: gmv} for the given site and IMT
     """
     gmf_id = self.hazard_output.output_container.id
     if sa_period:
         imt_query = 'imt=%s and sa_period=%s and sa_damping=%s'
     else:
         imt_query = 'imt=%s and sa_period is %s and sa_damping is %s'
     gmv_dict = {}
     cursor = models.getcursor('job_init')
     cursor.execute('select site_id, rupture_ids, gmvs from '
                    'hzrdr.gmf_data where gmf_id=%s and site_id in %s '
                    'and {} order by site_id'.format(imt_query),
                    (gmf_id, tuple(set(self.site_ids)),
                     imt_type, sa_period, sa_damping))
     for sid, group in itertools.groupby(cursor, operator.itemgetter(0)):
         gmvs = []
         ruptures = []
         for site_id, rupture_ids, gmvs_chunk in group:
             gmvs.extend(gmvs_chunk)
             ruptures.extend(rupture_ids)
         gmv_dict[sid] = dict(itertools.izip(ruptures, gmvs))
     return gmv_dict
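itertools.groupby consumes the cursor directly: since the query ends with ORDER BY site_id, rows for the same site arrive contiguously, and grouping on the first column yields one group per site. The same idea on plain tuples:

import itertools
import operator

rows = [(1, 'a'), (1, 'b'), (2, 'c')]  # already sorted on the key column
for sid, group in itertools.groupby(rows, operator.itemgetter(0)):
    print sid, [value for _, value in group]
# prints: 1 ['a', 'b']   then   2 ['c']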
Example #33
import os
from django.core.wsgi import get_wsgi_application

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "openquake.server.settings")

from openquake.engine.db import models
models.getcursor('job_init').execute(
    # cleanup of the flag oq_job.is_running
    'UPDATE uiapi.oq_job SET is_running=false WHERE is_running')

# This application object is used by the development server
# as well as any WSGI server configured to use this file.
application = get_wsgi_application()
Example #34
def main():
    arg_parser = set_up_arg_parser()

    args = arg_parser.parse_args()

    exports = args.exports or 'xml,csv'

    if args.version:
        print __version__
        sys.exit(0)

    if args.run or args.run_hazard or args.run_risk:
        # the logging will be configured in engine.py
        pass
    else:
        # configure a basic logging
        logging.basicConfig(level=logging.INFO)

    if args.config_file:
        os.environ[config.OQ_CONFIG_FILE_VAR] = \
            abspath(expanduser(args.config_file))
        config.refresh()

    if args.no_distribute:
        os.environ[openquake.engine.NO_DISTRIBUTE_VAR] = '1'

    if args.make_html_report:
        conn = models.getcursor('admin').connection
        print 'Written', make_report(conn, args.make_html_report)
        sys.exit(0)

    if args.upgrade_db:
        logs.set_level('info')
        conn = models.getcursor('admin').connection
        msg = upgrade_manager.what_if_I_upgrade(conn,
                                                extract_scripts='read_scripts')
        print msg
        if msg.startswith('Your database is already updated'):
            pass
        elif args.yes or confirm('Proceed? (y/n) '):
            upgrade_manager.upgrade_db(conn)
        sys.exit(0)

    if args.version_db:
        conn = models.getcursor('admin').connection
        print upgrade_manager.version_db(conn)
        sys.exit(0)

    if args.what_if_I_upgrade:
        conn = models.getcursor('admin').connection
        print upgrade_manager.what_if_I_upgrade(conn)
        sys.exit(0)

    if args.list_inputs:
        list_inputs(args.list_inputs)

    # hazard or hazard+risk
    elif args.run:
        job_inis = map(expanduser, args.run.split(','))
        if len(job_inis) not in (1, 2):
            sys.exit('%s should be a .ini filename or a pair of filenames '
                     'separated by a comma' % args.run)
        for job_ini in job_inis:
            open(job_ini).read()  # raises an IOError if the file does not exist
        log_file = expanduser(args.log_file) \
            if args.log_file is not None else None
        if args.lite:
            # run hazard and risk together
            engine.run_job_lite(job_inis, args.log_level, log_file,
                                args.exports)
        else:
            # run hazard
            job = engine.run_job(job_inis[0], args.log_level, log_file,
                                 args.exports)
            # run risk
            if len(job_inis) == 2:
                engine.run_job(job_inis[1],
                               args.log_level,
                               log_file,
                               args.exports,
                               hazard_calculation_id=job.id)
    # hazard
    elif args.list_hazard_calculations:
        list_calculations('hazard')
    elif args.run_hazard is not None:
        log_file = expanduser(args.log_file) \
            if args.log_file is not None else None
        engine.run_job(expanduser(args.run_hazard), args.log_level, log_file,
                       args.exports)
    elif args.delete_hazard_calculation is not None:
        del_calc(args.delete_hazard_calculation, args.yes)
    # risk
    elif args.list_risk_calculations:
        list_calculations('risk')
    elif args.run_risk is not None:
        if (args.hazard_output_id is None
                and args.hazard_calculation_id is None):
            sys.exit(MISSING_HAZARD_MSG)
        log_file = expanduser(args.log_file) \
            if args.log_file is not None else None
        engine.run_job(expanduser(args.run_risk),
                       args.log_level,
                       log_file,
                       args.exports,
                       hazard_output_id=args.hazard_output_id,
                       hazard_calculation_id=args.hazard_calculation_id)
    elif args.delete_risk_calculation is not None:
        del_calc(args.delete_risk_calculation, args.yes)

    # export
    elif args.list_outputs is not None:
        engine.list_outputs(args.list_outputs)
    elif args.list_hazard_outputs is not None:
        deprecate('--list-hazard-outputs', '--list-outputs')
        engine.list_outputs(args.list_hazard_outputs)
    elif args.list_risk_outputs is not None:
        deprecate('--list-risk-outputs', '--list-outputs')
        engine.list_outputs(args.list_risk_outputs)

    elif args.export_output is not None:
        output_id, target_dir = args.export_output
        export(int(output_id), expanduser(target_dir), exports)

    elif args.export_hazard_output is not None:
        deprecate('--export-hazard-output', '--export-output')
        output_id, target_dir = args.export_hazard_output
        export(int(output_id), expanduser(target_dir), exports)

    elif args.export_risk_output is not None:
        deprecate('--export-risk-output', '--export-output')
        output_id, target_dir = args.export_risk_output
        export(int(output_id), expanduser(target_dir), exports)

    elif args.export_outputs is not None:
        job_id, target_dir = args.export_outputs
        export_outputs(int(job_id), expanduser(target_dir), exports)

    elif args.export_stats is not None:
        job_id, target_dir, output_type = args.export_stats
        export_stats(int(job_id), expanduser(target_dir), output_type, exports)

    # deprecated
    elif args.export_hazard_outputs is not None:
        deprecate('--export-hazard-outputs', '--export-outputs')
        job_id, target_dir = args.export_hazard_outputs
        export_outputs(int(job_id), expanduser(target_dir), exports)
    elif args.export_risk_outputs is not None:
        deprecate('--export-risk-outputs', '--export-outputs')
        job_id, target_dir = args.export_risk_outputs
        export_outputs(int(job_id), expanduser(target_dir), exports)
    # import
    elif args.load_gmf is not None:
        with open(args.load_gmf) as f:
            out = import_gmf_scenario(f)
            print 'Added output id=%d of type %s; hazard_calculation_id=%d'\
                % (out.id, out.output_type, out.oq_job.id)
    elif args.load_curve is not None:
        with open(args.load_curve) as f:
            out = import_hazard_curves(f)
            print 'Added output id=%d of type %s; hazard_calculation_id=%d'\
                % (out.id, out.output_type, out.oq_job.id)
    elif args.list_imported_outputs:
        list_imported_outputs()
    elif args.delete_uncompleted_calculations:
        delete_uncompleted_calculations()
    elif args.save_hazard_calculation:
        save_hazards.main(*args.save_hazard_calculation)
    elif args.load_hazard_calculation:
        job_ids = load_hazards.hazard_load(
            models.getcursor('admin').connection, args.load_hazard_calculation)
        print "Load hazard calculation with IDs: %s" % job_ids
    else:
        arg_parser.print_usage()
Example #35
def copy_output(platform_connection, output, foreign_calculation_id):
    """
    Copy `output` data from the engine database to the platform one.

    :param platform_connection: a psycopg2 connection handler
    :param output: a :class:`openquake.engine.db.models.Output` object
    :param foreign_calculation_id: the id of the foreign (platform) calculation
    """

    # the workflow is the following:
    # 1) Insert a pointer to the output into the output_layer table
    # 2) Create a temporary table on the platform
    # 3) Copy data from the engine to a temporary file
    # 4) Copy data to the temporary table from the temporary file
    # 5) Move data from the temporary table to the persistent one
    #    by considering foreign key issues
    engine_cursor = oqe_models.getcursor('admin')
    platform_cursor = platform_connection.cursor()

    with tempfile.TemporaryFile() as temporary_file:
        try:
            platform_cursor.execute(
                """INSERT INTO
                   icebox_outputlayer(display_name, calculation_id, engine_id)
                   VALUES(%s, %s, %s) RETURNING id""",
                (output.display_name, foreign_calculation_id, output.id))

            [[output_layer_id]] = platform_cursor.fetchall()

            iface = DBINTERFACE.get(output.output_type)

            if iface is None:
                # FIXME. Implement proper logging
                print "Output type %s not supported" % output.output_type
                return

            logger.info("Copying to temporary stream")
            engine_cursor.copy_expert(
                """COPY (%s) TO STDOUT
                   WITH (FORMAT 'csv', HEADER true,
                         ENCODING 'utf8', DELIMITER '|')""" % (
                iface.export_query % {
                    'output_id': output.id,
                    'calculation_id': output.oq_job.id}),
                temporary_file)

            temporary_file.seek(0)

            temp_table = "temp_%s" % iface.target_table
            platform_cursor.execute("DROP TABLE IF EXISTS %s" % temp_table)
            platform_cursor.execute("CREATE TABLE %s(%s)" % (
                temp_table, iface.fields))

            import_query = """COPY %s FROM STDIN
                              WITH (FORMAT 'csv',
                                    HEADER true,
                                    ENCODING 'utf8',
                                    DELIMITER '|')""" % temp_table
            logger.info("Copying from temporary stream")
            platform_cursor.copy_expert(import_query, temporary_file)

            platform_cursor.execute(iface.import_query % output_layer_id)
            platform_cursor.execute("DROP TABLE IF EXISTS %s" % temp_table)
        except Exception as e:
            # FIXME. Implement proper logging
            print str(e)
            platform_connection.rollback()
            raise
        else:
            platform_connection.commit()
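copy_expert is psycopg2's streaming interface to COPY: the first call above writes CSV to the file object, the second reads it back into a table on the other connection. A minimal sketch of such a round trip between two psycopg2 connections src and dst, assuming a table t exists on both sides:

import tempfile

def copy_table(src, dst, table='t'):
    # stream all rows of `table` from src to dst through a temp file
    with tempfile.TemporaryFile() as tmp:
        src.cursor().copy_expert(
            "COPY %s TO STDOUT WITH (FORMAT 'csv')" % table, tmp)
        tmp.seek(0)  # rewind before reading the CSV back
        dst.cursor().copy_expert(
            "COPY %s FROM STDIN WITH (FORMAT 'csv')" % table, tmp)
    dst.commit()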
Example #36
 def setUp(self):
     self.curs = getcursor('job_init')
     self.curs.execute('create table _example('
                       'id serial primary key, data text)')
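The matching tearDown is not shown; presumably it drops the scratch table so each test starts from a clean schema. A hypothetical counterpart, mirroring the setUp above:

 def tearDown(self):
     # hypothetical: undo setUp by dropping the scratch table
     self.curs.execute('drop table if exists _example')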
Example #37
def main():
    arg_parser = set_up_arg_parser()

    args = arg_parser.parse_args()

    if args.version:
        print __version__
        sys.exit(0)

    if args.config_file:
        os.environ[config.OQ_CONFIG_FILE_VAR] = \
            abspath(expanduser(args.config_file))
        config.refresh()

    if args.no_distribute:
        os.environ[openquake.engine.NO_DISTRIBUTE_VAR] = '1'

    if args.upgrade_db:
        logging.basicConfig(level=logging.INFO)
        logs.set_level('info')
        conn = models.getcursor('admin').connection
        msg = upgrade_manager.what_if_I_upgrade(
            conn, extract_scripts='read_scripts')
        print msg
        if msg.startswith('Your database is already updated'):
            pass
        elif args.yes or confirm('Proceed? (y/n) '):
            upgrade_manager.upgrade_db(conn)
        sys.exit(0)

    if args.version_db:
        conn = models.getcursor('admin').connection
        print upgrade_manager.version_db(conn)
        sys.exit(0)

    if args.what_if_I_upgrade:
        conn = models.getcursor('admin').connection
        print upgrade_manager.what_if_I_upgrade(conn)
        sys.exit(0)

    if args.list_inputs:
        list_inputs(args.list_inputs)

    # hazard
    elif args.list_hazard_calculations:
        list_calculations(models.OqJob.objects)
    elif args.list_hazard_outputs is not None:
        engine.list_hazard_outputs(args.list_hazard_outputs)
    elif args.export_hazard is not None:
        output_id, target_dir = args.export_hazard
        output_id = int(output_id)
        export_hazard(output_id, expanduser(target_dir), args.export_type)
    elif args.export_hazard_outputs is not None:
        hc_id, target_dir = args.export_hazard_outputs
        export_hazard_outputs(int(hc_id), expanduser(target_dir),
                              args.export_type)
    elif args.run_hazard is not None:
        log_file = expanduser(args.log_file) \
            if args.log_file is not None else None
        engine.run_job(expanduser(args.run_hazard), args.log_level,
                       log_file, args.exports)
    elif args.delete_hazard_calculation is not None:
        del_haz_calc(args.delete_hazard_calculation, args.yes)
    # risk
    elif args.list_risk_calculations:
        list_calculations(models.RiskCalculation.objects)
    elif args.list_risk_outputs is not None:
        engine.list_risk_outputs(args.list_risk_outputs)
    elif args.export_risk is not None:
        output_id, target_dir = args.export_risk
        export_risk(output_id, expanduser(target_dir), args.export_type)
    elif args.export_risk_outputs is not None:
        rc_id, target_dir = args.export_risk_outputs
        export_risk_outputs(int(rc_id), expanduser(target_dir),
                            args.export_type)
    elif args.run_risk is not None:
        if (args.hazard_output_id is None
                and args.hazard_calculation_id is None):
            sys.exit(MISSING_HAZARD_MSG)
        log_file = expanduser(args.log_file) \
            if args.log_file is not None else None
        engine.run_job(expanduser(args.run_risk), args.log_level, log_file,
                       args.exports, hazard_output_id=args.hazard_output_id,
                       hazard_calculation_id=args.hazard_calculation_id)
    elif args.delete_risk_calculation is not None:
        del_risk_calc(args.delete_risk_calculation, args.yes)
    # import
    elif args.load_gmf is not None:
        with open(args.load_gmf) as f:
            out = import_gmf_scenario(f)
            print 'Added output id=%d of type %s; hazard_calculation_id=%d'\
                % (out.id, out.output_type, out.oq_job.id)
    elif args.load_curve is not None:
        with open(args.load_curve) as f:
            out = import_hazard_curves(f)
            print 'Added output id=%d of type %s; hazard_calculation_id=%d'\
                % (out.id, out.output_type, out.oq_job.id)
    elif args.list_imported_outputs:
        list_imported_outputs()
    elif args.delete_uncompleted_calculations:
        delete_uncompleted_calculations()
    elif args.save_hazard_calculation:
        save_hazards.main(*args.save_hazard_calculation)
    elif args.load_hazard_calculation:
        hc_ids = load_hazards.hazard_load(
            models.getcursor('admin').connection, args.load_hazard_calculation)
        print "Load hazard calculation with IDs: %s" % hc_ids
    else:
        arg_parser.print_usage()
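Note: the confirm helper called in the upgrade branch above is not part of this snippet. A minimal sketch, assuming a simple y/n prompt loop (the engine's actual implementation may differ):

def confirm(prompt):
    """Ask the user a y/n question and return True on 'y'."""
    while True:
        # raw_input matches the Python 2 style of the code above
        answer = raw_input(prompt).strip().lower()
        if answer in ('y', 'n'):
            return answer == 'y'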
Example #42
import os
import mock
import unittest
import psycopg2
import importlib
from contextlib import contextmanager

from openquake.engine.db.models import getcursor
from openquake.engine.db.upgrade_manager import (
    upgrade_db, version_db, what_if_I_upgrade,
    VersionTooSmall, DuplicatedVersion)

conn = getcursor('admin').connection
pkg = 'openquake.engine.tests.db.upgrades'
upgrader = importlib.import_module(pkg).upgrader


def count(conn, tablename):
    curs = conn.cursor()
    curs.execute('SELECT COUNT(*) FROM %s' % tablename)
    return curs.fetchall()[0][0]


@contextmanager
def temp_script(name, content):
    fname = os.path.join(upgrader.upgrade_dir, name)
    with open(fname, 'w') as s:
        s.write(content)
    try:
        yield
    finally:
        os.remove(fname)
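A minimal usage sketch for temp_script: the 'NNNN-name.sql' naming convention and the single-argument upgrade_db call are assumptions inferred from the surrounding snippets, not a documented API.

# hypothetical test body: write a throwaway upgrade script, run the
# upgrade, then verify the new table exists and is empty
with temp_script('9999-add-dummy-table.sql',
                 'CREATE TABLE dummy(id SERIAL PRIMARY KEY);'):
    upgrade_db(conn)
    assert count(conn, 'dummy') == 0  # freshly created, no rows yet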
Example #43
def main():
    arg_parser = set_up_arg_parser()

    args = arg_parser.parse_args()

    if args.version:
        engine.complain_and_exit(__version__)

    if args.config_file:
        os.environ[config.OQ_CONFIG_FILE_VAR] = \
            abspath(expanduser(args.config_file))
        config.refresh()

    if args.no_distribute:
        os.environ[openquake.engine.NO_DISTRIBUTE_VAR] = '1'

    if args.list_inputs:
        list_inputs(args.list_inputs)
    # hazard
    elif args.list_hazard_calculations:
        list_calculations(models.HazardCalculation.objects)
    elif args.list_hazard_outputs is not None:
        engine.list_hazard_outputs(args.list_hazard_outputs)
    elif args.export_hazard is not None:
        output_id, target_dir = args.export_hazard
        output_id = int(output_id)
        export_hazard(output_id, expanduser(target_dir), args.export_type)
    elif args.export_hazard_outputs is not None:
        hc_id, target_dir = args.export_hazard_outputs
        export_hazard_outputs(int(hc_id), expanduser(target_dir),
                              args.export_type)
    elif args.run_hazard is not None:
        log_file = expanduser(args.log_file) \
            if args.log_file is not None else None
        engine.run_hazard(expanduser(args.run_hazard), args.log_level,
                          log_file, args.exports)
    elif args.delete_hazard_calculation is not None:
        del_haz_calc(args.delete_hazard_calculation, args.yes)
    # risk
    elif args.list_risk_calculations:
        list_calculations(models.RiskCalculation.objects)
    elif args.list_risk_outputs is not None:
        engine.list_risk_outputs(args.list_risk_outputs)
    elif args.export_risk is not None:
        output_id, target_dir = args.export_risk
        export_risk(output_id, expanduser(target_dir), args.export_type)
    elif args.export_risk_outputs is not None:
        rc_id, target_dir = args.export_risk_outputs
        export_risk_outputs(int(rc_id), expanduser(target_dir),
                            args.export_type)
    elif args.run_risk is not None:
        if (args.hazard_output_id is None
                and args.hazard_calculation_id is None):
            engine.complain_and_exit(MISSING_HAZARD_MSG)
        log_file = expanduser(args.log_file) \
            if args.log_file is not None else None
        engine.run_risk(expanduser(args.run_risk), args.log_level, log_file,
                        args.exports, hazard_output_id=args.hazard_output_id,
                        hazard_calculation_id=args.hazard_calculation_id)
    elif args.delete_risk_calculation is not None:
        del_risk_calc(args.delete_risk_calculation, args.yes)
    # import
    elif args.load_gmf is not None:
        with open(args.load_gmf) as f:
            out = import_gmf_scenario(f)
            print 'Added output id=%d of type %s; hazard_calculation_id=%d'\
                % (out.id, out.output_type, out.oq_job.hazard_calculation.id)
    elif args.load_curve is not None:
        with open(args.load_curve) as f:
            out = import_hazard_curves(f)
            print 'Added output id=%d of type %s; hazard_calculation_id=%d'\
                % (out.id, out.output_type, out.oq_job.hazard_calculation.id)
    elif args.list_imported_outputs:
        list_imported_outputs()
    elif args.delete_uncompleted_calculations:
        delete_uncompleted_calculations()
    elif args.save_hazard_calculation:
        save_hazards.main(*args.save_hazard_calculation)
    elif args.load_hazard_calculation:
        hc_ids = load_hazards.hazard_load(
            models.getcursor('admin').connection, args.load_hazard_calculation)
        print "Load hazard calculation with IDs: %s" % hc_ids
    else:
        arg_parser.print_usage()
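A hedged smoke-test sketch for the dispatcher above; the command-line flag spelling is inferred from the argparse attribute names and is an assumption:

import sys

# listing calculations is a read-only operation, so it makes a safe probe
sys.argv = ['openquake', '--list-hazard-calculations']
main()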
Example #44
import os
import mock
import unittest
import psycopg2
import importlib
from contextlib import contextmanager

from openquake.engine.db.models import getcursor
from openquake.engine.db.upgrade_manager import (
    upgrade_db, version_db, what_if_I_upgrade, DuplicatedVersion)

conn = getcursor("admin").connection
pkg = "openquake.engine.tests.db.upgrades"
upgrader = importlib.import_module(pkg).upgrader


def count(conn, tablename):
    curs = conn.cursor()
    curs.execute("SELECT COUNT(*) FROM %s" % tablename)
    return curs.fetchall()[0][0]


@contextmanager
def temp_script(name, content):
    fname = os.path.join(upgrader.upgrade_dir, name)
    with open(fname, "w") as s:
        s.write(content)
    try:
        yield
    finally:
        os.remove(fname)
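A sketch of how the fixtures above could exercise DuplicatedVersion, assuming the upgrade scripts are versioned by their numeric filename prefix and that upgrade_db accepts just the connection, as in the CLI snippets (both assumptions):

def test_duplicated_version_sketch():
    # two scripts claiming version 0001 should raise DuplicatedVersion
    with temp_script("0001-first.sql", "SELECT 1;"):
        with temp_script("0001-second.sql", "SELECT 2;"):
            try:
                upgrade_db(conn)
            except DuplicatedVersion:
                pass  # expected
            else:
                raise AssertionError("DuplicatedVersion not raised")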
Example #45
def copy_output(platform_connection, output, foreign_calculation_id):
    """
    Copy `output` data from the engine database to the platform one.

    :param platform_connection: a psycopg2 connection handler
    :param output: a :class:`openquake.engine.db.models.Output` object
    :param foreign_calculation_id: the id of the foreign (platform) calculation
    """

    # the workflow is the following:
    # 1) Insert a pointer to the output into the output_layer table
    # 2) Create a temporary table on the platform
    # 3) Copy data from the engine to a temporary file
    # 4) Copy data to the temporary table from the temporary file
    # 5) Move data from the temporary table to the persistent one
    #    by considering foreign key issues
    engine_cursor = oqe_models.getcursor('admin')
    platform_cursor = platform_connection.cursor()

    with tempfile.TemporaryFile() as temporary_file:
        try:
            platform_cursor.execute(
                """INSERT INTO
                   icebox_outputlayer(display_name, calculation_id, engine_id)
                   VALUES(%s, %s, %s) RETURNING id""",
                (output.display_name, foreign_calculation_id, output.id))

            [[output_layer_id]] = platform_cursor.fetchall()

            iface = DBINTERFACE.get(output.output_type)

            if iface is None:
                # FIXME. Implement proper logging
                print "Output type %s not supported" % output.output_type
                return

            logger.info("Copying to temporary stream")
            engine_cursor.copy_expert(
                """COPY (%s) TO STDOUT
                   WITH (FORMAT 'csv', HEADER true,
                         ENCODING 'utf8', DELIMITER '|')""" %
                (iface.export_query % {
                    'output_id': output.id,
                    'calculation_id': output.oq_job.id
                }), temporary_file)

            temporary_file.seek(0)

            temp_table = "temp_%s" % iface.target_table
            platform_cursor.execute("DROP TABLE IF EXISTS %s" % temp_table)
            platform_cursor.execute("CREATE TABLE %s(%s)" %
                                    (temp_table, iface.fields))

            import_query = """COPY %s FROM STDIN
                              WITH (FORMAT 'csv',
                                    HEADER true,
                                    ENCODING 'utf8',
                                    DELIMITER '|')""" % temp_table
            logger.info("Copying from temporary stream")
            platform_cursor.copy_expert(import_query, temporary_file)

            platform_cursor.execute(iface.import_query % output_layer_id)
            platform_cursor.execute("DROP TABLE IF EXISTS %s" % temp_table)
        except Exception as e:
            # FIXME. Implement proper logging
            print str(e)
            platform_connection.rollback()
            raise
        else:
            platform_connection.commit()
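A hypothetical driver for copy_output, assuming a psycopg2 connection to the platform database and that Output rows are reachable from a job through the default Django reverse relation (both assumptions):

import psycopg2
from openquake.engine.db import models as oqe_models

platform_connection = psycopg2.connect("dbname=platform")  # illustrative DSN
job = oqe_models.OqJob.objects.get(pk=123)  # illustrative job id
for output in job.output_set.all():
    copy_output(platform_connection, output, foreign_calculation_id=42)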
Example #46
import os
import mock
import unittest
import psycopg2
import importlib
from contextlib import contextmanager

from openquake.engine.db.models import getcursor
from openquake.engine.db.upgrade_manager import (upgrade_db, version_db,
                                                 what_if_I_upgrade,
                                                 VersionTooSmall,
                                                 DuplicatedVersion)

conn = getcursor('admin').connection
pkg = 'openquake.engine.tests.db.upgrades'
upgrader = importlib.import_module(pkg).upgrader


def count(conn, tablename):
    curs = conn.cursor()
    curs.execute('SELECT COUNT(*) FROM %s' % tablename)
    return curs.fetchall()[0][0]


@contextmanager
def temp_script(name, content):
    fname = os.path.join(upgrader.upgrade_dir, name)
    with open(fname, 'w') as s:
        s.write(content)
    try:
        yield
    finally:
        os.remove(fname)