Example #1
    def test_provenance_without_aggregation(self):
        """Test provenance of impact function without aggregation."""
        hazard_layer = load_test_vector_layer(
            'gisv4', 'hazard', 'classified_vector.geojson')
        exposure_layer = load_test_vector_layer(
            'gisv4', 'exposure', 'building-points.geojson')

        hazard = definition(hazard_layer.keywords['hazard'])
        exposure = definition(exposure_layer.keywords['exposure'])
        hazard_category = definition(hazard_layer.keywords['hazard_category'])

        expected_provenance = {
            'gdal_version': gdal.__version__,
            'host_name': gethostname(),
            'map_title': get_map_title(hazard, exposure, hazard_category),
            'map_legend_title': exposure['layer_legend_title'],
            'inasafe_version': get_version(),
            'pyqt_version': PYQT_VERSION_STR,
            'qgis_version': QGis.QGIS_VERSION,
            'qt_version': QT_VERSION_STR,
            'user': getpass.getuser(),
            'os': readable_os_version(),
            'aggregation_layer': None,
            'aggregation_layer_id': None,
            'exposure_layer': exposure_layer.source(),
            'exposure_layer_id': exposure_layer.id(),
            'hazard_layer': hazard_layer.source(),
            'hazard_layer_id': hazard_layer.id(),
            'analysis_question': get_analysis_question(hazard, exposure),
            'aggregation_keywords': None,
            'exposure_keywords': deepcopy(exposure_layer.keywords),
            'hazard_keywords': deepcopy(hazard_layer.keywords),
        }

        # Set up impact function
        impact_function = ImpactFunction()
        impact_function.exposure = exposure_layer
        impact_function.hazard = hazard_layer
        status, message = impact_function.prepare()
        self.assertEqual(PREPARE_SUCCESS, status, message)
        status, message = impact_function.run()
        self.assertEqual(ANALYSIS_SUCCESS, status, message)

        self.maxDiff = None

        expected_provenance.update({
            'action_checklist': impact_function.action_checklist(),
            'analysis_extent': impact_function.analysis_extent.exportToWkt(),
            'impact_function_name': impact_function.name,
            'impact_function_title': impact_function.title,
            'notes': impact_function.notes(),
            'requested_extent': impact_function.requested_extent,
            'data_store_uri': impact_function.datastore.uri_path,
            'start_datetime': impact_function.start_datetime,
            'end_datetime': impact_function.end_datetime,
            'duration': impact_function.duration
        })

        self.assertDictEqual(expected_provenance, impact_function.provenance)
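
The provenance dictionary asserted above mixes plain strings, nested keyword dictionaries and datetime objects (start_datetime, end_datetime), so persisting it to disk needs a small serialiser. A minimal sketch using only the standard library; provenance_to_json is a hypothetical helper for illustration, not part of InaSAFE:

import json
from datetime import datetime


def provenance_to_json(provenance, path):
    """Write a provenance dictionary to a JSON file.

    datetime values such as start_datetime are not JSON serialisable by
    default, so they are converted to ISO 8601 strings; any other unknown
    type falls back to str().
    """
    def default(value):
        if isinstance(value, datetime):
            return value.isoformat()
        return str(value)

    with open(path, 'w') as handle:
        json.dump(provenance, handle, indent=2, default=default)
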
Example #2
    def test_provenance_without_aggregation(self):
        """Test provenance of impact function without aggregation."""
        hazard_layer = load_test_vector_layer('gisv4', 'hazard',
                                              'classified_vector.geojson')
        exposure_layer = load_test_vector_layer('gisv4', 'exposure',
                                                'building-points.geojson')

        hazard = definition(hazard_layer.keywords['hazard'])
        exposure = definition(exposure_layer.keywords['exposure'])
        hazard_category = definition(hazard_layer.keywords['hazard_category'])

        expected_provenance = {
            provenance_gdal_version['provenance_key']: gdal.__version__,
            provenance_host_name['provenance_key']: gethostname(),
            provenance_map_title['provenance_key']: get_map_title(
                hazard, exposure, hazard_category),
            provenance_map_legend_title['provenance_key']:
                exposure['layer_legend_title'],
            provenance_user['provenance_key']: getpass.getuser(),
            provenance_os['provenance_key']: readable_os_version(),
            provenance_pyqt_version['provenance_key']: PYQT_VERSION_STR,
            provenance_qgis_version['provenance_key']: QGis.QGIS_VERSION,
            provenance_qt_version['provenance_key']: QT_VERSION_STR,
            provenance_inasafe_version['provenance_key']: get_version(),
            provenance_aggregation_layer['provenance_key']: None,
            provenance_aggregation_layer_id['provenance_key']: None,
            provenance_exposure_layer['provenance_key']:
                exposure_layer.source(),
            provenance_exposure_layer_id['provenance_key']:
                exposure_layer.id(),
            provenance_hazard_layer['provenance_key']: hazard_layer.source(),
            provenance_hazard_layer_id['provenance_key']: hazard_layer.id(),
            provenance_analysis_question['provenance_key']:
                get_analysis_question(hazard, exposure),
            provenance_aggregation_keywords['provenance_key']: None,
            provenance_exposure_keywords['provenance_key']:
                deepcopy(exposure_layer.keywords),
            provenance_hazard_keywords['provenance_key']:
                deepcopy(hazard_layer.keywords),
        }

        # Set up impact function
        impact_function = ImpactFunction()
        impact_function.exposure = exposure_layer
        impact_function.hazard = hazard_layer
        status, message = impact_function.prepare()
        self.assertEqual(PREPARE_SUCCESS, status, message)
        status, message = impact_function.run()
        self.assertEqual(ANALYSIS_SUCCESS, status, message)

        self.maxDiff = None

        expected_provenance.update({
            provenance_action_checklist['provenance_key']:
                impact_function.action_checklist(),
            provenance_analysis_extent['provenance_key']:
                impact_function.analysis_extent.exportToWkt(),
            provenance_impact_function_name['provenance_key']:
                impact_function.name,
            provenance_impact_function_title['provenance_key']:
                impact_function.title,
            provenance_notes['provenance_key']: impact_function.notes(),
            provenance_requested_extent['provenance_key']:
                impact_function.requested_extent,
            provenance_data_store_uri['provenance_key']:
                impact_function.datastore.uri_path,
            provenance_start_datetime['provenance_key']:
                impact_function.start_datetime,
            provenance_end_datetime['provenance_key']:
                impact_function.end_datetime,
            provenance_duration['provenance_key']: impact_function.duration
        })

        self.assertDictContainsSubset(expected_provenance,
                                      impact_function.provenance)

        output_layer_provenance_keys = [
            provenance_layer_exposure_summary['provenance_key'],
            provenance_layer_aggregate_hazard_impacted['provenance_key'],
            provenance_layer_aggregation_summary['provenance_key'],
            provenance_layer_analysis_impacted['provenance_key'],
            provenance_layer_exposure_summary_table['provenance_key']
        ]

        for key in output_layer_provenance_keys:
            self.assertIn(key, impact_function.provenance.keys())
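
Example #1 hard-codes the provenance keys as string literals ('gdal_version', 'host_name', ...), while this example looks the same keys up through provenance_* definition dictionaries. A hedged sketch of the shape such a definition presumably has, inferred from comparing the two tests rather than copied from InaSAFE:

# Inferred shape only: the real InaSAFE definitions likely carry more
# fields (name, description, ...); the tests above only rely on the
# 'provenance_key' entry.
provenance_gdal_version = {
    'key': 'provenance_gdal_version',  # assumed identifier field
    'provenance_key': 'gdal_version',
}

# The test then indexes the definition instead of repeating the string:
assert provenance_gdal_version['provenance_key'] == 'gdal_version'
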
Example #3
    def test_multi_exposure(self):
        """Test we can run a multi exposure analysis."""
        hazard_layer = load_test_vector_layer('gisv4', 'hazard',
                                              'classified_vector.geojson')
        building_layer = load_test_vector_layer('gisv4', 'exposure',
                                                'building-points.geojson')
        population_layer = load_test_vector_layer('gisv4', 'exposure',
                                                  'population.geojson')
        roads_layer = load_test_vector_layer('gisv4', 'exposure',
                                             'roads.geojson')
        aggregation_layer = load_test_vector_layer('gisv4', 'aggregation',
                                                   'small_grid.geojson')

        impact_function = MultiExposureImpactFunction()
        impact_function.hazard = hazard_layer
        impact_function.exposures = [
            building_layer, population_layer, roads_layer
        ]
        impact_function.aggregation = aggregation_layer

        code, message = impact_function.prepare()
        self.assertEqual(code, PREPARE_SUCCESS, message)

        code, message, exposure = impact_function.run()
        self.assertEqual(code, ANALYSIS_SUCCESS, message)

        # Test provenance
        hazard = definition(hazard_layer.keywords['hazard'])
        # exposure = definition(exposure_layer.keywords['exposure'])
        hazard_category = definition(hazard_layer.keywords['hazard_category'])

        expected_provenance = {
            provenance_gdal_version['provenance_key']: gdal.__version__,
            provenance_host_name['provenance_key']: gethostname(),
            provenance_user['provenance_key']: getpass.getuser(),
            provenance_os['provenance_key']: readable_os_version(),
            provenance_pyqt_version['provenance_key']: PYQT_VERSION_STR,
            provenance_qgis_version['provenance_key']: qgis_version(),
            provenance_qt_version['provenance_key']: QT_VERSION_STR,
            provenance_inasafe_version['provenance_key']: get_version(),
            provenance_aggregation_layer['provenance_key']:
                aggregation_layer.source(),
            provenance_aggregation_layer_id['provenance_key']:
                aggregation_layer.id(),
            # provenance_exposure_layer['provenance_key']:
            #     exposure_layer.source(),
            # provenance_exposure_layer_id['provenance_key']:
            #     exposure_layer.id(),
            provenance_hazard_layer['provenance_key']: hazard_layer.source(),
            provenance_hazard_layer_id['provenance_key']: hazard_layer.id(),
            provenance_aggregation_keywords['provenance_key']:
                deepcopy(aggregation_layer.keywords),
            # provenance_exposure_keywords['provenance_key']:
            #     deepcopy(exposure_layer.keywords),
            provenance_hazard_keywords['provenance_key']:
                deepcopy(hazard_layer.keywords),
        }

        self.maxDiff = None

        expected_provenance.update({
            provenance_analysis_extent['provenance_key']:
                impact_function.analysis_extent.asWkt(),
            provenance_impact_function_name['provenance_key']:
                impact_function.name,
            # provenance_requested_extent['provenance_key']:
            #     impact_function.requested_extent,
            provenance_data_store_uri['provenance_key']:
                impact_function.datastore.uri_path,
            provenance_start_datetime['provenance_key']:
                impact_function.start_datetime,
            provenance_end_datetime['provenance_key']:
                impact_function.end_datetime,
            provenance_duration['provenance_key']: impact_function.duration
        })

        self.assertDictContainsSubset(expected_provenance,
                                      impact_function.provenance)

        output_layer_provenance_keys = [
            provenance_layer_aggregation_summary['provenance_key'],
            provenance_layer_analysis_impacted['provenance_key'],
        ]

        for key in output_layer_provenance_keys:
            self.assertIn(key, list(impact_function.provenance.keys()))

        # Test serialization/deserialization
        output_metadata = impact_function.aggregation_summary.keywords
        new_impact_function = MultiExposureImpactFunction. \
            load_from_output_metadata(output_metadata)
        self.assertEqualImpactFunction(impact_function, new_impact_function)

        # Check that the analysis layer id is preserved after reloading
        old_analysis_layer_id = impact_function.provenance[
            provenance_layer_analysis_impacted['provenance_key']]
        new_analysis_layer_id = new_impact_function.provenance[
            provenance_layer_analysis_impacted['provenance_key']]
        self.assertEqual(old_analysis_layer_id, new_analysis_layer_id)
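
The final assertion only covers the analysis layer id. An illustrative extension of the same round-trip check, reusing the names already defined in the test above; this is a sketch that would sit at the end of test_multi_exposure, not part of the original:

        # Illustrative extension: every output layer key recorded in the
        # provenance should survive the save/reload round trip unchanged.
        for key in output_layer_provenance_keys:
            self.assertEqual(
                impact_function.provenance[key],
                new_impact_function.provenance[key],
                'Provenance mismatch for %s after reloading.' % key)
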
Example #4
def setup_logger(logger_name, log_file=None, sentry_url=None):
    """Run once when the module is loaded and enable logging.

    :param logger_name: The logger name that we want to set up.
    :type logger_name: str

    :param log_file: Optional full path to a file to write logs to.
    :type log_file: str

    :param sentry_url: Optional URL to the Sentry API for remote
        logging. Defaults to the URL defined in safe.definitions.sentry.py,
        which is the Sentry project for InaSAFE desktop.
    :type sentry_url: str

    Borrowed heavily from this:
    http://docs.python.org/howto/logging-cookbook.html

    Now to log a message do::

       LOGGER.debug('Some debug message')

    .. note:: The file logs are written to the inasafe user tmp dir e.g.:
       /tmp/inasafe/23-08-2012/timlinux/logs/inasafe.log
    """
    logger = logging.getLogger(logger_name)
    logging_level = int(os.environ.get('INASAFE_LOGGING_LEVEL', logging.DEBUG))
    logger.setLevel(logging_level)
    default_handler_level = logging_level

    # create formatter that will be added to the handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # create file handler which logs even debug messages
    # (ariel): Make this log to /var/log/safe.log instead of
    #               /var/log/syslog
    # (Tim) Ole and I discussed this - we prefer to log into the
    # user's temporary working directory.
    inasafe_log_path = log_file_path()
    if log_file is None:
        file_handler = logging.FileHandler(inasafe_log_path)
    else:
        file_handler = logging.FileHandler(log_file)
    file_handler.setLevel(default_handler_level)
    file_handler.setFormatter(formatter)
    add_logging_handler_once(logger, file_handler)

    if 'MUTE_LOGS' not in os.environ:
        # create console handler with a higher log level
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(formatter)
        add_logging_handler_once(logger, console_handler)

    # create a QGIS handler
    qgis_handler = QgsLogHandler()
    qgis_handler.setFormatter(formatter)
    add_logging_handler_once(logger, qgis_handler)

    # Sentry handler - this is optional, hence the localised import.
    # If raven is available, logging messages will be sent to
    # http://sentry.kartoza.com
    # Only exceptions are logged there. Sentry is enabled when either:
    #  * the environment variable INASAFE_SENTRY is set (any value), or
    #  * 'inasafe/useSentry' is enabled in QSettings.
    qsettings_flag = QSettings().value('inasafe/useSentry', False, type=bool)
    environment_flag = 'INASAFE_SENTRY' in os.environ

    if environment_flag or qsettings_flag:
        if sentry_url is None:
            sentry_url = PRODUCTION_SERVER

        tags = dict()
        tags[provenance_gdal_version['provenance_key']] = gdal.__version__
        tags[provenance_os['provenance_key']] = readable_os_version()
        qgis_short_version = provenance_qgis_version['provenance_key']
        qgis_full_version = qgis_short_version + '_full'
        versions = [str(v) for v in qgis_version_detailed()]
        tags[qgis_short_version] = '.'.join(versions[0:2])
        tags[qgis_full_version] = '.'.join(versions[0:3])
        tags[provenance_qt_version['provenance_key']] = QT_VERSION_STR

        hostname = os.environ.get('HOSTNAME_SENTRY', socket.gethostname())

        sentry_handler = SentryHandler(
            dsn=sentry_url,
            name=hostname,
            release=get_version(),
            tags=tags,
        )
        sentry_handler.setFormatter(formatter)
        sentry_handler.setLevel(logging.ERROR)
        if add_logging_handler_once(logger, sentry_handler):
            logger.debug('Sentry logging enabled in safe')
    else:
        logger.debug('Sentry logging disabled in safe')
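
A minimal usage sketch for the function above. The import path safe.utilities.custom_logging is an assumption and may differ in your checkout; the log file path is only an example:

import logging

# Assumed import path for the setup_logger shown above.
from safe.utilities.custom_logging import setup_logger

setup_logger('InaSAFE', log_file='/tmp/inasafe-example.log')
LOGGER = logging.getLogger('InaSAFE')

LOGGER.debug('Some debug message')    # written to the file, console and QGIS log
LOGGER.error('Something went wrong')  # also forwarded to Sentry when enabled
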
Example #5
    def test_provenance_without_aggregation(self):
        """Test provenance of impact function without aggregation."""
        hazard_layer = load_test_vector_layer(
            'gisv4', 'hazard', 'classified_vector.geojson')
        exposure_layer = load_test_vector_layer(
            'gisv4', 'exposure', 'building-points.geojson')

        hazard = definition(hazard_layer.keywords['hazard'])
        exposure = definition(exposure_layer.keywords['exposure'])
        hazard_category = definition(hazard_layer.keywords['hazard_category'])

        expected_provenance = {
            provenance_gdal_version['provenance_key']: gdal.__version__,
            provenance_host_name['provenance_key']: gethostname(),
            provenance_map_title['provenance_key']: get_map_title(
                hazard, exposure, hazard_category),
            provenance_map_legend_title['provenance_key']: exposure[
                'layer_legend_title'],
            provenance_user['provenance_key']: getpass.getuser(),
            provenance_os['provenance_key']: readable_os_version(),
            provenance_pyqt_version['provenance_key']: PYQT_VERSION_STR,
            provenance_qgis_version['provenance_key']: QGis.QGIS_VERSION,
            provenance_qt_version['provenance_key']: QT_VERSION_STR,
            provenance_inasafe_version['provenance_key']: get_version(),
            provenance_aggregation_layer['provenance_key']: None,
            provenance_aggregation_layer_id['provenance_key']: None,
            provenance_exposure_layer['provenance_key']:
                exposure_layer.source(),
            provenance_exposure_layer_id['provenance_key']:
                exposure_layer.id(),
            provenance_hazard_layer['provenance_key']: hazard_layer.source(),
            provenance_hazard_layer_id['provenance_key']: hazard_layer.id(),
            provenance_analysis_question['provenance_key']:
                get_analysis_question(hazard, exposure),
            provenance_aggregation_keywords['provenance_key']: None,
            provenance_exposure_keywords['provenance_key']:
                deepcopy(exposure_layer.keywords),
            provenance_hazard_keywords['provenance_key']: deepcopy(
                hazard_layer.keywords),
        }

        # Set up impact function
        impact_function = ImpactFunction()
        impact_function.exposure = exposure_layer
        impact_function.hazard = hazard_layer
        status, message = impact_function.prepare()
        self.assertEqual(PREPARE_SUCCESS, status, message)
        status, message = impact_function.run()
        self.assertEqual(ANALYSIS_SUCCESS, status, message)

        self.maxDiff = None

        expected_provenance.update({
            provenance_action_checklist['provenance_key']:
                impact_function.action_checklist(),
            provenance_analysis_extent['provenance_key']:
                impact_function.analysis_extent.exportToWkt(),
            provenance_impact_function_name['provenance_key']:
                impact_function.name,
            provenance_impact_function_title['provenance_key']:
                impact_function.title,
            provenance_notes['provenance_key']: impact_function.notes(),
            provenance_requested_extent['provenance_key']:
                impact_function.requested_extent,
            provenance_data_store_uri['provenance_key']:
                impact_function.datastore.uri_path,
            provenance_start_datetime['provenance_key']:
                impact_function.start_datetime,
            provenance_end_datetime['provenance_key']:
                impact_function.end_datetime,
            provenance_duration['provenance_key']: impact_function.duration
        })

        self.assertDictContainsSubset(
            expected_provenance, impact_function.provenance)

        output_layer_provenance_keys = [
            provenance_layer_exposure_summary['provenance_key'],
            provenance_layer_aggregate_hazard_impacted['provenance_key'],
            provenance_layer_aggregation_summary['provenance_key'],
            provenance_layer_analysis_impacted['provenance_key'],
            provenance_layer_exposure_summary_table['provenance_key']
        ]

        for key in output_layer_provenance_keys:
            self.assertIn(key, impact_function.provenance.keys())
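
Note that this test calls analysis_extent.exportToWkt() while Example #3 calls asWkt(): the former is the QGIS 2 QgsGeometry API and the latter its QGIS 3 replacement. A small compatibility shim, offered as a sketch only:

def extent_to_wkt(geometry):
    """Return the WKT of a QgsGeometry under either QGIS 2 or QGIS 3.

    QGIS 3 renamed QgsGeometry.exportToWkt() to asWkt(); prefer the new
    name and fall back to the old one when it is not available.
    """
    if hasattr(geometry, 'asWkt'):
        return geometry.asWkt()
    return geometry.exportToWkt()
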
Example #6
    def test_provenance_without_aggregation(self):
        """Test provenance of impact function without aggregation."""
        hazard_layer = load_test_vector_layer('gisv4', 'hazard',
                                              'classified_vector.geojson')
        exposure_layer = load_test_vector_layer('gisv4', 'exposure',
                                                'building-points.geojson')

        hazard = definition(hazard_layer.keywords['hazard'])
        exposure = definition(exposure_layer.keywords['exposure'])
        hazard_category = definition(hazard_layer.keywords['hazard_category'])

        expected_provenance = {
            'gdal_version': gdal.__version__,
            'host_name': gethostname(),
            'map_title': get_map_title(hazard, exposure, hazard_category),
            'map_legend_title': exposure['layer_legend_title'],
            'inasafe_version': get_version(),
            'pyqt_version': PYQT_VERSION_STR,
            'qgis_version': QGis.QGIS_VERSION,
            'qt_version': QT_VERSION_STR,
            'user': getpass.getuser(),
            'os': readable_os_version(),
            'aggregation_layer': None,
            'aggregation_layer_id': None,
            'exposure_layer': exposure_layer.source(),
            'exposure_layer_id': exposure_layer.id(),
            'hazard_layer': hazard_layer.source(),
            'hazard_layer_id': hazard_layer.id(),
            'analysis_question': get_analysis_question(hazard, exposure),
            'aggregation_keywords': None,
            'exposure_keywords': deepcopy(exposure_layer.keywords),
            'hazard_keywords': deepcopy(hazard_layer.keywords),
        }

        # Set up impact function
        impact_function = ImpactFunction()
        impact_function.exposure = exposure_layer
        impact_function.hazard = hazard_layer
        status, message = impact_function.prepare()
        self.assertEqual(PREPARE_SUCCESS, status, message)
        status, message = impact_function.run()
        self.assertEqual(ANALYSIS_SUCCESS, status, message)

        self.maxDiff = None

        expected_provenance.update({
            'action_checklist': impact_function.action_checklist(),
            'analysis_extent': impact_function.analysis_extent.exportToWkt(),
            'impact_function_name': impact_function.name,
            'impact_function_title': impact_function.title,
            'notes': impact_function.notes(),
            'requested_extent': impact_function.requested_extent,
            'data_store_uri': impact_function.datastore.uri_path,
            'start_datetime': impact_function.start_datetime,
            'end_datetime': impact_function.end_datetime,
            'duration': impact_function.duration
        })

        self.assertDictEqual(expected_provenance, impact_function.provenance)

    def test_multi_exposure(self):
        """Test we can run a multi exposure analysis."""
        hazard_layer = load_test_vector_layer(
            'gisv4', 'hazard', 'classified_vector.geojson')
        building_layer = load_test_vector_layer(
            'gisv4', 'exposure', 'building-points.geojson')
        population_layer = load_test_vector_layer(
            'gisv4', 'exposure', 'population.geojson')
        roads_layer = load_test_vector_layer(
            'gisv4', 'exposure', 'roads.geojson')
        aggregation_layer = load_test_vector_layer(
            'gisv4', 'aggregation', 'small_grid.geojson')

        impact_function = MultiExposureImpactFunction()
        impact_function.hazard = hazard_layer
        impact_function.exposures = [
            building_layer, population_layer, roads_layer]
        impact_function.aggregation = aggregation_layer

        code, message = impact_function.prepare()
        self.assertEqual(code, PREPARE_SUCCESS, message)

        code, message, exposure = impact_function.run()
        self.assertEqual(code, ANALYSIS_SUCCESS, message)

        # Test provenance
        hazard = definition(hazard_layer.keywords['hazard'])
        # exposure = definition(exposure_layer.keywords['exposure'])
        hazard_category = definition(hazard_layer.keywords['hazard_category'])

        expected_provenance = {
            provenance_gdal_version['provenance_key']: gdal.__version__,
            provenance_host_name['provenance_key']: gethostname(),
            provenance_user['provenance_key']: getpass.getuser(),
            provenance_os['provenance_key']: readable_os_version(),
            provenance_pyqt_version['provenance_key']: PYQT_VERSION_STR,
            provenance_qgis_version['provenance_key']: qgis_version(),
            provenance_qt_version['provenance_key']: QT_VERSION_STR,
            provenance_inasafe_version['provenance_key']: get_version(),
            provenance_aggregation_layer['provenance_key']:
                aggregation_layer.source(),
            provenance_aggregation_layer_id['provenance_key']:
                aggregation_layer.id(),
            # provenance_exposure_layer['provenance_key']:
            #     exposure_layer.source(),
            # provenance_exposure_layer_id['provenance_key']:
            #     exposure_layer.id(),
            provenance_hazard_layer['provenance_key']: hazard_layer.source(),
            provenance_hazard_layer_id['provenance_key']: hazard_layer.id(),
            provenance_aggregation_keywords['provenance_key']: deepcopy(
                aggregation_layer.keywords),
            # provenance_exposure_keywords['provenance_key']:
            #     deepcopy(exposure_layer.keywords),
            provenance_hazard_keywords['provenance_key']: deepcopy(
                hazard_layer.keywords),
        }

        self.maxDiff = None

        expected_provenance.update({
            provenance_analysis_extent['provenance_key']:
                impact_function.analysis_extent.asWkt(),
            provenance_impact_function_name['provenance_key']:
                impact_function.name,
            # provenance_requested_extent['provenance_key']: impact_function.
            #     requested_extent,
            provenance_data_store_uri['provenance_key']:
                impact_function.datastore.uri_path,
            provenance_start_datetime['provenance_key']:
                impact_function.start_datetime,
            provenance_end_datetime['provenance_key']:
                impact_function.end_datetime,
            provenance_duration['provenance_key']: impact_function.duration
        })

        self.assertDictContainsSubset(
            expected_provenance, impact_function.provenance)

        output_layer_provenance_keys = [
            provenance_layer_aggregation_summary['provenance_key'],
            provenance_layer_analysis_impacted['provenance_key'],
        ]

        for key in output_layer_provenance_keys:
            self.assertIn(key, list(impact_function.provenance.keys()))

        # Test serialization/deserialization
        output_metadata = impact_function.aggregation_summary.keywords
        new_impact_function = MultiExposureImpactFunction. \
            load_from_output_metadata(output_metadata)
        self.assertEqualImpactFunction(
            impact_function, new_impact_function)

        # Check that the analysis layer id is preserved after reloading
        old_analysis_layer_id = impact_function.provenance[
            provenance_layer_analysis_impacted['provenance_key']]
        new_analysis_layer_id = new_impact_function.provenance[
            provenance_layer_analysis_impacted['provenance_key']]
        self.assertEqual(old_analysis_layer_id, new_analysis_layer_id)
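
assertDictContainsSubset, used throughout these tests, has been deprecated since Python 3.2 and removed in recent Python releases, so an eventual port needs a replacement. A minimal sketch of an equivalent subset check built only on standard unittest assertions; assert_dict_contains_subset is a hypothetical helper name:

# Equivalent to assertDictContainsSubset(expected, actual): every expected
# key must be present in the actual dictionary with the same value.
def assert_dict_contains_subset(test_case, expected, actual):
    for key, value in expected.items():
        test_case.assertIn(key, actual)
        test_case.assertEqual(
            value, actual[key], 'Value mismatch for key %r.' % key)
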