Example 1
    def XXtest_shakemap_population_exposure(self):
        """Population exposed to groundshaking matches USGS numbers
        """

        hazardfile = os.path.join(TEST_DATA, 'shakemap_sumatra_20110129.tif')
        hazard_layer = save_to_geonode(hazardfile, overwrite=True,
                                       user=self.user)
        hazard_name = '%s:%s' % (hazard_layer.workspace, hazard_layer.name)

        exposurefile = os.path.join(TEST_DATA, 'population_indonesia_2008.tif')
        exposure_layer = save_to_geonode(exposurefile, overwrite=True,
                                         user=self.user)
        exposure_name = '%s:%s' % (exposure_layer.workspace,
                                   exposure_layer.name)


        #with warnings.catch_warnings():
        #    warnings.simplefilter('ignore')

        c = Client()
        rv = c.post('/api/v1/calculate/', data=dict(
                hazard_server=INTERNAL_SERVER_URL,
                hazard=hazard_name,
                exposure_server=INTERNAL_SERVER_URL,
                exposure=exposure_name,
                bbox=get_bounding_box_string(hazardfile),
                impact_function='USGSFatalityFunction',
                impact_level=10,
                keywords='test,shakemap,usgs',
                ))

        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv['Content-Type'], 'application/json')
        data = json.loads(rv.content)
        assert 'hazard_layer' in data.keys()
        assert 'exposure_layer' in data.keys()
        assert 'run_duration' in data.keys()
        assert 'run_date' in data.keys()
        assert 'layer' in data.keys()

        # Download result and check
        layer_name = data['layer'].split('/')[-1]

        result_layer = download(INTERNAL_SERVER_URL,
                                layer_name,
                                get_bounding_box(hazardfile))
        assert os.path.exists(result_layer.filename)

        # Read hazard data for reference
        hazard_raster = read_layer(hazardfile)
        H = hazard_raster.get_data()
        mmi_min, mmi_max = hazard_raster.get_extrema()

        # Read calculated result
        impact_raster = read_layer(result_layer.filename)
        I = impact_raster.get_data()
Example 2
    def test_interpolation_lembang(self):
        """Interpolation using Lembang data set
        """

        # Name file names for hazard level, exposure and expected fatalities
        hazard_filename = '%s/lembang_mmi_hazmap.asc' % TESTDATA
        exposure_filename = '%s/lembang_schools.shp' % TESTDATA

        # Read input data
        hazard_raster = read_layer(hazard_filename)
        A = hazard_raster.get_data()
        mmi_min, mmi_max = hazard_raster.get_extrema()

        exposure_vector = read_layer(exposure_filename)
        coordinates = exposure_vector.get_geometry()
        attributes = exposure_vector.get_data()

        # Test riab's interpolation function
        I = hazard_raster.interpolate(exposure_vector,
                                      name='mmi')
        Icoordinates = I.get_geometry()
        Iattributes = I.get_data()
        assert numpy.allclose(Icoordinates, coordinates)

        # Check that interpolated MMI was done as expected
        fid = open('%s/lembang_schools_percentage_loss_and_mmi.txt' % TESTDATA)
        reference_points = []
        MMI = []
        DAM = []
        for line in fid.readlines()[1:]:
            fields = line.strip().split(',')

            lon = float(fields[4][1:-1])
            lat = float(fields[3][1:-1])
            mmi = float(fields[-1][1:-1])

            reference_points.append((lon, lat))
            MMI.append(mmi)

        # Verify that coordinates are consistent
        msg = 'Interpolated coordinates do not match those of test data'
        assert numpy.allclose(Icoordinates, reference_points), msg

        # Verify interpolated MMI with test result
        for i in range(len(MMI)):
            calculated_mmi = Iattributes[i]['mmi']

            # Check that interpolated points are within range
            msg = ('Interpolated mmi %f was outside extrema: '
                   '[%f, %f]. ' % (calculated_mmi, mmi_min, mmi_max))
            assert mmi_min <= calculated_mmi <= mmi_max, msg

            # Check that result is within 2% - this is good enough
            # as this was calculated using EQRM and thus different.
            assert numpy.allclose(calculated_mmi, MMI[i], rtol=0.02)
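
The Lembang test above exercises raster-to-point interpolation and then checks that interpolated values stay within the raster's extrema. That is the property bilinear interpolation guarantees by construction; a minimal sketch on a regular grid follows, where grid, xs and ys are hypothetical stand-ins for the hazard raster (ascending pixel-centre coordinates, boundary handling omitted):

import numpy

def bilinear(grid, xs, ys, x, y):
    # xs, ys: ascending pixel-centre coordinates of columns and rows
    j = numpy.searchsorted(xs, x) - 1  # column of the cell left of x
    i = numpy.searchsorted(ys, y) - 1  # row of the cell below y
    tx = (x - xs[j]) / (xs[j + 1] - xs[j])
    ty = (y - ys[i]) / (ys[i + 1] - ys[i])
    return ((1 - tx) * (1 - ty) * grid[i, j] +
            tx * (1 - ty) * grid[i, j + 1] +
            (1 - tx) * ty * grid[i + 1, j] +
            tx * ty * grid[i + 1, j + 1])

Because the result is a convex combination of the four surrounding cell values, it can never fall outside [mmi_min, mmi_max], which is exactly the range assertion in the test.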
Example 3
    def XXtest_shakemap_population_exposure(self):
        """Population exposed to groundshaking matches USGS numbers
        """

        hazardfile = os.path.join(TESTDATA, 'shakemap_sumatra_20110129.tif')
        hazard_layer = save_to_geonode(hazardfile, overwrite=True,
                                       user=self.user)
        hazard_name = '%s:%s' % (hazard_layer.workspace, hazard_layer.name)

        exposurefile = os.path.join(TESTDATA, 'population_indonesia_2008.tif')
        exposure_layer = save_to_geonode(exposurefile, overwrite=True,
                                         user=self.user)
        exposure_name = '%s:%s' % (exposure_layer.workspace,
                                   exposure_layer.name)

        #with warnings.catch_warnings():
        #    warnings.simplefilter('ignore')
        c = Client()
        rv = c.post('/impact/api/calculate/', data=dict(
                hazard_server=INTERNAL_SERVER_URL,
                hazard=hazard_name,
                exposure_server=INTERNAL_SERVER_URL,
                exposure=exposure_name,
                bbox=get_bounding_box_string(hazardfile),
                impact_function='USGSFatalityFunction',
                keywords='test,shakemap,usgs'))

        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv['Content-Type'], 'application/json')
        data = json.loads(rv.content)
        assert 'hazard_layer' in data.keys()
        assert 'exposure_layer' in data.keys()
        assert 'run_duration' in data.keys()
        assert 'run_date' in data.keys()
        assert 'layer' in data.keys()

        # Download result and check
        layer_name = data['layer'].split('/')[-1]

        result_layer = download(INTERNAL_SERVER_URL,
                                layer_name,
                                get_bounding_box(hazardfile))
        assert os.path.exists(result_layer.filename)

        # Read hazard data for reference
        hazard_raster = read_layer(hazardfile)
        H = hazard_raster.get_data()
        mmi_min, mmi_max = hazard_raster.get_extrema()

        # Read calculated result
        impact_raster = read_layer(result_layer.filename)
        I = impact_raster.get_data()
Example 4
    def test_no_projection(self):
        """Raster layers with no projection causes Exception to be raised
        """

        rastername = 'grid_without_projection.asc'
        filename = '%s/%s' % (TESTDATA, rastername)
        try:
            read_layer(filename)
        except RuntimeError:
            pass
        else:
            msg = 'Should have raised RuntimeError'
            raise Exception(msg)
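
The try/except/else pattern above is this project's idiom for expected exceptions. Since these test classes derive from unittest.TestCase (hence the self.assertEqual calls elsewhere), the same check can be written more compactly with the standard assertRaises helper:

    def test_no_projection(self):
        """Raster layers with no projection cause RuntimeError"""
        filename = '%s/grid_without_projection.asc' % TESTDATA
        self.assertRaises(RuntimeError, read_layer, filename)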
Example 5
    def test_nodata_value(self):
        """NODATA value is correctly recorded in GDAL
        """

        # Read files with -9999 as nominated nodata value
        for rastername in ['Population_2010_clip.tif',
                           'Lembang_Earthquake_Scenario.asc',
                           'Earthquake_Ground_Shaking.asc']:

            filename = '%s/%s' % (TESTDATA, rastername)
            R = read_layer(filename)

            A = R.get_data()

            # Verify nodata value
            Amin = min(A.flat[:])
            msg = ('Raster must have -9999 as its minimum for this test. '
                   'We got %f for file %s' % (Amin, filename))
            assert Amin == -9999, msg

            # Verify that GDAL knows about this
            nodata = R.get_nodata_value()
            msg = ('File %s should have registered nodata '
                   'value %i but it was %s' % (filename, Amin, nodata))
            assert nodata == Amin, msg
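
The nan=True / nan=False flag seen throughout these tests controls whether the nodata sentinel is replaced by NaN. A small self-contained illustration of that substitution with plain numpy (a sketch, not the project's actual implementation):

import numpy

A = numpy.array([[1.0, -9999.0],
                 [3.0, 4.0]])
B = numpy.where(A == -9999, numpy.nan, A)  # nodata -> NaN

assert A.min() == -9999          # sentinel dominates the raw minimum
assert numpy.nanmin(B) == 1.0    # NaN-aware extrema ignore nodata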
Example 6
    def test_geotransform_from_geonode(self):
        """Geotransforms of GeoNode layers can be correctly determined
        """

        for filename in ['lembang_mmi_hazmap.asc',
                         'test_grid.asc']:

            # Upload file to GeoNode
            f = os.path.join(TESTDATA, filename)
            layer = save_to_geonode(f, user=self.user)

            # Read raster file and obtain reference resolution
            R = read_layer(f)
            ref_geotransform = R.get_geotransform()

            # Get geotransform from GeoNode
            layer_name = layer.typename
            metadata = get_metadata(INTERNAL_SERVER_URL, layer_name)

            geotransform_name = 'geotransform'
            msg = ('Could not find attribute "%s" in metadata. '
                   'Values are: %s' % (geotransform_name, metadata.keys()))
            assert geotransform_name in metadata, msg

            gn_geotransform = metadata[geotransform_name]
            msg = ('Geotransform obtained from GeoNode for layer %s '
                   'was not correct. I got %s but expected %s'
                   '' % (layer_name, gn_geotransform, ref_geotransform))
            assert numpy.allclose(ref_geotransform, gn_geotransform), msg
Example 7
    def test_bins(self):
        """Linear and quantile bins are correct
        """

        for filename in ['%s/population_padang_1.asc' % TESTDATA,
                         '%s/test_grid.asc' % TESTDATA]:

            R = read_layer(filename)
            min, max = R.get_extrema()

            for N in [2, 3, 5, 7, 10, 16]:
                linear_intervals = R.get_bins(N=N, quantiles=False)

                assert linear_intervals[0] == min
                assert linear_intervals[-1] == max

                d = (max - min) / N
                for i in range(N):
                    assert numpy.allclose(linear_intervals[i], min + i * d)

                quantiles = R.get_bins(N=N, quantiles=True)
                A = R.get_data(nan=True).flat[:]

                mask = numpy.logical_not(numpy.isnan(A))  # Omit NaN's
                l1 = len(A)
                A = A.compress(mask)
                l2 = len(A)

                if filename == '%s/test_grid.asc' % TESTDATA:
                    # Check that NaN's were removed
                    assert l1 == 35
                    assert l2 == 30

                # Assert that there are no NaN's
                assert not numpy.alltrue(numpy.isnan(A))

                number_of_elements = len(A)
                average_elements_per_bin = number_of_elements / N

                # Count elements in each bin and check
                i0 = quantiles[0]
                for i1 in quantiles[1:]:
                    count = numpy.sum((i0 < A) & (A < i1))
                    if i0 == quantiles[0]:
                        refcount = count

                    if i1 < quantiles[-1]:
                        # Number of elements in each bin must vary by no
                        # more than 1
                        assert abs(count - refcount) <= 1
                        assert abs(count - average_elements_per_bin) <= 3
                    else:
                        # The last bin is allowed to vary by more
                        pass

                    i0 = i1
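
The quantile branch of this test expects each bin to hold roughly the same number of elements. A hedged sketch of how such bin edges can be obtained with plain numpy (numpy.percentile stands in here for whatever get_bins(quantiles=True) does internally):

import numpy

A = numpy.random.rand(1000)  # data with NaN's already removed
N = 5
edges = [numpy.percentile(A, 100.0 * k / N) for k in range(N + 1)]

# Each open interval (edges[k], edges[k+1]) holds roughly len(A)/N elements
counts = [numpy.sum((edges[k] < A) & (A < edges[k + 1]))
          for k in range(N)]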
Example 8
    def test_vector_class(self):
        """Consistency of vector class for point data
        """

        # Read data file
        layername = 'lembang_schools.shp'
        filename = '%s/%s' % (TESTDATA, layername)
        V = read_layer(filename)

        # Make a smaller dataset
        V_ref = V.get_topN('FLOOR_AREA', 5)

        geometry = V_ref.get_geometry()
        data = V_ref.get_data()
        projection = V_ref.get_projection()

        # Create new object from test data
        V_new = Vector(data=data, projection=projection, geometry=geometry)

        # Check
        assert V_new == V_ref
        assert not V_new != V_ref

        # Write this new object, read it again and check
        tmp_filename = unique_filename(suffix='.shp')
        V_new.write_to_file(tmp_filename)

        V_tmp = read_layer(tmp_filename)
        assert V_tmp == V_ref
        assert not V_tmp != V_ref

        # Check that equality raises exception when type is wrong
        try:
            V_tmp == Raster()
        except TypeError:
            pass
        else:
            msg = 'Should have raised TypeError'
            raise Exception(msg)
Example 9
    def test_interpolation_tsunami(self):
        """Interpolation using tsunami data set
        """

        # Name file names for hazard level, exposure and expected fatalities
        hazard_filename = ('%s/tsunami_max_inundation_depth_BB_'
                           'geographic.asc' % TESTDATA)
        exposure_filename = ('%s/tsunami_exposure_BB.shp' % TESTDATA)

        # Read input data
        hazard_raster = read_layer(hazard_filename)
        A = hazard_raster.get_data()
        depth_min, depth_max = hazard_raster.get_extrema()

        exposure_vector = read_layer(exposure_filename)
        coordinates = exposure_vector.get_geometry()
        attributes = exposure_vector.get_data()

        # Test riab's interpolation function
        I = hazard_raster.interpolate(exposure_vector,
                                      name='depth')
        Icoordinates = I.get_geometry()
        Iattributes = I.get_data()
        assert numpy.allclose(Icoordinates, coordinates)

        # Verify interpolated values with test result
        for i in range(len(Icoordinates)):

            interpolated_depth = Iattributes[i]['depth']
            # Check that interpolated points are within range
            msg = ('Interpolated depth %f at point %i was outside extrema: '
                   '[%f, %f]. ' % (interpolated_depth, i,
                                   depth_min, depth_max))

            if not numpy.isnan(interpolated_depth):
                # FIXME (Ole): putting in tolerances for now. Remove when
                # new interpolation is implemented (issue #19)
                tol = 0.8
                assert depth_min - tol <= interpolated_depth <= depth_max, msg
Example 10
    def test_interpolation_tsunami(self):
        """Interpolation using tsunami data set works

        This is a test for issue #19 about interpolation overshoot
        """

        # Name file names for hazard level, exposure and expected fatalities
        hazard_filename = ('%s/tsunami_max_inundation_depth_BB_'
                           'geographic.asc' % TESTDATA)
        exposure_filename = ('%s/tsunami_exposure_BB.shp' % TESTDATA)

        # Read input data
        hazard_raster = read_layer(hazard_filename)
        A = hazard_raster.get_data()
        depth_min, depth_max = hazard_raster.get_extrema()

        exposure_vector = read_layer(exposure_filename)
        coordinates = exposure_vector.get_geometry()
        attributes = exposure_vector.get_data()

        # Test riab's interpolation function
        I = hazard_raster.interpolate(exposure_vector,
                                      name='depth')
        Icoordinates = I.get_geometry()
        Iattributes = I.get_data()
        assert numpy.allclose(Icoordinates, coordinates)

        # Verify interpolated values with test result
        for i in range(len(Icoordinates)):

            interpolated_depth = Iattributes[i]['depth']
            # Check that interpolated points are within range
            msg = ('Interpolated depth %f at point %i was outside extrema: '
                   '[%f, %f]. ' % (interpolated_depth, i,
                                   depth_min, depth_max))

            if not numpy.isnan(interpolated_depth):
                assert depth_min <= interpolated_depth <= depth_max, msg
Example 11
    def test_vector_extrema(self):
        """Vector extremum calculation works
        """

        for layername in ['lembang_schools.shp',
                          'tsunami_exposure_BB.shp']:

            filename = '%s/%s' % (TESTDATA, layername)
            L = read_layer(filename)

            if layername == 'tsunami_exposure_BB.shp':
                attributes = L.get_data()

                for name in ['STR_VALUE', 'CONT_VALUE']:
                    minimum, maximum = L.get_extrema(name)
                    assert minimum <= maximum

                    x = [a[name] for a in attributes]
                    assert numpy.allclose([min(x), max(x)],
                                          [minimum, maximum],
                                          rtol=1.0e-12, atol=1.0e-12)

            elif layername == 'lembang_schools.shp':
                minimum, maximum = L.get_extrema('FLOOR_AREA')
                assert minimum == maximum  # All identical
                assert maximum == 250

                try:
                    L.get_extrema('NONEXISTING_ATTRIBUTE_NAME_8462')
                except AssertionError:
                    pass
                else:
                    msg = ('Non existing attribute name should have '
                           'raised AssertionError')
                    raise Exception(msg)

                try:
                    L.get_extrema()
                except RuntimeError:
                    pass
                else:
                    msg = ('Missing attribute name should have '
                           'raised RuntimeError')
                    raise Exception(msg)
Example 12
    def test_native_raster_resolution(self):
        """Raster layer retains native resolution through Geoserver

        Raster layer can be uploaded and downloaded again with
        native resolution. This is one test for ticket #103
        """

        hazard_filename = ('%s/maumere_aos_depth_20m_land_wgs84.asc' %
                           TESTDATA)

        # Get reference values
        H = read_layer(hazard_filename)
        A_ref = H.get_data(nan=True)
        depth_min_ref, depth_max_ref = H.get_extrema()

        # Upload to internal geonode
        hazard_layer = save_to_geonode(hazard_filename, user=self.user)
        hazard_name = '%s:%s' % (hazard_layer.workspace, hazard_layer.name)

        # Download data again with native resolution
        bbox = get_bounding_box_string(hazard_filename)
        H = download(INTERNAL_SERVER_URL, hazard_name, bbox)
        A = H.get_data(nan=True)

        # Compare shapes
        msg = ('Shape of downloaded raster was [%i, %i]. '
               'Expected [%i, %i].' %
               (A.shape[0], A.shape[1], A_ref.shape[0], A_ref.shape[1]))
        assert numpy.allclose(A_ref.shape, A.shape, rtol=0, atol=0), msg

        # Compare extrema to reference values (which have also been
        # verified by QGIS for this layer and tested in test_engine.py)
        depth_min, depth_max = H.get_extrema()
        msg = ('Extrema of downloaded file were [%f, %f] but '
               'expected [%f, %f]' %
               (depth_min, depth_max, depth_min_ref, depth_max_ref))
        assert numpy.allclose([depth_min, depth_max],
                              [depth_min_ref, depth_max_ref],
                              rtol=1.0e-6,
                              atol=1.0e-10), msg

        # Compare data number by number
        assert nanallclose(A, A_ref, rtol=1.0e-8)
Example 13
    def test_analysis_of_vector_data_top_N(self):
        """Analysis of vector data - get top N of an attribute
        """

        for vectorname in ['lembang_schools.shp',
                           'tsunami_exposure_BB.shp']:

            filename = '%s/%s' % (TESTDATA, vectorname)
            layer = read_layer(filename)
            coords = layer.get_geometry()
            attributes = layer.get_data()

            # Check exceptions
            try:
                L = layer.get_topN(attribute='FLOOR_AREA', N=0)
            except AssertionError:
                pass
            else:
                msg = 'Exception should have been raised for N == 0'
                raise Exception(msg)

            # Check results
            for N in [5, 10, 11, 17]:
                if vectorname == 'lembang_schools.shp':
                    L = layer.get_topN(attribute='FLOOR_AREA', N=N)
                    assert len(L) == N
                    assert L.get_projection() == layer.get_projection()
                    #print [a['FLOOR_AREA'] for a in L.attributes]
                elif vectorname == 'tsunami_exposure_BB.shp':
                    L = layer.get_topN(attribute='STR_VALUE', N=N)
                    assert len(L) == N
                    assert L.get_projection() == layer.get_projection()
                    val = [a['STR_VALUE'] for a in L.data]

                    ref = [a['STR_VALUE'] for a in attributes]
                    ref.sort()

                    assert numpy.allclose(val, ref[-N:],
                                          atol=1.0e-12, rtol=1.0e-12)
                else:
                    raise Exception
Example 14
    def test_geotransform_from_geonode(self):
        """Geotransforms of GeoNode layers can be correctly determined
        """

        for filename in ['lembang_mmi_hazmap.asc',
                         'test_grid.asc',
                         'shakemap_padang_20090930.asc',
                         'Population_2010_clip.tif']:

            # Upload file to GeoNode
            f = os.path.join(TEST_DATA, filename)
            layer = save_to_geonode(f, user=self.user)
            name = '%s:%s' % (layer.workspace, layer.name)

            # Read raster file and obtain reference resolution
            R = read_layer(f)
            ref_geotransform = R.get_geotransform()

            # ARIEL: geotransform is a vector of six numbers:
            #
            #          (top left x, w-e pixel resolution, rotation,
            #           top left y, rotation, n-s pixel resolution).
            #
            # We should (at least) use elements 0, 1, 3, 5
            # to uniquely determine if rasters are aligned
            # - This depends on what you can get from geonode

            # Get geotransform from GeoNode
            layer_name = layer.name
            metadata = get_metadata(INTERNAL_SERVER_URL, layer_name)

            geotransform_name = 'geotransform'
            msg = ('Could not find attribute "%s" in metadata. '
                   'Values are: %s' % (geotransform_name, metadata.keys()))
            assert geotransform_name in metadata, msg

            gn_geotransform = metadata[geotransform_name]
            msg = ('Geotransform obtained from GeoNode for layer %s '
                   'was not correct. I got %s but expected %s'
                   '' % (name, gn_geotransform, ref_geotransform))
            assert numpy.allclose(ref_geotransform, gn_geotransform), msg
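
The ARIEL comment above describes the GDAL geotransform convention. For reference, the standard mapping from pixel indices to georeferenced coordinates under that convention is:

def pixel_to_coordinates(geotransform, col, row):
    """Coordinates of the upper-left corner of pixel (row, col).

    geotransform = (top left x, w-e resolution, row rotation,
                    top left y, column rotation, n-s resolution)
    Add 0.5 to col and row to get the pixel centre instead.
    """
    g = geotransform
    x = g[0] + col * g[1] + row * g[2]
    y = g[3] + col * g[4] + row * g[5]
    return x, y

With the rotation terms zero, as in these tests, elements 0, 1, 3 and 5 alone fix the grid, which is why the comment singles them out.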
Example 15
    def test_layer_API(self):
        """Vector and Raster instances have a similar API
        """

        # Exceptions
        exclude = ['get_topN', 'get_bins',
                   'get_geotransform', 'get_nodata_value']

        V = Vector()  # Empty vector instance
        R = Raster()  # Empty raster instance

        assert same_API(V, R, exclude=exclude)

        for layername in ['lembang_schools.shp',
                          'Lembang_Earthquake_Scenario.asc']:

            filename = '%s/%s' % (TESTDATA, layername)
            L = read_layer(filename)

            assert same_API(L, V, exclude=exclude)
            assert same_API(L, R, exclude=exclude)
Example 16
    def test_raster_extrema(self):
        """Raster extrema (including NAN's) are correct.
        """

        for rastername in ['Earthquake_Ground_Shaking_clip.tif',
                           'Population_2010_clip.tif',
                           'shakemap_padang_20090930.asc',
                           'population_padang_1.asc',
                           'population_padang_2.asc']:

            filename = '%s/%s' % (TESTDATA, rastername)
            R = read_layer(filename)

            # Check consistency of raster

            # Use numpy to establish the extrema instead of gdal
            minimum, maximum = R.get_extrema()

            # Check that arrays with NODATA value replaced by NaN's agree
            A = R.get_data(nan=False)
            B = R.get_data(nan=True)

            assert A.dtype == B.dtype
            assert numpy.nanmax(A - B) == 0
            assert numpy.nanmax(B - A) == 0
            assert numpy.nanmax(numpy.abs(A - B)) == 0

            # Check that extrema are OK
            assert numpy.allclose(maximum, numpy.max(A[:]))
            assert numpy.allclose(maximum, numpy.nanmax(B[:]))
            assert numpy.allclose(minimum, numpy.nanmin(B[:]))

            # Check that nodata can be replaced by 0.0
            C = R.get_data(nan=0.0)
            msg = '-9999 should have been replaced by 0.0 in %s' % rastername
            assert min(C.flat[:]) != -9999, msg
Example 17
    def Xtest_raster_upload(self):
        """Raster layer can be uploaded and downloaded again correctly
        """

        hazard_filename = ('%s/maumere_aos_depth_20m_land_wgs84.asc'
                           % TESTDATA)

        # Get reference values
        H = read_layer(hazard_filename)
        A_ref = H.get_data()
        depth_min_ref, depth_max_ref = H.get_extrema()

        # Upload to internal geonode
        hazard_layer = save_to_geonode(hazard_filename, user=self.user)
        hazard_name = '%s:%s' % (hazard_layer.workspace, hazard_layer.name)

        # Download data again
        bbox = get_bounding_box_string(hazard_filename)
        H = download(INTERNAL_SERVER_URL, hazard_name, bbox)
        A = H.get_data()

        # Compare shapes
        msg = ('Shape of downloaded raster was [%i, %i]. '
               'Expected [%i, %i].' % (A.shape[0], A.shape[1],
                                       A_ref.shape[0], A_ref.shape[1]))
        assert numpy.allclose(A_ref.shape, A.shape, rtol=0, atol=0), msg

        # Compare extrema to reference values (which have also been
        # verified by QGIS for this layer and tested in test_engine.py)
        depth_min, depth_max = H.get_extrema()
        msg = ('Extrema of downloaded file were [%f, %f] but '
               'expected [%f, %f]' % (depth_min, depth_max,
                                      depth_min_ref, depth_max_ref))
        assert numpy.allclose([depth_min, depth_max],
                              [depth_min_ref, depth_max_ref],
                              rtol=1.0e-6, atol=1.0e-10), msg
Example 18
    def test_tsunami_loss_use_case(self):
        """Building loss from tsunami use case works
        """

        # This test merely exercises the use case as there is
        # no reference data. It does check the sanity of values as
        # far as possible.

        hazard_filename = ('%s/tsunami_max_inundation_depth_BB_'
                           'geographic.asc' % TESTDATA)
        exposure_filename = ('%s/tsunami_exposure_BB.shp' % TESTDATA)
        exposure_with_depth_filename = ('%s/tsunami_exposure_BB_'
                                        'with_depth.shp' % TESTDATA)
        reference_impact_filename = ('%s/tsunami_impact_assessment_'
                                     'BB.shp' % TESTDATA)

        # Calculate impact using API
        H = read_layer(hazard_filename)
        E = read_layer(exposure_filename)

        plugin_name = 'Tsunami Building Loss Function'
        plugin_list = get_plugins(plugin_name)
        assert len(plugin_list) == 1
        assert plugin_list[0].keys()[0] == plugin_name

        IF = plugin_list[0][plugin_name]
        impact_filename = calculate_impact(layers=[H, E],
                                           impact_fcn=IF)

        # Read calculated result
        impact_vector = read_layer(impact_filename)
        icoordinates = impact_vector.get_geometry()
        iattributes = impact_vector.get_data()
        N = len(icoordinates)

        # Ensure that calculated point locations coincide with
        # original exposure point locations
        ref_exp = read_layer(exposure_filename)
        refcoordinates = ref_exp.get_geometry()

        assert N == len(refcoordinates)
        msg = ('Coordinates of impact results do not match those of '
               'exposure data')
        assert numpy.allclose(icoordinates, refcoordinates), msg

        # Ensure that calculated point locations coincide with
        # original exposure point (with depth) locations
        ref_depth = read_layer(exposure_with_depth_filename)
        refdepth_coordinates = ref_depth.get_geometry()
        refdepth_attributes = ref_depth.get_data()
        assert N == len(refdepth_coordinates)
        msg = ('Coordinates of impact results do not match those of '
               'exposure data (with depth)')
        assert numpy.allclose(icoordinates, refdepth_coordinates), msg

        # Read reference results
        hazard_raster = read_layer(hazard_filename)
        A = hazard_raster.get_data()
        depth_min, depth_max = hazard_raster.get_extrema()

        ref_impact = read_layer(reference_impact_filename)
        refimpact_coordinates = ref_impact.get_geometry()
        refimpact_attributes = ref_impact.get_data()

        # Check for None
        for i in range(N):
            if refimpact_attributes[i] is None:
                msg = 'Element %i was None' % i
                raise Exception(msg)

        # Check sanity of calculated attributes
        for i in range(N):
            lon, lat = icoordinates[i]

            depth = iattributes[i]['DEPTH']

            # Ignore NaN's
            if numpy.isnan(depth):
                continue

            structural_damage = iattributes[i]['STRUCT_DAM']
            contents_damage = iattributes[i]['CONTENTS_D']
            for imp in [structural_damage, contents_damage]:
                msg = ('Percent damage was outside range: %f' % imp)
                assert 0 <= imp <= 1, msg

            structural_loss = iattributes[i]['STRUCT_LOS']
            contents_loss = iattributes[i]['CONTENTS_L']
            if depth < 0.3:
                assert structural_loss == 0.0
                assert contents_loss == 0.0
            else:
                assert structural_loss > 0.0
                assert contents_loss > 0.0

            number_of_people = iattributes[i]['NEXIS_PEOP']
            people_affected = iattributes[i]['PEOPLE_AFF']
            people_severely_affected = iattributes[i]['PEOPLE_SEV']

            if 0.01 < depth < 1.0:
                assert people_affected == number_of_people
            else:
                assert people_affected == 0

            if depth >= 1.0:
                assert people_severely_affected == number_of_people
            else:
                assert people_severely_affected == 0

            # Contents and structural damage are done according
            # to different damage curves and should therefore be different
            if depth > 0 and contents_damage > 0:
                assert contents_damage != structural_damage
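
The people-affected assertions in this use case encode simple depth thresholds. Restated as a small pure function (a summary of what the test asserts, not the plugin's actual code; the test skips NaN depths, so treating them as unaffected here is an assumption):

import numpy

def affected_people(depth, number_of_people):
    """Thresholds asserted in the tsunami loss test above."""
    if numpy.isnan(depth):
        return 0, 0  # NaN depths are skipped in the test itself
    affected = number_of_people if 0.01 < depth < 1.0 else 0
    severely = number_of_people if depth >= 1.0 else 0
    return affected, severely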
Example 19
    def test_rasters_and_arrays(self):
        """Consistency of rasters and associated arrays
        """

        # Create test data
        lon_ul = 100  # Longitude of upper left corner
        lat_ul = 10   # Latitude of upper left corner
        numlon = 8    # Number of longitudes
        numlat = 5    # Number of latitudes
        dlon = 1
        dlat = -1

        # Define array where latitudes are rows and longitude columns
        A1 = numpy.zeros((numlat, numlon))

        # Establish coordinates for lower left corner
        lat_ll = lat_ul - numlat
        lon_ll = lon_ul

        # Define pixel centers along each direction
        lon = numpy.linspace(lon_ll + 0.5, lon_ll + numlon - 0.5, numlon)
        lat = numpy.linspace(lat_ll + 0.5, lat_ll + numlat - 0.5, numlat)

        # Define raster with latitudes going bottom-up (south to north).
        # Longitudes go left-right (west to east)
        for i in range(numlat):
            for j in range(numlon):
                A1[numlat - 1 - i, j] = linear_function(lon[j], lat[i])

        # Upper left corner
        assert A1[0, 0] == 105.25
        assert A1[0, 0] == linear_function(lon[0], lat[4])

        # Lower left corner
        assert A1[4, 0] == 103.25
        assert A1[4, 0] == linear_function(lon[0], lat[0])

        # Upper right corner
        assert A1[0, 7] == 112.25
        assert A1[0, 7] == linear_function(lon[7], lat[4])

        # Lower right corner
        assert A1[4, 7] == 110.25
        assert A1[4, 7] == linear_function(lon[7], lat[0])

        # Generate raster object and write
        projection = ('GEOGCS["WGS 84",'
                      'DATUM["WGS_1984",'
                      'SPHEROID["WGS 84",6378137,298.2572235630016,'
                      'AUTHORITY["EPSG","7030"]],'
                      'AUTHORITY["EPSG","6326"]],'
                      'PRIMEM["Greenwich",0],'
                      'UNIT["degree",0.0174532925199433],'
                      'AUTHORITY["EPSG","4326"]]')
        geotransform = (lon_ul, dlon, 0, lat_ul, 0, dlat)
        R1 = Raster(A1, projection, geotransform)

        msg = ('Dimensions of raster array do not match those of '
               'raster object')
        assert numlat == R1.rows, msg
        assert numlon == R1.columns, msg

        # Write back to new (tif) file
        out_filename = unique_filename(suffix='.tif')
        R1.write_to_file(out_filename)

        # Read again and check consistency
        R2 = read_layer(out_filename)

        msg = ('Dimensions of written raster array do not match those '
               'of input raster file\n')
        msg += ('    Dimensions of input file '
                '%s:  (%s, %s)\n' % (R1.filename, numlat, numlon))
        msg += ('    Dimensions of output file %s: '
                '(%s, %s)' % (R2.filename, R2.rows, R2.columns))

        assert numlat == R2.rows, msg
        assert numlon == R2.columns, msg

        A2 = R2.get_data()

        assert numpy.allclose(numpy.min(A1), numpy.min(A2))
        assert numpy.allclose(numpy.max(A1), numpy.max(A2))

        msg = 'Array values of written raster array were not as expected'
        assert numpy.allclose(A1, A2), msg

        msg = 'Geotransforms were different'
        assert R1.get_geotransform() == R2.get_geotransform(), msg

        p1 = R1.get_projection(proj4=True)
        p2 = R2.get_projection(proj4=True)
        msg = 'Projections were different: %s != %s' % (p1, p2)
        assert p1 == p2, msg

        # Exercise projection __eq__ method
        assert R1.projection == R2.projection

        # Check that equality raises exception when type is wrong
        try:
            R1.projection == 234
        except TypeError:
            pass
        else:
            msg = 'Should have raised TypeError'
            raise Exception(msg)
Example 20
    def test_tephra_load_impact(self):
        """Hypothetical tephra load scenario can be computed

        This test also exercises reprojection of UTM data
        """

        # File names for hazard level and exposure

        # FIXME - when we know how to reproject, replace hazard
        # file with UTM version (i.e. without _geographic).
        hazard_filename = '%s/%s/%s' % (DEMODATA, 'hazard',
                                        'Ashload_Gede_VEI4_geographic.asc')
        exposure_filename = '%s/lembang_schools.shp' % TESTDATA

        # Calculate impact using API
        H = read_layer(hazard_filename)
        E = read_layer(exposure_filename)

        plugin_name = 'Tephra Impact Function'
        plugin_list = get_plugins(plugin_name)
        assert len(plugin_list) == 1
        assert plugin_list[0].keys()[0] == plugin_name

        IF = plugin_list[0][plugin_name]
        impact_filename = calculate_impact(layers=[H, E],
                                           impact_function=IF)

        # Read input data
        hazard_raster = read_layer(hazard_filename)
        A = hazard_raster.get_data()
        load_min, load_max = hazard_raster.get_extrema()

        exposure_vector = read_layer(exposure_filename)
        coordinates = exposure_vector.get_geometry()
        attributes = exposure_vector.get_data()

        # Read calculated result
        impact_vector = read_layer(impact_filename)
        coordinates = impact_vector.get_geometry()
        attributes = impact_vector.get_data()

        # FIXME: Remove this tolerance when interpolation is better (issue #19)
        tol = 1.0e-8

        # Test that results are as expected
        # FIXME: Change test when we decide what values should actually be
        #        calculated :-) :-) :-)
        for a in attributes:
            load = a['Ashload']
            impact = a['Percent_da']

            # Test interpolation
            msg = ('Load %.15f was outside bounds [%f, %f]'
                   % (load, load_min, load_max))
            if not numpy.isnan(load):
                assert load_min - tol <= load <= load_max, msg

            # Test calculated values
            if 0.01 <= load < 90.0:
                assert impact == 25
            elif 90.0 <= load < 150.0:
                assert impact == 50
            elif 150.0 <= load < 300.0:
                assert impact == 75
            elif load >= 300.0:
                assert impact == 100
            else:
                assert impact == 0
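
The expected impact values form a step function of ash load. Restating the test's branches as a function makes the bins explicit (again a summary of the assertions, not the plugin itself):

def tephra_damage_percent(load):
    """Percent damage bins asserted in the tephra test above."""
    if load >= 300.0:
        return 100
    elif load >= 150.0:
        return 75
    elif load >= 90.0:
        return 50
    elif load >= 0.01:
        return 25
    else:
        return 0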
Example 21
    def test_earthquake_fatality_estimation_allen(self):
        """Fatalities from ground shaking can be computed correctly 1
           using aligned rasters
        """

        # Name file names for hazard level, exposure and expected fatalities
        hazard_filename = '%s/Earthquake_Ground_Shaking_clip.tif' % TESTDATA
        exposure_filename = '%s/Population_2010_clip.tif' % TESTDATA

        # Calculate impact using API
        H = read_layer(hazard_filename)
        E = read_layer(exposure_filename)

        plugin_name = 'Earthquake Fatality Function'
        plugin_list = get_plugins(plugin_name)
        assert len(plugin_list) == 1
        assert plugin_list[0].keys()[0] == plugin_name

        IF = plugin_list[0][plugin_name]

        # Call calculation engine
        impact_filename = calculate_impact(layers=[H, E],
                                           impact_function=IF)

        # Do calculation manually and check result
        hazard_raster = read_layer(hazard_filename)
        H = hazard_raster.get_data(nan=0)

        exposure_raster = read_layer(exposure_filename)
        E = exposure_raster.get_data(nan=0)

        # Calculate impact manually
        a = 0.97429
        b = 11.037
        F = 10 ** (a * H - b) * E

        # Verify correctness of result
        calculated_raster = read_layer(impact_filename)
        C = calculated_raster.get_data(nan=0)

        # Compare shape and extrema
        msg = ('Shape of calculated raster differs from reference raster: '
               'C=%s, F=%s' % (C.shape, F.shape))
        assert numpy.allclose(C.shape, F.shape, rtol=1e-12, atol=1e-12), msg

        msg = ('Minimum of calculated raster differs from reference raster: '
               'C=%s, F=%s' % (numpy.min(C), numpy.min(F)))
        assert numpy.allclose(numpy.min(C), numpy.min(F),
                              rtol=1e-12, atol=1e-12), msg
        msg = ('Maximum of calculated raster differs from reference raster: '
               'C=%s, F=%s' % (numpy.max(C), numpy.max(F)))
        assert numpy.allclose(numpy.max(C), numpy.max(F),
                              rtol=1e-12, atol=1e-12), msg

        # Compare every single value numerically
        msg = 'Array values of written raster array were not as expected'
        assert numpy.allclose(C, F, rtol=1e-12, atol=1e-12), msg

        # Check that extrema are in range
        xmin, xmax = calculated_raster.get_extrema()
        assert numpy.alltrue(C >= xmin)
        assert numpy.alltrue(C <= xmax)
        assert numpy.alltrue(C >= 0)
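
The manual check above implements the fatality model F = 10 ** (a * H - b) * E with a = 0.97429 and b = 11.037, i.e. fatalities scale exponentially with MMI and linearly with population. A tiny worked example with made-up values:

import numpy

a, b = 0.97429, 11.037
H = numpy.array([6.0, 7.0, 8.0])           # MMI values (made up)
E = numpy.array([1000.0, 1000.0, 1000.0])  # population per cell (made up)

F = 10 ** (a * H - b) * E
# One extra unit of MMI multiplies fatalities by 10 ** 0.97429 (~9.4),
# so F is roughly [0.0064, 0.061, 0.57]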
Example 22
    def test_jakarta_flood_study(self):
        """HKV Jakarta flood study calculated correctly using aligned rasters
        """

        # FIXME (Ole): Redo with population as shapefile later

        # Name file names for hazard level, exposure and expected fatalities

        population = 'Population_Jakarta_geographic.asc'
        plugin_name = 'Flood Impact Function'

        # Expected values from HKV
        expected_values = [2485442, 1537920]

        i = 0
        for filename in ['Flood_Current_Depth_Jakarta_geographic.asc',
                         'Flood_Design_Depth_Jakarta_geographic.asc']:

            hazard_filename = '%s/%s/%s' % (DEMODATA, 'hazard', filename)
            exposure_filename = '%s/%s/%s' % (DEMODATA, 'exposure',
                                              population)

            # Get layers using API
            H = read_layer(hazard_filename)
            E = read_layer(exposure_filename)

            plugin_list = get_plugins(plugin_name)
            assert len(plugin_list) == 1
            assert plugin_list[0].keys()[0] == plugin_name

            IF = plugin_list[0][plugin_name]

            # Call impact calculation engine
            impact_filename = calculate_impact(layers=[H, E],
                                               impact_function=IF)

            # Do calculation manually and check result
            hazard_raster = read_layer(hazard_filename)
            H = hazard_raster.get_data(nan=0)

            exposure_raster = read_layer(exposure_filename)
            P = exposure_raster.get_data(nan=0)

            # Calculate impact manually
            pixel_area = 2500
            I = numpy.where(H > 0.1, P, 0) / 100000.0 * pixel_area

            # Verify correctness against results from HKV
            res = sum(I.flat)
            ref = expected_values[i]
            #print filename, 'Result=%f' % res, ' Expected=%f' % ref
            #print 'Pct relative error=%f' % (abs(res-ref)*100./ref)

            msg = 'Got result %f but expected %f' % (res, ref)
            assert numpy.allclose(res, ref, rtol=1.0e-2), msg

            # Verify correctness of result
            calculated_raster = read_layer(impact_filename)
            C = calculated_raster.get_data(nan=0)

            # Check caption (FIXME (Ole): Do this when issue #77 has been done)
            #assert calculated_raster.get_caption().startswith('Number')
            # ...and more tests here

            # Compare shape and extrema
            msg = ('Shape of calculated raster differs from reference raster: '
                   'C=%s, I=%s' % (C.shape, I.shape))
            assert numpy.allclose(C.shape, I.shape,
                                  rtol=1e-12, atol=1e-12), msg

            msg = ('Minimum of calculated raster differs from reference '
                   'raster: '
                   'C=%s, I=%s' % (numpy.min(C), numpy.min(I)))
            assert numpy.allclose(numpy.min(C), numpy.min(I),
                                  rtol=1e-12, atol=1e-12), msg
            msg = ('Maximum of calculated raster differs from reference '
                   'raster: '
                   'C=%s, I=%s' % (numpy.max(C), numpy.max(I)))
            assert numpy.allclose(numpy.max(C), numpy.max(I),
                                  rtol=1e-12, atol=1e-12), msg

            # Compare every single value numerically
            msg = 'Array values of written raster array were not as expected'
            assert numpy.allclose(C, I, rtol=1e-12, atol=1e-12), msg

            # Check that extrema are in range
            xmin, xmax = calculated_raster.get_extrema()
            assert numpy.alltrue(C >= xmin)
            assert numpy.alltrue(C <= xmax)
            assert numpy.alltrue(C >= 0)

            i += 1
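
The manual flood computation reduces to thresholding depth and scaling population; the 1/100000 factor and pixel_area = 2500 are taken verbatim from the test (their units belong to the HKV dataset and are not explained here). A numeric miniature with made-up arrays:

import numpy

H = numpy.array([[0.0, 0.2],
                 [1.5, 0.05]])   # flood depth in metres (made up)
P = numpy.array([[10., 20.],
                 [30., 40.]])    # population raster values (made up)

pixel_area = 2500
I = numpy.where(H > 0.1, P, 0) / 100000.0 * pixel_area

# Only the two cells deeper than 0.1 m contribute
assert numpy.allclose(I.sum(), (20 + 30) / 100000.0 * pixel_area)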
Example 23
    def test_specified_raster_resolution(self):
        """Raster layers can be downloaded with specific resolution

        This is another test for ticket #103

        Native test data:

        maumere....asc
        ncols 931
        nrows 463
        cellsize 0.00018

        Population_Jakarta
        ncols         638
        nrows         649
        cellsize      0.00045228819716044

        Population_2010
        ncols         5525
        nrows         2050
        cellsize      0.0083333333333333


        Here we download it at a range of fixed resolutions that
        are both coarser and finer, and check that the dimensions
        of the downloaded matrix are as expected.

        We also check that the extrema of the subsampled matrix are sane
        """

        for test_filename in ['maumere_aos_depth_20m_land_wgs84.asc',
                              'Population_Jakarta_geographic.asc',
                              'Population_2010.asc']:

            hazard_filename = ('%s/%s' % (TESTDATA, test_filename))

            # Get reference values
            H = read_layer(hazard_filename)
            depth_min_ref, depth_max_ref = H.get_extrema()
            native_resolution = H.get_resolution()

            # Upload to internal geonode
            hazard_layer = save_to_geonode(hazard_filename, user=self.user)
            hazard_name = '%s:%s' % (hazard_layer.workspace, hazard_layer.name)

            # Test for a range of resolutions
            for res in [0.02, 0.01, 0.005, 0.002, 0.001, 0.0005,  # Coarser
                        0.0002, 0.0001, 0.00006, 0.00003]:  # Finer

                # To save time don't do finest resolution for the
                # two population sets
                if test_filename.startswith('Population') and res < 0.00006:
                    break

                # Set bounding box
                bbox = get_bounding_box_string(hazard_filename)
                compare_extrema = True
                if test_filename == 'Population_2010.asc':
                    # Make bbox small for finer resolutions to
                    # save time and to test that as well.
                    # However, extrema obviously won't match those
                    # of the full dataset. Once we can clip
                    # datasets, we can remove this restriction.
                    if res < 0.005:
                        bbox = '106.685974,-6.373421,106.974534,-6.079886'
                        compare_extrema = False
                bb = bboxstring2list(bbox)

                # Download data at specified resolution
                H = download(INTERNAL_SERVER_URL,
                             hazard_name,
                             bbox,
                             resolution=res)
                A = H.get_data()

                # Verify that data has the requested bbox and resolution
                actual_bbox = H.get_bounding_box()
                msg = ('Bounding box for %s was not as requested. I got %s '
                       'but expected %s' % (hazard_name, actual_bbox, bb))
                assert numpy.allclose(actual_bbox, bb, rtol=1.0e-6), msg

                # FIXME (Ole): How do we sensibly resolve the issue with
                #              resx, resy vs one resolution (issue #173)
                actual_resolution = H.get_resolution()[0]

                # FIXME (Ole): Resolution is often far from the requested
                #              see issue #102
                #              Here we have to accept up to 5%
                tolerance102 = 5.0e-2
                msg = ('Resolution of %s was not as requested. I got %s but '
                       'expected %s' % (hazard_name, actual_resolution, res))
                assert numpy.allclose(actual_resolution,
                                      res,
                                      rtol=tolerance102), msg

                # Determine expected shape from bbox (W, S, E, N)
                ref_rows = int(round((bb[3] - bb[1]) / res))
                ref_cols = int(round((bb[2] - bb[0]) / res))

                # Compare shapes (generally, this may differ by 1)
                msg = ('Shape of downloaded raster was [%i, %i]. '
                       'Expected [%i, %i].' %
                       (A.shape[0], A.shape[1], ref_rows, ref_cols))
                assert (ref_rows == A.shape[0] and ref_cols == A.shape[1]), msg

                # Assess that the range of the interpolated data is sane
                if not compare_extrema:
                    continue

                # For these test sets we get exact match of the minimum
                msg = (
                    'Minimum of %s resampled at resolution %f '
                    'was %f. Expected %f.' %
                    (hazard_layer.name, res, numpy.nanmin(A), depth_min_ref))
                assert numpy.allclose(depth_min_ref,
                                      numpy.nanmin(A),
                                      rtol=0.0,
                                      atol=0.0), msg

                # At the maximum it depends on the subsampling
                msg = (
                    'Maximum of %s resampled at resolution %f '
                    'was %f. Expected %f.' %
                    (hazard_layer.name, res, numpy.nanmax(A), depth_max_ref))
                if res < native_resolution[0]:
                    # When subsampling to finer resolutions we expect a
                    # close match
                    assert numpy.allclose(depth_max_ref,
                                          numpy.nanmax(A),
                                          rtol=1.0e-10,
                                          atol=1.0e-8), msg
                elif res < native_resolution[0] * 10:
                    # When upsampling to coarser resolutions we expect
                    # ballpark match (~20%)
                    assert numpy.allclose(depth_max_ref,
                                          numpy.nanmax(A),
                                          rtol=0.17,
                                          atol=0.0), msg
                else:
                    # Upsampling to very coarse resolutions, just want sanity
                    assert 0 < numpy.nanmax(A) <= depth_max_ref
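
The expected-shape arithmetic above is plain degrees-per-pixel division. For instance, with the small Jakarta bbox used at fine resolutions and res = 0.002:

bb = [106.685974, -6.373421, 106.974534, -6.079886]  # W, S, E, N
res = 0.002

ref_rows = int(round((bb[3] - bb[1]) / res))  # round(146.77) -> 147
ref_cols = int(round((bb[2] - bb[0]) / res))  # round(144.28) -> 144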
Example 24
    def test_lembang_building_examples(self):
        """Lembang building impact calculation works through the API
        """

        # Test for a range of hazard layers

        for mmi_filename in ['lembang_mmi_hazmap.asc']:
                             #'Lembang_Earthquake_Scenario.asc']:

            # Upload input data
            hazardfile = os.path.join(TESTDATA, mmi_filename)
            hazard_layer = save_to_geonode(hazardfile, user=self.user)
            hazard_name = '%s:%s' % (hazard_layer.workspace, hazard_layer.name)

            exposurefile = os.path.join(TESTDATA, 'lembang_schools.shp')
            exposure_layer = save_to_geonode(exposurefile, user=self.user)
            exposure_name = '%s:%s' % (exposure_layer.workspace,
                                       exposure_layer.name)

            # Call calculation routine

            # FIXME (Ole): The system freaks out if there are spaces in
            #              bbox string. Please let us catch that and deal
            #              nicely with it - also do this in download()
            bbox = '105.592,-7.809,110.159,-5.647'

            #print
            #print get_bounding_box(hazardfile)
            #print get_bounding_box(exposurefile)

            with warnings.catch_warnings():
                warnings.simplefilter('ignore')

                c = Client()
                rv = c.post('/impact/api/calculate/', data=dict(
                        hazard_server=INTERNAL_SERVER_URL,
                        hazard=hazard_name,
                        exposure_server=INTERNAL_SERVER_URL,
                        exposure=exposure_name,
                        bbox=bbox,
                        impact_function='Earthquake Building Damage Function',
                        keywords='test,schools,lembang',
                        ))

            self.assertEqual(rv.status_code, 200)
            self.assertEqual(rv['Content-Type'], 'application/json')
            data = json.loads(rv.content)
            assert 'hazard_layer' in data.keys()
            assert 'exposure_layer' in data.keys()
            assert 'run_duration' in data.keys()
            assert 'run_date' in data.keys()
            assert 'layer' in data.keys()

            # Download result and check
            layer_name = data['layer'].split('/')[-1]

            result_layer = download(INTERNAL_SERVER_URL,
                                    layer_name,
                                    bbox)
            assert os.path.exists(result_layer.filename)

            # Read hazard data for reference
            hazard_raster = read_layer(hazardfile)
            A = hazard_raster.get_data()
            mmi_min, mmi_max = hazard_raster.get_extrema()

            # Read calculated result
            impact_vector = read_layer(result_layer.filename)
            coordinates = impact_vector.get_geometry()
            attributes = impact_vector.get_data()

            # Verify calculated result
            count = 0
            for i in range(len(attributes)):
                lon, lat = coordinates[i][:]
                calculated_mmi = attributes[i]['MMI']

                if calculated_mmi == 0.0:
                    # FIXME (Ole): Some points have MMI==0 here.
                    # Weird but not a show stopper
                    continue

                # Check that interpolated points are within range
                msg = ('Interpolated mmi %f was outside extrema: '
                       '[%f, %f] at location '
                       '[%f, %f]. ' % (calculated_mmi,
                                       mmi_min, mmi_max,
                                       lon, lat))
                assert mmi_min <= calculated_mmi <= mmi_max, msg

                # Check calculated damage
                calculated_dam = attributes[i]['DAMAGE']

                ref_dam = lembang_damage_function(calculated_mmi)
                msg = ('Calculated damage was not as expected '
                       'for hazard layer %s' % hazardfile)
                assert numpy.allclose(calculated_dam, ref_dam,
                                      rtol=1.0e-12), msg

                count += 1

            # Make sure only a few points were 0
            assert count > len(attributes) - 4
Example n. 26
    def test_jakarta_flood_study(self):
        """HKV Jakarta flood study calculated correctly using the API
        """

        # FIXME (Ole): Redo with population as shapefile later

        # Expected values from HKV
        expected_values = [2485442, 1537920]

        # Name files for hazard level, exposure and expected fatalities
        population = 'Population_Jakarta_geographic'
        plugin_name = 'FloodImpactFunction'

        # Upload exposure data for this test
        exposure_filename = '%s/%s.asc' % (TESTDATA, population)
        exposure_layer = save_to_geonode(exposure_filename,
                                         user=self.user, overwrite=True)

        workspace = exposure_layer.workspace
        msg = 'Expected workspace to be "geonode". Got %s' % workspace
        assert workspace == 'geonode', msg

        layer_name = exposure_layer.name
        msg = 'Expected layer name to be "%s". Got %s' % (population,
                                                          layer_name)
        assert layer_name.lower() == population.lower(), msg

        exposure_name = '%s:%s' % (workspace, layer_name)

        # Check metadata
        assert_bounding_box_matches(exposure_layer, exposure_filename)
        exp_bbox_string = get_bounding_box_string(exposure_filename)
        check_layer(exposure_layer, full=True)

        # Now we know that exposure layer is good, lets upload some
        # hazard layers and do the calculations

        i = 0
        for filename in ['Flood_Current_Depth_Jakarta_geographic.asc',
                         'Flood_Design_Depth_Jakarta_geographic.asc']:

            hazard_filename = os.path.join(TESTDATA, filename)
            exposure_filename = os.path.join(TESTDATA, population)

            # Save
            hazard_filename = '%s/%s' % (TESTDATA, filename)
            hazard_layer = save_to_geonode(hazard_filename,
                                           user=self.user, overwrite=True)
            hazard_name = '%s:%s' % (hazard_layer.workspace,
                                     hazard_layer.name)

            # Check metadata
            assert_bounding_box_matches(hazard_layer, hazard_filename)
            haz_bbox_string = get_bounding_box_string(hazard_filename)
            check_layer(hazard_layer, full=True)

            # Run calculation
            c = Client()
            rv = c.post('/impact/api/calculate/', data=dict(
                    hazard_server=INTERNAL_SERVER_URL,
                    hazard=hazard_name,
                    exposure_server=INTERNAL_SERVER_URL,
                    exposure=exposure_name,
                    bbox=exp_bbox_string,
                    impact_function=plugin_name,
                    keywords='test,flood,HKV'))

            self.assertEqual(rv.status_code, 200)
            self.assertEqual(rv['Content-Type'], 'application/json')
            data = json.loads(rv.content)
            if 'errors' in data:
                errors = data['errors']
                if errors is not None:
                    raise Exception(errors)

            assert 'hazard_layer' in data
            assert 'exposure_layer' in data
            assert 'run_duration' in data
            assert 'run_date' in data
            assert 'layer' in data

            # Do calculation manually and check result
            hazard_raster = read_layer(hazard_filename)
            H = hazard_raster.get_data(nan=0)

            exposure_raster = read_layer(exposure_filename + '.asc')
            P = exposure_raster.get_data(nan=0)

            # Calculate impact manually
            pixel_area = 2500
            I = numpy.where(H > 0.1, P, 0) / 100000.0 * pixel_area
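            # (Assumed semantics of this reference formula: cells where the
            # flood depth H exceeds 0.1 m contribute their population P;
            # dividing by 100000.0 and multiplying by the 2500 m^2 pixel
            # area converts the stored per-area values to people per cell.)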

            # Verify correctness against results from HKV
            res = sum(I.flat)
            ref = expected_values[i]
            #print filename, 'Result=%f' % res, ' Expected=%f' % ref
            #print 'Pct relative error=%f' % (abs(res-ref)*100./ref)

            msg = 'Got result %f but expected %f' % (res, ref)
            assert numpy.allclose(res, ref, rtol=1.0e-2), msg

            # Verify correctness of result
            # Download result and check
            layer_name = data['layer'].split('/')[-1]

            result_layer = download(INTERNAL_SERVER_URL,
                                    layer_name,
                                    get_bounding_box_string(hazard_filename))
            assert os.path.exists(result_layer.filename)

            calculated_raster = read_layer(result_layer.filename)
            C = calculated_raster.get_data(nan=0)

            # FIXME (Ole): Bring this back
            # Check caption
            #caption = calculated_raster.get_caption()
            #print
            #print caption
            #expct = 'people'
            #msg = ('Caption %s did not contain expected '
            #       'keyword %s' % (caption, expct))
            #assert expct in caption, msg

            # Compare shape and extrema
            msg = ('Shape of calculated raster differs from reference raster: '
                   'C=%s, I=%s' % (C.shape, I.shape))
            assert numpy.allclose(C.shape, I.shape,
                                  rtol=1e-12, atol=1e-12), msg

            msg = ('Minimum of calculated raster differs from reference '
                   'raster: '
                   'C=%s, I=%s' % (numpy.nanmin(C), numpy.nanmin(I)))
            assert numpy.allclose(numpy.nanmin(C), numpy.nanmin(I),
                                  rtol=1e-6, atol=1e-12), msg
            msg = ('Maximum of calculated raster differs from reference '
                   'raster: '
                   'C=%s, I=%s' % (numpy.nanmax(C), numpy.nanmax(I)))
            assert numpy.allclose(numpy.nanmax(C), numpy.nanmax(I),
                                  rtol=1e-6, atol=1e-12), msg

            # Compare every single value numerically (a bit loose -
            # probably due to single precision conversions when
            # data flows through geonode)
            #
            # FIXME: Not working - but since this test is about
            # issue #162 we'll leave it for now. TODO with NAN
            # Manually verified that the two expected values are correct,
            # though.
            #msg = 'Array values of written raster array were not as expected'
            #print C
            #print I
            #print numpy.amax(numpy.abs(C-I))
            #assert numpy.allclose(C, I, rtol=1e-2, atol=1e-5), msg

            # Check that extrema are in range
            xmin, xmax = calculated_raster.get_extrema()

            msg = 'Calculated values were outside the layer extrema'
            assert numpy.alltrue(C[~numpy.isnan(C)] >= xmin), msg
            assert numpy.alltrue(C[~numpy.isnan(C)] <= xmax), msg
            assert numpy.alltrue(C[~numpy.isnan(C)] >= 0), msg

            i += 1
Example n. 27
    def test_raster_scaling(self):
        """Raster layers can be scaled when resampled

        This is a test for ticket #168

        Native test .asc data has

        ncols         5525
        nrows         2050
        cellsize      0.0083333333333333

        Scaling is necessary for raster data that represents density
        such as population per km^2
        """

        for test_filename in [
                'Population_Jakarta_geographic.asc', 'Population_2010.asc'
        ]:

            raster_filename = ('%s/%s' % (TESTDATA, test_filename))

            # Get reference values
            R = read_layer(raster_filename)
            R_min_ref, R_max_ref = R.get_extrema()
            native_resolution = R.get_resolution()

            # Upload to internal geonode
            raster_layer = save_to_geonode(raster_filename, user=self.user)
            raster_name = '%s:%s' % (raster_layer.workspace, raster_layer.name)

            # Test for a range of resolutions
            for res in [
                    0.02,    # Coarser
                    0.01,
                    0.005,
                    0.002,
                    0.001,
                    0.0005,
                    0.0002   # Finer
            ]:

                # To save time don't do finest resolution for the
                # large population set
                if test_filename.startswith('Population_2010') and res < 0.005:
                    break

                bbox = get_bounding_box_string(raster_filename)

                R = download(INTERNAL_SERVER_URL,
                             raster_name,
                             bbox,
                             resolution=res)
                A_native = R.get_data(scaling=False)
                A_scaled = R.get_data(scaling=True)

                sigma = (R.get_resolution()[0] / native_resolution[0])**2
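                # sigma is the area ratio between one resampled cell and
                # one native cell (assuming square cells). For example,
                # going from the native cellsize of ~0.0083333 degrees to
                # res=0.02 gives sigma = (0.02 / 0.0083333)**2 ~= 5.76, so
                # each coarse cell must carry about 5.76 times the native
                # per-cell value for totals to be conserved.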

                # Compare extrema
                expected_scaled_max = sigma * numpy.nanmax(A_native)
                msg = ('Resampled raster was not rescaled correctly: '
                       'max(A_scaled) was %f but expected %f' %
                       (numpy.nanmax(A_scaled), expected_scaled_max))

                assert numpy.allclose(expected_scaled_max,
                                      numpy.nanmax(A_scaled),
                                      rtol=1.0e-8,
                                      atol=1.0e-8), msg

                expected_scaled_min = sigma * numpy.nanmin(A_native)
                msg = ('Resampled raster was not rescaled correctly: '
                       'min(A_scaled) was %f but expected %f' %
                       (numpy.nanmin(A_scaled), expected_scaled_min))
                assert numpy.allclose(expected_scaled_min,
                                      numpy.nanmin(A_scaled),
                                      rtol=1.0e-8,
                                      atol=1.0e-12), msg

                # Compare elementwise
                msg = 'Resampled raster was not rescaled correctly'
                assert nanallclose(A_native * sigma,
                                   A_scaled,
                                   rtol=1.0e-8,
                                   atol=1.0e-8), msg

                # Check that it also works with manual scaling
                A_manual = R.get_data(scaling=sigma)
                msg = 'Resampled raster was not rescaled correctly'
                assert nanallclose(A_manual,
                                   A_scaled,
                                   rtol=1.0e-8,
                                   atol=1.0e-8), msg

                # Check that an exception is raised for bad arguments
                try:
                    R.get_data(scaling='bad')
                except Exception:
                    pass
                else:
                    msg = 'String argument should have raised exception'
                    raise Exception(msg)

                try:
                    R.get_data(scaling='(1, 3)')
                except Exception:
                    pass
                else:
                    msg = 'Tuple argument should have raised exception'
                    raise Exception(msg)

                # Check None option without existence of density keyword
                A_none = R.get_data(scaling=None)
                msg = 'Data should not have changed'
                assert nanallclose(A_native,
                                   A_none,
                                   rtol=1.0e-12,
                                   atol=1.0e-12), msg

                # Try with None and density keyword
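                # (Behaviour exercised below: with scaling=None, get_data()
                # appears to consult the layer's 'density' keyword - truthy
                # strings such as 'true' and 'Yes' enable rescaling, while
                # 'False' and 'no' leave the data unscaled.)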
                R.keywords['density'] = 'true'
                A_none = R.get_data(scaling=None)
                msg = 'Resampled raster was not rescaled correctly'
                assert nanallclose(A_scaled,
                                   A_none,
                                   rtol=1.0e-12,
                                   atol=1.0e-12), msg

                R.keywords['density'] = 'Yes'
                A_none = R.get_data(scaling=None)
                msg = 'Resampled raster was not rescaled correctly'
                assert nanallclose(A_scaled,
                                   A_none,
                                   rtol=1.0e-12,
                                   atol=1.0e-12), msg

                R.keywords['density'] = 'False'
                A_none = R.get_data(scaling=None)
                msg = 'Data should not have changed'
                assert nanallclose(A_native,
                                   A_none,
                                   rtol=1.0e-12,
                                   atol=1.0e-12), msg

                R.keywords['density'] = 'no'
                A_none = R.get_data(scaling=None)
                msg = 'Data should not have changed'
                assert nanallclose(A_native,
                                   A_none,
                                   rtol=1.0e-12,
                                   atol=1.0e-12), msg
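
    # A minimal illustrative helper (not part of the original test class;
    # the name _density_scale_factor is hypothetical), capturing the
    # density-rescaling rule exercised in test_raster_scaling above.
    @staticmethod
    def _density_scale_factor(native_resolution, new_resolution):
        """Return sigma = area of one new cell / area of one native cell.

        Multiplying a resampled density raster by sigma conserves
        totals such as population counts (assumes square cells).
        """
        return (new_resolution[0] / native_resolution[0]) ** 2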
Example n. 28
    def test_tephra_load_impact(self):
        """Hypothetical tephra load scenario can be computed

        This test also exercises reprojection of UTM data
        """

        # File names for hazard level and exposure

        # FIXME - when we know how to reproject, replace hazard
        # file with UTM version (i.e. without _geographic).
        hazard_filename = os.path.join(TESTDATA,
                                       'Ashload_Gede_VEI4_geographic.asc')
        exposure_filename = os.path.join(TESTDATA, 'lembang_schools.shp')

        # Calculate impact using API
        H = read_layer(hazard_filename)
        E = read_layer(exposure_filename)

        plugin_name = 'Tephra Impact Function'
        plugin_list = get_plugins(plugin_name)
        assert len(plugin_list) == 1
        assert plugin_list[0].keys()[0] == plugin_name

        IF = plugin_list[0][plugin_name]
        impact_filename = calculate_impact(layers=[H, E],
                                           impact_fcn=IF)

        # Read input data
        hazard_raster = read_layer(hazard_filename)
        A = hazard_raster.get_data()
        load_min, load_max = hazard_raster.get_extrema()

        exposure_vector = read_layer(exposure_filename)
        coordinates = exposure_vector.get_geometry()
        attributes = exposure_vector.get_data()

        # Read calculated result
        impact_vector = read_layer(impact_filename)
        coordinates = impact_vector.get_geometry()
        attributes = impact_vector.get_data()

        # Test that results are as expected
        # FIXME: Change test when we decide what values should actually be
        #        calculated :-) :-) :-)
        for a in attributes:
            load = a['ASHLOAD']
            impact = a['DAMAGE']

            # Test interpolation
            msg = 'Load %.15f was outside bounds [%f, %f]' % (load,
                                                              load_min,
                                                              load_max)
            if not numpy.isnan(load):
                assert load_min <= load <= load_max, msg

            # Test calculated values
            #if 0.01 <= load < 90.0:
            #    assert impact == 1
            #elif 90.0 <= load < 150.0:
            #    assert impact == 2
            #elif 150.0 <= load < 300.0:
            #    assert impact == 3
            #elif load >= 300.0:
            #    assert impact == 4
            #else:
            #    assert impact == 0

            if 0.01 <= load < 0.5:
                assert impact == 0
            elif 0.5 <= load < 2.0:
                assert impact == 1
            elif 2.0 <= load < 10.0:
                assert impact == 2
            elif load >= 10.0:
                assert impact == 3
            else:
                assert impact == 0
Example n. 29
    def test_riab_interpolation(self):
        """Interpolation using Raster and Vector objects
        """

        # Create test data
        lon_ul = 100  # Longitude of upper left corner
        lat_ul = 10   # Latitude of upper left corner
        numlon = 8    # Number of longitudes
        numlat = 5    # Number of latitudes
        dlon = 1
        dlat = -1

        # Define array where latitudes are rows and longitude columns
        A = numpy.zeros((numlat, numlon))

        # Establish coordinates for lower left corner
        lat_ll = lat_ul - numlat
        lon_ll = lon_ul

        # Define pixel centers along each direction
        longitudes = numpy.linspace(lon_ll + 0.5,
                                    lon_ll + numlon - 0.5,
                                    numlon)
        latitudes = numpy.linspace(lat_ll + 0.5,
                                   lat_ll + numlat - 0.5,
                                   numlat)
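        # (With dlon = 1 and dlat = -1, pixel centres sit half a cell in
        # from the grid edges, hence the +0.5 / -0.5 offsets above.)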

        # Define raster with latitudes going bottom-up (south to north).
        # Longitudes go left-right (west to east)
        for i in range(numlat):
            for j in range(numlon):
                A[numlat - 1 - i, j] = linear_function(longitudes[j],
                                                       latitudes[i])
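        # Note: row 0 of A is the northernmost row, so the south-to-north
        # latitudes are written into rows in reverse order (numlat - 1 - i).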

        # Write array to a raster file
        geotransform = (lon_ul, dlon, 0, lat_ul, 0, dlat)
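        # (GDAL-style geotransform: (x_origin, pixel width, x rotation,
        # y_origin, y rotation, pixel height); the pixel height dlat is
        # negative because rows run from north to south.)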
        projection = ('GEOGCS["GCS_WGS_1984",'
                      'DATUM["WGS_1984",'
                      'SPHEROID["WGS_1984",6378137.0,298.257223563]],'
                      'PRIMEM["Greenwich",0.0],'
                      'UNIT["Degree",0.0174532925199433]]')

        raster_filename = unique_filename(suffix='.tif')
        write_raster_data(A,
                          projection,
                          geotransform,
                          raster_filename)

        # Write test interpolation points to a vector file
        coordinates = []
        for xi in longitudes:
            for eta in latitudes:
                coordinates.append((xi, eta))

        vector_filename = unique_filename(suffix='.shp')
        write_vector_data(data=None,
                          projection=projection,
                          geometry=coordinates,
                          filename=vector_filename)

        # Read both datasets back in
        R = read_layer(raster_filename)
        V = read_layer(vector_filename)

        # Then test that axes and data returned by R are correct
        x, y = R.get_geometry()
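        # For a raster, get_geometry() evidently returns the coordinate
        # axes: vectors of pixel-centre longitudes (x) and latitudes (y).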
        msg = 'X axis was %s, should have been %s' % (x, longitudes)
        assert numpy.allclose(longitudes, x), msg
        msg = 'Y axis was %s, should have been %s' % (y, latitudes)
        assert numpy.allclose(latitudes, y), msg
        AA = R.get_data()
        msg = 'Raster data was %s, should have been %s' % (AA, A)
        assert numpy.allclose(AA, A), msg

        # Test riab's interpolation function
        I = R.interpolate(V, name='value')
        Icoordinates = I.get_geometry()
        Iattributes = I.get_data()

        assert numpy.allclose(Icoordinates, coordinates)

        # Test that interpolated points are correct
        for i, (xi, eta) in enumerate(Icoordinates):

            z = Iattributes[i]['value']
            #print xi, eta, z, linear_function(xi, eta)
            assert numpy.allclose(z, linear_function(xi, eta),
                                  rtol=1e-12)

        # FIXME (Ole): Need test for values outside grid.
        #              They should be NaN or something

        # Cleanup
        # FIXME (Ole): Shape files are a collection of files. How to remove?
        os.remove(vector_filename)
Example n. 30
    def test_interpolation_tsunami_maumere(self):
        """Interpolation using tsunami data set from Maumere

        This is a test for interpolation (issue #19)
        """

        # Name file names for hazard level, exposure and expected fatalities
        hazard_filename = ('%s/maumere_aos_depth_20m_land_wgs84.asc'
                           % TESTDATA)
        exposure_filename = ('%s/maumere_pop_prj.shp' % TESTDATA)

        # Read input data
        H = read_layer(hazard_filename)
        A = H.get_data()
        depth_min, depth_max = H.get_extrema()

        # Compare extrema to values read off QGIS for this layer
        assert numpy.allclose([depth_min, depth_max], [0.0, 16.68],
                              rtol=1.0e-6, atol=1.0e-10)

        E = read_layer(exposure_filename)
        coordinates = E.get_geometry()
        attributes = E.get_data()

        # Test riab's interpolation function
        I = H.interpolate(E, name='depth')
        Icoordinates = I.get_geometry()
        Iattributes = I.get_data()
        assert numpy.allclose(Icoordinates, coordinates)

        N = len(Icoordinates)
        assert N == 891

        # Verify interpolated values with test result
        for i in range(N):

            interpolated_depth = Iattributes[i]['depth']
            pointid = attributes[i]['POINTID']

            if pointid == 263:

                #print i, pointid, attributes[i],
                #print interpolated_depth, coordinates[i]

                # Check that location is correct
                assert numpy.allclose(coordinates[i],
                                      [122.20367299, -8.61300358])

                # This is known to be outside the inundation area so the
                # interpolated depth should be near zero
                assert numpy.allclose(interpolated_depth, 0.0,
                                      rtol=1.0e-12, atol=1.0e-12)

            if pointid == 148:
                # Check that location is correct
                assert numpy.allclose(coordinates[i],
                                      [122.2045912, -8.608483265])

                # This is in an inundated area with surrounding depths of
                # 4.531, 3.911
                # 2.675, 2.583
                assert interpolated_depth < 4.531
                assert interpolated_depth < 3.911
                assert interpolated_depth > 2.583
                assert interpolated_depth > 2.675

                # This is a characterisation test for bilinear interpolation
                assert numpy.allclose(interpolated_depth, 3.62477215491,
                                      rtol=1.0e-12, atol=1.0e-12)
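                # (For reference, bilinear interpolation is the convex
                # combination
                #     z = (1-s)(1-t)*z11 + s(1-t)*z21 + (1-s)t*z12 + s*t*z22
                # of the four surrounding cell values with fractional
                # offsets (s, t), so the result always lies within the
                # range of those corners - consistent with the bounds
                # asserted above.)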

            # Check that interpolated points are within range
            msg = ('Interpolated depth %f at point %i was outside extrema: '
                   '[%f, %f]. ' % (interpolated_depth, i,
                                   depth_min, depth_max))

            if not numpy.isnan(interpolated_depth):
                assert depth_min <= interpolated_depth <= depth_max, msg
Example n. 31
    def test_padang_building_examples(self):
        """Padang building impact calculation works through the API
        """

        # Test for a range of hazard layers
        for mmi_filename in ['Shakemap_Padang_2009.asc']:
                               #'Lembang_Earthquake_Scenario.asc']:

            # Upload input data
            hazardfile = os.path.join(TESTDATA, mmi_filename)
            hazard_layer = save_to_geonode(hazardfile, user=self.user)
            hazard_name = '%s:%s' % (hazard_layer.workspace,
                                     hazard_layer.name)

            exposurefile = os.path.join(TESTDATA, 'Padang_WGS84.shp')
            exposure_layer = save_to_geonode(exposurefile, user=self.user)
            exposure_name = '%s:%s' % (exposure_layer.workspace,
                                       exposure_layer.name)

            # Call calculation routine

            # FIXME (Ole): The system freaks out if there are spaces in
            #              bbox string. Please let us catch that and deal
            #              nicely with it - also do this in download()
            bbox = '96.956, -5.51, 104.63933, 2.289497'

            with warnings.catch_warnings():
                warnings.simplefilter('ignore')

                c = Client()
                rv = c.post('/impact/api/calculate/', data=dict(
                            hazard_server=INTERNAL_SERVER_URL,
                            hazard=hazard_name,
                            exposure_server=INTERNAL_SERVER_URL,
                            exposure=exposure_name,
                            bbox=bbox,
                            impact_function='Padang Earthquake '
                                            'Building Damage Function',
                            keywords='test,buildings,padang',
                            ))

                self.assertEqual(rv.status_code, 200)
                self.assertEqual(rv['Content-Type'], 'application/json')
                data = json.loads(rv.content)
                assert 'hazard_layer' in data.keys()
                assert 'exposure_layer' in data.keys()
                assert 'run_duration' in data.keys()
                assert 'run_date' in data.keys()
                assert 'layer' in data.keys()

                # Download result and check
                layer_name = data['layer'].split('/')[-1]

                result_layer = download(INTERNAL_SERVER_URL,
                                        layer_name,
                                        bbox)
                assert os.path.exists(result_layer.filename)

                # Read hazard data for reference
                hazard_raster = read_layer(hazardfile)
                A = hazard_raster.get_data()
                mmi_min, mmi_max = hazard_raster.get_extrema()

                # Read calculated result
                impact_vector = read_layer(result_layer.filename)
                coordinates = impact_vector.get_geometry()
                attributes = impact_vector.get_data()

                # Verify calculated result
                count = 0
                verified_count = 0
                for i in range(len(attributes)):
                    lon, lat = coordinates[i][:]
                    calculated_mmi = attributes[i]['MMI']

                    if calculated_mmi == 0.0:
                        # FIXME (Ole): Some points have MMI==0 here.
                        # Weird but not a show stopper
                        continue

                    # Check that interpolated points are within range
                    msg = ('Interpolated mmi %f was outside extrema: '
                           '[%f, %f] at location '
                           '[%f, %f]. ' % (calculated_mmi,
                                           mmi_min, mmi_max,
                                           lon, lat))
                    assert mmi_min <= calculated_mmi <= mmi_max, msg

                    building_class = attributes[i]['TestBLDGCl']

                    # Check calculated damage
                    calculated_dam = attributes[i]['DAMAGE']
                    verified_dam = padang_check_results(calculated_mmi,
                                                        building_class)
                    #print calculated_mmi, building_class, calculated_dam
                    if verified_dam:
                        msg = ('Calculated damage was not as expected '
                               'for hazard layer %s. I got %f '
                               'but expected %f' % (hazardfile,
                                                    calculated_dam,
                                                    verified_dam))
                        assert numpy.allclose(calculated_dam, verified_dam,
                                              rtol=1.0e-4), msg
                        verified_count += 1
                    count += 1

                msg = ('No points were verified in output. Please create '
                       'a table with reference data')
                assert verified_count > 0, msg
                msg = 'Number of buildings was not 3896.'
                assert count == 3896, msg
Example n. 32
    def test_reading_and_writing_of_vector_data(self):
        """Vector data can be read and written correctly
        """

        # First test that some error conditions are caught
        filename = unique_filename(suffix='nshoe66u')
        try:
            read_layer(filename)
        except Exception:
            pass
        else:
            msg = 'Exception for unknown extension should have been raised'
            raise Exception(msg)

        filename = unique_filename(suffix='.gml')
        try:
            read_layer(filename)
        except IOError:
            pass
        else:
            msg = 'Exception for non-existing file should have been raised'
            raise Exception(msg)

        # Read and verify test data
        for vectorname in ['lembang_schools.shp',
                           'tsunami_exposure_BB.shp']:

            filename = '%s/%s' % (TESTDATA, vectorname)
            layer = read_layer(filename)
            coords = layer.get_geometry()
            attributes = layer.get_data()

            # Check basic data integrity
            N = len(layer)
            assert coords.shape[0] == N
            assert coords.shape[1] == 2
            assert len(layer) == N

            assert isinstance(layer.get_name(), basestring)

            # Check projection
            wkt = layer.get_projection(proj4=False)
            assert wkt.startswith('GEOGCS')

            assert layer.projection == Projection(DEFAULT_PROJECTION)

            # Check integrity of each feature
            field_names = None
            for i in range(N):
                # Consistency between geometry and fields

                x1 = coords[i, 0]
                x2 = attributes[i]['LONGITUDE']
                assert x2 is not None
                msg = 'Inconsistent longitudes: %f != %f' % (x1, x2)
                assert numpy.allclose(x1, x2), msg

                x1 = coords[i, 1]
                x2 = attributes[i]['LATITUDE']
                assert x2 is not None
                msg = 'Inconsistent latitudes: %f != %f' % (x1, x2)
                assert numpy.allclose(x1, x2), msg

                # Verify that each feature has the same fields
                if field_names is None:
                    field_names = attributes[i].keys()
                else:
                    assert len(field_names) == len(attributes[i].keys())
                    assert field_names == attributes[i].keys()

            # Write data back to file
            # FIXME (Ole): I would like to use gml here, but OGR does not
            #              store the spatial reference!
            out_filename = unique_filename(suffix='.shp')
            write_point_data(attributes, wkt, coords, out_filename)

            # Read again and check
            layer = read_layer(out_filename)
            coords = layer.get_geometry()
            attributes = layer.get_data()

            # Check basic data integrity
            N = len(layer)
            assert coords.shape[0] == N
            assert coords.shape[1] == 2

            # Check projection
            assert layer.projection == Projection(DEFAULT_PROJECTION)

            # Check integrity of each feature
            field_names = None
            for i in range(N):

                # Consistency between geometry and fields
                x1 = coords[i, 0]
                x2 = attributes[i]['LONGITUDE']
                assert x2 is not None
                msg = 'Inconsistent longitudes: %f != %f' % (x1, x2)
                assert numpy.allclose(x1, x2), msg

                x1 = coords[i, 1]
                x2 = attributes[i]['LATITUDE']
                assert x2 is not None
                msg = 'Inconsistent latitudes: %f != %f' % (x1, x2)
                assert numpy.allclose(x1, x2), msg

                # Verify that each feature has the same fields
                if field_names is None:
                    field_names = attributes[i].keys()
                else:
                    assert len(field_names) == len(attributes[i].keys())
                    assert field_names == attributes[i].keys()

            # Test individual extraction
            lon = layer.get_data(attribute='LONGITUDE')
            assert numpy.allclose(lon, coords[:, 0])
Example n. 33
    def test_keywords_download(self):
        """Keywords are downloaded from GeoServer along with layer data
        """

        # Upload test data
        filenames = [
            'Lembang_Earthquake_Scenario.asc', 'Padang_WGS84.shp',
            'maumere_aos_depth_20m_land_wgs84.asc'
        ]
        layers = []
        paths = []
        for filename in filenames:
            basename, ext = os.path.splitext(filename)

            path = os.path.join(TESTDATA, filename)

            # Upload to GeoNode
            layer = save_to_geonode(path, user=self.user, overwrite=True)

            # Record layer and file
            layers.append(layer)
            paths.append(path)

        # Check integrity
        for i, layer in enumerate(layers):

            # Get reference keyword dictionary from file
            L = read_layer(paths[i])
            ref_keywords = L.get_keywords()

            # Get keywords metadata from GeoServer
            layer_name = '%s:%s' % (layer.workspace, layer.name)
            metadata = get_metadata(INTERNAL_SERVER_URL, layer_name)
            assert 'keywords' in metadata
            geo_keywords = metadata['keywords']
            msg = ('Uploaded keywords were not as expected: I got %s '
                   'but expected %s' % (geo_keywords, ref_keywords))
            for kw in ref_keywords:
                # Check that all keywords were uploaded
                # It is OK for new automatic keywords to have appeared
                #  (e.g. resolution) - see issue #171
                assert kw in geo_keywords, msg
                assert ref_keywords[kw] == geo_keywords[kw], msg

            # Download data
            bbox = get_bounding_box_string(paths[i])
            H = download(INTERNAL_SERVER_URL, layer_name, bbox)

            dwn_keywords = H.get_keywords()
            msg = ('Downloaded keywords were not as expected: I got %s '
                   'but expected %s' % (dwn_keywords, geo_keywords))
            assert geo_keywords == dwn_keywords, msg

            # Check that the layer and its .keywords file are there.
            msg = 'Downloaded layer %s was not found' % H.filename
            assert os.path.isfile(H.filename), msg

            kw_filename = os.path.splitext(H.filename)[0] + '.keywords'
            msg = 'Downloaded keywords file %s was not found' % kw_filename
            assert os.path.isfile(kw_filename), msg

            # Check that keywords are OK when reading downloaded file
            L = read_layer(H.filename)
            read_keywords = L.get_keywords()
            msg = ('Keywords in downloaded file %s were not as expected: '
                   'I got %s but expected %s' %
                   (kw_filename, read_keywords, geo_keywords))
            assert read_keywords == geo_keywords, msg
Example n. 34
    def test_jakarta_flood_study(self):
        """HKV Jakarta flood study calculated correctly using aligned rasters
        """

        # FIXME (Ole): Redo with population as shapefile later

        # Name file names for hazard level, exposure and expected fatalities

        population = 'Population_Jakarta_geographic.asc'
        plugin_name = 'Flood Impact Function'

        # Expected values from HKV
        expected_values = [2485442, 1537920]

        i = 0
        for filename in ['Flood_Current_Depth_Jakarta_geographic.asc',
                         'Flood_Design_Depth_Jakarta_geographic.asc']:

            hazard_filename = os.path.join(TESTDATA, filename)
            exposure_filename = os.path.join(TESTDATA, population)

            # Get layers using API
            H = read_layer(hazard_filename)
            E = read_layer(exposure_filename)

            plugin_list = get_plugins(plugin_name)
            assert len(plugin_list) == 1
            assert plugin_list[0].keys()[0] == plugin_name

            IF = plugin_list[0][plugin_name]

            # Call impact calculation engine
            impact_filename = calculate_impact(layers=[H, E],
                                               impact_fcn=IF)

            # Do calculation manually and check result
            hazard_raster = read_layer(hazard_filename)
            H = hazard_raster.get_data(nan=0)

            exposure_raster = read_layer(exposure_filename)
            P = exposure_raster.get_data(nan=0)

            # Calculate impact manually
            pixel_area = 2500
            I = numpy.where(H > 0.1, P, 0) / 100000.0 * pixel_area

            # Verify correctness against results from HKV
            res = sum(I.flat)
            ref = expected_values[i]
            #print filename, 'Result=%f' % res, ' Expected=%f' % ref
            #print 'Pct relative error=%f' % (abs(res-ref)*100./ref)

            msg = 'Got result %f but expected %f' % (res, ref)
            assert numpy.allclose(res, ref, rtol=1.0e-2), msg

            # Verify correctness of result
            calculated_raster = read_layer(impact_filename)
            C = calculated_raster.get_data(nan=0)

            # Check caption
            caption = calculated_raster.get_caption()
            expct = 'People'
            msg = ('Caption %s did not contain expected '
                   'keyword %s' % (caption, expct))
            assert expct in caption, msg

            # Compare shape and extrema
            msg = ('Shape of calculated raster differs from reference raster: '
                   'C=%s, I=%s' % (C.shape, I.shape))
            assert numpy.allclose(C.shape, I.shape,
                                  rtol=1e-12, atol=1e-12), msg

            msg = ('Minimum of calculated raster differs from reference '
                   'raster: '
                   'C=%s, I=%s' % (numpy.min(C), numpy.min(I)))
            assert numpy.allclose(numpy.min(C), numpy.min(I),
                                  rtol=1e-12, atol=1e-12), msg
            msg = ('Maximum of calculated raster differs from reference '
                   'raster: '
                   'C=%s, I=%s' % (numpy.max(C), numpy.max(I)))
            assert numpy.allclose(numpy.max(C), numpy.max(I),
                                  rtol=1e-12, atol=1e-12), msg

            # Compare every single value numerically
            msg = 'Array values of written raster array were not as expected'
            assert numpy.allclose(C, I, rtol=1e-12, atol=1e-12), msg

            # Check that extrema are in range
            xmin, xmax = calculated_raster.get_extrema()
            assert numpy.alltrue(C >= xmin)
            assert numpy.alltrue(C <= xmax)
            assert numpy.alltrue(C >= 0)

            i += 1
Example n. 35
    def Xtest_interpolation_tsunami_maumere(self):
        """Interpolation using tsunami data set from Maumere

        This data showed some very wrong results from interpolation overshoot
        when first attempted in August 2011 - hence this test
        """

        # Name file names for hazard level, exposure and expected fatalities
        hazard_filename = ('%s/maumere_aos_depth_20m_land_wgs84.asc'
                           % TESTDATA)
        exposure_filename = ('%s/maumere_pop_prj.shp' % TESTDATA)

        # Read input data
        H = read_layer(hazard_filename)
        A = H.get_data()
        depth_min, depth_max = H.get_extrema()

        # Compare extrema to values read off QGIS for this layer
        assert numpy.allclose([depth_min, depth_max], [0.0, 16.68],
                              rtol=1.0e-6, atol=1.0e-10)

        E = read_layer(exposure_filename)
        coordinates = E.get_geometry()
        attributes = E.get_data()

        # Test riab's interpolation function
        I = H.interpolate(E, name='depth')
        Icoordinates = I.get_geometry()
        Iattributes = I.get_data()
        assert numpy.allclose(Icoordinates, coordinates)

        N = len(Icoordinates)
        assert N == 891

        # Verify interpolated values with test result
        for i in range(N):

            interpolated_depth = Iattributes[i]['depth']
            pointid = attributes[i]['POINTID']

            if pointid == 263:

                #print i, pointid, attributes[i],
                #print interpolated_depth, coordinates[i]

                # Check that location is correct
                assert numpy.allclose(coordinates[i],
                                      [122.20367299, -8.61300358])

                # This is known to be outside the inundation area so the
                # interpolated depth should be near zero
                assert numpy.allclose(interpolated_depth, 0.0,
                                      rtol=1.0e-12, atol=1.0e-12)

            if pointid == 148:
                # Check that location is correct
                assert numpy.allclose(coordinates[i],
                                      [122.2045912, -8.608483265])

                # This is in an inundated area with surrounding depths of
                # 4.531, 3.911
                # 2.675, 2.583
                assert interpolated_depth < 4.531
                assert interpolated_depth > 2.583
                assert numpy.allclose(interpolated_depth, 3.553,
                                      rtol=1.0e-5, atol=1.0e-5)

            # Check that interpolated points are within range
            msg = ('Interpolated depth %f at point %i was outside extrema: '
                   '[%f, %f]. ' % (interpolated_depth, i,
                                   depth_min, depth_max))

            if not numpy.isnan(interpolated_depth):
                tol = 1.0e-6
                assert depth_min - tol <= interpolated_depth <= depth_max, msg
Example n. 36
    def test_earthquake_damage_schools(self):
        """Lembang building damage from ground shaking works

        This test also exercises interpolation of hazard level (raster) to
        building locations (vector data).
        """

        for mmi_filename in ['lembang_mmi_hazmap.asc',
                             'Earthquake_Ground_Shaking_clip.tif',  # NaN's
                             'Lembang_Earthquake_Scenario.asc']:

            # Name file names for hazard level and exposure
            hazard_filename = '%s/%s' % (TESTDATA, mmi_filename)
            exposure_filename = '%s/lembang_schools.shp' % TESTDATA

            # Calculate impact using API
            H = read_layer(hazard_filename)
            E = read_layer(exposure_filename)

            plugin_name = 'Earthquake Building Damage Function'
            plugin_list = get_plugins(plugin_name)
            assert len(plugin_list) == 1
            assert plugin_list[0].keys()[0] == plugin_name

            IF = plugin_list[0][plugin_name]

            impact_filename = calculate_impact(layers=[H, E],
                                               impact_fcn=IF)

            # Read input data
            hazard_raster = read_layer(hazard_filename)
            A = hazard_raster.get_data()
            mmi_min, mmi_max = hazard_raster.get_extrema()

            exposure_vector = read_layer(exposure_filename)
            coordinates = exposure_vector.get_geometry()
            attributes = exposure_vector.get_data()

            # Read calculated result
            impact_vector = read_layer(impact_filename)
            icoordinates = impact_vector.get_geometry()
            iattributes = impact_vector.get_data()

            # First check that interpolated MMI was done as expected
            fid = open('%s/lembang_schools_percentage_loss_and_mmi.txt'
                       % TESTDATA)
            reference_points = []
            MMI = []
            DAM = []
            for line in fid.readlines()[1:]:
                fields = line.strip().split(',')

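                # The reference file fields are quoted; [1:-1] strips the
                # surrounding quote characters before conversion to float.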
                lon = float(fields[4][1:-1])
                lat = float(fields[3][1:-1])
                mmi = float(fields[-1][1:-1])
                dam = float(fields[-2][1:-1])

                reference_points.append((lon, lat))
                MMI.append(mmi)
                DAM.append(dam)

            # Verify that coordinates are consistent
            msg = 'Interpolated coordinates do not match those of test data'
            assert numpy.allclose(icoordinates, reference_points), msg

            # Verify interpolated MMI with test result
            min_damage = sys.maxint
            max_damage = -min_damage
            for i in range(len(MMI)):
                lon, lat = icoordinates[i][:]
                calculated_mmi = iattributes[i]['MMI']

                if numpy.isnan(calculated_mmi):
                    continue

                # Check that interpolated points are within range
                msg = ('Interpolated mmi %f from file %s was outside '
                       'extrema: [%f, %f] at location '
                       '[%f, %f].' % (calculated_mmi, hazard_filename,
                                      mmi_min, mmi_max, lon, lat))
                assert mmi_min <= calculated_mmi <= mmi_max, msg

                # Set up some tolerances for comparison with test set.
                if mmi_filename.startswith('Lembang_Earthquake'):
                    pct = 3
                else:
                    pct = 2

                # Check that interpolated result is within specified tolerance
                msg = ('Calculated MMI %f deviated more than %.1f%% from '
                       'what was expected %f' % (calculated_mmi, pct, MMI[i]))
                assert numpy.allclose(calculated_mmi, MMI[i],
                                      rtol=float(pct) / 100), msg

                calculated_dam = iattributes[i]['DAMAGE']
                if calculated_dam > max_damage:
                    max_damage = calculated_dam

                if calculated_dam < min_damage:
                    min_damage = calculated_dam

                ref_dam = lembang_damage_function(calculated_mmi)
                msg = ('Calculated damage was not as expected')
                assert numpy.allclose(calculated_dam, ref_dam,
                                      rtol=1.0e-12), msg

                # Test that test data is correct by calculating damage based
                # on reference MMI.
                # FIXME (Ole): UNCOMMENT WHEN WE GET THE CORRECT DATASET
                #expected_test_damage = lembang_damage_function(MMI[i])
                #msg = ('Test data is inconsistent: i = %i, MMI = %f,'
                #       'expected_test_damage = %f, '
                #       'actual_test_damage = %f' % (i, MMI[i],
                #                                    expected_test_damage,
                #                                    DAM[i]))
                #if not numpy.allclose(expected_test_damage,
                #                      DAM[i], rtol=1.0e-12):
                #    print msg

                # Note this test doesn't work, but the question is whether the
                # independent test data is correct.
                # Also small fluctuations in MMI can cause very large changes
                # in computed damage for this example.
                # print mmi, MMI[i], calculated_damage, DAM[i]
                #msg = ('Calculated damage was not as expected for point %i:'
                #       'Got %f, expected %f' % (i, calculated_dam, DAM[i]))
                #assert numpy.allclose(calculated_dam, DAM[i], rtol=0.8), msg

            assert min_damage >= 0
            assert max_damage <= 100
Example n. 37
    def test_earthquake_damage_schools(self):
        """Lembang building damage from ground shaking works

        This test also exercises interpolation of hazard level (raster) to
        building locations (vector data).
        """

        for mmi_filename in ['lembang_mmi_hazmap.asc',
                             'Earthquake_Ground_Shaking_clip.tif',  # NaN's
                             'Lembang_Earthquake_Scenario.asc']:

            # Name file names for hazard level and exposure
            hazard_filename = '%s/%s' % (TESTDATA, mmi_filename)
            exposure_filename = '%s/lembang_schools.shp' % TESTDATA

            # Calculate impact using API
            H = read_layer(hazard_filename)
            E = read_layer(exposure_filename)

            plugin_name = 'Earthquake School Damage Function'
            plugin_list = get_plugins(plugin_name)
            assert len(plugin_list) == 1
            assert plugin_list[0].keys()[0] == plugin_name

            IF = plugin_list[0][plugin_name]

            impact_filename = calculate_impact(layers=[H, E],
                                               impact_function=IF)

            # Read input data
            hazard_raster = read_layer(hazard_filename)
            A = hazard_raster.get_data()
            mmi_min, mmi_max = hazard_raster.get_extrema()

            exposure_vector = read_layer(exposure_filename)
            coordinates = exposure_vector.get_geometry()
            attributes = exposure_vector.get_data()

            # Read calculated result
            impact_vector = read_layer(impact_filename)
            icoordinates = impact_vector.get_geometry()
            iattributes = impact_vector.get_data()

            # First check that interpolated MMI was done as expected
            fid = open('%s/lembang_schools_percentage_loss_and_mmi.txt'
                       % TESTDATA)
            reference_points = []
            MMI = []
            DAM = []
            for line in fid.readlines()[1:]:
                fields = line.strip().split(',')

                lon = float(fields[4][1:-1])
                lat = float(fields[3][1:-1])
                mmi = float(fields[-1][1:-1])
                dam = float(fields[-2][1:-1])

                reference_points.append((lon, lat))
                MMI.append(mmi)
                DAM.append(dam)

            # Verify that coordinates are consistent
            msg = 'Interpolated coordinates do not match those of test data'
            assert numpy.allclose(icoordinates, reference_points), msg

            # Verify interpolated MMI with test result
            min_damage = sys.maxint
            max_damage = -min_damage
            for i in range(len(MMI)):
                lon, lat = icoordinates[i][:]
                calculated_mmi = iattributes[i]['MMI']

                if numpy.isnan(calculated_mmi):
                    continue

                # Check that interpolated points are within range
                msg = ('Interpolated mmi %f from file %s was outside '
                       'extrema: [%f, %f] at location '
                       '[%f, %f].' % (calculated_mmi, hazard_filename,
                                      mmi_min, mmi_max, lon, lat))
                assert mmi_min <= calculated_mmi <= mmi_max, msg

                # Set up some tolerances. Revise when NaN interpolation works
                if mmi_filename.startswith('Lembang_Earthquake'):
                    pct = 10
                else:
                    pct = 2

                # Check that interpolated result is within specified tolerance
                msg = ('Calculated MMI %f deviated more than %.1f%% from '
                       'what was expected %f' % (calculated_mmi, pct, MMI[i]))
                assert numpy.allclose(calculated_mmi, MMI[i],
                                      rtol=float(pct) / 100), msg

                # FIXME (Ole): Has to shorten name to 10 characters
                #              until issue #1 has been resolved.
                calculated_dam = iattributes[i]['Percent_da']
                if calculated_dam > max_damage:
                    max_damage = calculated_dam

                if calculated_dam < min_damage:
                    min_damage = calculated_dam

                ref_dam = lembang_damage_function(calculated_mmi)
                msg = ('Calculated damage was not as expected')
                assert numpy.allclose(calculated_dam, ref_dam,
                                      rtol=1.0e-12), msg

                # Test that test data is correct by calculating damage based
                # on reference MMI.
                # FIXME (Ole): UNCOMMENT WHEN WE GET THE CORRECT DATASET
                #expected_test_damage = lembang_damage_function(MMI[i])
                #msg = ('Test data is inconsistent: i = %i, MMI = %f,'
                #       'expected_test_damage = %f, '
                #       'actual_test_damage = %f' % (i, MMI[i],
                #                                    expected_test_damage,
                #                                    DAM[i]))
                #if not numpy.allclose(expected_test_damage,
                #                      DAM[i], rtol=1.0e-12):
                #    print msg

                # Note this test doesn't work, but the question is whether the
                # independent test data is correct.
                # Also small fluctuations in MMI can cause very large changes
                # in computed damage for this example.
                # print mmi, MMI[i], calculated_damage, DAM[i]
                #msg = ('Calculated damage was not as expected for point %i:'
                #       'Got %f, expected %f' % (i, calculated_dam, DAM[i]))
                #assert numpy.allclose(calculated_dam, DAM[i], rtol=0.8), msg

            assert min_damage >= 0
            assert max_damage <= 100
Example n. 38
    def test_earthquake_impact_OSM_data(self):
        """Earthquake layer interpolation to OSM building data works

        The impact function used is based on the guidelines plugin

        This test also exercises interpolation of hazard level (raster) to
        building locations (vector data).
        """

        # FIXME: Still needs some reference data to compare to
        for mmi_filename in ['Shakemap_Padang_2009.asc',
                             # Time consuming
                             #'Earthquake_Ground_Shaking.asc',
                             'Lembang_Earthquake_Scenario.asc']:

            # Name file names for hazard level and exposure
            hazard_filename = '%s/%s' % (TESTDATA, mmi_filename)
            exposure_filename = ('%s/OSM_building_polygons_20110905.shp'
                                 % TESTDATA)

            # Calculate impact using API
            H = read_layer(hazard_filename)
            E = read_layer(exposure_filename)

            plugin_name = 'Earthquake Guidelines Function'
            plugin_list = get_plugins(plugin_name)
            assert len(plugin_list) == 1
            assert plugin_list[0].keys()[0] == plugin_name

            IF = plugin_list[0][plugin_name]
            impact_filename = calculate_impact(layers=[H, E],
                                               impact_fcn=IF)

            # Read input data
            hazard_raster = read_layer(hazard_filename)
            A = hazard_raster.get_data()
            mmi_min, mmi_max = hazard_raster.get_extrema()

            exposure_vector = read_layer(exposure_filename)
            coordinates = exposure_vector.get_geometry()
            attributes = exposure_vector.get_data()

            # Read calculated result
            impact_vector = read_layer(impact_filename)
            icoordinates = impact_vector.get_geometry()
            iattributes = impact_vector.get_data()

            # Verify interpolated MMI with test result
            for i in range(len(iattributes)):
                calculated_mmi = iattributes[i]['MMI']

                if numpy.isnan(calculated_mmi):
                    continue

                # Check that interpolated points are within range
                msg = ('Interpolated mmi %f from file %s was outside '
                       'extrema: [%f, %f] at point %i '
                       % (calculated_mmi, hazard_filename,
                          mmi_min, mmi_max, i))
                assert mmi_min <= calculated_mmi <= mmi_max, msg

                calculated_dam = iattributes[i]['DMGLEVEL']
                assert calculated_dam in [1, 2, 3]
Example n. 39
    def test_tsunami_loss_use_case(self):
        """Building loss from tsunami use case works
        """

        from impact.plugins.tsunami import NEXIS_building_impact_model
        # This test merely exercises the use case as there is
        # no reference data. It does check the sanity of values as
        # far as possible.

        hazard_filename = ('%s/tsunami_max_inundation_depth_BB_'
                           'geographic.asc' % TESTDATA)
        exposure_filename = ('%s/tsunami_exposure_BB.shp' % TESTDATA)
        exposure_with_depth_filename = ('%s/tsunami_exposure_BB_'
                                        'with_depth.shp' % TESTDATA)
        reference_impact_filename = ('%s/tsunami_impact_assessment_'
                                     'BB.shp' % TESTDATA)

        # Calculate impact using API
        H = read_layer(hazard_filename)
        E = read_layer(exposure_filename)

        plugin_name = 'Tsunami Building Loss Function'
        plugin_list = get_plugins(plugin_name)
        assert len(plugin_list) == 1
        assert plugin_list[0].keys()[0] == plugin_name

        IF = plugin_list[0][plugin_name]
        impact_filename = calculate_impact(layers=[H, E],
                                           impact_fcn=IF)

        # Read calculated result
        impact_vector = read_layer(impact_filename)
        icoordinates = impact_vector.get_geometry()
        iattributes = impact_vector.get_data()
        N = len(icoordinates)

        # Ensure that calculated point locations coincide with
        # original exposure point locations
        ref_exp = read_layer(exposure_filename)
        refcoordinates = ref_exp.get_geometry()

        assert N == len(refcoordinates)
        msg = ('Coordinates of impact results do not match those of '
               'exposure data')
        assert numpy.allclose(icoordinates, refcoordinates), msg

        # Ensure that calculated point locations coincide with
        # original exposure point (with depth) locations
        ref_depth = read_layer(exposure_with_depth_filename)
        refdepth_coordinates = ref_depth.get_geometry()
        refdepth_attributes = ref_depth.get_data()
        assert N == len(refdepth_coordinates)
        msg = ('Coordinates of impact results do not match those of '
               'exposure data (with depth)')
        assert numpy.allclose(icoordinates, refdepth_coordinates), msg

        # Read reference results
        hazard_raster = read_layer(hazard_filename)
        A = hazard_raster.get_data()
        depth_min, depth_max = hazard_raster.get_extrema()

        ref_impact = read_layer(reference_impact_filename)
        refimpact_coordinates = ref_impact.get_geometry()
        refimpact_attributes = ref_impact.get_data()

        # Check for None
        for i in range(N):
            if refimpact_attributes[i] is None:
                msg = 'Element %i was None' % i
                raise Exception(msg)

        # Check sanity of calculated attributes
        for i in range(N):
            lon, lat = icoordinates[i, :]

            depth = iattributes[i]['DEPTH']

            # Ignore NaN's
            if numpy.isnan(depth):
                continue

            structural_damage = iattributes[i]['STRUCT_DAM']
            contents_damage = iattributes[i]['CONTENTS_D']
            for imp in [structural_damage, contents_damage]:
                msg = ('Percent damage was outside range: %f' % imp)
                assert 0 <= imp <= 1, msg

            structural_loss = iattributes[i]['STRUCT_LOS']
            contents_loss = iattributes[i]['CONTENTS_L']
            if depth < 0.3:
                assert structural_loss == 0.0
                assert contents_loss == 0.0
            else:
                assert structural_loss > 0.0
                assert contents_loss > 0.0

            number_of_people = iattributes[i]['NEXIS_PEOP']
            people_affected = iattributes[i]['PEOPLE_AFF']
            people_severely_affected = iattributes[i]['PEOPLE_SEV']

            if 0.01 < depth < 1.0:
                assert people_affected == number_of_people
            else:
                assert people_affected == 0

            if depth >= 1.0:
                assert people_severely_affected == number_of_people
            else:
                assert people_severely_affected == 0

            # Contents and structural damage are computed from different
            # damage curves and should therefore differ
            if depth > 0 and contents_damage > 0:
                assert contents_damage != structural_damage
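
The people-affected thresholds asserted above can be summarised as one small pure function. This is only a sketch of the rules the assertions encode (the 0.01 m and 1.0 m thresholds are taken from the assertions; the function name is illustrative), not the plugin's implementation:

# Sketch of the depth thresholds the assertions above encode
def people_affected_sketch(depth, number_of_people):
    affected = severely_affected = 0
    if 0.01 < depth < 1.0:
        affected = number_of_people
    if depth >= 1.0:
        severely_affected = number_of_people
    return affected, severely_affected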
Example n. 40
    def test_earthquake_fatality_estimation_allen(self):
        """Fatalities from ground shaking can be computed correctly 1
           using aligned rasters
        """

        # Name file names for hazard level, exposure and expected fatalities
        hazard_filename = '%s/Earthquake_Ground_Shaking_clip.tif' % TESTDATA
        exposure_filename = '%s/Population_2010_clip.tif' % TESTDATA

        # Calculate impact using API
        H = read_layer(hazard_filename)
        E = read_layer(exposure_filename)

        plugin_name = 'Earthquake Fatality Function'
        plugin_list = get_plugins(plugin_name)
        assert len(plugin_list) == 1
        assert plugin_list[0].keys()[0] == plugin_name

        IF = plugin_list[0][plugin_name]

        # Call calculation engine
        impact_filename = calculate_impact(layers=[H, E],
                                           impact_fcn=IF)

        # Do calculation manually and check result
        hazard_raster = read_layer(hazard_filename)
        H = hazard_raster.get_data(nan=0)

        exposure_raster = read_layer(exposure_filename)
        E = exposure_raster.get_data(nan=0)

        # Calculate impact manually
        a = 0.97429
        b = 11.037
        F = 10 ** (a * H - b) * E

        # Verify correctness of result
        calculated_raster = read_layer(impact_filename)
        C = calculated_raster.get_data(nan=0)

        # Compare shape and extrema
        msg = ('Shape of calculated raster differs from reference raster: '
               'C=%s, F=%s' % (C.shape, F.shape))
        assert C.shape == F.shape, msg

        msg = ('Minimum of calculated raster differs from reference raster: '
               'C=%s, F=%s' % (numpy.min(C), numpy.min(F)))
        assert numpy.allclose(numpy.min(C), numpy.min(F),
                              rtol=1e-12, atol=1e-12), msg
        msg = ('Maximum of calculated raster differs from reference raster: '
               'C=%s, F=%s' % (numpy.max(C), numpy.max(F)))
        assert numpy.allclose(numpy.max(C), numpy.max(F),
                              rtol=1e-12, atol=1e-12), msg

        # Compare every single value numerically
        msg = 'Array values of written raster array were not as expected'
        assert numpy.allclose(C, F, rtol=1e-12, atol=1e-12), msg

        # Check that extrema are in range
        xmin, xmax = calculated_raster.get_extrema()
        assert numpy.alltrue(C >= xmin)
        assert numpy.alltrue(C <= xmax)
        assert numpy.alltrue(C >= 0)
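
For reference, the manual verification above reduces to applying the log-linear fatality model cell by cell. A standalone sketch (the function name is illustrative; the coefficients a and b are the ones used in the test):

import numpy

def allen_fatalities_sketch(H, E, a=0.97429, b=11.037):
    # H: MMI per grid cell, E: population per grid cell (aligned rasters)
    # F = 10 ** (a * H - b) * E, applied element-wise
    return 10 ** (a * H - b) * E

# E.g. MMI 8 over 1000 people gives 10 ** (0.97429 * 8 - 11.037) * 1000,
# i.e. about 0.57 expected fatalities for that cell.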
Example n. 41
    def test_riab_interpolation(self):
        """Interpolation using Raster and Vector objects
        """

        # Create test data
        lon_ul = 100  # Longitude of upper left corner
        lat_ul = 10   # Latitude of upper left corner
        numlon = 8    # Number of longitudes
        numlat = 5    # Number of latitudes
        dlon = 1
        dlat = -1

        # Define array where latitudes are rows and longitude columns
        A = numpy.zeros((numlat, numlon))

        # Establish coordinates for lower left corner
        lat_ll = lat_ul - numlat
        lon_ll = lon_ul

        # Define pixel centers along each direction
        longitudes = numpy.linspace(lon_ll + 0.5,
                                    lon_ll + numlon - 0.5,
                                    numlon)
        latitudes = numpy.linspace(lat_ll + 0.5,
                                   lat_ll + numlat - 0.5,
                                   numlat)

        # Define raster with latitudes going bottom-up (south to north).
        # Longitudes go left-right (west to east)
        for i in range(numlat):
            for j in range(numlon):
                A[numlat - 1 - i, j] = linear_function(longitudes[j],
                                                       latitudes[i])

        # Create bilinear interpolation function
        F = raster_spline(longitudes, latitudes, A)

        # Write array to a raster file
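        # GDAL-style geotransform: (top-left lon, pixel width, row rotation,
        #                           top-left lat, column rotation, pixel height)
        # Pixel height dlat is negative because rows run north to south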
        geotransform = (lon_ul, dlon, 0, lat_ul, 0, dlat)
        projection = ('GEOGCS["GCS_WGS_1984",'
                      'DATUM["WGS_1984",'
                      'SPHEROID["WGS_1984",6378137.0,298.257223563]],'
                      'PRIMEM["Greenwich",0.0],'
                      'UNIT["Degree",0.0174532925199433]]')

        raster_filename = unique_filename(suffix='.tif')
        write_raster_data(A,
                          projection,
                          geotransform,
                          raster_filename)

        # Write test interpolation point to a vector file
        coordinates = []
        for xi in longitudes:
            for eta in latitudes:
                coordinates.append((xi, eta))

        vector_filename = unique_filename(suffix='.shp')
        write_point_data(data=None,
                         projection=projection,
                         geometry=coordinates,
                         filename=vector_filename)

        # Read both datasets back in
        R = read_layer(raster_filename)
        V = read_layer(vector_filename)

        # Then test that axes and data returned by R are correct
        x, y = R.get_geometry()
        msg = 'X axis was %s, should have been %s' % (x, longitudes)
        assert numpy.allclose(longitudes, x), msg
        msg = 'Y axis was %s, should have been %s' % (y, latitudes)
        assert numpy.allclose(latitudes, y), msg
        AA = R.get_data()
        msg = 'Raster data was %s, should have been %s' % (AA, A)
        assert numpy.allclose(AA, A), msg

        # Test riab's interpolation function
        I = R.interpolate(V, name='value')
        Icoordinates = I.get_geometry()
        Iattributes = I.get_data()

        assert numpy.allclose(Icoordinates, coordinates)

        # Test that interpolated points are correct
        for i, (xi, eta) in enumerate(Icoordinates):

            z = Iattributes[i]['value']
            #print xi, eta, z, linear_function(xi, eta)
            assert numpy.allclose(z, linear_function(xi, eta),
                                  rtol=1e-12)

        # FIXME (Ole): Need test for values outside grid.
        #              They should be NaN or something

        # Cleanup
        # FIXME (Ole): Shape files are a collection of files. How to remove?
        os.remove(vector_filename)
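
The helper linear_function is not shown in this listing. The exact-match assertion above works because bilinear interpolation reproduces any function that is linear in each coordinate exactly, so a plausible stand-in looks like this (the coefficients are illustrative):

# Hypothetical stand-in for linear_function; bilinear interpolation is
# exact for functions linear in each coordinate, which is why the test
# can assert agreement to rtol=1e-12
def linear_function_sketch(x, y):
    return 2.0 * x + 3.0 * y + 5.0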
Example n. 42
    def test_reading_and_writing_of_real_rasters(self):
        """Rasters can be read and written correctly
        """

        for rastername in ['Earthquake_Ground_Shaking_clip.tif',
                           'Population_2010_clip.tif',
                           'shakemap_padang_20090930.asc',
                           'population_padang_1.asc',
                           'population_padang_2.asc']:

            filename = '%s/%s' % (TESTDATA, rastername)
            R1 = read_layer(filename)

            # Check consistency of raster
            A1 = R1.get_data()
            M, N = A1.shape

            msg = ('Dimensions of raster array do not match those of '
                   'raster file %s' % R1.filename)
            assert M == R1.rows, msg
            assert N == R1.columns, msg

            # Write back to new (tif) file
            out_filename = unique_filename(suffix='.tif')
            write_raster_data(A1,
                              R1.get_projection(),
                              R1.get_geotransform(),
                              out_filename)

            # Read again and check consistency
            R2 = read_layer(out_filename)

            msg = ('Dimensions of written raster array do not match those '
                   'of input raster file\n')
            msg += ('    Dimensions of input file '
                    '%s:  (%s, %s)\n' % (R1.filename, M, N))
            msg += ('    Dimensions of output file %s: '
                    '(%s, %s)' % (R2.filename, R2.rows, R2.columns))

            assert M == R2.rows, msg
            assert N == R2.columns, msg

            A2 = R2.get_data()

            assert numpy.allclose(numpy.min(A1), numpy.min(A2))
            assert numpy.allclose(numpy.max(A1), numpy.max(A2))

            msg = 'Array values of written raster array were not as expected'
            assert numpy.allclose(A1, A2), msg

            msg = 'Geotransforms were different'
            assert R1.get_geotransform() == R2.get_geotransform(), msg

            p1 = R1.get_projection(proj4=True)
            p2 = R2.get_projection(proj4=True)
            msg = 'Projections were different: %s != %s' % (p1, p2)
            assert p1 == p2, msg

            # Use overridden == and != to verify
            assert R1 == R2
            assert not R1 != R2

            # Check that equality raises exception when type is wrong
            try:
                R1 == Vector()
            except TypeError:
                pass
            else:
                msg = 'Should have raised TypeError'
                raise Exception(msg)
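
The final checks above rely on the raster class overriding == and != and raising TypeError for incompatible operands. A sketch of that comparison protocol (an illustrative class, not the project's actual Raster):

import numpy

class RasterSketch(object):
    def __init__(self, data, geotransform, projection):
        self.data = data
        self.geotransform = geotransform
        self.projection = projection

    def __eq__(self, other):
        # Refuse comparison to unrelated types, as the test expects
        if not isinstance(other, RasterSketch):
            raise TypeError('Rasters can only be compared to rasters')
        return (self.geotransform == other.geotransform and
                self.projection == other.projection and
                numpy.allclose(self.data, other.data))

    def __ne__(self, other):
        return not self == other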
Example n. 43
    def test_data_resampling_example(self):
        """Raster data is unchanged when going through geonode

        """

        # Name file names for hazard level, exposure and expected fatalities
        hazard_filename = ('%s/maumere_aos_depth_20m_land_wgs84.asc'
                           % TESTDATA)
        exposure_filename = ('%s/maumere_pop_prj.shp' % TESTDATA)

        #------------
        # Hazard data
        #------------
        # Read hazard input data for reference
        H_ref = read_layer(hazard_filename)

        A_ref = H_ref.get_data()
        depth_min_ref, depth_max_ref = H_ref.get_extrema()

        # Upload to internal geonode
        hazard_layer = save_to_geonode(hazard_filename, user=self.user)
        hazard_name = '%s:%s' % (hazard_layer.workspace, hazard_layer.name)

        # Download data again
        bbox = get_bounding_box_string(hazard_filename)  # The biggest
        H = download(INTERNAL_SERVER_URL, hazard_name, bbox)

        A = H.get_data()
        depth_min, depth_max = H.get_extrema()

        # FIXME (Ole): The layer read from file is single precision only:
        # Issue #17
        # This may explain why the interpolation below produces slightly
        # different results. The layer read from file is single precision,
        # which may be due to the way it is converted from ASC to TIF.
        # In other words, the problem may be in raster.write_to_file.
        # Float64 is specified there, so this is a mystery.
        #print 'A', A.dtype          # Double precision
        #print 'A_ref', A_ref.dtype  # Single precision

        # Compare extrema to values from numpy array
        assert numpy.allclose(depth_max, numpy.nanmax(A),
                              rtol=1.0e-12, atol=1.0e-12)

        assert numpy.allclose(depth_max_ref, numpy.nanmax(A_ref),
                              rtol=1.0e-12, atol=1.0e-12)

        # Compare to reference
        assert numpy.allclose([depth_min, depth_max],
                              [depth_min_ref, depth_max_ref],
                              rtol=1.0e-12, atol=1.0e-12)

        # Compare extrema to values read off QGIS for this layer
        assert numpy.allclose([depth_min, depth_max], [0.0, 16.68],
                              rtol=1.0e-6, atol=1.0e-10)

        # Investigate difference visually
        #from matplotlib.pyplot import matshow, show
        #matshow(A)
        #matshow(A_ref)
        #matshow(A - A_ref)
        #show()

        #print
        for i in range(A.shape[0]):
            for j in range(A.shape[1]):
                if not numpy.isnan(A[i, j]):
                    err = abs(A[i, j] - A_ref[i, j])
                    if err > 0:
                        msg = ('%i, %i: %.15f, %.15f, %.15f'
                               % (i, j, A[i, j], A_ref[i, j], err))
                        raise Exception(msg)
                    #if A[i,j] > 16:
                    #    print i, j, A[i, j], A_ref[i, j]

        # Compare elements (nan & numbers)
        id_nan = numpy.isnan(A)
        id_nan_ref = numpy.isnan(A_ref)
        assert numpy.all(id_nan == id_nan_ref)
        assert numpy.allclose(A[~id_nan], A_ref[~id_nan],
                              rtol=1.0e-15, atol=1.0e-15)

        #print 'MAX', A[245, 283], A_ref[245, 283]
        #print 'MAX: %.15f %.15f %.15f' %(A[245, 283], A_ref[245, 283])
        assert numpy.allclose(A[245, 283], A_ref[245, 283],
                              rtol=1.0e-15, atol=1.0e-15)

        #--------------
        # Exposure data
        #--------------
        # Read exposure input data for reference
        E_ref = read_layer(exposure_filename)

        # Upload to internal geonode
        exposure_layer = save_to_geonode(exposure_filename, user=self.user)
        exposure_name = '%s:%s' % (exposure_layer.workspace,
                                   exposure_layer.name)

        # Download data again
        E = download(INTERNAL_SERVER_URL, exposure_name, bbox)

        # Check exposure data against reference
        coordinates = E.get_geometry()
        coordinates_ref = E_ref.get_geometry()
        assert numpy.allclose(coordinates, coordinates_ref,
                              rtol=1.0e-12, atol=1.0e-12)

        attributes = E.get_data()
        attributes_ref = E_ref.get_data()
        for i, att in enumerate(attributes):
            att_ref = attributes_ref[i]
            for key in att:
                assert att[key] == att_ref[key]

        # Test riab's interpolation function
        I = H.interpolate(E, name='depth')
        icoordinates = I.get_geometry()

        I_ref = H_ref.interpolate(E_ref, name='depth')
        icoordinates_ref = I_ref.get_geometry()

        assert numpy.allclose(coordinates,
                              icoordinates,
                              rtol=1.0e-12, atol=1.0e-12)
        assert numpy.allclose(coordinates,
                              icoordinates_ref,
                              rtol=1.0e-12, atol=1.0e-12)

        iattributes = I.get_data()
        assert numpy.allclose(icoordinates, coordinates)

        N = len(icoordinates)
        assert N == 891

        # Set tolerance for single precision until issue #17 has been fixed
        # It appears that the single precision leads to larger interpolation
        # errors
        rtol_issue17 = 2.0e-3
        atol_issue17 = 1.0e-4

        # Verify interpolated values with test result
        for i in range(N):

            interpolated_depth_ref = I_ref.get_data()[i]['depth']
            interpolated_depth = iattributes[i]['depth']

            assert nanallclose(interpolated_depth,
                               interpolated_depth_ref,
                               rtol=rtol_issue17, atol=atol_issue17)

            pointid = attributes[i]['POINTID']

            if pointid == 263:

                #print i, pointid, attributes[i],
                #print interpolated_depth, coordinates[i]

                # Check that location is correct
                assert numpy.allclose(coordinates[i],
                                      [122.20367299, -8.61300358],
                                      rtol=1.0e-7, atol=1.0e-12)

                # This point is known to be outside the inundation area,
                # so the interpolated depth should be near zero
                assert numpy.allclose(interpolated_depth, 0.0,
                                      rtol=1.0e-12, atol=1.0e-12)

            if pointid == 148:
                # Check that location is correct
                #print coordinates[i]
                assert numpy.allclose(coordinates[i],
                                      [122.2045912, -8.608483265],
                                      rtol=1.0e-7, atol=1.0e-12)

                # This is in an inundated area with surrounding depths of
                # 4.531, 3.911
                # 2.675, 2.583
                assert interpolated_depth < 4.531
                assert interpolated_depth < 3.911
                assert interpolated_depth > 2.583
                assert interpolated_depth > 2.675

                #print interpolated_depth
                # This is a characterisation test for bilinear interpolation
                assert numpy.allclose(interpolated_depth, 3.62477215491,
                                      rtol=rtol_issue17, atol=1.0e-12)

            # Check that interpolated points are within range
            msg = ('Interpolated depth %f at point %i was outside extrema: '
                   '[%f, %f]. ' % (interpolated_depth, i,
                                   depth_min, depth_max))

            if not numpy.isnan(interpolated_depth):
                assert depth_min <= interpolated_depth <= depth_max, msg
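
nanallclose, used above, compares values while requiring NaNs to occupy the same positions and the remaining entries to agree within tolerance. A minimal sketch consistent with that usage (the real helper may differ):

import numpy

def nanallclose_sketch(x, y, rtol=1.0e-5, atol=1.0e-8):
    # NaNs must match position for position; other values use allclose
    x = numpy.atleast_1d(numpy.asarray(x, dtype=float))
    y = numpy.atleast_1d(numpy.asarray(y, dtype=float))
    nx, ny = numpy.isnan(x), numpy.isnan(y)
    if not numpy.all(nx == ny):
        return False
    return numpy.allclose(x[~nx], y[~nx], rtol=rtol, atol=atol)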
Example n. 44
    def test_padang_building_examples(self):
        """Padang building impact calculation works through the API
        """

        # Test for a range of hazard layers
        for mmi_filename in ['Shakemap_Padang_2009.asc']:
            #'Lembang_Earthquake_Scenario.asc']:

            # Upload input data
            hazardfile = os.path.join(TESTDATA, mmi_filename)
            hazard_layer = save_to_geonode(hazardfile, user=self.user)
            hazard_name = '%s:%s' % (hazard_layer.workspace, hazard_layer.name)

            exposurefile = os.path.join(TESTDATA, 'Padang_WGS84.shp')
            exposure_layer = save_to_geonode(exposurefile, user=self.user)
            exposure_name = '%s:%s' % (exposure_layer.workspace,
                                       exposure_layer.name)

            # Call calculation routine

            # FIXME (Ole): The system freaks out if there are spaces in
            #              bbox string. Please let us catch that and deal
            #              nicely with it - also do this in download()
            bbox = '96.956, -5.51, 104.63933, 2.289497'
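            # One possible defensive normalisation (hypothetical, not the
            # project's API) would strip the whitespace up front:
            #     bbox = ','.join(x.strip() for x in bbox.split(','))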

            with warnings.catch_warnings():
                warnings.simplefilter('ignore')

                c = Client()
                rv = c.post('/impact/api/calculate/', data=dict(
                            hazard_server=INTERNAL_SERVER_URL,
                            hazard=hazard_name,
                            exposure_server=INTERNAL_SERVER_URL,
                            exposure=exposure_name,
                            bbox=bbox,
                            impact_function='Padang Earthquake ' \
                                            'Building Damage Function',
                            keywords='test,buildings,padang',
                            ))

                self.assertEqual(rv.status_code, 200)
                self.assertEqual(rv['Content-Type'], 'application/json')
                data = json.loads(rv.content)
                assert 'hazard_layer' in data.keys()
                assert 'exposure_layer' in data.keys()
                assert 'run_duration' in data.keys()
                assert 'run_date' in data.keys()
                assert 'layer' in data.keys()

                # Download result and check
                layer_name = data['layer'].split('/')[-1]

                result_layer = download(INTERNAL_SERVER_URL, layer_name, bbox)
                assert os.path.exists(result_layer.filename)

                # Read hazard data for reference
                hazard_raster = read_layer(hazardfile)
                A = hazard_raster.get_data()
                mmi_min, mmi_max = hazard_raster.get_extrema()

                # Read calculated result
                impact_vector = read_layer(result_layer.filename)
                coordinates = impact_vector.get_geometry()
                attributes = impact_vector.get_data()

                # Verify calculated result
                count = 0
                verified_count = 0
                for i in range(len(attributes)):
                    lon, lat = coordinates[i][:]
                    calculated_mmi = attributes[i]['MMI']

                    if calculated_mmi == 0.0:
                        # FIXME (Ole): Some points have MMI==0 here.
                        # Weird but not a show stopper
                        continue

                    # Check that interpolated points are within range
                    msg = ('Interpolated mmi %f was outside extrema: '
                           '[%f, %f] at location '
                           '[%f, %f]. ' %
                           (calculated_mmi, mmi_min, mmi_max, lon, lat))
                    assert mmi_min <= calculated_mmi <= mmi_max, msg

                    building_class = attributes[i]['TestBLDGCl']

                    # Check calculated damage
                    calculated_dam = attributes[i]['DAMAGE']
                    verified_dam = padang_check_results(
                        calculated_mmi, building_class)
                    #print calculated_mmi, building_class, calculated_dam
                    if verified_dam:
                        msg = ('Calculated damage was not as expected '
                               'for hazard layer %s. I got %f '
                               'but expected %f' %
                               (hazardfile, calculated_dam, verified_dam))
                        assert numpy.allclose(calculated_dam,
                                              verified_dam,
                                              rtol=1.0e-4), msg
                        verified_count += 1
                    count += 1

                msg = ('No points were verified in output. Please create '
                       'a table with reference data')
                assert verified_count > 0, msg
                msg = 'Number of buildings was not 3896.'
                assert count == 3896, msg
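
padang_check_results, called above, returns a reference damage value for a given MMI and building class, or a falsy value when no reference exists. A sketch of such a lookup (the table is a placeholder; real reference values are not reproduced here):

# Hypothetical sketch of a reference-lookup helper like
# padang_check_results; REFERENCE_TABLE would hold verified damage
# values keyed by (rounded MMI, building class)
REFERENCE_TABLE = {}

def padang_check_results_sketch(mmi, building_class):
    # Return expected damage if known, else None so the caller skips
    return REFERENCE_TABLE.get((round(mmi, 2), building_class))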