Example #1
def calculate(request, save_output=save_to_geonode):
    start = datetime.datetime.now()

    if request.method == 'GET':
        # FIXME: Add a basic form here to be able to generate the POST request.
        return HttpResponse('This should be accessed by robots, not humans. '
                            'In other words, use HTTP POST instead of GET.')
    elif request.method == 'POST':
        data = request.POST
        impact_function_name = data['impact_function']
        hazard_server = data['hazard_server']
        hazard_layer = data['hazard']
        exposure_server = data['exposure_server']
        exposure_layer = data['exposure']
        bbox = data['bbox']
        keywords = data['keywords']

    if request.user.is_anonymous():
        theuser = get_valid_user()
    else:
        theuser = request.user

    # Create entry in database
    calculation = Calculation(user=theuser,
                              run_date=start,
                              hazard_server=hazard_server,
                              hazard_layer=hazard_layer,
                              exposure_server=exposure_server,
                              exposure_layer=exposure_layer,
                              impact_function=impact_function_name,
                              success=False)

    try:

        # Input checks
        msg = 'This cannot happen :-)'
        assert isinstance(bbox, basestring), msg

        check_bbox_string(bbox)

        # Find the intersection of bounding boxes for viewport,
        # hazard and exposure.
        vpt_bbox = bboxstring2list(bbox)
        haz_bbox = get_metadata(hazard_server,
                                hazard_layer)['bounding_box']
        exp_bbox = get_metadata(exposure_server,
                                exposure_layer)['bounding_box']

        # Impose minimum bounding box size (as per issue #101).
        # FIXME (Ole): This will need to be revisited in conjunction with
        # raster resolutions at some point.
        min_res = 0.00833334
        eps = 1.0e-1
        vpt_bbox = minimal_bounding_box(vpt_bbox, min_res, eps=eps)
        haz_bbox = minimal_bounding_box(haz_bbox, min_res, eps=eps)
        exp_bbox = minimal_bounding_box(exp_bbox, min_res, eps=eps)

        # New bounding box for data common to hazard, exposure and viewport
        # Download only data within this intersection
        intersection = bbox_intersection(vpt_bbox, haz_bbox, exp_bbox)
        if intersection is None:
            # Bounding boxes did not overlap
            msg = ('Bounding boxes of hazard data, exposure data and '
                   'viewport did not overlap, so no computation was '
                   'done. Please try again.')
            logger.info(msg)
            raise Exception(msg)

        bbox = bboxlist2string(intersection)

        plugin_list = get_plugins(impact_function_name)
        _, impact_function = plugin_list[0].items()[0]
        impact_function_source = inspect.getsource(impact_function)

        calculation.impact_function_source = impact_function_source
        calculation.bbox = bbox

        calculation.save()

        msg = 'Performing requested calculation'
        logger.info(msg)

        # Download selected layer objects
        msg = ('- Downloading hazard layer %s from %s'
               % (hazard_layer, hazard_server))
        logger.info(msg)

        H = download(hazard_server, hazard_layer, bbox)

        msg = ('- Downloading exposure layer %s from %s'
               % (exposure_layer, exposure_server))
        logger.info(msg)
        E = download(exposure_server, exposure_layer, bbox)

        # Calculate result using specified impact function
        msg = ('- Calculating impact using %s' % impact_function)
        logger.info(msg)

        impact_filename = calculate_impact(layers=[H, E],
                                           impact_fcn=impact_function)

        # Upload result to internal GeoServer
        msg = ('- Uploading impact layer %s' % impact_filename)
        logger.info(msg)
        result = save_output(impact_filename,
                             title='output_%s' % start.isoformat(),
                             user=theuser)
    except Exception as e:
        # FIXME: Reimplement error saving for calculation
        logger.error(e)
        errors = str(e)
        trace = exception_format(e)
        calculation.errors = errors
        calculation.stacktrace = trace
        calculation.save()
        jsondata = json.dumps({'errors': errors, 'stacktrace': trace})
        return HttpResponse(jsondata, mimetype='application/json')
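
The view above only accepts HTTP POST. As a rough, hypothetical sketch (not part of the project), it could be exercised with Django's test client; the POST field names mirror those read from request.POST above, while the URL and the values are placeholder assumptions.

from django.test import Client

client = Client()
response = client.post('/impact/calculate/',   # assumed URL pattern
                       {'impact_function': 'Earthquake Fatality Function',
                        'hazard_server': 'http://localhost:8001/geoserver/ows',
                        'hazard': 'hazard_layer_name',
                        'exposure_server': 'http://localhost:8001/geoserver/ows',
                        'exposure': 'exposure_layer_name',
                        'bbox': '105.3,-8.5,110.0,-5.5',
                        'keywords': 'category:hazard'})

# On success the body is JSON describing the calculation; on failure it
# carries 'errors' and 'stacktrace' as produced in the except clause above.
print(response.status_code)
print(response.content)
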
Example #2
    def test_tsunami_loss_use_case(self):
        """Building loss from tsunami use case works
        """

        from impact.plugins.tsunami import NEXIS_building_impact_model
        # This test merely exercises the use case as there is
        # no reference data. It does check the sanity of values as
        # far as possible.

        hazard_filename = ('%s/tsunami_max_inundation_depth_BB_'
                           'geographic.asc' % TESTDATA)
        exposure_filename = ('%s/tsunami_exposure_BB.shp' % TESTDATA)
        exposure_with_depth_filename = ('%s/tsunami_exposure_BB_'
                                        'with_depth.shp' % TESTDATA)
        reference_impact_filename = ('%s/tsunami_impact_assessment_'
                                     'BB.shp' % TESTDATA)

        # Calculate impact using API
        H = read_layer(hazard_filename)
        E = read_layer(exposure_filename)

        plugin_name = 'Tsunami Building Loss Function'
        plugin_list = get_plugins(plugin_name)
        assert len(plugin_list) == 1
        assert plugin_list[0].keys()[0] == plugin_name

        IF = plugin_list[0][plugin_name]
        impact_filename = calculate_impact(layers=[H, E],
                                           impact_function=IF)

        # Read calculated result
        impact_vector = read_layer(impact_filename)
        icoordinates = impact_vector.get_geometry()
        iattributes = impact_vector.get_data()
        N = len(icoordinates)

        # Ensure that calculated point locations coincide with
        # original exposure point locations
        ref_exp = read_layer(exposure_filename)
        refcoordinates = ref_exp.get_geometry()

        assert N == len(refcoordinates)
        msg = ('Coordinates of impact results do not match those of '
               'exposure data')
        assert numpy.allclose(icoordinates, refcoordinates), msg

        # Ensure that calculated point locations coincide with
        # original exposure point (with depth) locations
        ref_depth = read_layer(exposure_with_depth_filename)
        refdepth_coordinates = ref_depth.get_geometry()
        refdepth_attributes = ref_depth.get_data()
        assert N == len(refdepth_coordinates)
        msg = ('Coordinates of impact results do not match those of '
               'exposure data (with depth)')
        assert numpy.allclose(icoordinates, refdepth_coordinates), msg

        # Read reference results
        hazard_raster = read_layer(hazard_filename)
        A = hazard_raster.get_data()
        depth_min, depth_max = hazard_raster.get_extrema()

        ref_impact = read_layer(reference_impact_filename)
        refimpact_coordinates = ref_impact.get_geometry()
        refimpact_attributes = ref_impact.get_data()

        # Check for None
        for i in range(N):
            if refimpact_attributes[i] is None:
                msg = 'Element %i was None' % i
                raise Exception(msg)

        # Check sanity of calculated attributes
        for i in range(N):
            lon, lat = icoordinates[i, :]

            depth = iattributes[i]['DEPTH']

            # Ignore NaN's
            if numpy.isnan(depth):
                continue

            structural_damage = iattributes[i]['STRUCT_DAM']
            contents_damage = iattributes[i]['CONTENTS_D']
            for imp in [structural_damage, contents_damage]:
                msg = ('Percent damage was outside range: %f' % imp)
                assert 0 <= imp <= 1, msg

            structural_loss = iattributes[i]['STRUCT_LOS']
            contents_loss = iattributes[i]['CONTENTS_L']
            if depth < 0.3:
                assert structural_loss == 0.0
                assert contents_loss == 0.0
            else:
                assert structural_loss > 0.0
                assert contents_loss > 0.0

            number_of_people = iattributes[i]['NEXIS_PEOP']
            people_affected = iattributes[i]['PEOPLE_AFF']
            people_severely_affected = iattributes[i]['PEOPLE_SEV']

            if 0.01 < depth < 1.0:
                assert people_affected == number_of_people
            else:
                assert people_affected == 0

            if depth >= 1.0:
                assert people_severely_affected == number_of_people
            else:
                assert people_severely_affected == 0

            # Contents and structural damage are computed from different
            # damage curves and should therefore differ
            if depth > 0 and contents_damage > 0:
                assert contents_damage != structural_damage
Example #3
    def test_tephra_load_impact(self):
        """Hypothetical tephra load scenario can be computed

        This test also exercises reprojection of UTM data
        """

        # File names for hazard level and exposure

        # FIXME - when we know how to reproject, replace hazard
        # file with UTM version (i.e. without _geographic).
        hazard_filename = '%s/%s/%s' % (DEMODATA, 'hazard',
                                        'Ashload_Gede_VEI4_geographic.asc')
        exposure_filename = '%s/lembang_schools.shp' % TESTDATA

        # Calculate impact using API
        H = read_layer(hazard_filename)
        E = read_layer(exposure_filename)

        plugin_name = 'Tephra Impact Function'
        plugin_list = get_plugins(plugin_name)
        assert len(plugin_list) == 1
        assert plugin_list[0].keys()[0] == plugin_name

        IF = plugin_list[0][plugin_name]
        impact_filename = calculate_impact(layers=[H, E],
                                           impact_function=IF)

        # Read input data
        hazard_raster = read_layer(hazard_filename)
        A = hazard_raster.get_data()
        load_min, load_max = hazard_raster.get_extrema()

        exposure_vector = read_layer(exposure_filename)
        coordinates = exposure_vector.get_geometry()
        attributes = exposure_vector.get_data()

        # Read calculated result
        impact_vector = read_layer(impact_filename)
        coordinates = impact_vector.get_geometry()
        attributes = impact_vector.get_data()

        # FIXME: Remove this tolerance when interpolation is better (issue #19)
        tol = 1.0e-8

        # Test that results are as expected
        # FIXME: Change test when we decide what values should actually be
        #        calculated :-) :-) :-)
        for a in attributes:
            load = a['Ashload']
            impact = a['Percent_da']

            # Test interpolation
            msg = ('Load %.15f was outside bounds [%f, %f]'
                   % (load, load_min, load_max))
            if not numpy.isnan(load):
                assert load_min - tol <= load <= load_max, msg

            # Test calculated values
            if 0.01 <= load < 90.0:
                assert impact == 25
            elif 90.0 <= load < 150.0:
                assert impact == 50
            elif 150.0 <= load < 300.0:
                assert impact == 75
            elif load >= 300.0:
                assert impact == 100
            else:
                assert impact == 0
Example #4
    def test_earthquake_damage_schools(self):
        """Lembang building damage from ground shaking works

        This test also exercises interpolation of hazard level (raster) to
        building locations (vector data).
        """

        for mmi_filename in ['lembang_mmi_hazmap.asc',
                             'Earthquake_Ground_Shaking_clip.tif',  # NaN's
                             'Lembang_Earthquake_Scenario.asc']:

            # Name file names for hazard level and exposure
            hazard_filename = '%s/%s' % (TESTDATA, mmi_filename)
            exposure_filename = '%s/lembang_schools.shp' % TESTDATA

            # Calculate impact using API
            H = read_layer(hazard_filename)
            E = read_layer(exposure_filename)

            plugin_name = 'Earthquake School Damage Function'
            plugin_list = get_plugins(plugin_name)
            assert len(plugin_list) == 1
            assert plugin_list[0].keys()[0] == plugin_name

            IF = plugin_list[0][plugin_name]

            impact_filename = calculate_impact(layers=[H, E],
                                               impact_function=IF)

            # Read input data
            hazard_raster = read_layer(hazard_filename)
            A = hazard_raster.get_data()
            mmi_min, mmi_max = hazard_raster.get_extrema()

            exposure_vector = read_layer(exposure_filename)
            coordinates = exposure_vector.get_geometry()
            attributes = exposure_vector.get_data()

            # Read calculated result
            impact_vector = read_layer(impact_filename)
            icoordinates = impact_vector.get_geometry()
            iattributes = impact_vector.get_data()

            # First check that interpolated MMI was done as expected
            fid = open('%s/lembang_schools_percentage_loss_and_mmi.txt'
                       % TESTDATA)
            reference_points = []
            MMI = []
            DAM = []
            for line in fid.readlines()[1:]:
                fields = line.strip().split(',')

                lon = float(fields[4][1:-1])
                lat = float(fields[3][1:-1])
                mmi = float(fields[-1][1:-1])
                dam = float(fields[-2][1:-1])

                reference_points.append((lon, lat))
                MMI.append(mmi)
                DAM.append(dam)

            # Verify that coordinates are consistent
            msg = 'Interpolated coordinates do not match those of test data'
            assert numpy.allclose(icoordinates, reference_points), msg

            # Verify interpolated MMI with test result
            min_damage = sys.maxint
            max_damage = -min_damage
            for i in range(len(MMI)):
                lon, lat = icoordinates[i][:]
                calculated_mmi = iattributes[i]['MMI']

                if numpy.isnan(calculated_mmi):
                    continue

                # Check that interpolated points are within range
                msg = ('Interpolated mmi %f from file %s was outside '
                       'extrema: [%f, %f] at location '
                       '[%f, %f].' % (calculated_mmi, hazard_filename,
                                      mmi_min, mmi_max, lon, lat))
                assert mmi_min <= calculated_mmi <= mmi_max, msg

                # Set up some tolerances. Revise when NaN interpolation works
                if mmi_filename.startswith('Lembang_Earthquake'):
                    pct = 10
                else:
                    pct = 2

                # Check that interpolated result is within specified tolerance
                msg = ('Calculated MMI %f deviated more than %.1f%% from '
                       'what was expected %f' % (calculated_mmi, pct, MMI[i]))
                assert numpy.allclose(calculated_mmi, MMI[i],
                                      rtol=float(pct) / 100), msg

                # FIXME (Ole): Name has to be shortened to 10 characters
                #              until issue #1 has been resolved.
                calculated_dam = iattributes[i]['Percent_da']
                if calculated_dam > max_damage:
                    max_damage = calculated_dam

                if calculated_dam < min_damage:
                    min_damage = calculated_dam

                ref_dam = lembang_damage_function(calculated_mmi)
                msg = ('Calculated damage was not as expected')
                assert numpy.allclose(calculated_dam, ref_dam,
                                      rtol=1.0e-12), msg

                # Test that test data is correct by calculating damage based
                # on reference MMI.
                # FIXME (Ole): UNCOMMENT WHEN WE GET THE CORRECT DATASET
                #expected_test_damage = lembang_damage_function(MMI[i])
                #msg = ('Test data is inconsistent: i = %i, MMI = %f,'
                #       'expected_test_damage = %f, '
                #       'actual_test_damage = %f' % (i, MMI[i],
                #                                    expected_test_damage,
                #                                    DAM[i]))
                #if not numpy.allclose(expected_test_damage,
                #                      DAM[i], rtol=1.0e-12):
                #    print msg

                # Note this test doesn't work, but the question is whether the
                # independent test data is correct.
                # Also small fluctuations in MMI can cause very large changes
                # in computed damage for this example.
                # print mmi, MMI[i], calculated_damage, DAM[i]
                #msg = ('Calculated damage was not as expected for point %i:'
                #       'Got %f, expected %f' % (i, calculated_dam, DAM[i]))
                #assert numpy.allclose(calculated_dam, DAM[i], rtol=0.8), msg

            assert min_damage >= 0
            assert max_damage <= 100
Example #5
    def test_earthquake_fatality_estimation_allen(self):
        """Fatalities from ground shaking can be computed correctly 1
           using aligned rasters
        """

        # Name file names for hazard level, exposure and expected fatalities
        hazard_filename = '%s/Earthquake_Ground_Shaking_clip.tif' % TESTDATA
        exposure_filename = '%s/Population_2010_clip.tif' % TESTDATA

        # Calculate impact using API
        H = read_layer(hazard_filename)
        E = read_layer(exposure_filename)

        plugin_name = 'Earthquake Fatality Function'
        plugin_list = get_plugins(plugin_name)
        assert len(plugin_list) == 1
        assert plugin_list[0].keys()[0] == plugin_name

        IF = plugin_list[0][plugin_name]

        # Call calculation engine
        impact_filename = calculate_impact(layers=[H, E],
                                           impact_function=IF)

        # Do calculation manually and check result
        hazard_raster = read_layer(hazard_filename)
        H = hazard_raster.get_data(nan=0)

        exposure_raster = read_layer(exposure_filename)
        E = exposure_raster.get_data(nan=0)

        # Calculate impact manually
        a = 0.97429
        b = 11.037
        F = 10 ** (a * H - b) * E

        # Verify correctness of result
        calculated_raster = read_layer(impact_filename)
        C = calculated_raster.get_data(nan=0)

        # Compare shape and extrema
        msg = ('Shape of calculated raster differs from reference raster: '
               'C=%s, F=%s' % (C.shape, F.shape))
        assert numpy.allclose(C.shape, F.shape, rtol=1e-12, atol=1e-12), msg

        msg = ('Minimum of calculated raster differs from reference raster: '
               'C=%s, F=%s' % (numpy.min(C), numpy.min(F)))
        assert numpy.allclose(numpy.min(C), numpy.min(F),
                              rtol=1e-12, atol=1e-12), msg
        msg = ('Maximum of calculated raster differs from reference raster: '
               'C=%s, F=%s' % (numpy.max(C), numpy.max(F)))
        assert numpy.allclose(numpy.max(C), numpy.max(F),
                              rtol=1e-12, atol=1e-12), msg

        # Compare every single value numerically
        msg = 'Array values of written raster array were not as expected'
        assert numpy.allclose(C, F, rtol=1e-12, atol=1e-12), msg

        # Check that extrema are in range
        xmin, xmax = calculated_raster.get_extrema()
        assert numpy.alltrue(C >= xmin)
        assert numpy.alltrue(C <= xmax)
        assert numpy.alltrue(C >= 0)
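
For reference, the manual check above applies the fatality model directly to the rasters. A minimal standalone sketch of the same arithmetic, using the constants from the test but synthetic arrays in place of the real data, might look like this:

import numpy

def fatality_estimate(mmi, population, a=0.97429, b=11.037):
    """Sketch of the fatality model used for the manual check above.

    mmi and population are numpy arrays of equal shape; NaNs are treated
    as zero, mirroring get_data(nan=0).
    """
    H = numpy.nan_to_num(mmi)
    E = numpy.nan_to_num(population)
    return 10 ** (a * H - b) * E

# Synthetic example values (not project data)
mmi = numpy.array([[6.0, 7.5], [8.0, numpy.nan]])
population = numpy.array([[1000.0, 2500.0], [500.0, 800.0]])
print(fatality_estimate(mmi, population))
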
Example #6
    def test_tephra_load_impact(self):
        """Hypothetical tephra load scenario can be computed

        This test also exercises reprojection of UTM data
        """

        # File names for hazard level and exposure

        # FIXME - when we know how to reproject, replace hazard
        # file with UTM version (i.e. without _geographic).
        hazard_filename = os.path.join(TESTDATA,
                                       'Ashload_Gede_VEI4_geographic.asc')
        exposure_filename = os.path.join(TESTDATA, 'lembang_schools.shp')

        # Calculate impact using API
        H = read_layer(hazard_filename)
        E = read_layer(exposure_filename)

        plugin_name = 'Tephra Impact Function'
        plugin_list = get_plugins(plugin_name)
        assert len(plugin_list) == 1
        assert plugin_list[0].keys()[0] == plugin_name

        IF = plugin_list[0][plugin_name]
        impact_filename = calculate_impact(layers=[H, E],
                                           impact_fcn=IF)

        # Read input data
        hazard_raster = read_layer(hazard_filename)
        A = hazard_raster.get_data()
        load_min, load_max = hazard_raster.get_extrema()

        exposure_vector = read_layer(exposure_filename)
        coordinates = exposure_vector.get_geometry()
        attributes = exposure_vector.get_data()

        # Read calculated result
        impact_vector = read_layer(impact_filename)
        coordinates = impact_vector.get_geometry()
        attributes = impact_vector.get_data()

        # Test that results are as expected
        # FIXME: Change test when we decide what values should actually be
        #        calculated :-) :-) :-)
        for a in attributes:
            load = a['ASHLOAD']
            impact = a['DAMAGE']

            # Test interpolation
            msg = ('Load %.15f was outside bounds [%f, %f]'
                   % (load, load_min, load_max))
            if not numpy.isnan(load):
                assert load_min <= load <= load_max, msg

            # Test calculated values
            #if 0.01 <= load < 90.0:
            #    assert impact == 1
            #elif 90.0 <= load < 150.0:
            #    assert impact == 2
            #elif 150.0 <= load < 300.0:
            #    assert impact == 3
            #elif load >= 300.0:
            #    assert impact == 4
            #else:
            #    assert impact == 0

            if 0.01 <= load < 0.5:
                assert impact == 0
            elif 0.5 <= load < 2.0:
                assert impact == 1
            elif 2.0 <= load < 10.0:
                assert impact == 2
            elif load >= 10.0:
                assert impact == 3
            else:
                assert impact == 0
Example #7
    def test_jakarta_flood_study(self):
        """HKV Jakarta flood study calculated correctly using aligned rasters
        """

        # FIXME (Ole): Redo with population as shapefile later

        # Name file names for hazard level, exposure and expected fatalities

        population = 'Population_Jakarta_geographic.asc'
        plugin_name = 'Flood Impact Function'

        # Expected values from HKV
        expected_values = [2485442, 1537920]

        i = 0
        for filename in ['Flood_Current_Depth_Jakarta_geographic.asc',
                         'Flood_Design_Depth_Jakarta_geographic.asc']:

            hazard_filename = '%s/%s/%s' % (DEMODATA, 'hazard', filename)
            exposure_filename = '%s/%s/%s' % (DEMODATA, 'exposure',
                                              population)

            # Get layers using API
            H = read_layer(hazard_filename)
            E = read_layer(exposure_filename)

            plugin_list = get_plugins(plugin_name)
            assert len(plugin_list) == 1
            assert plugin_list[0].keys()[0] == plugin_name

            IF = plugin_list[0][plugin_name]

            # Call impact calculation engine
            impact_filename = calculate_impact(layers=[H, E],
                                               impact_function=IF)

            # Do calculation manually and check result
            hazard_raster = read_layer(hazard_filename)
            H = hazard_raster.get_data(nan=0)

            exposure_raster = read_layer(exposure_filename)
            P = exposure_raster.get_data(nan=0)

            # Calculate impact manually
            pixel_area = 2500
            I = numpy.where(H > 0.1, P, 0) / 100000.0 * pixel_area

            # Verify correctness against results from HKV
            res = sum(I.flat)
            ref = expected_values[i]
            #print filename, 'Result=%f' % res, ' Expected=%f' % ref
            #print 'Pct relative error=%f' % (abs(res-ref)*100./ref)

            msg = 'Got result %f but expected %f' % (res, ref)
            assert numpy.allclose(res, ref, rtol=1.0e-2), msg

            # Verify correctness of result
            calculated_raster = read_layer(impact_filename)
            C = calculated_raster.get_data(nan=0)

            # Check caption (FIXME (Ole): Do this when issue #77 has been done)
            #assert calculated_raster.get_caption().startswith('Number')
            # ...and more tests here

            # Compare shape and extrema
            msg = ('Shape of calculated raster differs from reference raster: '
                   'C=%s, I=%s' % (C.shape, I.shape))
            assert numpy.allclose(C.shape, I.shape,
                                  rtol=1e-12, atol=1e-12), msg

            msg = ('Minimum of calculated raster differs from reference '
                   'raster: '
                   'C=%s, I=%s' % (numpy.min(C), numpy.min(I)))
            assert numpy.allclose(numpy.min(C), numpy.min(I),
                                  rtol=1e-12, atol=1e-12), msg
            msg = ('Maximum of calculated raster differs from reference '
                   'raster: '
                   'C=%s, I=%s' % (numpy.max(C), numpy.max(I)))
            assert numpy.allclose(numpy.max(C), numpy.max(I),
                                  rtol=1e-12, atol=1e-12), msg

            # Compare every single value numerically
            msg = 'Array values of written raster array were not as expected'
            assert numpy.allclose(C, I, rtol=1e-12, atol=1e-12), msg

            # Check that extrema are in range
            xmin, xmax = calculated_raster.get_extrema()
            assert numpy.alltrue(C >= xmin)
            assert numpy.alltrue(C <= xmax)
            assert numpy.alltrue(C >= 0)

            i += 1
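
The manual check in this test (count people in cells deeper than 0.1 m, scaled by pixel_area / 100000) can be sketched as a standalone function. The threshold, pixel area and scaling come from the code above; the sample arrays below are synthetic, not the Jakarta rasters.

import numpy

def flood_impact_estimate(depth, population, threshold=0.1, pixel_area=2500):
    """Sketch of the manual flood impact check above.

    Cells with depth above the threshold contribute their population,
    scaled by pixel_area / 100000 as in the reference calculation.
    """
    D = numpy.nan_to_num(depth)
    P = numpy.nan_to_num(population)
    I = numpy.where(D > threshold, P, 0) / 100000.0 * pixel_area
    return I.sum()

# Synthetic example values
depth = numpy.array([[0.05, 0.4], [1.2, numpy.nan]])
population = numpy.array([[30000.0, 45000.0], [60000.0, 20000.0]])
print(flood_impact_estimate(depth, population))
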
Example #8
    def test_earthquake_fatality_estimation_allen(self):
        """Fatalities from ground shaking can be computed correctly 1
           using aligned rasters
        """

        # Name file names for hazard level, exposure and expected fatalities
        hazard_filename = '%s/Earthquake_Ground_Shaking_clip.tif' % TESTDATA
        exposure_filename = '%s/Population_2010_clip.tif' % TESTDATA

        # Calculate impact using API
        H = read_layer(hazard_filename)
        E = read_layer(exposure_filename)

        plugin_name = 'Earthquake Fatality Function'
        plugin_list = get_plugins(plugin_name)
        assert len(plugin_list) == 1
        assert plugin_list[0].keys()[0] == plugin_name

        IF = plugin_list[0][plugin_name]

        # Call calculation engine
        impact_filename = calculate_impact(layers=[H, E],
                                           impact_fcn=IF)

        # Do calculation manually and check result
        hazard_raster = read_layer(hazard_filename)
        H = hazard_raster.get_data(nan=0)

        exposure_raster = read_layer(exposure_filename)
        E = exposure_raster.get_data(nan=0)

        # Calculate impact manually
        a = 0.97429
        b = 11.037
        F = 10 ** (a * H - b) * E

        # Verify correctness of result
        calculated_raster = read_layer(impact_filename)
        C = calculated_raster.get_data(nan=0)

        # Compare shape and extrema
        msg = ('Shape of calculated raster differs from reference raster: '
               'C=%s, F=%s' % (C.shape, F.shape))
        assert numpy.allclose(C.shape, F.shape, rtol=1e-12, atol=1e-12), msg

        msg = ('Minimum of calculated raster differs from reference raster: '
               'C=%s, F=%s' % (numpy.min(C), numpy.min(F)))
        assert numpy.allclose(numpy.min(C), numpy.min(F),
                              rtol=1e-12, atol=1e-12), msg
        msg = ('Maximum of calculated raster differs from reference raster: '
               'C=%s, F=%s' % (numpy.max(C), numpy.max(F)))
        assert numpy.allclose(numpy.max(C), numpy.max(F),
                              rtol=1e-12, atol=1e-12), msg

        # Compare every single value numerically
        msg = 'Array values of written raster array were not as expected'
        assert numpy.allclose(C, F, rtol=1e-12, atol=1e-12), msg

        # Check that extrema are in range
        xmin, xmax = calculated_raster.get_extrema()
        assert numpy.alltrue(C >= xmin)
        assert numpy.alltrue(C <= xmax)
        assert numpy.alltrue(C >= 0)
Example #9
    def test_tsunami_loss_use_case(self):
        """Building loss from tsunami use case works
        """

        # This test merely exercises the use case as there is
        # no reference data. It does check the sanity of values as
        # far as possible.

        hazard_filename = ('%s/tsunami_max_inundation_depth_BB_'
                           'geographic.asc' % TESTDATA)
        exposure_filename = ('%s/tsunami_exposure_BB.shp' % TESTDATA)
        exposure_with_depth_filename = ('%s/tsunami_exposure_BB_'
                                        'with_depth.shp' % TESTDATA)
        reference_impact_filename = ('%s/tsunami_impact_assessment_'
                                     'BB.shp' % TESTDATA)

        # Calculate impact using API
        H = read_layer(hazard_filename)
        E = read_layer(exposure_filename)

        plugin_name = 'Tsunami Building Loss Function'
        plugin_list = get_plugins(plugin_name)
        assert len(plugin_list) == 1
        assert plugin_list[0].keys()[0] == plugin_name

        IF = plugin_list[0][plugin_name]
        impact_filename = calculate_impact(layers=[H, E],
                                           impact_fcn=IF)

        # Read calculated result
        impact_vector = read_layer(impact_filename)
        icoordinates = impact_vector.get_geometry()
        iattributes = impact_vector.get_data()
        N = len(icoordinates)

        # Ensure that calculated point locations coincide with
        # original exposure point locations
        ref_exp = read_layer(exposure_filename)
        refcoordinates = ref_exp.get_geometry()

        assert N == len(refcoordinates)
        msg = ('Coordinates of impact results do not match those of '
               'exposure data')
        assert numpy.allclose(icoordinates, refcoordinates), msg

        # Ensure that calculated point locations coincide with
        # original exposure point (with depth) locations
        ref_depth = read_layer(exposure_with_depth_filename)
        refdepth_coordinates = ref_depth.get_geometry()
        refdepth_attributes = ref_depth.get_data()
        assert N == len(refdepth_coordinates)
        msg = ('Coordinates of impact results do not match those of '
               'exposure data (with depth)')
        assert numpy.allclose(icoordinates, refdepth_coordinates), msg

        # Read reference results
        hazard_raster = read_layer(hazard_filename)
        A = hazard_raster.get_data()
        depth_min, depth_max = hazard_raster.get_extrema()

        ref_impact = read_layer(reference_impact_filename)
        refimpact_coordinates = ref_impact.get_geometry()
        refimpact_attributes = ref_impact.get_data()

        # Check for None
        for i in range(N):
            if refimpact_attributes[i] is None:
                msg = 'Element %i was None' % i
                raise Exception(msg)

        # Check sanity of calculated attributes
        for i in range(N):
            lon, lat = icoordinates[i]

            depth = iattributes[i]['DEPTH']

            # Ignore NaN's
            if numpy.isnan(depth):
                continue

            structural_damage = iattributes[i]['STRUCT_DAM']
            contents_damage = iattributes[i]['CONTENTS_D']
            for imp in [structural_damage, contents_damage]:
                msg = ('Percent damage was outside range: %f' % imp)
                assert 0 <= imp <= 1, msg

            structural_loss = iattributes[i]['STRUCT_LOS']
            contents_loss = iattributes[i]['CONTENTS_L']
            if depth < 0.3:
                assert structural_loss == 0.0
                assert contents_loss == 0.0
            else:
                assert structural_loss > 0.0
                assert contents_loss > 0.0

            number_of_people = iattributes[i]['NEXIS_PEOP']
            people_affected = iattributes[i]['PEOPLE_AFF']
            people_severely_affected = iattributes[i]['PEOPLE_SEV']

            if 0.01 < depth < 1.0:
                assert people_affected == number_of_people
            else:
                assert people_affected == 0

            if depth >= 1.0:
                assert people_severely_affected == number_of_people
            else:
                assert people_severely_affected == 0

            # Contents and structural damage are computed from different
            # damage curves and should therefore differ
            if depth > 0 and contents_damage > 0:
                assert contents_damage != structural_damage
Example #10
    def test_earthquake_impact_OSM_data(self):
        """Earthquake layer interpolation to OSM building data works

        The impact function used is based on the guidelines plugin

        This test also exercises interpolation of hazard level (raster) to
        building locations (vector data).
        """

        # FIXME: Still needs some reference data to compare to
        for mmi_filename in ['Shakemap_Padang_2009.asc',
                             # Time consuming
                             #'Earthquake_Ground_Shaking.asc',
                             'Lembang_Earthquake_Scenario.asc']:

            # Name file names for hazard level and exposure
            hazard_filename = '%s/%s' % (TESTDATA, mmi_filename)
            exposure_filename = ('%s/OSM_building_polygons_20110905.shp'
                                 % TESTDATA)

            # Calculate impact using API
            H = read_layer(hazard_filename)
            E = read_layer(exposure_filename)

            plugin_name = 'Earthquake Guidelines Function'
            plugin_list = get_plugins(plugin_name)
            assert len(plugin_list) == 1
            assert plugin_list[0].keys()[0] == plugin_name

            IF = plugin_list[0][plugin_name]
            impact_filename = calculate_impact(layers=[H, E],
                                               impact_fcn=IF)

            # Read input data
            hazard_raster = read_layer(hazard_filename)
            A = hazard_raster.get_data()
            mmi_min, mmi_max = hazard_raster.get_extrema()

            exposure_vector = read_layer(exposure_filename)
            coordinates = exposure_vector.get_geometry()
            attributes = exposure_vector.get_data()

            # Read calculated result
            impact_vector = read_layer(impact_filename)
            icoordinates = impact_vector.get_geometry()
            iattributes = impact_vector.get_data()

            # Verify interpolated MMI with test result
            for i in range(len(iattributes)):
                calculated_mmi = iattributes[i]['MMI']

                if numpy.isnan(calculated_mmi):
                    continue

                # Check that interpolated points are within range
                msg = ('Interpolated mmi %f from file %s was outside '
                       'extrema: [%f, %f] at point %i '
                       % (calculated_mmi, hazard_filename,
                          mmi_min, mmi_max, i))
                assert mmi_min <= calculated_mmi <= mmi_max, msg

                calculated_dam = iattributes[i]['DMGLEVEL']
                assert calculated_dam in [1, 2, 3]
Example #11
    def test_earthquake_damage_schools(self):
        """Lembang building damage from ground shaking works

        This test also exercises interpolation of hazard level (raster) to
        building locations (vector data).
        """

        for mmi_filename in ['lembang_mmi_hazmap.asc',
                             'Earthquake_Ground_Shaking_clip.tif',  # NaN's
                             'Lembang_Earthquake_Scenario.asc']:

            # Name file names for hazard level and exposure
            hazard_filename = '%s/%s' % (TESTDATA, mmi_filename)
            exposure_filename = '%s/lembang_schools.shp' % TESTDATA

            # Calculate impact using API
            H = read_layer(hazard_filename)
            E = read_layer(exposure_filename)

            plugin_name = 'Earthquake Building Damage Function'
            plugin_list = get_plugins(plugin_name)
            assert len(plugin_list) == 1
            assert plugin_list[0].keys()[0] == plugin_name

            IF = plugin_list[0][plugin_name]

            impact_filename = calculate_impact(layers=[H, E],
                                               impact_fcn=IF)

            # Read input data
            hazard_raster = read_layer(hazard_filename)
            A = hazard_raster.get_data()
            mmi_min, mmi_max = hazard_raster.get_extrema()

            exposure_vector = read_layer(exposure_filename)
            coordinates = exposure_vector.get_geometry()
            attributes = exposure_vector.get_data()

            # Read calculated result
            impact_vector = read_layer(impact_filename)
            icoordinates = impact_vector.get_geometry()
            iattributes = impact_vector.get_data()

            # First check that interpolated MMI was done as expected
            fid = open('%s/lembang_schools_percentage_loss_and_mmi.txt'
                       % TESTDATA)
            reference_points = []
            MMI = []
            DAM = []
            for line in fid.readlines()[1:]:
                fields = line.strip().split(',')

                lon = float(fields[4][1:-1])
                lat = float(fields[3][1:-1])
                mmi = float(fields[-1][1:-1])
                dam = float(fields[-2][1:-1])

                reference_points.append((lon, lat))
                MMI.append(mmi)
                DAM.append(dam)

            # Verify that coordinates are consistent
            msg = 'Interpolated coordinates do not match those of test data'
            assert numpy.allclose(icoordinates, reference_points), msg

            # Verify interpolated MMI with test result
            min_damage = sys.maxint
            max_damage = -min_damage
            for i in range(len(MMI)):
                lon, lat = icoordinates[i][:]
                calculated_mmi = iattributes[i]['MMI']

                if numpy.isnan(calculated_mmi):
                    continue

                # Check that interpolated points are within range
                msg = ('Interpolated mmi %f from file %s was outside '
                       'extrema: [%f, %f] at location '
                       '[%f, %f].' % (calculated_mmi, hazard_filename,
                                      mmi_min, mmi_max, lon, lat))
                assert mmi_min <= calculated_mmi <= mmi_max, msg

                # Set up some tolerances for comparison with test set.
                if mmi_filename.startswith('Lembang_Earthquake'):
                    pct = 3
                else:
                    pct = 2

                # Check that interpolated result is within specified tolerance
                msg = ('Calculated MMI %f deviated more than %.1f%% from '
                       'what was expected %f' % (calculated_mmi, pct, MMI[i]))
                assert numpy.allclose(calculated_mmi, MMI[i],
                                      rtol=float(pct) / 100), msg

                calculated_dam = iattributes[i]['DAMAGE']
                if calculated_dam > max_damage:
                    max_damage = calculated_dam

                if calculated_dam < min_damage:
                    min_damage = calculated_dam

                ref_dam = lembang_damage_function(calculated_mmi)
                msg = ('Calculated damage was not as expected')
                assert numpy.allclose(calculated_dam, ref_dam,
                                      rtol=1.0e-12), msg

                # Test that test data is correct by calculating damage based
                # on reference MMI.
                # FIXME (Ole): UNCOMMENT WHEN WE GET THE CORRECT DATASET
                #expected_test_damage = lembang_damage_function(MMI[i])
                #msg = ('Test data is inconsistent: i = %i, MMI = %f,'
                #       'expected_test_damage = %f, '
                #       'actual_test_damage = %f' % (i, MMI[i],
                #                                    expected_test_damage,
                #                                    DAM[i]))
                #if not numpy.allclose(expected_test_damage,
                #                      DAM[i], rtol=1.0e-12):
                #    print msg

                # Note this test doesn't work, but the question is whether the
                # independent test data is correct.
                # Also small fluctuations in MMI can cause very large changes
                # in computed damage for this example.
                # print mmi, MMI[i], calculated_damage, DAM[i]
                #msg = ('Calculated damage was not as expected for point %i:'
                #       'Got %f, expected %f' % (i, calculated_dam, DAM[i]))
                #assert numpy.allclose(calculated_dam, DAM[i], rtol=0.8), msg

            assert min_damage >= 0
            assert max_damage <= 100
Example #12
    def test_jakarta_flood_study(self):
        """HKV Jakarta flood study calculated correctly using aligned rasters
        """

        # FIXME (Ole): Redo with population as shapefile later

        # Name file names for hazard level, exposure and expected fatalities

        population = 'Population_Jakarta_geographic.asc'
        plugin_name = 'Flood Impact Function'

        # Expected values from HKV
        expected_values = [2485442, 1537920]

        i = 0
        for filename in ['Flood_Current_Depth_Jakarta_geographic.asc',
                         'Flood_Design_Depth_Jakarta_geographic.asc']:

            hazard_filename = os.path.join(TESTDATA, filename)
            exposure_filename = os.path.join(TESTDATA, population)

            # Get layers using API
            H = read_layer(hazard_filename)
            E = read_layer(exposure_filename)

            plugin_list = get_plugins(plugin_name)
            assert len(plugin_list) == 1
            assert plugin_list[0].keys()[0] == plugin_name

            IF = plugin_list[0][plugin_name]

            # Call impact calculation engine
            impact_filename = calculate_impact(layers=[H, E],
                                               impact_fcn=IF)

            # Do calculation manually and check result
            hazard_raster = read_layer(hazard_filename)
            H = hazard_raster.get_data(nan=0)

            exposure_raster = read_layer(exposure_filename)
            P = exposure_raster.get_data(nan=0)

            # Calculate impact manually
            pixel_area = 2500
            I = numpy.where(H > 0.1, P, 0) / 100000.0 * pixel_area

            # Verify correctness against results from HKV
            res = sum(I.flat)
            ref = expected_values[i]
            #print filename, 'Result=%f' % res, ' Expected=%f' % ref
            #print 'Pct relative error=%f' % (abs(res-ref)*100./ref)

            msg = 'Got result %f but expected %f' % (res, ref)
            assert numpy.allclose(res, ref, rtol=1.0e-2), msg

            # Verify correctness of result
            calculated_raster = read_layer(impact_filename)
            C = calculated_raster.get_data(nan=0)

            # Check caption
            caption = calculated_raster.get_caption()
            expct = 'People'
            msg = ('Caption %s did not contain expected '
                   'keyword %s' % (caption, expct))
            assert expct in caption, msg

            # Compare shape and extrema
            msg = ('Shape of calculated raster differs from reference raster: '
                   'C=%s, I=%s' % (C.shape, I.shape))
            assert numpy.allclose(C.shape, I.shape,
                                  rtol=1e-12, atol=1e-12), msg

            msg = ('Minimum of calculated raster differs from reference '
                   'raster: '
                   'C=%s, I=%s' % (numpy.min(C), numpy.min(I)))
            assert numpy.allclose(numpy.min(C), numpy.min(I),
                                  rtol=1e-12, atol=1e-12), msg
            msg = ('Maximum of calculated raster differs from reference '
                   'raster: '
                   'C=%s, I=%s' % (numpy.max(C), numpy.max(I)))
            assert numpy.allclose(numpy.max(C), numpy.max(I),
                                  rtol=1e-12, atol=1e-12), msg

            # Compare every single value numerically
            msg = 'Array values of written raster array were not as expected'
            assert numpy.allclose(C, I, rtol=1e-12, atol=1e-12), msg

            # Check that extrema are in range
            xmin, xmax = calculated_raster.get_extrema()
            assert numpy.alltrue(C >= xmin)
            assert numpy.alltrue(C <= xmax)
            assert numpy.alltrue(C >= 0)

            i += 1
Example #13
File: views.py Project: sabman/riab
def calculate(request, save_output=dummy_save):
    start = datetime.datetime.now()

    if request.method == 'GET':
        # FIXME: Add a basic form here to be able to generate the POST request.
        return HttpResponse('This should be accessed by robots, not humans. '
                            'In other words, use HTTP POST instead of GET.')
    elif request.method == 'POST':
        data = request.POST
        impact_function_name = data['impact_function']
        hazard_server = data['hazard_server']
        hazard_layer = data['hazard']
        exposure_server = data['exposure_server']
        exposure_layer = data['exposure']
        bbox = data['bbox']
        keywords = data['keywords']

    theuser = get_guaranteed_valid_user(request.user)

    plugin_list = get_plugins(impact_function_name)
    _, impact_function = plugin_list[0].items()[0]
    impact_function_source = inspect.getsource(impact_function)

    # Create entry in database
    calculation = Calculation(user=theuser,
                              run_date=start,
                              hazard_server=hazard_server,
                              hazard_layer=hazard_layer,
                              exposure_server=exposure_server,
                              exposure_layer=exposure_layer,
                              impact_function=impact_function_name,
                              impact_function_source=impact_function_source,
                              bbox=bbox,
                              success=False)
    calculation.save()

    logger.info('Performing requested calculation')
    # Download selected layer objects
    logger.info('- Downloading hazard layer %s from %s' % (hazard_layer,
                                                           hazard_server))
    H = download(hazard_server, hazard_layer, bbox)
    logger.info('- Downloading exposure layer %s from %s' % (exposure_layer,
                                                             exposure_server))
    E = download(exposure_server, exposure_layer, bbox)

    # Calculate result using specified impact function
    logger.info('- Calculating impact using %s' % impact_function)

    impact_filename = calculate_impact(layers=[H, E],
                                       impact_function=impact_function)

    # Upload result to internal GeoServer
    logger.info('- Uploading impact layer %s' % impact_filename)
    result = save_output(impact_filename,
                         title='output_%s' % start.isoformat(),
                         user=theuser)
    logger.info('- Result available at %s.' % result.get_absolute_url())

    calculation.layer = result.get_absolute_url()
    calculation.success = True
    calculation.save()

    output = calculation.__dict__

    # json.dumps does not like datetime objects,
    # let's make it a json string ourselves
    output['run_date'] = 'new Date("%s")' % calculation.run_date

    # FIXME: This should not be needed in an ideal world
    ows_server_url = settings.GEOSERVER_BASE_URL + 'ows'
    output['ows_server_url'] = ows_server_url

    # json.dumps does not like django users
    output['user'] = calculation.user.username

    # Delete _state and _user_cache item from the dict,
    # they were created automatically by Django
    del output['_user_cache']
    del output['_state']
    jsondata = json.dumps(output)
    return HttpResponse(jsondata, mimetype='application/json')
Example #14
def calculate(request, save_output=save_to_geonode):
    start = datetime.datetime.now()

    if request.method == 'GET':
        # FIXME: Add a basic form here to be able to generate the POST request.
        return HttpResponse('This should be accessed by robots, not humans. '
                            'In other words, use HTTP POST instead of GET.')
    elif request.method == 'POST':
        data = request.POST
        impact_function_name = data['impact_function']
        hazard_server = data['hazard_server']
        hazard_layer = data['hazard']
        exposure_server = data['exposure_server']
        exposure_layer = data['exposure']
        requested_bbox = data['bbox']
        keywords = data['keywords']

    if request.user.is_anonymous():
        theuser = get_valid_user()
    else:
        theuser = request.user

    # Create entry in database
    calculation = Calculation(user=theuser,
                              run_date=start,
                              hazard_server=hazard_server,
                              hazard_layer=hazard_layer,
                              exposure_server=exposure_server,
                              exposure_layer=exposure_layer,
                              impact_function=impact_function_name,
                              success=False)

    # Wrap main computation loop in try except to catch and present
    # messages and stack traces in the application
    try:
        # Get metadata
        haz_metadata = get_metadata(hazard_server, hazard_layer)
        exp_metadata = get_metadata(exposure_server, exposure_layer)

        # Determine common resolution in case of raster layers
        raster_resolution = get_common_resolution(haz_metadata, exp_metadata)

        # Get reconciled bounding boxes
        haz_bbox, exp_bbox, imp_bbox = get_bounding_boxes(haz_metadata,
                                                          exp_metadata,
                                                          requested_bbox)

        # Record layers to download
        download_layers = [(hazard_server, hazard_layer, haz_bbox),
                           (exposure_server, exposure_layer, exp_bbox)]

        # FIXME: Add linked layers, if any (STILL TODO!)

        # Get selected impact function
        impact_function = get_plugin(impact_function_name)
        impact_function_source = inspect.getsource(impact_function)

        # Record information in the calculation object and save it
        calculation.impact_function_source = impact_function_source
        calculation.bbox = bboxlist2string(imp_bbox)
        calculation.save()

        # Start computation
        msg = 'Performing requested calculation'
        logger.info(msg)

        # Download selected layer objects
        layers = []
        for server, layer_name, bbox in download_layers:
            msg = ('- Downloading layer %s from %s'
                   % (layer_name, server))
            logger.info(msg)
            L = download(server, layer_name, bbox, raster_resolution)
            layers.append(L)

        # Calculate result using specified impact function
        msg = ('- Calculating impact using %s' % impact_function)
        logger.info(msg)
        impact_filename = calculate_impact(layers=layers,
                                           impact_fcn=impact_function)

        # Upload result to internal GeoServer
        msg = ('- Uploading impact layer %s' % impact_filename)
        logger.info(msg)
        result = save_output(impact_filename,
                             title='output_%s' % start.isoformat(),
                             user=theuser)
    except Exception as e:
        # FIXME: Reimplement error saving for calculation.
        # FIXME (Ole): Why should we reimplement?
        # This is dangerous. Try to raise an exception
        # e.g. in get_metadata_from_layer. Things will silently fail.
        # See issue #170

        logger.error(e)
        errors = str(e)
        trace = exception_format(e)
        calculation.errors = errors
        calculation.stacktrace = trace
        calculation.save()
        jsondata = json.dumps({'errors': errors, 'stacktrace': trace})
        return HttpResponse(jsondata, mimetype='application/json')