def process_scenario(scenario_id):
    """Convert a 3Di scenario that was imported."""

    # The normal (Sobek) task starts Workflow Template 2.
    # This starts
    # 132 -> 134 -> 160 -> 162 -> 180 -> 185
    # and
    # 150 -> 155
    #
    # meaning
    #
    # 132 = compute rise speed
    # 134 = compute mortality grid
    # 160 = simulation
    # 162 = embankment damage
    # 180 = pyramid generation
    # 185 = presentation generation
    # 150 = pyramid generation (second chain)
    # 155 = presentation generation (second chain)
    #
    # pyramid_generation has 'sobek' and 'his_ssm' functions
    # Same for presentation generation
    #
    # For 3Di we need to do
    # - Compute "sobek-equivalent" results
    # - See if the sobek pyramid generation works on it
    # - See if the sobek presentation generation works on it

    scenario = Scenario.objects.get(pk=scenario_id)

    bathymetry = scenario.result_set.get(resulttype__name='bathymetry')
    netcdf = scenario.result_set.get(resulttype__name='results_3di')

    success1, success2 = False, False
    result1, result2 = None, None

    with temporarily_unzipped(bathymetry.absolute_resultloc) as bathpath:
        with temporarily_unzipped(netcdf.absolute_resultloc) as ncdfpath:
            bathymetry_dataset = gdal.Open(bathpath[0])

            with Converter(ncdfpath[0]) as converter:
                workdir = tempfile.mkdtemp()

                try:
                    result1, success1 = compute_waterdepth_animation(
                        scenario, bathymetry_dataset, converter, workdir)

                    result2, success2 = compute_max_waterdepth_tif_result(
                        scenario, bathymetry_dataset, converter, workdir)
                finally:
                    shutil.rmtree(workdir)

    success = all([success1, success2])
    result = result1, result2
    return (success, result, '-')
Example 2
    def test_trivial(self):
        for f in (TMPFILE, ZIPFILE):
            if os.path.exists(f):
                os.remove(f)

        with open(TMPFILE, 'w') as f:
            f.write("whee\n")

        os.system("/usr/bin/zip -q {zipfile} {tmpfile}".format(
            zipfile=ZIPFILE, tmpfile=TMPFILE))

        with files.temporarily_unzipped(ZIPFILE) as names:
            self.assertEqual([os.path.basename(name) for name in names],
                             ['test.txt'])

            name = names[0]
            with open(name, 'w') as f:
                f.write("ook whee\n")

        self.assertFalse(os.path.exists(name))

        with zipfile.ZipFile(ZIPFILE) as zipf:
            self.assertEqual(b"ook whee\n", zipf.read('tmp/test.txt'))
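
Note: the examples on this page treat files.temporarily_unzipped as a
context manager that extracts a zip into a temporary directory, yields
the paths of the extracted files, writes any modifications back into the
zip on exit (unless rezip=False), and then cleans up. The real
implementation is not shown on this page; a minimal sketch with that
assumed behavior:

import contextlib
import os
import shutil
import tempfile
import zipfile


@contextlib.contextmanager
def temporarily_unzipped(zip_path, rezip=True, tmp_dir=None):
    # Sketch only: extract everything, yield the extracted paths,
    # optionally rezip on the way out, always clean up the temp dir.
    unpack_dir = tempfile.mkdtemp(dir=tmp_dir)
    try:
        with zipfile.ZipFile(zip_path) as zipf:
            names = zipf.namelist()
            zipf.extractall(unpack_dir)
        yield [os.path.join(unpack_dir, name) for name in names]
        if rezip:
            with zipfile.ZipFile(
                    zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
                for name in names:
                    zipf.write(os.path.join(unpack_dir, name), name)
    finally:
        shutil.rmtree(unpack_dir)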
Example 3
def scenario_results_zipfile(request, permission_manager, scenario_id):
    """Return all results of a given scenario as one large
    zipfile. Requires approval permissions in the scenario."""

    scenario = models.Scenario.objects.get(pk=scenario_id)

    if not permission_manager.check_scenario_permission(
            scenario, models.UserPermission.PERMISSION_SCENARIO_APPROVE):
        raise PermissionDenied()

    results = scenario.result_set.all()

    resultset = set()

    for result in results:
        resultloc = result.resultloc
        if not resultloc:
            continue

        resultloc = result_location(resultloc)

        if os.path.exists(resultloc):
            resultset.add(resultloc)

    # We use a TemporaryFile, which is removed automatically as soon
    # as it is closed (or garbage collected).
    temp = tempfile.TemporaryFile(
        prefix="zipfile_for_downloading_scenario_results",
        dir=settings.TMP_DIR)

    zipf = zipfile.ZipFile(temp, mode='w')

    for loc in resultset:
        filename = os.path.basename(loc)
        if filename.endswith('.zip'):
            with files.temporarily_unzipped(loc) as paths:
                for path in paths:
                    zipf.write(path, os.path.basename(path),
                               zipfile.ZIP_DEFLATED)
        else:
            zipf.write(loc, filename, zipfile.ZIP_DEFLATED)

    zipf.close()

    # By wrapping the temp file in a Django FileWrapper, it is read
    # in chunks that always fit into memory.
    wrapper = FixedFileWrapper(temp)

    response = HttpResponse(wrapper, content_type='application/zip')
    response['Content-Disposition'] = (
        'attachment; filename=resultaten_scenario_{0}.zip'.format(scenario.id))
    # temp.tell() is the current position in the file, which is the
    # same as the length of the file
    response['Content-Length'] = temp.tell()

    # But for serving from this opened file, we have to start at the
    # beginning
    temp.seek(0)

    return response
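
The result_location helper used above is not shown on this page. Based
on how the other examples resolve result.resultloc, it presumably
normalizes the Windows-style path and joins it to the configured results
directory; a plausible sketch (the setting name is borrowed from another
example below and may differ):

import os

from django.conf import settings


def result_location(resultloc):
    # Assumed behavior: "linuxify" the stored relative location and
    # make it absolute against the mounted results directory.
    return os.path.join(settings.EXTERNAL_RESULT_MOUNTED_DIR,
                        resultloc.replace('\\', '/'))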
Example 4
    def test_trivial(self):
        for f in (TMPFILE, ZIPFILE):
            if os.path.exists(f):
                os.remove(f)

        with open(TMPFILE, 'w') as f:
            f.write("whee\n")

        os.system("/usr/bin/zip -q {zipfile} {tmpfile}"
                  .format(zipfile=ZIPFILE, tmpfile=TMPFILE))

        with files.temporarily_unzipped(ZIPFILE) as names:
            self.assertEqual(
                [os.path.basename(name) for name in names],
                ['test.txt'])

            name = names[0]
            with open(name, 'w') as f:
                f.write("ook whee\n")

        self.assertFalse(os.path.exists(name))

        with zipfile.ZipFile(ZIPFILE) as zipf:
            self.assertEqual(b"ook whee\n", zipf.read('tmp/test.txt'))
def dijkring_arrays_to_zip(input_files, tmp_zip_filename, gridtype='output'):
    """
    Return arrays in a dict with key 'dijkringnr'.
    Value is a list of 2-tuples (masked_array, geo_transform)

    Input_files is a list with dicts 'dijkringnr' and 'filename'

    gridtype is used to generate useful arcnames
    """
    dijkring_arrays = {}  # key is dijkringnr
    result = {}

    for input_file in input_files:
        print('  - processing result for scenario %s...'
              % input_file['scenario'])
        linux_filename = linuxify_pathname(input_file['filename'])
        print(linux_filename)
        dijkringnr = input_file['dijkringnr']
        with files.temporarily_unzipped(linux_filename) as files_in_zip:
            for filename_in_zip in files_in_zip:
                print(filename_in_zip)
                dataset = gdal.Open(filename_in_zip)
                # Read the data into a masked array
                arr = dataset.ReadAsArray()
                ndv = dataset.GetRasterBand(1).GetNoDataValue()
                masked_array = np.ma.array(arr, mask=(arr == ndv))
                if dijkringnr not in dijkring_arrays:
                    dijkring_arrays[dijkringnr] = []
                geo_transform = dataset.GetGeoTransform()
                geo_projection = dataset.GetProjection()
                dijkring_arrays[dijkringnr].append(
                    (masked_array, geo_transform, geo_projection))
                del dataset  # This closes the file, so that the
                             # directory can be deleted in Windows

    for dijkringnr, arrays in dijkring_arrays.items():
        # For each dijkringnr, take the element-wise maximum over all
        # its arrays; each tuple is (masked_array, geo_transform,
        # geo_projection).
        max_array = np_max([array[0] for array in arrays])
        geo_transform = arrays[0][1]
        geo_projection = arrays[0][2]

        # Apply dijkring mask
        mask = get_dijkring_mask(
            dijkringnr, geo_projection, geo_transform,
            max_array.shape[1], max_array.shape[0])
        # A cell is masked if it was masked already OR if it falls in
        # the dijkring mask.
        max_array.mask = np.maximum(mask, max_array.mask)

        result[dijkringnr] = (max_array, geo_transform)

        ascii_filename = mktemp()
        write_masked_array_as_ascii(ascii_filename, max_array, geo_transform)
        arc_name = '%s_%d.asc' % (gridtype, dijkringnr)
        add_to_zip(tmp_zip_filename,
                   [{'filename': ascii_filename,
                     'arcname': arc_name,
                     'delete_after': True}])

    return result
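
np_max is not defined in this example; it apparently reduces a list of
masked arrays to their element-wise maximum. A minimal sketch under that
assumption:

import numpy as np


def np_max(arrays):
    # Assumed helper: element-wise maximum over a list of masked
    # arrays, following numpy.ma's mask propagation.
    result = arrays[0]
    for array in arrays[1:]:
        result = np.ma.maximum(result, array)
    return result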
Example 6
def scenario_results_zipfile(request, permission_manager, scenario_id):
    """Return all results of a given scenario as one large
    zipfile. Requires approval permissions in the scenario."""

    scenario = models.Scenario.objects.get(pk=scenario_id)

    if not permission_manager.check_scenario_permission(
            scenario, models.UserPermission.PERMISSION_SCENARIO_APPROVE):
        raise PermissionDenied()

    results = scenario.result_set.all()

    resultset = set()

    for result in results:
        resultloc = result.resultloc
        if not resultloc:
            continue

        resultloc = result_location(resultloc)

        if os.path.exists(resultloc):
            resultset.add(resultloc)

    # We use a TemporaryFile, which is removed automatically as soon
    # as it is closed (or garbage collected).
    temp = tempfile.TemporaryFile(
        prefix="zipfile_for_downloading_scenario_results",
        dir=settings.TMP_DIR)

    zipf = zipfile.ZipFile(temp, mode="w")

    for loc in resultset:
        filename = os.path.basename(loc)
        if filename.endswith(".zip"):
            with files.temporarily_unzipped(loc) as paths:
                for path in paths:
                    zipf.write(path, os.path.basename(path),
                               zipfile.ZIP_DEFLATED)
        else:
            zipf.write(loc, filename, zipfile.ZIP_DEFLATED)

    zipf.close()

    # By wrapping the temp file in a Django FileWrapper, it is read
    # in chunks that always fit into memory.
    wrapper = FixedFileWrapper(temp)

    response = HttpResponse(wrapper, content_type="application/zip")
    response["Content-Disposition"] = "attachment; filename=resultaten_scenario_{0}.zip".format(scenario.id)
    # temp.tell() is the current position in the file, which is the
    # same as the length of the file
    response["Content-Length"] = temp.tell()

    # But for serving from this opened file, we have to start at the
    # beginning
    temp.seek(0)

    return response
def generate_for_result(result, results_dir, base_output_dir, tmp_dir,
                        maxwaterdepth_geotransform):
    logger.debug("examining results record: '%s'" % str(result))
    output_dir = os.path.join(base_output_dir, result.resulttype.name)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    result_location = os.path.join(results_dir,
                                   result.resultloc.replace('\\', '/'))
    logger.debug("Result location: " + result_location)

    # Check for empty result file
    if not os.path.exists(result_location):
        logger.warning("input file '%s' missing" % result_location)
        return

    if os.stat(result_location)[stat.ST_SIZE] == 0:
        logger.warning("input file '%s' is empty" % result.resultloc)
        return

    result_to_correct_gridta = (result if result.resulttype.name == 'gridta'
                                else None)

    if result_location.endswith('.zip'):
        # Unpack zip file
        with files.temporarily_unzipped(result_location,
                                        rezip=False,
                                        tmp_dir=tmp_dir) as unzipped:
            pyramid_or_animation = compute_pyramids(
                result, unzipped, result_to_correct_gridta, output_dir,
                maxwaterdepth_geotransform)
    else:
        # Just use the file itself
        pyramid_or_animation = compute_pyramids(result, [result_location],
                                                result_to_correct_gridta,
                                                output_dir,
                                                maxwaterdepth_geotransform)

    if hasattr(pyramid_or_animation, 'frames'):
        if result.animation:
            result.animation.delete()  # Delete the old object
            # Doesn't delete the old files, they're already overwritten
        result.animation = pyramid_or_animation
    else:
        if result.raster:
            # Delete the old one (and its pyramid!)
            logger.debug("Deleting old pyramid")
            result.raster.delete()
        result.raster = pyramid_or_animation

    result.save()
Example 8
def generate_for_result(result, results_dir, base_output_dir, tmp_dir,
                        maxwaterdepth_geotransform):
    logger.debug("examining results record: '%s'" % str(result))
    output_dir = os.path.join(base_output_dir, result.resulttype.name)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    result_location = os.path.join(
        results_dir, result.resultloc.replace('\\', '/'))
    logger.debug("Result location: " + result_location)

    # Check for empty result file
    if not os.path.exists(result_location):
        logger.warning("input file '%s' missing" % result_location)
        return

    if os.stat(result_location)[stat.ST_SIZE] == 0:
        logger.warning("input file '%s' is empty" % result.resultloc)
        return

    result_to_correct_gridta = (
        result if result.resulttype.name == 'gridta'
        else None)

    if result_location.endswith('.zip'):
        # Unpack zip file
        with files.temporarily_unzipped(
            result_location, rezip=False, tmp_dir=tmp_dir) as unzipped:
            pyramid_or_animation = compute_pyramids(
                result, unzipped, result_to_correct_gridta, output_dir,
                maxwaterdepth_geotransform)
    else:
        # Just use the file itself
        pyramid_or_animation = compute_pyramids(
            result, [result_location], result_to_correct_gridta,
            output_dir, maxwaterdepth_geotransform)

    if hasattr(pyramid_or_animation, 'frames'):
        if result.animation:
            result.animation.delete()  # Delete the old object
            # Doesn't delete the old files, they're already overwritten
        result.animation = pyramid_or_animation
    else:
        if result.raster:
            # Delete the old one (and its pyramid!)
            logger.debug("Deleting old pyramid")
            result.raster.delete()
        result.raster = pyramid_or_animation

    result.save()
Example 9
def inundation_based_on_flsh(scenario):
    for result_type_id in (WATERDEPTH_ANIM_IMPORT_RESULT_ID,
                           WATERDEPTH_ANIM_SOBEK_RESULT_ID):
        if scenario.result_set.filter(resulttype__id=result_type_id).exists():
            use_result_type_id = result_type_id
            break
    else:
        raise AssertionError("Neither resulttype for fls_h.inc found!")

    with files.temporarily_unzipped(result_zip(scenario,
                                               use_result_type_id)) as names:
        for name in names:
            if os.path.basename(name) == 'fls_h.inc':
                j = process_flsh(name)
                save_inundation_json(scenario, j)
                break
def inundation_based_on_flsh(scenario):
    for result_type_id in (
        WATERDEPTH_ANIM_IMPORT_RESULT_ID,
        WATERDEPTH_ANIM_SOBEK_RESULT_ID):
        if scenario.result_set.filter(resulttype__id=result_type_id).exists():
            use_result_type_id = result_type_id
            break
    else:
        raise AssertionError("Neither resulttype for fls_h.inc found!")

    with files.temporarily_unzipped(
        result_zip(scenario, use_result_type_id)) as names:
        for name in names:
            if os.path.basename(name) == 'fls_h.inc':
                j = process_flsh(name)
                save_inundation_json(scenario, j)
                break
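
result_zip(scenario, result_type_id) is assumed to return the path of
the zipped result of the given result type; it is not shown on this
page. A plausible sketch (the setting name is borrowed from another
example here and may differ):

import os

from django.conf import settings


def result_zip(scenario, result_type_id):
    # Assumed helper: find the scenario's result of this type and
    # return the absolute path of its zip file.
    result = scenario.result_set.get(resulttype__id=result_type_id)
    return os.path.join(settings.EXTERNAL_RESULT_MOUNTED_DIR,
                        result.resultloc.replace('\\', '/'))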
Example 11
def calculate_statistics(scenario_id):
    scenario = models.Scenario.objects.get(pk=scenario_id)

    # We need the max water depth .asc file, which is in the results
    # of resulttype 1.
    masked_array = None
    with files.temporarily_unzipped(result_zip(scenario, 1)) as names:
        for name in names:
            if os.path.basename(name) == 'dm1maxd0.asc':
                dataset = gdal.Open(name)
                # Read the data into a masked array
                masked_array = get_masked_array(dataset)
                geo_transform = dataset.GetGeoTransform()
                del dataset  # This closes the file, so that the
                             # directory can be deleted in Windows
                break
        else:
            raise AssertionError(
                "Zip file for resulttype 1 ({0}) didn't include a "
                "dm1maxd0.asc.".format(result_zip(scenario, 1)))

    total_inundation_volume, total_inundated_area = (
        calculate_inundation_and_area(masked_array, geo_transform[1]))

    # Save values in Results for this scenario
    resulttype_inundated_area = models.ResultType.objects.get(
        shortname_dutch="overstroomd gebied")
    resulttype_inundation_volume = models.ResultType.objects.get(
        shortname_dutch="inundatievolume")

    for resulttype, value in (
        (resulttype_inundated_area, total_inundated_area),
        (resulttype_inundation_volume, total_inundation_volume)):
        try:
            result = models.Result.objects.get(
                scenario=scenario, resulttype=resulttype)
        except models.Result.DoesNotExist:
            result = models.Result(
                scenario=scenario, resulttype=resulttype)

        result.value = value
        result.resultloc = 'dm1maxd0.asc'
        result.unit = resulttype.unit
        result.save()
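
get_masked_array and calculate_inundation_and_area are not shown here,
but the last example on this page inlines the same computations;
sketches consistent with that code (they assume square, axis-aligned
RD pixels):

import numpy as np


def get_masked_array(dataset):
    # Read the GDAL dataset and mask its nodata cells.
    arr = dataset.ReadAsArray()
    ndv = dataset.GetRasterBand(1).GetNoDataValue()
    return np.ma.array(arr, mask=(arr == ndv))


def calculate_inundation_and_area(masked_array, pixel_size):
    # Area: number of cells with depth > 0 times one cell's area.
    # Volume: summed depth of all cells times one cell's area.
    cellsize_in_m2 = abs(pixel_size) ** 2
    total_inundated_area = (
        np.greater(masked_array, 0).sum() * cellsize_in_m2)
    total_inundation_volume = masked_array.sum() * cellsize_in_m2
    return total_inundation_volume, total_inundated_area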
Example 12
def inundation_based_on_max_depth(scenario):
    # We need the max water depth .asc file, which is in the results
    # of resulttype 1.
    masked_array = None
    with files.temporarily_unzipped(
            result_zip(scenario, MAX_WATER_DEPTH_RESULT_ID)) as names:
        for name in names:
            if os.path.basename(name) == 'dm1maxd0.asc':
                dataset = gdal.Open(name)
                # Read the data into a masked array
                masked_array = get_masked_array(dataset)
                geo_transform = dataset.GetGeoTransform()
                del dataset  # This closes the file, so that the
                             # directory can be deleted in Windows
                break
        else:
            raise AssertionError(
                "Zip file for resulttype {0} ({1}) didn't include a "
                "dm1maxd0.asc.".format(
                    MAX_WATER_DEPTH_RESULT_ID,
                    result_zip(scenario, MAX_WATER_DEPTH_RESULT_ID)))

    total_inundation_volume, total_inundated_area = (
        calculate_inundation_and_area(masked_array, geo_transform[1]))

    # Save values in Results for this scenario
    resulttype_inundated_area = models.ResultType.objects.get(
        shortname_dutch="overstroomd gebied")
    resulttype_inundation_volume = models.ResultType.objects.get(
        shortname_dutch="inundatievolume")

    for resulttype, value in ((resulttype_inundated_area,
                               total_inundated_area),
                              (resulttype_inundation_volume,
                               total_inundation_volume)):
        try:
            result = models.Result.objects.get(scenario=scenario,
                                               resulttype=resulttype)
        except models.Result.DoesNotExist:
            result = models.Result(scenario=scenario, resulttype=resulttype)

        result.value = value
        result.resultloc = 'dm1maxd0.asc'
        result.unit = resulttype.unit
        result.save()
def all_files_in(filename):
    """Generator that yields filename if it is not a zip, and all
    files inside otherwise. First linuxifies filename. Yields nothing
    in case filename is not a file, or a bad zip file.

    Unzipped files are cleaned up (and thus unusable) after looping
    through this! Therefore if you only need to read the first file,
    don't use next(), but use a pattern like:

    for f in all_files_in(path):
        process(f)  # Process first file
        break       # Skip rest; files are cleaned up here
    """
    filename = fix_path(filename)
    log.debug(b"all_files_in({f})".format(f=filename))

    if is_valid_zipfile(filename):
        with files.temporarily_unzipped(filename) as files_in_zip:
            for filename_in_zip in files_in_zip:
                yield filename_in_zip
    else:
        if os.path.isfile(filename) and not filename.endswith(".zip"):
            yield filename
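
fix_path and is_valid_zipfile are assumed helpers here. Plausible
sketches: fix_path "linuxifies" a Windows-style pathname as the
docstring describes, and is_valid_zipfile wraps the stdlib check:

import os
import zipfile


def fix_path(filename):
    # Assumed: replace Windows path separators.
    return filename.replace('\\', '/')


def is_valid_zipfile(filename):
    # Assumed: an existing file that the stdlib recognizes as a zip.
    return os.path.isfile(filename) and zipfile.is_zipfile(filename)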
def process_scenario(scenario_id):
    """Convert a 3Di scenario that was imported."""

    # The normal (Sobek) task starts Workflow Template 2.
    # This starts
    # 132 -> 134 -> 160 -> 162 -> 180 -> 185
    # and
    # 150 -> 155
    #
    # meaning
    #
    # 132 = compute rise speed
    # 134 = compute mortality grid
    # 160 = simulation
    # 162 = embankment damage
    # 180 = pyramid generation
    # 185 = presentation generation
    # 150 = pyramid generation (second chain)
    # 155 = presentation generation (second chain)
    #
    # pyramid_generation has 'sobek' and 'his_ssm' functions
    # Same for presentation generation
    #
    # For 3Di we need to do
    # - Compute "sobek-equivalent" results
    # - See if the sobek pyramid generation works on it
    # - See if the sobek presentation generation works on it

    scenario = Scenario.objects.get(pk=scenario_id)

    bathymetry = scenario.result_set.get(resulttype__name='bathymetry')
    netcdf = scenario.result_set.get(resulttype__name='results_3di')

    success1, success2 = False, False
    result1, result2 = None, None

    with temporarily_unzipped(bathymetry.absolute_resultloc) as bathpath:
        with temporarily_unzipped(netcdf.absolute_resultloc) as ncdfpath:
            bathymetry_dataset = gdal.Open(bathpath[0])

            with Converter(ncdfpath[0]) as converter:
                workdir = tempfile.mkdtemp()

                try:
                    result1, success1 = compute_waterdepth_animation(
                        scenario,
                        bathymetry_dataset,
                        converter,
                        workdir)

                    result2, success2 = compute_max_waterdepth_tif_result(
                        scenario,
                        bathymetry_dataset,
                        converter,
                        workdir)
                finally:
                    shutil.rmtree(workdir)

    success = all([success1, success2])
    result = result1, result2
    return (success, result, '-')
Example 16
def common_generation(scenario_id, source_programs, tmp_dir):
    """invokes compute_png_files for all grids

    loop on all results computed for the given scenario_id, unpack
    them into a temporary directory, get the corresponding color
    mapping, convert to png, set in the results record the
    resultpngloc field.
    """

    scenario = Scenario.objects.get(pk=scenario_id)
    destination_dir = (
        Setting.objects.get(key='DESTINATION_DIR').value.replace('\\', '/'))
    source_dir = (
        Setting.objects.get(key='SOURCE_DIR').value.replace('\\', '/'))

    logger.debug("select results relative to scenario %s" % scenario_id)
    results = list(
        scenario.result_set.filter(
            resulttype__program__in=source_programs,
            resulttype__color_mapping_name__isnull=False).exclude(
                resulttype__color_mapping_name=""))

    logger.debug("selected results for scenario: %s" % str(results))

    output_dir_name = os.path.join(destination_dir, scenario.get_rel_destdir())

    logger.debug("starting the loop on all previously computed results")

    for result in results:
        logger.debug("examining results record: '%s'" % str(result))

        result_location = os.path.join(
            destination_dir, result.resultloc.replace('\\', '/'))
        logger.debug("Result location: " + result_location)
        result_output_dir = os.path.join(
            output_dir_name, result.resulttype.name)

        # Check for empty result file
        if os.stat(result_location)[stat.ST_SIZE] == 0:
            logger.warning("input file '%s' is empty" % result.resultloc)
            continue

        # Make sure destination directory exists
        if not os.path.isdir(result_output_dir):
            os.makedirs(result_output_dir)

        # Figure out the color mapping name
        if (result.resulttype.id in [0, 1]
                and scenario.main_project.color_mapping_name):
            color_mapping_name = scenario.main_project.color_mapping_name
        else:
            color_mapping_name = result.resulttype.color_mapping_name

        colormapping_abs = os.path.join(
            source_dir, "colormappings", color_mapping_name)
        # Copy color mapping
        shutil.copy(
            colormapping_abs,
            os.path.join(result_output_dir, "colormapping.csv"))

        if result_location.endswith('.zip'):
            # Unpack zip file
            with files.temporarily_unzipped(
                result_location, rezip=False, tmp_dir=tmp_dir) as unzipped:
                infile_asc = compute_png_files(
                    result, result_output_dir,
                    unzipped,
                    colormapping_abs)
        else:
            # Just use the file itself
            infile_asc = compute_png_files(
                result, result_output_dir,
                [result_location],
                colormapping_abs)

        # Set pngloc to result
        result.resultpngloc = os.path.join(
            result_output_dir, infile_asc + ".png")
        result.save()

    return True
Example 17
def common_generation(scenario_id, source_programs, tmp_dir):
    """invokes compute_png_files for all grids

    loop on all results computed for the given scenario_id, unpack
    them into a temporary directory, get the corresponding color
    mapping, convert to png, set in the results record the
    resultpngloc field.
    """

    scenario = Scenario.objects.get(pk=scenario_id)
    destination_dir = settings.EXTERNAL_RESULT_MOUNTED_DIR
    source_dir = settings.EXTERNAL_SOURCE_MOUNTED_DIR

    logger.debug("select results relative to scenario %s" % scenario_id)
    results = list(
        scenario.result_set.filter(
            resulttype__program__in=source_programs,
            resulttype__color_mapping_name__isnull=False).exclude(
                resulttype__color_mapping_name=""))

    logger.debug("selected results for scenario: %s" % str(results))

    output_dir_name = os.path.join(destination_dir, scenario.get_rel_destdir())

    logger.debug("starting the loop on all previously computed results")

    for result in results:
        logger.debug("examining results record: '%s'" % str(result))

        result_location = os.path.join(destination_dir,
                                       result.resultloc.replace('\\', '/'))
        logger.debug("Result location: " + result_location)
        result_output_dir = os.path.join(output_dir_name,
                                         result.resulttype.name)

        # Check for empty result file
        if os.stat(result_location)[stat.ST_SIZE] == 0:
            logger.warning("input file '%s' is empty" % result.resultloc)
            continue

        # Make sure destination directory exists
        if not os.path.isdir(result_output_dir):
            os.makedirs(result_output_dir)

        # Figure out the color mapping name
        if (result.resulttype.id in [0, 1]
                and scenario.main_project.color_mapping_name):
            color_mapping_name = scenario.main_project.color_mapping_name
        else:
            color_mapping_name = result.resulttype.color_mapping_name

        colormapping_abs = os.path.join(source_dir, "colormappings",
                                        color_mapping_name)
        # Copy color mapping
        shutil.copy(colormapping_abs,
                    os.path.join(result_output_dir, "colormapping.csv"))

        if result_location.endswith('.zip'):
            # Unpack zip file
            with files.temporarily_unzipped(result_location,
                                            rezip=False,
                                            tmp_dir=tmp_dir) as unzipped:
                infile_asc = compute_png_files(result, result_output_dir,
                                               unzipped, colormapping_abs)
        else:
            # Just use the file itself
            infile_asc = compute_png_files(result, result_output_dir,
                                           [result_location], colormapping_abs)

        # Set pngloc to result
        result.resultpngloc = os.path.join(result_output_dir,
                                           infile_asc + ".png")
        result.save()

    return True
def calculate_statistics(scenario_id):
    scenario = models.Scenario.objects.get(pk=scenario_id)

    # We need the max water depth .asc file, which is in the results
    # of resulttype 1.
    arr = None
    with files.temporarily_unzipped(result_zip(scenario, 1)) as names:
        for name in names:
            if os.path.basename(name) == 'dm1maxd0.asc':
                dataset = gdal.Open(name)
                # Read the data into a masked array
                arr = dataset.ReadAsArray()
                ndv = dataset.GetRasterBand(1).GetNoDataValue()
                masked_array = ma.array(arr, mask=(arr == ndv))
                geo_transform = dataset.GetGeoTransform()
                del dataset  # This closes the file, so that the
                             # directory can be deleted in Windows
                break

    if arr is None:
        raise AssertionError(
            "Zip file for resulttype 1 didn't include a dm1maxd0.asc.")

    # If we assume that the asc's projection is RD, then the absolute
    # PixelSize gives the width of a pixel in m
    widthx = abs(geo_transform[1])
    widthy = abs(geo_transform[5])
    cellsize_in_m2 = widthx * widthy

    # We assume some things, probably need to check for them, namely
    # that the projection is RD and that pixels are aligned with the
    # projection (geo_transform[2] and [4] are 0)

    # Inundated area is the number of cells that have a max depth > 0,
    # multiplied by each cell's area
    total_inundated_area = (
        numpy.greater(masked_array, 0).sum() * cellsize_in_m2)

    # Total volume is the total depth of all cells multiplied by the area
    # of one cell
    total_inundation_volume = (
        masked_array.sum() * cellsize_in_m2)

    # Save values in Results for this scenario
    resulttype_inundated_area = models.ResultType.objects.get(
        shortname_dutch="overstroomd gebied")
    resulttype_inundation_volume = models.ResultType.objects.get(
        shortname_dutch="inundatievolume")

    for resulttype, value in (
        (resulttype_inundated_area, total_inundated_area),
        (resulttype_inundation_volume, total_inundation_volume)):
        try:
            result = models.Result.objects.get(
                scenario=scenario, resulttype=resulttype)
        except models.Result.DoesNotExist:
            result = models.Result(
                scenario=scenario, resulttype=resulttype)

        result.value = value
        result.resultloc = 'dm1maxd0.asc'
        result.unit = resulttype.unit
        result.save()
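
To make the area/volume arithmetic above concrete, a toy check with a
2x2 grid of 5 m pixels (the numbers are made up):

import numpy as np

depths = np.ma.array([[0.0, 1.0],
                      [2.0, 0.5]],
                     mask=[[True, False],
                           [False, False]])
cellsize_in_m2 = 5.0 * 5.0  # 5 m x 5 m pixels

# Three unmasked cells have depth > 0: 3 * 25 m2 = 75 m2.
area = np.greater(depths, 0).sum() * cellsize_in_m2
# Summed unmasked depth is 3.5 m: 3.5 * 25 m2 = 87.5 m3.
volume = depths.sum() * cellsize_in_m2
print(area, volume)  # 75.0 87.5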