# Example #1
# 0
def main(options, args):
    """Import Result rows from the ``*.csv`` files in ``options.csvpath``.

    Each CSV file name encodes an ExportRun primary key as the
    next-to-last ``_``-separated token.  For every data row a Result is
    created (unless an identical one already exists); the owning
    ExportRun is then marked done, stamped with the file's creation
    time, and the file is removed.

    Exits with status 1 when the folder or the ExportRun is missing.
    """
    # Define a console log handler and attach it to the root logger.
    console = logging.StreamHandler()
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    console.setLevel(options.console_level)
    logging.getLogger('').addHandler(console)

    # Set loglevel for this module.
    log.setLevel(options.loglevel)

    if not os.path.exists(options.csvpath):
        log.error('The folder does not exist.')
        sys.exit(1)

    log.info('Processing list of .csv files')
    # glob already restricts matches to *.csv, so no per-file extension
    # check is needed (the old one was unreachable).
    for csvfile in glob.glob(os.path.join(options.csvpath, '*.csv')):
        log.info('Reading rows and creating Result objects')

        # Find exportid; count backwards to avoid underscores in
        # foldernames.
        exportid = csvfile.split('_')[-2]
        log.debug('exportid = ' + str(exportid))
        try:
            e_run = ExportRun.objects.get(pk=exportid)
        except Exception:  # was a bare except: -- never swallow SystemExit
            log.error('No Export Run object found with id: ' + str(exportid))
            sys.exit(1)

        # ``with`` guarantees the file is closed even if a row is bad;
        # the old code leaked the handle on any exception.
        with open(csvfile, "rb") as csv_file:
            reader = csv.reader(csv_file)
            next(reader)  # skip the header row (portable, not .next())

            for row in reader:
                log.debug(row)
                # Check if object already exists.
                existing_objects = Result.objects.filter(area=row[0],
                                                         name=row[1],
                                                         file_location=row[2],
                                                         export_run=e_run)
                log.debug('existing_result_objects =' + str(existing_objects))
                if not existing_objects:
                    Result(area=row[0],
                           name=row[1],
                           file_location=row[2],
                           export_run=e_run).save()

        e_run.state = ExportRun.EXPORT_STATE_DONE
        # Use the CSV file's creation time as the run date.
        e_run.run_date = datetime.datetime.fromtimestamp(
            os.path.getctime(csvfile))
        e_run.save()
        os.remove(csvfile)
def main(options, args):
    """Import Result rows from the ``*.csv`` files in ``options.csvpath``.

    The ExportRun primary key is taken from the file name (next-to-last
    ``_``-separated token).  A Result is created per data row unless an
    identical one exists; the ExportRun is marked done with the file's
    creation time as run date, and the file is deleted afterwards.

    Exits with status 1 when the folder or the ExportRun is missing.
    """
    # Console log handler on the root logger.
    console = logging.StreamHandler()
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    console.setLevel(options.console_level)
    logging.getLogger('').addHandler(console)

    # Loglevel for this module.
    log.setLevel(options.loglevel)

    if not os.path.exists(options.csvpath):
        log.error('The folder does not exist.')
        sys.exit(1)

    log.info('Processing list of .csv files')
    # The glob pattern already guarantees a .csv suffix; the previous
    # in-loop extension check could never fire and has been dropped.
    for csvfile in glob.glob(os.path.join(options.csvpath, '*.csv')):
        log.info('Reading rows and creating Result objects')

        # Find exportid; count backwards to avoid underscores in
        # foldernames.
        exportid = csvfile.split('_')[-2]
        log.debug('exportid = ' + str(exportid))
        try:
            e_run = ExportRun.objects.get(pk=exportid)
        except Exception:  # narrowed from a bare except:
            log.error('No Export Run object found with id: ' + str(exportid))
            sys.exit(1)

        # Context manager closes the handle on every exit path (the old
        # explicit close() was skipped whenever an exception escaped).
        with open(csvfile, "rb") as csv_file:
            reader = csv.reader(csv_file)
            next(reader)  # skip header

            for row in reader:
                log.debug(row)
                # Check if object already exists.
                existing_objects = Result.objects.filter(
                    area=row[0], name=row[1],
                    file_location=row[2], export_run=e_run)
                log.debug('existing_result_objects =' + str(existing_objects))
                if not existing_objects:
                    r = Result(area=row[0],
                               name=row[1],
                               file_location=row[2],
                               export_run=e_run)
                    r.save()

        e_run.state = ExportRun.EXPORT_STATE_DONE
        e_run.run_date = datetime.datetime.fromtimestamp(
            os.path.getctime(csvfile))
        e_run.save()
        os.remove(csvfile)
def calculate_export_maps(exportrun_id):
    """
    Execute an ExportRun that creates different maps.

    The function makes temp result files in the temp directory. Output
    is a zip file (in Settings -> EXPORT_FOLDER) containing these
    files and a Result object associated to it. Old Result objects are
    deleted (and associated files are not deleted).

    Note: only the basename is saved. You have to calculate the full
    path yourself by prepending the name with Setting.EXPORT_FOLDER.
    """
    export_run = ExportRun.objects.get(id=exportrun_id)
    export_folder = Setting.objects.get(key='EXPORT_FOLDER').value
    result_files = []

    tmp_zip_filename = mktemp()

    if export_run.export_max_waterdepth or export_run.export_possibly_flooded:
        print 'export_max_waterdepth'
        gridtype = 'gridmaxwaterdepth'
        # Calc max waterdepth
        export_result_type = ResultType.objects.get(name=gridtype).id
        # Find out input files for this type
        input_files = export_run.input_files(export_result_type)
        max_waterdepths = dijkring_arrays_to_zip(input_files, tmp_zip_filename, gridtype)

    if export_run.export_max_flowvelocity:
        # Calc max flow velocity
        print 'export_max_flowvelocity'
        gridtype = 'gridmaxflowvelocity'
        # Calc max waterdepth
        export_result_type = ResultType.objects.get(name=gridtype).id
        # Find out input files for this type
        input_files = export_run.input_files(export_result_type)
        max_flowvelocities = dijkring_arrays_to_zip(input_files, tmp_zip_filename, gridtype)

    if export_run.export_possibly_flooded:
        # Calculate the possible flooded area
        print 'export_possibly_flooded'
        #possibly_flooded = {}
        for dijkringnr, array in max_waterdepths.items():
            flooded_array = np.ma.where(array[0] < 0.02, 0, 1)
            #possibly_flooded[dijkringnr] = flooded_array
            ascii_filename = mktemp()
            arc_name = 'possibly_flooded_%d.asc' % (dijkringnr)
            geo_transform = array[1]
            write_masked_array_as_ascii(ascii_filename, flooded_array, geo_transform)
            add_to_zip(
                tmp_zip_filename,
                [{'filename': ascii_filename, 'arcname': arc_name, 'delete_after': True}])

    # Move file to destination.
    print tmp_zip_filename

    dst_basename = 'export_run_%d.zip' % export_run.id
    dst_filename = os.path.join(export_folder, dst_basename)

    print 'Moving file from %s to %s...' % (tmp_zip_filename, dst_filename)
    shutil.move(tmp_zip_filename, dst_filename)

    result_count = Result.objects.filter(export_run=export_run).count()
    if result_count > 0:
        print 'Warning: deleting old Result objects for this export run...'
        Result.objects.filter(export_run=export_run).delete()

    print 'Making Result object with link to file...'
    result = Result(
        name=export_run.name,
        file_basename=dst_basename,
        area=Result.RESULT_AREA_DIKED_AREA, export_run=export_run)
    result.save()

    print 'Updating state of export_run...'
    export_run.state = ExportRun.EXPORT_STATE_DONE
    export_run.save()

    print 'Finished.'