Example #1
    def dump_project_py():
        """Debug routine - dump project attributes to the log."""

        # list all project.* attributes
        for key in dir(project):
            if not key.startswith('__'):
                try:
                    log.info('project.%s=%s' % (key, eval('project.%s' % key)))
                except AttributeError:
                    pass
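The dump above builds 'project.<name>' strings and eval()s them; a minimal sketch of the same routine using getattr(), assuming the same project module and log object as in the example, is:

def dump_project_py():
    """Debug routine - dump project attributes to the log."""

    # getattr() avoids building and eval()ing attribute-access strings
    for key in dir(project):
        if not key.startswith('__'):
            try:
                log.info('project.%s=%s' % (key, getattr(project, key)))
            except AttributeError:
                pass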
Example #2
def build_elevation():
    """Create combined elevation data.

    Combine all raw elevation data and clip to bounding polygon.
    """

    # if no elevation to combine, we *must* have a combined elevation file
    if not project.point_filenames:    
        if not project.combined_elevation_file:
            abort('No raw elevation data and no combined elevation data!?')
        return

    # user wants us to create combined elevation, make output filename
    project.combined_elevation_file = os.path.join(
                                              project.topographies_folder,
                                              'combined_elevation.pts')

    # Create Geospatial data from TXT files
    geospatial_data = {}
    for filename in project.point_filenames:
        log.info('Reading elevation file %s' % filename)
        absolute_filename = os.path.join(project.raw_elevation_folder, filename)
        G_points = anuga.geospatial_data.\
                       Geospatial_data(file_name=absolute_filename,
                                       verbose=False)

        geospatial_data[filename] = G_points.clip(project.bounding_polygon)

    #####
    # Combine, clip and export dataset
    #####

    G = None
    for key in geospatial_data:
        G += geospatial_data[key]

    G.export_points_file(project.combined_elevation_file)

    # Use for comparison in ARC
    # DO WE NEED THIS?
    try:
        (stem, _) = project.combined_elevation_file.rsplit('.', 1)
    except ValueError:
        stem = project.combined_elevation_file
    G.export_points_file(stem + '.txt')
Example #3
def run_simulation(vtype, sim_obj):
    '''Run a simulation.

    Returns True if all went well, else False.
    '''
    
    # untar the object
    tar_path = os.path.join(Local_Data_Directory, sim_obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

    # modify project.py template
    log.debug("Creating '%s' version of project.py" % vtype)
    fd = open('project_template.py', 'r')
    project = fd.readlines()
    fd.close()

    new_project = []
    for line in project:
        new_project.append(line.replace('#!SETUP!#', vtype.lower()))
            
    fd = open('project.py', 'w')
    fd.write(''.join(new_project))
    fd.close()
    
    # import new project.py
    import project

    # run the simulation, produce SWW file
    log.info('Running the simulation ...')
    cmd = 'python run_model.py > %s' % RUNMODEL_STDOUT
    log.debug("run_simulation: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("run_simulation: res=%d" % res)

    # 'unimport' project.py
    del project

    # check result
    if res != 0:
        log.critical('Simulation failed, check log')

    return res == 0
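The os.system() call above shells out and redirects stdout to RUNMODEL_STDOUT; a hedged sketch of the same "run and capture" step with subprocess (run_model.py and RUNMODEL_STDOUT are the names assumed from the example):

import subprocess

def run_model_and_capture(stdout_path):
    """Run run_model.py, sending its stdout to 'stdout_path'; return the exit code."""
    with open(stdout_path, 'w') as out:
        return subprocess.call(['python', 'run_model.py'], stdout=out)

# equivalent to: res = os.system('python run_model.py > %s' % RUNMODEL_STDOUT)
# res = run_model_and_capture(RUNMODEL_STDOUT)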
Example #4
def build_elevation():
    """Create combined elevation data.

    Combine all raw elevation data and clip to bounding polygon.
    """

    # Create Geospatial data from ASCII files
    geospatial_data = {}
    for filename in project.ascii_grid_filenames:
        log.info('Reading elevation file %s' % filename)
        absolute_filename = os.path.join(project.raw_elevation_folder, filename)
        anuga.asc2dem(absolute_filename+'.asc',
                      use_cache=False, verbose=False)
        anuga.dem2pts(absolute_filename+'.dem', use_cache=False, verbose=False)

        G_grid = anuga.geospatial_data.\
                     Geospatial_data(file_name=absolute_filename+'.pts',
                                     verbose=False)

        geospatial_data[filename] = G_grid.clip(project.bounding_polygon)

    # Create Geospatial data from TXT files
    for filename in project.point_filenames:
        log.info('Reading elevation file %s' % filename)
        absolute_filename = os.path.join(project.raw_elevation_folder, filename)
        G_points = anuga.geospatial_data.\
                       Geospatial_data(file_name=absolute_filename,
                                       verbose=False)

        geospatial_data[filename] = G_points.clip(project.bounding_polygon)

    #####
    # Combine, clip and export dataset
    #####

    G = None
    for key in geospatial_data:
        G += geospatial_data[key]

    G.export_points_file(project.combined_elevation_filestem + '.pts')

    # Use for comparison in ARC
    # DO WE NEED THIS?
    G.export_points_file(project.combined_elevation_filestem + '.txt')
Example #5
def can_we_run():
    '''Decide if we can run with the files we have.
    
    Return True if we *can* run, else False.

    Tell user what is happening first, then untar files.
    '''

    log.critical('Checking if you have the required files to run:')

    # get max width of object name string
    max_width = 0
    for obj in Mandatory_Data_Objects:
        max_width = max(len(obj), max_width)
    for obj in Optional_Data_Objects:
        max_width = max(len(obj), max_width)

    # if we don't have *all* mandatory object, can't run
    have_mandatory_files = True
    for obj in Mandatory_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING AND REQUIRED!' % obj.ljust(max_width))
            have_mandatory_files = False

    # at least *one* of these must exist
    have_optional_files = False
    for obj in Optional_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            have_optional_files = True
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING!' % obj.ljust(max_width))

    if not have_mandatory_files or not have_optional_files:
        log.critical('You must obtain the missing files before you can run '
                     'this validation.')
        return False

    log.critical('You have enough required files to run.')
    log.critical()

    return True
Example #6
def setup_model():
    """Perform sanity checks.

    The checks here can be simpler than for full-blown ANUGA as the directory
    structure is automatically generated.
    """

    # flag - we check many things and then don't proceed if anything wrong
    sanity_error = False               # checked at bottom of this file

    #####
    # check directory Structure
    #####

    if not os.path.exists(project.home):
        log.error("Sorry, data directory '%s' doesn't exist" % project.home)
        sanity_error = True

    if not os.path.exists(project.muxhome):
        log.error("Sorry, MUX directory '%s' doesn't exist" % project.muxhome)
        sanity_error = True

    if not os.path.exists(project.anuga_folder):
        log.error("Sorry, ANUGA directory '%s' doesn't exist"
                  % project.anuga_folder)
        sanity_error = True

    if not os.path.exists(project.topographies_folder):
        log.error("Sorry, topo directory '%s' doesn't exist"
                  % project.topographies_folder)
        sanity_error = True

    if not os.path.exists(project.polygons_folder):
        log.error("Sorry, polygon directory '%s' doesn't exist"
                  % project.polygons_folder)
        sanity_error = True

    if not os.path.exists(project.boundaries_folder):
        log.error("Sorry, boundaries directory '%s' doesn't exist"
                  % project.boundaries_folder)
        sanity_error = True

    if not os.path.exists(project.output_folder):
        log.error("Sorry, outputs directory '%s' doesn't exist"
                  % project.output_folder)
        sanity_error = True

    if not os.path.exists(project.gauges_folder):
        log.error("Sorry, gauges directory '%s' doesn't exist"
                  % project.gauges_folder)
        sanity_error = True

    if not os.path.exists(project.meshes_folder):
        log.error("Sorry, meshes directory '%s' doesn't exist"
                  % project.meshes_folder)
        sanity_error = True

    if not os.path.exists(project.mux_data_folder):
        log.error("Sorry, mux data directory '%s' doesn't exist"
                  % project.mux_data_folder)
        sanity_error = True

    # generate the event.lst file for the event
    get_multimux(project.event, project.multimux_folder, project.mux_input)

    # if multi_mux is True, check if multi-mux file exists
    if project.multi_mux:
        if not os.path.exists(project.mux_input):
            log.error("Sorry, MUX input file '%s' doesn't exist"
                      % project.mux_input)
            sanity_error = True

    if not os.path.exists(project.event_folder):
        log.error("Sorry, you must generate event %s with EventSelection."
                  % project.event)
        sanity_error = True

    #####
    # determine type of run, set some parameters depending on type
    #####

    if project.setup == 'trial':
        project.scale_factor = 100
        project.time_thinning = 96
        project.yieldstep = 240
    elif project.setup == 'basic':
        project.scale_factor = 4
        project.time_thinning = 12
        project.yieldstep = 120
    elif project.setup == 'final':
        project.scale_factor = 1
        project.time_thinning = 4
        project.yieldstep = 60
    else:
        log.error("Sorry, you must set the 'setup' variable to one of:"
                  '   trial - coarsest mesh, fast\n'
                  '   basic - coarse mesh\n'
                  '   final - fine mesh, slowest\n'
                  '\n'
                  "'setup' was set to '%s'" % project.setup)
        sanity_error = True

    #####
    # check for errors detected above.
    #####

    if sanity_error:
        msg = 'You must fix the above errors before continuing.'
        raise Exception(msg)

    #####
    # Reading polygons and creating interior regions
    #####

#    # Create list of land polygons with initial conditions
#    project.land_initial_conditions = []
#    for (filename, MSL) in project.land_initial_conditions_filename:
#        polygon = anuga.read_polygon(os.path.join(project.polygons_folder,
#                                                  filename))
#        project.land_initial_conditions.append([polygon, MSL])

    # Create list of interior polygons with scaling factor
    project.interior_regions = []
    for (filename, maxarea) in project.interior_regions_data:
        polygon = anuga.read_polygon(os.path.join(project.polygons_folder,
                                                  filename))
        project.interior_regions.append([polygon,
                                         maxarea*project.scale_factor])

    # Initial bounding polygon for data clipping
    project.bounding_polygon = anuga.read_polygon(os.path.join(
                                                      project.polygons_folder,
                                                      project.bounding_polygon))
    project.bounding_maxarea = project.bounding_polygon_maxarea \
                               * project.scale_factor

    # Estimate the number of triangles
    log.debug('number_mesh_triangles(%s, %s, %s)'
              % (str(project.interior_regions),
                 str(project.bounding_polygon),
                 str(project.bounding_maxarea)))
    triangle_min = number_mesh_triangles(project.interior_regions,
                                         project.bounding_polygon,
                                         project.bounding_maxarea)

    log.info('minimum estimated number of triangles=%d' % triangle_min)
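The trial/basic/final branch above can also be written as a lookup table; a sketch using the same values (not part of the original script, but the same project attributes):

# setup name -> (scale_factor, time_thinning, yieldstep), values as in the branches above
SETUP_PARAMETERS = {'trial': (100, 96, 240),
                    'basic': (4, 12, 120),
                    'final': (1, 4, 60)}

try:
    (project.scale_factor, project.time_thinning,
     project.yieldstep) = SETUP_PARAMETERS[project.setup]
except KeyError:
    raise ValueError("'setup' must be one of: trial, basic, final; got %r" % project.setup)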
Example #7
        mux_filenames = []
        for line in mux_data:
            muxname = line.strip().split()[0]
            split_index = muxname.index('.grd')
            muxname = muxname[:split_index+len('.grd')]
            muxname = os.path.join(project.mux_data_folder, muxname)
            mux_filenames.append(muxname)

        mux_weights = [float(line.strip().split()[1]) for line in mux_data]

        # Call legacy function to create STS file.
        anuga.urs2sts(mux_filenames, basename_out=output_dir,
                      ordering_filename=project.urs_order,
                      weights=mux_weights, verbose=False)
    else:                           # a single mux stem file, assume 1.0 weight
        log.info('using single-mux file %s' % mux_file)

        mux_file = os.path.join(project.event_folder, event_file)
        mux_filenames = [mux_file]

        weight_factor = 1.0
        mux_weights = weight_factor*num.ones(len(mux_filenames), num.Float)

        order_filename = project.urs_order

        # Create ordered sts file
        anuga.urs2sts(mux_filenames, basename_out=output_dir,
                      ordering_filename=order_filename,
                      weights=mux_weights, verbose=False)

    # report on progress so far
Example #8
def run_tsudat(json_data, logger=None):
    """Run ANUGA using data from a JSON data file.

    json_data  the path to the JSON data file

    Returns a dictionary of {'<type of file>': <list of files>, ...}.
    The dictionary keys and values added here are:
        'log':              list of a single path to the log file
        'results_max':      list of ASC files containing maximum values
        'sww':              list of all SWW files produced
        'timeseries':       list of all gauge files produced
        'timeseries_plot':  list of all plots for gauge files produced

    For example:
    {'log': ['/tmp/tsudat/user/project/VictorHarbour/trial/outputs/tsudat.log'],
     'results_max': ['/tmp/tsudat/user/project/VictorHarbour/trial/outputs/VictorHarbour_all_stage.asc',
                     '/tmp/tsudat/user/project/VictorHarbour/trial/outputs/VictorHarbour_all_depth.asc'],
     'sww': ['/tmp/tsudat/user/project/VictorHarbour/trial/outputs/VictorHarbour.sww'],
     'timeseries': ['/tmp/tsudat/user/project/VictorHarbour/trial/outputs/gauge_inner4.csv',
                    '/tmp/tsudat/user/project/VictorHarbour/trial/outputs/gauge_inner1.csv',
                    '/tmp/tsudat/user/project/VictorHarbour/trial/outputs/gauge_inner3.csv']}
    """


    def dump_project_py():
        """Debug routine - dump project attributes to the log."""

        # list all project.* attributes
        for key in dir(project):
            if not key.startswith('__'):
                try:
                    log.info('project.%s=%s' % (key, eval('project.%s' % key)))
                except AttributeError:
                    pass

    # set global logger
    global Logger
    Logger = logger

    # get JSON data and adorn project object with its data
    adorn_project(json_data)

    if project.debug:
        dump_project_py()

    # run the tsudat simulation
    youngest_input = get_youngest_input()
    sww_file = os.path.join(project.output_folder, project.scenario+'.sww')
    try:
        sww_ctime = os.path.getctime(sww_file)
    except OSError:
        sww_ctime = 0.0		# SWW file not there

    if project.force_run or youngest_input > sww_ctime:
        log.info('#'*90)
        log.info('# Running simulation')
        log.info('#'*90)
        if Logger:
            Logger('Running simulation')
        run_model()
        log.info('End of simulation')
        if Logger:
            Logger('End of simulation')
    else:
        log.info('#'*90)
        log.info('# Not running simulation')
        log.info('# If you want to force a simulation run, select FORCE RUN')
        log.info('#'*90)
        if Logger:
            Logger('Not running simulation\n'
                   'If you want to force a simulation run, select FORCE RUN')


    # add *all* SWW files in the output directory to result dictionary
    # (whether we ran a simulation or not)
    glob_mask = os.path.join(project.output_folder, '*.sww')
    project.payload['sww'] = glob.glob(glob_mask)

    # now do optional post-run extractions
    if project.get_results_max:
        log.info('~'*90)
        log.info('~ Running export_results_max()')
        log.info('~'*90)
        file_list = export_results_max()
        if Logger:
            Logger('Running export_results_max()')
        project.payload['results_max'] = file_list  # add files to output dict
        log.info('export_results_max() has finished')
        if Logger:
            Logger('export_results_max() has finished')
    else:
        log.info('~'*90)
        log.info('~ Not running export_results_max() - not requested')
        log.info('~'*90)
        if Logger:
            Logger('Not running export_results_max() - not requested')

    if project.get_timeseries:
        log.info('~'*90)
        log.info('~ Running get_timeseries()')
        log.info('~'*90)
        if Logger:
            Logger('Running get_timeseries()')
        file_list = get_timeseries()
        project.payload['timeseries'] = file_list  # add files to output dict
        # generate plot files
        plot_list = []
        for filename in file_list:
            plot_file = make_stage_plot(filename)
            plot_list.append(plot_file)
        project.payload['timeseries_plot'] = plot_list  # add files to output dict

        log.info('get_timeseries() has finished')
        if Logger:
            Logger('get_timeseries() has finished')
    else:
        log.info('~'*90)
        log.info('~ Not running get_timeseries() - not requested')
        log.info('~'*90)
        if Logger:
            Logger('Not running get_timeseries() - not requested')

    return project.payload
Example #9
def number_mesh_triangles(interior_regions, bounding_poly, remainder_res):
    """Calculate the approximate number of triangles inside the
    bounding polygon and the other interior regions

    Polygon areas are converted to square Kms

    FIXME: Add tests for this function
    """

    # TO DO check if any of the regions fall inside one another

    log.info('-' * 80)
    log.info('Polygon  Max triangle area (m^2)  Total area (km^2)  '
             'Estimated #triangles')
    log.info('-' * 80)
        
    no_triangles = 0.0
    area = polygon_area(bounding_poly)

    for poly, resolution in interior_regions:
        this_area = polygon_area(poly)
        this_triangles = this_area/resolution
        no_triangles += this_triangles
        area -= this_area

        log.info('Interior %s%s%d'
                 % (('%.0f' % resolution).ljust(25),
                    ('%.2f' % (this_area/1000000)).ljust(19), 
                    this_triangles))

    bound_triangles = area/remainder_res
    no_triangles += bound_triangles

    log.info('Bounding %s%s%d'
             % (('%.0f' % remainder_res).ljust(25),
                ('%.2f' % (area/1000000)).ljust(19),
                bound_triangles))

    total_number_of_triangles = no_triangles/0.7

    log.info('Estimated total number of triangles: %d'
                 % total_number_of_triangles)
    log.info('Note: This is generally about 20% less than the final amount')

    return int(total_number_of_triangles)
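The estimate is simply area divided by maximum triangle area, summed over the interior regions and the remainder, then divided by 0.7; a hypothetical usage sketch (coordinates in metres, resolutions in m^2):

# 10 km x 10 km bounding square with one 2 km x 2 km interior region
bounding = [[0, 0], [10000, 0], [10000, 10000], [0, 10000]]
interior = [[0, 0], [2000, 0], [2000, 2000], [0, 2000]]

# interior:  4 km^2 / 500 m^2     -> 8000 triangles
# remainder: 96 km^2 / 10000 m^2  -> 9600 triangles
# estimate:  (8000 + 9600) / 0.7  -> about 25142 triangles
estimate = number_mesh_triangles([[interior, 500]], bounding, 10000)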
Example #10
def export_results_max():
    """Export maximum resuts."""

    ######
    # Define allowed variable names and associated equations to generate values.
    ######
    # Note that Manning's n (friction value) is taken as 0.01, as in the model
    # run; the density of water is 1000.
    var_equations = {'stage': enm.export_newstage_max,
                     'oldstage': 'stage',
                     'fred': 'fred',
                     'momentum': '(xmomentum**2 + ymomentum**2)**0.5',
                     'olddepth': 'stage-elevation',
                     'depth': edm.export_depthonland_max,
                     'speed': '(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6)',
                     'energy': '(((xmomentum/(stage-elevation+1.e-6))**2'
                               '  + (ymomentum/(stage-elevation+1.e-6))**2)'
                               '*0.5*1000*(stage-elevation+1.e-6))+(9.81*stage*1000)',
                     'bed_shear_stress': ('(((1/(stage-elevation+1.e-6)**(7./3.))*1000*9.81*0.01**2*(xmomentum/(stage-elevation+1.e-6))*((xmomentum/(stage-elevation+1.e-6))**2+(ymomentum/(stage-elevation+1.e-6))**2)**0.5)**2'
                                          '+ ((1/(stage-elevation+1.e-6)**(7./3.))*1000*9.81*0.01**2*(ymomentum/(stage-elevation+1.e-6))*((xmomentum/(stage-elevation+1.e-6))**2+(ymomentum/(stage-elevation+1.e-6))**2)**0.5)**2)**0.5'),
                     'elevation': 'elevation'}

    ######
    # Start script, running through variables, area, sww file
    ######

    for which_var in project.layers_list:
        log.info("Exporting value: %s" % which_var)

        if which_var not in var_equations:
            log.critical('Unrecognized variable name: %s' % which_var)
            break

        for which_area in project.area:
            log.info("Using area: %s" % which_area)

            if which_area == 'All':
                easting_min = None
                easting_max = None
                northing_min = None
                northing_max = None
            else:
                try:
                    easting_min = eval('project.xmin%s' % which_area)
                    easting_max = eval('project.xmax%s' % which_area)
                    northing_min = eval('project.ymin%s' % which_area)
                    northing_max = eval('project.ymax%s' % which_area)
                except AttributeError:
                    log.critical('Unrecognized area name: %s' % which_area)
                    break

            name = os.path.join(project.output_folder, project.scenario_name)

            outname = name + '_' + which_area + '_' + which_var
            quantityname = var_equations[which_var]

            log.info('Generating output file: %s' % (outname+'.asc'))

            # if 'quantityname' is a string, handle it the old way,
            # else call the handler function (same params as anuga.sww2dem)
            if isinstance(quantityname, basestring):
                export_func = anuga.sww2dem
            elif callable(quantityname):
                export_func = quantityname

            export_func(name+'.sww', outname+'.asc', quantity=quantityname,
                        reduction=max, cellsize=project.cell_size,
                        easting_min=easting_min, easting_max=easting_max,
                        northing_min=northing_min, northing_max=northing_max,
                        verbose=False)
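As the comment above notes, a callable entry in var_equations is invoked with the same parameters as anuga.sww2dem (enm.export_newstage_max and edm.export_depthonland_max are the real handlers); a hypothetical handler could therefore look like this, simply delegating to anuga.sww2dem with a fixed quantity expression:

def export_depth_max(sww_file, asc_file, quantity=None, reduction=max,
                     cellsize=None, easting_min=None, easting_max=None,
                     northing_min=None, northing_max=None, verbose=False):
    """Hypothetical handler: export maximum depth via anuga.sww2dem."""
    return anuga.sww2dem(sww_file, asc_file, quantity='stage-elevation',
                         reduction=reduction, cellsize=cellsize,
                         easting_min=easting_min, easting_max=easting_max,
                         northing_min=northing_min, northing_max=northing_max,
                         verbose=verbose)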
Example #11
        muxname = line.strip().split()[0]
        split_index = muxname.index('.grd')
        muxname = muxname[:split_index+len('.grd')]
        muxname = os.path.join(project.mux_data_folder, muxname)
        mux_filenames.append(muxname)

    mux_weights = [float(line.strip().split()[1]) for line in mux_data]

    # Call legacy function to create STS file.
    anuga.urs2sts(mux_filenames, basename_out=output_dir,
                  ordering_filename=project.urs_order_file,
                  weights=mux_weights, verbose=False)

    # report on progress so far
    sts_file = os.path.join(project.event_folder, project.sts_filestem)
    log.info('STS filestem=%s' % sts_file)

    (quantities, elevation,
     time, gen_files) = get_sts_gauge_data(sts_file, verbose=False)
    log.debug('%d %d' % (len(elevation), len(quantities['stage'][0,:])))

    return gen_files

def define_default(name, default):
    """Check if a project attribute is defined, default it if not.

    name   name of attribute to check (string)
    default  default value if attribute isn't defined
    """

    try:
        getattr(project, name)
    except AttributeError:
        # attribute not defined on the project object, so set the default
        setattr(project, name, default)
Example #12
def run_chennai(sim_id):
    project_root = os.path.abspath(os.path.dirname(__file__))
    if not os.path.exists(project_root):
        os.makedirs(project_root)
    print "project_root = " + project_root

    inputs_dir = '%s/inputs/' % project_root
    if not os.path.exists(inputs_dir):
        os.makedirs(inputs_dir)
    print "inputs_dir = " + inputs_dir

    working_dir = '%s/working/%s/' % (project_root, sim_id)
    if not os.path.exists(working_dir):
        os.makedirs(working_dir)
    print "working_dir = " + working_dir

    outputs_dir = '%s/outputs/%s' % (project_root, sim_id)
    if not os.path.exists(outputs_dir):
        os.makedirs(outputs_dir)
    print "outputs_dir = " + outputs_dir

    # get data
    print "downloading data..."
    urllib.urlretrieve(
        'http://chennaifloodmanagement.org/uploaded/layers/utm44_1arc_v3.tif',
        inputs_dir + 'utm44_1arc_v3.tif'
    )

    print os.listdir(inputs_dir)

    # configure logging TODO: get this working!
    log_location = project_root + '/' + sim_id + '.log'
    open(log_location, 'a').close()
    log.console_logging_level = log.INFO
    log.log_logging_level = log.DEBUG
    log.log_filename = log_location
    print "# log.log_filename is: " + log.log_filename
    print "# log_location is: " + log_location
    log.debug('A message at DEBUG level')
    log.info('Another message, INFO level')

    print "# starting"
    bounding_polygon_01 = [
        [303382.14647903712, 1488780.8996663219],
        [351451.89152459265, 1499834.3704521982],
        [378957.03975921532, 1493150.8764886451],
        [422656.80798244767, 1504204.3472745214],
        [433196.16384805075, 1471300.9923770288],
        [421885.63560203766, 1413463.0638462803],
        [408261.59021479468, 1372590.9276845511],
        [371245.31595511554, 1427344.16669366],
        [316492.0769460068, 1417833.0406686035],
        [303382.14647903712, 1488780.8996663219]
    ]
    boundary_tags_01 = {
        'inland': [0, 1, 2, 6, 7, 8],
        'ocean': [3, 4, 5]
    }
    print "# Create domain:"
    print "# mesh_filename = " + working_dir + 'mesh_01.msh'
    domain = anuga.create_domain_from_regions(bounding_polygon=bounding_polygon_01,
                                              boundary_tags=boundary_tags_01,
                                              mesh_filename=working_dir + 'mesh_01.msh',
                                              maximum_triangle_area=100000,
                                              verbose=True)
    domain.set_name(sim_id)
    domain.set_datadir(outputs_dir)
    poly_fun_pairs = [
            [
                'Extent',
                inputs_dir + 'utm44_1arc_v3.tif'
            ]
    ]
    print "# create topography_function"
    print "input raster = " + inputs_dir + 'utm44_1arc_v3.tif'
    topography_function = qs.composite_quantity_setting_function(
        poly_fun_pairs,
        domain,
        nan_treatment='exception',
    )
    print topography_function
    print "# set_quantity elevation"
    domain.set_quantity('elevation', topography_function)  # Use function for elevation
    domain.set_quantity('friction', 0.03)  # Constant friction
    domain.set_quantity('stage', 1)  # Constant initial stage

    print "# all quantities set"

    print "# Setup boundary conditions"
    Br = anuga.Reflective_boundary(domain)  # Solid reflective wall
    Bt = anuga.Transmissive_boundary(domain)  # Continue all values on boundary
    Bd = anuga.Dirichlet_boundary([-20, 0., 0.])  # Constant boundary values
    Bi = anuga.Dirichlet_boundary([10.0, 0, 0])  # Inflow
    Bw = anuga.Time_boundary(
        domain=domain,  # Time dependent boundary
        function=lambda t: [(10 * sin(t * 2 * pi) - 0.3) * exp(-t), 0.0, 0.0]
    )

    print "# Associate boundary tags with boundary objects"
    domain.set_boundary({'inland': Br, 'ocean': Bd})
    print domain.get_boundary_tags()

    catchmentrainfall = Rainfall(
        domain=domain,
        rate=0.2
    )
    # # Note need path to File in String.
    # # Else assumed in same directory
    domain.forcing_terms.append(catchmentrainfall)

    print "# Evolve system through time"
    counter_timestep = 0
    for t in domain.evolve(yieldstep=300, finaltime=6000):
        counter_timestep += 1
        print counter_timestep
        print domain.timestepping_statistics()

    asc_out_momentum = outputs_dir + '/' + sim_id + '_momentum.asc'
    asc_out_depth = outputs_dir + '/' + sim_id + '_depth.asc'

    anuga.sww2dem(outputs_dir + '/' + sim_id + '.sww',
                  asc_out_momentum,
                  quantity='momentum',
                  number_of_decimal_places=3,
                  cellsize=30,
                  reduction=max,
                  verbose=True)
    anuga.sww2dem(outputs_dir + '/' + sim_id + '.sww',
                  asc_out_depth,
                  quantity='depth',
                  number_of_decimal_places=3,
                  cellsize=30,
                  reduction=max,
                  verbose=True)

    outputs =[asc_out_depth, asc_out_momentum]

    for output in outputs:
        print "# Convert ASCII grid to GeoTiff so geonode can import it"
        src_ds = gdal.Open(output)
        dst_filename = (output[:-3] + 'tif')

        print "# Create gtif instance"
        driver = gdal.GetDriverByName("GTiff")

        print "# Output to geotiff"
        dst_ds = driver.CreateCopy(dst_filename, src_ds, 0)

        print "# Properly close the datasets to flush the disk"
        dst_ds = None
        src_ds = None

    print "Done. Nice work."
Example #13
def run_tsudat(json_data):
    """Run ANUGA on the Amazon EC2.

    json_data  the path to the JSON data file

    Returns the boto instance object for the running image.
    """

    # plug our exception handler into the python system
    sys.excepthook = excepthook

    # get JSON data and adorn project object with its data
    adorn_project(json_data)

    # default certain values if not supplied in JSON data
    default_project_values()

    # set logfile to be in run output folder
    if project.debug:
        log.log_logging_level = log.DEBUG
    log.log_filename = os.path.join(project.output_folder, 'ui.log')
    if project.debug:
        dump_project_py()

    # do all required data generation before EC2 run
    log.info('#'*90)
    log.info('# Preparing simulation')
    log.info('#'*90)

    log.info('Calling: setup_model()')
    setup_model()

    log.info('Calling: build_elevation()')
    build_elevation()

    log.info('Calling: build_urs_boundary()')
    # create .payload dictionary, 'hpgauges' files are copied up to EC2
    # and then returned in the resultant ZIP S3 file
    project.payload = {}
    gauges = build_urs_boundary(project.mux_input_filename, project.event_sts)
    project.payload['hpgauges'] = gauges

    log.info('Calling: get_minmaxAOI()')
    get_minmaxAOI()

    # copy all required python modules to scripts directory
    ec2_name = os.path.join(ScriptsDir, Ec2RunTsuDATOnEC2)
    log.debug("Copying EC2 run file '%s' to scripts directory '%s'."
              % (Ec2RunTsuDAT, ec2_name))
    shutil.copy(Ec2RunTsuDAT, ec2_name)

    for extra in RequiredFiles:
        log.info('Copying %s to S3 scripts directory' % extra)
        shutil.copy(extra, ScriptsDir)

    # dump the current 'project' object back into JSON, put in 'scripts'
    json_file = os.path.join(ScriptsDir, JsonDataFilename)
    log.info('Dumping JSON to file %s' % json_file)
    dump_json_to_file(project, json_file)
    dump_project_py()

    # bundle up the working directory, put it into S3
    zipname = ('%s-%s-%s-%s.zip'
               % (project.user, project.project,
                  project.scenario, project.setup))
    zip_tmp_dir = tempfile.mkdtemp(prefix='tsudat2_zip_')
    zippath = os.path.join(zip_tmp_dir, zipname)
    log.info('Making zip %s from %s' % (zippath, project.working_directory))
    make_dir_zip(project.working_directory, zippath)
    os.system('ls -l %s' % zip_tmp_dir)

    s3_name = os.path.join(project.InputS3DataDir, zipname)
    try:
        s3 = s3_connect()
        bucket = s3.create_bucket(project.S3Bucket)
        key = bucket.new_key(s3_name)
        log.info('Creating S3 file: %s/%s' % (project.S3Bucket, s3_name))
        key.set_contents_from_filename(zippath)
        log.info('Done!')
        key.set_acl('public-read')
    except boto.exception.S3ResponseError, e:
        log.critical('S3 error: %s' % str(e))
        print('S3 error: %s' % str(e))
        sys.exit(10)
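The upload above uses the legacy boto API (s3_connect, bucket.new_key, set_contents_from_filename, set_acl); a hedged sketch of the same "zip to S3, public-read" step with boto3, assuming the same project.S3Bucket bucket and s3_name key:

import boto3

def upload_zip_to_s3(zippath, bucket_name, key_name):
    """Upload 'zippath' to s3://bucket_name/key_name and make it public-read."""
    s3 = boto3.client('s3')
    s3.upload_file(zippath, bucket_name, key_name,
                   ExtraArgs={'ACL': 'public-read'})

# upload_zip_to_s3(zippath, project.S3Bucket, s3_name)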
Example #14
        mux_filenames = []
        for line in mux_data:
            muxname = line.strip().split()[0]
            split_index = muxname.index('.grd')
            muxname = muxname[:split_index+len('.grd')]
            muxname = os.path.join(project.mux_data_folder, muxname)
            mux_filenames.append(muxname)

        mux_weights = [float(line.strip().split()[1]) for line in mux_data]

        # Call legacy function to create STS file.
        anuga.urs2sts(mux_filenames, basename_out=output_dir,
                      ordering_filename=project.urs_order_file,
                      weights=mux_weights, verbose=False)
    else:                           # a single mux stem file, assume 1.0 weight
        log.info('using single-mux file %s' % mux_file)

        mux_file = os.path.join(project.event_folder, event_file)
        mux_filenames = [mux_file]

        weight_factor = 1.0
        mux_weights = weight_factor*num.ones(len(mux_filenames), num.Float)

        order_filename = project.urs_order_file

        # Create ordered sts file
        anuga.urs2sts(mux_filenames, basename_out=output_dir,
                      ordering_filename=order_filename,
                      weights=mux_weights, verbose=False)

    # report on progress so far
Example #15
def refresh_local_data(data_objects, target_dir, mirrors):
    '''Update local data objects from the server.

    data_objects:   list of files to refresh
    target_dir:     directory in which to put files
    mirrors:        list of mirror sites to use
    
    Each file has an associated *.digest file used to decide
    if the local file needs refreshing.
    
    Return True if all went well, else False.
    '''

    # decision function to decide if a file contains HTML
    def is_html(filename):
        '''Decide if given file contains HTML.'''
        
        fd = open(filename)
        data = fd.read(1024)
        fd.close()

        if 'DOCTYPE' in data:
            return True
        
        return False

    
    # local function to get remote file from one of mirrors
    def get_remote_from_mirrors(remote, local, auth, mirrors):
        '''Get 'remote' from one of 'mirrors', put in 'local'.'''

        # Get a unique date+time string to defeat caching.  The idea is to add
        # this to the end of any URL so proxy sees a different request.
        cache_defeat = '?' + time.strftime('%Y%m%d%H%M%S')

        # try each mirror when getting file
        for mirror in mirrors:
            log.debug('Fetching remote file %s from mirror %s'
                      % (remote, mirror))

            remote_url = mirror + remote + cache_defeat
            (result, auth) = get_web_file(remote_url, local, auth=auth)
            if result and is_html(local)==False:
                log.debug('Success fetching file %s' % remote)
                return (True, auth)
            log.debug('Failure fetching from %s' % mirror)
            auth = None

        log.debug('Failure fetching file %s' % remote)
        return (False, auth)            
                

    # local function to compare contents of two files
    def files_same(file_a, file_b):
        '''Compare two files to see if contents are the same.'''
        
        fd = open(file_a, 'r')
        data_a = fd.read()
        fd.close()

        fd = open(file_b, 'r')
        data_b = fd.read()
        fd.close()

        return data_a == data_b

        
    # local function to update one data object
    def refresh_object(obj, auth, mirrors):
        '''Update object 'obj' using authentication tuple 'auth'.
        
        Return (True, <updated_auth>) if all went well,
        else (False, <updated_auth>).
        '''

        # create local and remote file paths.
        obj_digest = obj + '.digest'
        
        remote_file = os.path.join(Remote_Data_Directory, obj)
        remote_digest = remote_file + '.digest'
        
        local_file = os.path.join(Local_Data_Directory, obj)
        local_digest = local_file + '.digest'
        
        # see if missing either digest or object .tgz
        if not os.path.exists(local_digest) or not os.path.exists(local_file):
            # no digest or no object, download both digest and object
            (res, auth) = get_remote_from_mirrors(obj_digest, local_digest, auth, mirrors)
            if res:
                (res, auth) = get_remote_from_mirrors(obj, local_file, auth, mirrors)
        else:
            # download object digest to remote data directory
            (res, auth) = get_remote_from_mirrors(obj_digest, remote_digest, auth, mirrors)
            if res:
                if not files_same(local_digest, remote_digest):
                    # digests differ, refresh object
                    shutil.move(remote_digest, local_digest)
                    (res, auth) = get_remote_from_mirrors(obj, local_file, auth, mirrors)

        return (res, auth)

    # create local data directory if required
    log.debug('Creating local directory: %s' % Local_Data_Directory)
    if not os.path.exists(Local_Data_Directory):
        os.mkdir(Local_Data_Directory)

    # clean out remote data copy directory
    log.debug('Cleaning remote directory: %s' % Remote_Data_Directory)
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    os.mkdir(Remote_Data_Directory)

    # success, refresh local files
    auth = None
    result = True
    for data_object in data_objects:
        log.info("Refreshing file '%s'" % data_object)
        log.debug('refresh_local_data: getting %s from mirrors, auth=%s'
                  % (data_object, str(auth)))
        (res, auth) = refresh_object(data_object, auth, mirrors)
        log.debug('refresh_local_data: returned (res,auth)=%s,%s'
                  % (str(res), str(auth)))
        if res == False:
            log.info('Refresh of file %s failed.' % data_object)
            result = False
            # don't use possibly bad 'auth' again,
            # some proxies lock out on repeated failures.
            auth = None

    if result:
        log.critical('Local data has been refreshed.')
    else:
        log.critical('Local data has been refreshed, with one or more errors.')
    log.critical()
    return result
Example #16
def run_model():
    """Run a tsunami simulation for a scenario."""

    log.info('@'*90)
    log.info('@ Running simulation')
    log.info('@'*90)

    # Read in boundary from ordered sts file
    event_sts = anuga.create_sts_boundary(project.event_sts)

    # Read the landward-defined points; these incorporate the original
    # clipping polygon minus the 100m contour
    landward_boundary = anuga.read_polygon(project.landward_boundary)

    # Combine sts polyline with landward points
    bounding_polygon_sts = event_sts + landward_boundary

    # Number of boundary segments
    num_ocean_segments = len(event_sts) - 1
    # Number of landward_boundary points
    num_land_points = anuga.file_length(project.landward_boundary)

    # Boundary tags refer to project.landward_boundary
    # 4 landward points means 5 segments, starting at N
    boundary_tags={'back': range(num_ocean_segments+1,
                                 num_ocean_segments+num_land_points),
                   'side': [num_ocean_segments,
                            num_ocean_segments+num_land_points],
                   'ocean': range(num_ocean_segments)}

    # Build mesh and domain
    log.debug('bounding_polygon_sts=%s' % str(bounding_polygon_sts))
    log.debug('boundary_tags=%s' % str(boundary_tags))
    log.debug('project.bounding_maxarea=%s' % str(project.bounding_maxarea))
    log.debug('project.interior_regions=%s' % str(project.interior_regions))
    log.debug('project.meshes=%s' % str(project.meshes))

    domain = anuga.create_domain_from_regions(bounding_polygon_sts,
                                boundary_tags=boundary_tags,
                                maximum_triangle_area=project.bounding_maxarea,
                                interior_regions=project.interior_regions,
                                mesh_filename=project.meshes,
                                use_cache=False,
                                verbose=False)

    domain.geo_reference.zone = project.zone
    log.info('\n%s' % domain.statistics())

    domain.set_name(project.scenario_name)
    domain.set_datadir(project.output_folder)
    domain.set_minimum_storable_height(0.01)  # Don't store depth less than 1cm

    # Set the initial stage in the offcoast region only
    if project.land_initial_conditions:
        IC = anuga.Polygon_function(project.land_initial_conditions,
                                    default=project.tide,
                                    geo_reference=domain.geo_reference)
    else:
        IC = project.tide

    domain.set_quantity('stage', IC, use_cache=True, verbose=False)
    domain.set_quantity('friction', project.friction)
    domain.set_quantity('elevation',
                        filename=project.combined_elevation_filestem+'.pts',
                        use_cache=True, verbose=False, alpha=project.alpha)

    # Setup boundary conditions
    log.debug('Set boundary - available tags: %s' % domain.get_boundary_tags())

    Br = anuga.Reflective_boundary(domain)
    Bt = anuga.Transmissive_stage_zero_momentum_boundary(domain)
    Bd = anuga.Dirichlet_boundary([project.tide, 0, 0])
    Bf = anuga.Field_boundary(project.event_sts+'.sts',
                        domain, mean_stage=project.tide, time_thinning=1,
                        default_boundary=anuga.Dirichlet_boundary([0, 0, 0]),
                        boundary_polygon=bounding_polygon_sts,
                        use_cache=True, verbose=False)

    domain.set_boundary({'back': Br,
                         'side': Bt,
                         'ocean': Bf})

    # Evolve system through time
    t0 = time.time()
    for t in domain.evolve(yieldstep=project.yieldstep,
                           finaltime=project.finaltime,
                           skip_initial_step=False):
        log.info('\n%s' % domain.timestepping_statistics())
        log.info('\n%s' % domain.boundary_statistics(tags='ocean'))

    log.info('Simulation took %.2f seconds' % (time.time()-t0))
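For the boundary_tags construction above, a small worked sketch of how the segment indices fall out, using hypothetical counts (6 sts points, so 5 ocean segments, and 4 landward points):

num_ocean_segments = 5      # len(event_sts) - 1 for 6 sts points
num_land_points = 4

boundary_tags = {'ocean': range(num_ocean_segments),                  # [0, 1, 2, 3, 4]
                 'side': [num_ocean_segments,
                          num_ocean_segments + num_land_points],       # [5, 9]
                 'back': range(num_ocean_segments + 1,
                               num_ocean_segments + num_land_points)}  # [6, 7, 8]
# 10 polygon vertices -> 10 segments: 5 ocean, 2 side links, 3 along the landward line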
Example #17
def run_tsudat(json_data):
    """Run ANUGA on the Amazon EC2.

    json_data  the path to the JSON data file
    """

    # plug our exception handler into the python system
    sys.excepthook = excepthook

    # get JSON data and adorn project object with its data
    adorn_project(json_data)

    # default certain values if not supplied in JSON data
    default_project_values()

    # set logfile to be in run output folder
    if project.debug:
        log.log_logging_level = log.DEBUG
    log.log_filename = os.path.join(project.output_folder, 'ui.log')
    if project.debug:
        dump_project_py()
        
    # do all required data generation before local run
    log.info('#'*80)
    log.info('# Preparing simulation')
    log.info('#'*80)

    log.info('Calling: setup_model()')
    setup_model()

    log.info('Calling: build_elevation()')
    build_elevation()

    log.info('Calling: build_urs_boundary()')
    # create .payload dictionary, 'hpgauges' files are copied up to EC2
    # and then returned in the resultant ZIP S3 file
    project.payload = {}
    gauges = build_urs_boundary(project.mux_input_filename, project.event_sts)
    project.payload['hpgauges'] = gauges

    # determine limits of AOI
    log.info('Calling: get_minmaxAOI()')
    get_minmaxAOI()

    # actually run the simulation
    youngest_input = get_youngest_input()
    sww_file = os.path.join(project.output_folder, project.scenario+'.sww')
    try:
        sww_ctime = os.path.getctime(sww_file)
    except OSError:
        sww_ctime = 0.0         # SWW file not there

    if project.force_run or youngest_input > sww_ctime:
        log.info('#'*80)
        log.info('# Running simulation')
        log.info('#'*80)
        run_model()
        log.info('End of simulation')
    else:
        log.info('#'*80)
        log.info('# Not running simulation')
        log.debug('# SWW file %s is younger than input data' % sww_file)
        log.info('# If you want to force a simulation run, select FORCE RUN')
        log.info('#'*80)

    log.info('#'*80)
    log.info('# Simulation finished')
    log.info('#'*80)

    # now do optional post-run extractions
    if project.get_results_max:
        log.info('~'*80)
        log.info('~ Running export_results_max()')
        log.info('~'*80)
        file_list = export_results_max()
        project.payload['results_max'] = file_list  # add files to output dict
        log.info('export_results_max() has finished')
    else:
        log.info('~'*80)
        log.info('~ Not running export_results_max() - not requested')
        log.info('~'*80)

    if project.get_timeseries:
        log.info('~'*80)
        log.info('~ Running get_timeseries()')
        log.info('~'*80)
        file_list = get_timeseries()
        project.payload['timeseries'] = file_list  # add files to output dict
        # generate plot files
        plot_list = []
        for filename in file_list:
            plot_file = make_stage_plot(filename)
            plot_list.append(plot_file)
        project.payload['timeseries_plot'] = plot_list  # add files to output dict

        log.info('get_timeseries() has finished')
    else:
        log.info('~'*80)
        log.info('~ Not running get_timeseries() - not requested')
        log.info('~'*80)

    # clean up the local filesystem
    dir_path = os.path.join(project.working_directory, project.user)
    log.debug('Deleting work directory: %s' % dir_path)
Example #18
def run_tsudat(json_data):
    """"Run ANUGA using data from a json data file."""

    print('Batemans Bay run_tsudat.py')

    def dump_project_py():
        """Debug routine - dump project attributes to the log."""

        # list all project.* attributes
        for key in dir(project):
            if not key.startswith('__'):
                try:
                    log.info('project.%s=%s' % (key, eval('project.%s' % key)))
                except AttributeError:
                    pass

    # plug our exception handler into the python system
    sys.excepthook = excepthook

    # get JSON data and adorn project object with its data
    adorn_project(json_data)

    # set logfile to be in run output folder
    if project.debug:
        log.log_logging_level = log.DEBUG
    log.log_filename = os.path.join(project.output_folder, 'tsudat.log')

    # run the tsudat simulation
    if project.debug:
        dump_project_py()

    youngest_input = get_youngest_input()
    sww_file = os.path.join(project.output_folder, project.scenario_name+'.sww')
    try:
        sww_ctime = os.path.getctime(sww_file)
    except OSError:
        sww_ctime = 0.0		# SWW file not there

    if project.force_run or youngest_input > sww_ctime:
        log.info('#'*90)
        log.info('# Running simulation')
        log.info('#'*90)
        setup_model()
#        build_elevation()
        build_urs_boundary(project.mux_input_filename, project.event_sts)
        run_model()
        log.info('End of simulation')
    else:
        log.info('#'*90)
        log.info('# Not running simulation')
        log.debug('# SWW file %s is younger than input data' % sww_file)
        log.info('# If you want to force a simulation run, select FORCE RUN')
        log.info('#'*90)

    # now do optional post-run extractions
    if project.get_results_max:
        log.info('~'*90)
        log.info('~ Running export_results_max()')
        log.info('~'*90)
        export_results_max()
        log.info('export_results_max() has finished')
    else:
        log.info('~'*90)
        log.info('~ Not running export_results_max() - not requested')
        log.info('~'*90)

    if project.get_timeseries:
        log.info('~'*90)
        log.info('~ Running get_timeseries()')
        log.info('~'*90)
        get_timeseries()
        log.info('get_timeseries() has finished')
    else:
        log.info('~'*90)
        log.info('~ Not running get_timeseries() - not requested')
        log.info('~'*90)

    log.info('#'*90)
    log.info('# Simulation finished')
    log.info('#'*90)
Example #19
def number_mesh_triangles(interior_regions, bounding_poly, remainder_res):
    """Calculate the approximate number of triangles inside the
    bounding polygon and the other interior regions

    Polygon areas are converted to square Kms

    FIXME: Add tests for this function
    """

    # TO DO check if any of the regions fall inside one another

    log.info('-' * 80)
    log.info('Polygon  Max triangle area (m^2)  Total area (km^2)  '
             'Estimated #triangles')
    log.info('-' * 80)

    no_triangles = 0.0
    area = polygon_area(bounding_poly)

    for poly, resolution in interior_regions:
        this_area = polygon_area(poly)
        this_triangles = old_div(this_area, resolution)
        no_triangles += this_triangles
        area -= this_area

        log.info('Interior %s%s%d' %
                 (('%.0f' % resolution).ljust(25),
                  ('%.2f' %
                   (old_div(this_area, 1000000))).ljust(19), this_triangles))

    bound_triangles = old_div(area, remainder_res)
    no_triangles += bound_triangles

    log.info('Bounding %s%s%d' %
             (('%.0f' % remainder_res).ljust(25),
              ('%.2f' % (old_div(area, 1000000))).ljust(19), bound_triangles))

    total_number_of_triangles = no_triangles / 0.7

    log.info('Estimated total number of triangles: %d' %
             total_number_of_triangles)
    log.info('Note: This is generally about 20% less than the final amount')

    return int(total_number_of_triangles)
Example #20
def _conjugate_gradient_preconditioned(A, b, x0, M, 
                        imax=10000, tol=1.0e-8, atol=1.0e-10, iprint=None, Type='None'):
    """
   Try to solve linear equation Ax = b using
   preconditioned conjugate gradient method

   Input
   A: matrix or function which applies a matrix, assumed symmetric
      A can be either dense or sparse or a function
      (__mul__ just needs to be defined)
   b: right hand side
   x0: initial guess (default the 0 vector)
   imax: max number of iterations
   tol: tolerance used for residual

   Output
   x: approximate solution
   """

    # Padarn note: This is temporary while the Jacobi preconditioner is the only
    # one available.
    D=[]
    if Type != 'Jacobi':
        log.warning('Only the Jacobi preconditioner is implemented in cg_solve python')
        msg = 'Only the Jacobi preconditioner is implemented in cg_solve python'
        raise PreconditionerError, msg
    else:
        D=Sparse(A.M, A.M)
        for i in range(A.M):
            D[i,i]=1/M[i]
        D=Sparse_CSR(D)

    stats = Stats()

    b  = num.array(b, dtype=num.float)
    if len(b.shape) != 1:
        raise VectorShapeError, 'input vector should consist of only one column'

    if x0 is None:
        x0 = num.zeros(b.shape, dtype=num.float)
    else:
        x0 = num.array(x0, dtype=num.float)

    stats.x0 = num.linalg.norm(x0)

    if iprint is None or iprint == 0:
        iprint = imax

    dx = 0.0
    
    i = 1
    x = x0
    r = b - A * x
    z = D * r
    d = r
    rTr = num.dot(r, z)
    rTr0 = rTr

    stats.rTr0 = rTr0
    
    #FIXME Let the iterations stop if starting with a small residual
    while (i < imax and rTr > tol ** 2 * rTr0 and rTr > atol ** 2):
        q = A * d
        alpha = rTr / num.dot(d, q)
        xold = x
        x = x + alpha * d

        dx = num.linalg.norm(x-xold)
        
        #if dx < atol :
        #    break

        # Padarn Note 26/11/12: This modification to the algorithm seems
        # unnecessary, but also seems to have been implemented incorrectly -
        # it was set to perform the more expensive r = b - A * x routine in
        # 49/50 iterations. Suggest this be either removed completely or
        # changed to 'if i%50==0' (or equivalent).
        #if i % 50:
        if False:
            r = b - A * x
        else:
            r = r - alpha * q
        rTrOld = rTr
        z = D * r
        rTr = num.dot(r, z)
        bt = rTr / rTrOld

        d = z + bt * d
        i = i + 1
        if i % iprint == 0:
            log.info('i = %g rTr = %15.8e dx = %15.8e' % (i, rTr, dx))

        if i == imax:
            log.warning('max number of iterations attained')
            msg = 'Conjugate gradient solver did not converge: rTr==%20.15e' % rTr
            raise ConvergenceError, msg

    stats.x = num.linalg.norm(x)
    stats.iter = i
    stats.rTr = rTr
    stats.dx = dx

    return x, stats
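A minimal, self-contained numpy sketch of the same Jacobi-preconditioned conjugate gradient iteration (dense A, plain arrays rather than the ANUGA Sparse classes above), just to show the update steps:

import numpy as np

def jacobi_pcg(A, b, x0=None, imax=1000, tol=1.0e-8):
    """Solve A x = b for symmetric positive definite A with Jacobi-preconditioned CG."""
    x = np.zeros_like(b, dtype=float) if x0 is None else np.asarray(x0, dtype=float)
    Dinv = 1.0 / np.diag(A)               # Jacobi preconditioner: M^-1 = diag(A)^-1
    r = b - np.dot(A, x)
    z = Dinv * r
    d = z.copy()
    rz = np.dot(r, z)
    rz0 = rz
    for i in range(imax):
        if rz <= tol**2 * rz0:
            break
        q = np.dot(A, d)
        alpha = rz / np.dot(d, q)
        x = x + alpha * d
        r = r - alpha * q
        z = Dinv * r
        rz_new = np.dot(r, z)
        d = z + (rz_new / rz) * d
        rz = rz_new
    return x

# e.g. A = np.array([[4.0, 1.0], [1.0, 3.0]]); b = np.array([1.0, 2.0])
# jacobi_pcg(A, b) -> approximately [0.0909, 0.6364]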
Example #21
    # compare SWW files here and there
    new_output_sww = os.path.join(output_directory, expected_sww)
    #cmd = 'python cmpsww.py %s %s > cmpsww.stdout' % (local_sww, new_output_sww)
    cmd = 'python compare_model_timeseries.py %s %s %e > compare_model_timeseries.stdout' %\
          (local_sww, new_output_sww, epsilon)
    print '-------------------------------------'
    print cmd
    print '-------------------------------------'    
    
    log.debug("check_that_output_is_as_expected: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("check_that_output_is_as_expected: res=%d" % res)
    log.critical()
    print 'Result', res
    if res == 0:
        log.info('Simulation results are as expected.')
    else:
        log.critical('Simulation results are NOT as expected.')
        fd = open('compare_model_timeseries.stdout', 'r')
        cmp_error = fd.readlines()
        fd.close()
        log.critical(''.join(cmp_error))


def teardown():
    '''Clean up after validation run.'''

    log.debug('teardown: called')
    
    # remove remote directory and stdout capture file
    #shutil.rmtree(Remote_Data_Directory, ignore_errors=True)