Example #1
def export_newstage_max(name_in, name_out,
                        quantity=None,	# NOT USED
                        reduction=None,	# always 'max'
                        cellsize=DEFAULT_CELLSIZE,
                        number_of_decimal_places=DEFAULT_DECIMAL_PLACES,
                        NODATA_value=DEFAULT_NODATA,
                        easting_min=None,
                        easting_max=None,
                        northing_min=None,
                        northing_max=None,
                        verbose=False,	# NOT USED, but passed through
                        origin=None,
                        datum=DEFAULT_DATUM,
                        block_size=DEFAULT_BLOCK_SIZE):
    """Read SWW file and extract the maximum depth values, but only for land.

    name_in                   input filename (must be SWW)
    name_out                  output filename (.asc or .ers)
    quantity                  NOT USED
    reduction                 always 'max'
    cellsize
    number_of_decimal_places  number of decimal places for values
    NODATA_value              the value to use if NODATA
    easting_min
    easting_max
    northing_min
    northing_max
    verbose                   NOT USED, but passed through
    origin
    datum
    block_size                number of slices along non-time axis to process

    Also writes an accompanying file with the same basename_in but with
    extension .prj, used to fix the UTM zone, datum, false northings and
    eastings.  The prj format is assumed to be:
        Projection    UTM
        Zone          56
        Datum         WGS84
        Zunits        NO
        Units         METERS
        Spheroid      WGS84
        Xshift        0.0000000000
        Yshift        10000000.0000000000
        Parameters
    """

    log = logger.Log()

    (basename_in, in_ext) = os.path.splitext(name_in)
    (basename_out, out_ext) = os.path.splitext(name_out)
    out_ext = out_ext.lower()

    if in_ext != '.sww':
        raise IOError('Input format for %s must be .sww' % name_in)

    if out_ext not in ['.asc', '.ers']:
        raise IOError('Format for %s must be either asc or ers.' % name_out)

    false_easting = 500000
    false_northing = 10000000

    assert(isinstance(block_size, (int, long, float)))

    log.debug('Reading from %s' % name_in)
    log.debug('Output directory is %s' % name_out)

    # open SWW file
    fid = NetCDFFile(name_in)

    # get extent and reference
    x = fid.variables['x'][:]
    y = fid.variables['y'][:]
    volumes = fid.variables['volumes'][:]
    times = fid.variables['time'][:]

    number_of_timesteps = fid.dimensions['number_of_timesteps']
    number_of_points = fid.dimensions['number_of_points']

    if origin is None:
        # get geo_reference since SWW files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError, e:
            geo_reference = Geo_reference() # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()
        zone = geo_reference.get_zone()
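
# A minimal usage sketch (not part of the excerpt above; the file names are
# hypothetical): write the reduced (maximum) values from an SWW result file
# to an ASCII grid with 10 m cells.
export_newstage_max('results.sww', 'max_stage.asc',
                    cellsize=10,
                    verbose=True)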
Example #2
def sww2array(
        name_in,
        quantity=None,  # defaults to elevation
        reduction=None,
        cellsize=10,
        number_of_decimal_places=None,
        NODATA_value=-9999.0,
        easting_min=None,
        easting_max=None,
        northing_min=None,
        northing_max=None,
        verbose=False,
        origin=None,
        datum='WGS84',
        block_size=None):
    """Read SWW file and convert to a numpy array (can be stored to a png file later)


    The parameter quantity must be the name of an existing quantity or
    an expression involving existing quantities. The default is
    'elevation'. Quantity is not a list of quantities.

    If reduction is given and it's an index, sww2array will output the quantity at that time-step.
    If reduction is given and it's a built-in function (e.g. max, min, mean), then that
    function is used to reduce the quantity over all time-steps. If reduction is not given,
    reduction is set to "max" by default.

    datum


    block_size - sets the number of slices along the non-time axis to
                 process in one block.
    """

    import sys
    import types

    from anuga.geometry.polygon import inside_polygon, outside_polygon
    from anuga.abstract_2d_finite_volumes.util import \
         apply_expression_to_dictionary

    basename_in, in_ext = os.path.splitext(name_in)

    if in_ext != '.sww':
        raise IOError('Input format for %s must be .sww' % name_in)

    false_easting = 500000
    false_northing = 10000000

    if quantity is None:
        quantity = 'elevation'

    if reduction is None:
        reduction = max

    if quantity_formula.has_key(quantity):
        quantity = quantity_formula[quantity]

    if number_of_decimal_places is None:
        number_of_decimal_places = 3

    if block_size is None:
        block_size = DEFAULT_BLOCK_SIZE

    assert (isinstance(block_size, (int, long, float)))

    # Read sww file
    if verbose:
        log.critical('Reading from %s' % name_in)

    from anuga.file.netcdf import NetCDFFile
    fid = NetCDFFile(name_in)

    #Get extent and reference
    x = num.array(fid.variables['x'], num.float)
    y = num.array(fid.variables['y'], num.float)
    volumes = num.array(fid.variables['volumes'], num.int)
    if type(reduction) is not types.BuiltinFunctionType:
        times = fid.variables['time'][reduction]
    else:
        times = fid.variables['time'][:]

    number_of_timesteps = fid.dimensions['number_of_timesteps']
    number_of_points = fid.dimensions['number_of_points']

    if origin is None:
        # Get geo_reference
        # sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError, e:
            geo_reference = Geo_reference()  # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()
        zone = geo_reference.get_zone()
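
# A minimal usage sketch (not part of the excerpt above; the file name and
# expression are hypothetical). As the docstring notes, reduction may be a
# built-in function applied over all timesteps or an integer timestep index.
depth_max = sww2array('results.sww',
                      quantity='stage - elevation',  # expression over quantities
                      reduction=max)                 # reduce over all timesteps
stage_step5 = sww2array('results.sww',
                        quantity='stage',
                        reduction=5)                 # value at timestep index 5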
Example #3
def get_maximum_inundation_data(filename,
                                polygon=None,
                                time_interval=None,
                                use_centroid_values=True,
                                return_time=False,
                                verbose=False):
    """Compute maximum run up height from sww file.

    filename             path to SWW file to read
    polygon              if specified resrict to points inside this polygon
                         assumed absolute coordinates and in same zone as
                         domain
    time_interval        if specified resrict to within the period specified
    use_centroid_values 
    verbose              True if this function is to be verbose

    Returns (maximal_runup, maximal_runup_location).

    Usage:
    runup, location = get_maximum_inundation_data(filename,
                                                  polygon=None,
                                                  time_interval=None,
                                                  verbose=False)

    Algorithm is as in get_maximum_inundation_elevation from
    shallow_water_domain except that this function works with the SWW file and
    computes the maximal runup height over multiple timesteps.

    If no inundation is found within polygon and time_interval the return value
    is None signifying "No Runup" or "Everything is dry".
    """

    # We are using nodal values here as that is what is stored in sww files.

    # Water depth below which it is considered to be 0 in the model
    # FIXME (Ole): Allow this to be specified as a keyword argument as well

    from anuga.geometry.polygon import inside_polygon
    from anuga.config import minimum_allowed_height
    from anuga.file.netcdf import NetCDFFile

    # Just find max inundation over one file
    dir, base = os.path.split(filename)
    #iterate_over = get_all_swwfiles(dir, base)
    iterate_over = [filename[:-4]]
    if verbose:
        print iterate_over

    # Read sww file
    if verbose: log.critical('Reading from %s' % filename)
    # FIXME: Use general swwstats (when done)

    maximal_runup = None
    maximal_runup_location = None
    maximal_time = None

    for _, swwfile in enumerate(iterate_over):
        # Read sww file
        filename = os.path.join(dir, swwfile + '.sww')

        if verbose: log.critical('Reading from %s' % filename)
        # FIXME: Use general swwstats (when done)

        fid = NetCDFFile(filename)

        # Get geo_reference
        # sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError:
            geo_reference = Geo_reference()  # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()

        # Get extent
        volumes = fid.variables['volumes'][:]
        x = fid.variables['x'][:] + xllcorner
        y = fid.variables['y'][:] + yllcorner

        # Get the relevant quantities (Convert from single precision)
        try:
            elevation = num.array(fid.variables['elevation_c'][:], num.float)
            stage = num.array(fid.variables['stage_c'][:], num.float)
            found_c_values = True
        except:
            elevation = num.array(fid.variables['elevation'][:], num.float)
            stage = num.array(fid.variables['stage'][:], num.float)
            found_c_values = False

        if verbose:
            print 'found c values ', found_c_values
            print 'stage.shape ', stage.shape
            print 'elevation.shape ', elevation.shape

        # Here's where one could convert nodal information to centroid
        # information but is probably something we need to write in C.
        # Here's a Python thought which is NOT finished!!!
        if use_centroid_values is True:
            vols0 = volumes[:, 0]
            vols1 = volumes[:, 1]
            vols2 = volumes[:, 2]
            # Then use these to compute centroid location
            x = (x[vols0] + x[vols1] + x[vols2]) / 3.0
            y = (y[vols0] + y[vols1] + y[vols2]) / 3.0

            if found_c_values:
                # don't have to do anything as found in sww file
                pass
            else:
                elevation = (elevation[vols0] + elevation[vols1] +
                             elevation[vols2]) / 3.0
                stage = (stage[:, vols0] + stage[:, vols1] +
                         stage[:, vols2]) / 3.0

        # Spatial restriction
        if polygon is not None:
            msg = 'polygon must be a sequence of points.'
            assert len(polygon[0]) == 2, msg
            # FIXME (Ole): Make a generic polygon input check in polygon.py
            # and call it here
            points = num.ascontiguousarray(
                num.concatenate((x[:, num.newaxis], y[:, num.newaxis]),
                                axis=1))
            point_indices = inside_polygon(points, polygon)

            # Restrict quantities to polygon
            elevation = num.take(elevation, point_indices, axis=0)
            stage = num.take(stage, point_indices, axis=1)

            # Get info for location of maximal runup
            points_in_polygon = num.take(points, point_indices, axis=0)
            x = points_in_polygon[:, 0]
            y = points_in_polygon[:, 1]
        else:
            # Take all points
            point_indices = num.arange(len(x))

        # Temporal restriction
        time = fid.variables['time'][:]
        if verbose:
            print time
        all_timeindices = num.arange(len(time))

        if time_interval is not None:
            msg = 'time_interval must be a sequence of length 2.'
            assert len(time_interval) == 2, msg
            msg = 'time_interval %s must not be decreasing.' % time_interval
            assert time_interval[1] >= time_interval[0], msg
            msg = 'Specified time interval [%.8f:%.8f] ' % tuple(time_interval)
            msg += 'does not match model time interval: [%.8f, %.8f]\n' \
                   % (time[0], time[-1])
            if time_interval[1] < time[0]:
                fid.close()
                raise ValueError(msg)
            if time_interval[0] > time[-1]:
                fid.close()
                raise ValueError(msg)

            # Take time indices corresponding to interval (& is bitwise AND)
            timesteps = num.compress((time_interval[0] <= time) \
                                     & (time <= time_interval[1]),
                                     all_timeindices)

            msg = 'time_interval %s did not include any model timesteps.' \
                  % time_interval
            assert not num.alltrue(timesteps == 0), msg
        else:
            # Take them all
            timesteps = all_timeindices

        #print timesteps

        fid.close()

        # Compute maximal runup for each timestep
        #maximal_runup = None
        #maximal_runup_location = None
        #maximal_runups = [None]
        #maximal_runup_locations = [None]

        for i in timesteps:
            ## if use_centroid_values is True:
            ##     stage_i  = stage[i,:]
            ## else:
            ##     stage_i = stage[i,:]

            stage_i = stage[i, :]
            depth = stage_i - elevation

            if verbose:
                print '++++++++'
            # Get wet nodes i.e. nodes with depth>0 within given region
            # and timesteps
            wet_nodes = num.where(depth > 0.0)[0]

            if verbose:
                print stage_i.shape
                print num.max(stage_i)
                #print max(wet_elevation)

            if num.alltrue(wet_nodes == 0):
                runup = None
            else:
                # Find maximum elevation among wet nodes
                wet_elevation = num.take(elevation, wet_nodes, axis=0)

                if verbose:
                    pass
                    #print wet_elevation

                runup_index = num.argmax(wet_elevation)
                runup = max(wet_elevation)
                if verbose:
                    print 'max(wet_elevation) ', max(wet_elevation)
                assert wet_elevation[runup_index] == runup  # Must be True

            if runup > maximal_runup:
                maximal_runup = runup  # works even if maximal_runup is None
                maximal_time = time[i]

                # Record location
                wet_x = num.take(x, wet_nodes, axis=0)
                wet_y = num.take(y, wet_nodes, axis=0)
                maximal_runup_location = [wet_x[runup_index],
                                          wet_y[runup_index]]
            if verbose:
                print i, runup

    if return_time:
        return maximal_runup, maximal_runup_location, maximal_time
    else:
        return maximal_runup, maximal_runup_location
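
# A minimal usage sketch (not part of the excerpt above; the file name,
# polygon and time interval are hypothetical): restrict the runup search to a
# polygon given in absolute coordinates and to a time window in seconds.
polygon = [[305000.0, 6180000.0], [306000.0, 6180000.0],
           [306000.0, 6181000.0], [305000.0, 6181000.0]]
runup, location = get_maximum_inundation_data('results.sww',
                                              polygon=polygon,
                                              time_interval=[0.0, 3600.0],
                                              verbose=False)
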
class General_mesh:
    """Collection of 2D triangular elements

    A triangular element is defined in terms of three vertex ids,
    ordered counter-clockwise, each corresponding to a given node
    which is represented as a coordinate set (x,y).
    Vertices from different triangles can point to the same node.
    The nodes are implemented as an Nx2 numeric array containing the
    x and y coordinates.


    To instantiate:
       Mesh(nodes, triangles)

    where

      nodes is either a list of 2-tuples or an Nx2 numeric array of
      floats representing all x, y coordinates in the mesh.

      triangles is either a list of 3-tuples or an Mx3 numeric array of
      integers representing indices of all vertices in the mesh.
      Each vertex is identified by its index i in [0, N-1].


    Example:

        a = [0.0, 0.0]
        b = [0.0, 2.0]
        c = [2.0, 0.0]
        e = [2.0, 2.0]

        nodes = [a, b, c, e]
        triangles = [ [1,0,2], [1,2,3] ]   # bac, bce

        # Create mesh with two triangles: bac and bce
        mesh = Mesh(nodes, triangles)



    Other:

      In addition the mesh computes a 3*M x 2 array called vertex_coordinates,
      where M is the number of triangles. This structure is derived from the
      node coordinates and contains, for each triangle, the x,y coordinates
      of its three vertices (one vertex per row).

      See neighbourmesh.py for a specialisation of the general mesh class
      which includes information about neighbours and the mesh boundary.

      The mesh object is purely geometrical and contains no information
      about quantities defined on the mesh.

    """

    # FIXME: It would be a good idea to use geospatial data as an alternative
    #        input
    def __init__(self,
                 nodes,
                 triangles,
                 geo_reference=None,
                 use_inscribed_circle=False,
                 verbose=False):
        """Build triangular 2d mesh from nodes and triangle information

        Input:

          nodes: x,y coordinates represented as a sequence of 2-tuples or
                 a Nx2 numeric array of floats.

          triangles: sequence of 3-tuples or Mx3 numeric array of
                     non-negative integers representing indices into
                     the nodes array.

          geo_reference (optional): If specified, coordinates are
          assumed to be relative to this origin.


        """

        self.verbose = verbose

        if verbose: log.critical('General_mesh: Building basic mesh structure')

        self.use_inscribed_circle = use_inscribed_circle

        self.triangles = num.array(triangles, num.int)

        if verbose:
            log.timingInfo("numTriangles, " + str(self.triangles.shape[0]))

        self.nodes = num.array(nodes, num.float)

        # Register number of elements and nodes
        self.number_of_triangles = N = int(self.triangles.shape[0])
        self.number_of_nodes = self.nodes.shape[0]

        # FIXME: this stores a geo_reference, but when coords are returned
        # This geo_ref is not taken into account!
        if geo_reference is None:
            self.geo_reference = Geo_reference()  # Use defaults
        else:
            self.geo_reference = geo_reference

        # Input checks
        msg = (
            'Triangles must be an Mx3 numeric array or a sequence of 3-tuples. '
            'The supplied array has the shape: %s' % str(self.triangles.shape))
        assert len(self.triangles.shape) == 2, msg

        msg = ('Nodes must be an Nx2 numeric array or a sequence of 2-tuples. '
               'The supplied array has the shape: %s' % str(self.nodes.shape))
        assert len(self.nodes.shape) == 2, msg

        msg = 'Vertex indices reference non-existing coordinate sets'
        assert num.max(self.triangles) < self.nodes.shape[0], msg

        # FIXME: Maybe move to statistics?
        # Or use with get_extent
        xy_extent = [
            min(self.nodes[:, 0]),
            min(self.nodes[:, 1]),
            max(self.nodes[:, 0]),
            max(self.nodes[:, 1])
        ]

        self.xy_extent = num.array(xy_extent, num.float)

        # Allocate space for geometric quantities
        self.normals = num.zeros((N, 6), num.float)
        self.areas = num.zeros(N, num.float)
        self.edgelengths = num.zeros((N, 3), num.float)

        # Get x,y coordinates for all triangle vertices and store
        self.centroid_coordinates = num.zeros((N, 2), num.float)

        #Allocate space for geometric quantities
        self.radii = num.zeros(N, num.float)

        # Get x,y coordinates for all triangle vertices and store
        self.vertex_coordinates = V = self.compute_vertex_coordinates()

        # Get x,y coordinates for all triangle edge midpoints and store
        self.edge_midpoint_coordinates = self.compute_edge_midpoint_coordinates(
        )

        # Initialise each triangle
        if verbose:
            log.critical('General_mesh: Computing areas, normals, '
                         'edgelengths, centroids and radii')

        # Calculate Areas
        V0 = V[0:3 * N:3, :]
        V1 = V[1:3 * N:3, :]
        V2 = V[2:3 * N:3, :]

        # Area
        x0 = V0[:, 0]
        y0 = V0[:, 1]
        x1 = V1[:, 0]
        y1 = V1[:, 1]
        x2 = V2[:, 0]
        y2 = V2[:, 1]

        self.areas[:] = -((x1 * y0 - x0 * y1) + (x2 * y1 - x1 * y2) +
                          (x0 * y2 - x2 * y0)) / 2.0
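        # The expression above is the shoelace (cross-product) formula for the
        # signed triangle area; it is positive when the vertices are ordered
        # counter-clockwise, so non-positive areas indicate degenerate or
        # mis-ordered triangles (checked below).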

        #areas = -((x0-x1)*(y2-y1) - (y0-y1)*(x2-x1))/2.0

        #assert num.allclose(self.areas, areas)

        ind = num.where(self.areas <= 0.0)
        msg = 'Degenerate Triangle(s) ' + str(ind[0])
        assert num.all(self.areas > 0.0), msg

        #print V.shape, V0.shape, V1.shape, V2.shape

        #        #print E.shape, E[0:3*M:3, :].shape, E[1:3*M:3, :].shape, E[2:3*M:3, :].shape
        #        E[0:3*M:3, :] = 0.5*(V1+V2)
        #        E[1:3*M:3, :] = 0.5*(V2+V0)
        #        E[2:3*M:3, :] = 0.5*(V0+V1)

        i0 = self.triangles[:, 0]
        i1 = self.triangles[:, 1]
        i2 = self.triangles[:, 2]

        assert num.allclose(x0, self.nodes[i0, 0])
        assert num.allclose(y0, self.nodes[i0, 1])

        assert num.allclose(x1, self.nodes[i1, 0])
        assert num.allclose(y1, self.nodes[i1, 1])

        assert num.allclose(x2, self.nodes[i2, 0])
        assert num.allclose(y2, self.nodes[i2, 1])

        xn0 = x2 - x1
        yn0 = y2 - y1
        l0 = num.sqrt(xn0**2 + yn0**2)

        xn0 /= l0
        yn0 /= l0

        xn1 = x0 - x2
        yn1 = y0 - y2
        l1 = num.sqrt(xn1**2 + yn1**2)

        xn1 /= l1
        yn1 /= l1

        xn2 = x1 - x0
        yn2 = y1 - y0
        l2 = num.sqrt(xn2**2 + yn2**2)

        xn2 /= l2
        yn2 /= l2

        # Compute and store
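        # For a counter-clockwise triangle the outward unit normal to an edge
        # is obtained by rotating the normalised edge vector (x, y) to (y, -x);
        # normal j is associated with the edge opposite vertex j.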

        self.normals[:, 0] = yn0
        self.normals[:, 1] = -xn0

        self.normals[:, 2] = yn1
        self.normals[:, 3] = -xn1

        self.normals[:, 4] = yn2
        self.normals[:, 5] = -xn2

        self.edgelengths[:, 0] = l0
        self.edgelengths[:, 1] = l1
        self.edgelengths[:, 2] = l2

        self.centroid_coordinates[:, 0] = old_div((x0 + x1 + x2), 3)
        self.centroid_coordinates[:, 1] = old_div((y0 + y1 + y2), 3)

        if self.use_inscribed_circle == False:
            #OLD code. Computed radii may exceed that of an
            #inscribed circle

            #Midpoints
            xm0 = old_div((x1 + x2), 2)
            ym0 = old_div((y1 + y2), 2)

            xm1 = old_div((x2 + x0), 2)
            ym1 = old_div((y2 + y0), 2)

            xm2 = old_div((x0 + x1), 2)
            ym2 = old_div((y0 + y1), 2)

            #The radius is the distance from the centroid of
            #a triangle to the midpoint of the side of the triangle
            #closest to the centroid

            d0 = num.sqrt((self.centroid_coordinates[:, 0] - xm0)**2 +
                          (self.centroid_coordinates[:, 1] - ym0)**2)
            d1 = num.sqrt((self.centroid_coordinates[:, 0] - xm1)**2 +
                          (self.centroid_coordinates[:, 1] - ym1)**2)
            d2 = num.sqrt((self.centroid_coordinates[:, 0] - xm2)**2 +
                          (self.centroid_coordinates[:, 1] - ym2)**2)

            self.radii[:] = num.minimum(num.minimum(d0, d1), d2)

        else:
            #NEW code added by Peter Row. True radius
            #of inscribed circle is computed

            a = num.sqrt((x0 - x1)**2 + (y0 - y1)**2)
            b = num.sqrt((x1 - x2)**2 + (y1 - y2)**2)
            c = num.sqrt((x2 - x0)**2 + (y2 - y0)**2)

            self.radii[:] = old_div(2.0 * self.areas, (a + b + c))
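            # This is the inradius: r = area / semi-perimeter = 2*area/(a+b+c)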

#        for i in range(N):
#            if verbose and i % ((N+10)/10) == 0: log.critical('(%d/%d)' % (i, N))
#
#            x0, y0 = V[3*i, :]
#            x1, y1 = V[3*i+1, :]
#            x2, y2 = V[3*i+2, :]
#
#
#            i0 = self.triangles[i][0]
#            i1 = self.triangles[i][1]
#            i2 = self.triangles[i][2]
#
##            assert x0 == self.nodes[i0][0]
##            assert y0 == self.nodes[i0][1]
##
##            assert x1 == self.nodes[i1][0]
##            assert y1 == self.nodes[i1][1]
##
##            assert x2 == self.nodes[i2][0]
##            assert y2 == self.nodes[i2][1]
#
##            # Area
##            self.areas[i] = abs((x1*y0-x0*y1) + (x2*y1-x1*y2) + (x0*y2-x2*y0))/2
##
##            msg = 'Triangle %g (%f,%f), (%f,%f), (%f, %f)' % (i,x0,y0,x1,y1,x2,y2)
##            msg += ' is degenerate:  area == %f' % self.areas[i]
##            assert self.areas[i] > 0.0, msg
#
#            # Normals
#            # The normal vectors
#            #   - point outward from each edge
#            #   - are orthogonal to the edge
#            #   - have unit length
#            #   - Are enumerated according to the opposite corner:
#            #     (First normal is associated with the edge opposite
#            #     the first vertex, etc)
#            #   - Stored as six floats n0x,n0y,n1x,n1y,n2x,n2y per triangle
#            n0 = num.array([x2-x1, y2-y1], num.float)
#            l0 = num.sqrt(num.sum(n0**2))
#
#            n1 = num.array([x0-x2, y0-y2], num.float)
#            l1 = num.sqrt(num.sum(n1**2))
#
#            n2 = num.array([x1-x0, y1-y0], num.float)
#            l2 = num.sqrt(num.sum(n2**2))
#
#            # Normalise
#            n0 /= l0
#            n1 /= l1
#            n2 /= l2
#
##            # Compute and store
##            self.normals[i, :] = [n0[1], -n0[0],
##                                  n1[1], -n1[0],
##                                  n2[1], -n2[0]]
#
#            # Edgelengths
#            #self.edgelengths[i, :] = [l0, l1, l2]
#
#
#
#            #Compute centroid
##            centroid = num.array([(x0 + x1 + x2)/3, (y0 + y1 + y2)/3], num.float)
###            self.centroid_coordinates[i] = centroid
##
##
##            if self.use_inscribed_circle == False:
##                #OLD code. Computed radii may exceed that of an
##                #inscribed circle
##
##                #Midpoints
##                m0 = num.array([(x1 + x2)/2, (y1 + y2)/2], num.float)
##                m1 = num.array([(x0 + x2)/2, (y0 + y2)/2], num.float)
##                m2 = num.array([(x1 + x0)/2, (y1 + y0)/2], num.float)
##
##                #The radius is the distance from the centroid of
##                #a triangle to the midpoint of the side of the triangle
##                #closest to the centroid
##                d0 = num.sqrt(num.sum( (centroid-m0)**2 ))
##                d1 = num.sqrt(num.sum( (centroid-m1)**2 ))
##                d2 = num.sqrt(num.sum( (centroid-m2)**2 ))
##
##                #self.radii[i] = min(d0, d1, d2)
##
##            else:
##                #NEW code added by Peter Row. True radius
##                #of inscribed circle is computed
##
##                a = num.sqrt((x0-x1)**2+(y0-y1)**2)
##                b = num.sqrt((x1-x2)**2+(y1-y2)**2)
##                c = num.sqrt((x2-x0)**2+(y2-y0)**2)
##
##                self.radii[i]=2.0*self.areas[i]/(a+b+c)

        # Build structure listing which triangles belong to which node.
        if verbose:
            log.critical('General Mesh: Building inverted triangle structure')
        self.build_inverted_triangle_structure()

        if verbose: log.timingInfo("aoi, '%s'" % self.get_area())

    def __len__(self):

        return self.number_of_triangles

    def __repr__(self):
        return ('Mesh: %d vertices, %d triangles' %
                (self.nodes.shape[0], len(self)))

    def get_normals(self):
        """Return all normal vectors.

        Return normal vectors for all triangles as an Nx6 array
        (ordered as x0, y0, x1, y1, x2, y2 for each triangle)
        """

        return self.normals

    def get_normal(self, i, j):
        """Return normal vector j of the i'th triangle.

        Return value is the numeric array slice [x, y]
        """

        return self.normals[i, 2 * j:2 * j + 2]

    def get_edgelength(self, i, j):
        """Return length of j'th edge of the i'th triangle.
        Return value is the numeric array slice [x, y]
        """
        return self.edgelengths[i, j]

    def get_number_of_triangles(self):
        return self.number_of_triangles

    def get_number_of_nodes(self):
        return self.number_of_nodes

    def get_nodes(self, absolute=False):
        """Return all nodes in mesh.

        The nodes are ordered in an Nx2 array where N is the number of nodes.
        This is the same format in which they were provided to the constructor,
        i.e. without any duplication.

        Boolean keyword argument absolute determines whether coordinates
        are to be made absolute by taking georeference into account.
        Default is False as many parts of ANUGA expect relative coordinates.
        (To see which, switch to default absolute=True and run tests).
        """

        N = self.number_of_nodes
        V = self.nodes[:N, :]
        if absolute is True:
            if not self.geo_reference.is_absolute():
                V = self.geo_reference.get_absolute(V)

        return V

    def get_node(self, i, absolute=False):
        """Return node coordinates for triangle i.

        Boolean keyword argument absolute determines whether coordinates
        are to be made absolute by taking georeference into account.
        Default is False as many parts of ANUGA expect relative coordinates.
        (To see which, switch to default absolute=True and run tests).

        Note: This method returns a modified _copy_ of the nodes slice if
              absolute is True.  If absolute is False, just return the slice.
              This is related to the ensure_numeric() returning a copy problem.
        """

        V = self.nodes[i, :]
        if absolute is True:
            if not self.geo_reference.is_absolute():
                # get a copy so as not to modify the internal self.nodes array
                V = copy.copy(V)
                V += num.array([
                    self.geo_reference.get_xllcorner(),
                    self.geo_reference.get_yllcorner()
                ], num.float)
        return V

    def get_vertex_coordinates(self, triangle_id=None, absolute=False):
        """Return vertex coordinates for all triangles.

        Return all vertex coordinates for all triangles as a 3*M x 2 array
        where the jth vertex of the ith triangle is located in row 3*i+j and
        M is the number of triangles in the mesh.

        if triangle_id is specified (an integer) the 3 vertex coordinates
        for triangle_id are returned.

        Boolean keyword argument absolute determines whether coordinates
        are to be made absolute by taking georeference into account.
        Default is False as many parts of ANUGA expect relative coordinates.
        """

        V = self.vertex_coordinates

        if triangle_id is None:
            if absolute is True:
                if not self.geo_reference.is_absolute():
                    V = self.geo_reference.get_absolute(V)
            return V
        else:
            i = triangle_id
            msg = 'triangle_id must be an integer'
            assert int(i) == i, msg
            assert 0 <= i < self.number_of_triangles

            i3 = 3 * i
            if absolute is True and not self.geo_reference.is_absolute():
                offset = num.array([
                    self.geo_reference.get_xllcorner(),
                    self.geo_reference.get_yllcorner()
                ], num.float)

                return V[i3:i3 + 3, :] + offset
            else:
                return V[i3:i3 + 3, :]

    def get_vertex_coordinate(self, i, j, absolute=False):
        """Return coordinates for vertex j of the i'th triangle.
        Return value is the numeric array slice [x, y]
        """

        msg = 'vertex id j must be an integer in [0,1,2]'
        assert j in [0, 1, 2], msg

        V = self.get_vertex_coordinates(triangle_id=i, absolute=absolute)
        return V[j, :]

    def compute_vertex_coordinates(self):
        """Return all vertex coordinates for all triangles as a 3*M x 2 array
        where the jth vertex of the ith triangle is located in row 3*i+j.

        This function is used to precompute this important structure. Use
        get_vertex_coordinates to retrieve the points.
        """

        M = self.number_of_triangles
        vertex_coordinates = num.zeros((3 * M, 2), num.float)

        k0 = self.triangles[:, 0]
        k1 = self.triangles[:, 1]
        k2 = self.triangles[:, 2]

        #        I = num.arange(M,dtype=num.int)
        #
        #        V0 = V[0:3*M:3, :]
        #        V1 = V[1:3*M:3, :]
        #        V2 = V[2:3*M:3, :]

        vertex_coordinates[0:3 * M:3, :] = self.nodes[k0, :]
        vertex_coordinates[1:3 * M:3, :] = self.nodes[k1, :]
        vertex_coordinates[2:3 * M:3, :] = self.nodes[k2, :]

        #        for i in range(M):
        #            for j in range(3):
        #                k = self.triangles[i,j] # Index of vertex j in triangle i
        #                vertex_coordinates[3*i+j,:] = self.nodes[k]

        return vertex_coordinates

    def get_edge_midpoint_coordinates(self, triangle_id=None, absolute=False):
        """Return edge midpoint coordinates for all triangles or from particular triangle.

        Return all edge midpoint coordinates for all triangles as a 3*M x 2 array
        where the jth midpoint of the ith triangle is located in row 3*i+j and
        M is the number of triangles in the mesh.

        if triangle_id is specified (an integer) the 3 midpoint coordinates
        for triangle_id are returned.

        Boolean keyword argument absolute determines whether coordinates
        are to be made absolute by taking georeference into account.
        Default is False as many parts of ANUGA expect relative coordinates.
        """

        E = self.edge_midpoint_coordinates

        if triangle_id is None:
            if absolute is True:
                if not self.geo_reference.is_absolute():
                    E = self.geo_reference.get_absolute(E)
            return E
        else:
            i = triangle_id
            msg = 'triangle_id must be an integer'
            assert int(i) == i, msg
            assert 0 <= i < self.number_of_triangles

            i3 = 3 * i
            if absolute is True and not self.geo_reference.is_absolute():
                offset = num.array([
                    self.geo_reference.get_xllcorner(),
                    self.geo_reference.get_yllcorner()
                ], num.float)

                return E[i3:i3 + 3, :] + offset
            else:
                return E[i3:i3 + 3, :]

    def get_edge_midpoint_coordinate(self, i, j, absolute=False):
        """Return coordinates for edge midpoint j of the i'th triangle.
        Return value is the numeric array slice [x, y]
        """

        msg = 'edge midpoint id j must be an integer in [0,1,2]'
        assert j in [0, 1, 2], msg

        E = self.get_edge_midpoint_coordinates(triangle_id=i,
                                               absolute=absolute)
        return E[j, :]  # Return (x, y) for edge mid point

    def compute_edge_midpoint_coordinates(self):
        """Return all edge midpoint coordinates for all triangles as a 3*M x 2 array
        where the jth edge midpoint of the ith triangle is located in row 3*i+j.

        This function is used to precompute this important structure. Use
        get_edge_midpoint_coordinates to retrieve the points.

        Assumes that vertex_coordinates have been computed
        """

        M = self.number_of_triangles
        E = num.zeros((3 * M, 2), num.float)

        V = self.vertex_coordinates

        V0 = V[0:3 * M:3, :]
        V1 = V[1:3 * M:3, :]
        V2 = V[2:3 * M:3, :]

        #print V.shape, V0.shape, V1.shape, V2.shape

        #print E.shape, E[0:3*M:3, :].shape, E[1:3*M:3, :].shape, E[2:3*M:3, :].shape
        E[0:3 * M:3, :] = 0.5 * (V1 + V2)
        E[1:3 * M:3, :] = 0.5 * (V2 + V0)
        E[2:3 * M:3, :] = 0.5 * (V0 + V1)

        return E

    def get_triangles(self, indices=None):
        """Get mesh triangles.

        Return Mx3 integer array where M is the number of triangles.
        Each row corresponds to one triangle and the three entries are
        indices into the mesh nodes which can be obtained using the method
        get_nodes()

        Optional argument, indices is the set of triangle ids of interest.
        """

        if indices is None:
            return self.triangles

        return num.take(self.triangles, indices, axis=0)

    def get_disconnected_triangles(self):
        """Get mesh based on nodes obtained from get_vertex_coordinates.

        Return an Mx3 array of integers where each row corresponds to
        a triangle. A triangle is a triplet of indices into the
        point coordinates obtained from get_vertex_coordinates and each
        index appears only once.

        This provides a mesh where no triangles share nodes
        (hence the name disconnected triangles) and different
        nodes may have the same coordinates.

        This version of the mesh is useful for storing meshes with
        discontinuities at each node and is e.g. used for storing
        data in sww files.

        The triangles created will have the format
        [[0,1,2],
         [3,4,5],
         [6,7,8],
         ...
         [3*M-3, 3*M-2, 3*M-1]]
        """

        M = len(self)  # Number of triangles
        K = 3 * M  # Total number of unique vertices
        return num.reshape(num.arange(K, dtype=num.int), (M, 3))

    def get_unique_vertices(self, indices=None):
        """Return indices to vertices as a sorted list.
           FIXME (Ole): It may not be needed anymore
        """

        triangles = self.get_triangles(indices=indices)
        unique_verts = {}
        for triangle in triangles:
            unique_verts[triangle[0]] = 0
            unique_verts[triangle[1]] = 0
            unique_verts[triangle[2]] = 0
        res = list(unique_verts.keys())
        res.sort()  # Sort into ascending order (uniqueness comes from the dict)
        return res

        # Note Padarn 27/11/12:
        # This function was modified, but then it was decided it was not
        # needed. It should be restored if it is used elsewhere in the code
        # (it was being used in quantity.py in the _set_vertex_values function).
        # Note however, the function in the head of the code is very slow and
        # could be easily sped up many fold.
        #
        # Have we profiled it? (Ole 31/5/2020)

    def get_triangles_and_vertices_per_node(self, node=None):
        """Get triangles associated with given node.

        Return list of triangle_ids, vertex_ids for specified node.
        If node is None or absent, this information will be returned
        for all nodes in a list L where L[v] is the triangle
        list for node v.
        """

        triangle_list = []
        if node is not None:
            # Get index for this node
            #first = num.sum(self.number_of_triangles_per_node[:node])

            first = self.node_index[node]
            # Get number of triangles for this node
            count = self.number_of_triangles_per_node[node]

            for i in range(count):
                index = self.vertex_value_indices[first + i]

                volume_id = old_div(index, 3)
                vertex_id = index % 3

                triangle_list.append((volume_id, vertex_id))

            triangle_list = num.array(triangle_list, num.int)  #array default#
        else:
            # Get info for all nodes recursively.
            # If need be, we can speed this up by
            # working directly with the inverted triangle structure
            for i in range(self.number_of_nodes):
                L = self.get_triangles_and_vertices_per_node(node=i)
                triangle_list.append(L)

        return triangle_list

    def build_inverted_triangle_structure(self):
        """Build structure listing triangles belonging to each node

        Two arrays are created and stored as mesh attributes:

        number_of_triangles_per_node: An integer array of length N
        listing for each node how many triangles use it. N is the number of
        nodes in mesh.

        vertex_value_indices: An array of length 3*M listing indices into
        triangles ordered by node number. The (triangle_id, vertex_id)
        pairs are obtained from each index as (index/3, index%3) or each
        index can be used directly into a flat triangles array. This
        is for example the case in the quantity.c where this structure is
        used to average vertex values efficiently.

        Example:
        a = [0.0, 0.0] # node 0
        b = [0.0, 2.0] # node 1
        c = [2.0, 0.0] # node 2
        d = [0.0, 4.0] # node 3
        e = [2.0, 2.0] # node 4
        f = [4.0, 0.0] # node 5
        nodes = array([a, b, c, d, e, f])

        #                    bac,     bce,     ecf,     dbe
        triangles = array([[1,0,2], [1,2,4], [4,2,5], [3,1,4]])

        For this structure:
        number_of_triangles_per_node = [1 3 3 1 3 1]
        which means that node a has 1 triangle associated with it, node b
        has 3, node c has 3 and so on.

        vertex_value_indices = [ 1  0  3 10  2  4  7  9  5  6 11  8]
        which reflects the fact that
        node 0 is used by triangle 0, vertex 1 (index = 1)
        node 1 is used by triangle 0, vertex 0 (index = 0)
                   and by triangle 1, vertex 0 (index = 3)
                   and by triangle 3, vertex 1 (index = 10)
        node 2 is used by triangle 0, vertex 2 (index = 2)
                   and by triangle 1, vertex 1 (index = 4)
                   and by triangle 2, vertex 1 (index = 7)
        node 3 is used by triangle 3, vertex 0 (index = 9)
        node 4 is used by triangle 1, vertex 2 (index = 5)
                   and by triangle 2, vertex 0 (index = 6)
                   and by triangle 3, vertex 2 (index = 11)
        node 5 is used by triangle 2, vertex 2 (index = 8)

        Preconditions:
          self.nodes and self.triangles are defined

        Postcondition:
          self.number_of_triangles_per_node is built
          self.vertex_value_indices is built
        """

        # Count number of triangles per node
        #        number_of_triangles_per_node = num.zeros(self.number_of_nodes,
        #                                                 num.int)       #array default#
        #        for volume_id, triangle in enumerate(self.get_triangles()):
        #            for vertex_id in triangle:
        #                number_of_triangles_per_node[vertex_id] += 1

        # Need to pad number_of_triangles_per_node in case lone nodes at end of list
        #number_of_triangles_per_node = num.zeros(self.number_of_nodes, num.int)

        number_of_triangles_per_node = num.bincount(
            self.triangles.flat).astype(num.int)
        number_of_lone_nodes = self.number_of_nodes - len(
            number_of_triangles_per_node)

        orphan_nodes = num.argwhere(number_of_triangles_per_node == 0)
        number_of_orphan_nodes = len(orphan_nodes)

        if number_of_orphan_nodes > 0 and self.verbose:
            msg = 'Node(s) %d not associated to a triangle.' % orphan_nodes[0]
            print(msg)

        if number_of_lone_nodes > 0:
            number_of_triangles_per_node =  \
               num.append(number_of_triangles_per_node,num.zeros(number_of_lone_nodes,num.int))

        #assert num.allclose(number_of_triangles_per_node_new, number_of_triangles_per_node)

        # Allocate space for inverted structure
        number_of_entries = num.sum(number_of_triangles_per_node)

        assert number_of_entries == 3 * self.number_of_triangles

        #vertex_value_indices = num.zeros(number_of_entries, num.int) #array default#

        # Array of vertex_indices (3*vol_id+vertex_id) sorted into contiguous
        # order around each node. Use with number_of_triangles_per_node to
        # find vertices associated with a node.
        # i.e. there are number_of_triangles_per_node[i] vertices for node i
        vertex_value_indices = num.argsort(self.triangles.flat).astype(num.int)
        #vertex_value_indices = num.argsort(self.triangles.flatten())

        #        node_index = num.zeros((self.number_of_nodes)+1, dtype = num.int)
        #        node_index[0] = 0
        #        for i in xrange(self.number_of_nodes):
        #            node_index[i+1] = node_index[i] + number_of_triangles_per_node[i]

        node_index = num.zeros((self.number_of_nodes) + 1, dtype=num.int)
        node_index[1:] = num.cumsum(number_of_triangles_per_node)
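
        # Together these give, for node i, the flat vertex indices
        # vertex_value_indices[node_index[i]:node_index[i+1]], each of which
        # decodes as (triangle_id, vertex_id) = (index // 3, index % 3).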

        #assert num.allclose(node_index,node_index_new)

        # Save structures
        self.node_index = node_index
        self.number_of_triangles_per_node = number_of_triangles_per_node
        self.vertex_value_indices = vertex_value_indices

    def get_extent(self, absolute=False):
        """Return min and max of all x and y coordinates

        Boolean keyword argument absolute determines whether coordinates
        are to be made absolute by taking georeference into account
        """

        C = self.get_vertex_coordinates(absolute=absolute)
        X = C[:, 0:6:2].copy()
        Y = C[:, 1:6:2].copy()

        xmin = num.min(X)
        xmax = num.max(X)
        ymin = num.min(Y)
        ymax = num.max(Y)

        return xmin, xmax, ymin, ymax

    def get_areas(self):
        """Get areas of all individual triangles."""

        return self.areas

    def get_area(self):
        """Return total area of mesh"""

        return num.sum(self.areas)

    def set_georeference(self, g):
        self.geo_reference = g

    def get_georeference(self):
        return self.geo_reference
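
# A minimal usage sketch (not part of the class excerpt above; values follow
# the docstring example): build a two-triangle mesh and query basic geometry.
nodes = [[0.0, 0.0], [0.0, 2.0], [2.0, 0.0], [2.0, 2.0]]
triangles = [[1, 0, 2], [1, 2, 3]]           # bac, bce
mesh = General_mesh(nodes, triangles)
print(mesh.get_area())                       # total area: 4.0
print(mesh.get_normals().shape)              # (2, 6): one unit normal (x, y) per edge
print(mesh.get_vertex_coordinates().shape)   # (6, 2): three vertex rows per triangle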
Example #5
def sww2dem(
        name_in,
        name_out,
        quantity=None,  # defaults to elevation
        reduction=None,
        cellsize=10,
        number_of_decimal_places=None,
        NODATA_value=-9999.0,
        easting_min=None,
        easting_max=None,
        northing_min=None,
        northing_max=None,
        verbose=False,
        origin=None,
        datum='WGS84',
        block_size=None):
    """Read SWW file and convert to Digitial Elevation model format
    (.asc or .ers)

    Example (ASC):
    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    The number of decimal places can be specified by the user in the call
    to sww2dem to save on disk space requirements.

    Also writes an accompanying file with the same basename_in but with
    extension .prj, used to fix the UTM zone, datum, false northings and
    eastings.

    The prj format is assumed to be:

    Projection    UTM
    Zone          56
    Datum         WGS84
    Zunits        NO
    Units         METERS
    Spheroid      WGS84
    Xshift        0.0000000000
    Yshift        10000000.0000000000
    Parameters

    The parameter quantity must be the name of an existing quantity or
    an expression involving existing quantities. The default is
    'elevation'. Quantity is not a list of quantities.

    If reduction is given and it's an index, sww2dem will output the quantity at that time-step.
    If reduction is given and it's a built-in function (e.g. max, min, mean), then that
    function is used to reduce the quantity over all time-steps. If reduction is not given,
    reduction is set to "max" by default.

    datum

    The output format can be either 'asc' or 'ers' (taken from the extension of name_out).
    block_size - sets the number of slices along the non-time axis to
                 process in one block.
    """

    import sys
    import types

    from anuga.geometry.polygon import inside_polygon, outside_polygon
    from anuga.abstract_2d_finite_volumes.util import \
         apply_expression_to_dictionary

    basename_in, in_ext = os.path.splitext(name_in)
    basename_out, out_ext = os.path.splitext(name_out)
    out_ext = out_ext.lower()

    if in_ext != '.sww':
        raise IOError('Input format for %s must be .sww' % name_in)

    if out_ext not in ['.asc', '.ers']:
        raise IOError('Format for %s must be either asc or ers.' % name_out)

    false_easting = 500000
    false_northing = 10000000

    if quantity is None:
        quantity = 'elevation'

    if reduction is None:
        reduction = max

    if quantity_formula.has_key(quantity):
        quantity = quantity_formula[quantity]

    if number_of_decimal_places is None:
        number_of_decimal_places = 3

    if block_size is None:
        block_size = DEFAULT_BLOCK_SIZE

    assert (isinstance(block_size, (int, long, float)))

    # Read sww file
    if verbose:
        log.critical('Reading from %s' % name_in)
        log.critical('Output directory is %s' % name_out)

    from anuga.file.netcdf import NetCDFFile
    fid = NetCDFFile(name_in)

    #Get extent and reference
    x = num.array(fid.variables['x'][:], num.float)
    y = num.array(fid.variables['y'][:], num.float)
    volumes = num.array(fid.variables['volumes'][:], num.int)
    if type(reduction) is not types.BuiltinFunctionType:
        times = fid.variables['time'][reduction]
    else:
        times = fid.variables['time'][:]

    try:  # works with netcdf4
        number_of_timesteps = len(fid.dimensions['number_of_timesteps'])
        number_of_points = len(fid.dimensions['number_of_points'])
    except:  #works with scientific.io.netcdf
        number_of_timesteps = fid.dimensions['number_of_timesteps']
        number_of_points = fid.dimensions['number_of_points']

    if origin is None:
        # Get geo_reference
        # sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError, e:
            geo_reference = Geo_reference()  # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()
        zone = geo_reference.get_zone()
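
# A minimal usage sketch (not part of the excerpt above; the file names are
# hypothetical): write the maximum stage over all timesteps to an ArcInfo
# ASCII grid with 25 m cells.
sww2dem('results.sww', 'results_stage_max.asc',
        quantity='stage',
        reduction=max,
        cellsize=25,
        verbose=True)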
def sww2dem(name_in, name_out,
            quantity=None, # defaults to elevation
            reduction=None,
            cellsize=10,
            number_of_decimal_places=None,
            NODATA_value=-9999.0,
            easting_min=None,
            easting_max=None,
            northing_min=None,
            northing_max=None,
            verbose=False,
            origin=None,
            datum='WGS84',
            block_size=None):
    """Read SWW file and convert to Digitial Elevation model format
    (.asc or .ers)

    Example (ASC):
    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    The number of decimal places can be specified by the user to save
    on disk space requirements by specifying in the call to sww2dem.

    Also write accompanying file with same basename_in but extension .prj
    used to fix the UTM zone, datum, false northings and eastings.

    The prj format is assumed to be as

    Projection    UTM
    Zone          56
    Datum         WGS84
    Zunits        NO
    Units         METERS
    Spheroid      WGS84
    Xshift        0.0000000000
    Yshift        10000000.0000000000
    Parameters

    The parameter quantity must be the name of an existing quantity or
    an expression involving existing quantities. The default is
    'elevation'. Quantity is not a list of quantities.

    If reduction is given and it's an index, sww2dem will output the quantity at that time-step. 
    If reduction is given and it's a built in function (eg max, min, mean), then that 
    function is used to reduce the quantity over all time-steps. If reduction is not given, 
    reduction is set to "max" by default.

    datum

    format can be either 'asc' or 'ers'
    block_size - sets the number of slices along the non-time axis to
                 process in one block.
    """

    import sys
    import types

    from anuga.geometry.polygon import inside_polygon, outside_polygon
    from anuga.abstract_2d_finite_volumes.util import \
         apply_expression_to_dictionary

    basename_in, in_ext = os.path.splitext(name_in)
    basename_out, out_ext = os.path.splitext(name_out)
    out_ext = out_ext.lower()

    if in_ext != '.sww':
        raise IOError('Input format for %s must be .sww' % name_in)

    if out_ext not in ['.asc', '.ers']:
        raise IOError('Format for %s must be either asc or ers.' % name_out)

    false_easting = 500000
    false_northing = 10000000

    if quantity is None:
        quantity = 'elevation'
    
    if reduction is None:
        reduction = max

    if quantity_formula.has_key(quantity):
        quantity = quantity_formula[quantity]

    if number_of_decimal_places is None:
        number_of_decimal_places = 3

    if block_size is None:
        block_size = DEFAULT_BLOCK_SIZE

    assert(isinstance(block_size, (int, long, float)))

    # Read sww file
    if verbose:
        log.critical('Reading from %s' % name_in)
        log.critical('Output directory is %s' % name_out)

    from anuga.file.netcdf import NetCDFFile
    fid = NetCDFFile(name_in)

    #Get extent and reference
    x = num.array(fid.variables['x'], num.float)
    y = num.array(fid.variables['y'], num.float)
    volumes = num.array(fid.variables['volumes'], num.int)
    if type(reduction) is not types.BuiltinFunctionType:
        times = fid.variables['time'][reduction]
    else:
        times = fid.variables['time'][:]

    number_of_timesteps = fid.dimensions['number_of_timesteps']
    number_of_points = fid.dimensions['number_of_points']

    if origin is None:
        # Get geo_reference
        # sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError as e:
            geo_reference = Geo_reference() # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()
        zone = geo_reference.get_zone()
Example #8
0
def sww2pts(name_in,
            name_out=None,
            data_points=None,
            quantity=None,
            timestep=None,
            reduction=None,
            NODATA_value=-9999,
            verbose=False,
            origin=None):
    """Read SWW file and convert to interpolated values at selected points

    The parameter 'quantity' must be the name of an existing quantity or
    an expression involving existing quantities. The default is 'elevation'.

    if timestep (an index) is given, output quantity at that timestep.

    if reduction is given use that to reduce quantity over all timesteps.

    data_points (Nx2 array) give locations of points where quantity is to 
    be computed.
    """

    import sys
    from anuga.geometry.polygon import inside_polygon, outside_polygon
    from anuga.abstract_2d_finite_volumes.util import \
             apply_expression_to_dictionary
    from anuga.geospatial_data.geospatial_data import Geospatial_data

    if quantity is None:
        quantity = 'elevation'

    if reduction is None:
        reduction = max

    basename_in, in_ext = os.path.splitext(name_in)

    if name_out is not None:
        basename_out, out_ext = os.path.splitext(name_out)
    else:
        basename_out = basename_in + '_%s' % quantity
        out_ext = '.pts'
        name_out = basename_out + out_ext

    if in_ext != '.sww':
        raise IOError('Input format for %s must be .sww' % name_in)

    if out_ext != '.pts':
        raise IOError('Output format for %s must be .pts' % name_out)

    # Read sww file
    if verbose: log.critical('Reading from %s' % name_in)
    from anuga.file.netcdf import NetCDFFile
    fid = NetCDFFile(name_in)

    # Get extent and reference
    x = fid.variables['x'][:]
    y = fid.variables['y'][:]
    volumes = fid.variables['volumes'][:]

    try:  # works with netcdf4
        number_of_timesteps = len(fid.dimensions['number_of_timesteps'])
        number_of_points = len(fid.dimensions['number_of_points'])
    except:  #works with scientific.io.netcdf
        number_of_timesteps = fid.dimensions['number_of_timesteps']
        number_of_points = fid.dimensions['number_of_points']

    if origin is None:
        # Get geo_reference
        # sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError as e:
            geo_reference = Geo_reference()  # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()
        zone = geo_reference.get_zone()
def sww2dem(
        name_in,
        name_out,
        quantity=None,  # defaults to elevation
        reduction=None,
        cellsize=10,
        number_of_decimal_places=None,
        NODATA_value=-9999.0,
        easting_min=None,
        easting_max=None,
        northing_min=None,
        northing_max=None,
        verbose=False,
        origin=None,
        datum='WGS84',
        block_size=None):
    """Read SWW file and convert to Digitial Elevation model format
    (.asc or .ers)

    Example (ASC):
    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    The number of decimal places can be specified in the call to sww2dem
    to reduce disk space requirements.

    Also write accompanying file with same basename_in but extension .prj
    used to fix the UTM zone, datum, false northings and eastings.

    The prj format is assumed to be:

    Projection    UTM
    Zone          56
    Datum         WGS84
    Zunits        NO
    Units         METERS
    Spheroid      WGS84
    Xshift        0.0000000000
    Yshift        10000000.0000000000
    Parameters

    The parameter quantity must be the name of an existing quantity or
    an expression involving existing quantities. The default is
    'elevation'. Quantity is not a list of quantities.

    If reduction is given and it is an index, sww2dem will output the quantity
    at that time-step. If reduction is given and it is a built-in function
    (e.g. max, min, mean), then that function is used to reduce the quantity
    over all time-steps. If reduction is not given, it defaults to max.

    datum - the geodetic datum for the output grid (default 'WGS84')

    The output format ('asc' or 'ers') is determined by the extension of name_out.
    block_size - sets the number of slices along the non-time axis to
                 process in one block.
    """

    import sys
    import types

    from anuga.geometry.polygon import inside_polygon, outside_polygon
    from anuga.abstract_2d_finite_volumes.util import \
         apply_expression_to_dictionary

    basename_in, in_ext = os.path.splitext(name_in)
    basename_out, out_ext = os.path.splitext(name_out)
    out_ext = out_ext.lower()

    if in_ext != '.sww':
        raise IOError('Input format for %s must be .sww' % name_in)

    if out_ext not in ['.asc', '.ers']:
        raise IOError('Format for %s must be either asc or ers.' % name_out)

    false_easting = 500000
    false_northing = 10000000

    if quantity is None:
        quantity = 'elevation'

    if reduction is None:
        reduction = max

    if quantity in quantity_formula:
        quantity = quantity_formula[quantity]

    if number_of_decimal_places is None:
        number_of_decimal_places = 3

    if block_size is None:
        block_size = DEFAULT_BLOCK_SIZE

    assert isinstance(block_size, (int, float))

    # Read sww file
    if verbose:
        log.critical('Reading from %s' % name_in)
        log.critical('Output directory is %s' % name_out)

    from anuga.file.netcdf import NetCDFFile
    fid = NetCDFFile(name_in)

    #Get extent and reference
    x = num.array(fid.variables['x'], num.float)
    y = num.array(fid.variables['y'], num.float)
    volumes = num.array(fid.variables['volumes'], num.int)
    if type(reduction) is not types.BuiltinFunctionType:
        times = fid.variables['time'][reduction]
    else:
        times = fid.variables['time'][:]

    number_of_timesteps = fid.dimensions['number_of_timesteps']
    number_of_points = fid.dimensions['number_of_points']

    if origin is None:
        # Get geo_reference
        # sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError as e:
            geo_reference = Geo_reference()  # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()
        zone = geo_reference.get_zone()
    else:
        zone = origin[0]
        xllcorner = origin[1]
        yllcorner = origin[2]

    # FIXME: Refactor using code from Interpolation_function.statistics
    # (in interpolate.py)
    # Something like print swwstats(swwname)
    if verbose:
        log.critical('------------------------------------------------')
        log.critical('Statistics of SWW file:')
        log.critical('  Name: %s' % name_in)
        log.critical('  Reference:')
        log.critical('    Lower left corner: [%f, %f]' %
                     (xllcorner, yllcorner))
        if type(reduction) is not types.BuiltinFunctionType:
            log.critical('    Time: %f' % times)
        else:
            log.critical('    Start time: %f' % fid.starttime[0])
        log.critical('  Extent:')
        log.critical('    x [m] in [%f, %f], len(x) == %d' %
                     (num.min(x), num.max(x), len(x.flat)))
        log.critical('    y [m] in [%f, %f], len(y) == %d' %
                     (num.min(y), num.max(y), len(y.flat)))
        if type(reduction) is not types.BuiltinFunctionType:
            log.critical('    t [s] = %f, len(t) == %d' % (times, 1))
        else:
            log.critical('    t [s] in [%f, %f], len(t) == %d' %
                         (min(times), max(times), len(times)))
        log.critical('  Quantities [SI units]:')

        # Comment out for reduced memory consumption
        for name in ['stage', 'xmomentum', 'ymomentum']:
            q = fid.variables[name][:].flatten()
            if type(reduction) is not types.BuiltinFunctionType:
                q = q[reduction * len(x):(reduction + 1) * len(x)]
            if verbose:
                log.critical('    %s in [%f, %f]' % (name, min(q), max(q)))
        for name in ['elevation']:
            q = fid.variables[name][:].flatten()
            if verbose:
                log.critical('    %s in [%f, %f]' % (name, min(q), max(q)))

    # Get the variables in the supplied expression.
    # This may throw a SyntaxError exception.
    var_list = get_vars_in_expression(quantity)

    # Check that we have the required variables in the SWW file.
    missing_vars = []
    for name in var_list:
        try:
            _ = fid.variables[name]
        except KeyError:
            missing_vars.append(name)
    if missing_vars:
        msg = (
            "In expression '%s', variables %s are not in the SWW file '%s'" %
            (quantity, str(missing_vars), name_in))
        raise_(Exception, msg)

    # Create result array and start filling, block by block.
    result = num.zeros(number_of_points, num.float)

    if verbose:
        msg = 'Slicing sww file, num points: ' + str(number_of_points)
        msg += ', block size: ' + str(block_size)
        log.critical(msg)

    for start_slice in range(0, number_of_points, block_size):
        # Limit slice size to array end if at last block
        end_slice = min(start_slice + block_size, number_of_points)

        # Get slices of all required variables
        q_dict = {}
        for name in var_list:
            # check if variable has time axis
            if len(fid.variables[name].shape) == 2:
                q_dict[name] = fid.variables[name][:, start_slice:end_slice]
            else:  # no time axis
                q_dict[name] = fid.variables[name][start_slice:end_slice]

        # Evaluate expression with quantities found in SWW file
        res = apply_expression_to_dictionary(quantity, q_dict)

        if len(res.shape) == 2:
            new_res = num.zeros(res.shape[1], num.float)
            for k in range(res.shape[1]):
                if type(reduction) is not types.BuiltinFunctionType:
                    new_res[k] = res[reduction, k]
                else:
                    new_res[k] = reduction(res[:, k])
            res = new_res

        result[start_slice:end_slice] = res

    # Post condition: Now q has dimension: number_of_points
    assert len(result.shape) == 1
    assert result.shape[0] == number_of_points

    if verbose:
        log.critical('Processed values for %s are in [%f, %f]' %
                     (quantity, min(result), max(result)))

    # Create grid and update xll/yll corner and x,y
    # Relative extent
    if easting_min is None:
        xmin = min(x)
    else:
        xmin = easting_min - xllcorner

    if easting_max is None:
        xmax = max(x)
    else:
        xmax = easting_max - xllcorner

    if northing_min is None:
        ymin = min(y)
    else:
        ymin = northing_min - yllcorner

    if northing_max is None:
        ymax = max(y)
    else:
        ymax = northing_max - yllcorner

    msg = 'xmax must be greater than or equal to xmin.\n'
    msg += 'I got xmin = %f, xmax = %f' % (xmin, xmax)
    assert xmax >= xmin, msg

    msg = 'ymax must be greater than or equal to ymin.\n'
    msg += 'I got ymin = %f, ymax = %f' % (ymin, ymax)
    assert ymax >= ymin, msg

    if verbose: log.critical('Creating grid')
    ncols = int(old_div((xmax - xmin), cellsize)) + 1
    nrows = int(old_div((ymax - ymin), cellsize)) + 1

    # New absolute reference and coordinates
    newxllcorner = xmin + xllcorner
    newyllcorner = ymin + yllcorner

    x = x + xllcorner - newxllcorner
    y = y + yllcorner - newyllcorner

    vertex_points = num.concatenate((x[:, num.newaxis], y[:, num.newaxis]),
                                    axis=1)
    assert len(vertex_points.shape) == 2

    def calc_grid_values_old(vertex_points, volumes, result):

        grid_points = num.zeros((ncols * nrows, 2), num.float)

        for i in range(nrows):
            if out_ext == '.asc':
                yg = i * cellsize
            else:
                # this will flip the order of the y values for ers
                yg = (nrows - i) * cellsize

            for j in range(ncols):
                xg = j * cellsize
                k = i * ncols + j

                grid_points[k, 0] = xg
                grid_points[k, 1] = yg

        # Interpolate
        from anuga.fit_interpolate.interpolate import Interpolate

        # Remove loners from vertex_points, volumes here
        vertex_points, volumes = remove_lone_verts(vertex_points, volumes)
        # export_mesh_file('monkey.tsh',{'vertices':vertex_points, 'triangles':volumes})

        interp = Interpolate(vertex_points, volumes, verbose=verbose)

        bprint = 0

        # Interpolate using quantity values
        if verbose: log.critical('Interpolating')
        grid_values = interp.interpolate(bprint, result, grid_points).flatten()
        outside_indices = interp.get_outside_poly_indices()

        for i in outside_indices:
            #print 'change grid_value',NODATA_value
            grid_values[i] = NODATA_value

        return grid_values

    def calc_grid_values(vertex_points, volumes, result):

        grid_points = num.zeros((ncols * nrows, 2), num.float)

        for i in range(nrows):
            if out_ext == '.asc':
                yg = i * cellsize
            else:
                #this will flip the order of the y values for ers
                yg = (nrows - i) * cellsize

            for j in range(ncols):
                xg = j * cellsize
                k = i * ncols + j

                grid_points[k, 0] = xg
                grid_points[k, 1] = yg

        grid_values = num.zeros(ncols * nrows, num.float)

        eval_grid(nrows, ncols, NODATA_value, grid_points,
                  vertex_points.flatten(), volumes, result, grid_values)
        return grid_values.flatten()

    grid_values = calc_grid_values(vertex_points, volumes, result)

    if verbose:
        log.critical('Interpolated values are in [%f, %f]' %
                     (num.min(grid_values), num.max(grid_values)))

    # Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)


#    P = interp.mesh.get_boundary_polygon()
#    outside_indices = outside_polygon(grid_points, P, closed=True)

    if out_ext == '.ers':
        # setup ERS header information
        grid_values = num.reshape(grid_values, (nrows, ncols))
        header = {}
        header['datum'] = '"' + datum + '"'
        # FIXME The use of hardwired UTM and zone number needs to be made optional
        # FIXME Also need an automatic test for coordinate type (i.e. EN or LL)
        header['projection'] = '"UTM-' + str(zone) + '"'
        header['coordinatetype'] = 'EN'
        if header['coordinatetype'] == 'LL':
            header['longitude'] = str(newxllcorner)
            header['latitude'] = str(newyllcorner)
        elif header['coordinatetype'] == 'EN':
            header['eastings'] = str(newxllcorner)
            header['northings'] = str(newyllcorner)
        header['nullcellvalue'] = str(NODATA_value)
        header['xdimension'] = str(cellsize)
        header['ydimension'] = str(cellsize)
        header['value'] = '"' + quantity + '"'
        #header['celltype'] = 'IEEE8ByteReal'  #FIXME: Breaks unit test

        #Write
        if verbose:
            log.critical('Writing %s' % name_out)

        import ermapper_grids

        ermapper_grids.write_ermapper_grid(name_out, grid_values, header)

        fid.close()

    else:
        #Write to Ascii format
        #Write prj file
        prjfile = basename_out + '.prj'

        if verbose: log.critical('Writing %s' % prjfile)
        prjid = open(prjfile, 'w')
        prjid.write('Projection    %s\n' % 'UTM')
        prjid.write('Zone          %d\n' % zone)
        prjid.write('Datum         %s\n' % datum)
        prjid.write('Zunits        NO\n')
        prjid.write('Units         METERS\n')
        prjid.write('Spheroid      %s\n' % datum)
        prjid.write('Xshift        %d\n' % false_easting)
        prjid.write('Yshift        %d\n' % false_northing)
        prjid.write('Parameters\n')
        prjid.close()

        if verbose: log.critical('Writing %s' % name_out)

        ascid = open(name_out, 'w')

        ascid.write('ncols         %d\n' % ncols)
        ascid.write('nrows         %d\n' % nrows)
        ascid.write('xllcorner     %d\n' % newxllcorner)
        ascid.write('yllcorner     %d\n' % newyllcorner)
        ascid.write('cellsize      %f\n' % cellsize)
        ascid.write('NODATA_value  %d\n' % NODATA_value)

        #Get bounding polygon from mesh
        #P = interp.mesh.get_boundary_polygon()
        #inside_indices = inside_polygon(grid_points, P)

        # change printoptions so that a long string of zeros in not
        # summarized as [0.0, 0.0, 0.0, ... 0.0, 0.0, 0.0]
        #printoptions = num.get_printoptions()
        #num.set_printoptions(threshold=sys.maxint)

        format = '%.' + '%g' % number_of_decimal_places + 'e'
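        # e.g. number_of_decimal_places == 3 yields the format string '%.3e'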
        for i in range(nrows):
            if verbose and i % (old_div((nrows + 10), 10)) == 0:
                log.critical('Doing row %d of %d' % (i, nrows))

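            # ESRI ASCII grids list rows from north to south, so the row
            # with the largest y value is written first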
            base_index = (nrows - i - 1) * ncols

            slice = grid_values[base_index:base_index + ncols]

            num.savetxt(ascid, slice.reshape(1, ncols), format, ' ')

        #Close
        ascid.close()
        fid.close()

        return basename_out
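
# Hedged usage sketch (illustration only, not part of the library source).
# The file names below are hypothetical; 'stage - elevation' is an example
# expression built from quantities the docstring says SWW files contain,
# and reduction=max matches the documented default.
sww2dem('runup.sww',                      # hypothetical input SWW file
        'runup_depth.asc',                # .asc extension selects ASCII grid output
        quantity='stage - elevation',     # water depth as an expression of stored quantities
        reduction=max,                    # maximum over all timesteps
        cellsize=10,
        NODATA_value=-9999.0,
        verbose=True)
# A matching 'runup_depth.prj' file with the UTM zone and datum is also written.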
Example #10
0
def sww2pts(name_in, name_out=None,
            data_points=None,
            quantity=None,
            timestep=None,
            reduction=None,
            NODATA_value=-9999,
            verbose=False,
            origin=None):
    """Read SWW file and convert to interpolated values at selected points

    The parameter 'quantity' must be the name of an existing quantity or
    an expression involving existing quantities. The default is 'elevation'.

    if timestep (an index) is given, output quantity at that timestep.

    if reduction is given use that to reduce quantity over all timesteps.

    data_points (Nx2 array) give locations of points where quantity is to 
    be computed.
    """

    import sys
    from anuga.geometry.polygon import inside_polygon, outside_polygon
    from anuga.abstract_2d_finite_volumes.util import \
             apply_expression_to_dictionary
    from anuga.geospatial_data.geospatial_data import Geospatial_data

    if quantity is None:
        quantity = 'elevation'

    if reduction is None:
        reduction = max

    basename_in, in_ext = os.path.splitext(name_in)
    
    if name_out is not None:
        basename_out, out_ext = os.path.splitext(name_out)
    else:
        basename_out = basename_in + '_%s' % quantity
        out_ext = '.pts'
        name_out = basename_out + out_ext

    if in_ext != '.sww':
        raise IOError('Input format for %s must be .sww' % name_in)

    if out_ext != '.pts':
        raise IOError('Output format for %s must be .pts' % name_out)


    # Read sww file
    if verbose: log.critical('Reading from %s' % name_in)
    from anuga.file.netcdf import NetCDFFile
    fid = NetCDFFile(name_in)

    # Get extent and reference
    x = fid.variables['x'][:]
    y = fid.variables['y'][:]
    volumes = fid.variables['volumes'][:]


    try: # works with netcdf4
        number_of_timesteps = len(fid.dimensions['number_of_timesteps'])
        number_of_points = len(fid.dimensions['number_of_points'])
    except: #works with scientific.io.netcdf
        number_of_timesteps = fid.dimensions['number_of_timesteps']
        number_of_points = fid.dimensions['number_of_points']

        
    if origin is None:
        # Get geo_reference
        # sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError as e:
            geo_reference = Geo_reference() # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()
        zone = geo_reference.get_zone()
def sww2pts(name_in,
            name_out=None,
            data_points=None,
            quantity=None,
            timestep=None,
            reduction=None,
            NODATA_value=-9999,
            verbose=False,
            origin=None):
    """Read SWW file and convert to interpolated values at selected points

    The parameter 'quantity' must be the name of an existing quantity or
    an expression involving existing quantities. The default is 'elevation'.

    if timestep (an index) is given, output quantity at that timestep.

    if reduction is given use that to reduce quantity over all timesteps.

    data_points (Nx2 array) give locations of points where quantity is to 
    be computed.
    """

    import sys
    from anuga.geometry.polygon import inside_polygon, outside_polygon
    from anuga.abstract_2d_finite_volumes.util import \
             apply_expression_to_dictionary
    from anuga.geospatial_data.geospatial_data import Geospatial_data

    if quantity is None:
        quantity = 'elevation'

    if reduction is None:
        reduction = max

    basename_in, in_ext = os.path.splitext(name_in)

    if name_out is not None:
        basename_out, out_ext = os.path.splitext(name_out)
    else:
        basename_out = basename_in + '_%s' % quantity
        out_ext = '.pts'
        name_out = basename_out + out_ext

    if in_ext != '.sww':
        raise IOError('Input format for %s must be .sww' % name_in)

    if out_ext != '.pts':
        raise IOError('Output format for %s must be .pts' % name_out)

    # Read sww file
    if verbose: log.critical('Reading from %s' % name_in)
    from anuga.file.netcdf import NetCDFFile
    fid = NetCDFFile(name_in)

    # Get extent and reference
    x = fid.variables['x'][:]
    y = fid.variables['y'][:]
    volumes = fid.variables['volumes'][:]

    try:  # works with netcdf4
        number_of_timesteps = len(fid.dimensions['number_of_timesteps'])
        number_of_points = len(fid.dimensions['number_of_points'])
    except:  #works with scientific.io.netcdf
        number_of_timesteps = fid.dimensions['number_of_timesteps']
        number_of_points = fid.dimensions['number_of_points']

    if origin is None:
        # Get geo_reference
        # sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError as e:
            geo_reference = Geo_reference()  # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()
        zone = geo_reference.get_zone()
    else:
        zone = origin[0]
        xllcorner = origin[1]
        yllcorner = origin[2]

    # FIXME: Refactor using code from file_function.statistics
    # Something like print swwstats(swwname)
    if verbose:
        x = fid.variables['x'][:]
        y = fid.variables['y'][:]
        times = fid.variables['time'][:]
        log.critical('------------------------------------------------')
        log.critical('Statistics of SWW file:')
        log.critical('  Name: %s' % name_in)
        log.critical('  Reference:')
        log.critical('    Lower left corner: [%f, %f]' %
                     (xllcorner, yllcorner))
        log.critical('    Start time: %f' % fid.starttime[0])
        log.critical('  Extent:')
        log.critical('    x [m] in [%f, %f], len(x) == %d' %
                     (num.min(x), num.max(x), len(x.flat)))
        log.critical('    y [m] in [%f, %f], len(y) == %d' %
                     (num.min(y), num.max(y), len(y.flat)))
        log.critical('    t [s] in [%f, %f], len(t) == %d' %
                     (min(times), max(times), len(times)))
        log.critical('  Quantities [SI units]:')
        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
            q = fid.variables[name][:].flat
            log.critical('    %s in [%f, %f]' % (name, min(q), max(q)))

    # Get quantity and reduce if applicable
    if verbose: log.critical('Processing quantity %s' % quantity)

    # Turn NetCDF objects into numeric arrays
    quantity_dict = {}
    for name in list(fid.variables.keys()):
        quantity_dict[name] = fid.variables[name][:]

    # Convert quantity expression to quantities found in sww file
    q = apply_expression_to_dictionary(quantity, quantity_dict)

    if len(q.shape) == 2:
        # q has a time component and needs to be reduced along
        # the temporal dimension
        if verbose: log.critical('Reducing quantity %s' % quantity)

        q_reduced = num.zeros(number_of_points, num.float)
        for k in range(number_of_points):
            q_reduced[k] = reduction(q[:, k])
        q = q_reduced

    # Post condition: Now q has dimension: number_of_points
    assert len(q.shape) == 1
    assert q.shape[0] == number_of_points

    if verbose:
        log.critical('Processed values for %s are in [%f, %f]' %
                     (quantity, min(q), max(q)))

    # Create grid and update xll/yll corner and x,y
    vertex_points = num.concatenate((x[:, num.newaxis], y[:, num.newaxis]),
                                    axis=1)
    assert len(vertex_points.shape) == 2

    # Interpolate
    from anuga.fit_interpolate.interpolate import Interpolate
    interp = Interpolate(vertex_points, volumes, verbose=verbose)

    # Interpolate using quantity values
    if verbose: log.critical('Interpolating')
    interpolated_values = interp.interpolate(q, data_points).flatten()

    if verbose:
        log.critical(
            'Interpolated values are in [%f, %f]' %
            (num.min(interpolated_values), num.max(interpolated_values)))

    # Assign NODATA_value to all points outside bounding polygon
    # (from interpolation mesh)
    P = interp.mesh.get_boundary_polygon()
    outside_indices = outside_polygon(data_points, P, closed=True)

    for i in outside_indices:
        interpolated_values[i] = NODATA_value

    # Store results
    G = Geospatial_data(data_points=data_points,
                        attributes=interpolated_values)

    G.export_points_file(name_out, absolute=True)

    fid.close()
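
# Hedged usage sketch (illustration only, not part of the library source).
# 'flow.sww' and the probe locations are hypothetical; data_points is the
# Nx2 array of query locations described in the docstring, assumed to be in
# the same coordinate system as the mesh vertices read from the file.
import numpy as num    # the module alias used throughout this code

probe_points = num.array([[1000.0, 2000.0],
                          [1500.0, 2500.0]])
sww2pts('flow.sww',                       # hypothetical input SWW file
        data_points=probe_points,         # Nx2 array of query locations
        quantity='elevation',             # stored quantity (the default)
        reduction=max)                    # reduce over all timesteps
# With no name_out given, the result is written to 'flow_elevation.pts'.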
Example #12
0
class General_mesh:
    """Collection of 2D triangular elements

    A triangular element is defined in terms of three vertex ids,
    ordered counter clock-wise, each corresponding to a given node
    which is represented as a coordinate set (x,y).
    Vertices from different triangles can point to the same node.
    The nodes are implemented as an Nx2 numeric array containing the
    x and y coordinates.


    To instantiate:
       Mesh(nodes, triangles)

    where

      nodes is either a list of 2-tuples or an Nx2 numeric array of
      floats representing all x, y coordinates in the mesh.

      triangles is either a list of 3-tuples or an Mx3 numeric array of
      integers representing indices of all vertices in the mesh.
      Each vertex is identified by its index i in [0, N-1].


    Example:

        a = [0.0, 0.0]
        b = [0.0, 2.0]
        c = [2.0,0.0]
        e = [2.0, 2.0]

        nodes = [a, b, c, e]
        triangles = [ [1,0,2], [1,2,3] ]   # bac, bce

        # Create mesh with two triangles: bac and bce
        mesh = Mesh(nodes, triangles)



    Other:

      In addition mesh computes an Mx6 array called vertex_coordinates.
      This structure is derived from coordinates and contains for each
      triangle the three x,y coordinates at the vertices.

      See neighbourmesh.py for a specialisation of the general mesh class
      which includes information about neighbours and the mesh boundary.

      The mesh object is purely geometrical and contains no information
      about quantities defined on the mesh.

    """

    # FIXME: It would be a good idea to use geospatial data as an alternative
    #        input
    def __init__(self,
                 nodes,
                 triangles,
                 geo_reference=None,
                 use_inscribed_circle=False,
                 verbose=False):
        """Build triangular 2d mesh from nodes and triangle information

        Input:

          nodes: x,y coordinates represented as a sequence of 2-tuples or
                 a Nx2 numeric array of floats.

          triangles: sequence of 3-tuples or Mx3 numeric array of
                     non-negative integers representing indices into
                     the nodes array.

          geo_reference (optional): If specified, coordinates are
          assumed to be relative to this origin.


        """

        if verbose: log.critical('General_mesh: Building basic mesh structure')

        self.use_inscribed_circle = use_inscribed_circle
         
        self.triangles = num.array(triangles, num.int)

        if verbose: 
            log.timingInfo("numTriangles, " + str(self.triangles.shape[0]))
       
        self.nodes = num.array(nodes, num.float)

        # Register number of elements and nodes
        self.number_of_triangles = N = self.triangles.shape[0]
        self.number_of_nodes = self.nodes.shape[0]


        # FIXME: this stores a geo_reference, but when coords are returned
        # This geo_ref is not taken into account!
        if geo_reference is None:
            self.geo_reference = Geo_reference()    # Use defaults
        else:
            self.geo_reference = geo_reference

        # Input checks
        msg = ('Triangles must be an Mx3 numeric array or a sequence of 3-tuples. '
               'The supplied array has the shape: %s'
               % str(self.triangles.shape))
        assert len(self.triangles.shape) == 2, msg

        msg = ('Nodes must be an Nx2 numeric array or a sequence of 2-tuples. '
               'The supplied array has the shape: %s' % str(self.nodes.shape))
        assert len(self.nodes.shape) == 2, msg

        msg = 'Vertex indices reference non-existing coordinate sets'
        assert num.max(self.triangles) < self.nodes.shape[0], msg

        # FIXME: Maybe move to statistics?
        # Or use with get_extent
        xy_extent = [min(self.nodes[:,0]), min(self.nodes[:,1]),
                     max(self.nodes[:,0]), max(self.nodes[:,1])]

        self.xy_extent = num.array(xy_extent, num.float)

        # Allocate space for geometric quantities
        self.normals = num.zeros((N, 6), num.float)
        self.areas = num.zeros(N, num.float)
        self.edgelengths = num.zeros((N, 3), num.float)

        # Get x,y coordinates for all triangle vertices and store
        self.centroid_coordinates = num.zeros((N, 2), num.float)

        #Allocate space for geometric quantities
        self.radii = num.zeros(N, num.float)

        # Get x,y coordinates for all triangle vertices and store
        self.vertex_coordinates = V = self.compute_vertex_coordinates()

        # Get x,y coordinates for all triangle edge midpoints and store
        self.edge_midpoint_coordinates  = self.compute_edge_midpoint_coordinates()

        # Initialise each triangle
        if verbose:
            log.critical('General_mesh: Computing areas, normals, '
                         'edgelengths, centroids and radii')


        # Calculate Areas
        V0 = V[0:3*N:3, :]
        V1 = V[1:3*N:3, :]
        V2 = V[2:3*N:3, :]


        # Area
        x0 = V0[:,0]
        y0 = V0[:,1]
        x1 = V1[:,0]
        y1 = V1[:,1]
        x2 = V2[:,0]
        y2 = V2[:,1]

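        # Signed (shoelace) area of each triangle; vertices are ordered
        # counter clock-wise, so the leading minus sign makes valid areas positive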
        self.areas[:] = -((x1*y0-x0*y1) + (x2*y1-x1*y2) + (x0*y2-x2*y0))/2.0
        
        #areas = -((x0-x1)*(y2-y1) - (y0-y1)*(x2-x1))/2.0

        #assert num.allclose(self.areas, areas)
        
        ind = num.where(self.areas <= 0.0)
        msg = 'Degenerate Triangle(s) '+str(ind[0])
        assert num.all(self.areas > 0.0), msg


        #print V.shape, V0.shape, V1.shape, V2.shape

#        #print E.shape, E[0:3*M:3, :].shape, E[1:3*M:3, :].shape, E[2:3*M:3, :].shape
#        E[0:3*M:3, :] = 0.5*(V1+V2)
#        E[1:3*M:3, :] = 0.5*(V2+V0)
#        E[2:3*M:3, :] = 0.5*(V0+V1)

        i0 = self.triangles[:,0]
        i1 = self.triangles[:,1]
        i2 = self.triangles[:,2]

        assert num.allclose( x0, self.nodes[i0,0] )
        assert num.allclose( y0, self.nodes[i0,1] )

        assert num.allclose( x1, self.nodes[i1,0] )
        assert num.allclose( y1, self.nodes[i1,1] )

        assert num.allclose( x2, self.nodes[i2,0] )
        assert num.allclose( y2, self.nodes[i2,1] )


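        # Edge normals: normal j of each triangle is the outward unit normal
        # of the edge opposite vertex j (normal 0 belongs to the edge joining
        # vertices 1 and 2, etc.); stored as n0x, n0y, n1x, n1y, n2x, n2y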
        xn0 = x2-x1
        yn0 = y2-y1
        l0 = num.sqrt(xn0**2 + yn0**2)

        xn0 /= l0
        yn0 /= l0

        xn1 = x0-x2
        yn1 = y0-y2
        l1 = num.sqrt(xn1**2 + yn1**2)

        xn1 /= l1
        yn1 /= l1

        xn2 = x1-x0
        yn2 = y1-y0
        l2 = num.sqrt(xn2**2 + yn2**2)

        xn2 /= l2
        yn2 /= l2

        # Compute and store

        self.normals[:,0] =  yn0
        self.normals[:,1] = -xn0

        self.normals[:,2] =  yn1
        self.normals[:,3] = -xn1

        self.normals[:,4] =  yn2
        self.normals[:,5] = -xn2
        
        self.edgelengths[:,0] = l0
        self.edgelengths[:,1] = l1
        self.edgelengths[:,2] = l2

        self.centroid_coordinates[:,0] = (x0 + x1 + x2)/3
        self.centroid_coordinates[:,1] = (y0 + y1 + y2)/3



        if not self.use_inscribed_circle:
            #OLD code. Computed radii may exceed that of an
            #inscribed circle

            #Midpoints
            xm0 = (x1 + x2)/2
            ym0 = (y1 + y2)/2

            xm1 = (x2 + x0)/2
            ym1 = (y2 + y0)/2

            xm2 = (x0 + x1)/2
            ym2 = (y0 + y1)/2


            #The radius is the distance from the centroid of
            #a triangle to the midpoint of the side of the triangle
            #closest to the centroid

            d0 = num.sqrt((self.centroid_coordinates[:,0] - xm0)**2 + (self.centroid_coordinates[:,1] - ym0)**2)
            d1 = num.sqrt((self.centroid_coordinates[:,0] - xm1)**2 + (self.centroid_coordinates[:,1] - ym1)**2)
            d2 = num.sqrt((self.centroid_coordinates[:,0] - xm2)**2 + (self.centroid_coordinates[:,1] - ym2)**2)


            self.radii[:] = num.minimum(num.minimum(d0, d1), d2)

        else:
            #NEW code added by Peter Row. True radius
            #of inscribed circle is computed

            a = num.sqrt((x0-x1)**2+(y0-y1)**2)
            b = num.sqrt((x1-x2)**2+(y1-y2)**2)
            c = num.sqrt((x2-x0)**2+(y2-y0)**2)

            self.radii[:]=2.0*self.areas/(a+b+c)



#        for i in range(N):
#            if verbose and i % ((N+10)/10) == 0: log.critical('(%d/%d)' % (i, N))
#
#            x0, y0 = V[3*i, :]
#            x1, y1 = V[3*i+1, :]
#            x2, y2 = V[3*i+2, :]
#
#
#            i0 = self.triangles[i][0]
#            i1 = self.triangles[i][1]
#            i2 = self.triangles[i][2]
#
##            assert x0 == self.nodes[i0][0]
##            assert y0 == self.nodes[i0][1]
##
##            assert x1 == self.nodes[i1][0]
##            assert y1 == self.nodes[i1][1]
##
##            assert x2 == self.nodes[i2][0]
##            assert y2 == self.nodes[i2][1]
#
##            # Area
##            self.areas[i] = abs((x1*y0-x0*y1) + (x2*y1-x1*y2) + (x0*y2-x2*y0))/2
##
##            msg = 'Triangle %g (%f,%f), (%f,%f), (%f, %f)' % (i,x0,y0,x1,y1,x2,y2)
##            msg += ' is degenerate:  area == %f' % self.areas[i]
##            assert self.areas[i] > 0.0, msg
#
#            # Normals
#            # The normal vectors
#            #   - point outward from each edge
#            #   - are orthogonal to the edge
#            #   - have unit length
#            #   - Are enumerated according to the opposite corner:
#            #     (First normal is associated with the edge opposite
#            #     the first vertex, etc)
#            #   - Stored as six floats n0x,n0y,n1x,n1y,n2x,n2y per triangle
#            n0 = num.array([x2-x1, y2-y1], num.float)
#            l0 = num.sqrt(num.sum(n0**2))
#
#            n1 = num.array([x0-x2, y0-y2], num.float)
#            l1 = num.sqrt(num.sum(n1**2))
#
#            n2 = num.array([x1-x0, y1-y0], num.float)
#            l2 = num.sqrt(num.sum(n2**2))
#
#            # Normalise
#            n0 /= l0
#            n1 /= l1
#            n2 /= l2
#
##            # Compute and store
##            self.normals[i, :] = [n0[1], -n0[0],
##                                  n1[1], -n1[0],
##                                  n2[1], -n2[0]]
#
#            # Edgelengths
#            #self.edgelengths[i, :] = [l0, l1, l2]
#
#
#
#            #Compute centroid
##            centroid = num.array([(x0 + x1 + x2)/3, (y0 + y1 + y2)/3], num.float)
###            self.centroid_coordinates[i] = centroid
##
##
##            if self.use_inscribed_circle == False:
##                #OLD code. Computed radii may exceed that of an
##                #inscribed circle
##
##                #Midpoints
##                m0 = num.array([(x1 + x2)/2, (y1 + y2)/2], num.float)
##                m1 = num.array([(x0 + x2)/2, (y0 + y2)/2], num.float)
##                m2 = num.array([(x1 + x0)/2, (y1 + y0)/2], num.float)
##
##                #The radius is the distance from the centroid of
##                #a triangle to the midpoint of the side of the triangle
##                #closest to the centroid
##                d0 = num.sqrt(num.sum( (centroid-m0)**2 ))
##                d1 = num.sqrt(num.sum( (centroid-m1)**2 ))
##                d2 = num.sqrt(num.sum( (centroid-m2)**2 ))
##
##                #self.radii[i] = min(d0, d1, d2)
##
##            else:
##                #NEW code added by Peter Row. True radius
##                #of inscribed circle is computed
##
##                a = num.sqrt((x0-x1)**2+(y0-y1)**2)
##                b = num.sqrt((x1-x2)**2+(y1-y2)**2)
##                c = num.sqrt((x2-x0)**2+(y2-y0)**2)
##
##                self.radii[i]=2.0*self.areas[i]/(a+b+c)


        # Build structure listing which triangles belong to which node.
        if verbose: log.critical('General Mesh: Building inverted triangle structure')
        self.build_inverted_triangle_structure()
        
        if verbose: log.timingInfo("aoi, '%s'" % self.get_area())
        

    def __len__(self):
        return self.number_of_triangles

    def __repr__(self):
        return ('Mesh: %d vertices, %d triangles'
                % (self.nodes.shape[0], len(self)))

    def get_normals(self):
        """Return all normal vectors.

        Return normal vectors for all triangles as an Nx6 array
        (ordered as x0, y0, x1, y1, x2, y2 for each triangle)
        """

        return self.normals

    def get_normal(self, i, j):
        """Return normal vector j of the i'th triangle.

        Return value is the numeric array slice [x, y]
        """

        return self.normals[i, 2*j:2*j+2]
        
    def get_edgelength(self, i, j):
        """Return length of j'th edge of the i'th triangle.
        Return value is the numeric array slice [x, y]
        """
        return self.edgelengths[i, j]
                

    def get_number_of_triangles(self):
        return self.number_of_triangles

    
    def get_number_of_nodes(self):
        return self.number_of_nodes


    def get_nodes(self, absolute=False):
        """Return all nodes in mesh.

        The nodes are ordered in an Nx2 array where N is the number of nodes.
        This is the same format they were provided in the constructor
        i.e. without any duplication.

        Boolean keyword argument absolute determines whether coordinates
        are to be made absolute by taking georeference into account
        Default is False as many parts of ANUGA expects relative coordinates.
        (To see which, switch to default absolute=True and run tests).
        """

        N = self.number_of_nodes
        V = self.nodes[:N,:]
        if absolute is True:
            if not self.geo_reference.is_absolute():
                V = self.geo_reference.get_absolute(V)

        return V

    def get_node(self, i, absolute=False):
        """Return node coordinates for triangle i.

        Boolean keyword argument absolute determines whether coordinates
        are to be made absolute by taking georeference into account
        Default is False as many parts of ANUGA expects relative coordinates.
        (To see which, switch to default absolute=True and run tests).

        Note: This method returns a modified _copy_ of the nodes slice if 
              absolute is True.  If absolute is False, just return the slice.
              This is related to the ensure_numeric() returning a copy problem.
        """

        V = self.nodes[i,:]
        if absolute is True:
            if not self.geo_reference.is_absolute():
                # get a copy so as not to modify the internal self.nodes array
                V = copy.copy(V)
                V += num.array([self.geo_reference.get_xllcorner(),
                                self.geo_reference.get_yllcorner()], num.float)
        return V

    def get_vertex_coordinates(self, triangle_id=None, absolute=False):
        """Return vertex coordinates for all triangles.

        Return all vertex coordinates for all triangles as a 3*M x 2 array
        where the jth vertex of the ith triangle is located in row 3*i+j and
        M is the number of triangles in the mesh.

        if triangle_id is specified (an integer) the 3 vertex coordinates
        for triangle_id are returned.

        Boolean keyword argument absolute determines whether coordinates
        are to be made absolute by taking georeference into account
        Default is False as many parts of ANUGA expects relative coordinates.
        """

        V = self.vertex_coordinates

        if triangle_id is None:
            if absolute is True:
                if not self.geo_reference.is_absolute():
                    V = self.geo_reference.get_absolute(V)
            return V
        else:
            i = triangle_id
            msg = 'triangle_id must be an integer'
            assert int(i) == i, msg
            assert 0 <= i < self.number_of_triangles

            i3 = 3*i
            if absolute is True and not self.geo_reference.is_absolute():
                offset=num.array([self.geo_reference.get_xllcorner(),
                                  self.geo_reference.get_yllcorner()], num.float)
                                  
                return V[i3:i3+3,:] + offset                                  
            else:
                return V[i3:i3+3,:]

    def get_vertex_coordinate(self, i, j, absolute=False):
        """Return coordinates for vertex j of the i'th triangle.
        Return value is the numeric array slice [x, y]
        """

        msg = 'vertex id j must be an integer in [0,1,2]'
        assert j in [0,1,2], msg

        V = self.get_vertex_coordinates(triangle_id=i, absolute=absolute)
        return V[j,:]



    def compute_vertex_coordinates(self):
        """Return all vertex coordinates for all triangles as a 3*M x 2 array
        where the jth vertex of the ith triangle is located in row 3*i+j.

        This function is used to precompute this important structure. Use
        get_vertex coordinates to retrieve the points.
        """

        M = self.number_of_triangles
        vertex_coordinates = num.zeros((3*M, 2), num.float)

        k0 = self.triangles[:,0]
        k1 = self.triangles[:,1]
        k2 = self.triangles[:,2]

#        I = num.arange(M,dtype=num.int)
#
#        V0 = V[0:3*M:3, :]
#        V1 = V[1:3*M:3, :]
#        V2 = V[2:3*M:3, :]

        vertex_coordinates[0:3*M:3,:] = self.nodes[k0,:]
        vertex_coordinates[1:3*M:3,:] = self.nodes[k1,:]
        vertex_coordinates[2:3*M:3,:] = self.nodes[k2,:]

#        for i in range(M):
#            for j in range(3):
#                k = self.triangles[i,j] # Index of vertex j in triangle i
#                vertex_coordinates[3*i+j,:] = self.nodes[k]

        return vertex_coordinates


    def get_edge_midpoint_coordinates(self, triangle_id=None, absolute=False):
        """Return edge midpoint coordinates for all triangles or from particular triangle.

        Return all edge midpoint coordinates for all triangles as a 3*M x 2 array
        where the jth midpoint of the ith triangle is located in row 3*i+j and
        M is the number of triangles in the mesh.

        if triangle_id is specified (an integer) the 3 midpoint coordinates
        for triangle_id are returned.

        Boolean keyword argument absolute determines whether coordinates
        are to be made absolute by taking georeference into account
        Default is False as many parts of ANUGA expects relative coordinates.
        """

        E = self.edge_midpoint_coordinates
        
        if triangle_id is None:
            if absolute is True:
                if not self.geo_reference.is_absolute():
                    E = self.geo_reference.get_absolute(E)
            return E
        else:
            i = triangle_id
            msg = 'triangle_id must be an integer'
            assert int(i) == i, msg
            assert 0 <= i < self.number_of_triangles

            i3 = 3*i
            if absolute is True and not self.geo_reference.is_absolute():
                offset=num.array([self.geo_reference.get_xllcorner(),
                                  self.geo_reference.get_yllcorner()], num.float)

                return E[i3:i3+3,:] + offset                                  
            else:
                return E[i3:i3+3,:]


    def get_edge_midpoint_coordinate(self, i, j, absolute=False):
        """Return coordinates for edge midpoint j of the i'th triangle.
        Return value is the numeric array slice [x, y]
        """

        msg = 'edge midpoint id j must be an integer in [0,1,2]'
        assert j in [0,1,2], msg

        E = self.get_edge_midpoint_coordinates(triangle_id=i, absolute=absolute)
        return E[j,:] # Return (x, y) for edge mid point

    
    def compute_edge_midpoint_coordinates(self):
        """Return all edge midpoint coordinates for all triangles as a 3*M x 2 array
        where the jth edge midpoint of the ith triangle is located in row 3*i+j.

        This function is used to precompute this important structure. Use
        get_edge_midpoint_coordinates to retrieve the points.

        Assumes that vertex_coordinates have been computed
        """

        M = self.number_of_triangles
        E = num.zeros((3*M, 2), num.float)

        V = self.vertex_coordinates

        V0 = V[0:3*M:3, :]
        V1 = V[1:3*M:3, :]
        V2 = V[2:3*M:3, :]

        
        #print V.shape, V0.shape, V1.shape, V2.shape

        #print E.shape, E[0:3*M:3, :].shape, E[1:3*M:3, :].shape, E[2:3*M:3, :].shape
        E[0:3*M:3, :] = 0.5*(V1+V2)
        E[1:3*M:3, :] = 0.5*(V2+V0)
        E[2:3*M:3, :] = 0.5*(V0+V1)

        return E



    def get_triangles(self, indices=None):
        """Get mesh triangles.

        Return Mx3 integer array where M is the number of triangles.
        Each row corresponds to one triangle and the three entries are
        indices into the mesh nodes which can be obtained using the method
        get_nodes()

        Optional argument, indices is the set of triangle ids of interest.
        """


        if indices is None:
            return self.triangles

        return num.take(self.triangles, indices, axis=0)

    def get_disconnected_triangles(self):
        """Get mesh based on nodes obtained from get_vertex_coordinates.

        Return an Mx3 array of integers where each row corresponds to
        a triangle. A triangle is a triplet of indices into
        point coordinates obtained from get_vertex_coordinates and each
        index appears only once

        This provides a mesh where no triangles share nodes
        (hence the name disconnected triangles) and different
        nodes may have the same coordinates.

        This version of the mesh is useful for storing meshes with
        discontinuities at each node and is e.g. used for storing
        data in sww files.

        The triangles created will have the format
        [[0,1,2],
         [3,4,5],
         [6,7,8],
         ...
         [3*M-3 3*M-2 3*M-1]]
        """

        M = len(self) # Number of triangles
        K = 3*M       # Total number of unique vertices
        return num.reshape(num.arange(K, dtype=num.int), (M,3))

    def get_unique_vertices(self, indices=None):
        """FIXME(Ole): This function needs a docstring"""

        triangles = self.get_triangles(indices=indices)
        unique_verts = {}
        for triangle in triangles:
            unique_verts[triangle[0]] = 0
            unique_verts[triangle[1]] = 0
            unique_verts[triangle[2]] = 0
        return list(unique_verts.keys())

        # Note Padarn 27/11/12:
        # This function was modified, but then it was decided it was not
        # needed. It should be restored if it is used elsewhere in the code
        # (it was being used in quantity.py in the _set_vertex_values function).
        # Note however, the function in the head of the code is very slow and
        # could be easily sped up many fold.
    def get_triangles_and_vertices_per_node(self, node=None):
        """Get triangles associated with given node.

        Return list of triangle_ids, vertex_ids for specified node.
        If node is None or absent, this information will be returned
        for all nodes in a list L where L[v] is the triangle
        list for node v.
        """

        triangle_list = []
        if node is not None:
            # Get index for this node
            #first = num.sum(self.number_of_triangles_per_node[:node])

            first = self.node_index[node]
            # Get number of triangles for this node
            count = self.number_of_triangles_per_node[node]

            for i in range(count):
                index = self.vertex_value_indices[first+i]

                volume_id = index // 3
                vertex_id = index % 3

                triangle_list.append( (volume_id, vertex_id) )

            triangle_list = num.array(triangle_list, num.int)    #array default#
        else:
            # Get info for all nodes recursively.
            # If need be, we can speed this up by
            # working directly with the inverted triangle structure
            for i in range(self.number_of_nodes):
                L = self.get_triangles_and_vertices_per_node(node=i)
                triangle_list.append(L)

        return triangle_list

    def build_inverted_triangle_structure(self):
        """Build structure listing triangles belonging to each node

        Two arrays are created and stored as mesh attributes

        number_of_triangles_per_node: An integer array of length N
        listing for each node how many triangles use it. N is the number of
        nodes in mesh.

        vertex_value_indices: An array of length M listing indices into
        triangles ordered by node number. The (triangle_id, vertex_id)
        pairs are obtained from each index as (index/3, index%3) or each
        index can be used directly into a flat triangles array. This
        is for example the case in the quantity.c where this structure is
        used to average vertex values efficiently.

        Example:
        a = [0.0, 0.0] # node 0
        b = [0.0, 2.0] # node 1
        c = [2.0, 0.0] # node 2
        d = [0.0, 4.0] # node 3
        e = [2.0, 2.0] # node 4
        f = [4.0, 0.0] # node 5
        nodes = array([a, b, c, d, e, f])

        #                    bac,     bce,     ecf,     dbe
        triangles = array([[1,0,2], [1,2,4], [4,2,5], [3,1,4]])

        For this structure:
        number_of_triangles_per_node = [1 3 3 1 3 1]
        which means that node a has 1 triangle associated with it, node b
        has 3, node c has 3 and so on.

        vertex_value_indices = [ 1  0  3 10  2  4  7  9  5  6 11  8]
        which reflects the fact that
        node 0 is used by triangle 0, vertex 1 (index = 1)
        node 1 is used by triangle 0, vertex 0 (index = 0)
                   and by triangle 1, vertex 0 (index = 3)
                   and by triangle 3, vertex 1 (index = 10)
        node 2 is used by triangle 0, vertex 2 (index = 2)
                   and by triangle 1, vertex 1 (index = 4)
                   and by triangle 2, vertex 1 (index = 7)
        node 3 is used by triangle 3, vertex 0 (index = 9)
        node 4 is used by triangle 1, vertex 2 (index = 5)
                   and by triangle 2, vertex 0 (index = 6)
                   and by triangle 3, vertex 2 (index = 11)
        node 5 is used by triangle 2, vertex 2 (index = 8)

        Preconditions:
          self.nodes and self.triangles are defined

        Postcondition:
          self.number_of_triangles_per_node is built
          self.vertex_value_indices is built
        """

        # Count number of triangles per node
#        number_of_triangles_per_node = num.zeros(self.number_of_nodes,
#                                                 num.int)       #array default#
#        for volume_id, triangle in enumerate(self.get_triangles()):
#            for vertex_id in triangle:
#                number_of_triangles_per_node[vertex_id] += 1

        # Need to pad number_of_triangles_per_node in case lone nodes at end of list
        #number_of_triangles_per_node = num.zeros(self.number_of_nodes, num.int)

        number_of_triangles_per_node = num.bincount(self.triangles.flat)

        number_of_lone_nodes = self.number_of_nodes - len(number_of_triangles_per_node)

        #print number_of_lone_nodes
        if number_of_lone_nodes > 0:
            number_of_triangles_per_node =  \
               num.append(number_of_triangles_per_node,num.zeros(number_of_lone_nodes,num.int))

        #assert num.allclose(number_of_triangles_per_node_new, number_of_triangles_per_node)

        # Allocate space for inverted structure
        number_of_entries = num.sum(number_of_triangles_per_node)

        assert number_of_entries == 3*self.number_of_triangles
        
        #vertex_value_indices = num.zeros(number_of_entries, num.int) #array default#

        # Array of vertex_indices (3*vol_id+vertex_id) sorted into contiguous
        # order around each node. Use with number_of_triangles_per_node to
        # find vertices associated with a node.
        # ie There are  number_of_triangles_per_node[i] vertices
        vertex_value_indices = num.argsort(self.triangles.flat)
        #vertex_value_indices = num.argsort(self.triangles.flatten())

#        node_index = num.zeros((self.number_of_nodes)+1, dtype = num.int)
#        node_index[0] = 0
#        for i in xrange(self.number_of_nodes):
#            node_index[i+1] = node_index[i] + number_of_triangles_per_node[i]

        node_index = num.zeros((self.number_of_nodes)+1, dtype = num.int)
        node_index[1:] = num.cumsum(number_of_triangles_per_node)

        #assert num.allclose(node_index,node_index_new)




        # Save structures
        self.node_index = node_index
        self.number_of_triangles_per_node = number_of_triangles_per_node
        self.vertex_value_indices = vertex_value_indices

    def get_extent(self, absolute=False):
        """Return min and max of all x and y coordinates

        Boolean keyword argument absolute determines whether coordinates
        are to be made absolute by taking georeference into account
        """

        C = self.get_vertex_coordinates(absolute=absolute)
        X = C[:,0:6:2].copy()
        Y = C[:,1:6:2].copy()

        xmin = num.min(X)
        xmax = num.max(X)
        ymin = num.min(Y)
        ymax = num.max(Y)

        return xmin, xmax, ymin, ymax

    def get_areas(self):
        """Get areas of all individual triangles."""

        return self.areas

    def get_area(self):
        """Return total area of mesh"""

        return num.sum(self.areas)

    def set_georeference(self, g):
        self.geo_reference = g

    def get_georeference(self):
        return self.geo_reference
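
# Hedged usage sketch (illustration only, not part of the library source).
# Builds the two-triangle mesh from the General_mesh docstring above and
# queries a few of the geometric quantities computed in __init__.
a = [0.0, 0.0]
b = [0.0, 2.0]
c = [2.0, 0.0]
e = [2.0, 2.0]

nodes = [a, b, c, e]
triangles = [[1, 0, 2], [1, 2, 3]]        # bac, bce (counter clock-wise)

mesh = General_mesh(nodes, triangles)

print(len(mesh))                          # 2 triangles
print(mesh.get_areas())                   # per-triangle areas: [2. 2.]
print(mesh.get_area())                    # total mesh area: 4.0
print(mesh.get_normal(0, 1))              # outward normal opposite vertex 1 of triangle 0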
def get_maximum_inundation_data(filename, polygon=None, time_interval=None,
                                use_centroid_values=True,
                                verbose=False):
    """Compute maximum run up height from sww file.

    filename             path to SWW file to read
    polygon              if specified, restrict to points inside this polygon,
                         assumed to be in absolute coordinates and in the same
                         zone as the domain
    time_interval        if specified, restrict to within the period specified
    use_centroid_values  if True (default), average nodal values onto triangle
                         centroids before computing the runup
    verbose              True if this function is to be verbose

    Returns (maximal_runup, maximal_runup_location).

    Usage:
    runup, location = get_maximum_inundation_data(filename,
                                                  polygon=None,
                                                  time_interval=None,
                                                  verbose=False)

    Algorithm is as in get_maximum_inundation_elevation from
    shallow_water_domain except that this function works with the SWW file and
    computes the maximal runup height over multiple timesteps.

    If no inundation is found within polygon and time_interval, the return
    value is None, signifying "No Runup" or "Everything is dry".
    """

    # We are using nodal values here as that is what is stored in sww files.

    # Water depth below which it is considered to be 0 in the model
    # FIXME (Ole): Allow this to be specified as a keyword argument as well

    from anuga.geometry.polygon import inside_polygon
    from anuga.config import minimum_allowed_height
    from anuga.file.netcdf import NetCDFFile

    dir, base = os.path.split(filename)

    iterate_over = get_all_swwfiles(dir, base)

    if verbose:
        print(iterate_over)
        
    # Read sww file
    if verbose: log.critical('Reading from %s' % filename)
    # FIXME: Use general swwstats (when done)

    maximal_runup = None
    maximal_runup_location = None

    for swwfile in iterate_over:
        # Read sww file
        filename = os.path.join(dir, swwfile+'.sww')

        if verbose: log.critical('Reading from %s' % filename)
        # FIXME: Use general swwstats (when done)

        fid = NetCDFFile(filename)

        # Get geo_reference
        # sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError:
            geo_reference = Geo_reference() # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()

        # Get extent
        volumes = fid.variables['volumes'][:]
        x = fid.variables['x'][:] + xllcorner
        y = fid.variables['y'][:] + yllcorner

        # Get the relevant quantities (converting from single precision)
        elevation = num.array(fid.variables['elevation'][:], num.float)
        stage = num.array(fid.variables['stage'][:], num.float)

        if verbose:
            print('stage.shape ', stage.shape)
            print('elevation.shape ', elevation.shape)
            
        # Convert nodal information to centroid information by averaging the
        # three vertex values of each triangle (done here with numpy; a C
        # implementation would be faster).
        if use_centroid_values is True:
            vols0=volumes[:,0]
            vols1=volumes[:,1]
            vols2=volumes[:,2]
            # Then use these to compute centroid averages 
            x=(x[vols0]+x[vols1]+x[vols2])/3.0
            y=(y[vols0]+y[vols1]+y[vols2])/3.0

            elevation=(elevation[vols0]+elevation[vols1]+elevation[vols2])/3.0
            stage=(stage[:,vols0]+stage[:,vols1]+stage[:,vols2])/3.0

        # Spatial restriction
        if polygon is not None:
            msg = 'polygon must be a sequence of points.'
            assert len(polygon[0]) == 2, msg
            # FIXME (Ole): Make a generic polygon input check in polygon.py
            # and call it here
            points = num.ascontiguousarray(num.concatenate((x[:, num.newaxis],
                                                            y[:, num.newaxis]),
                                                            axis=1))
            point_indices = inside_polygon(points, polygon)

            # Restrict quantities to polygon
            elevation = num.take(elevation, point_indices, axis=0)
            stage = num.take(stage, point_indices, axis=1)

            # Get info for location of maximal runup
            points_in_polygon = num.take(points, point_indices, axis=0)

            x = points_in_polygon[:,0]
            y = points_in_polygon[:,1]
        else:
            # Take all points
            point_indices = num.arange(len(x))

        # Temporal restriction
        time = fid.variables['time'][:]
        if verbose:
            print(time)
        all_timeindices = num.arange(len(time))
        if time_interval is not None:
            msg = 'time_interval must be a sequence of length 2.'
            assert len(time_interval) == 2, msg
            msg = 'time_interval %s must not be decreasing.' % time_interval
            assert time_interval[1] >= time_interval[0], msg
            msg = 'Specified time interval [%.8f:%.8f] ' % tuple(time_interval)
            msg += 'does not overlap model time interval: [%.8f, %.8f]\n' \
                   % (time[0], time[-1])
            if time_interval[1] < time[0]:
                fid.close()
                raise ValueError(msg)
            if time_interval[0] > time[-1]:
                fid.close()
                raise ValueError(msg)

            # Take time indices corresponding to interval (& is bitwise AND)
            timesteps = num.compress((time_interval[0] <= time) \
                                     & (time <= time_interval[1]),
                                     all_timeindices)

            msg = 'time_interval %s did not include any model timesteps.' \
                  % time_interval
            assert len(timesteps) > 0, msg
        else:
            # Take them all
            timesteps = all_timeindices

        fid.close()

        # Compute maximal runup for each timestep
        for i in timesteps:
            stage_i = stage[i,:]
            depth = stage_i - elevation

            if verbose:
                print('++++++++')
            # Get wet nodes i.e. nodes with depth>0 within given region
            # and timesteps
            wet_nodes = num.where(depth > 0.0)[0]


            if verbose:
                print(stage_i.shape)
                print(num.max(stage_i))


            if len(wet_nodes) == 0:
                runup = None
            else:
                # Find maximum elevation among wet nodes
                wet_elevation = num.take(elevation, wet_nodes, axis=0)


                runup_index = num.argmax(wet_elevation)
                runup = max(wet_elevation)
                if verbose:
                    print('max(wet_elevation) ', max(wet_elevation))
                assert wet_elevation[runup_index] == runup       # Must be True

            if runup is not None and \
                    (maximal_runup is None or runup > maximal_runup):
                maximal_runup = runup

                # Record location
                wet_x = num.take(x, wet_nodes, axis=0)
                wet_y = num.take(y, wet_nodes, axis=0)
                maximal_runup_location = [wet_x[runup_index],
                                          wet_y[runup_index]]
            if verbose:
                print(i, runup)

    return maximal_runup, maximal_runup_location


def sww2array(
        name_in,
        quantity=None,  # defaults to elevation
        reduction=None,
        cellsize=10,
        number_of_decimal_places=None,
        NODATA_value=-9999.0,
        easting_min=None,
        easting_max=None,
        northing_min=None,
        northing_max=None,
        verbose=False,
        origin=None,
        datum='WGS84',
        block_size=None):
    """Read SWW file and convert to a numpy array (can be stored to a png file later)


    The parameter quantity must be the name of an existing quantity or
    an expression involving existing quantities. The default is
    'elevation'. Quantity is not a list of quantities.

    If reduction is given and is an index, sww2array will output the quantity
    at that timestep. If reduction is a built-in function (eg max, min, mean),
    that function is used to reduce the quantity over all timesteps. If
    reduction is not given, it defaults to max.

    datum       geodetic datum, default 'WGS84'

    block_size  sets the number of slices along the non-time axis to
                process in one block.
    """

    import sys
    import types

    from anuga.geometry.polygon import inside_polygon, outside_polygon
    from anuga.abstract_2d_finite_volumes.util import \
         apply_expression_to_dictionary

    basename_in, in_ext = os.path.splitext(name_in)

    if in_ext != '.sww':
        raise IOError('Input format for %s must be .sww' % name_in)

    false_easting = 500000
    false_northing = 10000000

    if quantity is None:
        quantity = 'elevation'

    if reduction is None:
        reduction = max

    if quantity in quantity_formula:
        quantity = quantity_formula[quantity]

    if number_of_decimal_places is None:
        number_of_decimal_places = 3

    if block_size is None:
        block_size = DEFAULT_BLOCK_SIZE

    assert isinstance(block_size, (int, float))

    # Read sww file
    if verbose:
        log.critical('Reading from %s' % name_in)

    from anuga.file.netcdf import NetCDFFile
    fid = NetCDFFile(name_in)

    #Get extent and reference
    x = num.array(fid.variables['x'], num.float)
    y = num.array(fid.variables['y'], num.float)
    volumes = num.array(fid.variables['volumes'], num.int)
    if type(reduction) is not types.BuiltinFunctionType:
        times = fid.variables['time'][reduction]
    else:
        times = fid.variables['time'][:]

    number_of_timesteps = fid.dimensions['number_of_timesteps']
    number_of_points = fid.dimensions['number_of_points']

    if origin is None:
        # Get geo_reference
        # sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError as e:
            geo_reference = Geo_reference()  # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()
        zone = geo_reference.get_zone()
    else:
        zone = origin[0]
        xllcorner = origin[1]
        yllcorner = origin[2]

    # FIXME: Refactor using code from Interpolation_function.statistics
    # (in interpolate.py)
    # Something like print swwstats(swwname)
    if verbose:
        log.critical('------------------------------------------------')
        log.critical('Statistics of SWW file:')
        log.critical('  Name: %s' % name_in)
        log.critical('  Reference:')
        log.critical('    Lower left corner: [%f, %f]' %
                     (xllcorner, yllcorner))
        if type(reduction) is not types.BuiltinFunctionType:
            log.critical('    Time: %f' % times)
        else:
            log.critical('    Start time: %f' % fid.starttime[0])
        log.critical('  Extent:')
        log.critical('    x [m] in [%f, %f], len(x) == %d' %
                     (num.min(x), num.max(x), len(x.flat)))
        log.critical('    y [m] in [%f, %f], len(y) == %d' %
                     (num.min(y), num.max(y), len(y.flat)))
        if type(reduction) is not types.BuiltinFunctionType:
            log.critical('    t [s] = %f, len(t) == %d' % (times, 1))
        else:
            log.critical('    t [s] in [%f, %f], len(t) == %d' %
                         (min(times), max(times), len(times)))
        log.critical('  Quantities [SI units]:')

        # The following block can be commented out to reduce memory consumption
        for name in ['stage', 'xmomentum', 'ymomentum']:
            q = fid.variables[name][:].flatten()
            if type(reduction) is not types.BuiltinFunctionType:
                q = q[reduction * len(x):(reduction + 1) * len(x)]
            if verbose:
                log.critical('    %s in [%f, %f]' % (name, min(q), max(q)))
        for name in ['elevation']:
            q = fid.variables[name][:].flatten()
            if verbose:
                log.critical('    %s in [%f, %f]' % (name, min(q), max(q)))

    # Get the variables in the supplied expression.
    # This may throw a SyntaxError exception.
    var_list = get_vars_in_expression(quantity)
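    # For example, for quantity = 'stage - elevation' the variable list would
    # contain the names 'stage' and 'elevation'.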

    # Check that we have the required variables in the SWW file.
    missing_vars = []
    for name in var_list:
        try:
            _ = fid.variables[name]
        except KeyError:
            missing_vars.append(name)
    if missing_vars:
        msg = (
            "In expression '%s', variables %s are not in the SWW file '%s'" %
            (quantity, str(missing_vars), name_in))
        raise_(Exception, msg)

    # Create result array and start filling, block by block.
    result = num.zeros(number_of_points, num.float)

    if verbose:
        msg = 'Slicing sww file, num points: ' + str(number_of_points)
        msg += ', block size: ' + str(block_size)
        log.critical(msg)

    for start_slice in range(0, number_of_points, block_size):
        # Limit slice size to array end if at last block
        end_slice = min(start_slice + block_size, number_of_points)

        # Get slices of all required variables
        if type(reduction) is not types.BuiltinFunctionType:
            q_dict = {}
            for name in var_list:
                # check if variable has time axis
                if len(fid.variables[name].shape) == 2:
                    # Time-dependent variable: slice out just the requested
                    # timestep to avoid reading a large array into memory.
                    q_dict[name] = fid.variables[name][reduction,
                                                       start_slice:end_slice]
                else:  # no time axis
                    q_dict[name] = fid.variables[name][start_slice:end_slice]

            # Evaluate expression with quantities found in SWW file
            res = apply_expression_to_dictionary(quantity, q_dict)

        else:
            q_dict = {}
            for name in var_list:
                # check if variable has time axis
                if len(fid.variables[name].shape) == 2:
                    q_dict[name] = fid.variables[name][:,
                                                       start_slice:end_slice]
                else:  # no time axis
                    q_dict[name] = fid.variables[name][start_slice:end_slice]

            # Evaluate expression with quantities found in SWW file
            res = apply_expression_to_dictionary(quantity, q_dict)

            if len(res.shape) == 2:
                new_res = num.zeros(res.shape[1], num.float)
                for k in range(res.shape[1]):
                    if type(reduction) is not types.BuiltinFunctionType:
                        new_res[k] = res[reduction, k]
                    else:
                        new_res[k] = reduction(res[:, k])
                res = new_res

        result[start_slice:end_slice] = res

    # Post condition: Now q has dimension: number_of_points
    assert len(result.shape) == 1
    assert result.shape[0] == number_of_points

    if verbose:
        log.critical('Processed values for %s are in [%f, %f]' %
                     (quantity, min(result), max(result)))

    # Create grid and update xll/yll corner and x,y
    # Relative extent
    if easting_min is None:
        xmin = min(x)
    else:
        xmin = easting_min - xllcorner

    if easting_max is None:
        xmax = max(x)
    else:
        xmax = easting_max - xllcorner

    if northing_min is None:
        ymin = min(y)
    else:
        ymin = northing_min - yllcorner

    if northing_max is None:
        ymax = max(y)
    else:
        ymax = northing_max - yllcorner

    msg = 'xmax must be greater than or equal to xmin.\n'
    msg += 'I got xmin = %f, xmax = %f' % (xmin, xmax)
    assert xmax >= xmin, msg

    msg = 'ymax must be greater than or equal to ymin.\n'
    msg += 'I got ymin = %f, ymax = %f' % (ymin, ymax)
    assert ymax >= ymin, msg

    if verbose: log.critical('Creating grid')
    ncols = int(old_div((xmax - xmin), cellsize)) + 1
    nrows = int(old_div((ymax - ymin), cellsize)) + 1
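    # For example, xmin = 0, xmax = 100 and cellsize = 10 give
    # ncols = int(100 / 10) + 1 = 11 grid columns (similarly for nrows).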

    # New absolute reference and coordinates
    newxllcorner = xmin + xllcorner
    newyllcorner = ymin + yllcorner

    x = x + xllcorner - newxllcorner
    y = y + yllcorner - newyllcorner

    grid_values = num.zeros((nrows * ncols, ), num.float)

    num_tri = len(volumes)
    norms = num.zeros(6 * num_tri, num.float)

    # Use fast method to calculate grid values
    from .calc_grid_values_ext import calc_grid_values

    calc_grid_values(nrows, ncols, cellsize, NODATA_value, x, y, norms,
                     volumes, result, grid_values)

    fid.close()

    if verbose:
        log.critical('Interpolated values are in [%f, %f]' %
                     (num.min(grid_values), num.max(grid_values)))

    return x, y, grid_values.reshape(nrows, ncols)[::-1, :]
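# A hypothetical sketch of saving the returned grid as a png (matplotlib is
# an assumption here, not a dependency of this function):
#
#     x, y, grid = sww2array('flood.sww', quantity='stage - elevation')
#     import matplotlib.pyplot as plt
#     masked = num.ma.masked_equal(grid, -9999.0)   # hide NODATA cells
#     plt.imshow(masked, origin='upper')
#     plt.colorbar()
#     plt.savefig('flood_depth.png')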