Example 1
    def __init__(self, filename, make_directories=True, output_log=None):
        """Parse the input data then process it for ANUGA

            @param filename Configuration file (xls)
            @param make_directories Create output directories for the simulation
            @param output_log Filename to redirect stdout to (inside the output
                directory)

        """
        # Get the 'raw' data
        ProjectData.__init__(self, filename)

        # Create a unique output directory, and redirect stdout there
        self.define_output_directory_and_redirect_stdout(
            make_directories=make_directories,
            output_log=output_log)

        # Read files / pre-process
        self.process_project_data()

        if make_directories:
            # Make other required directories, copy code files, etc
            self.setup_directory_structure()

            # Export spatial data to some text
            self.export_spatial_data_as_text()

        barrier()
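
# Usage sketch (assumption): the class that owns this __init__ is not shown
# in this excerpt, so "ProjectRun" below is a hypothetical subclass of
# ProjectData. Each MPI rank constructs the same object; barrier() keeps
# the ranks in step while directories are created.
project = ProjectRun('project_config.xls',
                     make_directories=True,
                     output_log='run_log.txt')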
Example 3
def print_operator_inputs(domain):
    """

        Utility for printing discharge information for some operators

    """

    # Shorthand notation
    operators = domain.fractional_step_operators

    # Rainfall first
    if myid == 0:
        for i in range(len(operators)):
            if hasattr(operators[i], 'rate'):
                print '    Operator ' + operators[i].label + \
                      ' rate = ' + str(operators[i].rate(domain.time))

    barrier()

    # Inlets
    for i in range(len(operators)):
        if hasattr(operators[i], 'applied_Q'):
            print '    Operator ' + operators[i].label + \
                  ' Q = ' + str(operators[i].applied_Q)
    barrier()

    if myid == 0:
        print ' '

    return
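
# Usage sketch (assumption): typically called from inside the evolve loop
# of a parallel ANUGA run whose domain has rainfall/inlet operators
# attached.
for t in domain.evolve(yieldstep=60., finaltime=3600.):
    if myid == 0:
        domain.print_timestepping_statistics()
    print_operator_inputs(domain)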
Example 4
def print_velocity_statistics(domain, max_quantities):
    """
    Utility for printing velocity stats in the evolve loop
    """

    # Print velocity statistics

    for i in range(numprocs):
        if myid == i:

            # Peak speed info

            xx = domain.quantities['xmomentum'].centroid_values
            yy = domain.quantities['ymomentum'].centroid_values
            dd = domain.quantities['stage'].centroid_values \
                - domain.quantities['elevation'].centroid_values
            # Clamp depths below 1 mm to 1 mm to avoid division blow-up
            dd = dd * (dd > 1.0e-03) + 1.0e-03 * (dd <= 1.0e-03)
            # Speed = |momentum| / depth, zeroed where depth is at the floor
            vv = 1 / dd * (xx ** 2 + yy ** 2) ** 0.5
            vv = vv * (dd > 1.0e-03)
            print '    Processor ', myid
            print '    @ Peak velocity is: ', vv.max(), vv.argmax()
            print '     &- MaxSpeedHistory: ', \
                max_quantities.max_speed.max()
            print '     %- FUF: ', domain.flux_update_frequency.mean()
        else:
            pass
        barrier()

    # Make a newline

    if myid == 0:
        print ''

    return
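
# Usage sketch (assumption): max_quantities is assumed to be an object
# that tracks peak values over the run and exposes a max_speed array;
# called periodically from the evolve loop.
for t in domain.evolve(yieldstep=300., finaltime=7200.):
    print_velocity_statistics(domain, max_quantities)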
Example 6
    def define_output_directory_and_redirect_stdout(self,
                                                    make_directories=True, 
                                                    output_log=None):
        """Make the main output directory, and redirect stdout to a file there

            @param make_directories Create the output directories if True
            @param output_log Name of the file (stored inside the output
                    directory) which will hold stdout

        """
        if myid == 0:
            runtime = time.strftime('%Y%m%d_%H%M%S', time.localtime())
            for i in range(1, numprocs):
                send(runtime, i)
        else:
            runtime = receive(0)
        barrier()

        self.output_dir = self.output_basedir + 'RUN_' + str(runtime) +\
            '_' + self.scenario

        if make_directories and myid == 0:
            # The directories may already exist; that is not an error here
            try:
                os.mkdir(self.output_basedir)
            except OSError:
                pass

            # Make the output directory

            try:
                os.mkdir(self.output_dir)
            except OSError:
                pass

            print 'OUTPUT_DIRECTORY: ' + str(self.output_dir)

        # Send stdout to a file inside the output directory
        if output_log is not None:
            if make_directories:
                stdout_file = self.output_dir + '/' + output_log
            else:
                stdout_file = output_log

            if myid == 0:
                print 'Redirecting output now to ' + stdout_file
                sys.stdout = Logger(stdout_file)
            barrier()

            if myid != 0:
                sys.stdout = Logger(stdout_file)

        return
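
# Resulting name (sketch): self.output_dir is built by plain concatenation,
#   <output_basedir>RUN_<YYYYmmdd_HHMMSS>_<scenario>
# so output_basedir is assumed to end with a path separator; output_log,
# when given, is created inside that directory and receives stdout.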
Example 8
            'bottom2': [7]
        },
        maximum_triangle_area=1.0e+06,  #0.5*l0*l0,
        minimum_triangle_angle=28.0,
        filename='channel_floodplain1.msh',
        interior_regions=[],
        breaklines=breakLines.values(),
        regionPtArea=regionPtAreas,
        verbose=True)
    domain = anuga.create_domain_from_file('channel_floodplain1.msh')
    domain.set_name('channel_floodplain1')  # Output name
    domain.set_flow_algorithm(alg)
else:
    domain = None

barrier()
domain = distribute(domain)
barrier()
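
# Pattern note (sketch): only rank 0 built the mesh and domain above; all
# other ranks passed None to distribute(), which partitions rank 0's
# domain and hands each process its local sub-domain.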

domain.set_store_vertices_uniquely(True)

#------------------------------------------------------------------------------
#
# Setup initial conditions
#
#------------------------------------------------------------------------------


# Function for topography
def topography(x, y):
    # Longitudinally sloping floodplain with channel in centre
Example 9
def setup_mesh(project, setup_initial_conditions=None):
    """
    Code to make the mesh (initial domain)

    The geometry is made on processor 0, then dumped and reloaded.
    This reduces the memory demands.

    INPUT: project -- the project module

    OUTPUT: domain
    """

    if myid == 0:

        if verbose:
            print "Hello from processor ", myid

        #
        # HERE, WE MAKE/PARTITION/READ THE MESH
        # This can lead to memory savings in parallel runs
        # (possibly because of limitations of python memory management)
        #

        # Let's see if we have already pickled this domain

        pickle_name = "domain" + "_P%g_%g.pickle" % (1, 0)
        pickle_name = join(project.partition_dir, pickle_name)

        if os.path.exists(pickle_name):
            if verbose:
                print "Saved domain seems to already exist"
        else:
            if verbose:
                print "CREATING PARTITIONED DOMAIN"
            domain = build_mesh(project)

            if setup_initial_conditions is not None:
                # Set the initial conditions in serial
                setup_initial_conditions.setup_initial_conditions(domain, project)

            # If pypar is not available don't use sequential_distribute stuff
            # (it will fail)

            if pypar_available:
                if verbose:
                    print "Saving Domain"
                sequential_distribute_dump(domain, 1, partition_dir=project.partition_dir, verbose=verbose)

        # If pypar is not available don't use sequential_distribute stuff (it
        # will fail)

        if pypar_available:

            # Now partition the domain

            par_pickle_name = "domain" + "_P%g_%g.pickle" % (numprocs, 0)
            par_pickle_name = join(project.partition_dir, par_pickle_name)
            if os.path.exists(par_pickle_name):
                if verbose:
                    print "Saved partitioned domain seems to already exist"
            else:
                if verbose:
                    print "Load in saved sequential pickled domain"
                domain = sequential_distribute_load_pickle_file(pickle_name, np=1, verbose=verbose)

                if verbose:
                    print "Dump partitioned domains"
                sequential_distribute_dump(domain, numprocs, partition_dir=project.partition_dir, verbose=verbose)

            # This can reduce the memory demands if the domain was made above
            # Even better to have an existing partition (i.e. stop here and
            # rerun)

            domain = None
            gc.collect()
    else:

        domain = None
        if verbose:
            print "Hello from processor ", myid

    barrier()

    # print 'Distributing domain'
    # domain=distribute(domain)
    # barrier()

    # If pypar is not available don't use sequential_distribute stuff (it will
    # fail)

    if pypar_available:
        if myid == 0:
            print "LOADING PARTITIONED DOMAIN"

        domain = sequential_distribute_load(filename=join(project.partition_dir, "domain"), verbose=verbose)

    # #########################################################################
    # Set output directories
    # #########################################################################

    domain.set_name(project.scenario)  # Name of sww file
    domain.set_datadir(project.output_dir)  # Store sww output here

    # Needs more changes for this to work
    # domain.set_checkpointing(checkpoint_time=project.checkpoint_time)

    # #########################################################################
    # Miscellaneous numerics
    # #########################################################################

    domain.set_flow_algorithm(project.flow_algorithm)

    # Force zero beta values [hopefully integrated into source]
    # print 'Warning: Forcing everything to first order'
    # domain.beta_w=0.
    # domain.beta_uh=0.
    # domain.beta_vh=0.
    # domain.beta_w_dry=0.
    # domain.beta_uh_dry=0.
    # domain.beta_vh_dry=0.

    # Adjust velocity computation for max quantities
    # domain.velocity_protection=1.0e-05

    # Adjust CFL
    # domain.set_CFL(0.9)

    # Optionally store vertex values uniquely (large file sizes!)

    domain.set_store_vertices_uniquely(project.store_vertices_uniquely)

    if project.use_local_extrapolation_and_flux_updating:
        domain.set_local_extrapolation_and_flux_updating()

    if project.store_elevation_every_timestep:
        domain.quantities_to_be_stored["elevation"] = 2
    else:
        domain.quantities_to_be_stored["elevation"] = 1

    return domain
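
# Usage sketch (assumption): "project" is a module-like object carrying the
# attributes referenced above (partition_dir, output_dir, scenario,
# flow_algorithm, ...); passing a setup_initial_conditions module is
# optional and only used when the domain is first built.
import project
import setup_initial_conditions
domain = setup_mesh(project, setup_initial_conditions=setup_initial_conditions)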
    def sequential_time_varying_file_boundary_sts(self):
        """sequential_ltest_time_varying_file_boundary_sts_sequential(self):
        Read correct points from ordering file and apply sts to boundary. The boundary is time varying. FIXME add to test_urs2sts.
        """
        lat_long_points = [[6.01, 97.0], [6.02, 97.0], [6.05, 96.9],
                           [6.0, 97.0]]
        bounding_polygon = [[6.0, 97.0], [6.01, 97.0], [6.02, 97.0],
                            [6.02, 97.02], [6.00, 97.02]]
        tide = 3.0
        time_step_count = 65
        time_step = 2.
        n = len(lat_long_points)
        first_tstep = num.ones(n, num.int)
        last_tstep = (time_step_count) * num.ones(n, num.int)
        finaltime = num.float(time_step * (time_step_count - 1))
        yieldstep = num.float(time_step)
        gauge_depth = 20 * num.ones(n, num.float)
        ha = 2 * num.ones((n, time_step_count), num.float)
        ua = 10 * num.ones((n, time_step_count), num.float)
        va = -10 * num.ones((n, time_step_count), num.float)

        times = num.arange(0., num.float(time_step_count * time_step),
                           time_step)
        for i in range(n):
            #ha[i]+=num.sin(times)
            ha[i] += times / finaltime

        sts_file = "test"
        if myid == 0:
            base_name, files = self.write_mux2(lat_long_points,
                                               time_step_count,
                                               time_step,
                                               first_tstep,
                                               last_tstep,
                                               depth=gauge_depth,
                                               ha=ha,
                                               ua=ua,
                                               va=va)
            # base name will not exist, but 3 other files are created

            # Write order file
            file_handle, order_base_name = tempfile.mkstemp("")
            os.close(file_handle)
            os.remove(order_base_name)
            d = ","
            order_file = order_base_name + 'order.txt'
            fid = open(order_file, 'w')

            # Write Header
            header = 'index, longitude, latitude\n'
            fid.write(header)
            indices = [3, 0, 1]
            for i in indices:
                line=str(i)+d+str(lat_long_points[i][1])+d+\
                    str(lat_long_points[i][0])+"\n"
                fid.write(line)
            fid.close()

            urs2sts(base_name,
                    basename_out=sts_file,
                    ordering_filename=order_file,
                    mean_stage=tide,
                    verbose=verbose)
            self.delete_mux(files)

            assert (os.access(sts_file + '.sts', os.F_OK))

            os.remove(order_file)

        barrier()
        boundary_polygon = create_sts_boundary(sts_file)

        # Append the remaining part of the boundary polygon to be defined by
        # the user
        bounding_polygon_utm = []
        for point in bounding_polygon:
            zone, easting, northing = redfearn(point[0], point[1])
            bounding_polygon_utm.append([easting, northing])

        boundary_polygon.append(bounding_polygon_utm[3])
        boundary_polygon.append(bounding_polygon_utm[4])

        assert num.allclose(bounding_polygon_utm, boundary_polygon)

        extent_res = 1000000
        meshname = 'urs_test_mesh' + '.tsh'
        interior_regions = None
        boundary_tags = {'ocean': [0, 1], 'otherocean': [2, 3, 4]}

        # have to change boundary tags from last example because now bounding
        # polygon starts in different place.
        if myid == 0:
            create_mesh_from_regions(boundary_polygon,
                                     boundary_tags=boundary_tags,
                                     maximum_triangle_area=extent_res,
                                     filename=meshname,
                                     interior_regions=interior_regions,
                                     verbose=verbose)

        barrier()

        domain_fbound = Domain(meshname)
        domain_fbound.set_quantities_to_be_stored(None)
        domain_fbound.set_quantity('stage', tide)
        if verbose: print "Creating file boundary condition"
        Bf = File_boundary(sts_file + '.sts',
                           domain_fbound,
                           boundary_polygon=boundary_polygon)
        Br = Reflective_boundary(domain_fbound)

        domain_fbound.set_boundary({'ocean': Bf, 'otherocean': Br})

        temp_fbound = num.zeros(int(finaltime / yieldstep) + 1, num.float)
        if verbose: print "Evolving domain with file boundary condition"
        for i, t in enumerate(
                domain_fbound.evolve(yieldstep=yieldstep,
                                     finaltime=finaltime,
                                     skip_initial_step=False)):
            temp_fbound[i] = domain_fbound.quantities['stage'].centroid_values[
                2]
            if verbose: domain_fbound.write_time()

        domain_drchlt = Domain(meshname)
        domain_drchlt.set_quantities_to_be_stored(None)
        domain_drchlt.set_starttime(time_step)
        domain_drchlt.set_quantity('stage', tide)
        Br = Reflective_boundary(domain_drchlt)
        #Bd = Dirichlet_boundary([2.0+tide,220+10*tide,-220-10*tide])
        Bd = Time_boundary(
            domain=domain_drchlt,
            f=lambda t: [
                2.0 + t / finaltime + tide, 220. + 10. * tide + 10. * t /
                finaltime, -220. - 10. * tide - 10. * t / finaltime
            ])
        #Bd = Time_boundary(domain=domain_drchlt,f=lambda t: [2.0+num.sin(t)+tide,10.*(2+20.+num.sin(t)+tide),-10.*(2+20.+num.sin(t)+tide)])
        domain_drchlt.set_boundary({'ocean': Bd, 'otherocean': Br})
        temp_drchlt = num.zeros(int(finaltime / yieldstep) + 1, num.float)

        for i, t in enumerate(
                domain_drchlt.evolve(yieldstep=yieldstep,
                                     finaltime=finaltime,
                                     skip_initial_step=False)):
            temp_drchlt[i] = domain_drchlt.quantities['stage'].centroid_values[
                2]
            #domain_drchlt.write_time()

        #print domain_fbound.quantities['stage'].vertex_values
        #print domain_drchlt.quantities['stage'].vertex_values

        assert num.allclose(temp_fbound,
                            temp_drchlt), temp_fbound - temp_drchlt

        assert num.allclose(domain_fbound.quantities['stage'].vertex_values,
                            domain_drchlt.quantities['stage'].vertex_values)

        assert num.allclose(
            domain_fbound.quantities['xmomentum'].vertex_values,
            domain_drchlt.quantities['xmomentum'].vertex_values)

        assert num.allclose(
            domain_fbound.quantities['ymomentum'].vertex_values,
            domain_drchlt.quantities['ymomentum'].vertex_values)

        if not sys.platform == 'win32':
            if myid == 0: os.remove(sts_file + '.sts')

        if myid == 0: os.remove(meshname)
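
    # What this test checks (sketch): the File_boundary driven by the .sts
    # file must reproduce the analytic Time_boundary forcing, so the stage
    # time series at centroid 2 and the final vertex values must agree
    # between the two evolved domains.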
    def parallel_time_varying_file_boundary_sts(self):
        """ parallel_test_time_varying_file_boundary_sts_sequential(self):
            Read correct points from ordering file and apply sts to boundary. 
            The boundary is time varying. Compares sequential result with 
            distributed result found using anuga_parallel
        """

        #------------------------------------------------------------
        # Define test variables
        #------------------------------------------------------------
        lat_long_points = [[6.01, 97.0], [6.02, 97.0], [6.05, 96.9],
                           [6.0, 97.0]]
        bounding_polygon = [[6.0, 97.0], [6.01, 97.0], [6.02, 97.0],
                            [6.02, 97.02], [6.00, 97.02]]
        tide = 3.0
        time_step_count = 65
        time_step = 2
        n = len(lat_long_points)
        first_tstep = num.ones(n, num.int)
        last_tstep = (time_step_count) * num.ones(n, num.int)
        finaltime = num.float(time_step * (time_step_count - 1))
        yieldstep = num.float(time_step)
        gauge_depth = 20 * num.ones(n, num.float)
        ha = 2 * num.ones((n, time_step_count), num.float)
        ua = 10 * num.ones((n, time_step_count), num.float)
        va = -10 * num.ones((n, time_step_count), num.float)

        times = num.arange(0, time_step_count * time_step, time_step)
        for i in range(n):
            #ha[i]+=num.sin(times)
            ha[i] += times / finaltime

        #------------------------------------------------------------
        # Write mux data to file then convert to sts format
        #------------------------------------------------------------
        sts_file = "test"
        if myid == 0:
            base_name, files = self.write_mux2(lat_long_points,
                                               time_step_count,
                                               time_step,
                                               first_tstep,
                                               last_tstep,
                                               depth=gauge_depth,
                                               ha=ha,
                                               ua=ua,
                                               va=va)
            # base name will not exist, but 3 other files are created

            # Write order file
            file_handle, order_base_name = tempfile.mkstemp("")
            os.close(file_handle)
            os.remove(order_base_name)
            d = ","
            order_file = order_base_name + 'order.txt'
            fid = open(order_file, 'w')

            # Write Header
            header = 'index, longitude, latitude\n'
            fid.write(header)
            indices = [3, 0, 1]
            for i in indices:
                line=str(i)+d+str(lat_long_points[i][1])+d+\
                    str(lat_long_points[i][0])+"\n"
                fid.write(line)
            fid.close()

            urs2sts(base_name,
                    basename_out=sts_file,
                    ordering_filename=order_file,
                    mean_stage=tide,
                    verbose=verbose)
            self.delete_mux(files)

            assert (os.access(sts_file + '.sts', os.F_OK))

            os.remove(order_file)

        barrier()
        #------------------------------------------------------------
        # Define boundary_polygon on each processor. This polygon defines the
        # urs boundary and lies on a portion of the bounding_polygon
        #------------------------------------------------------------
        boundary_polygon = create_sts_boundary(sts_file)

        # Append the remaining part of the boundary polygon to be defined by
        # the user
        bounding_polygon_utm = []
        for point in bounding_polygon:
            zone, easting, northing = redfearn(point[0], point[1])
            bounding_polygon_utm.append([easting, northing])

        boundary_polygon.append(bounding_polygon_utm[3])
        boundary_polygon.append(bounding_polygon_utm[4])

        assert num.allclose(bounding_polygon_utm, boundary_polygon)

        extent_res = 10000
        meshname = 'urs_test_mesh' + '.tsh'
        interior_regions = None
        boundary_tags = {'ocean': [0, 1], 'otherocean': [2, 3, 4]}

        #------------------------------------------------------------
        # Create mesh on the master processor and store in file. This file
        # is read in by each slave processor when needed
        #------------------------------------------------------------
        if myid == 0:
            create_mesh_from_regions(boundary_polygon,
                                     boundary_tags=boundary_tags,
                                     maximum_triangle_area=extent_res,
                                     filename=meshname,
                                     interior_regions=interior_regions,
                                     verbose=verbose)

            # barrier()
            domain_fbound = Domain(meshname)
            domain_fbound.set_quantities_to_be_stored(None)
            domain_fbound.set_quantity('stage', tide)
            # print domain_fbound.mesh.get_boundary_polygon()
        else:
            domain_fbound = None

        barrier()
        if (verbose and myid == 0):
            print 'DISTRIBUTING PARALLEL DOMAIN'
        domain_fbound = distribute(domain_fbound)

        #--------------------------------------------------------------------
        # Find the sub-domain in which each interpolation point is located
        #
        # Sometimes the interpolation points sit exactly
        # between two centroids, so in the parallel run we
        # reset the interpolation points to the centroids
        # found in the sequential run
        #--------------------------------------------------------------------
        interpolation_points = [[279000, 664000], [280250, 664130],
                                [279280, 665400], [280500, 665000]]

        interpolation_points = num.array(interpolation_points)

        #if myid==0:
        #    import pylab as P
        #    boundary_polygon=num.array(boundary_polygon)
        #    P.plot(boundary_polygon[:,0],boundary_polygon[:,1])
        #    P.plot(interpolation_points[:,0],interpolation_points[:,1],'ko')
        #    P.show()

        fbound_gauge_values = []
        fbound_proc_tri_ids = []
        for i, point in enumerate(interpolation_points):
            fbound_gauge_values.append([])  # Empty list for timeseries

            try:
                k = domain_fbound.get_triangle_containing_point(point)
                if domain_fbound.tri_full_flag[k] == 1:
                    fbound_proc_tri_ids.append(k)
                else:
                    fbound_proc_tri_ids.append(-1)
            except:
                fbound_proc_tri_ids.append(-2)

        if verbose: print 'P%d has points = %s' % (myid, fbound_proc_tri_ids)

        #------------------------------------------------------------
        # Set boundary conditions
        #------------------------------------------------------------
        Bf = File_boundary(sts_file + '.sts',
                           domain_fbound,
                           boundary_polygon=boundary_polygon)
        Br = Reflective_boundary(domain_fbound)

        domain_fbound.set_boundary({'ocean': Bf, 'otherocean': Br})

        #------------------------------------------------------------
        # Evolve the domain on each processor
        #------------------------------------------------------------
        for i, t in enumerate(
                domain_fbound.evolve(yieldstep=yieldstep,
                                     finaltime=finaltime,
                                     skip_initial_step=False)):

            stage = domain_fbound.get_quantity('stage')
            for i in range(4):
                if fbound_proc_tri_ids[i] > -1:
                    fbound_gauge_values[i].append(
                        stage.centroid_values[fbound_proc_tri_ids[i]])

        #------------------------------------------------------------
        # Create domain to be run sequentially on each processor
        #------------------------------------------------------------
        domain_drchlt = Domain(meshname)
        domain_drchlt.set_quantities_to_be_stored(None)
        domain_drchlt.set_starttime(time_step)
        domain_drchlt.set_quantity('stage', tide)
        Br = Reflective_boundary(domain_drchlt)
        #Bd = Dirichlet_boundary([2.0+tide,220+10*tide,-220-10*tide])
        Bd = Time_boundary(
            domain=domain_drchlt,
            function=lambda t: [
                2.0 + t / finaltime + tide, 220. + 10. * tide + 10. * t /
                finaltime, -220. - 10. * tide - 10. * t / finaltime
            ])
        #Bd = Time_boundary(domain=domain_drchlt,function=lambda t: [2.0+num.sin(t)+tide,10.*(2+20.+num.sin(t)+tide),-10.*(2+20.+num.sin(t)+tide)])
        domain_drchlt.set_boundary({'ocean': Bd, 'otherocean': Br})

        drchlt_gauge_values = []
        drchlt_proc_tri_ids = []
        for i, point in enumerate(interpolation_points):
            drchlt_gauge_values.append([])  # Empty list for timeseries

            try:
                k = domain_drchlt.get_triangle_containing_point(point)
                if domain_drchlt.tri_full_flag[k] == 1:
                    drchlt_proc_tri_ids.append(k)
                else:
                    drchlt_proc_tri_ids.append(-1)
            except:
                drchlt_proc_tri_ids.append(-2)

        if verbose: print 'P%d has points = %s' % (myid, drchlt_proc_tri_ids)

        #------------------------------------------------------------
        # Evolve entire domain on each processor
        #------------------------------------------------------------
        for i, t in enumerate(
                domain_drchlt.evolve(yieldstep=yieldstep,
                                     finaltime=finaltime,
                                     skip_initial_step=False)):

            stage = domain_drchlt.get_quantity('stage')
            for i in range(4):
                drchlt_gauge_values[i].append(
                    stage.centroid_values[drchlt_proc_tri_ids[i]])

        #------------------------------------------------------------
        # Compare sequential values with parallel values
        #------------------------------------------------------------
        barrier()
        success = True
        for i in range(4):
            if fbound_proc_tri_ids[i] > -1:
                fbound_gauge_values[i] = num.array(fbound_gauge_values[i])
                drchlt_gauge_values[i] = num.array(drchlt_gauge_values[i])
                #print i,fbound_gauge_values[i][4]
                #print i,drchlt_gauge_values[i][4]
                success = success and num.allclose(fbound_gauge_values[i],
                                                   drchlt_gauge_values[i])
                assert success  #, (fbound_gauge_values[i]-drchlt_gauge_values[i])

        #assert_(success)

        if not sys.platform == 'win32':
            if myid == 0: os.remove(sts_file + '.sts')

        if myid == 0: os.remove(meshname)
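
# Run sketch (assumption): anuga_parallel tests like the two methods above
# are normally launched under MPI, e.g.
#   mpirun -np 4 python <name_of_test_module>.py
# where <name_of_test_module> is a placeholder for the file containing them.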
Example 12
def distibute_three_processors():
    """
    Do a parallel test of distributing a rectangle onto 3 processors
   
    """
    
    # FIXME: Need to update expected values on macos
    if sys.platform == 'darwin':
        return

    import pypar

    myid = pypar.rank()
    numprocs = pypar.size()

    if numprocs != 3: return

    #print numprocs

    barrier()

    if myid == 0:

        points, vertices, boundary = rectangular_cross(2,2)

        domain = Domain(points, vertices, boundary)


        domain.set_quantity('elevation', topography) # Use function for elevation
        domain.set_quantity('friction', 0.0)         # Constant friction 
        domain.set_quantity('stage', expression='elevation') # Dry initial stage
        domain.set_quantity('xmomentum', expression='friction + 2.0')
        domain.set_quantity('ymomentum', ycoord)

        #----------------------------------------------------------------------------------
        # Test pmesh_divide_metis
        #----------------------------------------------------------------------------------
        nodes, triangles, boundary, triangles_per_proc, quantities = pmesh_divide_metis(domain,numprocs)

        assert_(num.allclose(nodes,points))

        true_vertices = [[0, 9, 1], [3, 9, 0], [4, 9, 3], [1, 9, 4], [1, 10, 2], [4, 10, 1], [5, 10, 4], [2, 10, 5], [3, 11, 4], [6, 11, 3], [7, 11, 6], [4, 11, 7], [4, 12, 5], [7, 12, 4], [8, 12, 7], [5, 12, 8]]

        true_triangles = [[4, 9, 3], [4, 12, 5], [7, 12, 4], [8, 12, 7], [5, 12, 8], [0, 9, 1], [1, 9, 4], [1, 10, 2], [4, 10, 1], [5, 10, 4], [2, 10, 5], [3, 9, 0], [3, 11, 4], [6, 11, 3], [7, 11, 6], [4, 11, 7]]

        assert_(num.allclose(vertices,true_vertices))
        assert_(num.allclose(triangles,true_triangles))

        assert_(num.allclose(triangles_per_proc,[5,6,5]))


        #----------------------------------------------------------------------------------
        # Test build_submesh
        #----------------------------------------------------------------------------------
        submesh = build_submesh(nodes, triangles, boundary, quantities, triangles_per_proc)


        assert_(num.allclose(submesh['full_nodes'][0],[[3.0, 0.5, 0.0], [4.0, 0.5, 0.5], [5.0, 0.5, 1.0], [7.0, 1.0, 0.5], [8.0, 1.0, 1.0], [9.0, 0.25, 0.25], [12.0, 0.75, 0.75]]))
        assert_(num.allclose(submesh['full_nodes'][1],[[0.0, 0.0, 0.0], [1.0, 0.0, 0.5], [2.0, 0.0, 1.0], [4.0, 0.5, 0.5], [5.0, 0.5, 1.0], [9.0, 0.25, 0.25], [10.0, 0.25, 0.75]]))
        assert_(num.allclose(submesh['full_nodes'][2],[[0.0, 0.0, 0.0], [3.0, 0.5, 0.0], [4.0, 0.5, 0.5], [6.0, 1.0, 0.0], [7.0, 1.0, 0.5], [9.0, 0.25, 0.25], [11.0, 0.75, 0.25]]))


        assert_(num.allclose(submesh['ghost_nodes'][0],[[0.0, 0.0, 0.0], [1.0, 0.0, 0.5], [2.0, 0.0, 1.0], [6.0, 1.0, 0.0], [10.0, 0.25, 0.75], [11.0, 0.75, 0.25]]))
        assert_(num.allclose(submesh['ghost_nodes'][1],[[3.0, 0.5, 0.0], [7.0, 1.0, 0.5], [8.0, 1.0, 1.0], [11.0, 0.75, 0.25], [12.0, 0.75, 0.75]]))
        assert_(num.allclose(submesh['ghost_nodes'][2],[[1.0, 0.0, 0.5], [5.0, 0.5, 1.0], [8.0, 1.0, 1.0], [12.0, 0.75, 0.75]]))



        true_full_triangles = [num.array([[ 4,  9,  3],
                                          [ 4, 12,  5],
                                          [ 7, 12,  4],
                                          [ 8, 12,  7],
                                          [ 5, 12,  8]]),
                               num.array([[ 0,  9,  1],
                                          [ 1,  9,  4],
                                          [ 1, 10,  2],
                                          [ 4, 10,  1],
                                          [ 5, 10,  4],
                                          [ 2, 10,  5]]),
                               num.array([[ 3,  9,  0],
                                          [ 3, 11,  4],
                                          [ 6, 11,  3],
                                          [ 7, 11,  6],
                                          [ 4, 11,  7]])]


        assert_(num.allclose(submesh['full_triangles'][0],true_full_triangles[0]))
        assert_(num.allclose(submesh['full_triangles'][1],true_full_triangles[1]))
        assert_(num.allclose(submesh['full_triangles'][2],true_full_triangles[2]))

        true_ghost_triangles = [num.array([[ 5,  0,  9,  1],
                                           [ 6,  1,  9,  4],
                                           [ 8,  4, 10,  1],
                                           [ 9,  5, 10,  4],
                                           [10,  2, 10,  5],
                                           [11,  3,  9,  0],
                                           [12,  3, 11,  4],
                                           [13,  6, 11,  3],
                                           [14,  7, 11,  6],
                                           [15,  4, 11,  7]]),
                                num.array([[ 0,  4,  9,  3],
                                           [ 1,  4, 12,  5],
                                           [ 2,  7, 12,  4],
                                           [ 4,  5, 12,  8],
                                           [11,  3,  9,  0],
                                           [12,  3, 11,  4]]),
                                num.array([[ 0,  4,  9,  3],
                                           [ 1,  4, 12,  5],
                                           [ 2,  7, 12,  4],
                                           [ 3,  8, 12,  7],
                                           [ 5,  0,  9,  1],
                                           [ 6,  1,  9,  4]])]



        assert_(num.allclose(submesh['ghost_triangles'][0],true_ghost_triangles[0]))
        assert_(num.allclose(submesh['ghost_triangles'][1],true_ghost_triangles[1]))
        assert_(num.allclose(submesh['ghost_triangles'][2],true_ghost_triangles[2]))

        true_full_commun = [{0: [1, 2], 1: [1, 2], 2: [1, 2], 3: [2], 4: [1]}, {5: [0, 2], 6: [0, 2], 7: [], 8: [0], 9: [0], 10: [0]}, {11: [0, 1], 12: [0, 1], 13: [0], 14: [0], 15: [0]}]

        assert_(true_full_commun == submesh['full_commun'])


        true_ghost_commun = [num.array([[ 5,  1],
                                        [ 6,  1],
                                        [ 8,  1],
                                        [ 9,  1],
                                        [10,  1],
                                        [11,  2],
                                        [12,  2],
                                        [13,  2],
                                        [14,  2],
                                        [15,  2]]),
                             num.array([[ 0,  0],
                                        [ 1,  0],
                                        [ 2,  0],
                                        [ 4,  0],
                                        [11,  2],
                                        [12,  2]]),
                             num.array([[0, 0],
                                        [1, 0],
                                        [2, 0],
                                        [3, 0],
                                        [5, 1],
                                        [6, 1]])]

        assert_(num.allclose(submesh['ghost_commun'][0],true_ghost_commun[0]))
        assert_(num.allclose(submesh['ghost_commun'][1],true_ghost_commun[1]))
        assert_(num.allclose(submesh['ghost_commun'][2],true_ghost_commun[2]))



    barrier()
    #--------------------------------
    # Now do the communication part
    #--------------------------------


    if myid == 0:
        #----------------------------------------------------------------------------------
        # Test send_submesh
        #----------------------------------------------------------------------------------
        for p in range(1, numprocs):
            send_submesh(submesh, triangles_per_proc, p, verbose=False)

        #----------------------------------------------------------------------------------
        # Test extract_submesh
        #----------------------------------------------------------------------------------
        points, vertices, boundary, quantities, \
            ghost_recv_dict, full_send_dict, tri_map, node_map, \
            tri_l2g, node_l2g, ghost_layer_width = \
            extract_submesh(submesh, triangles_per_proc)


        true_points =  [[0.5, 0.0], [0.5, 0.5], [0.5, 1.0], [1.0, 0.5], [1.0, 1.0], [0.25, 0.25], [0.75, 0.75], [0.0, 0.0], [0.0, 0.5], [0.0, 1.0], [1.0, 0.0], [0.25, 0.75], [0.75, 0.25]]

        true_vertices = [[1, 5, 0], [1, 6, 2], [3, 6, 1], [4, 6, 3], [2, 6, 4], [7, 5, 8], [8, 5, 1], [1, 11, 8], [2, 11, 1], [9, 11, 2], [0, 5, 7], [0, 12, 1], [10, 12, 0], [3, 12, 10], [1, 12, 3]]


        true_ghost_recv = {1: [num.array([5, 6, 7, 8, 9]), num.array([ 5,  6,  8,  9, 10])], 2: [num.array([10, 11, 12, 13, 14]), num.array([11, 12, 13, 14, 15])]}


        true_full_send = {1: [num.array([0, 1, 2, 4]), num.array([0, 1, 2, 4])], 2: [num.array([0, 1, 2, 3]), num.array([0, 1, 2, 3])]}

        assert_(num.allclose(ghost_layer_width,  2))
        assert_(num.allclose(points,   true_points))
        assert_(num.allclose(vertices, true_vertices))
        assert_(num.allclose(ghost_recv_dict[1],true_ghost_recv[1]))
        assert_(num.allclose(ghost_recv_dict[2],true_ghost_recv[2]))
        assert_(num.allclose(full_send_dict[1],true_full_send[1]))
        assert_(num.allclose(full_send_dict[2],true_full_send[2]))

        #print triangles_per_proc

    else:
        #----------------------------------------------------------------------------------
        # Test rec_submesh
        #----------------------------------------------------------------------------------
        points, vertices, boundary, quantities, \
                ghost_recv_dict, full_send_dict, \
                no_full_nodes, no_full_trigs, tri_map, node_map, tri_l2g, node_l2g, \
                ghost_layer_width  = \
                rec_submesh(0, verbose=False)    

        if myid == 1:


            true_points =  [[0.0, 0.0], [0.0, 0.5], [0.0, 1.0], [0.5, 0.5], [0.5, 1.0], [0.25, 0.25], [0.25, 0.75], [0.5, 0.0], [1.0, 0.5], [1.0, 1.0], [0.75, 0.25], [0.75, 0.75]] 

            true_vertices =  [[0, 5, 1], [1, 5, 3], [1, 6, 2], [3, 6, 1], [4, 6, 3], [2, 6, 4], [3, 5, 7], [3, 11, 4], [8, 11, 3], [4, 11, 9], [7, 5, 0], [7, 10, 3]]

            true_ghost_recv =  {0: [num.array([6, 7, 8, 9]), num.array([0, 1, 2, 4])], 2: [num.array([10, 11]), num.array([11, 12])]}

            true_full_send =  {0: [num.array([0, 1, 3, 4, 5]), num.array([ 5,  6,  8,  9, 10])], 2: [num.array([0, 1]), num.array([5, 6])]}

            true_tri_map  = num.array([ 6,  7,  8, -1,  9,  0, 1,  2,  3,  4,  5, 10, 11])

            true_node_map = num.array([ 0,  1,  2,  7,  3,  4, -1,  8,  9,  5,  6, 10, 11])

            assert_(num.allclose(ghost_layer_width,  2))
            assert_(num.allclose(tri_map,   true_tri_map))
            assert_(num.allclose(node_map,   true_node_map))
            assert_(num.allclose(points,   true_points))
            assert_(num.allclose(vertices, true_vertices))
            assert_(num.allclose(ghost_recv_dict[0],true_ghost_recv[0]))
            assert_(num.allclose(ghost_recv_dict[2],true_ghost_recv[2]))
            assert_(num.allclose(full_send_dict[0],true_full_send[0]))
            assert_(num.allclose(full_send_dict[2],true_full_send[2])) 


        if myid == 2:

            true_points =   [[0.0, 0.0], [0.5, 0.0], [0.5, 0.5], [1.0, 0.0], [1.0, 0.5], [0.25, 0.25], [0.75, 0.25], [0.0, 0.5], [0.5, 1.0], [1.0, 1.0], [0.75, 0.75]]

            true_vertices =  [[1, 5, 0], [1, 6, 2], [3, 6, 1], [4, 6, 3], [2, 6, 4], [2, 5, 1], [2, 10, 8], [4, 10, 2], [9, 10, 4], [0, 5, 7], [7, 5, 2]]

            true_ghost_recv =   {0: [num.array([5, 6, 7, 8]), num.array([0, 1, 2, 3])], 1: [num.array([ 9, 10]), num.array([5, 6])]}

            true_full_send =   {0: [num.array([0, 1, 2, 3, 4]), num.array([11, 12, 13, 14, 15])], 1: [num.array([0, 1]), num.array([11, 12])]}

            true_tri_map  = num.array([5, 6, 7, 8, -1, 9, 10, -1, -1, -1, -1, 0, 1, 2, 3, 4, -1])

            true_node_map = num.array([ 0,  7, -1,  1,  2,  8 , 3,  4,  9,  5, -1,  6, 10])

            assert_(num.allclose(ghost_layer_width,  2))
            assert_(num.allclose(tri_map,   true_tri_map))
            assert_(num.allclose(node_map,   true_node_map))
            assert_(num.allclose(points,   true_points))
            assert_(num.allclose(vertices, true_vertices))
            assert_(num.allclose(ghost_recv_dict[0],true_ghost_recv[0]))
            assert_(num.allclose(ghost_recv_dict[1],true_ghost_recv[1]))
            assert_(num.allclose(full_send_dict[0],true_full_send[0]))
            assert_(num.allclose(full_send_dict[1],true_full_send[1])) 
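
# Run sketch (assumption): this check runs on exactly 3 processes and
# returns early otherwise, e.g.
#   mpirun -np 3 python <name_of_test_module>.py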
    def sequential_time_varying_file_boundary_sts(self):
        """sequential_ltest_time_varying_file_boundary_sts_sequential(self):
        Read correct points from ordering file and apply sts to boundary. The boundary is time varying. FIXME add to test_urs2sts.
        """
        lat_long_points=[[6.01,97.0],[6.02,97.0],[6.05,96.9],[6.0,97.0]]
        bounding_polygon=[[6.0,97.0],[6.01,97.0],[6.02,97.0],
                          [6.02,97.02],[6.00,97.02]]
        tide = 3.0
        time_step_count = 65
        time_step = 2.
        n=len(lat_long_points)
        first_tstep=num.ones(n,num.int)
        last_tstep=(time_step_count)*num.ones(n,num.int)
        finaltime=num.float(time_step*(time_step_count-1))
        yieldstep=num.float(time_step)
        gauge_depth=20*num.ones(n,num.float)
        ha=2*num.ones((n,time_step_count),num.float)
        ua=10*num.ones((n,time_step_count),num.float)
        va=-10*num.ones((n,time_step_count),num.float)

        times=num.arange(0., num.float(time_step_count*time_step), time_step)
        for i in range(n):
            #ha[i]+=num.sin(times)
            ha[i]+=times/finaltime



        sts_file="test"
        if myid==0:
            base_name, files = self.write_mux2(lat_long_points,
                                               time_step_count,
                                               time_step,
                                               first_tstep,
                                               last_tstep,
                                               depth=gauge_depth,
                                               ha=ha,
                                               ua=ua,
                                               va=va)
            # base name will not exist, but 3 other files are created

            # Write order file
            file_handle, order_base_name = tempfile.mkstemp("")
            os.close(file_handle)
            os.remove(order_base_name)
            d=","
            order_file=order_base_name+'order.txt'
            fid=open(order_file,'w')
        
            # Write Header
            header='index, longitude, latitude\n'
            fid.write(header)
            indices=[3,0,1]
            for i in indices:
                line=str(i)+d+str(lat_long_points[i][1])+d+\
                    str(lat_long_points[i][0])+"\n"
                fid.write(line)
            fid.close()

            urs2sts(base_name,
                    basename_out=sts_file,
                    ordering_filename=order_file,
                    mean_stage=tide,
                    verbose=verbose)
            self.delete_mux(files)

            assert(os.access(sts_file+'.sts', os.F_OK))

            os.remove(order_file)

        barrier()
        boundary_polygon = create_sts_boundary(sts_file)

        # Append the remaining part of the boundary polygon to be defined by
        # the user
        bounding_polygon_utm=[]
        for point in bounding_polygon:
            zone,easting,northing=redfearn(point[0],point[1])
            bounding_polygon_utm.append([easting,northing])

        boundary_polygon.append(bounding_polygon_utm[3])
        boundary_polygon.append(bounding_polygon_utm[4])

        assert num.allclose(bounding_polygon_utm,boundary_polygon)


        extent_res=1000000
        meshname = 'urs_test_mesh' + '.tsh'
        interior_regions=None
        boundary_tags={'ocean': [0,1], 'otherocean': [2,3,4]}
        
        # have to change boundary tags from last example because now bounding
        # polygon starts in different place.
        if myid==0:
            create_mesh_from_regions(boundary_polygon,
                                     boundary_tags=boundary_tags,
                                     maximum_triangle_area=extent_res,
                                     filename=meshname,
                                     interior_regions=interior_regions,
                                     verbose=verbose)

        barrier()
        
        domain_fbound = Domain(meshname)
        domain_fbound.set_quantities_to_be_stored(None)
        domain_fbound.set_quantity('stage', tide)
        if verbose: print "Creating file boundary condition"
        Bf = File_boundary(sts_file+'.sts',
                           domain_fbound,
                           boundary_polygon=boundary_polygon)
        Br = Reflective_boundary(domain_fbound)

        domain_fbound.set_boundary({'ocean': Bf,'otherocean': Br})

        temp_fbound=num.zeros(int(finaltime/yieldstep)+1,num.float)
        if verbose: print "Evolving domain with file boundary condition"
        for i, t in enumerate(domain_fbound.evolve(yieldstep=yieldstep,
                                                   finaltime=finaltime, 
                                                   skip_initial_step = False)):
            temp_fbound[i]=domain_fbound.quantities['stage'].centroid_values[2]
            if verbose: domain_fbound.write_time()
            
        
        domain_drchlt = Domain(meshname)
        domain_drchlt.set_quantities_to_be_stored(None)
        domain_drchlt.set_starttime(time_step)
        domain_drchlt.set_quantity('stage', tide)
        Br = Reflective_boundary(domain_drchlt)
        #Bd = Dirichlet_boundary([2.0+tide,220+10*tide,-220-10*tide])
        Bd = Time_boundary(domain=domain_drchlt, f=lambda t: [2.0+t/finaltime+tide,220.+10.*tide+10.*t/finaltime,-220.-10.*tide-10.*t/finaltime])
        #Bd = Time_boundary(domain=domain_drchlt,f=lambda t: [2.0+num.sin(t)+tide,10.*(2+20.+num.sin(t)+tide),-10.*(2+20.+num.sin(t)+tide)])
        domain_drchlt.set_boundary({'ocean': Bd,'otherocean': Br})
        temp_drchlt=num.zeros(int(finaltime/yieldstep)+1,num.float)
        
        for i, t in enumerate(domain_drchlt.evolve(yieldstep=yieldstep,
                                                   finaltime=finaltime, 
                                                   skip_initial_step = False)):
            temp_drchlt[i]=domain_drchlt.quantities['stage'].centroid_values[2]
            #domain_drchlt.write_time()
        
        #print domain_fbound.quantities['stage'].vertex_values
        #print domain_drchlt.quantities['stage'].vertex_values
                    
        assert num.allclose(temp_fbound,temp_drchlt),temp_fbound-temp_drchlt

        
        assert num.allclose(domain_fbound.quantities['stage'].vertex_values,
                            domain_drchlt.quantities['stage'].vertex_values)
                        
        assert num.allclose(domain_fbound.quantities['xmomentum'].vertex_values,
                            domain_drchlt.quantities['xmomentum'].vertex_values)                        
                        
        assert num.allclose(domain_fbound.quantities['ymomentum'].vertex_values,
                            domain_drchlt.quantities['ymomentum'].vertex_values)
        
        if not sys.platform == 'win32':
            if myid==0: os.remove(sts_file+'.sts')
        
        if myid==0: os.remove(meshname)
    def parallel_time_varying_file_boundary_sts(self):
        """ parallel_test_time_varying_file_boundary_sts_sequential(self):
            Read correct points from ordering file and apply sts to boundary. 
            The boundary is time varying. Compares sequential result with 
            distributed result found using anuga_parallel
        """

        #------------------------------------------------------------
        # Define test variables
        #------------------------------------------------------------
        lat_long_points=[[6.01,97.0],[6.02,97.0],[6.05,96.9],[6.0,97.0]]
        bounding_polygon=[[6.0,97.0],[6.01,97.0],[6.02,97.0],
                          [6.02,97.02],[6.00,97.02]]
        tide = 3.0
        time_step_count = 65
        time_step = 2
        n=len(lat_long_points)
        first_tstep=num.ones(n,num.int)
        last_tstep=(time_step_count)*num.ones(n,num.int)
        finaltime=num.float(time_step*(time_step_count-1))
        yieldstep=num.float(time_step)
        gauge_depth=20*num.ones(n,num.float)
        ha=2*num.ones((n,time_step_count),num.float)
        ua=10*num.ones((n,time_step_count),num.float)
        va=-10*num.ones((n,time_step_count),num.float)

        times=num.arange(0, time_step_count*time_step, time_step)
        for i in range(n):
            #ha[i]+=num.sin(times)
            ha[i]+=times/finaltime

        #------------------------------------------------------------
        # Write mux data to file then convert to sts format
        #------------------------------------------------------------
        sts_file="test"
        if myid==0:
            base_name, files = self.write_mux2(lat_long_points,
                                               time_step_count,
                                               time_step,
                                               first_tstep,
                                               last_tstep,
                                               depth=gauge_depth,
                                               ha=ha,
                                               ua=ua,
                                               va=va)
            # base name will not exist, but 3 other files are created

            # Write order file
            file_handle, order_base_name = tempfile.mkstemp("")
            os.close(file_handle)
            os.remove(order_base_name)
            d=","
            order_file=order_base_name+'order.txt'
            fid=open(order_file,'w')
        
            # Write Header
            header='index, longitude, latitude\n'
            fid.write(header)
            indices=[3,0,1]
            for i in indices:
                line=str(i)+d+str(lat_long_points[i][1])+d+\
                    str(lat_long_points[i][0])+"\n"
                fid.write(line)
            fid.close()

            urs2sts(base_name,
                    basename_out=sts_file,
                    ordering_filename=order_file,
                    mean_stage=tide,
                    verbose=verbose)
            self.delete_mux(files)

            assert(os.access(sts_file+'.sts', os.F_OK))

            os.remove(order_file)

        barrier()
        #------------------------------------------------------------
        # Define boundary_polygon on each processor. This polygon defines the
        # urs boundary and lies on a portion of the bounding_polygon
        #------------------------------------------------------------
        boundary_polygon = create_sts_boundary(sts_file)

        # Append the remaining part of the boundary polygon to be defined by
        # the user
        bounding_polygon_utm=[]
        for point in bounding_polygon:
            zone,easting,northing=redfearn(point[0],point[1])
            bounding_polygon_utm.append([easting,northing])

        boundary_polygon.append(bounding_polygon_utm[3])
        boundary_polygon.append(bounding_polygon_utm[4])


        assert num.allclose(bounding_polygon_utm,boundary_polygon)

        extent_res=10000
        meshname = 'urs_test_mesh' + '.tsh'
        interior_regions=None
        boundary_tags={'ocean': [0,1], 'otherocean': [2,3,4]}
        
        #------------------------------------------------------------
        # Create mesh on the master processor and store in file. This file
        # is read in by each slave processor when needed
        #------------------------------------------------------------
        if myid==0:
            create_mesh_from_regions(boundary_polygon,
                                     boundary_tags=boundary_tags,
                                     maximum_triangle_area=extent_res,
                                     filename=meshname,
                                     interior_regions=interior_regions,
                                     verbose=verbose)
        

            # barrier()
            domain_fbound = Domain(meshname)
            domain_fbound.set_quantities_to_be_stored(None)
            domain_fbound.set_quantity('stage', tide)
            # print domain_fbound.mesh.get_boundary_polygon()
        else:
            domain_fbound=None

        barrier()
        if verbose and myid == 0:
            print 'DISTRIBUTING PARALLEL DOMAIN'
        domain_fbound = distribute(domain_fbound)

        #--------------------------------------------------------------------
        # Find which sub-domain contains each interpolation point
        #
        # Sometimes an interpolation point sits exactly between two
        # centroids, so in the parallel run we reset the interpolation
        # points to the centroids found in the sequential run
        #--------------------------------------------------------------------
        interpolation_points = [[279000, 664000], [280250, 664130],
                                [279280, 665400], [280500, 665000]]

        interpolation_points=num.array(interpolation_points)

        #if myid==0:
        #    import pylab as P
        #    boundary_polygon=num.array(boundary_polygon)
        #    P.plot(boundary_polygon[:,0],boundary_polygon[:,1])
        #    P.plot(interpolation_points[:,0],interpolation_points[:,1],'ko')
        #    P.show()

        fbound_gauge_values = []
        fbound_proc_tri_ids = []
        for i, point in enumerate(interpolation_points):
            fbound_gauge_values.append([]) # Empty list for timeseries

            try:
                k = domain_fbound.get_triangle_containing_point(point)
                if domain_fbound.tri_full_flag[k] == 1:
                    fbound_proc_tri_ids.append(k)
                else:
                    # Point lies in a ghost triangle on this processor
                    fbound_proc_tri_ids.append(-1)
            except Exception:
                # Point is outside this processor's sub-domain
                fbound_proc_tri_ids.append(-2)


        if verbose: print 'P%d has points = %s' %(myid, fbound_proc_tri_ids)
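        # The point-location logic above is repeated below for the
        # sequential domain; a helper like this (a sketch, not part of the
        # original test) would capture the pattern:
        #
        # def locate_gauge_triangles(domain, points):
        #     """Return one triangle id per point: the id of the full
        #     (owned) triangle, -1 for a ghost triangle, -2 if the point
        #     lies outside this processor's sub-domain."""
        #     tri_ids = []
        #     for point in points:
        #         try:
        #             k = domain.get_triangle_containing_point(point)
        #             tri_ids.append(k if domain.tri_full_flag[k] == 1 else -1)
        #         except Exception:
        #             tri_ids.append(-2)
        #     return tri_ids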

        #------------------------------------------------------------
        # Set boundary conditions
        #------------------------------------------------------------
        Bf = File_boundary(sts_file+'.sts',
                           domain_fbound,
                           boundary_polygon=boundary_polygon)
        Br = Reflective_boundary(domain_fbound)
    
        domain_fbound.set_boundary({'ocean': Bf,'otherocean': Br})

        #------------------------------------------------------------
        # Evolve the domain on each processor
        #------------------------------------------------------------  
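        # Note: evolve() is a generator that yields control every
        # 'yieldstep' seconds of model time until 'finaltime'; the gauge
        # timeseries below are sampled once per yield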
        for i, t in enumerate(domain_fbound.evolve(yieldstep=yieldstep,
                                                   finaltime=finaltime,
                                                   skip_initial_step=False)):

            stage = domain_fbound.get_quantity('stage')
            # Use a separate index so as not to shadow the evolve counter
            for j in range(4):
                if fbound_proc_tri_ids[j] > -1:
                    fbound_gauge_values[j].append(
                        stage.centroid_values[fbound_proc_tri_ids[j]])
        
        #------------------------------------------------------------
        # Create domain to be run sequentially on each processor
        #------------------------------------------------------------
        domain_drchlt = Domain(meshname)
        domain_drchlt.set_quantities_to_be_stored(None)
        domain_drchlt.set_starttime(time_step)
        domain_drchlt.set_quantity('stage', tide)
        Br = Reflective_boundary(domain_drchlt)
        #Bd = Dirichlet_boundary([2.0+tide, 220+10*tide, -220-10*tide])
        Bd = Time_boundary(domain=domain_drchlt,
                           function=lambda t: [2.0 + t/finaltime + tide,
                                               220. + 10.*tide + 10.*t/finaltime,
                                               -220. - 10.*tide - 10.*t/finaltime])
        #Bd = Time_boundary(domain=domain_drchlt,
        #                   function=lambda t: [2.0 + num.sin(t) + tide,
        #                                       10.*(2 + 20. + num.sin(t) + tide),
        #                                       -10.*(2 + 20. + num.sin(t) + tide)])
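        # For readability the same forcing could be written as a named
        # function (a sketch; the triple is [stage, xmomentum, ymomentum],
        # mirroring the linear ramp ha = times/finaltime written to the
        # mux file above):
        #
        # def linear_ramp(t):
        #     w = 2.0 + t/finaltime + tide               # stage
        #     uh = 220. + 10.*tide + 10.*t/finaltime     # xmomentum
        #     vh = -220. - 10.*tide - 10.*t/finaltime    # ymomentum
        #     return [w, uh, vh]
        #
        # Bd = Time_boundary(domain=domain_drchlt, function=linear_ramp)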
        domain_drchlt.set_boundary({'ocean': Bd,'otherocean': Br})
       
        drchlt_gauge_values = []
        drchlt_proc_tri_ids = []
        for i, point in enumerate(interpolation_points):
            drchlt_gauge_values.append([]) # Empty list for timeseries

            try:
                k = domain_drchlt.get_triangle_containing_point(point)
                if domain_drchlt.tri_full_flag[k] == 1:
                    drchlt_proc_tri_ids.append(k)
                else:
                    # Point lies in a ghost triangle on this processor
                    drchlt_proc_tri_ids.append(-1)
            except Exception:
                # Point is outside this processor's sub-domain
                drchlt_proc_tri_ids.append(-2)


        if verbose: print 'P%d has points = %s' %(myid, drchlt_proc_tri_ids)

        #------------------------------------------------------------
        # Evolve entire domain on each processor
        #------------------------------------------------------------
        for i, t in enumerate(domain_drchlt.evolve(yieldstep=yieldstep,
                                                   finaltime=finaltime,
                                                   skip_initial_step=False)):

            stage = domain_drchlt.get_quantity('stage')
            # Guard as above in case a point was not found in the mesh
            for j in range(4):
                if drchlt_proc_tri_ids[j] > -1:
                    drchlt_gauge_values[j].append(
                        stage.centroid_values[drchlt_proc_tri_ids[j]])

        #------------------------------------------------------------
        # Compare sequential values with parallel values
        #------------------------------------------------------------
        barrier()
        success = True
        for i in range(4):
            if fbound_proc_tri_ids[i] > -1:
                fbound_gauge_values[i] = num.array(fbound_gauge_values[i])
                drchlt_gauge_values[i] = num.array(drchlt_gauge_values[i])
                success = success and num.allclose(fbound_gauge_values[i],
                                                   drchlt_gauge_values[i])
                assert success, \
                    'Parallel (file boundary) and sequential (time boundary)' \
                    ' gauge values differ at point %d' % i

        if sys.platform != 'win32':
            if myid == 0: os.remove(sts_file + '.sts')

        if myid == 0: os.remove(meshname)
Esempio n. 15
0
def setup_mesh(project, setup_initial_conditions=None):
    """
    Code to make the mesh (initial domain)

    The geometry is made on processor 0, then dumped and reloaded
    This reduces the memory demands

    INPUT: project == the project module

    OUTPUT: domain
    """

    if myid == 0:

        if verbose:
            print 'Hello from processor ', myid

        #
        # Here we make/partition/read the mesh on processor 0 only.
        # This can lead to memory savings in parallel runs
        # (possibly because of limitations of Python memory management)
        #

        # Let's see if we have already pickled this domain

        pickle_name = 'domain' + '_P%g_%g.pickle' % (1, 0)
        pickle_name = join(project.partition_dir, pickle_name)
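        # The '%g_%g' fields are (number of processors, processor rank);
        # e.g. a 4-processor partition would be stored as
        # domain_P4_0.pickle ... domain_P4_3.pickle, while (1, 0) above
        # refers to the single sequential domain.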

        if os.path.exists(pickle_name):
            if verbose:
                print 'Saved domain seems to already exist'
        else:
            if verbose:
                print 'CREATING PARTITIONED DOMAIN'
            domain = build_mesh(project)

            if setup_initial_conditions is not None:
                # Set the initial conditions in serial
                setup_initial_conditions.setup_initial_conditions(
                    domain, project)

            # If pypar is not available don't use sequential_distribute stuff
            # (it will fail)

            if pypar_available:
                if verbose:
                    print 'Saving Domain'
                sequential_distribute_dump(domain,
                                           1,
                                           partition_dir=project.partition_dir,
                                           verbose=verbose)

        # If pypar is not available don't use sequential_distribute stuff (it
        # will fail)

        if pypar_available:

            # Now partition the domain

            par_pickle_name = 'domain' + '_P%g_%g.pickle' % (numprocs, 0)
            par_pickle_name = join(project.partition_dir, par_pickle_name)
            if os.path.exists(par_pickle_name):
                if verbose:
                    print 'Saved partitioned domain seems to already exist'
            else:
                if verbose:
                    print 'Load in saved sequential pickled domain'
                domain = \
                    sequential_distribute_load_pickle_file(
                        pickle_name, np=1, verbose=verbose)

                if verbose:
                    print 'Dump partitioned domains'
                sequential_distribute_dump(domain,
                                           numprocs,
                                           partition_dir=project.partition_dir,
                                           verbose=verbose)

            # Dropping the full domain here can reduce memory demands if it
            # was made above. Better still is to reuse an existing partition
            # (i.e. stop here and rerun)

            domain = None
            gc.collect()
    else:

        domain = None
        if verbose:
            print 'Hello from processor ', myid

    barrier()

    # print 'Distributing domain'
    # domain=distribute(domain)
    # barrier()

    # If pypar is not available don't use sequential_distribute stuff (it will
    # fail)

    if pypar_available:
        if myid == 0:
            print 'LOADING PARTITIONED DOMAIN'

        domain = \
            sequential_distribute_load(
                filename=join(project.partition_dir, 'domain'),
                verbose=verbose)

    # #########################################################################
    # Set output directories
    # #########################################################################

    domain.set_name(project.scenario)  # Name of sww file
    domain.set_datadir(project.output_dir)  # Store sww output here

    # Needs more changes for this to work
    # domain.set_checkpointing(checkpoint_time=project.checkpoint_time)

    # #########################################################################
    # Miscellaneous numerics
    # #########################################################################

    domain.set_flow_algorithm(project.flow_algorithm)

    # Force zero beta values [hopefully integrated into source]
    # print 'Warning: Forcing everything to first order'
    # domain.beta_w=0.
    # domain.beta_uh=0.
    # domain.beta_vh=0.
    # domain.beta_w_dry=0.
    # domain.beta_uh_dry=0.
    # domain.beta_vh_dry=0.

    # Adjust velocity computation for max quantities
    # domain.velocity_protection=1.0e-05

    # Adjust CFL
    # domain.set_CFL(0.9)

    # Optionally store vertex values uniquely (large file sizes!)

    domain.set_store_vertices_uniquely(project.store_vertices_uniquely)

    if project.use_local_extrapolation_and_flux_updating:
        domain.set_local_extrapolation_and_flux_updating()

    if project.store_elevation_every_timestep:
        domain.quantities_to_be_stored['elevation'] = 2
    else:
        domain.quantities_to_be_stored['elevation'] = 1
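    # (A value of 2 requests storage at every yieldstep, while 1 stores the
    # quantity only once, which suffices for a static elevation)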

    return domain
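# A minimal usage sketch (assuming a 'project' module exposing the
# attributes referenced above, e.g. partition_dir, scenario, output_dir,
# flow_algorithm, store_vertices_uniquely):
#
#   import project
#   domain = setup_mesh(project)
#   domain.set_boundary(...)   # boundary setup happens elsewhere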