Code example #1
    def _get_minmax_velocity_wall(self, wall, axis=0):
        """ Return the minimum and maximum velocity component on the wall

        Parameters
        ----------
        wall : indexSet
            The wall.
        axis : int
            Axis (velocity component).
        """

        # Initialise to sentinel values that cannot win the reductions:
        # -float max for the MAX reduction, +float max for the MIN reduction.
        maxV = np.ones((1)) * -sys.float_info.max
        minV = np.ones((1)) * sys.float_info.max

        # if local domain has wall, get velocities
        if wall.data.size > 0:
            velocities = self.Model.velocityField.data[wall.data, axis]
            # get local min and max
            maxV[0] = velocities.max()
            minV[0] = velocities.min()

        # reduce operation
        uw.barrier()
        comm.Allreduce(MPI.IN_PLACE, maxV, op=MPI.MAX)
        comm.Allreduce(MPI.IN_PLACE, minV, op=MPI.MIN)
        uw.barrier()

        return minV, maxV
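The reduction above is the standard mpi4py in-place Allreduce pattern. A minimal standalone sketch of that pattern, assuming mpi4py and a stand-in array `local` in place of the wall velocities:

import sys
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

# Neutral start values that cannot win the reductions, so ranks holding no
# wall nodes do not affect the global result.
maxV = np.full(1, -sys.float_info.max)
minV = np.full(1, sys.float_info.max)

local = np.random.rand(10)      # stand-in for this rank's wall velocities
if local.size > 0:
    maxV[0] = local.max()
    minV[0] = local.min()

# IN_PLACE: each rank's buffer acts as both send and receive buffer.
comm.Allreduce(MPI.IN_PLACE, maxV, op=MPI.MAX)
comm.Allreduce(MPI.IN_PLACE, minV, op=MPI.MIN)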
Code example #2
File: meshswarm2D.py  Project: dansand/2dthermalslabs
    def update_triangulation(self):

        self.fixedSwarm.shadow_particles_fetch()
        self.lagrSwarm.shadow_particles_fetch()

        # Need to add boundary points for the interpolator / triangulation

        all_particle_coords = np.concatenate((self.fixedSwarm.particleCoordinates.data,
                                              self.lagrSwarm.particleCoordinates.data,
                                              self.lagrSwarm.particleCoordinates.data_shadow,
                                              self.fixedSwarm.particleCoordinates.data_shadow))

        pts = all_particle_coords.shape[0]
        data = np.zeros(pts)

        self.moving_data_start = self.fixedSwarm.particleLocalCount
        self.shadow_data_start = self.fixedSwarm.particleLocalCount + self.lagrSwarm.particleLocalCount

        # The linear interpolator can have its data values reloaded and still work
        # correctly. The mesh swarm variable currently relies on this behaviour,
        # so we keep the linear interpolator even if a different one is used
        # for this swarm.
        self._linear_interpolator = LinearNDInterpolator(all_particle_coords, data)
        self.interpolator = self._linear_interpolator
        # self.interpolator = CloughTocher2DInterpolator(all_particle_coords, data)

        self.triangulation = self.interpolator.tri
        self.triangulation_edge_lengths()

        # for var in self.variables:
        #     var.update_data()

        # Things to help compute gradients (could be optional !)

        tri = self.triangulation

        # Triangle encircling vectors

        self.vA = tri.points[tri.simplices[:,1]] - tri.points[tri.simplices[:,0]]
        self.vB = tri.points[tri.simplices[:,2]] - tri.points[tri.simplices[:,1]]
        self.vC = tri.points[tri.simplices[:,0]] - tri.points[tri.simplices[:,2]]

        self.tri_area = 0.5 * (self.vA[:,0] * self.vB[:,1] - self.vA[:,1] * self.vB[:,0])

        w  = np.zeros(tri.npoints)

        for triangle in tri.simplices:
            w[triangle[0]]    += 1.0
            w[triangle[1]]    += 1.0
            w[triangle[2]]    += 1.0

        self.simplex2node_weight = w

        uw.barrier()

        return
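The gradient helpers at the end of this example depend only on the triangulation itself. A minimal sketch of the same area and node-weight computation using scipy.spatial.Delaunay directly, on an illustrative random point cloud:

import numpy as np
from scipy.spatial import Delaunay

pts = np.random.rand(50, 2)     # illustrative point cloud
tri = Delaunay(pts)

# Edge vectors walking around each triangle
vA = tri.points[tri.simplices[:, 1]] - tri.points[tri.simplices[:, 0]]
vB = tri.points[tri.simplices[:, 2]] - tri.points[tri.simplices[:, 1]]

# Signed triangle areas from the 2D cross product of two edge vectors
tri_area = 0.5 * (vA[:, 0] * vB[:, 1] - vA[:, 1] * vB[:, 0])

# Simplex-to-node weights: how many triangles touch each node
w = np.zeros(tri.npoints)
np.add.at(w, tri.simplices, 1.0)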
Code example #3
def post_hook():
    """
    Stop any brittle yielding near the edges of the model
    """
    coords = fn.input()
    zz = (coords[0] - GEO.nd(Model.minCoord[0])) / (GEO.nd(Model.maxCoord[0]) - GEO.nd(Model.minCoord[0]))
    fact = fn.math.pow(fn.math.tanh(zz*20.0) + fn.math.tanh((1.0-zz)*20.0) - fn.math.tanh(20.0), 4)
    Model.plasticStrain.data[:] = Model.plasticStrain.data[:] * fact.evaluate(Model.swarm)

    """
    Check spacing for when sedimentation should turn off
    # This solution was provided by: https://stackoverflow.com/a/38008452
    """
    rank = uw.rank()
    root = 0

    # get all the moho tracers that are on our CPU, in x sorted order
    moho_tracers = Model.passive_tracers["Moho"]  # Need this for restart safety
    local_array = numpy.sort(moho_tracers.swarm.particleCoordinates.data[:,0])
    sendbuf = numpy.array(local_array)

    # We have to figure out how many particles each CPU has, and let the root
    # cpu know
    sendcounts = numpy.array(MPI.COMM_WORLD.gather(len(sendbuf), root))

    if rank == root:
        # prepare to receive all this data
        recvbuf = numpy.empty(sum(sendcounts), dtype=float)
    else:
        recvbuf = None

    # Gather up all the data and put it in recvbuf
    MPI.COMM_WORLD.Gatherv(sendbuf=sendbuf, recvbuf=(recvbuf, sendcounts), root=root)
    if rank == root:
        # find the biggest gap in the X direction in the moho_tracers
        diff = numpy.max(numpy.diff(numpy.sort(recvbuf)))  # recvbuf is the array of all particles
    else:
        diff = None

    # Now that we know the biggest gap, tell all the other CPUs
    diff = MPI.COMM_WORLD.bcast(diff, root=0)
    biggest_gap = GEO.Dimensionalize(diff, u.km)
    uw.barrier()
    print(uw.rank(), "Biggest gap in tracers", biggest_gap)

    if biggest_gap > gap_to_stop_sedi:
        print("Sedimentation turned: OFF at {}".format(Model.time))
        threshold = -10 * u.kilometers
    else:
        print("Sedimentation turned: ON")
        threshold = -1 * u.kilometers

    Model.surfaceProcesses = GEO.surfaceProcesses.SedimentationThreshold(air=[air], sediment=[sediment], threshold=threshold)
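The gather of per-rank tracer coordinates above follows the variable-count Gatherv recipe from the linked Stack Overflow answer. A minimal mpi4py sketch of that recipe, with illustrative data in place of the Moho tracers:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
root = 0

# Each rank holds a different number of sorted values (illustrative data)
local_array = np.sort(np.random.rand(3 * (rank + 1)))

# Root needs to know how much every rank will send
sendcounts = np.array(comm.gather(len(local_array), root))

if rank == root:
    recvbuf = np.empty(sendcounts.sum(), dtype=float)
else:
    recvbuf = None

comm.Gatherv(sendbuf=local_array, recvbuf=(recvbuf, sendcounts), root=root)

# Only root sees the full picture; broadcast the answer so every rank agrees
biggest_gap = np.max(np.diff(np.sort(recvbuf))) if rank == root else None
biggest_gap = comm.bcast(biggest_gap, root=root)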
Code example #4
    def advect_mesh(self, dt):

        axis = self.axis

        if axis != 0:
            raise ValueError("Axis not supported yet")

        # Get minimum and maximum coordinates for the current mesh
        minX, maxX = self._get_minmax_coordinates_mesh(axis)

        minvxLeftWall, maxvxLeftWall   = self._get_minmax_velocity_wall(self.Model._left_wall, axis)
        minvxRightWall, maxvxRightWall = self._get_minmax_velocity_wall(self.Model._right_wall, axis)

        if np.abs(maxvxRightWall) > np.abs(minvxRightWall):
            vxRight = maxvxRightWall
        else:
            vxRight = minvxRightWall

        if (np.abs(maxvxLeftWall)  > np.abs(minvxLeftWall)):
            vxLeft = maxvxLeftWall
        else:
            vxLeft = minvxLeftWall

        minX += vxLeft * dt
        maxX += vxRight * dt
        length = np.abs(minX - maxX)

        if self.Model.mesh.dim <3:
            newValues = np.linspace(minX, maxX, self.Model.mesh.elementRes[axis]+1)
            newValues = np.repeat(newValues[np.newaxis,:], self.Model.mesh.elementRes[1] + 1, axis)
        else:
            newValues = np.linspace(minX, maxX, self.Model.mesh.elementRes[axis]+1)
            newValues = np.repeat(newValues[np.newaxis, :], self.Model.mesh.elementRes[1] + 1, axis)
            newValues = np.repeat(newValues[np.newaxis, :, :], self.Model.mesh.elementRes[2] + 1, axis)

        with self._mesh2nd.deform_mesh():
            values = newValues.flatten()
            self._mesh2nd.data[:, axis] = values[self._mesh2nd.data_nodegId.ravel()]

        uw.barrier()

        with self.Model.mesh.deform_mesh():
            self.Model.mesh.data[:, axis] = self._mesh2nd.data[:, axis]

        self.Model.velocityField.data[...] = np.copy(self.Model.velocityField.evaluate(self.Model.mesh))
        self.Model.pressureField.data[...] = np.copy(self.Model.pressureField.evaluate(self.Model.mesh.subMesh))

        if self.Model._right_wall.data.size > 0:
            self.Model.velocityField.data[self.Model._right_wall.data, axis] = vxRight

        if self.Model._left_wall.data.size > 0:
            self.Model.velocityField.data[self.Model._left_wall.data, axis]  = vxLeft
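The regridding above builds one row of uniformly spaced node x-coordinates and repeats it across the mesh before writing it back through the node-ID lookup. A minimal numpy sketch of the 2D case, with hypothetical element counts and extents:

import numpy as np

elementRes = (8, 4)          # (nx, ny) elements, hypothetical
minX, maxX = -1.0, 1.0       # new horizontal extent after the walls have moved

# One row of uniformly spaced node x-coordinates ...
newValues = np.linspace(minX, maxX, elementRes[0] + 1)
# ... repeated for every horizontal line of nodes in the mesh
newValues = np.repeat(newValues[np.newaxis, :], elementRes[1] + 1, axis=0)

# Flattened, this lists the x-coordinate of every node row by row, ready to be
# assigned to the mesh inside a deform_mesh() context as in the example above.
assert newValues.shape == (elementRes[1] + 1, elementRes[0] + 1)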
Code example #5
File: marker3D.py  Project: squireg/underworld2
    def advection(self, dt):
        """
        Update marker swarm particles as material points and rebuild data structures
        """
        self._swarm_advector.integrate( dt, update_owners=True)
        self.swarm.shadow_particles_fetch()

        self._update_kdtree()
        self._update_surface_normals()

        uw.barrier()

        return
Code example #6
File: marker2D.py  Project: dansand/2dthermalslabs
    def advection(self, dt):
        """
        Update marker swarm particles as material points and rebuild data structures
        """
        self._swarm_advector.integrate( dt, update_owners=True)
        self.swarm.shadow_particles_fetch()

        self._update_kdtree()
        self._update_surface_normals()

        uw.barrier()

        return
Code example #7
    def _advect_surface(self, dt):

        if self.top:
            # Extract top surface
            x = self.model.mesh.data[self.top.data][:, 0]
            y = self.model.mesh.data[self.top.data][:, 1]

            # Extract velocities from top
            vx = self.model.velocityField.data[self.top.data][:, 0]
            vy = self.model.velocityField.data[self.top.data][:, 1]

            # Advect top surface
            x2 = x + vx * nd(dt)
            y2 = y + vy * nd(dt)

            # Spline top surface
            f = interp1d(x2, y2, kind='cubic', fill_value='extrapolate')

            self.TField.data[self.top.data, 0] = f(x)
        uw.barrier()
        self.TField.syncronise()
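The surface advection above re-samples the displaced free surface back onto the fixed node x-positions with a cubic spline. A minimal scipy sketch of that remapping step, with illustrative arrays:

import numpy as np
from scipy.interpolate import interp1d

x = np.linspace(0.0, 1.0, 20)             # fixed surface-node x-positions
vx = np.full_like(x, 0.01)                # horizontal surface velocity
vy = 0.05 * np.sin(2.0 * np.pi * x)       # vertical surface velocity
dt = 1.0

x2 = x + vx * dt                          # advected x-positions
y2 = vy * dt                              # advected heights (starting from a flat surface)

# Cubic spline through the advected surface, evaluated back at the fixed x;
# extrapolation covers nodes whose advected neighbours drifted past the ends.
f = interp1d(x2, y2, kind='cubic', fill_value='extrapolate')
new_heights = f(x)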
Code example #8
File: geodynamics.py  Project: squireg/underworld2
    def run_for(self, endTime=None, checkpoint=None):
        self.time = 0.
        units = endTime.units
        endTime = self.time + nd(endTime)
        step = 0

        next_checkpoint = None
        if checkpoint:
            next_checkpoint = self.time + nd(checkpoint)

        while self.time < endTime:
            self.solve()

            if self.time == next_checkpoint:
                self.checkpointID += 1
                self.checkpoint()
                next_checkpoint += nd(checkpoint)

            # What's the longest we can run before reaching the end of the model
            # or a checkpoint?
            # Need to generalize that
            dt = self.swarm_advector.get_max_dt()

            if self.temperature:
                dt = min(dt, self.advdiffSystem.get_max_dt())

            if checkpoint:
                dt = min(dt, next_checkpoint - self.time)

            self._dt = min(dt, endTime - self.time)
            uw.barrier()

            self.update()

            step += 1
            if checkpoint or step % 1 == 0:
                print "Time: ", str(sca.Dimensionalize(self.time, units))
Code example #9
    def _get_minmax_coordinates_mesh(self, axis=0):
        """ Return the minimum and maximum coordinates along axis

        Parameters
        ----------
        axis : int
            Axis.

        Returns
        -------
        tuple
            (minVal, maxVal)

        """
        maxVal = np.zeros((1))
        minVal = np.zeros((1))
        maxVal[0] = self.Model.mesh.data[:, axis].max()
        minVal[0] = self.Model.mesh.data[:, axis].min()

        uw.barrier()
        comm.Allreduce(MPI.IN_PLACE, maxVal, op=MPI.MAX)
        comm.Allreduce(MPI.IN_PLACE, minVal, op=MPI.MIN)
        uw.barrier()

        return minVal, maxVal
Code example #10
File: _glucifer.py  Project: zhang2018git/underworld2
    def _generate(self, figname, objects, props):
        #First merge object list with active
        starttime = MPI.Wtime()
        for obj in objects:
            #Add nested colourbar objects
            if obj._colourBar:
                objects.append(obj._colourBar)
                obj._colourBar.parent = obj  #Save parent ref

            #Set default parent flag
            obj.parent = None

            #Add to stored object list if not present
            if obj not in self._objects:
                self._objects.append(obj)

        #Set default names on objects where omitted by user
        #Needs to be updated every time as indices may have changed
        for o in range(len(self._objects)):
            #Default name + idx if no user set name
            obj = self._objects[o]
            if not "name" in obj.properties:
                if obj.properties.get("colourbar"):
                    obj.properties["name"] = 'ColourBar_' + str(o)
                else:
                    obj.properties["name"] = obj._dr.type[3:] + '_' + str(o)

        #Set the write step
        self._db.timeStep = self.step

        #Delete all drawing objects in register
        for ii in range(self._db.drawingObjects.objects.count, 0, -1):
            libUnderworld.StGermain._Stg_ObjectList_RemoveByIndex(
                self._db.drawingObjects.objects, ii - 1,
                libUnderworld.StGermain.KEEP)

        #Add drawing objects to register and output any custom data on them
        for obj in self._objects:
            #Hide objects not in this figure (also check parent for colour bars)
            obj.properties["visible"] = bool(
                obj in objects or obj.parent and obj.parent in objects)

            #Ensure properties updated before object written to db
            _libUnderworld.gLucifer.lucDrawingObject_SetProperties(
                obj._dr, obj._getProperties())
            if obj.colourMap:
                _libUnderworld.gLucifer.lucColourMap_SetProperties(
                    obj.colourMap._cm, obj.colourMap._getProperties())

            #Add the object to the drawing object register for the database
            libUnderworld.StGermain.Stg_ObjectList_Append(
                self._db.drawingObjects.objects, obj._cself)

        # go ahead and fill db
        libUnderworld.gLucifer._lucDatabase_Execute(self._db, None)

        #Write visualisation state as json data
        libUnderworld.gLucifer.lucDatabase_WriteState(
            self._db, figname, self._get_state(self._objects, props))

        #Output any custom geometry on objects
        if lavavu and uw.rank() == 0 and any(x.geomType is not None
                                             for x in self._objects):
            lv = self.lvget()  #Open the viewer
            for obj in self._objects:
                #Create/Transform geometry by object
                obj.render(lv)

        #Parallel custom render output
        if lavavu and any(
                hasattr(x, "parallel_render") for x in self._objects):
            #In case no external file has been written we need to create a temporary
            #database on root so the other procs can load it
            #Wait for temporary db to be written if not already using an external store
            comm = MPI.COMM_WORLD
            rank = uw.rank()
            self.filename = comm.bcast(self.filename, root=0)
            #print uw.rank(),self.filename
            #Open the viewer with db filename
            lv = self.lvget(self.filename)
            #Loop through objects and run their parallel_render method if present
            for obj in self._objects:
                if hasattr(obj, "parallel_render"):
                    obj.parallel_render(lv, uw.rank())
            #Delete the viewer instance on non-root procs
            uw.barrier()
            if uw.rank() > 0:
                lv = None
                self.viewer = None
Code example #11
File: linkage.py  Project: squireg/underworld2
    def run_for_years(self, years, sigma=0, verbose=False):
        """
        Run the model for a number of years. Optionally smooth the Underworld
        velocity field using a Gaussian filter.
        """
        if not self._model_started:
            self._startup()

        end_years = self.time_years + years

        write_checkpoint = True

        while self.time_years < end_years:

            if verbose and uw.rank() == 0:
                t0 = time.clock()

            # Get solution from Stokes solver.
            self.solver.solve()

            uw.barrier()
            if verbose and uw.rank() == 0:
                tloop = time.clock() - t0
                print('- Solver function took %0.02f seconds' % (time.clock() - t0))
                t0 = time.clock()

            # Checkpointing fields and swarm
            if write_checkpoint or self.time_years == 0.:
                self.checkpoint_function(self, self._checkpoint_number, self.time_years)
                self._checkpoint_number += 1
                self._next_checkpoint_years += self.checkpoint_interval
                if verbose and uw.rank() == 0:
                    tloop = time.clock() - t0
                    print('- Checkpointing function took %0.02f seconds' % (time.clock() - t0))
                    t0 = time.clock()
            write_checkpoint = False

            # What's the longest we can run before we have to write a
            # checkpoint or stop?
            max_years = self._next_checkpoint_years - self.time_years
            max_years = min(end_years - self.time_years, max_years)
            max_seconds = max_years * self.SECONDS_PER_YEAR

            # Ask the Underworld model to update
            dt_seconds = self.update_function(self, max_seconds)
            assert int(dt_seconds * 100.) <= int(max_seconds * 100.), "Maximum dt (seconds) for the update function was %s, but it ran for more than that (%s seconds)" % (max_seconds, dt_seconds)

            uw.barrier()
            if verbose and uw.rank() == 0:
                tloop = time.clock() - t0
                print('- Update function took %0.02f seconds' % (time.clock() - t0))
                t0 = time.clock()

            # Do we need to write a checkpoint later?
            # TODO: make sure floating point imperfections don't desync the seconds/years counters on both sides,
            # especially around writing checkpoints
            if dt_seconds == max_seconds:
                write_checkpoint = True

            dt_years = dt_seconds / self.SECONDS_PER_YEAR

            rg = self.badlands_model.recGrid

            if self.mesh.dim == 2:
                zVals = rg.regZ.mean(axis = 1)
                np_surface = np.column_stack((rg.regX, zVals)) * self.scaleDIM

            if self.mesh.dim == 3:
                np_surface = np.column_stack((rg.rectX, rg.rectY, rg.rectZ))*self.scaleDIM

            #tracer_velocity_mps = np_velocity_field.evaluate(np_surface) * self.scaleTIME / self.scaleDIM
            tracer_velocity_mps = get_UW_velocities(np_surface, self.velocity_field) * self.scaleTIME / self.scaleDIM

            uw.barrier()
            if verbose and uw.rank() == 0:
                tloop = time.clock() - t0
                print('- Evaluate velocity field function took %0.02f seconds' % (time.clock() - t0))
                t0 = time.clock()

            ### INTERFACE PART 1: UW->BL
            # Use the tracer vertical velocities to deform the Badlands TIN
            # convert from meters per second to meters displacement over the whole iteration
            tracer_disp = tracer_velocity_mps * self.SECONDS_PER_YEAR * dt_years
            self._inject_badlands_displacement(self.time_years, dt_years, tracer_disp, sigma)

            uw.barrier()
            if verbose and uw.rank() == 0:
                tloop = time.clock() - t0
                print('- Build displacement function took %0.02f seconds' % (time.clock() - t0))
                t0 = time.clock()

            # Run the Badlands model to the same time point
            self.badlands_model.run_to_time(self.time_years + dt_years)
            
            uw.barrier()
            if verbose and uw.rank() == 0:
                tloop = time.clock() - t0
                print('- Running badlands took %0.02f seconds' % (time.clock() - t0))
                t0 = time.clock()

            # Advance time
            self.time_years += dt_years

            ### INTERFACE PART 2: BL->UW
            # TODO: Improve the performance of this function
            self._update_material_types()
            
            uw.barrier()
            if verbose and uw.rank() == 0:
                tloop = time.clock() - t0
                print('- Update material type took %0.02f seconds' % (time.clock() - t0))
                t0 = time.clock()

        # Get solution from Stokes solver.
        self.solver.solve()

        # Checkpointing fields and swarm for last time step
        self.checkpoint_function(self, self._checkpoint_number, self.time_years)
        self._checkpoint_number += 1
        self._next_checkpoint_years += self.checkpoint_interval
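The step-length bookkeeping above caps each coupled step at the next checkpoint or the end of the run, and schedules a checkpoint whenever the full allowance was consumed. A minimal sketch of that logic as a hypothetical helper with illustrative values:

SECONDS_PER_YEAR = 365.25 * 24 * 3600

def step_allowance(time_years, end_years, next_checkpoint_years):
    """Longest coupled step (in seconds) before a checkpoint or the end of the run."""
    max_years = min(next_checkpoint_years - time_years, end_years - time_years)
    return max_years * SECONDS_PER_YEAR

max_seconds = step_allowance(9500.0, 50000.0, 10000.0)    # 500 years of allowance left
dt_seconds = max_seconds              # pretend the update used the whole allowance
write_checkpoint = (dt_seconds == max_seconds)   # so a checkpoint is due on the next pass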
Code example #12
if uw.rank() == 0:  # implied outer guard: only rank 0 reads/writes the pressure BC file
    if os.path.isfile(pressureBC_file):
        # If a file with the bottom pressure already exists, read from it.
        with open(pressureBC_file, 'r') as f:
            bottomPress = numpy.float64(f.readline().strip())
        print("Loaded bottom pressure BC from file: ", bottomPress)
    else:
        # If no existing pressure is around, write the pressure we calculated to the file
        with open(pressureBC_file, 'w') as f:
            f.write("{:.12f}".format(bottomPress))
        print("Saved bottom pressure BC to file")
else:
    bottomPress = None

# since only 1 CPU got the file, send it out to all CPUs
bottomPress = MPI.COMM_WORLD.bcast(bottomPress, root=0)
uw.barrier()  # wait for them to catch up

bottomPress = bottomPress * u.megapascal  # then make it a unit


Model.set_velocityBCs(
                      left  = [total_vel * -0.5, 0. * u.centimetre / u.year], 
                      right = [total_vel * 0.5,  0. * u.centimetre / u.year], 
                      top   = [None,             0. * u.centimetre / u.year],
                      )

Model.set_stressBCs(
        bottom = [0., bottomPress],
        )

def post_hook():
Code example #13
File: _glucifer.py  Project: dansand/underworld2
    def _generate(self, figname, objects, props):
        #First merge object list with active
        starttime = MPI.Wtime()
        for obj in objects:
            #Add nested colourbar objects
            if obj._colourBar:
                objects.append(obj._colourBar)
                obj._colourBar.parent = obj #Save parent ref

            #Set default parent flag
            obj.parent = None

            #Add to stored object list if not present
            if obj not in self._objects:
                self._objects.append(obj)

        #Set default names on objects where omitted by user
        #Needs to be updated every time as indices may have changed
        for o in range(len(self._objects)):
            #Default name + idx if no user set name
            obj = self._objects[o]
            if not "name" in obj.properties:
                if obj.properties.get("colourbar"):
                   obj.properties["name"] = 'ColourBar_' + str(o)
                else:
                   obj.properties["name"] = obj._dr.type[3:] + '_' + str(o)

        #Set the write step
        self._db.timeStep = self.step

        #Delete all drawing objects in register
        for ii in range(self._db.drawingObjects.objects.count,0,-1):
            libUnderworld.StGermain._Stg_ObjectList_RemoveByIndex(self._db.drawingObjects.objects,ii-1, libUnderworld.StGermain.KEEP)

        #Add drawing objects to register and output any custom data on them
        for obj in self._objects:
            #Hide objects not in this figure (also check parent for colour bars)
            obj.properties["visible"] = bool(obj in objects or obj.parent and obj.parent in objects)

            #Ensure properties updated before object written to db
            _libUnderworld.gLucifer.lucDrawingObject_SetProperties(obj._dr, obj._getProperties())
            if obj.colourMap:
                _libUnderworld.gLucifer.lucColourMap_SetProperties(obj.colourMap._cm, obj.colourMap._getProperties())

            #Add the object to the drawing object register for the database
            libUnderworld.StGermain.Stg_ObjectList_Append(self._db.drawingObjects.objects,obj._cself)

        # go ahead and fill db
        libUnderworld.gLucifer._lucDatabase_Execute(self._db,None)

        #Write visualisation state as json data
        libUnderworld.gLucifer.lucDatabase_WriteState(self._db, figname, self._get_state(self._objects, props))

        #Output any custom geometry on objects
        if lavavu and uw.rank() == 0 and any(x.geomType is not None for x in self._objects):
            lv = self.lvget() #Open the viewer
            for obj in self._objects:
                #Create/Transform geometry by object
                obj.render(lv)

        #Parallel custom render output
        if lavavu and any(hasattr(x, "parallel_render") for x in self._objects):
            #In case no external file has been written we need to create a temporary
            #database on root so the other procs can load it
            #Wait for temporary db to be written if not already using an external store
            comm = MPI.COMM_WORLD
            rank = uw.rank()
            self.filename = comm.bcast(self.filename, root=0)
            #print uw.rank(),self.filename
            #Open the viewer with db filename
            lv = self.lvget(self.filename)
            #Loop through objects and run their parallel_render method if present
            for obj in self._objects:
                if hasattr(obj, "parallel_render"):
                    obj.parallel_render(lv, uw.rank())
            #Delete the viewer instance on non-root procs
            uw.barrier()
            if uw.rank() > 0:
                lv = None
                self.viewer = None
Code example #14
import os

# In[2]:

#try:
#    workdir
#except NameError:
#    workdir = os.path.abspath(".")

#outputPath = os.path.join(workdir,"cratonSlab-201710-output/Test01")
outputPath = './'

if uw.rank() == 0:
    if not os.path.exists(outputPath):
        os.makedirs(outputPath)
uw.barrier()

# Set simulation parameters.

# In[3]:

# physical parameters
g = 9.8  # [m/(s.s)],   gravity
alpha = 3 * 1e-5  # [K^-1],      thermal expansivity coefficient
kappa = 1e-6  # [m.m/s],     thermal diffusivity
rho0 = 3300.  # [kg/m^3],    reference density
Temp_Min = 300.0  # [K],         surface temperature, 26.85C, 0C = 273.15K
Temp_Max = 1573.0  # [K],         mantle temperature
R = 8.3145  # [J/(K.mol)], gas constant

deltaTemp = Temp_Max - Temp_Min
Code example #15
    def save(self, outputDir, checkpointID, time):
        """ Save to h5 and create an xdmf file for each tracked field """

        # Save the swarm
        swarm_fname = self.name + '-%s.h5' % checkpointID
        swarm_fpath = os.path.join(outputDir, swarm_fname)

        sH = self.swarm.save(swarm_fpath, units=u.kilometers, time=time)

        if uw.rank() == 0:
            filename = self.name + '-%s.xdmf' % checkpointID
            filename = os.path.join(outputDir, filename)

            # First write the XDMF header
            string = uw.utils._xdmfheader()
            string += uw.utils._swarmspacetimeschema(sH, swarm_fname, time)

        uw.barrier()

        # Save global index
        file_prefix = os.path.join(
            outputDir, self.name + '_global_index-%s' % checkpointID)
        handle = self.global_index.save('%s.h5' % file_prefix)

        if uw.rank() == 0:
            string += _swarmvarschema(handle, "global_index")
        uw.barrier()

        # Save each tracked field
        for field in self.tracked_field:

            file_prefix = os.path.join(
                outputDir,
                self.name + "_" + field["name"] + '-%s' % checkpointID)

            obj = getattr(self, field["name"])
            if not field["timeIntegration"]:
                obj.data[...] = field["value"].evaluate(self.swarm)
            handle = obj.save('%s.h5' % file_prefix, units=field["units"])

            if uw.rank() == 0:
                # Add attribute to xdmf file
                string += _swarmvarschema(handle, field["name"])

        uw.barrier()

        # get swarm parameters - serially read from hdf5 file to get size

        if uw.rank() == 0:
            with h5py.File(name=swarm_fpath, mode="r") as h5f:
                dset = h5f.get('data')
                if dset is None:
                    raise RuntimeError(
                        "Can't find 'data' in file '{}'.\n".format(
                            swarm_fname))
                globalCount = len(dset)
                dim = self.swarm.mesh.dim

            # Write the footer to the xmf
            string += uw.utils._xdmffooter()

            # Write the string to file - only proc 0
            xdmfFH = open(filename, "w")
            xdmfFH.write(string)
            xdmfFH.close()
        uw.barrier()
Code example #16
File: CornerFlow.py  Project: dansand/cornerFlow
outputFile = 'results_model' + Model + '_' + str(ModNum) + '.dat'

if uw.rank() == 0:
    # make directories if they don't exist
    if not os.path.isdir(outputPath):
        os.makedirs(outputPath)
    if not os.path.isdir(imagePath):
        os.makedirs(imagePath)
    if not os.path.isdir(dbPath):
        os.makedirs(dbPath)
    if not os.path.isdir(filePath):
        os.makedirs(filePath)
    if not os.path.isdir(xdmfPath):
        os.makedirs(xdmfPath)

uw.barrier()  #Barrier here so no procs run the check in the next cell too early

# ## Params

# In[6]:

dp = edict({})
#Main physical parameters
dp.depth = 300e3  #Depth
dp.refDensity = 3300.  #reference density
dp.refGravity = 9.8  #surface gravity
dp.viscosityScale = 1e20  #reference upper mantle viscosity
dp.refDiffusivity = 1e-6  #thermal diffusivity
dp.refExpansivity = 3e-5  #surface thermal expansivity
dp.gasConstant = 8.314  #gas constant
dp.specificHeat = 1250.  #Specific heat (Jkg-1K-1)
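The directory setup in this example is the usual rank-0-plus-barrier pattern. A minimal sketch written with plain mpi4py, where comm.Barrier() plays the role that uw.barrier() plays above (paths are illustrative):

import os
from mpi4py import MPI

comm = MPI.COMM_WORLD
outputPath = "./output"            # illustrative path

if comm.Get_rank() == 0:
    # only one process creates the directories
    os.makedirs(outputPath, exist_ok=True)

comm.Barrier()   # no rank proceeds until the directories exist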
Code example #17
File: meshswarm2D.py  Project: squireg/underworld2
    def update_triangulation(self):

        self.fixedSwarm.shadow_particles_fetch()
        self.lagrSwarm.shadow_particles_fetch()

        # Need to add boundary points for the interpolator / triangulation

        all_particle_coords = np.concatenate(
            (self.fixedSwarm.particleCoordinates.data,
             self.lagrSwarm.particleCoordinates.data,
             self.lagrSwarm.particleCoordinates.data_shadow,
             self.fixedSwarm.particleCoordinates.data_shadow))

        pts = all_particle_coords.shape[0]
        data = np.zeros(pts)

        self.moving_data_start = self.fixedSwarm.particleLocalCount
        self.shadow_data_start = self.fixedSwarm.particleLocalCount + self.lagrSwarm.particleLocalCount

        # The linear interpolator can have its data values reloaded and still work
        # correctly. The mesh swarm variable currently relies on this behaviour,
        # so we keep the linear interpolator even if a different one is used
        # for this swarm.
        self._linear_interpolator = LinearNDInterpolator(
            all_particle_coords, data)
        self.interpolator = self._linear_interpolator
        # self.interpolator = CloughTocher2DInterpolator(all_particle_coords, data)

        self.triangulation = self.interpolator.tri
        self.triangulation_edge_lengths()

        # for var in self.variables:
        #     var.update_data()

        # Things to help compute gradients (could be optional !)

        tri = self.triangulation

        # Triangle encircling vectors

        self.vA = tri.points[tri.simplices[:, 1]] - tri.points[tri.simplices[:, 0]]
        self.vB = tri.points[tri.simplices[:, 2]] - tri.points[tri.simplices[:, 1]]
        self.vC = tri.points[tri.simplices[:, 0]] - tri.points[tri.simplices[:, 2]]

        self.tri_area = 0.5 * (self.vA[:, 0] * self.vB[:, 1] -
                               self.vA[:, 1] * self.vB[:, 0])

        w = np.zeros(tri.npoints)

        for triangle in tri.simplices:
            w[triangle[0]] += 1.0
            w[triangle[1]] += 1.0
            w[triangle[2]] += 1.0

        self.simplex2node_weight = w

        uw.barrier()

        return