def pflatten_node_list_list(nls,filename,do_header=True,silent=False):
    """Flatten a list of node lists to a rectangular ascii file.

    pflatten_node_list_list(nls,filename) writes metadata about the node lists
    in nls, which must be either a list or a tuple of valid node lists, to a 
    header of the file filename, and then calls pflatten_node_list(nl,filename)
    for each nl in nls.

    pflatten_node_list_list(...,do_header=False) omits the header.

    See also: pflatten_node_list
    """

    # Make sure we are not wasting our time.
    assert isinstance(nls,(list,tuple)), "argument 1 must be a list or tuple"
    assert isinstance(filename, str), "argument 2 must be a string"
    assert isinstance(do_header, bool), "do_header must be True or False"
    assert isinstance(silent, bool), "silent must be True or False"
    for nl in nls:
        assert isinstance(nl,(sph.Spheral.NodeSpace.FluidNodeList3d,
                                  sph.Spheral.SolidMaterial.SolidNodeList3d)
                         ), "argument 1 must contain node lists"

    # Determine if file should be compressed.
    if os.path.splitext(filename)[1] == '.gz':
        import gzip
        open = gzip.open
    else:
        import __builtin__
        open = __builtin__.open

    # Write the header.
    if do_header:
        nbGlobalNodes = 0
        for nl in nls:
            nbGlobalNodes += mpi.allreduce(nl.numInternalNodes, mpi.SUM)
        header = header_template.format(nbGlobalNodes)
        if mpi.rank == 0:
            fid = open(filename,'w')
            fid.write(header)
            fid.close()
            pass
        pass

    # Send contents of nls to be flattened.
    for k in range(len(nls)):
        pflatten_node_list(nls[k],filename,do_header=False,nl_id=k,silent=silent)
        pass

    # And Bob's our uncle.
    return
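A gzip-or-builtin dispatch like the one above is reusable on its own. A minimal self-contained sketch of the same idea (plain Python, no Spheral required; the helper name is invented for illustration):

def open_maybe_gzip(filename, mode="w"):
    # Pick gzip.open for .gz files and the builtin open otherwise,
    # mirroring the extension test in pflatten_node_list_list above.
    import os
    if os.path.splitext(filename)[1] == ".gz":
        import gzip
        return gzip.open(filename, mode)
    return open(filename, mode)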
Example #2
File: PyMPITest.py  Project: LLNL/pynamic
    def integrate(self, rectangles, function):
        # equivalent to mpi.WORLD.bcast(n,0) or rather a
        # C call to MPI_Bcast(MPI_COMM_WORLD,n,0,&status)
        n = mpi.bcast(rectangles)

        h = 1.0/n
        sum = 0.0
        for i in range(mpi.rank+1,n+1,mpi.procs):
            x = h * (i-0.5)
            sum = sum + function(x)

        myAnswer = h * sum
        answer = mpi.allreduce(myAnswer,mpi.SUM)
        return answer
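For reference, a serial analogue of the same midpoint-rule quadrature; with mpi.procs == 1 the cyclic loop above reduces to exactly this sum. The classic integrand 4/(1+x^2) on [0,1] recovers pi (function name invented for illustration):

def integrate_serial(n, f):
    # Midpoint rule: sample f at the centers of n equal subintervals.
    h = 1.0 / n
    return h * sum(f(h * (i - 0.5)) for i in range(1, n + 1))

print integrate_serial(100000, lambda x: 4.0 / (1.0 + x * x))  # ~3.14159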
Example #3
def computePi( size, nsamples):
    oldpi, pi, mypi,pisum = 0.0,0.0,0.0,0.0
    done = False
    
    inside = 0
    # Monte Carlo bit
    for i in xrange(nsamples):
        x = random.random()
        y = random.random()
        if ((x*x)+(y*y)<1):
            inside+=1
    # Combine the per-rank counts.  (Note: this snippet uses an MPI-1 style
    # five-argument allreduce that returns a sequence, unlike the two-argument
    # pyMPI form used in the other examples on this page.)
    sum_inside = mpi.allreduce(inside, 1, mpi.MPI_INT, mpi.MPI_SUM, mpi.MPI_COMM_WORLD)
    # The "* 4" is needed because we're computing the number of points inside
    # a QUARTER unit circle.  So we're really computing (PI / 4).
    pi = ( sum_inside[0] / (nsamples*size*1.0) ) * 4
    return pi
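A single-process version of the same Monte Carlo estimate, for comparison. The acceptance probability for a point landing in the quarter circle is pi/4, hence the factor of 4 (function name invented for illustration):

import random

def serial_pi(nsamples):
    inside = 0
    for _ in xrange(nsamples):
        x, y = random.random(), random.random()
        if x * x + y * y < 1.0:
            inside += 1
    return 4.0 * inside / nsamples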
Example #4
File: PyMPITest.py  Project: LLNL/pynamic
    def parallelRunTest(self):
        v = 1+(mpi.rank)%2

        results = []
        for kind in ['MAX','MIN','SUM','PROD','LAND',
                     'LOR','LXOR',
                     'MINLOC','MAXLOC' ]:
            function = getattr(mpi,kind)
            try:
                r0 = mpi.allreduce(v,function)
            except RuntimeError,s:
                self.fail("All reduce") 

            try:
                r1 = mpi.reduce(v,function)
            except RuntimeError,s:
                self.fail("All reduce") 
Example #5
File: PyMPITest.py  Project: LLNL/pynamic
    def parallelRunTest(self):
        #decide on targets
        #values to be reduced
        num1 = mpi.rank
        num2 = 0
        if mpi.rank == 1:
            num2 = 1
        num3 = 1
        string1 = "FoO"
        list1 = [ mpi.rank + 1 ]
        list2 = [ mpi.rank]
        longList = []
        for i in range(2048):
            longList += [ 1 - mpi.rank*mpi.rank, "oOf"]

        #do all reduces
        results = [0, 0, 0, 0, 0, 0]
        results[0] = mpi.allreduce( num2,   mpi.LOR)
        results[1] = mpi.allreduce( list1,   mpi.LAND)
        results[2] = mpi.allreduce( list2, mpi.SUM)
        results[3] = mpi.allreduce( string1,  mpi.SUM)
        results[4] = mpi.allreduce( list1,  mpi.MAXLOC)
        results[5] = mpi.allreduce( longList,  mpi.MAXLOC)

        #correct answers
        correctAnswers = [ 0,0,[],"",0, () ]
        correctList = []
        correctAnswers[0] = 1
        correctAnswers[1] = 1
        correctAnswers[3] = "FoO"*mpi.procs
        correctAnswers[4] = (mpi.procs-1, [mpi.procs])
        for x in range(mpi.procs):
            correctAnswers[2] += [x]
        for x in range(2048):
            correctList += [1, "oOf"]
        correctAnswers[5] = (0, correctList)

        for x in range(6):
            if results[x] != correctAnswers[x]:
                failString = "All Reduce failed on test " + str(x)
                failString += "\nAllReduce gave result " + str(results[x])
                failString += " while correct answer is "
                failString += str(correctAnswers[x]) + "\n"
                self.fail(failString)
        return
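Example #6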
def cooldown(stepsSoFar,timeNow,dt):
    nbGlobalNodes = mpi.allreduce(sum([nl.numInternalNodes for nl in nodeSet]),
                                  mpi.SUM)
    massScale = mPlanet/nbGlobalNodes
    timeScale = 0.1*gravTime
    dashpotParameter = cooldownPower*massScale/timeScale
    for nl in nodeSet:
        v = nl.velocity()
        m = nl.mass()
        u = nl.specificThermalEnergy()
        if cooldownMethod == 'dashpot':
            for k in range(nl.numInternalNodes):
                v[k] *= 1.0 - min(dashpotParameter*dt/m[k], 1)
                u[k] *= 0.0 #TODO: maybe improve this
                pass
            pass
        elif cooldownMethod == 'stomp':
            for k in range(nl.numInternalNodes):
                v[k] *= 1.0 - cooldownPower
                u[k] *= 0.0 #TODO maybe improve this
                pass
            pass
        pass
    pass
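The dashpot branch is an explicit Euler step of a linear drag force: integrating m dv/dt = -c v over a step dt gives v <- v * (1 - c*dt/m), with c = dashpotParameter; the min(..., 1) clamp keeps the factor from going negative when the step would overshoot zero.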
Example #7
    def testIt(self):
        print "Testing TreeDistributedBoundary1d on domain %i of %i domains" % \
              (domainID, nDomains)

        # Set the ghost nodes for each domain distributed NodeList.
        self.domainbc.setAllGhostNodes(self.dataBase)
        self.domainbc.finalizeGhostBoundary()
        for nodes in self.dataBase.nodeLists():
            nodes.neighbor().updateNodes()

        # Exchange the global node ID fields.
        self.domainbc.applyGhostBoundary(self.globalIDField1)
        self.domainbc.applyGhostBoundary(self.globalIDField2)
        self.domainbc.applyGhostBoundary(self.globalIDField3)
        self.domainbc.finalizeGhostBoundary()

        # Iterate over each domain.
        for testProc in xrange(mpi.procs):

            # Test each NodeList.
            for (nodes, globalIDField) in ((self.nodes1, self.globalIDField1),
                                           (self.nodes2, self.globalIDField2),
                                           (self.nodes3, self.globalIDField3)):

                # Tell everyone how many nodes we'll be testing, and iterate
                # over them
                n = mpi.bcast(nodes.numInternalNodes, testProc)
                for i in xrange(n):

                    # Broadcast the position and H from the testing processor.
                    rilocal = Vector1d()
                    Hilocal = SymTensor1d()
                    if mpi.rank == testProc:
                        rilocal = nodes.positions()[i]
                        Hilocal = nodes.Hfield()[i]
                    ri = mpi.bcast(rilocal, testProc)
                    Hi = mpi.bcast(Hilocal, testProc)

                    # Get the global answer set for this node.
                    answer = mpi.allreduce([
                        self.globalIDField1[j] for j in findNeighborNodes(
                            ri, Hi, self.kernelExtent, self.nodes1)
                    ] + [
                        self.globalIDField2[j] for j in findNeighborNodes(
                            ri, Hi, self.kernelExtent, self.nodes2)
                    ] + [
                        self.globalIDField3[j] for j in findNeighborNodes(
                            ri, Hi, self.kernelExtent, self.nodes3)
                    ], mpi.SUM)

                    # Have the testing processor build its own version.
                    if mpi.rank == testProc:
                        masterLists = vector_of_vector_of_int()
                        coarseNeighbors = vector_of_vector_of_int()
                        refineNeighbors = vector_of_vector_of_int()
                        self.dataBase.setMasterNodeLists(
                            ri, Hi, masterLists, coarseNeighbors)
                        self.dataBase.setRefineNodeLists(
                            ri, Hi, coarseNeighbors, refineNeighbors)
                        assert len(refineNeighbors) == 3
                        refine = []
                        for k, globalIDs in enumerate([
                                self.globalIDField1, self.globalIDField2,
                                self.globalIDField3
                        ]):
                            refine.extend(
                                [globalIDs[j] for j in refineNeighbors[k]])

                        # Check the answer.
                        test = checkNeighbors(refine, answer)
                        if not test:
                            sys.stderr.write("FAILED for node %i\n" % i)
                            refine.sort()
                            answer.sort()
                            sys.stderr.write(" refine: %s\n" % str(refine))
                            sys.stderr.write(" answer: %s\n" % str(answer))
                            sys.stderr.write(
                                "  extra: %s\n" %
                                str([x for x in refine if x not in answer]))
                            sys.stderr.write(
                                "missing: %s\n" %
                                str([x for x in answer if x not in refine]))
                        else:
                            sys.stderr.write("PASSED for node %i\n" % i)
                        assert test
Example #8
#-------------------------------------------------------------------------------
# Compute the analytic answer.
#-------------------------------------------------------------------------------
import mpi
import NohAnalyticSolution
rlocal = [pos.x for pos in nodes1.positions().internalValues()]
r = mpi.reduce(rlocal, mpi.SUM)
h1 = 1.0/(nPerh*dx)
answer = NohAnalyticSolution.NohSolution(1,
                                         r = r,
                                         v0 = -1.0,
                                         h0 = 1.0/h1)

# Compute the simulated specific entropy.
rho = mpi.allreduce(nodes1.massDensity().internalValues(), mpi.SUM)
Pf = ScalarField("pressure", nodes1)
nodes1.pressure(Pf)
P = mpi.allreduce(Pf.internalValues(), mpi.SUM)
A = [Pi/rhoi**gamma for (Pi, rhoi) in zip(P, rho)]

# The analytic solution for the simulated entropy.
xprof = mpi.allreduce([x.x for x in nodes1.positions().internalValues()], mpi.SUM)
xans, vans, uans, rhoans, Pans, hans = answer.solution(control.time(), xprof)
Aans = [Pi/rhoi**gamma for (Pi, rhoi) in zip(Pans,  rhoans)]
L1 = 0.0
for i in xrange(len(rho)):
    L1 = L1 + abs(rho[i] - rhoans[i])
L1_tot = L1 / len(rho)
if mpi.rank == 0 and outputFile != "None":
    print "L1=", L1_tot, "\n"
Example #9
def fragmentProperties(nodeList, fragField, strain=None):

    startTime = time.time()

    # Decide what our dimensionality is.
    if isinstance(nodeList, Spheral.NodeList1d):
        Vector = Spheral.Vector1d
        SymTensor = Spheral.SymTensor1d
        ScalarField = Spheral.ScalarField1d
    elif isinstance(nodeList, Spheral.NodeList2d):
        Vector = Spheral.Vector2d
        SymTensor = Spheral.SymTensor2d
        ScalarField = Spheral.ScalarField2d
    elif isinstance(nodeList, Spheral.NodeList3d):
        Vector = Spheral.Vector3d
        SymTensor = Spheral.SymTensor3d
        ScalarField = Spheral.ScalarField3d
    else:
        raise "fragmentProperties ERROR: What the heck is %s!  I expected a NodeList." % str(
            nodeList)

    # Are we compiling stats on the strain?
    usingStrain = strain is not None

    # Determine how many fragments there are.
    numFragments = mpi.allreduce(max(list(fragField.internalValues()) + [0]),
                                 mpi.MAX) + 1
    assert numFragments >= 1

    # Prepare the result.
    result = {}
    for i in xrange(numFragments):
        result[i] = {
            "mass": 0.0,
            "num nodes": 0,
            "position": Vector(),
            "velocity": Vector(),
            "volume": 0.0,
            "mass density": 0.0,
            "thermal energy": 0.0,
            "pressure": 0.0,
            "shape tensor": SymTensor(),
            "shape eigen": Vector(),
        }
        if usingStrain:
            result[i]["strain (min)"] = 0.0
            result[i]["strain (max)"] = 0.0
            result[i]["strain (vol)"] = 0.0
    assert len(result) == numFragments

    # Grab the state fields.
    mass = nodeList.mass()
    position = nodeList.positions()
    velocity = nodeList.velocity()
    rho = nodeList.massDensity()
    u = nodeList.specificThermalEnergy()
    P = ScalarField("pressure", nodeList)
    nodeList.pressure(P)

    # Now iterate over the nodes and accumulate the local result.
    for i in xrange(nodeList.numInternalNodes):
        fragID = fragField[i]
        assert fragID < numFragments
        mi = mass[i]
        result[fragID]["mass"] += mi
        result[fragID]["num nodes"] += 1
        result[fragID]["position"] += position[i] * mi
        result[fragID]["velocity"] += velocity[i] * mi
        result[fragID]["volume"] += mi / rho[i]
        result[fragID]["mass density"] += mi * rho[i]
        result[fragID]["thermal energy"] += mi * u[i]
        result[fragID]["pressure"] += mi * P[i]
        if usingStrain:
            sev = strain[i].eigenValues()
            result[fragID]["strain (min)"] += mi * sev.minElement()
            result[fragID]["strain (max)"] += mi * sev.maxElement()
            result[fragID]["strain (vol)"] += mi * sev.sumElements()
    assert fuzzyEqual(sum([result[i]["mass"] for i in result]),
                      sum(nodeList.mass().internalValues()))

    # Reduce for the global fragment properties.
    assert mpi.allreduce(len(result), mpi.SUM) == numFragments * mpi.procs
    for fragID in xrange(numFragments):
        mfrag = mpi.allreduce(result[fragID]["mass"], mpi.SUM)
        assert mfrag > 0.0
        result[fragID]["mass"] = mfrag
        result[fragID]["num nodes"] = mpi.allreduce(
            result[fragID]["num nodes"], mpi.SUM)
        result[fragID]["position"] = mpi.allreduce(result[fragID]["position"],
                                                   mpi.SUM) / mfrag
        result[fragID]["velocity"] = mpi.allreduce(result[fragID]["velocity"],
                                                   mpi.SUM) / mfrag
        result[fragID]["volume"] = mpi.allreduce(result[fragID]["volume"],
                                                 mpi.SUM)
        result[fragID]["mass density"] = mpi.allreduce(
            result[fragID]["mass density"], mpi.SUM) / mfrag
        result[fragID]["thermal energy"] = mpi.allreduce(
            result[fragID]["thermal energy"], mpi.SUM) / mfrag
        result[fragID]["pressure"] = mpi.allreduce(result[fragID]["pressure"],
                                                   mpi.SUM) / mfrag
        if usingStrain:
            result[fragID]["strain (min)"] = mpi.allreduce(
                result[fragID]["strain (min)"], mpi.SUM) / mfrag
            result[fragID]["strain (max)"] = mpi.allreduce(
                result[fragID]["strain (max)"], mpi.SUM) / mfrag
            result[fragID]["strain (vol)"] = mpi.allreduce(
                result[fragID]["strain (vol)"], mpi.SUM) / mfrag

    # Now that we have the center of mass for each fragment, we can evaluate the shape tensors.
    for i in xrange(nodeList.numInternalNodes):
        fragID = fragField[i]
        assert fragID < numFragments
        mi = mass[i]
        dr = position[i] - result[fragID]["position"]
        result[fragID]["shape tensor"] += dr.selfdyad() * mi

    # Reduce the global shapes.
    for fragID in xrange(numFragments):
        mfrag = result[fragID]["mass"]
        assert mfrag > 0.0
        result[fragID]["shape tensor"] = mpi.allreduce(
            result[fragID]["shape tensor"], mpi.SUM) / mfrag
        result[fragID]["shape eigen"] = result[fragID][
            "shape tensor"].eigenValues()

    # That's it.
    assert fuzzyEqual(
        sum([result[i]["mass"] for i in result]),
        mpi.allreduce(sum(nodeList.mass().internalValues()), mpi.SUM))

    stopTime = time.time()
    print "fragmentProperties:  Required %g seconds." % (stopTime - startTime)

    return result
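Every reduction above follows the same idiom: accumulate local mass-weighted sums, then divide the global sum by the global mass. A minimal sketch of that pattern (assuming the pyMPI-style mpi module used throughout this page; the function name is invented):

import mpi

def global_mass_weighted_mean(local_mass, local_weighted_sum):
    # local_weighted_sum is sum(m_i * q_i) over the local nodes.
    mtot = mpi.allreduce(local_mass, mpi.SUM)
    assert mtot > 0.0
    return mpi.allreduce(local_weighted_sum, mpi.SUM) / mtot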
Example #10
def hadesDump0(integrator,
               nsample,
               xmin,
               xmax,
               W,
               isotopes,
               baseFileName,
               baseDirectory=".",
               dumpGhosts=False,
               materials="all"):

    # We currently only support 3-D.
    assert isinstance(integrator, Spheral.Integrator3d)
    assert len(nsample) == 3
    assert isinstance(xmin, Spheral.Vector3d)
    assert isinstance(xmax, Spheral.Vector3d)
    assert isinstance(W, Spheral.TableKernel3d)
    for x in isotopes:
        for xx in x:
            assert len(xx) == 2

    # Prepare to time how long this takes.
    t0 = time.clock()

    # Extract the data base.
    db = integrator.dataBase()

    # If requested, set ghost node info.
    if dumpGhosts and integrator is not None:
        state = Spheral.State3d(db, integrator.physicsPackages())
        derivs = Spheral.StateDerivatives3d(db, integrator.physicsPackages())
        integrator.setGhostNodes()
        integrator.applyGhostBoundaries(state, derivs)

    # Get the set of material names we're going to write.
    if materials == "all":
        materials = [n for n in db.fluidNodeLists()]
    assert len(materials) == len(isotopes)

    # Make sure the output directory exists.
    import mpi
    import os
    if mpi.rank == 0 and not os.path.exists(baseDirectory):
        try:
            os.makedirs(baseDirectory)
        except:
            raise "Cannot create output directory %s" % baseDirectory
    mpi.barrier()

    # Open a file for the output.
    currentTime = integrator.currentTime
    currentCycle = integrator.currentCycle
    filename = baseDirectory + "/" + baseFileName + "-time=%g-cycle=%i.hades" % (
        currentTime, currentCycle)

    if mpi.rank == 0:
        f = open(filename, "wb")

        # Write the header info.
        #f.write(hadesHeader)
        f.write(struct.pack("I", len(materials)))
        f.write(struct.pack("ddd", *tuple(xmin.elements())))
        f.write(struct.pack("ddd", *tuple(xmax.elements())))
        f.write(struct.pack("III", *nsample))
        for materialIsotopes in isotopes:
            f.write(struct.pack("I", len(materialIsotopes)))
            for iso in materialIsotopes:
                f.write(struct.pack("Id", *iso))

    # For each material, sample the mass density and write it out.
    ntot = nsample[0] * nsample[1] * nsample[2]
    for nodes in materials:
        r = Spheral.VectorFieldList3d()
        w = Spheral.ScalarFieldList3d()
        H = Spheral.SymTensorFieldList3d()
        rho = Spheral.ScalarFieldList3d()
        r.appendField(nodes.positions())
        w.appendField(nodes.weight())
        H.appendField(nodes.Hfield())
        rho.appendField(nodes.massDensity())
        fieldListSet = Spheral.FieldListSet3d()
        fieldListSet.ScalarFieldLists.append(rho)
        rhosamp = Spheral.sampleMultipleFields2LatticeMash(
            fieldListSet, r, w, H, W, xmin, xmax, nsample)[0][0][1]
        assert mpi.allreduce(len(rhosamp), mpi.SUM) == ntot
        icum = 0
        for sendProc in xrange(mpi.procs):
            valsproc = [(i, x) for (i, x) in zip(range(ntot), rhosamp)
                        if x > 0.0]
            vals = mpi.bcast(valsproc, sendProc)
            if mpi.rank == 0:
                f.write(struct.pack("I", len(vals)))
                for i, x in vals:
                    f.write(struct.pack("id", i + icum, x))
            icum += len(vals)

    if mpi.rank == 0:
        # Close the file and we're done.
        f.close()

    mpi.barrier()
    print "hadesDump finished: required %0.2f seconds" % (time.clock() - t0)

    return
Example #11
        result = [pos[i].x, vel[i].x, rho[i], eps[i], P[i]]
    else:
        result = []
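    # Only the rank that owns the sampled node contributed values; every
    # other rank contributes an empty list, so allreduce with mpi.SUM
    # concatenates the lists and effectively broadcasts the owner's values.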
    result = mpi.allreduce(result, mpi.SUM)
    return tuple(result)

# Al and Ta sample points.
histories = []
tracerNumber = 1
for nodes, samplePositions in ((nodesAl, (-0.0375, -0.0625, -0.9875)),
                               (nodesTa, ( 0.0375,  0.0625,  0.9875))):
    pos = nodes.positions()
    for x0 in samplePositions:
        thpt = [(abs(pos[i].x - x0), i) for i in xrange(nodes.numInternalNodes)] + [(1e100, -1)]
        thpt.sort()
        dxmin = mpi.allreduce(thpt[0][0], mpi.MIN)
        if thpt[0][0] == dxmin:
            i = thpt[0][1]
            indices = [i]
            sys.stderr.write("Tracer %i is node %i @ %s.\n" % (tracerNumber, i, pos[i]))
        else:
            indices = []
        histories.append(NodeHistory(nodes, indices, tp106tracersample, tracerOutputName % tracerNumber, 
                                     labels = ("pos", "vel", "rho", "eps", "P")))
        tracerNumber += 1

#-------------------------------------------------------------------------------
# Build the controller.
#-------------------------------------------------------------------------------
control = SpheralController(integrator, WT,
                            statsStep = statsStep,
Example #12
    def sample(self, cycle, ttime, dt):

        # Import the geometry appropriate Spheral types.
        assert self.geometry in ("1d", "2d", "3d", "RZ")
        exec("from Spheral%s import *" % self.geometry)

        # Do we need to initialize anything?
        if self.initializefunc:
            self.initializefunc()

        # How many sample values are we going for?
        for nodeListi, nodeList in enumerate(self.db.fluidNodeLists()):
            if nodeList.numNodes > 0:
                nvals = len(self.samplefunc(nodeListi, 0))
        assert nvals > 0

        # Prepare empty slots in the history.
        self.cycleHistory.append(cycle)
        self.timeHistory.append(ttime)
        self.sampleHistory.append([0.0] * nvals)
        Wsum = 0.0

        # Grab position and H FieldLists.
        positions = self.db.globalPosition
        H = self.db.globalHfield

        # Prepare the Neighbor information for sampling at this pos, and walk the neighbors.
        self.db.setMasterNodeLists(self.position, SymTensor.zero)
        self.db.setRefineNodeLists(self.position, SymTensor.zero)
        for nodeListj, nodeList in enumerate(self.db.fluidNodeLists()):
            for j in nodeList.neighbor().coarseNeighborList:

                # Compute the weighting for this position.
                posj = positions(nodeListj, j)
                Hj = H(nodeListj, j)
                Wj = self.W.kernelValue(
                    (Hj * (posj - self.position)).magnitude(), 1.0)**2
                Wsum += Wj

                # Use the user supplied method to extract the field values for this (nodeList, index)
                fieldvals = self.samplefunc(nodeListj, j)
                assert len(fieldvals) == nvals

                # Increment the sampled values for this position.
                for i in xrange(nvals):
                    self.sampleHistory[-1][i] += Wj * fieldvals[i]

        # Normalize the measurements.
        Wsum = max(1.0e-10, mpi.allreduce(Wsum, mpi.SUM))
        for i in xrange(nvals):
            self.sampleHistory[-1][i] = mpi.allreduce(
                self.sampleHistory[-1][i], mpi.SUM) / Wsum

        # Update the history file.
        if mpi.rank == 0:
            assert self.file is not None
            samplestr = ""
            for x in self.sampleHistory[-1]:
                samplestr += str(x) + " "
            self.file.write("%i \t %g \t %s\n" % (cycle, ttime, samplestr))
            self.file.flush()

        return
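The normalization at the end is a Shepard-style weighted average: schematically f(x0) ~ sum_j(w_j * f_j) / sum_j(w_j) with w_j = W(|H_j * (x_j - x0)|)**2; note the kernel value is squared when accumulating both Wsum and the samples.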
Example #13
def hadesDump(integrator,
              nsample,
              xmin,
              xmax,
              W,
              baseFileName,
              baseDirectory=".",
              procDirBaseName="domains",
              mask=None,
              materials=None):

    # Currently supports 2D and 3D.
    db = integrator.dataBase()
    if db.nDim == 2:
        import Spheral2d as sph
    elif db.nDim == 3:
        import Spheral3d as sph
    else:
        raise RuntimeError, "hadesDump ERROR: must be 2D or 3D"

    # Prepare to time how long this takes.
    t0 = time.clock()

    # Get the set of material names we're going to write.
    if materials is None:
        materials = list(db.fluidNodeLists())

    # HACK!  We are currently restricting to writing single material output!
    assert len(materials) == 1

    # Make sure the output directory exists.
    if mpi.rank == 0 and not os.path.exists(baseDirectory):
        try:
            os.makedirs(baseDirectory)
        except:
            raise RuntimeError, "Cannot create output directory %s" % baseDirectory
    mpi.barrier()

    # Sample the density.
    ntot = reduce(mul, nsample)
    for nodes in materials:
        print "hadesDump: sampling density for %s..." % nodes.name
        r = sph.VectorFieldList()
        H = sph.SymTensorFieldList()
        rho = sph.ScalarFieldList()
        r.appendField(nodes.positions())
        H.appendField(nodes.Hfield())
        rho.appendField(nodes.massDensity())

        mf = nodes.mass()
        rhof = nodes.massDensity()
        wf = sph.ScalarField("volume", nodes)
        for i in xrange(nodes.numNodes):
            wf[i] = mf[i] / max(1e-100, rhof[i])
        w = sph.ScalarFieldList()
        w.copyFields()
        w.appendField(wf)
        #w.appendField(sph.ScalarField("weight", nodes, 1.0))

        fieldListSet = sph.FieldListSet()
        fieldListSet.ScalarFieldLists.append(rho)
        localMask = sph.IntFieldList()
        if mask is None:
            localMask.copyFields()
            localMask.appendField(sph.IntField("mask", nodes, 1))
        else:
            localMask.appendField(mask.fieldForNodeList(nodes))

        scalar_samples = sph.vector_of_vector_of_double()
        vector_samples = sph.vector_of_vector_of_Vector()
        tensor_samples = sph.vector_of_vector_of_Tensor()
        symTensor_samples = sph.vector_of_vector_of_SymTensor()
        nsample_vec = sph.vector_of_int(db.nDim)
        for i in xrange(db.nDim):
            nsample_vec[i] = nsample[i]

        sph.sampleMultipleFields2Lattice(fieldListSet, r, w, H, localMask, W,
                                         xmin, xmax, nsample_vec,
                                         scalar_samples, vector_samples,
                                         tensor_samples, symTensor_samples)
        print "Generated %i scalar fields" % len(scalar_samples)

        # Rearrange the sampled data into rectangular blocks due to Silo's quad mesh limitations.
        rhosamp, xminblock, xmaxblock, nblock, jsplit = shuffleIntoBlocks(
            db.nDim, scalar_samples[0], xmin, xmax, nsample)
        if rhosamp:
            print "rho range: ", min(rhosamp), max(rhosamp)
        print "     xmin: ", xmin
        print "     xmax: ", xmax
        print "xminblock: ", xminblock
        print "xmaxblock: ", xmaxblock
        print "   nblock: ", nblock
        assert mpi.allreduce(len(rhosamp), mpi.SUM) == ntot

    # Write the master file.
    maxproc = writeMasterSiloFile(ndim=db.nDim,
                                  nblock=nblock,
                                  jsplit=jsplit,
                                  baseDirectory=baseDirectory,
                                  baseName=baseFileName,
                                  procDirBaseName=procDirBaseName,
                                  materials=materials,
                                  rhosamp=rhosamp,
                                  label="Spheral++ cartesian sampled output",
                                  time=integrator.currentTime,
                                  cycle=integrator.currentCycle)

    # Write the process files.
    writeDomainSiloFile(ndim=db.nDim,
                        jsplit=jsplit,
                        maxproc=maxproc,
                        baseDirectory=baseDirectory,
                        baseName=baseFileName,
                        procDirBaseName=procDirBaseName,
                        materials=materials,
                        rhosamp=rhosamp,
                        xminblock=xminblock,
                        xmaxblock=xmaxblock,
                        nblock=nblock,
                        label="Spheral++ cartesian sampled output",
                        time=integrator.currentTime,
                        cycle=integrator.currentCycle,
                        pretendRZ=db.isRZ)

    mpi.barrier()
    print "hadesDump finished: required %0.2f seconds" % (time.clock() - t0)
    return
Example #14
elif problem == "cylindrical":
    bcs = [
        ReflectingBoundary(Plane(Vector(z0, r0), Vector(1.0, 0.0))),
        ReflectingBoundary(Plane(Vector(z1, r0), Vector(-1.0, 0.0))),
        ReflectingBoundary(Plane(Vector(z0, r2), Vector(0.0, -1.0)))
    ]
    if r0 > 0.0:
        bcs.append(ReflectingBoundary(Plane(Vector(z0, r0), Vector(0.0, 1.0))))
else:
    assert problem == "spherical"
    boundNodes = vector_of_int()
    pos = nodes2.positions()
    for i in xrange(nodes2.numInternalNodes):
        if pos[i].magnitude() > rmax2:
            boundNodes.append(i)
    print "Selected %i boundary nodes" % mpi.allreduce(len(boundNodes),
                                                       mpi.SUM)
    denialPlane = Plane(Vector(-2.0 * rmax2, 0.0), Vector(
        1.0, 0.0))  # A fake denial plane since we're working in circles.
    bcs = [
        ReflectingBoundary(Plane(Vector(0.0, 0.0), Vector(1.0, 0.0))),
        ConstantBoundary(nodes2, boundNodes, denialPlane)
    ]

for bc in bcs:
    for p in packages:
        p.appendBoundary(bc)

#-------------------------------------------------------------------------------
# Construct an integrator.
#-------------------------------------------------------------------------------
integrator = IntegratorConstructor(db)
Example #15
    def __init__(self,
                 surface,
                 rho,
                 nx,
                 nNodePerh=2.01,
                 SPH=False,
                 rejecter=None):
        self.surface = surface
        self.rho0 = rho

        # Figure out bounds and numbers of nodes to scan the volume with.
        xmin = surface.xmin
        xmax = surface.xmax
        box = xmax - xmin
        assert box.minElement() > 0.0
        dx = box.x / nx
        ny = max(1, int(box.y / dx + 0.5))
        nz = max(1, int(box.z / dx + 0.5))

        # Some local geometry.
        ntot0 = nx * ny * nz
        dy = box.y / ny
        dz = box.z / nz
        volume = box.x * box.y * box.z
        self.m0 = rho * volume / ntot0
        hx = 1.0 / (nNodePerh * dx)
        hy = 1.0 / (nNodePerh * dy)
        hz = 1.0 / (nNodePerh * dz)
        self.H0 = SymTensor(hx, 0.0, 0.0, 0.0, hy, 0.0, 0.0, 0.0, hz)

        # Build the initial positions.
        pos0 = fillFacetedVolume(surface, nx, mpi.rank, mpi.procs)

        # Something strange here...
        pos = pos0  # [p for p in pos0 if surface.contains(p)]
        nsurface = mpi.allreduce(len(pos), mpi.SUM)

        # Apply any rejecter.
        print "Applying rejection..."
        if rejecter:
            mask = [rejecter.accept(ri.x, ri.y, ri.z) for ri in pos]
        else:
            mask = [True] * len(pos)
        n0 = len(pos)
        self.x = [pos[i].x for i in xrange(n0) if mask[i]]
        self.y = [pos[i].y for i in xrange(n0) if mask[i]]
        self.z = [pos[i].z for i in xrange(n0) if mask[i]]
        n = len(self.x)

        # Pick a mass per point so we get exactly the correct total mass inside the surface
        # before any rejection.
        M0 = surface.volume * self.rho0
        self.m0 = M0 / nsurface
        self.m = [self.m0] * n

        # At this point we have a less than optimal domain decomposition, but this will
        # be redistributed later anyway so take it and run.
        self.rho = [self.rho0] * n
        self.H = [self.H0] * n
        NodeGeneratorBase.__init__(self, False, self.m)
        return
Example #16
    def __init__(self, nx, ny, rho, xmin, xmax, nNodePerh=2.01, SPH=False):
        assert nx > 0
        assert ny > 0
        assert len(xmin) == 2
        assert len(xmax) == 2
        assert xmax[0] >= xmin[0]
        assert xmax[1] >= xmin[1]
        assert nNodePerh > 0.0

        # Remember the input.
        self.nx = nx
        self.ny = ny
        self.xmin = xmin
        self.xmax = xmax

        # If the user provided a constant for rho, then use the constantRho
        # class to provide this value.
        if isinstance(rho, float):
            self.rho = ConstantRho(rho)
        else:
            self.rho = rho

        # Compute the number of domains in each direction.
        lx = xmax[0] - xmin[0]
        ly = xmax[1] - xmin[1]
        nxdomains = int(sqrt(lx / ly * mpi.procs) + 0.1)
        nydomains = int(mpi.procs / nxdomains + 0.1)
        assert nxdomains * nydomains == mpi.procs

        # The number of nodes per domain.
        nxperdomain = nx / nxdomains
        nxremainder = nx % nxdomains
        nyperdomain = ny / nydomains
        nyremainder = ny % nydomains
        assert nxremainder < nxdomains
        assert nyremainder < nydomains

        # Compute the value for H.
        dx = lx / nx
        dy = ly / ny
        hx = nNodePerh * dx
        hy = nNodePerh * dy
        assert hx > 0.0 and hy > 0.0
        H0 = SymTensor2d(1.0 / hx, 0.0, 0.0, 1.0 / hy)
        if SPH:
            hxy = sqrt(hx * hy)
            H0 = SymTensor2d.one / hxy

        # The mass per node.
        m0 = lx * ly * rho / (nx * ny)
        assert m0 > 0.0

        # Compute our domain indices.
        ixdomain = mpi.rank % nxdomains
        iydomain = mpi.rank / nxdomains
        ixmin = nodeindex(ixdomain, nxperdomain, nxremainder)
        ixmax = nodeindex(ixdomain + 1, nxperdomain, nxremainder)
        iymin = nodeindex(iydomain, nyperdomain, nyremainder)
        iymax = nodeindex(iydomain + 1, nyperdomain, nyremainder)
        assert ixmin < ixmax
        assert ixmin >= 0 and ixmax <= nx
        assert iymin < iymax
        assert iymin >= 0 and iymax <= ny

        # Now fill in the node values for this domain.
        self.x = []
        self.y = []
        self.m = []
        self.H = []
        for iy in xrange(iymin, iymax):
            for ix in xrange(ixmin, ixmax):
                self.x.append(xmin[0] + (ix + 0.5) * dx)
                self.y.append(xmin[1] + (iy + 0.5) * dy)
                self.m.append(m0)
                self.H.append(H0)
        assert mpi.allreduce(len(self.x), mpi.SUM) == nx * ny

        # Initialize the base class.
        NodeGeneratorBase.__init__(self, False)

        return
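The nodeindex helper is not shown in this snippet. A sketch consistent with the assertions above, in which the first nremainder domains each take one extra node (a hypothetical reconstruction for illustration only):

def nodeindex(idomain, nperdomain, nremainder):
    # Domains below nremainder hold nperdomain + 1 nodes; the rest hold
    # nperdomain, so nodeindex(ndomains, ...) comes out to the full n.
    return idomain * nperdomain + min(idomain, nremainder)

Example #17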
    disp = Vector((rImpactor + rTarget)*cos(pi/180.0*angle_impact),
                  (rImpactor + rTarget)*sin(pi/180.0*angle_impact),
                  0.0)
    for i in xrange(impactorGenerator.localNumNodes()):
        impactorGenerator.x[i] += disp.x
        impactorGenerator.y[i] += disp.y
        impactorGenerator.z[i] += disp.z

    print "Starting node distribution..."
    distributeNodes3d((target, targetGenerator),
                      (impactor,  impactorGenerator))

    nGlobalNodes = 0
    for n in nodeSet:
        print "Generator info for %s" % n.name
        print "   Minimum number of nodes per domain : ", mpi.allreduce(n.numInternalNodes, mpi.MIN)
        print "   Maximum number of nodes per domain : ", mpi.allreduce(n.numInternalNodes, mpi.MAX)
        print "               Global number of nodes : ", mpi.allreduce(n.numInternalNodes, mpi.SUM)
        nGlobalNodes += mpi.allreduce(n.numInternalNodes, mpi.SUM)
    del n
    print "Total number of (internal) nodes in simulation: ", nGlobalNodes
    print "Ratio of impactor/target node mass : ", impactor.mass().max()/target.mass().max()
    
    # Initialize the impactor velocity.
    vel = impactor.velocity()
    for i in xrange(impactor.numInternalNodes):
        vel[i].x = -vImpact

# Construct a DataBase to hold our node lists.
db = DataBase()
for n in nodeSet:
Example #18
    def __init__(self, nx, ny, nz, rho, xmin, xmax, nNodePerh=2.01, SPH=False):
        assert nx > 0
        assert ny > 0
        assert nz > 0
        assert len(xmin) == 3
        assert len(xmax) == 3
        assert xmax[0] >= xmin[0]
        assert xmax[1] >= xmin[1]
        assert xmax[2] >= xmin[2]
        assert nNodePerh > 0.0

        # Remember the input.
        self.nx = nx
        self.ny = ny
        self.nz = nz
        self.xmin = xmin
        self.xmax = xmax

        # If the user provided a constant for rho, then use the constantRho
        # class to provide this value.
        if isinstance(rho, float):
            self.rho = ConstantRho(rho)
        else:
            self.rho = rho

        # Compute the number of domains in each direction.
        lx = xmax[0] - xmin[0]
        ly = xmax[1] - xmin[1]
        lz = xmax[2] - xmin[2]
        nxdomains = int((lx * lx / (ly * lz) * mpi.procs)**(1.0 / 3.0) + 0.1)
        nydomains = int(ly / lx * nxdomains + 0.1)
        nzdomains = int(lz / lx * nxdomains + 0.1)
        assert nxdomains * nydomains * nzdomains == mpi.procs

        # The number of nodes per domain.
        nxperdomain = nx / nxdomains
        nxremainder = nx % nxdomains
        nyperdomain = ny / nydomains
        nyremainder = ny % nydomains
        nzperdomain = nz / nzdomains
        nzremainder = nz % nzdomains
        assert nxremainder < nxdomains
        assert nyremainder < nydomains
        assert nzremainder < nzdomains

        # Compute the value for H.
        dx = lx / nx
        dy = ly / ny
        dz = lz / nz
        hx = nNodePerh * dx
        hy = nNodePerh * dy
        hz = nNodePerh * dz
        assert hx > 0.0 and hy > 0.0 and hz > 0.0
        H0 = SymTensor3d(1.0 / hx, 0.0, 0.0, 0.0, 1.0 / hy, 0.0, 0.0, 0.0,
                         1.0 / hz)
        if SPH:
            hxyz = (hx * hy * hz)**(1.0 / 3.0)
            H0 = SymTensor3d.one / hxyz

        # The mass per node.
        m0 = lx * ly * lz * rho / (nx * ny * nz)
        assert m0 > 0.0

        # Compute our domain indices.
        ixdomain = mpi.rank % nxdomains
        iydomain = (mpi.rank / nxdomains) % nydomains
        izdomain = mpi.rank / (nxdomains * nydomains)
        ixmin = nodeindex(ixdomain, nxperdomain, nxremainder)
        ixmax = nodeindex(ixdomain + 1, nxperdomain, nxremainder)
        iymin = nodeindex(iydomain, nyperdomain, nyremainder)
        iymax = nodeindex(iydomain + 1, nyperdomain, nyremainder)
        izmin = nodeindex(izdomain, nzperdomain, nzremainder)
        izmax = nodeindex(izdomain + 1, nzperdomain, nzremainder)
        assert ixmin < ixmax
        assert ixmin >= 0 and ixmax <= nx
        assert iymin < iymax
        assert iymin >= 0 and iymax <= ny
        assert izmin < izmax
        assert izmin >= 0 and izmax <= nz

        # Now fill in the node values for this domain.
        self.x = []
        self.y = []
        self.z = []
        self.m = []
        self.H = []
        for iz in xrange(izmin, izmax):
            for iy in xrange(iymin, iymax):
                for ix in xrange(ixmin, ixmax):
                    self.x.append(xmin[0] + (ix + 0.5) * dx)
                    self.y.append(xmin[1] + (iy + 0.5) * dy)
                    self.z.append(xmin[2] + (iz + 0.5) * dz)
                    self.m.append(m0)
                    self.H.append(H0)
        assert mpi.allreduce(len(self.x), mpi.SUM) == nx * ny * nz

        # Initialize the base class.
        NodeGeneratorBase.__init__(self, False)

        return
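Example #19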
    print "Generating node distribution."
    generator = GenerateNodeDistribution2d(nx,
                                           ny,
                                           rho0,
                                           seed,
                                           xmin = xmin1,
                                           xmax = xmax2,
                                           nNodePerh = nPerh)
    distributeNodes2d((nodes, generator))
    output('mpi.reduce(nodes.numInternalNodes, mpi.MIN)')
    output('mpi.reduce(nodes.numInternalNodes, mpi.MAX)')
    output('mpi.reduce(nodes.numInternalNodes, mpi.SUM)')

    m = nodes.mass()
    rho = nodes.massDensity()
    print "Area sum: ", mpi.allreduce(sum([m[i]/rho[i] for i in xrange(nodes.numInternalNodes)]), mpi.SUM)

    # Construct the flaws.
    localFlawsBA = weibullFlawDistributionBenzAsphaug(volume,
                                                      1.0,
                                                      randomSeed,
                                                      kWeibull,
                                                      mWeibull,
                                                      nodes,
                                                      1,
                                                      1)
    localFlawsO = weibullFlawDistributionOwen(randomSeed,
                                              kWeibull,
                                              mWeibull,
                                              nodes,
                                              1,     # numFlawsPerNode
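Example #20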
    print "Generating the hydrostatic planet"

    genIron = GenerateIcosahedronMatchingProfile3d(nrPlanet,
                                                   rhoProfile,
                                                   rmin = 0.0,
                                                   rmax = rCore,
                                                   nNodePerh = nPerh,
                                                   rMaxForMassMatching = rPlanet)
    genGranite = GenerateIcosahedronMatchingProfile3d(nrPlanet,
                                                      rhoProfile,
                                                      rmin = rCore,
                                                      rmax = rPlanet,
                                                      nNodePerh = nPerh,
                                                      rMaxForMassMatching = rPlanet)

    msum = mpi.allreduce(sum(genIron.m + [0.0]), mpi.SUM)
    msum += mpi.allreduce(sum(genGranite.m + [0.0]), mpi.SUM)
    assert msum > 0.0
    print "Found planet mass = %g kg." % (msum*units.unitMassKg)



    
    print "Starting node distribution..."
    if mpi.procs > 1:
        from VoronoiDistributeNodes import distributeNodes3d
    else:
        from DistributeNodes import distributeNodes3d
    distributeNodes3d((nodesIron,genIron),(nodesGranite,genGranite))
    #distributor((nodes1,genPlanet))
    #distributor((nodes3,genCollider))
Example #21
output("db.numFluidNodeLists")

#-------------------------------------------------------------------------------
# Construct constant velocity boundary conditions to be applied to the rod ends.
#-------------------------------------------------------------------------------
x0Nodes = vector_of_int()
x1Nodes = vector_of_int()
[
    x0Nodes.append(i) for i in xrange(nodes.numInternalNodes)
    if nodes.positions()[i].x < -0.5 * length + 5 * dx
]
[
    x1Nodes.append(i) for i in xrange(nodes.numInternalNodes)
    if nodes.positions()[i].x > 0.5 * length - 5 * dx
]
print "Selected %i constant velocity nodes." % (mpi.allreduce(
    len(x0Nodes) + len(x1Nodes), mpi.SUM))

# Set the nodes we're going to control to one single velocity at each end.
v0 = mpi.allreduce(min([nodes.velocity()[i].x for i in x0Nodes] + [100.0]),
                   mpi.MIN)
v1 = mpi.allreduce(max([nodes.velocity()[i].x for i in x1Nodes] + [-100.0]),
                   mpi.MAX)
for i in x0Nodes:
    nodes.velocity()[i].x = v0
for i in x1Nodes:
    nodes.velocity()[i].x = v1

xbc0 = ConstantVelocityBoundary(nodes, x0Nodes)
xbc1 = ConstantVelocityBoundary(nodes, x1Nodes)

bcs = [xbc0, xbc1]
Example #22
                                 distributionType="lattice",
                                 xmin=(-R0, -R0, -R0),
                                 xmax=(R0, R0, R0),
                                 rmin=0.0,
                                 rmax=R0,
                                 nNodePerh=nPerh,
                                 SPH=not asph)
distributeNodes((nodes, gen))

# Force the mass to be exactly the desired total.
mass = nodes.mass()
M1 = mass.sumElements()
for i in xrange(nodes.numInternalNodes):
    mass[i] *= M0 / M1

print "Num internal nodes for ", nodes.name, " : ", mpi.allreduce(
    nodes.numInternalNodes, mpi.SUM)
print "Total mass: ", mass.sumElements()

# Set specific thermal energy.
eps0 = 0.05
eps = nodes.specificThermalEnergy()
for i in xrange(nodes.numInternalNodes):
    eps[i] = eps0

#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase()
output("db")
output("db.appendNodeList(nodes)")
output("db.numNodeLists")
Example #23
    def setUp(self):
        global itest
        eos = GammaLawGasMKS(5.0 / 3.0, 1.0)
        self.nodes = makeFluidNodeList("test nodes %i" % itest,
                                       eos,
                                       numInternal=nperdomain,
                                       nPerh=2.01,
                                       hmin=1.0e-5,
                                       hmax=0.3)
        itest += 1
        self.pos = self.nodes.positions()
        self.H = self.nodes.Hfield()

        # Figure out the domain bounding volumes.
        dxproc = (x1 - x0) / nxproc
        dyproc = (y1 - y0) / nxproc
        dzproc = (z1 - z0) / nxproc

        ixproc = rank % nxproc
        iyproc = (rank % nxyproc) / nxproc
        izproc = rank / nxyproc
        xminproc = Vector(x0 + ixproc * dxproc, y0 + iyproc * dyproc,
                          z0 + izproc * dzproc)
        xmaxproc = Vector(x0 + (ixproc + 1) * dxproc,
                          y0 + (iyproc + 1) * dyproc,
                          z0 + (izproc + 1) * dzproc)

        # Randomly seed the generators.  We choose from random cells in order
        # to keep nodes from getting too close together.
        xyznodes_all = []
        occupiedCells = set()
        for k in xrange(n):
            i = rangen.randint(0, ncell)
            while i in occupiedCells:
                i = rangen.randint(0, ncell)
            ix = i % nxcell
            iy = (i % nxycell) / nxcell
            iz = i / nxycell
            xyznodes_all.append(
                Vector((ix + 0.5) * dxcell, (iy + 0.5) * dycell,
                       (iz + 0.5) * dzcell))
            occupiedCells.add(i)
        assert len(occupiedCells) == n
        xyznodes_all = mpi.bcast(xyznodes_all)
        xyznodes = [
            v for v in xyznodes_all if testPointInBox(v, xminproc, xmaxproc)
        ]
        dxavg = (x1 - x0) / nx
        dyavg = (y1 - y0) / ny
        dzavg = (z1 - z0) / nz
        self.dxmin = dxavg
        assert mpi.allreduce(len(xyznodes), mpi.SUM) == n

        # Now we can set the node conditions.
        self.nodes.numInternalNodes = len(xyznodes)
        for i in xrange(len(xyznodes)):
            self.pos[i] = xyznodes[i]
            self.H[i] = SymTensor(1.0 / (2.0 * dxavg), 0.0, 0.0, 0.0,
                                  1.0 / (2.0 * dyavg), 0.0, 0.0, 0.0,
                                  1.0 / (2.0 * dzavg))
        self.nodes.neighbor().updateNodes()

        # Fix up the H's.
        #iterateThoseHs(self.nodes)
        return
Example #24
        zmin=zminCuAnvil,
        zmax=zmaxCuAnvil,
        nNodePerh=nPerh,
        SPH=(NodeListConstructor is SphNodeList))

    distributeNodes3d(
        (nodesSteel, generatorTube), (nodesPlug, generatorPlug),
        (nodesProj, generatorProj), (nodesSteelAnvil, generatorSteelAnvil),
        (nodesFoamAnvil, generatorFoamAnvil), (nodesCuAnvil, generatorCuAnvil))
    nGlobalNodes = 0
    for n in nodeSet:
        print "Generator info for %s" % n.name
        output("    mpi.allreduce(n.numInternalNodes, mpi.MIN)")
        output("    mpi.allreduce(n.numInternalNodes, mpi.MAX)")
        output("    mpi.allreduce(n.numInternalNodes, mpi.SUM)")
        nGlobalNodes += mpi.allreduce(n.numInternalNodes, mpi.SUM)
    del n
    print "Total number of (internal) nodes in simulation: ", nGlobalNodes

    # Bevel the inner opening surface of the target tube.
    numNodesBeveled = bevelTubeEntrance(nodesSteel, 3, tubeOpeningAngle,
                                        rtubeInner, tubeThickness, zBevelBegin)
    print "Beveled %i nodes in the tube opening." % mpi.allreduce(
        numNodesBeveled, mpi.SUM)

    # Adjust the diameter of the projectile inward a bit, so it will slide
    # into the tube properly.
    drProj = compressProjectile * nPerh * rproj / nrproj
    projMultiplier = (rproj - drProj) / rproj
    for i in xrange(nodesProj.numInternalNodes):
        nodesProj.positions()[i].x *= projMultiplier
Example #25
File: parallel.py  Project: umansky/UEDGE
def parallelsum(a):
    if not lparallel: return a
    return mpi.allreduce(a, mpi.SUM)
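A usage sketch (assuming UEDGE's lparallel flag is set and pyMPI's mpi module is importable; the value is made up for illustration):

nlocal = 1.0                   # per-rank partial result
ntotal = parallelsum(nlocal)   # global sum; returns the input unchanged when serial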
Example #26
    def dump(self, simulationTime, cycle, format="ascii"):

        # Build the name of the directory we will be stuffing the viz file
        # into.
        outputdir = self.baseDirectory  # os.path.join(self.baseDirectory, self.baseFileName)

        # Make sure the output directory exists.
        if mpi.rank == 0:
            if not os.path.exists(outputdir):
                try:
                    os.makedirs(outputdir)
                except:
                    raise ValueError, "Cannot create output directory %s" % outputdir
        mpi.barrier()

        # Now build the output file name, including directory.  Make sure
        # the file does not already exist -- if it does we default to overwriting.
        filename = os.path.join(
            outputdir,
            self.baseFileName + "-time=%g-cycle=%i" % (simulationTime, cycle))
        ##         if os.path.exists(filename):
        ##             raise ValueError, "File %s already exists!  Aborting." % filename

        # Did the user provide a FieldList of cell geometries already?
        # start = TIME.clock()
        if self.cells:

            # Yep, so we build a disjoint set of cells as a polytope tessellation.
            mesh = eval("polytope.Tessellation%s()" % self.dimension)
            nDim = eval("Vector%s.nDimensions" % self.dimension)

            # Are we splitting into triangles/tets, or writing the native polygons/polyhedra?
            if self.splitCells:
                index2zone = []
                for nodeListi in xrange(len(self.cells)):
                    n = self.cells[nodeListi].numInternalElements
                    for i in xrange(n):
                        celli = self.cells(nodeListi, i)
                        verts = celli.vertices()
                        noldnodes = len(mesh.nodes) / nDim
                        noldfaces = len(mesh.faces)
                        noldcells = len(mesh.cells)
                        for j in xrange(len(verts)):
                            for k in xrange(nDim):
                                mesh.nodes.append(verts[j][k])

                        if nDim == 2:
                            # Build the triangles
                            PCcelli = PolyClipper.Polygon()
                            PolyClipper.convertToPolygon(PCcelli, celli)
                            tris = PolyClipper.splitIntoTriangles(
                                PCcelli, 1e-10)
                            index2zone.append([])
                            mesh.faces.resize(noldfaces + 3 * len(tris))
                            mesh.cells.resize(noldcells + len(tris))
                            for k, tri in enumerate(tris):
                                mesh.faces[noldfaces + 3 * k +
                                           0].append(noldnodes + tri[0])
                                mesh.faces[noldfaces + 3 * k +
                                           0].append(noldnodes + tri[1])
                                mesh.faces[noldfaces + 3 * k +
                                           1].append(noldnodes + tri[1])
                                mesh.faces[noldfaces + 3 * k +
                                           1].append(noldnodes + tri[2])
                                mesh.faces[noldfaces + 3 * k +
                                           2].append(noldnodes + tri[2])
                                mesh.faces[noldfaces + 3 * k +
                                           2].append(noldnodes + tri[0])
                                mesh.cells[noldcells + k].append(noldfaces +
                                                                 3 * k)
                                mesh.cells[noldcells + k].append(noldfaces +
                                                                 3 * k + 1)
                                mesh.cells[noldcells + k].append(noldfaces +
                                                                 3 * k + 2)
                                index2zone[-1].append(noldcells + k)

                        else:
                            # Build the tetrahedra
                            assert nDim == 3
                            PCcelli = PolyClipper.Polyhedron()
                            PolyClipper.convertToPolyhedron(PCcelli, celli)
                            tets = PolyClipper.splitIntoTetrahedra(
                                PCcelli, 1e-10)
                            index2zone.append([])
                            mesh.faces.resize(noldfaces + 4 * len(tets))
                            mesh.cells.resize(noldcells + len(tets))
                            for k, tet in enumerate(tets):
                                mesh.faces[noldfaces + 4 * k +
                                           0].append(noldnodes + tet[0])
                                mesh.faces[noldfaces + 4 * k +
                                           0].append(noldnodes + tet[1])
                                mesh.faces[noldfaces + 4 * k +
                                           0].append(noldnodes + tet[2])

                                mesh.faces[noldfaces + 4 * k +
                                           1].append(noldnodes + tet[1])
                                mesh.faces[noldfaces + 4 * k +
                                           1].append(noldnodes + tet[3])
                                mesh.faces[noldfaces + 4 * k +
                                           1].append(noldnodes + tet[2])

                                mesh.faces[noldfaces + 4 * k +
                                           2].append(noldnodes + tet[3])
                                mesh.faces[noldfaces + 4 * k +
                                           2].append(noldnodes + tet[0])
                                mesh.faces[noldfaces + 4 * k +
                                           2].append(noldnodes + tet[2])

                                mesh.faces[noldfaces + 4 * k +
                                           3].append(noldnodes + tet[0])
                                mesh.faces[noldfaces + 4 * k +
                                           3].append(noldnodes + tet[3])
                                mesh.faces[noldfaces + 4 * k +
                                           3].append(noldnodes + tet[1])

                                mesh.cells[noldcells + k].append(noldfaces +
                                                                 4 * k)
                                mesh.cells[noldcells + k].append(noldfaces +
                                                                 4 * k + 1)
                                mesh.cells[noldcells + k].append(noldfaces +
                                                                 4 * k + 2)
                                mesh.cells[noldcells + k].append(noldfaces +
                                                                 4 * k + 3)
                                index2zone[-1].append(noldcells + k)

            else:
                index2zone = None
                copy2polytope(self.cells, mesh)
                # for nodeListi in xrange(len(self.cells)):
                #     n = self.cells[nodeListi].numInternalElements
                #     noldcells = len(mesh.cells)
                #     mesh.cells.resize(noldcells + n)
                #     for i in xrange(n):
                #         celli = self.cells(nodeListi, i)
                #         verts = celli.vertices
                #         facets = celli.facets
                #         noldnodes = len(mesh.nodes)/nDim
                #         noldfaces = len(mesh.faces)
                #         mesh.faces.resize(noldfaces + len(facets))
                #         for j in xrange(len(verts)):
                #             for k in xrange(nDim):
                #                 mesh.nodes.append(verts[j][k])
                #         for j in xrange(len(facets)):
                #             mesh.cells[noldcells + i].append(noldfaces + j)
                #             ipoints = facets[j].ipoints
                #             for k in ipoints:
                #                 mesh.faces[noldfaces + j].append(noldnodes + k)

        else:

            # We need to do the full up polytope tessellation.
            # Build the set of generators from our points.
            gens = vector_of_double()
            nDim = eval("Vector%s.nDimensions" % self.dimension)
            xmin = vector_of_double([1e100] * nDim)
            xmax = vector_of_double([-1e100] * nDim)
            for nodes in self._nodeLists:
                pos = nodes.positions()
                for i in xrange(nodes.numInternalNodes):
                    for j in xrange(nDim):
                        gens.append(pos[i][j])
                        xmin[j] = min(xmin[j], pos[i][j])
                        xmax[j] = max(xmax[j], pos[i][j])

            # Check the boundaries for any additional points we want to use for the bounding box.
            for bound in self._boundaries:
                try:
                    pb = dynamicCastBoundaryToPlanarBoundary2d(bound)
                    for p in (pb.enterPlane.point, pb.exitPlane.point):
                        for j in xrange(nDim):
                            xmin[j] = min(xmin[j], p[j])
                            xmax[j] = max(xmax[j], p[j])
                except:
                    pass

            # Globally reduce and puff up a bit.
            for j in xrange(nDim):
                xmin[j] = mpi.allreduce(xmin[j], mpi.MIN)
                xmax[j] = mpi.allreduce(xmax[j], mpi.MAX)
                delta = 0.01 * (xmax[j] - xmin[j])
                xmin[j] -= delta
                xmax[j] += delta

            # Build the PLC.
            plc = polytope.PLC2d()
            plc.facets.resize(4)
            for i in xrange(4):
                plc.facets[i].resize(2)
                plc.facets[i][0] = i
                plc.facets[i][1] = (i + 1) % 4
            plccoords = vector_of_double(8)
            plccoords[0] = xmin[0]
            plccoords[1] = xmin[1]
            plccoords[2] = xmax[0]
            plccoords[3] = xmin[1]
            plccoords[4] = xmax[0]
            plccoords[5] = xmax[1]
            plccoords[6] = xmin[0]
            plccoords[7] = xmax[1]

            # Blago!
            # f = open("generators_%i_of_%i.txt" % (mpi.rank, mpi.procs), "w")
            # f.write("# generators x    y\n")
            # for i in xrange(len(gens)/2):
            #     f.write("%g    %g\n" % (gens[2*i], gens[2*i+1]))
            # f.write("# PLC coords    x     y\n")
            # for i in xrange(len(plccoords)/2):
            #     f.write("%g    %g\n" % (plccoords[2*i], plccoords[2*i+1]))
            # f.close()
            # Blago!

            # Build the tessellation.
            if self.dimension == "2d":
                mesh = polytope.Tessellation2d()
                if "TriangleTessellator2d" in dir(polytope):
                    serial_tessellator = polytope.TriangleTessellator2d()
                else:
                    assert "BoostTessellator2d" in dir(polytope)
                    serial_tessellator = polytope.BoostTessellator2d()
            else:
                assert self.dimension == "3d"
                raise RuntimeError, "Sorry: 3D tessellation silo dumps are not supported yet."
            if mpi.procs > 1:
                tessellator = eval(
                    "polytope.DistributedTessellator%s(serial_tessellator, False, True)"
                    % self.dimension)
            else:
                tessellator = serial_tessellator
            index2zone = tessellator.tessellateDegenerate(
                gens, plccoords, plc, 1.0e-8, mesh)

        # print "Took %g sec to generate cells" % (TIME.clock() - start)
        # start = TIME.clock()

        # Figure out how many of each type of field we're dumping.
        intFields = [
            x for x in self._fields
            if isinstance(x, eval("IntField%s" % self.dimension))
        ]
        #[x for x in self._fields if isinstance(x, eval("UnsignedField%s" % self.dimension))] +
        #[x for x in self._fields if isinstance(x, eval("ULLField%s" % self.dimension))])
        scalarFields = [
            x for x in self._fields
            if isinstance(x, eval("ScalarField%s" % self.dimension))
        ]
        vectorFields = [
            x for x in self._fields
            if isinstance(x, eval("VectorField%s" % self.dimension))
        ]
        tensorFields = [
            x for x in self._fields
            if isinstance(x, eval("TensorField%s" % self.dimension))
        ]
        symTensorFields = [
            x for x in self._fields
            if isinstance(x, eval("SymTensorField%s" % self.dimension))
        ]

        # For tensor fields we like to dump out some extra info.
        for f in (tensorFields + symTensorFields):
            n = f.nodeList()
            tr = eval("ScalarField%s('%s_trace', n)" %
                      (self.dimension, f.name))
            det = eval("ScalarField%s('%s_determinant', n)" %
                       (self.dimension, f.name))
            mineigen = eval("ScalarField%s('%s_eigen_min', n)" %
                            (self.dimension, f.name))
            maxeigen = eval("ScalarField%s('%s_eigen_max', n)" %
                            (self.dimension, f.name))
            fvals = f.internalValues()
            for i in xrange(n.numInternalNodes):
                tr[i] = fvals[i].Trace()
                det[i] = fvals[i].Determinant()
                eigen = fvals[i].eigenValues()
                mineigen[i] = eigen.minElement()
                maxeigen[i] = eigen.maxElement()
            scalarFields += [tr, det, mineigen, maxeigen]
        # print "Took %g sec to build output fields" % (TIME.clock() - start)
        # start = TIME.clock()

        # Write the output.
        timeslice = siloMeshDump(filename,
                                 mesh,
                                 index2zone=index2zone,
                                 nodeLists=self._nodeLists,
                                 time=simulationTime,
                                 cycle=cycle,
                                 intFields=intFields,
                                 scalarFields=scalarFields,
                                 vectorFields=vectorFields,
                                 tensorFields=tensorFields,
                                 symTensorFields=symTensorFields)

        # print "Took %g sec to calls siloMeshDump" % (TIME.clock() - start)
        # start = TIME.clock()

        # Write the master file listing all the time slices.
        if mpi.rank == 0:
            mastername = os.path.join(self.baseDirectory, self.masterFileName)
            mf = open(mastername, "a")
            mf.write("%s\n" % timeslice)
            mf.close()
        mpi.barrier()

        # That's it.
        del mesh
        while gc.collect():
            pass
        return
Example #27
def writeMasterSiloFile(ndim, nblock, jsplit, baseDirectory, baseName,
                        procDirBaseName, materials, rhosamp, label, time,
                        cycle):

    nullOpts = silo.DBoptlist()

    # Decide which domains have information.
    if len(rhosamp) > 0:
        myvote = mpi.rank + 1
    else:
        myvote = 0
    maxproc = mpi.allreduce(myvote, mpi.MAX)
    assert maxproc <= mpi.procs
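    # (maxproc is therefore one past the highest rank that holds samples, so
    # the master file references domain files 0 through maxproc - 1.)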

    # Pattern for constructing per domain variables.
    domainNamePatterns = [
        os.path.join(procDirBaseName, "domain%i.silo:%%s" % i)
        for i in xrange(maxproc)
    ]
    domainVarNames = Spheral.vector_of_string()
    for iproc, p in enumerate(domainNamePatterns):
        domainVarNames.append(p % "/hblk0/den")
    assert len(domainVarNames) == maxproc

    # We need each domain's nblock info.
    nblocks = [nblock]
    for sendproc in xrange(1, maxproc):
        if mpi.rank == sendproc:
            mpi.send(nblock, dest=0, tag=50)
        if mpi.rank == 0:
            nblocks.append(mpi.recv(source=sendproc, tag=50)[0])

    # Create the master file.
    if mpi.rank == 0:
        fileName = os.path.join(baseDirectory, baseName + ".silo")
        f = silo.DBCreate(fileName, SA._DB_CLOBBER, SA._DB_LOCAL, label,
                          SA._DB_HDF5)
        nullOpts = silo.DBoptlist()

        # Write the domain file names and types.
        domainNames = Spheral.vector_of_string()
        meshTypes = Spheral.vector_of_int(maxproc, SA._DB_QUADMESH)
        for p in domainNamePatterns:
            domainNames.append(p % "hblk0/hydro_mesh")
        optlist = silo.DBoptlist(1024)
        assert optlist.addOption(SA._DBOPT_CYCLE, cycle) == 0
        assert optlist.addOption(SA._DBOPT_DTIME, time) == 0
        assert silo.DBPutMultimesh(f, "hydro_mesh", domainNames, meshTypes,
                                   optlist) == 0

        # Write material names.
        material_names = Spheral.vector_of_string()
        matnames = Spheral.vector_of_string(1, "void")
        matnos = Spheral.vector_of_int(1, 0)
        for p in domainNamePatterns:
            material_names.append(p % "/hblk0/Materials")
        for i, name in enumerate([x.name for x in materials]):
            matnames.append(name)
            matnos.append(i + 1)
        assert len(material_names) == maxproc
        assert len(matnames) == len(materials) + 1
        assert len(matnos) == len(materials) + 1
        optlist = silo.DBoptlist(1024)
        assert optlist.addOption(SA._DBOPT_CYCLE, cycle) == 0
        assert optlist.addOption(SA._DBOPT_DTIME, time) == 0
        assert optlist.addOption(SA._DBOPT_MMESH_NAME, "hydro_mesh") == 0
        assert optlist.addOption(SA._DBOPT_MATNAMES, SA._DBOPT_NMATNOS,
                                 matnames) == 0
        assert optlist.addOption(SA._DBOPT_MATNOS, SA._DBOPT_NMATNOS,
                                 matnos) == 0
        assert silo.DBPutMultimat(f, "Materials", material_names, optlist) == 0

        # Write the variable descriptors.
        # We currently hardwire for the single density variable.
        types = Spheral.vector_of_int(maxproc, SA._DB_QUADVAR)
        assert len(domainVarNames) == maxproc
        optlistMV = silo.DBoptlist()
        assert optlistMV.addOption(SA._DBOPT_CYCLE, cycle) == 0
        assert optlistMV.addOption(SA._DBOPT_DTIME, time) == 0
        #assert optlistMV.addOption(SA._DBOPT_TENSOR_RANK, SA._DB_VARTYPE_SCALAR) == 0
        assert optlistMV.addOption(SA._DBOPT_BLOCKORIGIN, 0) == 0
        assert optlistMV.addOption(SA._DBOPT_MMESH_NAME, "hydro_mesh") == 0
        assert silo.DBPutMultivar(f, "den", domainVarNames, types,
                                  optlistMV) == 0

        # Write the dummy variable "akap_0" to tell Hades we're actually Hydra or something.
        assert silo.DBPutQuadvar1(f, "akap_0", "hydro_mesh",
                                  Spheral.vector_of_double(ndim * ndim, 0.0),
                                  Spheral.vector_of_double(), SA._DB_ZONECENT,
                                  Spheral.vector_of_int(ndim, ndim),
                                  nullOpts) == 0

        # Write domain and mesh size info.
        assert silo.DBMkDir(f, "Decomposition") == 0
        assert silo.DBWrite(f, "Decomposition/NumDomains", maxproc) == 0
        assert silo.DBWrite(f, "Decomposition/NumLocalDomains", maxproc) == 0
        assert silo.DBWrite(f, "Decomposition/NumBlocks", 1) == 0
        #assert silo.DBWrite(f, "Decomposition/LocalName", "hblk") == 0
        localDomains = Spheral.vector_of_int()
        domainFiles = Spheral.vector_of_vector_of_int(1)
        for i in xrange(maxproc):
            localDomains.append(i)
            domainFiles[0].append(i)
        assert silo.DBWrite(f, "Decomposition/LocalDomains", localDomains) == 0
        assert silo.DBWrite(f, "DomainFiles", domainFiles) == 0

        for iproc in xrange(maxproc):
            assert silo.DBMkDir(f, "Decomposition/gmap%i" % iproc) == 0
            stuff = Spheral.vector_of_int(12, 0)
            for jdim in xrange(ndim):
                stuff[6 + jdim] = nblocks[iproc][jdim]
            if iproc in (0, maxproc - 1):
                assert silo.DBWrite(
                    f, "Decomposition/gmap%i/NumNeighbors" % iproc, 1) == 0
            else:
                assert silo.DBWrite(
                    f, "Decomposition/gmap%i/NumNeighbors" % iproc, 2) == 0
            assert silo.DBWrite(f, "Decomposition/gmap%i/gmap" % iproc,
                                stuff) == 0

    # Close the file.
    if mpi.rank == 0:
        assert silo.DBClose(f) == 0
        del f

    return maxproc
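
# A small sketch of the per-domain naming convention used above (values are
# hypothetical; the "%%s" placeholder is later filled with a path inside each
# domain file):
import os

procDirBaseName = "visit"                  # assumed directory name
patterns = [os.path.join(procDirBaseName, "domain%i.silo:%%s" % i)
            for i in xrange(2)]
print patterns[0] % "/hblk0/den"           # -> visit/domain0.silo:/hblk0/den
print patterns[1] % "hblk0/hydro_mesh"     # -> visit/domain1.silo:hblk0/hydro_mesh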
def pflatten_node_list(nl,filename,do_header=True,nl_id=0,silent=False):
    """Flatten physical field values from a node list to a rectangular ascii file.

    pflatten_node_list(nl,filename) extracts field variables from all nodes of nl,
    which must be a valid node list, and writes them as a rectangular table into
    the text file filename. (A short header is also written, using the # comment
    character so the resulting file can be easily read with numpy.loadtxt.) The
    file will be overwritten if it exists. If filename has the .gz extension it
    will be compressed using gzip.

    pflatten_node_list(...,do_header=False) omits the header and appends the 
    flattened nl to the end of the file if one exists.

    pflatten_node_list(...,nl_id=id) places the integer id in the first column
    of every node (row) in the node list. This can be used when appending multiple
    lists to the same file, providing a convenient way to distinguish nodes from
    different lists when the file is later read. The default id (for single node
    list files) is 0.

    The format of the output table is (one line per node):
      id eos_id x y z vx vy vz m rho p T U hmin hmax

    The p in pflatten is for 'parallel', a reminder that all nodes will be
    processed in their local rank, without ever being communicated or collected
    in a single process. Each mpi rank will wait its turn to access the output 
    file, so the writing is in fact serial, but avoids bandwidth and memory waste
    and is thus suitable for large node lists from high-res runs.

    See also: spickle_node_list
    """

    # Make sure we are not wasting our time.
    assert isinstance(nl,(sph.Spheral.NodeSpace.FluidNodeList3d,
                          sph.Spheral.SolidMaterial.SolidNodeList3d)
                      ), "argument 1 must be a node list"
    assert isinstance(filename, str), "argument 2 must be a simple string"
    assert isinstance(do_header, bool), "true or false"
    assert isinstance(silent, bool), "true or false"
    assert isinstance(nl_id, int), "nl_id must be an integer"
    assert not isinstance(nl_id, bool), "nl_id must not be a bool"

    # Determine if file should be compressed.
    if os.path.splitext(filename)[1] == '.gz':
        import gzip
        open = gzip.open
    else:
        import __builtin__
        open = __builtin__.open

    # Write the header.
    if do_header:
        nbGlobalNodes = mpi.allreduce(nl.numInternalNodes, mpi.SUM)
        header = header_template.format(nbGlobalNodes)
        if mpi.rank == 0:
            fid = open(filename,'w')
            fid.write(header)
            fid.close()
            pass
        pass
     
    # Start collecting data.
    if not silent:
        sys.stdout.write('Flattening ' + nl.label() + ' ' + nl.name + '........')
    
    # Get values of field variables stored in internal nodes.
    xloc = nl.positions().internalValues()
    vloc = nl.velocity().internalValues()
    mloc = nl.mass().internalValues()
    rloc = nl.massDensity().internalValues()
    uloc = nl.specificThermalEnergy().internalValues()
    Hloc = nl.Hfield().internalValues()
    #(pressure and temperature are stored in the eos object.)
    eos = nl.equationOfState()
    ploc = sph.ScalarField('ploc',nl)
    Tloc = sph.ScalarField('Tloc',nl)
    rref = nl.massDensity()
    uref = nl.specificThermalEnergy()
    eos.setPressure(ploc,rref,uref)
    eos.setTemperature(Tloc,rref,uref)

    # Procs take turns writing internal node values to file.
    for proc in range(mpi.procs):
        if proc == mpi.rank:
            fid = open(filename,'a')
            for nk in range(nl.numInternalNodes):
                line  = "{:2d}  ".format(nl_id)
                line += "{:2d}  ".format(getattr(nl,'eos_id',-1))
                line += "{0.x:+12.5e}  {0.y:+12.5e}  {0.z:+12.5e}  ".format(xloc[nk])
                line += "{0.x:+12.5e}  {0.y:+12.5e}  {0.z:+12.5e}  ".format(vloc[nk])
                line += "{0:+12.5e}  ".format(mloc[nk])
                line += "{0:+12.5e}  ".format(rloc[nk])
                line += "{0:+12.5e}  ".format(ploc[nk])
                line += "{0:+12.5e}  ".format(Tloc[nk])
                line += "{0:+12.5e}  ".format(uloc[nk])
                line += "{0:+12.5e}  ".format(Hloc[nk].Inverse().eigenValues().minElement())
                line += "{0:+12.5e}  ".format(Hloc[nk].Inverse().eigenValues().maxElement())
                line += "\n"
                fid.write(line)
                pass
            fid.close()
            pass
        mpi.barrier()
        pass
     
    # And Bob's our uncle.
    if not silent:
        print "Done."
Example #29
def hadesDump1(integrator,
               nsample,
               xmin,
               xmax,
               W,
               isotopes,
               baseFileName,
               baseDirectory=".",
               mask=None,
               dumpGhosts=True,
               materials="all"):

    # We currently only support 3-D.
    assert isinstance(integrator, Spheral.Integrator3d)
    assert len(nsample) == 3
    assert isinstance(xmin, Spheral.Vector3d)
    assert isinstance(xmax, Spheral.Vector3d)
    assert isinstance(W, Spheral.TableKernel3d)
    for x in isotopes:
        for xx in x:
            assert len(xx) == 2

    # Prepare to time how long this takes.
    t0 = time.clock()

    # Extract the data base.
    db = integrator.dataBase()

    # If requested, set ghost node info.
    if dumpGhosts and integrator is not None:
        state = Spheral.State3d(db, integrator.physicsPackages())
        derivs = Spheral.StateDerivatives3d(db, integrator.physicsPackages())
        integrator.setGhostNodes()
        integrator.applyGhostBoundaries(state, derivs)

    # Get the set of material names we're going to write.
    if materials == "all":
        materials = [n for n in db.fluidNodeLists()]
    assert len(materials) == len(isotopes)

    # HACK!  We are currently restricting to writing single material output!
    assert len(materials) == 1

    # Make sure the output directory exists.
    import mpi
    import os
    if mpi.rank == 0 and not os.path.exists(baseDirectory):
        try:
            os.makedirs(baseDirectory)
        except:
            raise "Cannot create output directory %s" % baseDirectory
    mpi.barrier()

    # Write the density header file.
    print "hadesDump: writing density header..."
    if mpi.rank == 0:
        filename = os.path.join(baseDirectory, baseFileName + ".spr")
        f = open(filename, "w")
        f.write("3\n")
        for i in xrange(3):
            f.write("%i\n" % nsample[i])
            f.write("%f\n" % xmin(i))
            f.write("%f\n" % ((xmax(i) - xmin(i)) / nsample[i]))
        f.write("3\n")
        f.close()
    mpi.barrier()

    # Sample the density.
    ntot = nsample[0] * nsample[1] * nsample[2]
    for nodes in materials:
        print "hadesDump: sampling density for %s..." % nodes.name
        r = Spheral.VectorFieldList3d()
        H = Spheral.SymTensorFieldList3d()
        rho = Spheral.ScalarFieldList3d()
        r.appendField(nodes.positions())
        H.appendField(nodes.Hfield())
        rho.appendField(nodes.massDensity())

        # Use a unit weight field; w must be constructed before any fields
        # are appended to it.
        w = Spheral.ScalarFieldList3d()
        w.copyFields()
        w.appendField(Spheral.ScalarField3d("weight", nodes, 1.0))

        fieldListSet = Spheral.FieldListSet3d()
        fieldListSet.ScalarFieldLists.append(rho)
        localMask = Spheral.IntFieldList3d()
        if mask is None:
            localMask.copyFields()
            localMask.appendField(Spheral.IntField3d("mask", nodes, 1))
        else:
            localMask.appendField(mask.fieldForNodeList(nodes))

        scalar_samples = Spheral.vector_of_vector_of_double()
        vector_samples = Spheral.vector_of_vector_of_Vector3d()
        tensor_samples = Spheral.vector_of_vector_of_Tensor3d()
        symTensor_samples = Spheral.vector_of_vector_of_SymTensor3d()
        nsample_vec = Spheral.vector_of_int(3)
        for i in xrange(3):
            nsample_vec[i] = nsample[i]

        Spheral.sampleMultipleFields2Lattice3d(fieldListSet, r, w, H,
                                               localMask, W, xmin, xmax,
                                               nsample_vec, scalar_samples,
                                               vector_samples, tensor_samples,
                                               symTensor_samples)
        print "Generated %i scalar fields" % len(scalar_samples)
        rhosamp = scalar_samples[0]
        nlocal = len(rhosamp)
        assert mpi.allreduce(nlocal, mpi.SUM) == ntot

        print "hadesDump: writing density for %s..." % nodes.name
        filename = os.path.join(baseDirectory, baseFileName + ".sdt")
        for sendProc in xrange(mpi.procs):
            if mpi.rank == sendProc:
                f = open(filename, "ab")
                f.write(struct.pack(nlocal * "f", *tuple(rhosamp)))
                f.close()
            mpi.barrier()

    # Write the material arrays.
    print "hadesDump: writing material flags..."
    filename = os.path.join(baseDirectory, baseFileName + "_mat.sdt")
    for sendProc in xrange(mpi.procs):
        if mpi.rank == sendProc:
            f = open(filename, "ab")
            f.write(struct.pack(nlocal * "i", *(nlocal * (1, ))))
            f.close()
        mpi.barrier()

    # Write the isotopes.
    print "hadesDump: writing isotopics..."
    if mpi.rank == 0:
        filename = os.path.join(baseDirectory, "isos.mat")
        f = open(filename, "w")
        i = 0
        for isofracs in isotopes:
            f.write("isofrac(%i) =" % i)
            for (iso, frac) in isofracs:
                f.write(" %i %f" % (iso, frac))
            f.write("\n")
            i += 1
        f.close()
    mpi.barrier()

    mpi.barrier()
    print "hadesDump finished: required %0.2f seconds" % (time.clock() - t0)
    return
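
# Read-back sketch for the .sdt files written above: ranks append their local
# samples as raw native 4-byte floats, ntot values in total ("dump" stands in
# for the actual baseFileName; numpy assumed available):
import numpy as np

rhosamp = np.fromfile("dump.sdt", dtype=np.float32)
print "read %i density samples" % len(rhosamp)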
Example #30
            raise ValueError, "The restarted state does not match!"
        else:
            print "Restart check PASSED."

else:
    control.advance(goalTime, maxSteps)
    control.updateViz(control.totalSteps, integrator.currentTime, 0.0)
    control.dropRestartFile()

#-------------------------------------------------------------------------------
# Plot the results.
#-------------------------------------------------------------------------------
import NohAnalyticSolution
answer = NohAnalyticSolution.NohSolution(3, h0=nPerh * rmax / nx)

r = mpi.allreduce([x.magnitude() for x in nodes1.positions().internalValues()],
                  mpi.SUM)
rho = mpi.allreduce(list(nodes1.massDensity().internalValues()), mpi.SUM)
rans, vans, epsans, rhoans, Pans, hans = answer.solution(control.time(), r)
if mpi.rank == 0:
    L1 = 0.0
    for i in xrange(len(rho)):
        L1 = L1 + abs(rho[i] - rhoans[i])
    L1_tot = L1 / len(rho)
    print "L1=", L1_tot, "\n"
    with open("Converge.txt", "a") as myfile:
        myfile.write("%s %s %s %s %s\n" % (nx, ny, nz, nx + ny + nz, L1_tot))
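# (The quantity written above is the discrete L1 density error norm,
#  L1_tot = (1/N) * sum_i |rho[i] - rhoans[i]|, accumulated on rank 0.)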
if graphics:

    # Plot the node positions.
    from SpheralGnuPlotUtilities import *
Example #31
    def __init__(self,
                 filename,
                 materialName,
                 nNodePerh=2.01,
                 SPH=False,
                 precision=20,
                 Hscalefactor=1.0,
                 extraFields=[],
                 initializeBase=True,
                 readFileToMemory=False,
                 refineNodes=0):

        self.filename = filename
        self.nPerh = nNodePerh
        self.SPH = SPH
        self.extraFields = extraFields

        self.precision = "%" + "%i.%ie" % (precision + 3, precision)

        # For now we restrict to reading from a single (serial) file.
        allfiles = mpi.allreduce([filename], mpi.SUM)
        assert min([x == filename for x in allfiles])
        self.serialfile = True

        # Open the file.
        if mpi.rank == 0:
            f = gzip.open(filename, "r")
            if readFileToMemory:
                self.f = f.readlines()
                f.close()
            else:
                self.f = f
        else:
            self.f = None

        # Read the positions.
        vals = readField2String(materialName, "positions", self.f)
        n = len(vals)
        self.x = []
        self.y = []
        for val in vals:
            x, y = tuple([float(x) for x in val.split()])
            self.x.append(x)
            self.y.append(y)
        assert len(self.x) == n
        assert len(self.y) == n

        # Read the masses.
        vals = readField2String(materialName, "mass", self.f)
        assert len(vals) == n
        self.m = [float(x) for x in vals]
        assert len(self.m) == n

        # Read the mass densities.
        vals = readField2String(materialName, "density", self.f)
        assert len(vals) == n
        self.rho = [float(x) for x in vals]
        assert len(self.rho) == n

        # Read the velocities.
        vals = readField2String(materialName, "velocity", self.f)
        assert len(vals) == n
        self.vx = []
        self.vy = []
        for val in vals:
            vx, vy = tuple([float(x) for x in val.split()])
            self.vx.append(vx)
            self.vy.append(vy)
        assert len(self.vx) == n
        assert len(self.vy) == n

        # Read the specific thermal energies.
        vals = readField2String(materialName, "specificThermalEnergy", self.f)
        assert len(vals) == n
        self.eps = [float(x) for x in vals]
        assert len(self.eps) == n

        # Read the H tensors.
        vals = readField2String(materialName, "Hinverse2", self.f)
        assert len(vals) == n
        self.H = []
        for val in vals:
            Hi2 = SymTensor2d(*tuple([float(x)
                                      for x in val.split()])) * Hscalefactor
            H = Hi2.Inverse().sqrt()
            if SPH:
                hxy = sqrt(H.Determinant())
                H = SymTensor2d.one * hxy
            self.H.append(H)
        assert len(self.H) == n

        # Read in any extra fields the user requested.
        # Note we make the assumption here that any extra fields are in fact scalar fields.
        for fname in extraFields:
            vals = readField2String(materialName, fname, self.f)
            assert len(vals) == n
            self.__dict__[fname] = [float(x) for x in vals]

        # Initialize the base class.
        if initializeBase:
            fields = tuple([
                self.x, self.y, self.m, self.rho, self.vx, self.vy, self.eps,
                self.H
            ] + [self.__dict__[x] for x in extraFields])
            NodeGeneratorBase.__init__(self, self.serialfile, *fields)

        if mpi.rank == 0:
            self.f.close()

        # Apply the requested number of refinements.
        for i in xrange(refineNodes):
            refineNodes2d(self)

        return
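
# Sketch of the H recovery above for a single record: the file stores
# Hinverse2 = (H^-1)^2, so H = ((H^-1)^2)^(-1/2); with SPH=True the tensor is
# replaced by the isotropic one of equal determinant (component values below
# are hypothetical):
from math import sqrt
from Spheral import SymTensor2d

Hi2 = SymTensor2d(4.0, 0.0, 0.0, 9.0)   # (H^-1)^2 with axis scales 2 and 3
H = Hi2.Inverse().sqrt()                # full tensor: diag(1/2, 1/3)
hxy = sqrt(H.Determinant())             # isotropic equivalent, sqrt(1/6)
H_iso = SymTensor2d.one * hxy           # what the SPH=True branch stores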
Example #32
               epsPlot=epsPlot,
               PPlot=PPlot)

#-------------------------------------------------------------------------------
# Measure the difference between the simulation and analytic answer.
#-------------------------------------------------------------------------------
# Figure out which of the nodes we want to measure the error on.
rmin = 0.05
rmax = 0.35
rall = [x.magnitude() for x in nodes.positions().internalValues()]
imask = [
    i for i in xrange(nodes.numInternalNodes)
    if (rall[i] > rmin and rall[i] < rmax)
]
Nlocal = len(imask)
Nglobal = mpi.allreduce(Nlocal, mpi.SUM)

# Find the local profiles.
r = nodes.positions().internalValues()
vel = nodes.velocity().internalValues()
rho = nodes.massDensity().internalValues()
eps = nodes.specificThermalEnergy().internalValues()
P = nodes.pressure().internalValues()
xprof = [rall[i] for i in imask]
vprof = [vel[i].dot(r[i].unitVector()) for i in imask]
rhoprof = [rho[i] for i in imask]
epsprof = [eps[i] for i in imask]
Pprof = [P[i] for i in imask]
Aprof = [Pi / rhoi**gamma for (Pi, rhoi) in zip(Pprof, rhoprof)]

# Compute the analytic answer on the positions of the nodes.
Example #33
    def __init__(self,n,densityProfileMethod,
                 rmin = 0.0,
                 rmax = 0.0,
                 thetaMin = 0.0,
                 thetaMax = pi,
                 phiMin = 0.0,
                 phiMax = 2.0*pi,
                 nNodePerh = 2.01,
                 offset = None,
                 rejecter = None,
                 m0 = 0.0):

        assert n > 0
        assert rmin < rmax
        assert thetaMin < thetaMax
        assert thetaMin >= 0.0 and thetaMin <= 2.0*pi
        assert thetaMax >= 0.0 and thetaMax <= 2.0*pi
        assert phiMin < phiMax
        assert phiMin >= 0.0 and phiMin <= 2.0*pi
        assert phiMax >= 0.0 and phiMax <= 2.0*pi
        assert nNodePerh > 0.0
        assert offset is None or len(offset)==3
        
        self.rejecter = None
        if rejecter:
            self.rejecter = rejecter
        
        import random
        
        if offset is None:
            self.offset = Vector3d(0,0,0)
        else:
            self.offset = Vector3d(offset[0],offset[1],offset[2])
        
        self.n = n
        self.rmin = rmin
        self.rmax = rmax
        self.thetaMin = thetaMin
        self.thetaMax = thetaMax
        self.phiMin = phiMin
        self.phiMax = phiMax
        self.nNodePerh = nNodePerh        
        
        # If the user provided a constant for rho, then use the constantRho
        # class to provide this value.
        if type(densityProfileMethod) == type(1.0):
            self.densityProfileMethod = ConstantRho(densityProfileMethod)
        else:
            self.densityProfileMethod = densityProfileMethod
        
        # Determine how much total mass there is in the system.
        self.totalMass = self.integrateTotalMass(self.densityProfileMethod,
                                                     rmin, rmax,
                                                     thetaMin, thetaMax,
                                                     phiMin, phiMax)

        print "Total mass of %g in the range r = (%g, %g), theta = (%g, %g), phi = (%g, %g)" % \
            (self.totalMass, rmin, rmax, thetaMin, thetaMax, phiMin, phiMax)

        # Now set the nominal mass per node.
        if (m0 == 0.0):
            self.m0 = float(self.totalMass/n)
        else:
            self.m0 = float(m0)
            n = int(self.totalMass/self.m0)
        assert self.m0 > 0.0
        print "Nominal mass per node of %f for %d nodes." % (self.m0,n)
        
        from Spheral import SymTensor3d
        self.x = []
        self.y = []
        self.z = []
        self.m = []
        self.H = []

        # first shell is a tetrahedron
        self.positions = []
        nshell = 4
        rhoc = self.densityProfileMethod(0.0)
        print rhoc,self.m0
        mi   = self.m0
        drc  = pow(0.333*mi/rhoc,1.0/3.0)
        print drc
        hi   = nNodePerh*(drc)

        random.seed(nshell)
        dt = random.random()*pi
        dt2 = random.random()*pi
            
        rot = [[1.0,0.0,0.0],[0.0,cos(dt),-sin(dt)],[0.0,sin(dt),cos(dt)]]
        rot2 = [[cos(dt2),0.0,sin(dt2)],[0.0,1.0,0.0],[-sin(dt2),0.0,cos(dt2)]]

        t = sqrt(3.0)/3.0
        p1 = self.rotater([t,t,t],rot,rot2)
        p2 = self.rotater([t,-t,-t],rot,rot2)
        p3 = self.rotater([-t,-t,t],rot,rot2)
        p4 = self.rotater([-t,t,-t],rot,rot2)

        ps = [p1,p2,p3,p4]
        
        for pn in ps:
            self.positions.append(pn)
        mi = self.densityProfileMethod(drc*0.5)*(4.0/3.0*pi*drc**3)/4.0

        for pn in ps:
            x = pn[0]*0.5*drc
            y = pn[1]*0.5*drc
            z = pn[2]*0.5*drc

            if rejecter:
                if rejecter.accept(x,y,z):
                    self.x.append(x)
                    self.y.append(y)
                    self.z.append(z)
                    self.m.append(mi)
                    self.H.append(SymTensor3d.one*(1.0/hi))
            else:
                self.x.append(x)
                self.y.append(y)
                self.z.append(z)
                self.m.append(mi)
                self.H.append(SymTensor3d.one*(1.0/hi))

        # now march up to the surface
        r = 0
        dr = drc
        while r <= rmax:
            r += dr
                
    
        # If requested, shift the nodes.
        if offset:
            for i in xrange(len(self.x)):
                self.x[i] += offset[0]
                self.y[i] += offset[1]
                self.z[i] += offset[2]
            
        print "Generated a total of %i nodes." % mpi.allreduce(len(self.x),mpi.SUM)
        NodeGeneratorBase.__init__(self, False,
                                   self.x, self.y, self.z, self.m, self.H)
        return
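
# ConstantRho is referenced in the constructor above but not defined in this
# snippet; a minimal stand-in consistent with how it is called
# (densityProfileMethod(r) -> density) would be:
class ConstantRho:
    def __init__(self, rho0):
        self.rho0 = rho0
    def __call__(self, r):
        return self.rho0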
def spickle_node_list(nl,filename=None,silent=False):
    """Pack physical field variables from a node list in a dict and pickle.

    (Note: This is not a true pickler class.)

    spickle_node_list(nl,filename) extracts field variables from all nodes of nl,
    which must be a valid node list, and packs them in a dict that is returned
    to the caller. If the optional argument filename is a string then dict will
    also be pickled to a file of that name. The file will be overwritten if it
    exists.

    The s in spickle is for 'serial', a reminder that this method collects all
    nodes of the node list (from all ranks) in a single process. Thus this method
    is mainly useful for interactive work with small node lists. It is the user's
    responsibility to make sure her process has enough memory to hold the returned
    dict.

    See also: pflatten_node_list
    """

    # Make sure we are not wasting our time.
    assert isinstance(nl,(sph.Spheral.NodeSpace.FluidNodeList3d,
                          sph.Spheral.SolidMaterial.SolidNodeList3d)
                      ), "argument 1 must be a node list"
    assert isinstance(silent, bool), "true or false"
    
    # Start collecting data.
    if not silent:
        sys.stdout.write('Pickling ' +  nl.label() + ' ' + nl.name + '........')

    # Get values of field variables stored in internal nodes.
    xloc = nl.positions().internalValues()
    vloc = nl.velocity().internalValues()
    mloc = nl.mass().internalValues()
    rloc = nl.massDensity().internalValues()
    uloc = nl.specificThermalEnergy().internalValues()
    Hloc = nl.Hfield().internalValues()
    #(pressure and temperature are stored in the eos object.)
    eos = nl.equationOfState()
    ploc = sph.ScalarField('ploc',nl)
    Tloc = sph.ScalarField('Tloc',nl)
    rref = nl.massDensity()
    uref = nl.specificThermalEnergy()
    eos.setPressure(ploc,rref,uref)
    eos.setTemperature(Tloc,rref,uref)

    # Zip fields so that we have all fields for each node in the same tuple.
    #  We do this so we can concatenate everything in a single reduction operation,
    #  to ensure that all fields in one record in the final list belong to the 
    #  same node.
    localFields = zip(xloc, vloc, mloc, rloc, uloc, ploc, Tloc, Hloc)

    # Do a SUM reduction on all ranks.
    #  This works because the + operator for python lists is a concatenation!
    globalFields = mpi.allreduce(localFields, mpi.SUM)

    # Create a dictionary to hold field variables.
    nlFieldDict = dict(name=nl.name,
                       x=[],   # position vector
                       v=[],   # velocity vector
                       m=[],   # mass
                       rho=[], # mass density
                       p=[],   # pressure
                       h=[],   # smoothing ellipsoid axes
                       T=[],   # temperature
                       U=[],   # specific thermal energy
                      )

    # Loop over nodes to fill field values.
    nbGlobalNodes = mpi.allreduce(nl.numInternalNodes, mpi.SUM)
    for k in range(nbGlobalNodes):
        nlFieldDict[  'x'].append((globalFields[k][0].x, globalFields[k][0].y, globalFields[k][0].z))
        nlFieldDict[  'v'].append((globalFields[k][1].x, globalFields[k][1].y, globalFields[k][1].z))
        nlFieldDict[  'm'].append( globalFields[k][2])
        nlFieldDict['rho'].append( globalFields[k][3])
        nlFieldDict[  'U'].append( globalFields[k][4])
        nlFieldDict[  'p'].append( globalFields[k][5])
        nlFieldDict[  'T'].append( globalFields[k][6])
        nlFieldDict[  'h'].append((globalFields[k][7].Inverse().eigenValues().x,
                                   globalFields[k][7].Inverse().eigenValues().y,
                                   globalFields[k][7].Inverse().eigenValues().z,
                                   ))

    # Optionally, pickle the dict to a file.
    if mpi.rank == 0:
        if filename is not None:
            if isinstance(filename, str):
                with open(filename, 'wb') as fid:
                    pickle.dump(nlFieldDict, fid)
                    pass
                pass
            else:
                msg = "Dict NOT pickled to file because " + \
                      "argument 2 is {} instead of {}".format(type(filename), type('x'))
                sys.stderr.write(msg+'\n')
                pass
            pass
        pass
        
    # And Bob's our uncle.
    if not silent:
        print "Done."
    return nlFieldDict
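
# The SUM reduction above works because '+' on python lists concatenates, so
# each tuple of per-node fields survives intact; a minimal self-check sketch
# using the same pyMPI-style module:
import mpi

local = [(mpi.rank, k) for k in range(2)]   # two records per rank
merged = mpi.allreduce(local, mpi.SUM)      # concatenation across ranks
assert len(merged) == 2 * mpi.procs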
    # at the requested angle.  We'll have it coming in from the positive x direction
    # in the xy plane.
    disp = Vector((rImpactor + rTarget) * cos(pi / 180.0 * angle_impact),
                  (rImpactor + rTarget) * sin(pi / 180.0 * angle_impact), 0.0)
    for i in xrange(impactorGenerator.localNumNodes()):
        impactorGenerator.x[i] += disp.x
        impactorGenerator.y[i] += disp.y
        impactorGenerator.z[i] += disp.z

    print "Starting node distribution..."
    distributeNodes3d((target, targetGenerator), (impactor, impactorGenerator))

    nGlobalNodes = 0
    for n in nodeSet:
        print "Generator info for %s" % n.name
        print "   Minimum number of nodes per domain : ", mpi.allreduce(
            n.numInternalNodes, mpi.MIN)
        print "   Maximum number of nodes per domain : ", mpi.allreduce(
            n.numInternalNodes, mpi.MAX)
        print "               Global number of nodes : ", mpi.allreduce(
            n.numInternalNodes, mpi.SUM)
        nGlobalNodes += mpi.allreduce(n.numInternalNodes, mpi.SUM)
    del n
    print "Total number of (internal) nodes in simulation: ", nGlobalNodes
    print "Ratio of impactor/target node mass : ", impactor.mass().max(
    ) / target.mass().max()

    # Initialize the impactor velocity.
    vel = impactor.velocity()
    for i in xrange(impactor.numInternalNodes):
        vel[i].x = -vImpact
Example #36
    def testPolyhedralMeshParallel(self):
        mesh, void = generatePolyhedralMesh([self.nodes],
                                            xmin=xmin,
                                            xmax=xmax,
                                            generateParallelConnectivity=True)

        neighborDomains = [int(x) for x in mesh.neighborDomains]
        sharedNodes = []
        for ll in mesh.sharedNodes:
            sharedNodes.append([int(x) for x in ll])
        assert len(neighborDomains) == len(mesh.sharedNodes)

        ##         # Check the correct domains are talking to each other.
        ##         nxproc = int(sqrt(numDomains))
        ##         assert nxproc*nxproc == numDomains
        ##         ixproc = rank % nxproc
        ##         iyproc = rank / nxproc
        ##         neighborDomainsAnswer = []
        ##         for iy in xrange(max(0, iyproc - 1), min(nxproc, iyproc + 2)):
        ##             for ix in xrange(max(0, ixproc - 1), min(nxproc, ixproc + 2)):
        ##                 if not (ix == ixproc and iy == iyproc):
        ##                     neighborDomainsAnswer.append(ix + iy*nxproc)
        ##         ok = mpi.allreduce((neighborDomains == neighborDomainsAnswer), mpi.MIN)
        ##         self.failUnless(ok, "Strange neighbor domains for %i : %s ?= %s" % (rank, neighborDomains, neighborDomainsAnswer))

        # Check that the communicated mesh nodes are consistent.
        boxInv = xmax - xmin
        boxInv = Vector(1.0 / boxInv.x, 1.0 / boxInv.y, 1.0 / boxInv.z)
        for sendProc in xrange(numDomains):
            numChecks = mpi.bcast(len(neighborDomains), root=sendProc)
            assert mpi.allreduce(numChecks,
                                 mpi.MIN) == mpi.allreduce(numChecks, mpi.MAX)
            for k in xrange(numChecks):
                if rank == sendProc:
                    ksafe = k
                else:
                    ksafe = 0
                recvProc = mpi.bcast(neighborDomains[ksafe], root=sendProc)
                recvHashes = mpi.bcast([
                    hashPosition(mesh.node(i).position(), xmin, xmax, boxInv)
                    for i in sharedNodes[ksafe]
                ],
                                       root=sendProc)
                recvPos = mpi.bcast(
                    [str(mesh.node(i).position()) for i in sharedNodes[ksafe]],
                    root=sendProc)
                ok = True
                msg = ""
                if rank == recvProc:
                    assert sendProc in neighborDomains
                    kk = neighborDomains.index(sendProc)
                    assert kk < len(sharedNodes)
                    ok = ([
                        hashPosition(
                            mesh.node(i).position(), xmin, xmax, boxInv)
                        for i in sharedNodes[kk]
                    ] == recvHashes)
                    msg = (
                        "Shared node indicies don't match %i %i\n   %s != %s\n    %s\n    %s"
                        % (rank, sendProc,
                           str([
                               hashPosition(
                                   mesh.node(i).position(), xmin, xmax, boxInv)
                               for i in sharedNodes[kk]
                           ]), recvHashes, [
                               str(mesh.node(i).position())
                               for i in sharedNodes[kk]
                           ], recvPos))
                self.failUnless(mpi.allreduce(ok, mpi.MIN), msg)

        # Check that all shared nodes have been found.
        myHashes = [
            hashPosition(mesh.node(i).position(), xmin, xmax, boxInv)
            for i in xrange(mesh.numNodes)
        ]
        myHashSet = set(myHashes)
        for sendProc in xrange(numDomains):
            theirHashSet = mpi.bcast(myHashSet, root=sendProc)
            if sendProc != mpi.rank:
                commonHashes = myHashSet.intersection(theirHashSet)
                self.failIf(
                    len(commonHashes) > 0
                    and (not sendProc in neighborDomains),
                    "Missed a neighbor domain : %i %i : %i" %
                    (mpi.rank, sendProc, len(commonHashes)))
                self.failIf(
                    len(commonHashes) == 0 and (sendProc in neighborDomains),
                    "Erroneously communicating between domains : %i %i" %
                    (mpi.rank, sendProc))
                if len(commonHashes) > 0:
                    k = neighborDomains.index(sendProc)
                    self.failUnless(
                        len(commonHashes) == len(sharedNodes[k]),
                        "Size of shared nodes does not match: %i %i : %i %i" %
                        (mpi.rank, sendProc, len(commonHashes),
                         len(sharedNodes[k])))
                    sharedHashes = set([myHashes[i] for i in sharedNodes[k]])
                    self.failUnless(sharedHashes == commonHashes,
                                    "Set of common hashes does not match")

        return
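
# hashPosition is used above but not defined in this snippet; a plausible
# stand-in quantizes each coordinate to an integer bin so that the same
# physical node hashes identically on every domain (the bin count is an
# assumption):
def hashPosition(p, xmin, xmax, boxInv, nbins=2**16):
    return (int((p.x - xmin.x) * boxInv.x * nbins),
            int((p.y - xmin.y) * boxInv.y * nbins),
            int((p.z - xmin.z) * boxInv.z * nbins))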
Example #37
    Wsum = 0.0
    for nodeID in xrange(nodes1.numInternalNodes):
        Hi = H[nodeID]
        etaij = (Hi * pos[nodeID]).magnitude()
        if smoothSpike:
            Wi = WT.kernelValue(etaij / smoothSpikeScale, 1.0)
        else:
            if etaij < smoothSpikeScale * kernelExtent:
                Wi = 1.0
            else:
                Wi = 0.0
        Ei = Wi * Espike
        epsi = Ei / mass[nodeID]
        eps[nodeID] = epsi
        Wsum += Wi
    Wsum = mpi.allreduce(Wsum, mpi.SUM)
    assert Wsum > 0.0
    for nodeID in xrange(nodes1.numInternalNodes):
        eps[nodeID] /= Wsum
        Esum += eps[nodeID] * mass[nodeID]
        eps[nodeID] += eps0
else:
    i = -1
    rmin = 1e50
    for nodeID in xrange(nodes1.numInternalNodes):
        rij = pos[nodeID].magnitude()
        if rij < rmin:
            i = nodeID
            rmin = rij
        eps[nodeID] = eps0
    rminglobal = mpi.allreduce(rmin, mpi.MIN)
Example #38
        if pos[i].x < z0 + dz:
            eps[i] += epsi
            Esum += mass[i] * epsi
elif problem == "cylindrical":
    epsi = Espike / (rho0 * pi * dr * dr)
    for i in xrange(nodes1.numInternalNodes):
        if pos[i].y < r0 + dr:
            eps[i] += epsi
            Esum += mass[i] * epsi
else:
    epsi = 0.5 * Espike / (rho0 * pi * dr * dr * dz)
    for i in xrange(nodes1.numInternalNodes):
        if pos[i].magnitude() < sqrt(dr * dr + dz * dz):
            eps[i] += epsi
            Esum += mass[i] * epsi
Eglobal = mpi.allreduce(Esum, mpi.SUM)
if problem == "planar":
    Eexpect = 0.5 * Espike * pi * (r1 * r1 - r0 * r0)
elif problem == "cylindrical":
    Eexpect = Espike * (z1 - z0)
else:
    Eexpect = 0.5 * Espike
print "Initialized a total energy of", Eglobal, Eexpect, Eglobal / Eexpect
assert fuzzyEqual(Eglobal, Eexpect)

#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase()
output("db")
output("db.appendNodeList(nodes1)")
Example #39
                      (rTarget + rImpactor) * sin(pi / 180.0 * angleImpact),
                      0.0)
    for k in range(impactorGenerator.localNumNodes()):
        impactorGenerator.x[k] += displace.x
        impactorGenerator.y[k] += displace.y
        impactorGenerator.z[k] += displace.z
        pass

    # Fill node lists using generators and distribute to ranks.
    print "Starting node distribution..."
    distributeNodes3d((target, targetGenerator), (impactor, impactorGenerator))
    nGlobalNodes = 0
    for n in nodeSet:
        print "Generator info for %s" % n.name
        print "   Minimum number of nodes per domain : ", \
              mpi.allreduce(n.numInternalNodes, mpi.MIN)
        print "   Maximum number of nodes per domain : ", \
              mpi.allreduce(n.numInternalNodes, mpi.MAX)
        print "               Global number of nodes : ", \
              mpi.allreduce(n.numInternalNodes, mpi.SUM)
        nGlobalNodes += mpi.allreduce(n.numInternalNodes, mpi.SUM)
    del n
    print "Total number of (internal) nodes in simulation: ", nGlobalNodes
    WMR = (max(impactor.mass().max(),
               target.mass().max()) / min(impactor.mass().min(),
                                          target.mass().min()))
    if WMR < 1.5:
        sys.stderr.write("\033[32m")
    else:
        sys.stderr.write("\033[31m")
    print "Worst node mass ratio: {}".format(WMR)
    sys.stderr.write("\033[0m")
Example #41
    def __init__(self,
                 ndim,
                 n,
                 rho,
                 boundary,
                 gradrho,
                 holes,
                 centroidFrac,
                 maxIterations,
                 fracTol,
                 tessellationBaseDir,
                 tessellationFileName,
                 nNodePerh,
                 randomseed,
                 maxNodesPerDomain,
                 seedPositions,
                 enforceConstantMassPoints,
                 cacheFileName):

        assert ndim in (2,3)
        assert n > 0

        # Load our handy aliases.
        if ndim == 2:
            import Spheral2d as sph
        else:
            import Spheral3d as sph

        # Did we get passed a function or a constant for the density?
        if type(rho) in (float, int):
            def rhofunc(posi):
                return rho
            rhomax = rho
        else:
            rhofunc = rho
            rhomax = None
        self.rhofunc = rhofunc

        # Some useful geometry.
        box = boundary.xmax - boundary.xmin
        length = box.maxElement()
        boundvol = boundary.volume
        for hole in holes:
            boundvol -= hole.volume
        if boundvol <= 0.0:
            # The holes were not entirely contained in the bounding volume, so we punt.
            boundvol = 0.5*boundary.volume
        boxvol = 1.0
        for idim in xrange(ndim):
            boxvol *= box[idim]
        fracOccupied = min(1.0, boxvol/boundvol)
        assert fracOccupied > 0.0 and fracOccupied <= 1.0

        # If there is a pre-existing cache file, load it instead of doing all the work.
        if not self.restoreState(cacheFileName):

            # Create a temporary NodeList we'll use to store and update positions.
            eos = sph.GammaLawGasMKS(2.0, 2.0)
            WT = sph.TableKernel(sph.NBSplineKernel(7), 1000)
            hmax = 2.0*(boundvol/pi*n)**(1.0/ndim)
            nodes = sph.makeFluidNodeList("tmp generator nodes", 
                                          eos,
                                          hmin = 1e-10,
                                          hmax = hmax,
                                          kernelExtent = WT.kernelExtent,
                                          hminratio = 1.0,
                                          nPerh = nNodePerh,
                                          topGridCellSize = 2.0*WT.kernelExtent*hmax)
        
            # Make a first pass looking for the maximum density (roughly).
            pos = nodes.positions()
            mass = nodes.mass()
            rhof = nodes.massDensity()
            H = nodes.Hfield()
            imin, imax = self.globalIDRange(n)
            nlocal = imax - imin
            nodes.numInternalNodes = nlocal
        
            # If the user provided the starting or seed positions, use 'em.
            if seedPositions is not None:
                hi = min(hmax, 2.0 * (boundvol/(pi*n))**(1.0/ndim))
                assert hi > 0.0
                nlocal = len(seedPositions)
                assert mpi.allreduce(nlocal, mpi.SUM) == n
                nodes.numInternalNodes = nlocal
                for i in xrange(nlocal):
                    pos[i] = seedPositions[i]
                    rhoi = rhofunc(pos[i])
                    rhof[i] = rhoi
                    mass[i] = rhoi * boundvol/n  # Not actually correct, but mass will be updated in centroidalRelaxNodes
                    H[i] = sph.SymTensor.one / hi
        
            else:
                # If necessary probe for a maximum density statistically.
                rangen = random.Random(randomseed + mpi.rank)
                if not rhomax:
                    rhomax = 0.0
                    nglobal = 0
                    while nglobal < n:
                        p = boundary.xmin + length*sph.Vector(rangen.random(), rangen.random(), rangen.random())
                        use = boundary.contains(p, False)
                        if use:
                            ihole = 0
                            while use and ihole < len(holes):
                                use = not holes[ihole].contains(p, True)
                                ihole += 1
                        if use:
                            rhomax = max(rhomax, rhofunc(p))
                            i = 1
                        else:
                            i = 0
                        nglobal += mpi.allreduce(i, mpi.SUM)
                    rhomax = mpi.allreduce(rhomax, mpi.MAX)
                print "MedialGenerator: selected a maximum density of ", rhomax
            
                # It's a bit tricky to properly use the Sobol sequence in parallel.  We handle this by searching for the lowest
                # seeds that give us the desired number of points.
                seeds = []
                seed = 0
                while mpi.allreduce(len(seeds), mpi.SUM) < n:
                    localseed = seed + mpi.rank
                    [coords, newseed] = i4_sobol(ndim, localseed)
                    p = boundary.xmin + length*sph.Vector(*tuple(coords))
                    use = boundary.contains(p, False)
                    if use:
                        ihole = 0
                        while use and ihole < len(holes):
                            use = not holes[ihole].contains(p, True)
                            ihole += 1
                    if use:
                        rhoi = rhofunc(p)
                        if rangen.random() < rhoi/rhomax:
                            seeds.append(localseed)
                    seed += mpi.procs
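                # (Accepting each trial with probability rhoi/rhomax is plain
                # rejection sampling, so the kept seed points follow the
                # density profile.)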
            
                # Drop the highest value seeds to ensure we have the correct number of total points.
                nglobal = mpi.allreduce(len(seeds), mpi.SUM)
                assert n + mpi.procs >= nglobal
                seeds.sort()
                seeds = [-1] + seeds
                while mpi.allreduce(len(seeds), mpi.SUM) > n + mpi.procs:
                    maxseed = mpi.allreduce(seeds[-1], mpi.MAX)
                    assert maxseed > -1
                    if seeds[-1] == maxseed:
                        seeds = seeds[:-1]
                seeds = seeds[1:]
            
                # Load balance the number of seeds per domain.
                if len(seeds) > nlocal:
                    extraseeds = seeds[nlocal:]
                else:
                    extraseeds = []
                extraseeds = mpi.allreduce(extraseeds, mpi.SUM)
                seeds = seeds[:nlocal]
                for iproc in xrange(mpi.procs):
                    ngrab = max(0, nlocal - len(seeds))
                    ntaken = mpi.bcast(ngrab, root=iproc)
                    if mpi.rank == iproc:
                        seeds += extraseeds[:ngrab]
                    extraseeds = extraseeds[ntaken:]
                assert len(extraseeds) == 0
                assert len(seeds) == nlocal
                assert mpi.allreduce(len(seeds), mpi.SUM) == n
            
                # Initialize the desired number of generators in the boundary using the Sobol sequence.
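                # hi is roughly the diameter of a sphere carrying one point's share of the volume, capped at hmax.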
                hi = min(hmax, 2.0 * (boundvol/(pi*n))**(1.0/ndim))
                assert hi > 0.0
                for i, seed in enumerate(seeds):
                    [coords, newseed] = i4_sobol(ndim, seed)
                    p = boundary.xmin + length*sph.Vector(*tuple(coords))
                    rhoi = rhofunc(p)
                    pos[i] = p
                    rhof[i] = rhoi
                    mass[i] = rhoi * boundvol/n  # Not actually correct, but mass will be updated in centroidalRelaxNodes
                    H[i] = sph.SymTensor.one / hi
        
                # Each domain has independently generated the correct number of points, but they are randomly distributed.
                # Before going further it's useful to collect the points spatially by domain.
                # We'll use the Spheral Peano-Hilbert space filling curve implementation to do this.
                if mpi.procs > 1:
                    db = sph.DataBase()
                    db.appendNodeList(nodes)
                    maxNodes = max(maxNodesPerDomain, 2*n/mpi.procs)
                    redistributor = sph.PeanoHilbertOrderRedistributeNodes(2.0)
                    redistributor.redistributeNodes(db)
        
            # If we're in parallel we need the parallel boundary.
            if mpi.procs > 1:
                boundaries = [sph.TreeDistributedBoundary.instance()]
            else:
                boundaries = []
        
            # Iterate the points toward centroidal relaxation.
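            # centroidFrac sets how far each point moves toward the density-weighted centroid of its cell per iteration; fracTol and maxIterations bound the iteration.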
            vol, surfacePoint = centroidalRelaxNodes([(nodes, boundary, holes)],
                                                     W = WT,
                                                     rho = rhofunc,
                                                     gradrho = gradrho,
                                                     boundaries = boundaries,
                                                     fracTol = fracTol,
                                                     centroidFrac = centroidFrac,
                                                     maxIterations = maxIterations,
                                                     tessellationBaseDir = tessellationBaseDir,
                                                     tessellationFileName = tessellationFileName)
        
            # Store the values the descendant generators will need.
            self.vol, self.surface, self.pos, self.m, self.H = [], [], [], [], []
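            # The stored mass is the relaxed cell volume times the local density, superseding the rough boundvol/n estimate used for seeding.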
            for i in xrange(nodes.numInternalNodes):
                self.vol.append(vol(0,i))
                self.surface.append(surfacePoint(0,i))
                self.pos.append(sph.Vector(pos[i]))
                self.m.append(vol(0,i) * rhofunc(pos[i]))
                self.H.append(sph.SymTensor(H[i]))
            assert mpi.allreduce(len(self.vol), mpi.SUM) == n
            assert mpi.allreduce(len(self.surface), mpi.SUM) == n
            assert mpi.allreduce(len(self.pos), mpi.SUM) == n
            assert mpi.allreduce(len(self.m), mpi.SUM) == n
            assert mpi.allreduce(len(self.H), mpi.SUM) == n
        
            # If requested, enforce constant mass points.
            if enforceConstantMassPoints:
                msum = mpi.allreduce(sum([0.0] + self.m), mpi.SUM)
                self.m = [msum/n]*len(self.pos)

            # If requested, we can store the state of the generator such that it can be
            # later restored without going through all that work.
            if cacheFileName:
                self.dumpState(cacheFileName)

        return
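
# Aside: the seed-selection loop above is density-weighted rejection sampling
# driven by a quasi-random sequence. A minimal serial sketch of the same idea,
# assuming caller-supplied rho(p) and contains(p) functions and using
# random.Random in place of i4_sobol (a sketch, not the Spheral API):

import random

def rejection_sample(npoints, rho, rhomax, contains, seed=49489):
    """Draw npoints samples distributed as rho(p) over the region where
    contains(p) is true, by rejection against the upper bound rhomax."""
    rangen = random.Random(seed)
    pts = []
    while len(pts) < npoints:
        p = (rangen.random(), rangen.random(), rangen.random())
        # Accept the candidate with probability rho(p)/rhomax, exactly as the
        # generator above accepts Sobol points.
        if contains(p) and rangen.random() < rho(p)/rhomax:
            pts.append(p)
    return pts

# Example usage: points in the unit cube, denser toward large x.
pts = rejection_sample(1000, lambda p: 1.0 + p[0], 2.0, lambda p: True)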
    # We disturb the lattice symmetry to avoid artificial singularities.
    for k in range(planetGenerator.localNumNodes()):
        planetGenerator.x[k] *= 1.0 + random.uniform(-0.02, 0.02)
        planetGenerator.y[k] *= 1.0 + random.uniform(-0.02, 0.02)
        planetGenerator.z[k] *= 1.0 + random.uniform(-0.02, 0.02)
        pass


    # Fill node list using generators and distribute to ranks.
    print "Starting node distribution..."
    distributeNodes3d((planet, planetGenerator))
    nGlobalNodes = 0
    for n in nodeSet:
        print "Generator info for %s" % n.name
        print "   Minimum number of nodes per domain : ", \
              mpi.allreduce(n.numInternalNodes, mpi.MIN)
        print "   Maximum number of nodes per domain : ", \
              mpi.allreduce(n.numInternalNodes, mpi.MAX)
        print "               Global number of nodes : ", \
              mpi.allreduce(n.numInternalNodes, mpi.SUM)
        nGlobalNodes += mpi.allreduce(n.numInternalNodes, mpi.SUM)
    del n
    print "Total number of (internal) nodes in simulation: ", nGlobalNodes
    
    pass
# The spheral controller needs a DataBase object to hold the node lists.
db = DataBase()
for n in nodeSet:
    db.appendNodeList(n)
del n