Example #1
def mixingScale(cycle, t, dt):
    si = []
    ci = []
    di = []
    ke = []
    for nodeL in nodeSet:
        xprof = mpi.reduce([x.x for x in nodeL.positions().internalValues()], mpi.SUM)
        yprof = mpi.reduce([x.y for x in nodeL.positions().internalValues()], mpi.SUM)
        vely = mpi.reduce([v.y for v in nodeL.velocity().internalValues()], mpi.SUM)
        hprof = mpi.reduce([1.0/sqrt(H.Determinant()) for H in nodeL.Hfield().internalValues()], mpi.SUM)
        rhoprof = mpi.reduce(nodeL.massDensity().internalValues(), mpi.SUM)
        if mpi.rank == 0:
            for j in xrange(len(xprof)):
                ke.append(0.5*rhoprof[j]*vely[j]*vely[j])
                if yprof[j] < 0.5:
                    si.append(vely[j]*hprof[j]*hprof[j]*sin(4*pi*xprof[j])*exp(-4.0*pi*abs(yprof[j]-0.25)))
                    ci.append(vely[j]*hprof[j]*hprof[j]*cos(4*pi*xprof[j])*exp(-4.0*pi*abs(yprof[j]-0.25)))
                    di.append(hprof[j]*hprof[j]*exp(-4.0*pi*abs(yprof[j]-0.25)))
                else:
                    si.append(vely[j]*hprof[j]*hprof[j]*sin(4*pi*xprof[j])*exp(-4.0*pi*abs((1.0-yprof[j])-0.25)))
                    ci.append(vely[j]*hprof[j]*hprof[j]*cos(4*pi*xprof[j])*exp(-4.0*pi*abs((1.0-yprof[j])-0.25)))
                    di.append(hprof[j]*hprof[j]*exp(-4.0*pi*abs((1.0-yprof[j])-0.25)))
    if mpi.rank == 0:
        S = sum(si)
        C = sum(ci)
        D = sum(di)
        M = sqrt((S/D)*(S/D) + (C/D)*(C/D))*2.0
        KE = max(ke)
        print "At time t = %s, Mixing Amp M = %s \n" % (t, M)
        with open(os.path.join(dataDir, mixFile), "a") as myfile:
            myfile.write("%s\t %s\t %s\n" % (t, M, KE))
Example #2
File: net.py Project: smackesey/sparco
 def compute_phi_gradient(self):
   nbuf, rbuf = self.nbuf, self.rbuf
   npats = nbuf.npats[0]
   for i in range(npats):
     res = sp.util.obj(nbuf.x[i], nbuf.a[i], nbuf.phi, lam=self.job.lam)
     for k, r in zip(['xhat', 'dx', 'E', 'dphi', 'l1_penalty'], res):
       getattr(nbuf, k)[i] = r
   nbuf.sum.dphi = np.sum(nbuf.dphi[:npats], axis=0)
   mpi.reduce(nbuf.sum.dphi, rbuf.dphi, op=mpi.SUM)
   if mpi.rank == mpi.root:
     rbuf.dphi = rbuf.dphi / np.sum(rbuf.npats)
Example #3
    def __call__(self, cycle, time, dt):
        procs = mpi.procs
        rank = mpi.rank
        serialData = []
        i, j = 0, 0
        for i in xrange(procs):
            for nodeL in self.nodeSet:
                if rank == i:
                    for j in xrange(nodeL.numInternalNodes):
                        serialData.append([
                            nodeL.positions()[j],
                            3.0 / (nodeL.Hfield()[j].Trace()),
                            nodeL.mass()[j],
                            nodeL.massDensity()[j],
                            nodeL.specificThermalEnergy()[j]
                        ])

        serialData = mpi.reduce(serialData, mpi.SUM)
        if rank == 0:
            f = open(self.directory + "/serialDump" + str(cycle) + ".ascii",
                     'w')
            for i in xrange(len(serialData)):
                f.write("{0} {1} {2} {3} {4} {5} {6} {7}\n".format(
                    i, serialData[i][0][0], serialData[i][0][1], 0.0,
                    serialData[i][1], serialData[i][2], serialData[i][3],
                    serialData[i][4]))
            f.close()
Example #4
File: Pi.py Project: neffmallon/pistol
def mpipi(npoints=100,function=pi_simple):
    from random import random,seed
    import mpi
    seed(random()*mpi.rank) # BAD way of doing parallel RNG
    seq = function(npoints)
    pi = seq[-1]
    pi_sum = mpi.reduce(pi,mpi.SUM)
    if mpi.rank == 0: print pi_sum/float(mpi.size)
Example #5
    def parallelRunTest(self):
        #target = int(mpi.procs / 3)
        target = 0
        myNum1 = 2
        myNum2 = (mpi.rank % 2) + 1
        myNum3 = mpi.rank * 2
        if mpi.rank == 0:
            obj1 = [1, "foo"]
            obj2 = "String"
        if mpi.rank != 0:
            obj1 = (mpi.rank % 2) + 1
            obj2 = 1

        answer1 = mpi.reduce(myNum1, mpi.PROD, target)
        answer2 = mpi.reduce(myNum2, mpi.PROD, target)
        answer3 = mpi.reduce(myNum3, mpi.PROD, target)
        answer4 = mpi.reduce(obj1, mpi.PROD, target)
        answer5 = mpi.reduce(obj2, mpi.PROD, target)

        #Generate correct answers
        correctAnswer1 = 1
        correctAnswer2 = 1
        correctAnswer4 = [1, "foo"]
        for x in range(mpi.procs):
            correctAnswer1 *= 2
            correctAnswer2 *= (x % 2) + 1
            if x > 0:
                correctAnswer4 *= (x % 2) + 1

        if mpi.rank == target:
            if answer1 != correctAnswer1:
                self.fail("reduce PROD failed on #'s 0")
            if answer2 != correctAnswer2:
                failString = "reduce PROD failed on #'s test 1\n"
                failString += "Reduce gave result " + str(answer2)
                failString += " and correct answer is " + str(correctAnswer2)
                self.fail(failString)
            if answer3 != 0:
                self.fail("reduce PROD failed on #'s 2")
            if answer4 != correctAnswer4:
                self.fail("reduce PROD failed on list and #'s")
            if answer5 != "String":
                self.fail("reduce PROD failed on string and #'s")

        return
Example #6
    def parallelRunTest(self):
        target = int(mpi.procs / 2)
        #target = 0
        myStr = "la" + str(mpi.rank)
        myNum = mpi.rank
        myList = ["la", myNum]
        myTuple = ("FoO", "GOO")
        myLongStr = ""
        for i in range(512):
            myLongStr += str(i % 10)

        results = [0, 0, 0, 0, 0]
        results[0] = mpi.reduce(myStr, mpi.SUM, target)
        results[1] = mpi.reduce(myNum, mpi.SUM, target)
        results[2] = mpi.reduce(myList, mpi.SUM, target)
        results[3] = mpi.reduce(myTuple, mpi.SUM, target)
        results[4] = mpi.reduce(myLongStr, mpi.SUM, target)

        #Set up correct answer for string reduce
        correctAnswers = [0, 0, 0, 0, ""]

        #Set up correct answer for list reduce
        correctAnswers[0] = ""
        correctAnswers[1] = (mpi.procs * (mpi.procs - 1)) / 2
        correctAnswers[2] = []
        correctAnswers[3] = ("FoO", "GOO") * mpi.procs
        for x in range(mpi.procs):
            correctAnswers[0] += "la" + str(x)
            correctAnswers[2] += ["la", x]

        for x in range(mpi.procs):
            correctAnswers[4] += myLongStr

        for x in range(5):
            if mpi.rank == target:
                if correctAnswers[x] != results[x]:
                    self.fail(
                        "Reduce SUM failed on test %s (\n%r is not \n%r)" %
                        (x, results[x], correctAnswers[x]))
            else:
                if results[x] != None:
                    failStr = "Reduce SUM failed on off-target proc on test "
                    failStr += str(x)
                    self.fail(failStr)
        return
Example #7
File: PyMPITest.py Project: LLNL/pynamic
    def parallelRunTest(self):
        #target = int(mpi.procs / 3)
        target = 0
        myNum1 = 2
        myNum2 = (mpi.rank % 2)+ 1
        myNum3 = mpi.rank * 2
        if mpi.rank == 0:
            obj1 = [1, "foo"]
            obj2 = "String"
        if mpi.rank != 0:
            obj1 = (mpi.rank % 2) + 1
            obj2 = 1

        answer1 = mpi.reduce( myNum1, mpi.PROD, target )
        answer2 = mpi.reduce( myNum2, mpi.PROD, target )
        answer3 = mpi.reduce( myNum3, mpi.PROD, target )
        answer4 = mpi.reduce( obj1,   mpi.PROD, target )
        answer5 = mpi.reduce( obj2,   mpi.PROD, target )

        #Generate correct answers
        correctAnswer1 = 1
        correctAnswer2 = 1
        correctAnswer4 = [1,"foo"]
        for x in range(mpi.procs):
            correctAnswer1 *= 2
            correctAnswer2 *= (x % 2) + 1
            if x > 0:
                correctAnswer4 *= (x% 2) + 1

        if mpi.rank == target:
            if answer1 != correctAnswer1:
                self.fail("reduce PROD failed on #'s 0")
            if answer2 != correctAnswer2:
                failString = "reduce PROD failed on #'s test 1\n"
                failString += "Reduce gave result " + str(answer2)
                failString += " and correct answer is " + str(correctAnswer2)
                self.fail(failString)
            if answer3 != 0:
                self.fail("reduce PROD failed on #'s 2")
            if answer4 != correctAnswer4:
                self.fail("reduce PROD failed on list and #'s")
            if answer5 != "String":
                self.fail("reduce PROD failed on string and #'s")

        return
Example #8
File: PyMPITest.py Project: LLNL/pynamic
    def parallelRunTest(self):
        target = int(mpi.procs / 2)
        #target = 0
        myStr = "la" + str(mpi.rank)
        myNum = mpi.rank
        myList = [ "la", myNum ]
        myTuple = ("FoO", "GOO")
        myLongStr = ""
        for i in range(512):
          myLongStr += str(i%10)

        results = [0,0,0,0,0]
        results[0] = mpi.reduce(myStr,mpi.SUM, target)
        results[1] = mpi.reduce(myNum,mpi.SUM, target)
        results[2] = mpi.reduce(myList,mpi.SUM, target)
        results[3]= mpi.reduce(myTuple,mpi.SUM, target)
        results[4]= mpi.reduce( myLongStr, mpi.SUM, target)

        #Set up correct answer for string reduce
        correctAnswers = [0,0,0,0,""]

        #Set up correct answer for list reduce
        correctAnswers[0] = ""
        correctAnswers[1] = (mpi.procs*(mpi.procs-1))/2
        correctAnswers[2] = []
        correctAnswers[3] = ("FoO", "GOO")*mpi.procs
        for x in range(mpi.procs):
            correctAnswers[0] += "la" + str(x)
            correctAnswers[2] += ["la", x]

        for x in range(mpi.procs):
            correctAnswers[4] += myLongStr

        for x in range(5):
            if mpi.rank == target:
                if correctAnswers[x] != results[x]:
                    self.fail( "Reduce SUM failed on test %s (\n%r is not \n%r)"%(x,results[x],correctAnswers[x]) )
            else:
                if results[x] != None:
                    failStr = "Reduce SUM failed on off-target proc on test "
                    failStr += str(x)
                    self.fail(failStr)
        return
Example #9
 def reduceObjImpl(self, x, op, ans):
     for root in xrange(mpi.procs):
         globalx = mpi.reduce(x, op=op, root=root)
         ok = True
         if mpi.rank == root:
             ok = (globalx == ans)
         else:
             ok = (globalx is None)
         if not ok:
             print globalx, ans
         self.check(ok)
Example #10
def integrate(rectangles, function):
    # equivalent to mpi.WORLD.bcast(n,0) or rather a
    # C call to MPI_Bcast(PYTHON_COMM_WORLD,n,0,&status)
    n = mpi.bcast(rectangles)

    h = 1.0/n
    sum = 0.0
    for i in range(mpi.rank+1,n+1,mpi.procs):
        x = h * (i-0.5)
        sum = sum + function(x)

    myAnswer = h * sum
    answer = mpi.reduce(myAnswer,mpi.SUM)
    return answer
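A minimal driver sketch for the example above (my addition, not from the source): under pyMPI every rank runs the same script, each rank calls integrate(), and only rank 0 receives the reduced total. The integrand f and the rectangle count are illustrative choices.

import mpi

def f(x):
    # classic integrand whose integral over [0, 1] equals pi
    return 4.0 / (1.0 + x * x)

pi_approx = integrate(2000, f)   # the broadcast means only rank 0's count really matters
if mpi.rank == 0:                # off-root ranks get None back from mpi.reduce
    print "pi ~= %.12f" % pi_approx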
Example #11
def reduce_test(comm, generator, kind, op, op_kind, root):
    if comm.rank == root:
        print("Reducing to %s of %s at root %d..." % (op_kind, kind, root)),
    my_value = generator(comm.rank)
    result = mpi.reduce(comm, my_value, op, root)
    if comm.rank == root:
        expected_result = generator(0)
        for p in range(1, comm.size):
            expected_result = op(expected_result, generator(p))
        assert result == expected_result
        print("OK.")
    else:
        assert result == None
    return
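A hedged invocation sketch for reduce_test above, assuming Boost.MPI-style Python bindings in which the module exposes mpi.world as the default communicator and mpi.reduce accepts an arbitrary Python callable as the operation; the generator and operator here are illustrative only.

import mpi                         # Boost.MPI Python bindings (assumption)

def make_value(rank):              # each rank contributes rank + 1
    return rank + 1

def add(x, y):                     # plain Python addition as the reduction op
    return x + y

reduce_test(mpi.world, make_value, "int", add, "sum", 0)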
Example #12
def reduce_test(comm, generator, kind, op, op_kind, root):
    if comm.rank == root:
        print ("Reducing to %s of %s at root %d..." % (op_kind, kind, root)),
    my_value = generator(comm.rank)
    result = mpi.reduce(comm, my_value, op, root)
    if comm.rank == root:
        expected_result = generator(0)
        for p in range(1, comm.size):
            expected_result = op(expected_result, generator(p))
        assert result == expected_result
        print "OK."
    else:
        assert result == None
    return
Example #13
File: PyMPITest.py Project: LLNL/pynamic
    def parallelRunTest(self):
        v = 1+(mpi.rank)%2

        results = []
        for kind in ['MAX','MIN','SUM','PROD','LAND',
                     'LOR','LXOR',
                     'MINLOC','MAXLOC' ]:
            function = getattr(mpi,kind)
            try:
                r0 = mpi.allreduce(v,function)
            except RuntimeError,s:
                self.fail("All reduce") 

            try:
                r1 = mpi.reduce(v,function)
            except RuntimeError,s:
                self.fail("All reduce") 
Example #14
    def parallelRunTest(self):
        v = 1 + (mpi.rank) % 2

        results = []
        for kind in [
                'MAX', 'MIN', 'SUM', 'PROD', 'LAND', 'LOR', 'LXOR', 'MINLOC',
                'MAXLOC'
        ]:
            function = getattr(mpi, kind)
            try:
                r0 = mpi.allreduce(v, function)
            except RuntimeError, s:
                self.fail("All reduce")

            try:
                r1 = mpi.reduce(v, function)
            except RuntimeError, s:
                self.fail("All reduce")
Example #15
    def parallelRunTest(self):
        #decide on targets
        #targets = [ int(mpi.procs / 3), 0, mpi.procs - 1]

        targets = [0, 0, 0]
        #values to be reduced
        num1 = mpi.rank
        num2 = 0
        if mpi.rank == 1:
            num2 = 1
        num3 = 1
        num4 = (2**(mpi.rank % 8))
        num5 = 8
        if mpi.rank == 1:
            num5 = 9
        list1 = [mpi.rank + 1]
        list2 = [mpi.rank]

        #do reduces
        results = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        results[0] = mpi.reduce(num1, mpi.LOR, targets[0])
        results[1] = mpi.reduce(num1, mpi.LAND, targets[1])
        results[2] = mpi.reduce(num2, mpi.LOR, targets[2])
        results[3] = mpi.reduce(num3, mpi.LAND, targets[0])
        results[4] = mpi.reduce(num3, mpi.LXOR, targets[1])
        results[5] = mpi.reduce(list1, mpi.LOR, targets[2])
        results[6] = mpi.reduce(list1, mpi.LAND, targets[0])
        results[7] = mpi.reduce(list2, mpi.LAND, targets[1])
        results[8] = mpi.reduce(num1, mpi.MIN, targets[2])
        results[9] = mpi.reduce(num2, mpi.MAX, targets[0])
        results[10] = mpi.reduce(list1, mpi.MIN, targets[1])
        results[11] = mpi.reduce(num1, mpi.MINLOC, targets[2])
        results[12] = mpi.reduce(list1, mpi.MAXLOC, targets[0])
        results[13] = mpi.reduce(num4, mpi.BAND, targets[1])
        results[14] = mpi.reduce(num5, mpi.BXOR, targets[2])

        #correct answers
        correctAnswers = [
            1, 0, 1, 1, mpi.procs % 2, 1, 1, 1, 0, 1, [1], (0, 0),
            (mpi.procs - 1, [mpi.procs]), 0, (mpi.procs % 2) * 8 + 1
        ]

        for x in range(15):
            if mpi.rank == targets[x % 3] and results[x] != correctAnswers[x]:
                failString = "reduce logical failed on test " + str(x)
                failString += "\nReduce gave result " + str(results[x])
                failString += " while correct answer is "
                failString += str(correctAnswers[x]) + "\n"
                self.fail(failString)

            elif mpi.rank != targets[x % 3] and results[x] != None:
                errstr = "reduce LOGICAL failed on off-target"
                errstr += "process on test " + str(x)
                self.fail(errstr)
        return
Example #16
   if time + dt >= goalTime:
      dt = goalTime-time
      teton.dtrmn = dt

   teton.dtrad = dt
   
   cycleTime = advance(mesh, teton, partList)
   totWorkTime= totWorkTime + cycleTime
   totIters = totIters + teton.ninrt
   time = time + dt
   cycle = cycle + 1


mpi.barrier()
totMeshZones = mpi.reduce(myNumZones,mpi.SUM)

checkAnalyticAnswer(goalTime, mesh, partList)

if mpi.rank == 0:
   numUnknowns = teton.ngr * teton.nangsn * totMeshZones * 8
   dumpLineout(mesh, teton,"final.out")
   totWorkTime/=1.0e6
   print "proc 0 num zones =",myNumZones,"  Total mesh zones = ",totMeshZones
   print "SuOlson test version ",versionNumber," completed at time=",time,"   goalTime=",goalTime
   print "Cumulative Iteration Count: ",totIters
   print "Cumulative Cycle Advance Time: ",totWorkTime," s."
   print "Cumulative Angle Loop Time:    ",teton.angleLoopTime, " s."
   print "numUnknowns = ",numUnknowns
   grindTime = totWorkTime /( numUnknowns * totIters * mpi.size)
   print " grind time = ",grindTime
Example #17
else:
    control.advance(goalTime, maxSteps)
    control.updateViz(control.totalSteps, integrator.currentTime, 0.0)
    control.dropRestartFile()

if serialDump:
    procs = mpi.procs
    rank = mpi.rank
    serialData = []
    i, j = 0, 0
    for i in xrange(procs):
        for nodeL in nodeSet:
            if rank == i:
                for j in xrange(nodeL.numInternalNodes):
                    serialData.append([
                        nodeL.positions()[j],
                        3.0 / (nodeL.Hfield()[j].Trace()),
                        nodeL.mass()[j],
                        nodeL.massDensity()[j],
                        nodeL.specificThermalEnergy()[j]
                    ])
    serialData = mpi.reduce(serialData, mpi.SUM)
    if rank == 0:
        f = open(dataDir + "/serialDump.ascii", 'w')
        for i in xrange(len(serialData)):
            f.write("{0} {1} {2} {3} {4} {5} {6} {7}\n".format(
                i, serialData[i][0][0], serialData[i][0][1], 0.0,
                serialData[i][1], serialData[i][2], serialData[i][3],
                serialData[i][4]))
        f.close()
Example #18
            error = [data[i] - ans[i] for i in xrange(len(data))]
            Pn = Pnorm.Pnorm(error, r)
            L1 = Pn.gridpnorm(1, rmin, rmax)
            L2 = Pn.gridpnorm(2, rmin, rmax)
            Linf = Pn.gridpnorm("inf", rmin, rmax)
            print "\t%s \t\t%g \t\t%g \t\t%g" % (name, L1, L2, Linf)

#-------------------------------------------------------------------------------
# If requested, write out the state in a global ordering to a file.
#-------------------------------------------------------------------------------
if outputFile != "None":
    outputFile = os.path.join(dataDir, outputFile)
    from SpheralGnuPlotUtilities import multiSort
    P = ScalarField("pressure", nodes1)
    nodes1.pressure(P)
    xprof = mpi.reduce([x.x for x in nodes1.positions().internalValues()], mpi.SUM)
    yprof = mpi.reduce([x.y for x in nodes1.positions().internalValues()], mpi.SUM)
    rhoprof = mpi.reduce(nodes1.massDensity().internalValues(), mpi.SUM)
    Pprof = mpi.reduce(P.internalValues(), mpi.SUM)
    #vprof = mpi.reduce(list([vi.dot(ri.unitVector()) for ri,vi in zip(nodes1.positions().internalValues(),nodes1.velocity().internalValues())]),mpi.SUM)
    rprof = mpi.reduce([ri.magnitude() for ri in nodes1.positions().internalValues()],mpi.SUM)
    vx = mpi.reduce(list([v.x for v in nodes1.velocity().internalValues()]),mpi.SUM)
    vy = mpi.reduce([v.y for v in nodes1.velocity().internalValues()],mpi.SUM)
    np = int(nodes1.numInternalNodes)
    if np is None:
        np = 0
    #print "np=%d" % np
    np = mpi.reduce(np,mpi.SUM)
    #print "np=%d" % np
    vprof = []
    if mpi.rank == 0:
Example #19
            raise ValueError, "The restarted state does not match!"
        else:
            print "Restart check PASSED."

else:
    if control.time() < goalTime:
        control.step(5)
        control.advance(goalTime, maxSteps)

#-------------------------------------------------------------------------------
# Compute the analytic answer.
#-------------------------------------------------------------------------------
import mpi
import NohAnalyticSolution
rlocal = [pos.x for pos in nodes1.positions().internalValues()]
r = mpi.reduce(rlocal, mpi.SUM)
h1 = 1.0/(nPerh*dx)
answer = NohAnalyticSolution.NohSolution(1,
                                         r = r,
                                         v0 = -1.0,
                                         h0 = 1.0/h1)

# Compute the simulated specific entropy.
rho = mpi.allreduce(nodes1.massDensity().internalValues(), mpi.SUM)
Pf = ScalarField("pressure", nodes1)
nodes1.pressure(Pf)
P = mpi.allreduce(Pf.internalValues(), mpi.SUM)
A = [Pi/rhoi**gamma for (Pi, rhoi) in zip(P, rho)]

# The analytic solution for the simulated entropy.
xprof = mpi.allreduce([x.x for x in nodes1.positions().internalValues()], mpi.SUM)
Example #20
#-------------------------------------------------------------------------------
# If requested, write out the state in a global ordering to a file.
#-------------------------------------------------------------------------------
if outputFile != "None":
    from SpheralGnuPlotUtilities import multiSort
    state = State(db, integrator.physicsPackages())
    outputFile = os.path.join(dataDir, outputFile)
    pos = state.vectorFields(HydroFieldNames.position)
    rho = state.scalarFields(HydroFieldNames.massDensity)
    P = state.scalarFields(HydroFieldNames.pressure)
    vel = state.vectorFields(HydroFieldNames.velocity)
    eps = state.scalarFields(HydroFieldNames.specificThermalEnergy)
    Hfield = state.symTensorFields(HydroFieldNames.H)
    S = state.symTensorFields(SolidFieldNames.deviatoricStress)
    ps = state.scalarFields("plastic strain")
    xprof = mpi.reduce([x.x for x in internalValues(pos)], mpi.SUM)
    rhoprof = mpi.reduce(internalValues(rho), mpi.SUM)
    Pprof = mpi.reduce(internalValues(P), mpi.SUM)
    vprof = mpi.reduce([v.x for v in internalValues(vel)], mpi.SUM)
    epsprof = mpi.reduce(internalValues(eps), mpi.SUM)
    hprof = mpi.reduce([1.0/sqrt(H.Determinant()) for H in internalValues(Hfield)], mpi.SUM)
    sprof = mpi.reduce([x.xx for x in internalValues(S)], mpi.SUM)
    psprof = mpi.reduce(internalValues(ps), mpi.SUM)
    mof = mortonOrderIndices(db)
    mo = mpi.reduce(internalValues(mof), mpi.SUM)
    if mpi.rank == 0:
        multiSort(mo, xprof, rhoprof, Pprof, vprof, epsprof, hprof, sprof, psprof)
        f = open(outputFile, "w")
        f.write(("#" + 17*" %16s" + "\n") % ("x", "rho", "P", "v", "eps", "h", "S", psprof, "m", 
                                             "int(x)", "int(rho)", "int(P)", "int(v)", "int(eps)", "int(h)", "int(S)", "int(ps)"))
        for (xi, rhoi, Pi, vi, epsi, hi, si, psi, mi) in zip(xprof, rhoprof, Pprof, vprof, epsprof, hprof, sprof, psprof, mo):
Example #21
#-------------------------------------------------------------------------------
# Advance to the end time.
#-------------------------------------------------------------------------------
if steps is None:
    if control.time() < goalTime:
        control.advance(goalTime, maxSteps)
else:
    control.step(steps)

#-------------------------------------------------------------------------------
# Compute the analytic answer.
#-------------------------------------------------------------------------------
sys.path.append("../../../tests/Hydro/AcousticWave")
import AcousticWaveSolution
xlocal = [pos.x for pos in nodes1.positions().internalValues()]
xglobal = mpi.reduce(xlocal, mpi.SUM)
dx = (x1 - x0) / nx1
h1 = 1.0 / (nPerh * dx)
answer = AcousticWaveSolution.AcousticWaveSolution(eos, cs, rho1, x0, x1, A,
                                                   twopi * kfreq, h1)

### Compute the simulated specific entropy.
##rho = mpi.allreduce(nodes1.massDensity().internalValues(), mpi.SUM)
##P = mpi.allreduce(nodes1.pressure().internalValues(), mpi.SUM)
##A = [Pi/rhoi**gamma for (Pi, rhoi) in zip(P, rho)]

### The analytic solution for the simulated entropy.
##xans, vans, uans, rhoans, Pans, hans = answer.solution(control.time(), xglobal)
##Aans = [Pi/rhoi**gamma for (Pi, rhoi) in zip(Pans,  rhoans)]

#-------------------------------------------------------------------------------
Example #22
File: fractal.py Project: LLNL/pynamic
            if numIters >= maxIterationsPerPoint:
                break
            nxt = f(nxt)
            numIters = numIters+1

        #Convert the number of iterations to a color value
        colorFac = 255.0*float(numIters)/float(maxIterationsPerPoint)
        myRGB = ( colorFac*0.8 + 32, 24+0.1*colorFac, 0.5*colorFac )

        #append this color value to a running list
        myArray.append( int(myRGB[2]) ) #blue first
        myArray.append( int(myRGB[1]) )    #The green
        myArray.append( int(myRGB[0]) )  #Red is last

#Now I reduce the lists to process 0!!
masterString = mpi.reduce( myArray.tostring(), mpi.SUM, 0 )

#Tell user that we're done
message = "process " + str(mpi.rank) + " done with computation!!"
print message

#Process zero does the file writing
if mpi.rank == 0:
    masterArray = array('B')
    masterArray.fromstring(masterString)

    #Write a BMP header
    myBMPHeader = makeBMPHeader( bmpSize[0], bmpSize[1] )
    print "Header length is ", len(myBMPHeader)
    print "BMP size is ", bmpSize
    print "Data length is ", len(masterString)
Example #23
    # Subtract the maximum potential value off the answer so that we have  
    # a well-defined potential.
    maxVal = max(psis)
    psis -= maxVal
    return [psi for psi in psis]

# The above solution implemented with discrete cosine transforms (DCTs) 
# for speed.
def answerDCT(G, points):
    from transforms import idctn
    from numpy import array
    # FIXME

# Compare our solution with the analytic one.
xprof = mpi.reduce(nodes.positions().internalValues(), mpi.SUM)
psiProf = mpi.reduce(psi.internalValues(), mpi.SUM)
psiAns = mpi.reduce(answer(gravity.G(), nodes.positions().internalValues()), mpi.SUM)
#print [psiProf[i] - psiAns[i] for i in xrange(len(psiProf))]
N = nx*ny*nz
from pylab import *
ion()
if mpi.rank == 0:
    import Pnorm
    assert(len(psiProf) == len(psiAns))
    error = [psiProf[i] - psiAns[i] for i in xrange(len(psiAns))]
    Pn = Pnorm.Pnorm(error, xprof)
    L1 = Pn.pnorm(1)
    L2 = Pn.pnorm(2)
    Linf = Pn.pnorm("inf")
    print "Error norms for gravitational potential:"
Example #24
#control.appendPeriodicWork(updateDiagnostics, 1)

#-------------------------------------------------------------------------------
# Advance to the end time.
#-------------------------------------------------------------------------------
if steps is None:
    if control.time() < goalTime:
        control.advance(goalTime, maxSteps)
else:
    control.step(steps)

#-------------------------------------------------------------------------------
# Plot the final state.
#-------------------------------------------------------------------------------
xlocal = [pos.x for pos in nodes1.positions().internalValues()]
xprof = mpi.reduce(xlocal, mpi.SUM)
if graphics == "gnu":
    from SpheralGnuPlotUtilities import *
    state = State(db, integrator.physicsPackages())
    rhoPlot, velPlot, epsPlot, PPlot, HPlot = plotRadialState(db)
    cs = state.scalarFields(HydroFieldNames.soundSpeed)
    csPlot = plotFieldList(cs,
                           xFunction="%s.magnitude()",
                           winTitle="Sound speed",
                           colorNodeLists=False)
    EPlot = plotEHistory(control.conserve)

    if svph:
        volPlot = plotFieldList(hydro.volume(),
                                xFunction="%s.magnitude()",
                                winTitle="volume",
Example #25
import sys,mpi
rank,size = mpi.init(len(sys.argv),sys.argv)
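# Added note: this snippet uses C-flavored bindings (unlike the pyMPI-style calls
# in the other examples). The argument order mirrors MPI_Reduce -- value, count,
# datatype, op, root, communicator -- so every rank contributes 7 and rank 0
# receives the sum.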
sigma = mpi.reduce( 7, 1, mpi.MPI_INT, mpi.MPI_SUM, 0, mpi.MPI_COMM_WORLD )
print "Sum:",sigma 
mpi.finalize()
Example #26
# Advance to the end time.
#-------------------------------------------------------------------------------
if steps is None:
    control.advance(goalTime)
else:
    control.step(steps)

if outputFile != "None":
    outputFile = os.path.join(dataDir, outputFile)
    from SpheralGnuPlotUtilities import multiSort
    P1 = ScalarField("pressure", diskNodes1)
    P2 = ScalarField("pressure", diskNodes2)
    diskNodes1.pressure(P1)
    diskNodes2.pressure(P2)

    xprof1 = mpi.reduce([x.x for x in diskNodes1.positions().internalValues()],
                        mpi.SUM)
    yprof1 = mpi.reduce([y.y for y in diskNodes1.positions().internalValues()],
                        mpi.SUM)
    rhoprof1 = mpi.reduce(diskNodes1.massDensity().internalValues(), mpi.SUM)
    Pprof1 = mpi.reduce(P1.internalValues(), mpi.SUM)
    rprof1 = mpi.reduce(
        [ri.magnitude() for ri in diskNodes1.positions().internalValues()],
        mpi.SUM)
    vx1 = mpi.reduce([v.x for v in diskNodes1.velocity().internalValues()],
                     mpi.SUM)
    vy1 = mpi.reduce([v.y for v in diskNodes1.velocity().internalValues()],
                     mpi.SUM)

    xprof2 = mpi.reduce([x.x for x in diskNodes2.positions().internalValues()],
                        mpi.SUM)
    yprof2 = mpi.reduce([y.y for y in diskNodes2.positions().internalValues()],
Example #27
File: PyMPITest.py Project: LLNL/pynamic
    def parallelRunTest(self):
        #decide on targets
        #targets = [ int(mpi.procs / 3), 0, mpi.procs - 1]

        targets = [ 0, 0, 0]
        #values to be reduced
        num1 = mpi.rank
        num2 = 0
        if mpi.rank == 1:
            num2 = 1
        num3 = 1
        num4 = (2 ** (mpi.rank%8))
        num5 = 8
        if mpi.rank == 1:
          num5 = 9
        list1 = [ mpi.rank + 1 ]
        list2 = [ mpi.rank]

        #do reduces
        results = [0,0,0,    0,0,0,    0,0,0,    0,0,0,    0,0,0 ]
        results[0] = mpi.reduce( num1,   mpi.LOR,  targets[0])
        results[1] = mpi.reduce( num1,   mpi.LAND, targets[1])
        results[2] = mpi.reduce( num2, mpi.LOR,  targets[2])
        results[3] = mpi.reduce( num3,  mpi.LAND,  targets[0])
        results[4] = mpi.reduce( num3,  mpi.LXOR,  targets[1])
        results[5] = mpi.reduce( list1,  mpi.LOR,  targets[2])
        results[6] = mpi.reduce( list1,  mpi.LAND,  targets[0])
        results[7] = mpi.reduce( list2,  mpi.LAND,  targets[1])
        results[8] = mpi.reduce( num1, mpi.MIN, targets[2] )
        results[9] = mpi.reduce( num2, mpi.MAX, targets[0] )
        results[10] = mpi.reduce( list1, mpi.MIN, targets[1] )
        results[11] = mpi.reduce( num1, mpi.MINLOC, targets[2])
        results[12] = mpi.reduce( list1, mpi.MAXLOC, targets[0] )
        results[13] = mpi.reduce( num4, mpi.BAND, targets[1] )
        results[14] = mpi.reduce( num5, mpi.BXOR, targets[2] )

        #correct answers
        correctAnswers = [  1,0,1,    
                            1,mpi.procs%2,1,     
                            1,1,0,
                            1,[1],(0,0),
                            ( mpi.procs - 1, [mpi.procs] ), 0, 
                            (mpi.procs%2)*8 + 1]

        for x in range(15):
            if mpi.rank == targets[x%3] and results[x] != correctAnswers[x]:
                failString = "reduce logical failed on test " + str(x)
                failString += "\nReduce gave result " + str(results[x])
                failString += " while correct answer is "
                failString += str(correctAnswers[x]) + "\n"
                self.fail(failString)

            elif mpi.rank != targets[x%3] and results[x] != None:
                errstr = "reduce LOGICAL failed on off-target";
                errstr += "process on test " + str(x)
                self.fail( errstr)
        return
Example #28
            nodeL.pressure(Pfield)
            for j in xrange(nodeL.numInternalNodes):
                pError.append(Pfield[j] - P)
                rhoError.append(nodeL.massDensity()[j] - rho)
                velError.append(
                    nodeL.velocity()[j][0])  #Velocity is supposed to be zero
                xdata.append(nodeL.positions()[j][0])
                if serialDump:
                    serialData.append([
                        nodeL.positions()[j],
                        3.0 / (nodeL.Hfield()[j].Trace()),
                        nodeL.mass()[j],
                        nodeL.massDensity()[j],
                        nodeL.specificThermalEnergy()[j], Pfield[j]
                    ])
serialData = mpi.reduce(serialData, mpi.SUM)
pError = mpi.reduce(pError, mpi.SUM)
rhoError = mpi.reduce(rhoError, mpi.SUM)
velError = mpi.reduce(velError, mpi.SUM)
xdata = mpi.reduce(xdata, mpi.SUM)
if rank == 0 and serialDump:
    f = open(os.path.join(dataDir, "./serialDump.ascii"), 'w')
    for i in xrange(len(serialData)):
        f.write("{0} {1} {2} {3} {4} {5} {6}\n".format(
            i, serialData[i][0][0], serialData[i][1], serialData[i][2],
            serialData[i][3], serialData[i][4], serialData[i][5]))
    f.close()
if rank == 0:
    print len(pError)
    print "Pressure results: L1 error = %g, L2 Error = %g, L inf Error = %g \n" % (
        Pnorm(pError, xdata).pnorm(1), Pnorm(
Example #29
    plots = [(paz, "GreshoVortex-velazimuthal.png"),
             (pmag, "GreshoVortex-velmag.png")]

    # Make hardcopies of the plots.
    for p, filename in plots:
        p.hardcopy(os.path.join(baseDir, filename), terminal="png")

#-------------------------------------------------------------------------------
# If requested, write out the state in a global ordering to a file.
#-------------------------------------------------------------------------------
if outputFile != "None":
    outputFile = os.path.join(baseDir, outputFile)
    from SpheralGnuPlotUtilities import multiSort
    P = ScalarField("pressure", nodes)
    nodes.pressure(P)
    xprof = mpi.reduce([x.x for x in nodes.positions().internalValues()],
                       mpi.SUM)
    yprof = mpi.reduce([x.y for x in nodes.positions().internalValues()],
                       mpi.SUM)
    rhoprof = mpi.reduce(nodes.massDensity().internalValues(), mpi.SUM)
    Pprof = mpi.reduce(P.internalValues(), mpi.SUM)
    vprof = mpi.reduce(
        [v.magnitude() for v in nodes.velocity().internalValues()], mpi.SUM)
    velx = mpi.reduce([v.x for v in nodes.velocity().internalValues()],
                      mpi.SUM)
    vely = mpi.reduce([v.y for v in nodes.velocity().internalValues()],
                      mpi.SUM)
    epsprof = mpi.reduce(nodes.specificThermalEnergy().internalValues(),
                         mpi.SUM)
    hprof = mpi.reduce(
        [1.0 / sqrt(H.Determinant()) for H in nodes.Hfield().internalValues()],
        mpi.SUM)
Example #30
            if numIters >= maxIterationsPerPoint:
                break
            nxt = f(nxt)
            numIters = numIters + 1

        #Convert the number of iterations to a color value
        colorFac = 255.0 * float(numIters) / float(maxIterationsPerPoint)
        myRGB = (colorFac * 0.8 + 32, 24 + 0.1 * colorFac, 0.5 * colorFac)

        #append this color value to a running list
        myArray.append(int(myRGB[2]))  #blue first
        myArray.append(int(myRGB[1]))  #The green
        myArray.append(int(myRGB[0]))  #Red is last

#Now I reduce the lists to process 0!!
masterString = mpi.reduce(myArray.tostring(), mpi.SUM, 0)

#Tell user that we're done
message = "process " + str(mpi.rank) + " done with computation!!"
print message

#Process zero does the file writing
if mpi.rank == 0:
    masterArray = array('B')
    masterArray.fromstring(masterString)

    #Write a BMP header
    myBMPHeader = makeBMPHeader(bmpSize[0], bmpSize[1])
    print "Header length is ", len(myBMPHeader)
    print "BMP size is ", bmpSize
    print "Data length is ", len(masterString)
Example #31
#-------------------------------------------------------------------------------
# If requested, write out the state in a global ordering to a file.
#-------------------------------------------------------------------------------
if outputFile != "None":
    from SpheralGnuPlotUtilities import multiSort
    state = State(db, integrator.physicsPackages())
    outputFile = os.path.join(dataDir, outputFile)
    pos = state.vectorFields(HydroFieldNames.position)
    rho = state.scalarFields(HydroFieldNames.massDensity)
    P = state.scalarFields(HydroFieldNames.pressure)
    vel = state.vectorFields(HydroFieldNames.velocity)
    eps = state.scalarFields(HydroFieldNames.specificThermalEnergy)
    Hfield = state.symTensorFields(HydroFieldNames.H)
    S = state.symTensorFields(SolidFieldNames.deviatoricStress)
    ps = state.scalarFields(SolidFieldNames.plasticStrain)
    rprof = mpi.reduce([x.magnitude() for x in internalValues(pos)], mpi.SUM)
    rhoprof = mpi.reduce(internalValues(rho), mpi.SUM)
    Pprof = mpi.reduce(internalValues(P), mpi.SUM)
    vprof = mpi.reduce([v.x for v in internalValues(vel)], mpi.SUM)
    epsprof = mpi.reduce(internalValues(eps), mpi.SUM)
    hprof = mpi.reduce([2.0 / (H.Trace()) for H in internalValues(Hfield)],
                       mpi.SUM)
    sprof = mpi.reduce([x.xx for x in internalValues(S)], mpi.SUM)
    psprof = mpi.reduce(internalValues(ps), mpi.SUM)
    mof = mortonOrderIndices(db)
    mo = mpi.reduce(internalValues(mof), mpi.SUM)
    if mpi.rank == 0:
        multiSort(mo, rprof, rhoprof, Pprof, vprof, epsprof, hprof, sprof,
                  psprof)
        f = open(outputFile, "w")
        f.write(("#" + 17 * ' "%16s"' + "\n") %
Example #32
    def getobsp(self, snum, stime, tetrad, zerotime=0.0, debug=0):
        """
        
        LISApar.getobsp(length,deltat,tetrad,zerotime=0.0)
        is the parallel-computing equivalent of getobs and
        getobsc, and it is used to compute the TDI responses
        of large sets of Wave objects. It must be called
        from an instance of LISApar, with the following
        parameters:
        
        - length is the total length of the TDI-observable
          arrays that will be returned;
        
        - deltat is the cadence of the time series;
        
        - zerotime is the initial time for the time series;
        
        - tetrad is a tuple (lisa,wavefactory,parameterlist,
          observables) of four elements:

          * lisa is an instance of a LISA class, which
            should be the same for every CPU taking part in
            the computation;

          * wavefactory is a Python function taking any
            number of parameters, and returning an instance of
            a synthLISA Wave object; the function must be
            defined for every CPU taking part in the
            computation;

          * parameterlist is a list of source parameters (or
            of parameter n-tuples, if wavefactory takes more
            than one parameter), which will be distributed
            among the CPUs, and passed to the Wave Factory to
            construct synthLISA Wave objects; the parameter
            sets need to be defined only on the root CPU, but
            it won't hurt to define them everywhere. They can
            contain any Python types (they are pickled before
            distribution), but not synthLISA objects;

          * observables is a list or tuple of TDI
            observables, which must be given as unbound
            methods, such as synthlisa.TDI.X1 or
            synthlisa.TDI.time.
        
        The distribution of the parameter sets among the
        CPUs tries to balance the load of the computation.
        If the number of sources is not divisible by the
        number of CPUs, it will assign a smaller number of
        sources to the root CPU, and the same number of
        sources to all other CPUs."""

        # accept five levels (0-4) of debugging info

        inittime = time.time()

        myrank = self.rank
        size = self.size

        try:
            (lisa, srcfunc, parameters, obs) = tetrad
        except:
            if myrank == 0:
                print "LISApar.getobsp(...): third parameter must be a 4-tuple containing a",
                print "LISA instance, a Wave factory, an array of parameters for the factory,",
                print "and a set of TDI observables given as class methods (such as synthlisa.TDI.X)."
            raise IndexError

        if type(parameters) not in (list, tuple, numpy.ndarray):
            if myrank == 0:
                print "LISApar.getobsp(...): needs a list of parameters to feed to the factory!"
            raise IndexError

        if size == 1:
            if myrank == 0:
                print "LISApar.getobsp(...): must be run with more than one cpu!"
            raise NotImplementedError

        if size > len(parameters):
            if myrank == 0:
                print "LISApar.getobsp(...): needs to run with more sources than cpus!"
            raise IndexError

        # root may get zero processors

        blocksize, remain = divmod(len(parameters), size)

        if remain > 0:
            blockadd, remain = divmod(remain, size - 1)
            blocksize = blocksize + blockadd

        if myrank == 0 and debug > 2:
            print "Standard block: ", blocksize,
            print "; root block: ", len(parameters) - blocksize * (size - 1)

        if myrank == 0:
            if debug > 3:
                print "Preparing for parallel execution..."

            for cpu in range(1, size):
                blockstart, blockend = (cpu - 1) * blocksize, cpu * blocksize

                serial_pars = pickle.dumps(parameters[blockstart:blockend])
                len_pars = len(serial_pars)

                mpi.isend(len_pars, 1, mpi.MPI_INT, cpu, 0, mpi.MPI_COMM_WORLD)
                mpi.isend(serial_pars, len_pars, mpi.MPI_CHAR, cpu, 1,
                          mpi.MPI_COMM_WORLD)

            mypars = parameters[blockend:]
        else:
            len_pars = mpi.recv(1, mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD)
            serial_pars = mpi.recv(len_pars, mpi.MPI_CHAR, 0, 1,
                                   mpi.MPI_COMM_WORLD)

            mypars = pickle.loads(serial_pars)

        if debug > 2:
            print "CPU ", myrank, " received ", len(
                mypars), " source parameters ", mypars

        try:
            if type(mypars[0]) in (list, tuple, numpy.ndarray):
                sources = map(lambda x: srcfunc(*x), mypars)
            else:
                sources = map(srcfunc, mypars)

            if len(filter(lambda x: not isinstance(x, synthlisa.Wave),
                          sources)) > 0:
                raise TypeError
        except:
            if myrank == 0:
                print "LISApar.getobsp(...): srcfunc must return a synthlisa.Wave when applied",
                print "to each element of the parameter list"
            raise TypeError

        if debug > 3:
            print "CPU ", myrank, " created sources ", sources

        wavearray = synthlisa.WaveArray(sources)

        if not isinstance(lisa, synthlisa.LISA):
            if myrank == 0:
                print "LISApar.getobsp(...): lisa must be an instance of synthlisa.LISA."
            raise TypeError

        tdisignal = synthlisa.TDIsignal(lisa, wavearray)

        # is it possible to permanently bind an unbound method?
        # yes, by doing bound_obs = obs.__get__(tdisignal)
        # but it's not clear this will yield a faster call

        if type(obs) == list or type(obs) == tuple:
            multobs = len(obs)

            array = numpy.zeros((snum, multobs), dtype='d')
            for i in numpy.arange(0, snum):
                for j in range(0, multobs):
                    array[i, j] = obs[j](tdisignal, zerotime + i * stime)
        else:
            multobs = 1

            array = numpy.zeros(snum, dtype='d')
            for i in numpy.arange(0, snum):
                array[i] = obs(tdisignal, zerotime + i * stime)

        sumresults = mpi.reduce(array, snum * multobs, mpi.MPI_DOUBLE,
                                mpi.MPI_SUM, 0, mpi.MPI_COMM_WORLD)

        if myrank == 0 and debug > 0:
            currenttime = time.time() - inittime

            vel = snum / currenttime
            print "Completed in %d s [%d (multi)samples/s]." % (
                int(currenttime), int(vel))

        if myrank == 0:
            if multobs == 1:
                return sumresults
            else:
                return sumresults.reshape(snum, multobs)
        else:
            return None
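A hypothetical call sketch distilled from the getobsp docstring above (not from the source): make_lisa and build_wave are placeholder helpers, the parameter values are invented, and the LISApar constructor is assumed to take no arguments. The only grounded pieces are the tetrad layout, the unbound TDI observables named in the docstring, and the requirements of more than one CPU and more sources than CPUs.

import synthlisa

par = LISApar()                                    # assumption: wraps mpi rank/size as in the class above
my_lisa = make_lisa()                              # hypothetical helper returning a synthlisa.LISA, same on every CPU

def make_wave(freq, phase):                        # Wave factory, defined on every CPU
    return build_wave(freq, phase)                 # hypothetical helper returning a synthlisa.Wave

params = [(1.0e-3, 0.0), (2.0e-3, 1.5), (3.0e-3, 3.0)]   # needed only on the root CPU
obs = (synthlisa.TDI.X1, synthlisa.TDI.time)       # unbound TDI methods, per the docstring

data = par.getobsp(4096, 15.0, (my_lisa, make_wave, params, obs), zerotime=0.0, debug=1)
if par.rank == 0:
    print data.shape                               # (4096, 2): one column per observable; other ranks get None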
Example #33
    def testIt(self):
        print "Testing TreeDistributedBoundary3d on domain %i of %i domains" % \
              (domainID, numDomains)

        # Set the ghost nodes for each domain distributed NodeList.
        self.domainbc.setAllGhostNodes(self.dataBase)
        self.domainbc.finalizeGhostBoundary()
        for nodes in self.dataBase.nodeLists():
            nodes.neighbor().updateNodes()

        # Exchange the global node ID fields.
        self.domainbc.applyGhostBoundary(self.globalIDField1)
        self.domainbc.applyGhostBoundary(self.globalIDField2)
        self.domainbc.applyGhostBoundary(self.globalIDField3)
        self.domainbc.finalizeGhostBoundary()

        # Iterate over each domain.
        for testProc in xrange(mpi.procs):

            # Test each NodeList.
            for (nodes, globalIDField) in ((self.nodes1, self.globalIDField1),
                                           (self.nodes2, self.globalIDField2),
                                           (self.nodes3, self.globalIDField3)):

                # Tell everyone how many nodes we'll be testing, and iterate
                # over them
                n = mpi.bcast(nodes.numInternalNodes, testProc)
                for i in random.sample(range(n), min(10, n)):

                    # Broadcast the position and H from the testing processor.
                    rilocal = Vector3d()
                    Hilocal = SymTensor3d()
                    if mpi.rank == testProc:
                        rilocal = nodes.positions()[i]
                        Hilocal = nodes.Hfield()[i]
                    ri = mpi.bcast(rilocal, testProc)
                    Hi = mpi.bcast(Hilocal, testProc)

                    # Get the global answer set for this node.
                    answer = mpi.reduce([self.globalIDField1[j] for j in findNeighborNodes(ri, Hi, self.kernelExtent, self.nodes1)] +
                                        [self.globalIDField2[j] for j in findNeighborNodes(ri, Hi, self.kernelExtent, self.nodes2)] +
                                        [self.globalIDField3[j] for j in findNeighborNodes(ri, Hi, self.kernelExtent, self.nodes3)],
                                        mpi.SUM, testProc)

                    # Have the testing processor build its own version.
                    if mpi.rank == testProc:
                        masterLists = vector_of_vector_of_int()
                        coarseNeighbors = vector_of_vector_of_int()
                        refineNeighbors = vector_of_vector_of_int()
                        self.dataBase.setMasterNodeLists(ri, Hi, masterLists, coarseNeighbors, False)
                        self.dataBase.setRefineNodeLists(ri, Hi, coarseNeighbors, refineNeighbors)
                        assert len(refineNeighbors) == 3
                        refine = []
                        for k, globalIDs in enumerate([self.globalIDField1,
                                                       self.globalIDField2,
                                                       self.globalIDField3]):
                            refine.extend([globalIDs[j] for j in refineNeighbors[k]])

                        # Check the answer.
                        test = checkNeighbors(refine, answer)
                        if not test:
                            sys.stderr.write("FAILED for node %i\n" % i)
                        else:
                            sys.stderr.write("PASSED for node %i of %i\n" % (i, n))
                        assert test
Example #34
                print "\t%s \t\t%g \t\t%g \t\t%g" % (name, L1, L2, Linf)
                myfile.write("\t\t%g \t\t%g \t\t%g" % (L1, L2, Linf))
            myfile.write("\n")

#-------------------------------------------------------------------------------
# If requested, write out the state in a global ordering to a file.
#-------------------------------------------------------------------------------
rmaxnorm = 0.35
rminnorm = 0.05

if outputFile != "None":
    outputFile = os.path.join(dataDir, outputFile)
    from SpheralTestUtilities import multiSort
    P = ScalarField("pressure", nodes1)
    nodes1.pressure(P)
    xprof = mpi.reduce([x.x for x in nodes1.positions().internalValues()],
                       mpi.SUM)
    yprof = mpi.reduce([x.y for x in nodes1.positions().internalValues()],
                       mpi.SUM)
    zprof = mpi.reduce([x.z for x in nodes1.positions().internalValues()],
                       mpi.SUM)
    rprof = mpi.reduce(
        [ri.magnitude() for ri in nodes1.positions().internalValues()],
        mpi.SUM)
    rhoprof = mpi.reduce(nodes1.massDensity().internalValues(), mpi.SUM)
    Pprof = mpi.reduce(P.internalValues(), mpi.SUM)
    vprof = mpi.reduce([
        vi.dot(ri.unitVector())
        for ri, vi in zip(nodes1.positions().internalValues(),
                          nodes1.velocity().internalValues())
    ], mpi.SUM)
    #vprof = mpi.reduce([v.magnitude() for v in nodes1.velocity().internalValues()], mpi.SUM)
Example #35
def gridSample(fieldList,
               zFunction="%s",
               nx=100,
               ny=100,
               xmin=None,
               xmax=None,
               ymin=None,
               ymax=None):

    assert nx > 0 and ny > 0

    # Set up our return value array.
    xValues = np.array([[0.0] * nx] * ny)
    yValues = np.array([[0.0] * nx] * ny)
    zValues = np.array([[0.0] * nx] * ny)

    # Gather the fieldList info across all processors to process 0.
    localNumNodes = []
    localX = []
    localY = []
    for ifield in xrange(fieldList.numFields):
        field = fieldList[ifield]
        n = field.nodeList().numNodes
        localNumNodes.append(n)
        for r in field.nodeList().positions():
            localX.append(r.x)
            localY.append(r.y)
    globalNumNodes = mpi.gather(localNumNodes)
    globalX = mpi.gather(localX)
    globalY = mpi.gather(localY)

    # If the user did not specify the sampling volume, then find the min and
    # max node positions.
    if xmin == None:
        xmin = min(localX)
    if ymin == None:
        ymin = min(localY)
    if xmax == None:
        xmax = max(localX)
    if ymax == None:
        ymax = max(localY)
    xmin = mpi.allreduce(xmin, mpi.MIN)
    ymin = mpi.allreduce(ymin, mpi.MIN)
    xmax = mpi.allreduce(xmax, mpi.MAX)
    ymax = mpi.allreduce(ymax, mpi.MAX)

    assert xmax > xmin
    assert ymax > ymin

    # Figure out the sizes of the bins we're going to be sampling in
    dx = (xmax - xmin) / nx
    dy = (ymax - ymin) / ny

    # Loop over all the grid sampling positions, and figure out this processors
    # contribution.
    for iy in xrange(ny):
        for ix in xrange(nx):
            xValues[iy][ix] = xmin + (ix + 0.5) * dx
            yValues[iy][ix] = ymin + (iy + 0.5) * dy
            r = Vector2d(xValues[iy][ix], yValues[iy][ix])
            z = fieldList.sample(r)
            localZ = eval(zFunction % "z")
            globalZ = mpi.reduce(localZ, mpi.SUM)
            if mpi.rank == 0:
                print "%i %i %i %s %g %g" % (mpi.rank, ix, iy, r, z, localZ)
                print "%i %g" % (mpi.rank, globalZ)
                zValues[iy][ix] = globalZ

    return xValues, yValues, zValues
Example #36
#-------------------------------------------------------------------------------
if steps is None:
    if control.time() < goalTime:
        control.advance(goalTime, maxSteps)
    if checkReversibility:
        for i in xrange(nodes1.numNodes):
            vel[i] = -vel[i]
        control.advance(2 * goalTime, maxSteps)
else:
    control.step(steps)

#-------------------------------------------------------------------------------
# Plot the final state.
#-------------------------------------------------------------------------------
xlocal = [pos.x for pos in nodes1.positions().internalValues()]
xprof = mpi.reduce(xlocal, mpi.SUM)
if graphics == "gnu":
    from SpheralGnuPlotUtilities import *
    state = State(db, integrator.physicsPackages())
    rhoPlot, velPlot, epsPlot, PPlot, HPlot = plotState(state)
    if mpi.rank == 0:
        plotAnswer(answer, control.time(), rhoPlot, velPlot, epsPlot, PPlot,
                   HPlot, xprof)
        #plotAnswer(answer.plus, control.time(), rhoPlot, velPlot, epsPlot, PPlot, HPlot, xprof)
        #plotAnswer(answer.minus, control.time(), rhoPlot, velPlot, epsPlot, PPlot, HPlot, xprof)
    cs = state.scalarFields(HydroFieldNames.soundSpeed)
    csPlot = plotFieldList(cs, winTitle="Sound speed", colorNodeLists=False)
    EPlot = plotEHistory(control.conserve)

    # Plot the correction terms.
Example #37
               z = pingpong.send(n,src)

               del msg
               del status
               del x
               del y
               del z

     # I am the odd one out
     else:
          for run in range(runs):
               mpi.barrier()
               mpi.barrier()

     # Get from all processes
     all_python = mpi.reduce(python,mpi.SUM)
     all_c = mpi.reduce(c,mpi.SUM)

     if all_python and all_c and mpi.rank == 0:
          if half:
               divisor = half
          else:
               divisor = 1

          if runs > 1:
               all_python = all_python[1:]
               all_c = all_c[1:]
               
          best_python = min(all_python)
          worst_python = max(all_python)
          avg_python = reduce(lambda x,y: x+y, all_python)/runs