def test_magnitude(): """function expects an array of N 3D vectors""" # test bordercases and other arbitrary shapes for vf in vfshapes: array = np.random.random_sample((vf, 3)) # to include negative + larger values newarray = (array - 0.5) * 1000 # ensure array of required shape shape = [int(dimension) for dimension in newarray.shape] assert shape == [vf, 3] # compute expected result newarraysq = newarray ** 2 exp = newarraysq.sum(1) ** 0.5 # compute actual result act = ana.magnitude(newarray) # ensure result is a numpy array assert isinstance(act, np.ndarray) # code works? assert exp.all() == act.all()
def test_magnitude(): """function expects an array of N 3D vectors""" # test bordercases and other arbitrary shapes for vf in vfshapes: array = np.random.random_sample((vf, 3)) # to include negative + larger values newarray = (array - 0.5) * 1000 # ensure array of required shape shape = [int(dimension) for dimension in newarray.shape] assert shape == [vf, 3] # compute expected result newarraysq = newarray**2 exp = newarraysq.sum(1)**0.5 # compute actual result act = ana.magnitude(newarray) # ensure result is a numpy array assert isinstance(act, np.ndarray) # code works? assert exp.all() == act.all()
def ovf2vtk_main():
    start_time = time.time()

    banner_doc = 70 * "-" + \
        "\novf2vtk --- converting ovf files to vtk files\n" + \
        "Hans Fangohr, Richard Boardman, University of Southampton\n" + \
        70 * "-"

    # extract command line arguments.
    # Arguments that match the options given below are returned, together
    # with their values (if any), in a list of pairs -> ('args').
    # Arguments that do not match are returned in a separate list
    # -> ('params').
    # Note (fangohr 30/12/2006 20:52): the use of getopt is historic.
    args, params = getopt.getopt(sys.argv[1:], 'Vvhbta:',
                                 ["verbose", "help", "add=", "binary", "text",
                                  "ascii", "surface-effects", "version",
                                  "datascale=", "posscale="])

    # default values
    surfaceEffects = False
    datascale = 0.0  # 0.0 has special meaning -- see help text
    posscale = 0.0   # 0.0 has special meaning -- see help text

    # provide data from getopt.getopt (args) in form of dictionary
    options = {}
    for item in args:
        if item[1] == '':
            options[item[0]] = None
        else:
            options[item[0]] = item[1]
    keys = options.keys()

    # set system responses to arguments given
    if "--surface-effects" in keys:
        surfaceEffects = True

    if "--posscale" in keys:
        posscale = float(options["--posscale"])

    if "--datascale" in keys:
        datascale = float(options["--datascale"])

    if "-v" in keys or "--verbose" in keys:
        print("running in verbose mode")
        debug = True
    else:
        debug = False

    if "-h" in keys or "--help" in keys:
        print(__doc__)
        sys.exit(0)

    if "-V" in keys or "--version" in keys:
        print("This is version {:s}.".format(version))
        sys.exit(0)

    if len(params) == 0:
        print(__doc__)
        print("ERROR: An input file (and an output file) need to be "
              "specified.")
        sys.exit(1)
    else:
        infile = params[0]

    if len(params) == 1:
        print(__doc__)
        print("ERROR: An input file AND an output file need to be specified.")
        print("specify output file")
        sys.exit(1)
    else:
        outfile = params[1]

    # okay: it seems the essential parameters are given.
    # Let's check for others:

    print(banner_doc)

    if debug:
        print("infile    = {}".format(infile))
        print("outfile   = {}".format(outfile))
        print("args      = {}".format(args))
        print("options   = {}".format(options))
        print("datascale = {}".format(datascale))
        print("posscale  = {}".format(posscale))

    # read data from infile
    vf = omf.read_structured_omf_file(infile, debug)

    # compute magnitude for all cells
    Ms = ana.magnitude(vf)

    # compute number of cells with non-zero Ms (rpb01r)
    Ms_num_of_nonzeros = Numeric.sum(Numeric.not_equal(Ms, 0.0))
    print("({:5.2f}% of {:d} cells filled)".format(
        100.0 * Ms_num_of_nonzeros / len(Ms), len(Ms)))

    # scale magnetisation data as required:
    if datascale == 0.0:
        scale = max(Ms)
        print("Will scale data down by {:f}".format(scale))
    else:
        scale = datascale

    # normalise vectorfield by scale
    vf = Numeric.divide(vf, scale)

    # read metadata in data file
    ovf_run = omf.analyze(infile)
    datatitle = ovf_run["Title:"] + "/{:g}".format(scale)

    #
    # need x, y and z vectors for vtk format
    #
    # taking the actual spacings for dx, dy and dz generally gives poor
    # visualisation results (in particular for thin films, one would like
    # to have some magnification in the z-direction). Also, vtk is not
    # happy with positions on the 10e-9 scale, so one better scales this
    # to something closer to unity.

    # extract dimensions from file
    dimensions = (int(ovf_run["xnodes:"]),
                  int(ovf_run["ynodes:"]),
                  int(ovf_run["znodes:"]))

    # scale data by given factor
    if posscale != 0.0:
        # find range between max and min values of components
        xrange = abs(float(ovf_run["xmax:"]) - float(ovf_run["xmin:"]))
        yrange = abs(float(ovf_run["ymax:"]) - float(ovf_run["ymin:"]))
        zrange = abs(float(ovf_run["zmax:"]) - float(ovf_run["zmin:"]))

        # define no. of x, y, z nodes
        xnodes = float(ovf_run["xnodes:"])
        ynodes = float(ovf_run["ynodes:"])
        znodes = float(ovf_run["znodes:"])

        # define stepsizes
        xstepsize = float(ovf_run["xstepsize:"])
        ystepsize = float(ovf_run["ystepsize:"])
        zstepsize = float(ovf_run["zstepsize:"])

        # define bases
        xbase = float(ovf_run["xbase:"])
        ybase = float(ovf_run["ybase:"])
        zbase = float(ovf_run["zbase:"])

        # find dx, dy, dz in SI units:
        dx = xrange / xnodes
        dy = yrange / ynodes
        dz = zrange / znodes

        # find scale factor that OOMMF uses for xstepsize and xnodes,
        # etc. (Don't know how to get this directly.)
        xscale = dx * xstepsize
        yscale = dy * ystepsize
        zscale = dz * zstepsize

        # extract x, y and z positions from ovf file.
        xbasevector = [None] * dimensions[0]  # create empty vector
        for i in range(dimensions[0]):
            # data is stored for 'centre' of each cuboid, therefore (i+0.5)
            xbasevector[i] = xbase + (i + 0.5) * xstepsize * xscale

        ybasevector = [None] * dimensions[1]
        for i in range(dimensions[1]):
            ybasevector[i] = ybase + (i + 0.5) * ystepsize * yscale

        zbasevector = [None] * dimensions[2]
        for i in range(dimensions[2]):
            zbasevector[i] = zbase + (i + 0.5) * zstepsize * zscale

        # finally, convert list to numeric (need to have this consistent)
        xbasevector = Numeric.array(xbasevector) / float(posscale)
        ybasevector = Numeric.array(ybasevector) / float(posscale)
        zbasevector = Numeric.array(zbasevector) / float(posscale)

    else:  # posscale == 0.0
        # this generally looks better:
        xbasevector = Numeric.arange(dimensions[0])
        ybasevector = Numeric.arange(dimensions[1])
        zbasevector = Numeric.arange(dimensions[2])

    #
    # write ascii or binary vtk-file (default is binary)
    #
    vtk_data = 'binary'

    if '--ascii' in keys or '-t' in keys or '--text' in keys:
        vtk_data = 'ascii'
        if debug:
            print("switching to ascii vtk-data")

    if '--binary' in keys or '-b' in keys:
        vtk_data = 'binary'
        if debug:
            print("switching to binary vtk-data")

    #
    # and now open vtk-file
    #
    vtkfilecomment = "Output from ovf2vtk (version {:s}), {:s}, infile={:s}. "\
        .format(version, time.asctime(), infile)
    vtkfilecomment += "Calling command line was '{:s}' executed in '{:s}'"\
        .format(" ".join(sys.argv), os.getcwd())

    # define inputs
    RecGrid = pyvtk.RectilinearGrid(xbasevector.tolist(),
                                    ybasevector.tolist(),
                                    zbasevector.tolist())
    PData = pyvtk.PointData(pyvtk.Vectors(vf.tolist(), datatitle))

    # define vtk file.
    vtk = pyvtk.VtkData(RecGrid, vtkfilecomment, PData, format=vtk_data)

    # now compute all the additional data such as angles, etc

    # check whether we should do all
    keys = map(lambda x: x[1], args)
    if "all" in keys:
        args = []
        for add_arg in add_features:
            args.append(("--add", add_arg))

    # when ovf2vtk was re-written using Numeric, I had to group
    # certain operations to make them fast. Now some switches are
    # unnecessary. (fangohr 25/08/2003 01:35)
    # To avoid executing the same code again, we remember what we have
    # computed already:
    done_angles = 0
    done_comp = 0

    for arg in args:
        if arg[0] == "-a" or arg[0] == "--add":
            print("working on {}".format(arg))

            data = []
            lookup_table = 'default'

            # compute observables that need more than one field value,
            # i.e. div, rot
            if arg[1][0:6] == "divrot":  # rotation = vorticity, curl

                (div, rot, rotx, roty, rotz, rotmag) = \
                    ana.divergence_and_curl(vf, surfaceEffects, ovf_run)

                # change order of observables for upcoming loop
                observables = (rotx, roty, rotz, rotmag, rot, div)
                comments = ["curl, x-comp", "curl, y-comp", "curl, z-comp",
                            "curl, magnitude", "curl", "divergence"]

                # append data to vtk file
                for obs, comment in zip(observables, comments):
                    # for rotx, roty, rotz, rotmag, div
                    if comment != "curl":
                        vtk.point_data.append(
                            pyvtk.Scalars(obs.tolist(), comment,
                                          lookup_table))
                    # for rot
                    else:
                        vtk.point_data.append(
                            pyvtk.Vectors(obs.tolist(), comment))

            # components
            elif arg[1] in ["Mx", "My", "Mz", "Ms"]:
                if done_comp == 0:
                    done_comp = 1

                    comments = "x-component", "y-component", "z-component"
                    for data, comment in zip(ana.components(vf), comments):
                        vtk.point_data.append(
                            pyvtk.Scalars(data.tolist(), comment,
                                          lookup_table))

                    # magnitude of magnetisation
                    Mmag = ana.magnitude(vf)
                    vtk.point_data.append(
                        pyvtk.Scalars(Mmag.tolist(), "Magnitude",
                                      lookup_table))

            elif arg[1] in ["xy", "xz", "yz"]:
                if done_angles == 0:
                    done_angles = 1

                    # in-plane angles
                    comments = ("xy in-plane angle", "yz in-plane angle",
                                "xz in-plane angle")
                    for data, comment in zip(ana.plane_angles(vf), comments):
                        vtk.point_data.append(
                            pyvtk.Scalars(data.tolist(), comment,
                                          lookup_table))

            else:
                print("only xy, xz, Mx, My, Mz, divergence, Ms, or 'all' "
                      "allowed after -a or --add")
                print("Current choice is {}".format(arg))
                print(__doc__)
                sys.exit(1)

    #
    # eventually, write the file
    #
    print("saving file ({:s})".format(outfile))
    vtk.tofile(outfile, format=vtk_data)

    print("finished conversion (execution time {:5.3f} seconds)".format(
        time.time() - start_time))
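
# A minimal sketch of driving the converter from Python rather than from the
# shell; the file names below are hypothetical, and the option strings are
# the ones parsed by getopt above (the two positional parameters are the
# input and the output file). Relies on the module-level import of sys.
if __name__ == "__main__":
    sys.argv = ["ovf2vtk", "--ascii", "--add", "all",
                "example.omf", "example.vtk"]
    ovf2vtk_main()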
def ovf2vtk_main():
    start_time = time.time()

    banner_doc = 70 * "-" + \
        "\novf2vtk --- converting ovf files to vtk files" + "\n" + \
        "Hans Fangohr, Richard Boardman, University of Southampton\n" + \
        70 * "-"

    # extract command line arguments
    additions, params = getopt.getopt(sys.argv[1:], 'Vvhbta:',
                                      ["verbose", "help", "add=", "binary",
                                       "text", "ascii", "surface-effects",
                                       "version", "datascale=", "posscale="])

    # Note (fangohr 30/12/2006 20:52): the use of getopt is historic,
    # and so is the use of the name 'additions'.

    # default value
    surfaceEffects = False
    datascale = 0.0  # 0.0 has special meaning -- see help text
    posscale = 0.0   # 0.0 has special meaning -- see help text

    # provide data from getopt.getopt (additions) in form of hash table
    options = {}
    for item in additions:
        if item[1] == '':
            options[item[0]] = None
        else:
            options[item[0]] = item[1]
    keys = options.keys()

    if "--surface-effects" in keys:
        surfaceEffects = True

    if "--posscale" in keys:
        posscale = float(options["--posscale"])

    if "--datascale" in keys:
        datascale = float(options["--datascale"])

    if "-v" in keys or "--verbose" in keys:
        print "running in verbose mode"
        debug = True
    else:
        debug = False

    if "-h" in keys or "--help" in keys:
        print __doc__
        sys.exit(0)

    if "-V" in keys or "--version" in keys:
        print "This is version {:s}.".format(ovf2vtk.__version__)
        sys.exit(0)

    if len(params) == 0:
        print __doc__
        print "ERROR: An input file (and an output file need to be specified)."
        sys.exit(1)
    else:
        infile = params[0]

    if len(params) == 1:
        print __doc__
        print "ERROR: An input file AND an output file need to be specified."
        print "specify output file"
        sys.exit(1)
    else:
        outfile = params[1]

    # okay: it seems the essential parameters are given.
    # Let's check for others:

    print banner_doc

    if debug:
        print "infile   = ", infile
        print "outfile  = ", outfile
        print "additions= ", additions
        print "options  = ", options
        print "datascale=", datascale
        print "posscale=", posscale

    # read data from infile
    vf = read_structured_omf_file(infile, debug)

    # compute magnitude for all cells
    Ms = magnitude(vf)

    # Compute number of cells with non-zero Ms (rpb01r)
    Ms_num_of_nonzeros = Numeric.sum(Numeric.not_equal(Ms, 0.0))
    print "({:5.2f}% of {:d} cells filled)"\
        .format(100.0 * Ms_num_of_nonzeros / len(Ms), len(Ms))

    # read metadata in data file
    ovf_run = analyze(infile)

    # scale magnetisation data as required:
    if datascale == 0.0:
        scale = max(Ms)
        print "Will scale data down by {:f}".format(scale)
    else:
        scale = datascale

    vf = Numeric.divide(vf, scale)

    datatitle = ovf_run["Title:"] + "/{:g}".format(scale)

    #
    # need x, y and z vectors for vtk format
    #
    # taking actual spacings for dx, dy and dz results generally in
    # poor visualisation results (in particular for thin films, one
    # would like to have some magnification in z-direction). Also: vtk
    # is not happy with positions on the 10e-9 scale, so one better
    # scales this to something closer to unity.

    # extract dimensions from file
    dimensions = (int(ovf_run["xnodes:"]),
                  int(ovf_run["ynodes:"]),
                  int(ovf_run["znodes:"]))

    if posscale != 0.0:  # scale data by given factor

        # find dx, dy, dz in SI units:
        Lx = abs(float(ovf_run["xmax:"]) - float(ovf_run["xmin:"]))
        Ly = abs(float(ovf_run["ymax:"]) - float(ovf_run["ymin:"]))
        Lz = abs(float(ovf_run["zmax:"]) - float(ovf_run["zmin:"]))

        dx = Lx / float(ovf_run["xnodes:"])
        dy = Ly / float(ovf_run["ynodes:"])
        dz = Lz / float(ovf_run["znodes:"])

        # find scale factor that OOMMF uses for xstepsize and xnodes,
        # etc. (Don't know how to get this directly.)
        xscale = dx * float(ovf_run["xstepsize:"])
        yscale = dy * float(ovf_run["ystepsize:"])
        zscale = dz * float(ovf_run["zstepsize:"])

        # extract x, y and z positions from ovf file.
        xbasevector = [None] * dimensions[0]  # create empty vector
        for i in range(dimensions[0]):
            # data is stored for 'centre' of each cuboid, therefore (i+0.5)
            xbasevector[i] = float(ovf_run["xbase:"]) + \
                (i + 0.5) * float(ovf_run["xstepsize:"]) * xscale

        ybasevector = [None] * dimensions[1]
        for i in range(dimensions[1]):
            ybasevector[i] = float(ovf_run["ybase:"]) + \
                (i + 0.5) * float(ovf_run["ystepsize:"]) * yscale

        zbasevector = [None] * dimensions[2]
        for i in range(dimensions[2]):
            zbasevector[i] = float(ovf_run["zbase:"]) + \
                (i + 0.5) * float(ovf_run["zstepsize:"]) * zscale

        # finally, convert list to numerix (need to have this consistent)
        xbasevector = Numeric.array(xbasevector) / float(posscale)
        ybasevector = Numeric.array(ybasevector) / float(posscale)
        zbasevector = Numeric.array(zbasevector) / float(posscale)

    else:  # posscale == 0.0
        #
        # this generally looks better:
        #
        xbasevector = Numeric.arange(dimensions[0])
        ybasevector = Numeric.arange(dimensions[1])
        zbasevector = Numeric.arange(dimensions[2])

    #
    # write ascii or binary vtk-file (default is binary)
    #
    vtk_data = 'binary'

    if '--ascii' in keys or '-t' in keys or '--text' in keys:
        vtk_data = 'ascii'
        if debug:
            print "switching to ascii vtk-data"

    if '--binary' in keys or '-b' in keys:
        vtk_data = 'binary'
        if debug:
            print "switching to binary vtk-data"

    #
    # and now open vtk-file
    #
    vtkfilecomment = "Output from ovf2vtk (version {:s}), {:s}, infile={:s}. "\
        .format(ovf2vtk.__version__, time.asctime(), infile)
    vtkfilecomment += "Calling command line was '{:s}' executed in '{:s}'"\
        .format(" ".join(sys.argv), os.getcwd())

    vtk = pyvtk.VtkData(pyvtk.RectilinearGrid(xbasevector.tolist(),
                                              ybasevector.tolist(),
                                              zbasevector.tolist()),
                        vtkfilecomment,
                        pyvtk.PointData(pyvtk.Vectors(vf.tolist(), datatitle)),
                        format=vtk_data)

    #
    # now compute all the additional data such as angles, etc
    #

    # check whether we should do all
    keys = map(lambda x: x[1], additions)
    if "all" in keys:
        additions = []
        for add in add_features:
            additions.append(("--add", add))

    # when ovf2vtk was re-written using Numeric, I had to group
    # certain operations to make them fast. Now some switches are
    # unneccessary. (fangohr 25/08/2003 01:35)
    # To avoid executing the same code again, we remember what we have
    # computed already:
    done_angles = 0
    done_comp = 0

    for add in additions:
        if add[0] == "-a" or add[0] == "--add":
            print "working on", add

            data = []

            # compute observables that need more than one field value,
            # i.e. div, rot
            if add[1][0:6] == "divrot":  # rotation = vorticity, curl

                (div, rot, rotx, roty, rotz, rotmag) = \
                    divergence_and_curl(vf, surfaceEffects, ovf_run)

                comment = "curl, x-comp"
                vtk.point_data.append(pyvtk.Scalars(rotx.tolist(), comment,
                                                    lookup_table='default'))

                comment = "curl, y-comp"
                vtk.point_data.append(pyvtk.Scalars(roty.tolist(), comment,
                                                    lookup_table='default'))

                comment = "curl, z-comp"
                vtk.point_data.append(pyvtk.Scalars(rotz.tolist(), comment,
                                                    lookup_table='default'))

                comment = "curl, magnitude"
                vtk.point_data.append(pyvtk.Scalars(rotmag.tolist(), comment,
                                                    lookup_table='default'))

                comment = "curl"
                vtk.point_data.append(pyvtk.Vectors(rot.tolist(), comment))

                comment = "divergence"
                vtk.point_data.append(pyvtk.Scalars(div.tolist(), comment,
                                                    lookup_table='default'))

            elif add[1] in ["Mx", "My", "Mz", "Ms"]:  # components
                if not done_comp:
                    done_comp = 1

                    comments = "x-component", "y-component", "z-component"
                    for data, comment in zip(components(vf), comments):
                        vtk.point_data.append(
                            pyvtk.Scalars(data.tolist(), comment,
                                          lookup_table='default'))

                    # magnitude of magnitisation
                    Mmag = magnitude(vf)
                    vtk.point_data.append(
                        pyvtk.Scalars(Mmag.tolist(), "Magnitude",
                                      lookup_table='default'))

            elif add[1] in ["xy", "xz", "yz"]:
                if not done_angles:
                    done_angles = 1

                    # in-plane angles
                    comments = ("xy in-plane angle", "yz in-plane angle",
                                "xz in-plane angle")
                    for data, comment in zip(plane_angles(vf), comments):
                        vtk.point_data.append(
                            pyvtk.Scalars(data.tolist(), comment,
                                          lookup_table='default'))

            else:
                print "only xy, xz, Mx, My, Mz, divergence, Ms, or 'all' " \
                      "allowed after -a or --add"
                print "Current choice is", add
                print __doc__
                sys.exit(1)

    #
    # eventually, write the file
    #
    print "saving file ({:s})".format(outfile)
    vtk.tofile(outfile, format=vtk_data)

    print "finished conversion (execution time {:5.3s} seconds)"\
        .format(str(time.time() - start_time))
def test_divergence_and_curl():
    """function takes inputs vf (an Nx3 array), SurfaceEffects (a boolean),
    and ovf_run (a dictionary of keyword pairs)"""
    # maps each example filename to the product of that file's Nx, Ny, Nz
    node_products = {os.path.join('..', 'Examples', 'cantedvortex.omf'): 32768,
                     os.path.join('..', 'Examples', 'ellipsoidwrap.omf'): 768,
                     os.path.join('..', 'Examples', 'h2hleftedge.ohf'): 25600,
                     os.path.join('..', 'Examples', 'yoyoleftedge.ohf'): 6000}
    filenames = (os.path.join('..', 'Examples', 'cantedvortex.omf'),
                 os.path.join('..', 'Examples', 'ellipsoidwrap.omf'),
                 os.path.join('..', 'Examples', 'h2hleftedge.ohf'),
                 os.path.join('..', 'Examples', 'yoyoleftedge.ohf'))

    # test that the final shapes of returned objects are correct:
    # 'divflat' should have shape (Nx*Ny*Nz,) whereas
    # 'rotflat' should have shape (Nx*Ny*Nz, 3)
    for filename in filenames:
        # 'vf' and 'ovf_run' are returned by functions within omfread.py
        divflat = ana.divergence_and_curl(
            omf.read_structured_omf_file(filename), False,
            omf.analyze(filename))[0]
        rotflat = ana.divergence_and_curl(
            omf.read_structured_omf_file(filename), False,
            omf.analyze(filename))[1]
        rotmag = ana.magnitude(rotflat)

        assert [int(i) for i in divflat.shape] == [node_products[filename]]
        assert [int(i) for i in rotflat.shape] == [node_products[filename], 3]
        assert [int(i) for i in rotmag.shape] == [node_products[filename]]
        for i in range(3):
            shape = rotflat[:, i].shape
            assert [int(j) for j in shape] == [node_products[filename]]

        # test the return types are numpy arrays.
        objects = [divflat, rotflat, rotflat[:, 0], rotflat[:, 1],
                   rotflat[:, 2], rotmag]
        for obj in objects:
            assert isinstance(obj, np.ndarray)

    # test that the returned objects contain correct data values for both
    # surfaceEffects=True and surfaceEffects=False for each example file.
    surfaceEffects = [True, False]
    for filename in filenames:
        for boolean in surfaceEffects:
            # actual result
            act = ana.divergence_and_curl(
                omf.read_structured_omf_file(filename), boolean,
                omf.analyze(filename))
            # expected result: the original (non-refactored) version of
            # the function
            exp = analysis_original.divergence_and_curl(
                omf.read_structured_omf_file(filename), boolean,
                omf.analyze(filename))
            # ensure refactored and original results agree elementwise
            for j in range(len(act)):
                assert np.allclose(act[j], exp[j], equal_nan=True)

    # test special 2d case; Nz = 1
    dic = {"xnodes:": 3, "ynodes:": 3, "znodes:": 1,
           "xstepsize:": 0.01, "ystepsize:": 0.01, "zstepsize:": 0.01}
    for boolean in surfaceEffects:
        # actual result
        act = ana.divergence_and_curl(vfexample2, boolean, dic)
        # expected result: the original (non-refactored) version of
        # the function
        exp = analysis_original.divergence_and_curl(vfexample2, boolean, dic)
        # ensure refactored and original results agree elementwise
        for j in range(len(act)):
            assert np.allclose(act[j], exp[j], equal_nan=True)
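
# Usage sketch (same module aliases as in the test above; the example file is
# one the test already reads): divergence_and_curl() returns the 6-tuple
# (div, rot, rotx, roty, rotz, rotmag), of which the test uses the first two.
example_file = os.path.join('..', 'Examples', 'cantedvortex.omf')
div, rot = ana.divergence_and_curl(omf.read_structured_omf_file(example_file),
                                   False, omf.analyze(example_file))[:2]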
def ovf2vtk_main():
    start_time = time.time()

    banner_doc = 70 * "-" + \
        "\novf2vtk --- converting ovf files to vtk files" + "\n" + \
        "Hans Fangohr, Richard Boardman, University of Southampton\n" + \
        70 * "-"

    # extract command line arguments
    additions, params = getopt.getopt(sys.argv[1:], 'Vvhbta:',
                                      ["verbose", "help", "add=", "binary",
                                       "text", "ascii", "surface-effects",
                                       "version", "datascale=", "posscale="])

    # Note (fangohr 30/12/2006 20:52): the use of getopt is historic,
    # and so is the use of the name 'additions'.

    # default value
    surfaceEffects = False
    datascale = 0.0  # 0.0 has special meaning -- see help text
    posscale = 0.0   # 0.0 has special meaning -- see help text

    # provide data from getopt.getopt (additions) in form of hash table
    options = {}
    for item in additions:
        if item[1] == '':
            options[item[0]] = None
        else:
            options[item[0]] = item[1]
    keys = options.keys()

    if "--surface-effects" in keys:
        surfaceEffects = True

    if "--posscale" in keys:
        posscale = float(options["--posscale"])

    if "--datascale" in keys:
        datascale = float(options["--datascale"])

    if "-v" in keys or "--verbose" in keys:
        print "running in verbose mode"
        debug = True
    else:
        debug = False

    if "-h" in keys or "--help" in keys:
        print __doc__
        sys.exit(0)

    if "-V" in keys or "--version" in keys:
        print "This is version %s." % ovf2vtk.__version__
        sys.exit(0)

    if len(params) == 0:
        print __doc__
        print "ERROR: An input file (and an output file need to be specified)."
        sys.exit(1)
    else:
        infile = params[0]

    if len(params) == 1:
        print __doc__
        print "ERROR: An input file AND an output file need to be specified."
        print "specify output file"
        sys.exit(1)
    else:
        outfile = params[1]

    # okay: it seems the essential parameters are given.
    # Let's check for others:

    print banner_doc

    if debug:
        print "infile   = ", infile
        print "outfile  = ", outfile
        print "additions= ", additions
        print "options  = ", options
        print "datascale=", datascale
        print "posscale=", posscale

    # read data from infile
    vf = read_structured_omf_file(infile, debug)

    # compute magnitude for all cells
    Ms = magnitude(vf)

    # Compute number of cells with non-zero Ms (rpb01r)
    Ms_num_of_nonzeros = Numeric.sum(Numeric.not_equal(Ms, 0.0))
    print "(%5.2f%% of %d cells filled)" % (100.0 * Ms_num_of_nonzeros /
                                            len(Ms), len(Ms))

    # read metadata in data file
    ovf_run = analyze(infile)

    # scale magnetisation data as required:
    if datascale == 0.0:
        scale = max(Ms)
        print "Will scale data down by %f" % scale
    else:
        scale = datascale

    vf = Numeric.divide(vf, scale)

    datatitle = ovf_run["Title:"] + "/%g" % (scale)

    #
    # need x, y and z vectors for vtk format
    #
    # taking actual spacings for dx, dy and dz results generally in
    # poor visualisation results (in particular for thin films, one
    # would like to have some magnification in z-direction). Also: vtk
    # is not happy with positions on the 10e-9 scale, so one better
    # scales this to something closer to unity.

    # extract dimensions from file
    dimensions = (int(ovf_run["xnodes:"]),
                  int(ovf_run["ynodes:"]),
                  int(ovf_run["znodes:"]))

    if posscale != 0.0:  # scale data by given factor

        # find dx, dy, dz in SI units:
        Lx = abs(float(ovf_run["xmax:"]) - float(ovf_run["xmin:"]))
        Ly = abs(float(ovf_run["ymax:"]) - float(ovf_run["ymin:"]))
        Lz = abs(float(ovf_run["zmax:"]) - float(ovf_run["zmin:"]))

        dx = Lx / float(ovf_run["xnodes:"])
        dy = Ly / float(ovf_run["ynodes:"])
        dz = Lz / float(ovf_run["znodes:"])

        # find scale factor that OOMMF uses for xstepsize and xnodes,
        # etc. (Don't know how to get this directly.)
        xscale = Lx / (float(ovf_run["xnodes:"]) * float(ovf_run["xstepsize:"]))
        yscale = Ly / (float(ovf_run["ynodes:"]) * float(ovf_run["ystepsize:"]))
        zscale = Lz / (float(ovf_run["znodes:"]) * float(ovf_run["zstepsize:"]))

        # extract x, y and z positions from ovf file.
        xbasevector = [None] * dimensions[0]  # create empty vector
        for i in range(dimensions[0]):
            # data is stored for 'centre' of each cuboid, therefore (i+0.5)
            xbasevector[i] = float(ovf_run["xbase:"]) + \
                (i + 0.5) * float(ovf_run["xstepsize:"]) * xscale

        ybasevector = [None] * dimensions[1]
        for i in range(dimensions[1]):
            ybasevector[i] = float(ovf_run["ybase:"]) + \
                (i + 0.5) * float(ovf_run["ystepsize:"]) * yscale

        zbasevector = [None] * dimensions[2]
        for i in range(dimensions[2]):
            zbasevector[i] = float(ovf_run["zbase:"]) + \
                (i + 0.5) * float(ovf_run["zstepsize:"]) * zscale

        # finally, convert list to numerix (need to have this consistent)
        xbasevector = Numeric.array(xbasevector) / float(posscale)
        ybasevector = Numeric.array(ybasevector) / float(posscale)
        zbasevector = Numeric.array(zbasevector) / float(posscale)

    else:  # posscale == 0.0
        #
        # this generally looks better:
        #
        xbasevector = Numeric.arange(dimensions[0])
        ybasevector = Numeric.arange(dimensions[1])
        zbasevector = Numeric.arange(dimensions[2])

    #
    # write ascii or binary vtk-file (default is binary)
    #
    vtk_data = 'binary'

    if '--ascii' in keys or '-t' in keys or '--text' in keys:
        vtk_data = 'ascii'
        if debug:
            print "switching to ascii vtk-data"

    if '--binary' in keys or '-b' in keys:
        vtk_data = 'binary'
        if debug:
            print "switching to binary vtk-data"

    #
    # and now open vtk-file
    #
    vtkfilecomment = "Output from ovf2vtk (version %s), %s, infile=%s. " % \
        (ovf2vtk.__version__, time.asctime(), infile)
    vtkfilecomment += "Calling command line was '%s' executed in '%s'" % \
        (" ".join(sys.argv), os.getcwd())

    vtk = pyvtk.VtkData(pyvtk.RectilinearGrid(xbasevector.tolist(),
                                              ybasevector.tolist(),
                                              zbasevector.tolist()),
                        vtkfilecomment,
                        pyvtk.PointData(pyvtk.Vectors(vf.tolist(), datatitle)),
                        format=vtk_data)

    #
    # now compute all the additional data such as angles, etc
    #

    # check whether we should do all
    keys = map(lambda x: x[1], additions)
    if "all" in keys:
        additions = []
        for add in add_features:
            additions.append(("--add", add))

    # when ovf2vtk was re-written using Numeric, I had to group
    # certain operations to make them fast. Now some switches are
    # unneccessary. (fangohr 25/08/2003 01:35)
    # To avoid executing the same code again, we remember what we have
    # computed already:
    done_angles = 0
    done_comp = 0

    for add in additions:
        if add[0] == "-a" or add[0] == "--add":
            print "working on", add

            data = []

            # compute observables that need more than one field value,
            # i.e. div, rot
            if add[1][0:6] == "divrot":  # rotation = vorticity, curl

                (div, rot, rotx, roty, rotz, rotmag) = \
                    divergence_and_curl(vf, surfaceEffects, ovf_run)

                comment = "curl, x-comp"
                vtk.point_data.append(pyvtk.Scalars(rotx.tolist(), comment,
                                                    lookup_table='default'))

                comment = "curl, y-comp"
                vtk.point_data.append(pyvtk.Scalars(roty.tolist(), comment,
                                                    lookup_table='default'))

                comment = "curl, z-comp"
                vtk.point_data.append(pyvtk.Scalars(rotz.tolist(), comment,
                                                    lookup_table='default'))

                comment = "curl, magnitude"
                vtk.point_data.append(pyvtk.Scalars(rotmag.tolist(), comment,
                                                    lookup_table='default'))

                comment = "curl"
                vtk.point_data.append(pyvtk.Vectors(rot.tolist(), comment))

                comment = "divergence"
                vtk.point_data.append(pyvtk.Scalars(div.tolist(), comment,
                                                    lookup_table='default'))

                done_div_rot = True

            elif add[1] in ["Mx", "My", "Mz", "Ms"]:  # components
                if not done_comp:
                    done_comp = 1

                    comments = "x-component", "y-component", "z-component"
                    for data, comment in zip(components(vf), comments):
                        vtk.point_data.append(
                            pyvtk.Scalars(data.tolist(), comment,
                                          lookup_table='default'))

                    # magnitude of magnitisation
                    Mmag = magnitude(vf)
                    vtk.point_data.append(
                        pyvtk.Scalars(Mmag.tolist(), "Magnitude",
                                      lookup_table='default'))

            elif add[1] in ["xy", "xz", "yz"]:
                if not done_angles:
                    done_angles = 1

                    # in-plane angles
                    comments = ("xy in-plane angle", "yz in-plane angle",
                                "xz in-plane angle")
                    for data, comment in zip(plane_angles(vf), comments):
                        vtk.point_data.append(
                            pyvtk.Scalars(data.tolist(), comment,
                                          lookup_table='default'))

            else:
                print "only xy, xz, Mx, My, Mz, divergence, Ms, or 'all' " \
                      "allowed after -a or --add"
                print "Current choice is", add
                print __doc__
                sys.exit(1)

    #
    # eventually, write the file
    #
    print "saving file (%s)" % (outfile)
    vtk.tofile(outfile, format=vtk_data)

    print "finished conversion (execution time %5.3s seconds)" % \
        (time.time() - start_time)