def __init__(self, filename):
    # (requires 'import uuid' and 'import vtk' at module scope)
    self.id = uuid.uuid1()
    self.filename = filename
    self._reader = vtk.vtkNetCDFCFReader()  # get test data
    self._reader.SphericalCoordinatesOff()
    self._reader.SetOutputTypeToImage()
    self._reader.ReplaceFillValueWithNanOn()
    self._reader.SetFileName(filename)
    self._reader.UpdateInformation()
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# This test checks the netCDF reader. It uses the COARDS convention.

renWin = vtk.vtkRenderWindow()
renWin.SetSize(400, 400)

#############################################################################
# Case 1: Image type.

# Open the file.
reader_image = vtk.vtkNetCDFCFReader()
reader_image.SetFileName(str(VTK_DATA_ROOT) + "/Data/tos_O1_2001-2002.nc")
reader_image.SetOutputTypeToImage()
# Set the arrays we want to load.
reader_image.UpdateMetaData()
reader_image.SetVariableArrayStatus("tos", 1)
reader_image.SphericalCoordinatesOff()

aa_image = vtk.vtkAssignAttribute()
aa_image.SetInputConnection(reader_image.GetOutputPort())
aa_image.Assign("tos", "SCALARS", "POINT_DATA")

thresh_image = vtk.vtkThreshold()
thresh_image.SetInputConnection(aa_image.GetOutputPort())
thresh_image.ThresholdByLower(10000)

surface_image = vtk.vtkDataSetSurfaceFilter()
surface_image.SetInputConnection(thresh_image.GetOutputPort())

mapper_image = vtk.vtkPolyDataMapper()
mapper_image.SetInputConnection(surface_image.GetOutputPort())
mapper_image.SetScalarRange(270, 310)

actor_image = vtk.vtkActor()
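# The excerpt ends at the actor; a plausible continuation (an assumption,
# mirroring the complete threshold test later in this collection) wires the
# actor into a renderer attached to renWin:
actor_image.SetMapper(mapper_image)
ren_image = vtk.vtkRenderer()
ren_image.AddActor(actor_image)
renWin.AddRenderer(ren_image)
renWin.Render()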
import vtk

vtk.vtkObject.GlobalWarningDisplayOff()  # Disable the VTK warning window

'''<Reader>'''
reader = vtk.vtkNetCDFCFReader()
reader.SetFileName('files/etopo11.nc')
reader.SetSphericalCoordinates(0)
reader.Update()

varName = reader.GetOutput().GetPointData().GetArrayName(0)
rangeStart, rangeEnd = reader.GetOutput().GetPointData().GetArray(0).GetRange()

'''<Filter>'''
reader.GetOutput().GetPointData().SetActiveScalars(varName)  # set the active scalars
warp = vtk.vtkWarpScalar()
warp.SetInputConnection(reader.GetOutputPort())
warp.SetNormal(0, 0, 1)
warp.SetScaleFactor(0.0002)

'''<Mapper>'''
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(warp.GetOutputPort())
mapper.SetScalarRange(rangeStart, rangeEnd)

'''<Actor>'''
actor = vtk.vtkActor()
actor.SetMapper(mapper)

'''<Renderer>'''
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
renderer.SetBackground(1, 1, 1)
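# A minimal sketch (not part of the original snippet) that attaches the
# renderer to a window and interactor so the warped surface can actually
# be viewed:
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
renWin.SetSize(800, 600)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
renWin.Render()
iren.Start()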
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# This test checks the netCDF CF reader for reading unstructured (p-sided) cells.

renWin = vtk.vtkRenderWindow()
renWin.SetSize(400, 200)

#############################################################################
# Case 1: Spherical coordinates off.

# Open the file.
reader_cartesian = vtk.vtkNetCDFCFReader()
reader_cartesian.SetFileName(str(VTK_DATA_ROOT) + "/Data/sampleGenGrid3.nc")
# Set the arrays we want to load.
reader_cartesian.UpdateMetaData()
reader_cartesian.SetVariableArrayStatus("sample", 1)
reader_cartesian.SphericalCoordinatesOff()

# Assign the field to scalars.
aa_cartesian = vtk.vtkAssignAttribute()
aa_cartesian.SetInputConnection(reader_cartesian.GetOutputPort())
aa_cartesian.Assign("sample", "SCALARS", "CELL_DATA")

# Extract a surface that we can render.
surface_cartesian = vtk.vtkDataSetSurfaceFilter()
surface_cartesian.SetInputConnection(aa_cartesian.GetOutputPort())

mapper_cartesian = vtk.vtkPolyDataMapper()
mapper_cartesian.SetInputConnection(surface_cartesian.GetOutputPort())
mapper_cartesian.SetScalarRange(100, 2500)

actor_cartesian = vtk.vtkActor()
actor_cartesian.SetMapper(mapper_cartesian)

ren_cartesian = vtk.vtkRenderer()
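# The excerpt stops after creating the renderer; a plausible continuation
# (an assumption, following the same pattern as the image-type test above)
# would be:
ren_cartesian.AddActor(actor_cartesian)
renWin.AddRenderer(ren_cartesian)
renWin.Render()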
def serveVTKGeoJSON(self, datasetString):
    '''
    Deliver a geojson encoded serialized vtkpolydata file and render it
    over the canonical cpipe scene.
    '''
    if vtkOK == False:
        return """<html><head></head><body>VTK python bindings are not loadable, be sure VTK is installed on the server and its PATHS are set appropriately.</body></html>"""

    ss = vtk.vtkNetCDFCFReader()  # get test data
    ss.SphericalCoordinatesOff()
    ss.SetOutputTypeToImage()
    datadir = cherrypy.request.app.config['/data']['tools.staticdir.dir']
    datadir = os.path.join(datadir, 'assets')
    datafile = os.path.join(datadir, 'clt.nc')
    ss.SetFileName(datafile)

    sf = vtk.vtkDataSetSurfaceFilter()  # convert to polydata
    sf.SetInputConnection(ss.GetOutputPort())

    cf = vtk.vtkContourFilter()  # add some attributes
    cf.SetInputConnection(sf.GetOutputPort())
    cf.SetInputArrayToProcess(0, 0, 0, "vtkDataObject::FIELD_ASSOCIATION_POINTS", "clt")
    cf.SetNumberOfContours(10)
    sf.Update()
    drange = sf.GetOutput().GetPointData().GetArray(0).GetRange()
    for x in range(0, 10):
        cf.SetValue(x, x * 0.1 * (drange[1] - drange[0]) + drange[0])
    cf.ComputeScalarsOn()

    ef = vtk.vtkExtractEdges()  # make lines to test
    ef.SetInputConnection(sf.GetOutputPort())

    gf = vtk.vtkGlyph3D()  # make verts to test
    pts = vtk.vtkPoints()
    pts.InsertNextPoint(0, 0, 0)
    verts = vtk.vtkCellArray()
    avert = vtk.vtkVertex()
    avert.GetPointIds().SetId(0, 0)
    verts.InsertNextCell(avert)
    onevertglyph = vtk.vtkPolyData()
    onevertglyph.SetPoints(pts)
    onevertglyph.SetVerts(verts)
    gf.SetSourceData(onevertglyph)
    gf.SetInputConnection(sf.GetOutputPort())

    if datasetString == "points":
        toshow = gf
    elif datasetString == "lines":
        toshow = ef
    elif datasetString == "contour":
        toshow = cf
    else:
        toshow = sf

    gw = vtk.vtkGeoJSONWriter()
    gw.SetInputConnection(toshow.GetOutputPort())
    gw.SetScalarFormat(2)
    if True:  # debugging toggle: write to disk, else stream in memory
        gw.SetFileName("/Source/CPIPES/buildogs/deploy/dataset.gj")
        gw.Write()
        f = file("/Source/CPIPES/buildogs/deploy/dataset.gj")
        gj = str(f.readlines())
    else:
        gw.WriteToOutputStringOn()
        gw.Write()
        gj = "['" + str(gw.RegisterAndGetOutputString()).replace('\n', '') + "']"

    res = ("""
    <html>
      <head>
        <script type="text/javascript" src="/common/js/gl-matrix.js"></script>
        <script type="text/javascript" src="/lib/geoweb.min.js"></script>
        <script type="text/javascript" src="//ajax.googleapis.com/ajax/libs/jquery/1.9.0/jquery.min.js"></script>
        <script type="text/javascript">
          function makedata() {
            var datasetString = %(gjfile)s.join('\\n');
            var data = new ogs.vgl.geojsonReader().getPrimitives(datasetString);
            var geoms = data.geoms;
            var mapper = new ogs.vgl.mapper();
            mapper.setGeometryData(geoms[0]);
    """ + self.gjShader + """
            var actor = new ogs.vgl.actor();
            actor.setMapper(mapper);
            actor.setMaterial(mat);
            return actor;
          }
        </script>
        <script type="text/javascript">
          function main() {
            var mapOptions = {
              zoom: 1,
              center: ogs.geo.latlng(0.0, 0.0),
              source: '/data/assets/land_shallow_topo_2048.png'
            };
            var myMap = ogs.geo.map(document.getElementById("glcanvas"), mapOptions);
            var planeLayer = ogs.geo.featureLayer({
              "opacity": 1,
              "showAttribution": 1,
              "visible": 1
            }, makedata());
            myMap.addLayer(planeLayer);
          }
        </script>
        <link rel="stylesheet" href="http://code.jquery.com/ui/1.10.1/themes/base/jquery-ui.css" />
        <script src="http://code.jquery.com/jquery-1.9.1.js"></script>
        <script src="http://code.jquery.com/ui/1.10.1/jquery-ui.js"></script>
      </head>
      <body onload="main()">
        <canvas id="glcanvas" width="800" height="600"></canvas>
      </body>
    </html>
    """) % {'gjfile': gj}
    return res
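# A sketch (an assumption, not from the source) of how a method like
# serveVTKGeoJSON might be mounted in CherryPy; the class name and route are
# hypothetical, and serveVTKGeoJSON is assumed to be defined on this class.
import cherrypy

class GeoJSONHandler(object):
    @cherrypy.expose
    def dataset(self, datasetString="surface"):
        # delegate to the handler above; the query parameter selects
        # points, lines, contour, or the default surface output
        return self.serveVTKGeoJSON(datasetString)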
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# This test checks the netCDF reader. It uses the COARDS convention.

# Open the file.
reader = vtk.vtkNetCDFCFReader()
reader.SetFileName(str(VTK_DATA_ROOT) + "/Data/tos_O1_2001-2002.nc")
# Set the arrays we want to load.
reader.UpdateMetaData()
reader.SetVariableArrayStatus("tos", 1)
reader.SetSphericalCoordinates(0)

aa = vtk.vtkAssignAttribute()
aa.SetInputConnection(reader.GetOutputPort())
aa.Assign("tos", "SCALARS", "POINT_DATA")

thresh = vtk.vtkThreshold()
thresh.SetInputConnection(aa.GetOutputPort())
thresh.ThresholdByLower(10000)

surface = vtk.vtkDataSetSurfaceFilter()
surface.SetInputConnection(thresh.GetOutputPort())

mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(surface.GetOutputPort())
mapper.SetScalarRange(270, 310)

actor = vtk.vtkActor()
actor.SetMapper(mapper)

ren = vtk.vtkRenderer()
ren.AddActor(actor)

renWin = vtk.vtkRenderWindow()
renWin.SetSize(200, 200)
renWin.AddRenderer(ren)
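# A minimal sketch (not part of the original test) to display the result
# interactively; a regression test would instead render and compare the
# window against a baseline image.
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
renWin.Render()
iren.Start()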
def read(filename, vars, rqstTime):
    '''
    Read a file or files from a directory given a wild-card expression.
    '''
    # @todo Reading a single file of netcdf cf convention now
    #cherrypy.log("vtkread " + filename + " " + vars + " " + str(time))
    reader = vtk.vtkNetCDFCFReader()  # get test data
    reader.SphericalCoordinatesOff()
    reader.SetOutputTypeToImage()
    reader.ReplaceFillValueWithNanOn()
    reader.SetFileName(filename)
    reader.UpdateInformation()

    # Obtain temporal information.
    rawTimes = reader.GetOutputInformation(0).Get(
        vtk.vtkStreamingDemandDrivenPipeline.TIME_STEPS())
    tunits = reader.GetTimeUnits()
    converters = attrib_to_converters(tunits)

    # Pick a particular timestep.
    if rqstTime is not None and rawTimes is not None:
        utcconverter = attrib_to_converters("days since 1970-0-0")
        abs_request_time = utcconverter[0](float(rqstTime) / (1000 * 60 * 60 * 24))
        local_request_time = converters[5](abs_request_time)
        # For now clamp to the time range.
        if float(local_request_time) < rawTimes[0]:
            local_request_time = rawTimes[0]
        elif float(local_request_time) > rawTimes[-1]:
            local_request_time = rawTimes[-1]
        sddp = reader.GetExecutive()
        sddp.SetUpdateTimeStep(0, local_request_time)

    # Enable only the chosen array(s).
    narrays = reader.GetNumberOfVariableArrays()
    for x in range(0, narrays):
        arrayname = reader.GetVariableArrayName(x)
        if arrayname in vars:
            #cherrypy.log("Enable " + arrayname)
            reader.SetVariableArrayStatus(arrayname, 1)
        else:
            #cherrypy.log("Disable " + arrayname)
            reader.SetVariableArrayStatus(arrayname, 0)

    # Wrap around to get the implicit cell.
    extent = reader.GetOutputInformation(0).Get(
        vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT())
    pad = vtk.vtkImageWrapPad()
    reader.Update()
    data = reader.GetOutput()
    da = data.GetPointData().GetArray(0).GetName()
    data.GetPointData().SetActiveScalars(da)
    pad.SetInputData(data)
    pad.SetOutputWholeExtent(extent[0], extent[1] + 1,
                             extent[2], extent[3],
                             extent[4], extent[5])

    # Convert to polydata.
    sf = vtk.vtkDataSetSurfaceFilter()
    sf.SetInputConnection(pad.GetOutputPort())

    # Error reading file?
    if not sf.GetOutput():
        raise IOError("Unable to load data file: " + filename)

    # Convert to GeoJSON.
    gw = vtk.vtkGeoJSONWriter()
    gw.SetInputConnection(sf.GetOutputPort())
    gw.SetScalarFormat(2)
    gw.WriteToOutputStringOn()
    gw.Write()
    gj = str(gw.RegisterAndGetOutputString()).replace('\n', '')
    return gj
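# Hypothetical call showing the interface of read(); the file name and the
# variable name 'clt' are assumptions about the data on disk. Passing None
# for rqstTime skips the time selection and reads the default time step.
geojson = read('clt.nc', ['clt'], None)
print len(geojson)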
def import_file(self, collection, filename, private=True):
    """Import metadata from a filename into the database

    This method reads a filename (fullpath) for its metadata and stores
    it into the specified collection of the database.

    :param collection: Name of the collection to look for basename.
    :type collection: str.
    :param filename: Name of the file with fullpath (eg. /home/clt.nc).
    :type filename: str.
    :param private: Should the entry be marked as private.
    :type private: bool
    """
    if not (os.path.isfile(filename) and os.path.exists(filename)):
        raise Exception("File " + filename + " does not exist")

    # @note Assuming that getting the mongo collection every time
    # is not going to cause much performance penalty
    coll = self._db[collection]

    print 'Begin importing %s into database' % filename

    variables = []
    basename = os.path.basename(filename)
    filenamesplitted = os.path.splitext(basename)
    fileprefix = filenamesplitted[0]
    filesuffix = filenamesplitted[1]

    if self.is_exists(collection, filename):
        print 'Data %s already exists' % filename
        return

    if filesuffix == ".nc":
        # VTK is required
        import vtk
        reader = vtk.vtkNetCDFCFReader()
        reader.SphericalCoordinatesOff()
        reader.SetOutputTypeToImage()
        reader.ReplaceFillValueWithNanOn()
        reader.SetFileName(filename)
        reader.Update()
        data = reader.GetOutput()

        # Obtain spatial information
        bounds = data.GetBounds()

        # Obtain temporal information
        timeInfo = {}
        times = reader.GetOutputInformation(0).Get(
            vtk.vtkStreamingDemandDrivenPipeline.TIME_STEPS())
        timeInfo['rawTimes'] = times  # time steps in raw format
        tunits = reader.GetTimeUnits()
        timeInfo['units'] = tunits  # calendar info needed to interpret/convert times
        converters = attrib_to_converters(tunits)
        if converters and times:
            timeInfo['numSteps'] = len(times)
            nativeStart = converters[3]
            timeInfo['nativeStart'] = nativeStart
            stepUnits = converters[2]
            timeInfo['nativeUnits'] = stepUnits
            stepSize = 0
            if len(times) > 1:
                stepSize = times[1] - times[0]
            timeInfo['nativeDelta'] = stepSize
            timeInfo['nativeRange'] = (times[0], times[-1])
            stdTimeDelta = 0
            if len(times) > 1:
                stdTimeDelta = converters[0](times[1]) - converters[0](times[0])
            timeInfo['stdDelta'] = stdTimeDelta
            stdTimeRange = (converters[0](times[0]), converters[0](times[-1]))
            timeInfo['stdTimeRange'] = stdTimeRange  # first and last time as normalized integers
            dateRange = (converters[1](stdTimeRange[0]), converters[1](stdTimeRange[1]))
            timeInfo['dateRange'] = dateRange  # first and last time in Y,M,D format

        # Obtain array information
        pds = data.GetPointData()
        pdscount = pds.GetNumberOfArrays()

        if times is None:
            times = [0]
        # Go through all timesteps to accumulate global min and max values
        for t in times:
            firstTStep = t == times[0]
            arrayindex = 0
            # Go through all arrays
            for i in range(0, pdscount):
                pdarray = pds.GetArray(i)
                if not pdarray:
                    # Got an abstract array
                    continue
                if firstTStep:
                    # Create a new record for this array
                    variable = {}
                else:
                    # Extend the existing record
                    variable = variables[arrayindex]

                # Tell the reader to read data so that we can get info
                # about this time step
                sddp = reader.GetExecutive()
                sddp.SetUpdateTimeStep(0, t)
                sddp.Update()
                arrayindex = arrayindex + 1

                if firstTStep:
                    # Record unchanging meta information
                    variable["name"] = pdarray.GetName()
                    variable["dim"] = []
                    variable["tags"] = []
                    variable["units"] = reader.QueryArrayUnits(pdarray.GetName())

                # Find min and max for each component of this array at
                # this timestep
                componentCount = pdarray.GetNumberOfComponents()
                minmax = []
                for j in range(0, componentCount):
                    minmaxJ = [0, -1]
                    pdarray.GetRange(minmaxJ, j)
                    minmax.append(minmaxJ[0])
                    minmax.append(minmaxJ[1])

                if firstTStep:
                    # Remember what we learned about this new array
                    variable["range"] = minmax
                    variables.append(variable)
                else:
                    # Extend the range if necessary from this timestep's range
                    for j in range(0, componentCount):
                        if minmax[j * 2 + 0] < variable["range"][j * 2 + 0]:
                            variable["range"][j * 2 + 0] = minmax[j * 2 + 0]
                        if minmax[j * 2 + 1] > variable["range"][j * 2 + 1]:
                            variable["range"][j * 2 + 1] = minmax[j * 2 + 1]

        # Record what we've learned in the database
        insertId = coll.insert({"name": fileprefix,
                                "basename": filename,
                                "variables": variables,
                                "timeInfo": timeInfo,
                                "spatialInfo": bounds,
                                "private": private})
    print 'Done importing %s into database' % filename
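# Hypothetical usage, assuming 'importer' is an instance of the class that
# defines import_file and self._db is a connected MongoDB database; the
# collection name and path are placeholders:
#
#   importer.import_file('documents', '/home/clt.nc', private=False)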
def import_directory(self, server, database, collection, directory):
    if not (os.path.isdir(directory) and os.path.exists(directory)):
        raise Exception("Directory " + directory + " does not exist")

    conn = pymongo.Connection(server)
    db = conn[database]
    coll = db[collection]

    # Collect filenames under the directory
    from os import listdir
    from os.path import isfile, join
    files = [f for f in listdir(directory) if isfile(join(directory, f))]

    # Add files to the database
    for filename in files:
        print 'Working on %s' % filename
        variables = []
        basename = os.path.basename(filename)
        filenamesplitted = os.path.splitext(basename)
        fileprefix = filenamesplitted[0]
        filesuffix = filenamesplitted[1]
        if filesuffix == ".nc":
            import vtk
            reader = vtk.vtkNetCDFCFReader()
            reader.SphericalCoordinatesOff()
            reader.SetOutputTypeToImage()
            reader.ReplaceFillValueWithNanOn()
            reader.SetFileName(os.path.join(directory, filename))
            reader.Update()
            data = reader.GetOutput()

            # Obtain spatial information
            bounds = data.GetBounds()

            # Obtain temporal information
            timeInfo = {}
            times = reader.GetOutputInformation(0).Get(
                vtk.vtkStreamingDemandDrivenPipeline.TIME_STEPS())
            timeInfo['rawTimes'] = times  # time steps in raw format
            tunits = reader.GetTimeUnits()
            timeInfo['units'] = tunits  # calendar info needed to interpret/convert times
            converters = attrib_to_converters(tunits)
            stdTimeRange = None
            dateRange = None
            if converters and times:
                stdTimeRange = (converters[0](times[0]), converters[0](times[-1]))
                timeInfo['stdTimeRange'] = stdTimeRange  # first and last time as normalized integers
                dateRange = (converters[1](stdTimeRange[0]), converters[1](stdTimeRange[1]))
                timeInfo['dateRange'] = dateRange  # first and last time in Y,M,D format
            print filename, "tunits:", tunits, "times: ", times, \
                "std time range:", stdTimeRange, "dates: ", dateRange

            # Obtain array information
            pds = data.GetPointData()
            pdscount = pds.GetNumberOfArrays()
            for i in range(0, pdscount):
                variable = {}
                pdarray = pds.GetArray(i)
                if not pdarray:
                    # Got an abstract array
                    continue
                variable["name"] = pdarray.GetName()
                variable["dim"] = []
                variable["tags"] = []
                variable["units"] = reader.QueryArrayUnits(pdarray.GetName())
                # @todo Iterate over all timesteps; the default (first)
                # timestep may not be representative
                variable["time"] = []
                componentCount = pdarray.GetNumberOfComponents()
                minmax = []
                for j in range(0, componentCount):
                    minmaxJ = [0, -1]
                    pdarray.GetRange(minmaxJ, j)
                    minmax.append(minmaxJ[0])
                    minmax.append(minmaxJ[1])
                variable["range"] = minmax
                variables.append(variable)

            # Record what we've learned
            insertId = coll.insert({"name": fileprefix,
                                    "basename": basename,
                                    "variables": variables,
                                    "timeInfo": timeInfo,
                                    "spatialInfo": bounds})
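# Hypothetical invocation, assuming import_directory is a method on the same
# importer class as import_file; the server, database, collection, and
# directory values are all placeholders:
#
#   importer.import_directory('localhost', 'climatedb', 'datasets', '/data/netcdf')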