Пример #1
0
    def setupApplication(cls):
        """Setups the default application state.

        Walks the data directory and mirrors its layout as a nested dict
        tree of {"name", "dirs", "files"} entries rooted at cls.Database,
        then creates the render view and configures the animation scene.
        """
        # read data directory.
        root = {"name": "ROOT", "dirs": [], "files": []}
        directory_map = {_DataProber.DataPath: root}
        for path, dirs, files in os.walk(_DataProber.DataPath):
            element = directory_map[path]

            for name in dirs:
                item = {"name": name, "dirs": [], "files": []}
                # Register the child so deeper os.walk iterations find it.
                # (Dropped a duplicate item["name"] assignment and a
                # redundant element["files"] reset from the original.)
                directory_map[os.path.join(path, name)] = item
                element["dirs"].append(item)
            for name in files:
                # Paths are stored relative to the data root; clients use
                # them as opaque item values.
                relpath = os.path.relpath(os.path.join(path, name),
                                          _DataProber.DataPath)
                element["files"].append({"name": name, "itemValue": relpath})
        cls.Database = root
        cls.View = simple.CreateRenderView()
        simple.Render()

        # setup animation scene
        scene = simple.GetAnimationScene()
        simple.GetTimeTrack()
        scene.PlayMode = "Snap To TimeSteps"
Пример #2
0
    def __init__(self, x):
        #NOTE: 'x' is required in order to instantiate an actor across all nodes by passing
        #a sequence of variables
        import paraview
        # Batch + symmetric mode: every MPI rank runs the same script with
        # no GUI. NOTE(review): these flags are set before paraview.simple
        # is imported -- presumably import order matters here; confirm.
        paraview.options.batch = True
        paraview.options.symmetric = True
        import paraview.simple as pvs
        self.pvs = pvs

        # Record this process's MPI rank and the total number of ranks.
        from mpi4py import MPI
        self.rank, self.size = MPI.COMM_WORLD.Get_rank(
        ), MPI.COMM_WORLD.Get_size()

        import vtk
        from vtk import vtkWindowToImageFilter

        # Create render view and image transfer filter objects
        self.renv = pvs.CreateRenderView()
        self.w2i = vtkWindowToImageFilter()
        # Grab from the back buffer and disable implicit re-renders so
        # frame capture is driven explicitly by the caller.
        self.w2i.ReadFrontBufferOff()
        self.w2i.ShouldRerenderOff()
        self.w2i.SetInput(self.renv.SMProxy.GetRenderWindow())

        # Make sure all ranks have initialized
        MPI.COMM_WORLD.Barrier()
        if self.rank == 0:
            print("All ranks ready for rendering")
    def render(self):
        """Build the DICOM pipeline and render it into a new view.

        Reads self.directory_path, applies a grayscale color map and a
        linear opacity ramp over the data's scalar range, then renders.
        """
        self.view = paraview.CreateRenderView()
        self.source = paraview.DICOMReaderdirectory(
            FileName=self.directory_path)
        self.display = paraview.Show(self.source, self.view)

        paraview.ResetCamera()
        camera = paraview.GetActiveCamera()
        self.view.CenterOfRotation = camera.GetFocalPoint()
        # Parallel projection on a black background.
        self.view.CameraParallelProjection = 1
        self.view.Background = [0, 0, 0]

        self.current_slice = self.display.Slice
        self.display.Representation = self.representation
        self.display.ColorArrayName = self.array_name
        paraview.ColorBy(self.display, self.array_name)

        color_map = paraview.GetColorTransferFunction(self.array_name,
                                                      self.display)
        opacity_map = paraview.GetOpacityTransferFunction(
            self.array_name, self.display)

        # RGBPoints is a flat [x, r, g, b, ...] list (see the assignment
        # below), so [0] is the first scalar value and [-4] the last one.
        scale_min = color_map.RGBPoints[0]
        scale_max = color_map.RGBPoints[-4]
        scale_middle = (scale_max - scale_min) / 2
        self.scale_range = (scale_min, scale_max)

        # Grayscale ramp: black at scale_min, white at scale_max.
        color_map.RGBPoints = [
            scale_min,
            0.0,
            0.0,
            0.0,
            scale_max,
            1.0,
            1.0,
            1.0,
        ]

        # Flat [x, alpha, midpoint, sharpness, ...] list: transparent at
        # the bottom of the range, opaque at the top.
        opacity_map.Points = [
            scale_min,
            0.0,
            0.5,
            0.0,
            scale_middle,
            0.5,
            0.5,
            0.0,
            scale_max,
            1.0,
            0.5,
            0.0,
        ]

        paraview.Render(self.view)
Пример #4
0
    def createSliceExporter(self):
      """Register the 'slice' analysis and queue a SliceExplorer for it."""
      self.analysis.register_analysis(
        "slice",                            # id
        "Slice exploration",                # title
        "Perform 10 slice along X",         # description
        "{time}/{sliceColor}_{slicePosition}.jpg", # data structure
        cinema.SliceExplorer.get_data_type())
      # NOTE(review): description above says 10 slices but nb_slices is 5
      # -- confirm which is intended.
      nb_slices = 5
      colorByArray = { "velocity": { "lut": self.lut , "type": 'POINT_DATA'} }
      view = simple.CreateRenderView()

      fng = self.analysis.get_file_name_generator("slice")
      # NOTE(review): 'input' resolves to the builtin unless a module-level
      # global of that name is defined elsewhere -- verify against the
      # enclosing module.
      exporter = cinema.SliceExplorer(fng, view, input, colorByArray, nb_slices)
      exporter.set_analysis(self.analysis)
      self.exporters.append(exporter)
Пример #5
0
    def updateContents(self, inputPorts):
        """(Re)build the render view contents from *inputPorts*.

        inputPorts is a (location, representations) pair; each
        representation is handed the view and executed. The view (plus a
        logo overlay layer) is created lazily on first call.
        """
        if self.view is None:  # was '== None'; identity test is the idiom
            self.view = pvsp.CreateRenderView()
            renWin = self.view.GetRenderWindow()
            self.SetRenderWindow(renWin)
            iren = renWin.GetInteractor()
            iren.SetNonInteractiveRenderDelay(0)
            iren.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())

            # Load the uvcdat logo and use it for overlay
            logoPath = (
                system.vistrails_root_directory() +
                "/gui/uvcdat/resources/images/uvcdat_logo_transparent.png")
            reader = vtk.vtkPNGReader()
            reader.SetFileName(logoPath)
            reader.Update()

            imageActor = vtk.vtkImageActor()
            imageActor.SetInputData(reader.GetOutput())

            # Put the logo in its own topmost renderer layer so it draws
            # over the scene.
            self.overlayRenderer = vtk.vtkRenderer()
            self.overlayRenderer.AddActor(imageActor)

            renWin.SetNumberOfLayers(renWin.GetNumberOfLayers() + 1)
            self.overlayRenderer.SetLayer(renWin.GetNumberOfLayers() - 1)
            renWin.AddRenderer(self.overlayRenderer)

            # Bottom-right corner of the window.
            self.overlayRenderer.SetViewport(0.7, 0, 1.0, 0.3)

        # Drop stale representations left over from a previous update.
        del self.view.Representations[:]

        # Fetch variables from the input port
        (location, representations) = inputPorts

        for rep in representations:
            rep.set_view(self.view)
            rep.execute()

        # Set view specific properties
        self.view.CenterAxesVisibility = 0
        self.view.Background = [0.6, 0.6, 0.6]

        self.view.ResetCamera()
        self.view.StillRender()

        QCellWidget.updateContents(self, inputPorts)
Пример #6
0
    def simple360(self):
      """Register the '360' rotation analysis and queue its exporter."""
      self.analysis.register_analysis(
          "360",                                  # id
          "rotation",                             # title
          "Perform 15 contour",                   # description
          "{time}/{theta}_{phi}.jpg", # data structure
          cinema.ThreeSixtyImageStackExporter.get_data_type())
      fng = self.analysis.get_file_name_generator("360")
      arrayName = ('POINT_DATA', 'velocity')
      view = simple.CreateRenderView()

      # NOTE(review): 'input' resolves to the builtin unless shadowed by a
      # module-level global -- verify against the enclosing module.
      rep = simple.Show(input, view)
      rep.LookupTable = self.lut
      rep.ColorArrayName = arrayName

      # NOTE(review): [20, 45] presumably are angular step sizes for the
      # theta/phi sweep -- confirm against ThreeSixtyImageStackExporter.
      exporter = cinema.ThreeSixtyImageStackExporter(fng, view, self.center_of_rotation, self.distance, self.rotation_axis, [20,45])
      self.exporters.append(exporter)
Пример #7
0
    def updateContents(self, inputPorts):

        if self.view == None:
            self.view = pvsp.CreateRenderView()
            renWin = self.view.GetRenderWindow()
            self.SetRenderWindow(renWin)
            iren = renWin.GetInteractor()
            print type(iren)
            iren.SetNonInteractiveRenderDelay(0)
            iren.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())

        (representations, ) = inputPorts
        self.view.Representations = []
        for r in representations:
            self.view.Representations.append(r.pvInstance)

        self.view.ResetCamera()
        self.view.StillRender()

        QCellWidget.updateContents(self, inputPorts)
Пример #8
0
def runTest():
    """Regression test: render a sphere remotely and compare to baseline.

    Connects to the configured server, renders with the remote-render
    threshold above and then below the data size, and runs the standard
    image regression comparison.
    """
    options = servermanager.vtkRemotingCoreConfiguration.GetInstance()
    url = options.GetServerURL()
    smp.Connect(getHost(url), getPort(url))

    r = smp.CreateRenderView()
    # Geometry-size threshold that decides remote vs. local rendering.
    # NOTE(review): units presumably megabytes -- confirm.
    r.RemoteRenderThreshold = 20
    s = smp.Sphere()
    s.PhiResolution = 80
    s.ThetaResolution = 80

    d = smp.Show()
    d.Representation = "Wireframe"
    # Render once with the high threshold, force remote rendering with a
    # zero threshold, then render again after shrinking the sphere.
    smp.Render()
    r.RemoteRenderThreshold = 0
    smp.Render()
    s.PhiResolution = 8
    s.ThetaResolution = 8
    smp.Render()

    smtesting.ProcessCommandLineArguments()
    if not smtesting.DoRegressionTesting(r.SMProxy):
        raise smtesting.TestError ("Test failed!!!")
    print ("Test Passed")
Пример #9
0
    def test_composite(self):
        """Exercise a composite-image-stack cinema store end to end.

        Builds a wavelet contour pipeline, explores camera angles, a
        contour layer, isovalues and color fields into a FileStore, then
        re-renders one parameter combination and compares it against the
        stored copy (L2 error and normalized cross-correlation).
        """
        pv.Connect()  # get a new context like a normal script would
        print "\nTEST_COMPOSITE"

        # set up some processing task
        view_proxy = pv.CreateRenderView()
        view_proxy.OrientationAxesVisibility = 0
        s = pv.Wavelet()
        contour = pv.Contour(Input=s, ContourBy='RTData', ComputeScalars=1)
        sliceRep = pv.Show(contour)

        # make or open a cinema data store to put results in
        fname = "/tmp/test_pv_composite/info.json"
        cs = file_store.FileStore(fname)
        cs.add_metadata({'type': 'composite-image-stack'})
        cs.add_metadata({'store_type': 'FS'})
        cs.add_metadata({'version': '0.1'})

        cs.filename_pattern = "results.png"
        cs.add_parameter(
            "phi", store.make_parameter('phi', [90, 120, 140]))
        cs.add_parameter(
            "theta", store.make_parameter('theta', [-90, -30, 30, 90]))
        cs.add_layer(
            "vis", store.make_parameter("vis", ['contour']))
        contours = [50, 100, 150, 200]
        cs.add_control("isoval",
                       store.make_parameter('isoval', contours))
        # The isovalue control only applies while the contour layer is on.
        cs.assign_parameter_dependence("isoval", "vis", ['contour'])
        cs.add_field("color",
                     store.make_field('color',
                                      {'white': 'rgb',
                                       'depth': 'depth',
                                       'lum': 'luminance',
                                       'RTData_1': 'lut'},),
                     "isoval", contours)

        # associate control points with parameters of the data store
        cam = pv_explorers.Camera([0, 0, 0], [0, 1, 0], 75.0, view_proxy)
        showcontour = pv_explorers.SourceProxyInLayer("contour",
                                                      sliceRep, contour)
        layertrack = explorers.Layer("vis", [showcontour])
        filt = pv_explorers.Contour("isoval", contour)

        # additional specification necessary for the color field
        colorChoice = pv_explorers.ColorList()
        colorChoice.AddSolidColor('white', [1, 1, 1])
        colorChoice.AddLUT('POINTS', 'RTData_1', 'X')
        colorChoice.AddDepth('depth')
        colorChoice.AddLuminance('lum')

        col = pv_explorers.Color("color", colorChoice, sliceRep)

        paramNames = ["phi", "theta", "vis", "isoval", "color"]
        trackList = [cam, layertrack, filt, col]
        e = pv_explorers.ImageExplorer(cs,
                                       paramNames, trackList, view_proxy)

        # run through all parameter combinations and put data into the store
        e.explore()

        # Reproduce an entry and compare vs. loaded
        # First set the parameters to reproduce
        cam.execute(store.Document({'theta': 30, 'phi': 140}))
        filt.execute(store.Document({'isoval': 100}))
        col.execute(store.Document({'color': 'RTData_1'}))
        imageslice = ch.pvRenderToArray(view_proxy)

        # Now load the corresponding
        cs2 = file_store.FileStore(fname)
        cs2.load()
        docs = []
        for doc in cs2.find({'theta': 30, 'phi': 140,
                             'isoval': 100, 'color': 'RTData_1'}):
            docs.append(doc.data)

        # compare the two
        l2error = ch.compare_l2(imageslice, docs[0])
        ncc = ch.compare_ncc(imageslice, docs[0])
        # Low L2 error plus high correlation means the regenerated image
        # matches the stored one.
        success = (l2error < 1.0) and (ncc > 0.99)

        if not success:
            print "\n l2-error = ", l2error, " ; ncc = ", ncc, "\n"

        self.assertTrue(success)
        pv.Disconnect()  # using a dedicated server state for each test
Пример #10
0
                   (-4.50, 0.0, 0.0), (-4.0, 0.0, 0.0), (-3.5, 0.0, 0.0),
                   (-3.0, 0.0, 0.0), (3.0, 0.0, 0.0), (3.5, 0.0, 0.0),
                   (4.0, 0.0, 0.0), (4.5, 0.0, 0.0), (5.0, 0.0, 0.0),
                   (5.5, 0.0, 0.0), (6.0, 0.0, 0.0), (6.5, 0.0, 0.0),
                   (6.7, 0.0, 0.0), (6.9, 0.0, 0.0), (7.0, 0.0, 0.0)]
# Per-seed-point collections for the pipeline objects built later.
# NOTE(review): keys/values are filled elsewhere -- presumably keyed by
# the seed points listed above; confirm against the rest of the script.
flines = {}
i_integral = {}
dsp = {}
fwd = {}
bkwd = {}
min_sphr = {}
min_sphr_disp = {}
contours = {}
contsphere_b = {}
contsphere_f = {}
# 2D render view: camera on the -Y axis looking at the origin, Z up.
rvs = pv.CreateRenderView()
rvs.InteractionMode = '2D'
rvs.CameraPosition = [0, -5, 0]
rvs.CameraFocalPoint = [0, 0, 0]
rvs.CameraViewUp = [0, 0, 1]
rvs.CameraParallelScale = 11
# Light gray axes grid with dark labels/titles on the X and Z axes.
rvs.AxesGrid.Visibility = 1
rvs.AxesGrid.AxesToLabel = 5
rvs.AxesGrid.GridColor = [0.8, 0.8, 0.8]
rvs.AxesGrid.ShowGrid = 1
rvs.AxesGrid.XLabelColor = [0.2, 0.2, 0.2]
rvs.AxesGrid.XTitleColor = [0.2, 0.2, 0.2]
rvs.AxesGrid.ZLabelColor = [0.2, 0.2, 0.2]
rvs.AxesGrid.ZTitleColor = [0.2, 0.2, 0.2]
rvs.OrientationAxesVisibility = 0
rvs.ViewSize = [1920, 1280]
Пример #11
0
    def test_slice(self):
        """Exercise a parametric-image-stack cinema store with a slice.

        Explores camera angles, slice offsets and solid colors into a
        FileStore, then re-renders one combination and compares it with
        the stored image (L2 error and normalized cross-correlation).
        """
        pv.Connect()  # using a dedicated server state for each test
        print "\nTEST_SLICE"

        # set up some processing task
        view_proxy = pv.CreateRenderView()
        view_proxy.OrientationAxesVisibility = 0
        s = pv.Sphere()
        sliceFilt = pv.Slice(
            SliceType="Plane", Input=s, SliceOffsetValues=[0.0])
        sliceFilt.SliceType.Normal = [0, 1, 0]
        sliceRep = pv.Show(sliceFilt)

        # make or open a cinema data store to put results in
        fname = "/tmp/test_pv_slice/info.json"
        cs = file_store.FileStore(fname)
        cs.add_metadata({'type': 'parametric-image-stack'})
        cs.add_metadata({'store_type': 'FS'})
        cs.add_metadata({'version': '0.0'})
        cs.filename_pattern = "{phi}_{theta}_{offset}_{color}_slice.png"
        cs.add_parameter(
            "phi", store.make_parameter('phi', [90, 120, 140]))
        cs.add_parameter(
            "theta", store.make_parameter('theta', [-90, -30, 30, 90]))
        cs.add_parameter(
            "offset",
            store.make_parameter('offset', [-.4, -.2, 0, .2, .4]))
        cs.add_parameter(
            "color",
            store.make_parameter(
                'color', ['yellow', 'cyan', "purple"], typechoice='list'))

        colorChoice = pv_explorers.ColorList()
        colorChoice.AddSolidColor('yellow', [1, 1, 0])
        colorChoice.AddSolidColor('cyan', [0, 1, 1])
        colorChoice.AddSolidColor('purple', [1, 0, 1])

        # associate control points with parameters of the data store
        cam = pv_explorers.Camera([0, 0, 0], [0, 1, 0], 10.0, view_proxy)
        filt = pv_explorers.Slice("offset", sliceFilt)
        col = pv_explorers.Color("color", colorChoice, sliceRep)

        params = ["phi", "theta", "offset", "color"]
        e = pv_explorers.ImageExplorer(
            cs, params, [cam, filt, col], view_proxy)
        # run through all parameter combinations and put data into the store
        e.explore()

        # Reproduce an entry and compare vs. loaded

        # First set the parameters to reproduce
        cam.execute(store.Document({'theta': -30, 'phi': 120}))
        filt.execute(store.Document({'offset': -.4}))
        col.execute(store.Document({'color': 'cyan'}))
        imageslice = ch.pvRenderToArray(view_proxy)

        # Now load the corresponding entry
        cs2 = file_store.FileStore(fname)
        cs2.load()
        docs = []
        for doc in cs2.find(
                {'theta': -30, 'phi': 120, 'offset': -.4, 'color': 'cyan'}):
            docs.append(doc.data)

        # print "gen entry: \n",
        #        imageslice, "\n",
        #        imageslice.shape,"\n",
        #        "loaded: \n",
        #        docs[0], "\n",
        #        docs[0].shape
        # compare the two
        l2error = ch.compare_l2(imageslice, docs[0])
        ncc = ch.compare_ncc(imageslice, docs[0])
        # Low L2 error plus high correlation means the images match.
        self.assertTrue((l2error < 1.0) and (ncc > 0.99))
        pv.Disconnect()  # using a dedicated server state for each test
Пример #12
0
import pv_utils as utils
import paraview.simple as pv

# Input Tecplot data files.
rdaTec = 'data/rhoda.tec'
rhcTec = 'data/rhohc.tec'
rnpTec = 'data/rho_fld_np.tec'
surfTec = 'data/rho_surf.tec'
# RGB colors used for the surface and contour overlays.
white = [1.0, 1.0, 1.0]
red = [0.67, 0.0, 0.0]
green = [0.0, 0.67, 0.0]
gold = [1.0, 0.85, 0.0]
black = [0.0, 0.0, 0.0]

# Compose layered contours from the four datasets into one view and
# save a screenshot.
center = utils.GetCenter(tecFile=rdaTec)
renderView = pv.CreateRenderView(ViewSize=[700, 500], Background=white)
utils.ColorSurface(tecFile=rdaTec, opacity=0.5)
utils.NewContour(tecFile=rdaTec, color=red, opacity=0.5)
utils.NewContour(tecFile=rhcTec, color=green, opacity=0.5)
utils.NewContour(tecFile=rnpTec, color=gold, opacity=1.0)
utils.NewContour(tecFile=surfTec, color=black, opacity=0.5)
utils.SetCameraFocus(tecFile=rdaTec)
utils.SetOrientation(camPosDir=[-1.0, -0.5, 0.3])
pv.SaveScreenshot('contours2.png')
Пример #13
0
        def createPipeline(self):
            """Build a minimal pipeline: a render view showing a sphere."""
            view = pvsimple.CreateRenderView()
            self.renderView = view
            # A default sphere source, shown in the active view.
            pvsimple.Sphere()
            pvsimple.Show()
Пример #14
0
# Drive the cinema analysis: register each explorer and export its images.
analysis = cinema.AnalysisManager(work_directory, "Cinema Test",
                                  "Test various cinema explorers.")
analysis.begin()

# === SliceExplorer ===========================================================

analysis.register_analysis(
    "slice",  # id
    "Slice exploration",  # title
    "Perform 10 slice along X",  # description
    "{sliceColor}_{slicePosition}.jpg",  # data structure
    cinema.SliceExplorer.get_data_type())
# NOTE(review): description above says 10 slices but nb_slices is 5 --
# confirm which is intended.
nb_slices = 5
colorByArray = {"RTData": {"lut": lut, "type": 'POINT_DATA'}}
view = simple.CreateRenderView()

fng = analysis.get_file_name_generator("slice")
exporter = cinema.SliceExplorer(fng, view, data_to_explore, colorByArray,
                                nb_slices)
exporter.set_analysis(analysis)

# Explore
exporter.UpdatePipeline()

# === ContourExplorer + ThreeSixtyImageStackExporter ==========================

analysis.register_analysis(
    "contour-360",  # id
    "Contour",  # title
    "Perform 15 contour",  # description
Пример #15
0
#     else:
#         print "LT: ", localTime, ": ", t96_K0_ds.field_lines[localTime].startLoc
#
# print "Drift Shell [B_mirror(K)] (T96 K1000)"
# for localTime in t96_K1000_ds.field_lines:
#     print "B(K1000): ", t96_K1000_ds.field_lines[localTime].get_B_mirror_for_K(1000)
#     if t96_K1000_ds.field_lines[localTime] is None:
#         print "LT: ", localTime, ": NO LINE FOUND"
#     else:
#         print "LT: ", localTime, ": ", t96_K1000_ds.field_lines[localTime].startLoc
#
#

# Create a paraview render view so we can see visual progress.
# pv._DisableFirstRenderCameraReset()
rvs = pv.CreateRenderView()
# 2D interaction: camera on the -Y axis looking at the origin, Z up.
rvs.InteractionMode = '2D'
rvs.CameraPosition = [0, -5, 0]
rvs.CameraFocalPoint = [0, 0, 0]
rvs.CameraViewUp = [0, 0, 1]
rvs.CameraParallelScale = 11
# Light gray axes grid with dark labels/titles on X, Z and Y.
rvs.AxesGrid.Visibility = 1
rvs.AxesGrid.AxesToLabel = 5
rvs.AxesGrid.GridColor = [0.8, 0.8, 0.8]
rvs.AxesGrid.ShowGrid = 1
rvs.AxesGrid.XLabelColor = [0.2, 0.2, 0.2]
rvs.AxesGrid.XTitleColor = [0.2, 0.2, 0.2]
rvs.AxesGrid.ZLabelColor = [0.2, 0.2, 0.2]
rvs.AxesGrid.ZTitleColor = [0.2, 0.2, 0.2]
rvs.AxesGrid.YLabelColor = [0.2, 0.2, 0.2]
rvs.AxesGrid.YTitleColor = [0.2, 0.2, 0.2]
Пример #16
0
# Field-line geometry integrals I(B_mirror); see the plot labels below.
I = fline.get_i_integrals()

# #################################################
# To activate plotting, uncomment everything below #
# #################################################
x, y = ih.dict_to_x_y(I)
fig1 = pl.figure()
pl.plot(x,y)
pl.xlabel("$B_{mirror}$ (nT)")
pl.ylabel("$I(B_{mirror})$")
pl.title("Field Line Geometry Integral $I$ with respect to $B_{mirror}$ \n For Field line that passes through (-40, 0, 0)")
fig1.savefig("IwrtBm.png")

# Create a paraview render view so we can see visual progress.
# pv._DisableFirstRenderCameraReset()
rv128 = pv.CreateRenderView()
# 2D view: camera on the -Y axis, Z up.
rv128.InteractionMode = '2D'
rv128.CameraPosition = [0, -30, 0]
rv128.CameraViewUp = [0, 0, 1]
rv128.CameraParallelScale = 10
rv128.ViewSize = [1280, 1024]
dipoleDisplay = pv.GetDisplayProperties(dipole128, view=rv128)
# pv.Hide(dipole128, view=rv128)

# create a new 'Sphere'
# Unit-radius sphere at the origin with fine tessellation.
sphere1 = pv.Sphere()
sphere1.Radius = 1.0
sphere1.ThetaResolution = 64
sphere1.PhiResolution = 64
pv.Show(sphere1, rv128)
Пример #17
0
    def test_contour(self):
        """Exercise a parametric-image-stack cinema store with contours.

        Explores camera angles, contour isovalues and colors into a
        FileStore, then re-renders one combination and compares it with
        the stored image (L2 error and normalized cross-correlation).
        """
        pv.Connect()  # using a dedicated server state for each test
        print "\nTEST_CONTOUR"

        # set up some processing task
        view_proxy = pv.CreateRenderView()
        view_proxy.OrientationAxesVisibility = 0
        view_proxy.ViewSize = [1024, 768]
        s = pv.Wavelet()
        contour = pv.Contour(Input=s, ContourBy='RTData', ComputeScalars=1)
        sliceRep = pv.Show(contour)

        # make or open a cinema data store to put results in
        fname = "/tmp/test_pv_contour/info.json"
        cs = file_store.FileStore(fname)
        cs.add_metadata({'type': 'parametric-image-stack'})
        cs.add_metadata({'store_type': 'FS'})
        cs.add_metadata({'version': '0.0'})
        cs.filename_pattern = "{phi}_{theta}_{contour}_{color}_contour.png"
        cs.add_parameter(
            "phi", store.make_parameter('phi', [90, 120, 140]))
        cs.add_parameter(
            "theta", store.make_parameter('theta', [-90, -30, 30, 90]))
        cs.add_parameter(
            "contour",
            store.make_parameter('contour', [50, 100, 150, 200]))
        cs.add_parameter(
            "color",
            store.make_parameter(
                'color', ['white', 'RTData_1'], typechoice='list'))

        # associate control points with parameters of the data store
        cam = pv_explorers.Camera(
            [0, 0, 0], [0, 1, 0], 75.0, view_proxy)
        filt = pv_explorers.Contour("contour", contour)

        colorChoice = pv_explorers.ColorList()
        colorChoice.AddSolidColor('white', [1, 1, 1])
        colorChoice.AddLUT('POINTS', 'RTData_1', 'X')
        col = pv_explorers.Color("color", colorChoice, sliceRep)

        params = ["phi", "theta", "contour", "color"]
        e = pv_explorers.ImageExplorer(
            cs, params, [cam, filt, col], view_proxy)

        # run through all parameter combinations and put data into the store
        e.explore()

        # Reproduce an entry and compare vs. loaded

        # First set the parameters to reproduce
        cam.execute(store.Document({'theta': 30, 'phi': 140}))
        filt.execute(store.Document({'contour': 100}))
        col.execute(store.Document({'color': 'RTData_1'}))

        imageslice = ch.pvRenderToArray(view_proxy)

        # Now load the corresponding
        cs2 = file_store.FileStore(fname)
        cs2.load()
        docs = []
        for doc in cs2.find(
                {'theta': 30, 'phi': 140,
                 'contour': 100, 'color': 'RTData_1'}):
            docs.append(doc.data)

        # compare the two
        l2error = ch.compare_l2(imageslice, docs[0])
        ncc = ch.compare_ncc(imageslice, docs[0])
        # Low L2 error plus high correlation means the images match.
        success = (l2error < 1.0) and (ncc > 0.99)
        if not success:
            print "\n l2-error = ", l2error, " ; ncc = ", ncc, "\n"
        self.assertTrue(success)
        pv.Disconnect()  # using a dedicated server state for each test
Пример #18
0
def render_frames(
    scene,
    frames_dir=None,
    frame_window=None,
    render_missing_frames=False,
    save_state_to_file=None,
    no_render=False,
    show_preview=False,
    show_progress=False,
    job_id=None,
):
    # Validate scene
    if scene["View"]["ViewSize"][0] % 16 != 0:
        logger.warning(
            "The view width should be a multiple of 16 to be compatible with"
            " QuickTime.")
    if scene["View"]["ViewSize"][1] % 2 != 0:
        logger.warning(
            "The view height should be even to be compatible with QuickTime.")

    render_start_time = time.time()

    # Setup layout
    layout = pv.CreateLayout("Layout")

    # Setup view
    if "Background" in scene["View"]:
        bg_config = scene["View"]["Background"]
        del scene["View"]["Background"]
        if isinstance(bg_config, list):
            if isinstance(bg_config[0], list):
                assert len(bg_config) == 2, (
                    "When 'Background' is a list of colors, it must have 2"
                    " entries.")
                bg_config = dict(
                    BackgroundColorMode="Gradient",
                    Background=parse_as.color(bg_config[0]),
                    Background2=parse_as.color(bg_config[1]),
                )
            else:
                bg_config = dict(
                    BackgroundColorMode="Single Color",
                    Background=parse_as.color(bg_config),
                )
            bg_config["UseColorPaletteForBackground"] = 0
            scene["View"].update(bg_config)
            bg_config = None
    else:
        bg_config = None
    view = pv.CreateRenderView(**scene["View"])
    pv.AssignViewToLayout(view=view, layout=layout, hint=0)

    # Set spherical background texture
    if bg_config is not None:
        bg_config["BackgroundColorMode"] = "Texture"
        skybox_datasource = bg_config["Datasource"]
        del bg_config["Datasource"]
        background_texture = pvserver.rendering.ImageTexture(
            FileName=parse_as.path(scene["Datasources"][skybox_datasource]))
        background_sphere = pv.Sphere(Radius=bg_config["Radius"],
                                      ThetaResolution=100,
                                      PhiResolution=100)
        background_texture_map = pv.TextureMaptoSphere(Input=background_sphere)
        pv.Show(
            background_texture_map,
            view,
            Texture=background_texture,
            BackfaceRepresentation="Cull Frontface",
            Ambient=1.0,
        )

    # Load the waveform data file
    waveform_h5file, waveform_subfile = parse_as.file_and_subfile(
        scene["Datasources"]["Waveform"])
    waveform_data = WaveformDataReader(FileName=waveform_h5file,
                                       Subfile=waveform_subfile)
    pv.UpdatePipeline()

    # Generate volume data from the waveform. Also sets the available time range.
    # TODO: Pull KeepEveryNthTimestep out of datasource
    waveform_to_volume_configs = scene["WaveformToVolume"]
    if isinstance(waveform_to_volume_configs, dict):
        waveform_to_volume_configs = [{
            "Object": waveform_to_volume_configs,
        }]
        if "VolumeRepresentation" in scene:
            waveform_to_volume_configs[0]["VolumeRepresentation"] = scene[
                "VolumeRepresentation"]
    waveform_to_volume_objects = []
    for waveform_to_volume_config in waveform_to_volume_configs:
        volume_data = WaveformToVolume(
            WaveformData=waveform_data,
            SwshCacheDirectory=parse_as.path(
                scene["Datasources"]["SwshCache"]),
            **waveform_to_volume_config["Object"],
        )
        if "Modes" in waveform_to_volume_config["Object"]:
            volume_data.Modes = waveform_to_volume_config["Object"]["Modes"]
        if "Polarizations" in waveform_to_volume_config["Object"]:
            volume_data.Polarizations = waveform_to_volume_config["Object"][
                "Polarizations"]
        waveform_to_volume_objects.append(volume_data)

    # Compute timing and frames information
    time_range_in_M = (
        volume_data.TimestepValues[0],
        volume_data.TimestepValues[-1],
    )
    logger.debug(f"Full available data time range: {time_range_in_M} (in M)")
    if "FreezeTime" in scene["Animation"]:
        frozen_time = scene["Animation"]["FreezeTime"]
        logger.info(f"Freezing time at {frozen_time}.")
        view.ViewTime = frozen_time
        animation = None
    else:
        if "Crop" in scene["Animation"]:
            time_range_in_M = scene["Animation"]["Crop"]
            logger.debug(f"Cropping time range to {time_range_in_M} (in M).")
        animation_speed = scene["Animation"]["Speed"]
        frame_rate = scene["Animation"]["FrameRate"]
        num_frames = animate.num_frames(
            max_animation_length=time_range_in_M[1] - time_range_in_M[0],
            animation_speed=animation_speed,
            frame_rate=frame_rate,
        )
        animation_length_in_seconds = num_frames / frame_rate
        animation_length_in_M = animation_length_in_seconds * animation_speed
        time_per_frame_in_M = animation_length_in_M / num_frames
        logger.info(f"Rendering {animation_length_in_seconds:.2f}s movie with"
                    f" {num_frames} frames ({frame_rate} FPS or"
                    f" {animation_speed:.2e} M/s or"
                    f" {time_per_frame_in_M:.2e} M/frame)...")
        if frame_window is not None:
            animation_window_num_frames = frame_window[1] - frame_window[0]
            animation_window_time_range = (
                time_range_in_M[0] + frame_window[0] * time_per_frame_in_M,
                time_range_in_M[0] +
                (frame_window[1] - 1) * time_per_frame_in_M,
            )
            logger.info(
                f"Restricting rendering to {animation_window_num_frames} frames"
                f" (numbers {frame_window[0]} to {frame_window[1] - 1}).")
        else:
            animation_window_num_frames = num_frames
            animation_window_time_range = time_range_in_M
            frame_window = (0, num_frames)

        # Setup animation so that sources can retrieve the `UPDATE_TIME_STEP`
        animation = pv.GetAnimationScene()
        # animation.UpdateAnimationUsingDataTimeSteps()
        # Since the data can be evaluated at arbitrary times we define the time steps
        # here by setting the number of frames within the full range
        animation.PlayMode = "Sequence"
        animation.StartTime = animation_window_time_range[0]
        animation.EndTime = animation_window_time_range[1]
        animation.NumberOfFrames = animation_window_num_frames
        logger.debug(
            f"Animating from scene time {animation.StartTime} to"
            f" {animation.EndTime} in {animation.NumberOfFrames} frames.")

        def scene_time_from_real(real_time):
            return (real_time / animation_length_in_seconds *
                    animation_length_in_M)

        # For some reason the keyframe time for animations is expected to be within
        # (0, 1) so we need to transform back and forth from this "normalized" time
        def scene_time_from_normalized(normalized_time):
            return animation.StartTime + normalized_time * (
                animation.EndTime - animation.StartTime)

        def normalized_time_from_scene(scene_time):
            return (scene_time - animation.StartTime) / (animation.EndTime -
                                                         animation.StartTime)

        # Setup progress measuring already here so volume data computing for
        # initial frame is measured
        if show_progress and not no_render:
            logging.getLogger().handlers = [TqdmLoggingHandler()]
            animation_window_frame_range = tqdm.trange(
                animation_window_num_frames,
                desc="Rendering",
                unit="frame",
                miniters=1,
                position=job_id,
            )
        else:
            animation_window_frame_range = range(animation_window_num_frames)

        # Set the initial time step
        animation.GoToFirst()

    # Display the volume data. This will trigger computing the volume data at the
    # current time step.
    for volume_data, waveform_to_volume_config in zip(
            waveform_to_volume_objects, waveform_to_volume_configs):
        vol_repr = (waveform_to_volume_config["VolumeRepresentation"]
                    if "VolumeRepresentation" in waveform_to_volume_config else
                    {})
        volume_color_by = config_color.extract_color_by(vol_repr)
        if (vol_repr["VolumeRenderingMode"] == "GPU Based"
                and len(volume_color_by) > 2):
            logger.warning(
                "The 'GPU Based' volume renderer doesn't support multiple"
                " components.")
        volume = pv.Show(volume_data, view, **vol_repr)
        pv.ColorBy(volume, value=volume_color_by)

    if "Slices" in scene:
        for slice_config in scene["Slices"]:
            slice_obj_config = slice_config.get("Object", {})
            slice = pv.Slice(Input=volume_data)
            slice.SliceType = "Plane"
            slice.SliceOffsetValues = [0.0]
            slice.SliceType.Origin = slice_obj_config.get(
                "Origin", [0.0, 0.0, -0.3])
            slice.SliceType.Normal = slice_obj_config.get(
                "Normal", [0.0, 0.0, 1.0])
            slice_rep = pv.Show(slice, view,
                                **slice_config.get("Representation", {}))
            pv.ColorBy(slice_rep, value=volume_color_by)

    # Display the time
    if "TimeAnnotation" in scene:
        time_annotation = pv.AnnotateTimeFilter(volume_data,
                                                **scene["TimeAnnotation"])
        pv.Show(time_annotation, view, **scene["TimeAnnotationRepresentation"])

    # Add spheres
    if "Spheres" in scene:
        for sphere_config in scene["Spheres"]:
            sphere = pv.Sphere(**sphere_config["Object"])
            pv.Show(sphere, view, **sphere_config["Representation"])

    # Add trajectories and objects that follow them
    if "Trajectories" in scene:
        for trajectory_config in scene["Trajectories"]:
            trajectory_name = trajectory_config["Name"]
            radial_scale = (trajectory_config["RadialScale"]
                            if "RadialScale" in trajectory_config else 1.0)
            # Load the trajectory data
            traj_data_reader = TrajectoryDataReader(
                RadialScale=radial_scale,
                **scene["Datasources"]["Trajectories"][trajectory_name],
            )
            # Make sure the data is loaded so we can retrieve timesteps.
            # TODO: This should be fixed in `TrajectoryDataReader` by
            # communicating time range info down the pipeline, but we had issues
            # with that (see also `WaveformDataReader`).
            traj_data_reader.UpdatePipeline()
            if "Objects" in trajectory_config:
                with animate.restore_animation_state(animation):
                    follow_traj = FollowTrajectory(
                        TrajectoryData=traj_data_reader)
                for traj_obj_config in trajectory_config["Objects"]:
                    for traj_obj_key in traj_obj_config:
                        if traj_obj_key in [
                                "Representation",
                                "Visibility",
                                "TimeShift",
                                "Glyph",
                        ]:
                            continue
                        traj_obj_type = getattr(pv, traj_obj_key)
                        traj_obj_glyph = traj_obj_type(
                            **traj_obj_config[traj_obj_key])
                    follow_traj.UpdatePipeline()
                    traj_obj = pv.Glyph(Input=follow_traj,
                                        GlyphType=traj_obj_glyph)
                    # Can't set this in the constructor for some reason
                    traj_obj.ScaleFactor = 1.0
                    for glyph_property in (traj_obj_config["Glyph"] if "Glyph"
                                           in traj_obj_config else []):
                        setattr(
                            traj_obj,
                            glyph_property,
                            traj_obj_config["Glyph"][glyph_property],
                        )
                    traj_obj.UpdatePipeline()
                    if "TimeShift" in traj_obj_config:
                        traj_obj = animate.apply_time_shift(
                            traj_obj, traj_obj_config["TimeShift"])
                    pv.Show(traj_obj, view,
                            **traj_obj_config["Representation"])
                    if "Visibility" in traj_obj_config:
                        animate.apply_visibility(
                            traj_obj,
                            traj_obj_config["Visibility"],
                            normalized_time_from_scene,
                            scene_time_from_real,
                        )
            if "Tail" in trajectory_config:
                with animate.restore_animation_state(animation):
                    traj_tail = TrajectoryTail(TrajectoryData=traj_data_reader)
                if "TimeShift" in trajectory_config:
                    traj_tail = animate.apply_time_shift(
                        traj_tail, trajectory_config["TimeShift"])
                tail_config = trajectory_config["Tail"]
                traj_color_by = config_color.extract_color_by(tail_config)
                if "Visibility" in tail_config:
                    tail_visibility_config = tail_config["Visibility"]
                    del tail_config["Visibility"]
                else:
                    tail_visibility_config = None
                tail_rep = pv.Show(traj_tail, view, **tail_config)
                pv.ColorBy(tail_rep, value=traj_color_by)
                if tail_visibility_config is not None:
                    animate.apply_visibility(
                        traj_tail,
                        tail_visibility_config,
                        normalized_time_from_scene=normalized_time_from_scene,
                        scene_time_from_real=scene_time_from_real,
                    )
            if "Move" in trajectory_config:
                move_config = trajectory_config["Move"]
                logger.debug(
                    f"Animating '{move_config['guiName']}' along trajectory.")
                with h5py.File(trajectory_file, "r") as traj_data_file:
                    trajectory_data = np.array(
                        traj_data_file[trajectory_subfile])
                if radial_scale != 1.0:
                    trajectory_data[:, 1:] *= radial_scale
                logger.debug(f"Trajectory data shape: {trajectory_data.shape}")
                animate.follow_path(
                    gui_name=move_config["guiName"],
                    trajectory_data=trajectory_data,
                    num_keyframes=move_config["NumKeyframes"],
                    scene_time_range=time_range_in_M,
                    normalized_time_from_scene=normalized_time_from_scene,
                )

    # Add non-spherical horizon shapes (instead of spherical objects following
    # trajectories)
    if "Horizons" in scene:
        for horizon_config in scene["Horizons"]:
            with animate.restore_animation_state(animation):
                horizon = pv.PVDReader(FileName=scene["Datasources"]
                                       ["Horizons"][horizon_config["Name"]])
                if horizon_config.get("InterpolateTime", False):
                    horizon = pv.TemporalInterpolator(
                        Input=horizon, DiscreteTimeStepInterval=0)
            if "TimeShift" in horizon_config:
                horizon = animate.apply_time_shift(horizon,
                                                   horizon_config["TimeShift"],
                                                   animation)
            # Try to make horizon surfaces smooth. At low angular resoluton
            # they still show artifacts, so perhaps more can be done.
            horizon = pv.ExtractSurface(Input=horizon)
            horizon = pv.GenerateSurfaceNormals(Input=horizon)
            horizon_rep_config = horizon_config.get("Representation", {})
            if "Representation" not in horizon_rep_config:
                horizon_rep_config["Representation"] = "Surface"
            if "AmbientColor" not in horizon_rep_config:
                horizon_rep_config["AmbientColor"] = [0.0, 0.0, 0.0]
            if "DiffuseColor" not in horizon_rep_config:
                horizon_rep_config["DiffuseColor"] = [0.0, 0.0, 0.0]
            if "Specular" not in horizon_rep_config:
                horizon_rep_config["Specular"] = 0.2
            if "SpecularPower" not in horizon_rep_config:
                horizon_rep_config["SpecularPower"] = 10
            if "SpecularColor" not in horizon_rep_config:
                horizon_rep_config["SpecularColor"] = [1.0, 1.0, 1.0]
            if "ColorBy" in horizon_rep_config:
                horizon_color_by = config_color.extract_color_by(
                    horizon_rep_config)
            else:
                horizon_color_by = None
            horizon_rep = pv.Show(horizon, view, **horizon_rep_config)
            if horizon_color_by is not None:
                pv.ColorBy(horizon_rep, value=horizon_color_by)
            # Animate visibility
            if "Visibility" in horizon_config:
                animate.apply_visibility(
                    horizon,
                    horizon_config["Visibility"],
                    normalized_time_from_scene=normalized_time_from_scene,
                    scene_time_from_real=scene_time_from_real,
                )
            if "Contours" in horizon_config:
                for contour_config in horizon_config["Contours"]:
                    contour = pv.Contour(Input=horizon,
                                         **contour_config["Object"])
                    contour_rep = pv.Show(contour, view,
                                          **contour_config["Representation"])
                    pv.ColorBy(contour_rep, None)
                    if "Visibility" in horizon_config:
                        animate.apply_visibility(
                            contour,
                            horizon_config["Visibility"],
                            normalized_time_from_scene=
                            normalized_time_from_scene,
                            scene_time_from_real=scene_time_from_real,
                        )

    # Configure transfer functions
    if "TransferFunctions" in scene:
        for tf_config in scene["TransferFunctions"]:
            colored_field = tf_config["Field"]
            transfer_fctn = pv.GetColorTransferFunction(colored_field)
            opacity_fctn = pv.GetOpacityTransferFunction(colored_field)
            tf.configure_transfer_function(transfer_fctn, opacity_fctn,
                                           tf_config["TransferFunction"])

    # Save state file before configuring camera keyframes.
    # TODO: Make camera keyframes work with statefile
    if save_state_to_file is not None:
        pv.SaveState(save_state_to_file + ".pvsm")

    # Camera shots
    # TODO: Make this work with freezing time while the camera is swinging
    if animation is None:
        for i, shot in enumerate(scene["CameraShots"]):
            if (i == len(scene["CameraShots"]) - 1 or
                (shot["Time"] if "Time" in shot else 0.0) >= view.ViewTime):
                camera_motion.apply(shot)
                break
    else:
        camera_motion.apply_swings(
            scene["CameraShots"],
            scene_time_range=time_range_in_M,
            scene_time_from_real=scene_time_from_real,
            normalized_time_from_scene=normalized_time_from_scene,
        )

    # Report time
    if animation is not None:
        report_time_cue = pv.PythonAnimationCue()
        report_time_cue.Script = """
def start_cue(self): pass

def tick(self):
    import paraview.simple as pv
    import logging
    logger = logging.getLogger('Animation')
    scene_time = pv.GetActiveView().ViewTime
    logger.info(f"Scene time: {scene_time}")

def end_cue(self): pass
"""
        animation.Cues.append(report_time_cue)

    if show_preview and animation is not None:
        animation.PlayMode = "Real Time"
        animation.Duration = 10
        animation.Play()
        animation.PlayMode = "Sequence"

    if no_render:
        logger.info("No rendering requested. Total time:"
                    f" {time.time() - render_start_time:.2f}s")
        return

    if frames_dir is None:
        raise RuntimeError("Trying to render but `frames_dir` is not set.")
    if os.path.exists(frames_dir):
        logger.warning(
            f"Output directory '{frames_dir}' exists, files may be overwritten."
        )
    else:
        os.makedirs(frames_dir)

    if animation is None:
        pv.Render()
        pv.SaveScreenshot(os.path.join(frames_dir, "frame.png"))
    else:
        # Iterate over frames manually to support filling in missing frames.
        # If `pv.SaveAnimation` would support that, here's how it could be
        # invoked:
        # pv.SaveAnimation(
        #     os.path.join(frames_dir, 'frame.png'),
        #     view,
        #     animation,
        #     FrameWindow=frame_window,
        #     SuffixFormat='.%06d')
        # Note that `FrameWindow` appears to be buggy, so we set up the
        # `animation` according to the `frame_window` above so the frame files
        # are numberd correctly.
        for animation_window_frame_i in animation_window_frame_range:
            frame_i = frame_window[0] + animation_window_frame_i
            frame_file = os.path.join(frames_dir, f"frame.{frame_i:06d}.png")
            if render_missing_frames and os.path.exists(frame_file):
                continue
            logger.debug(f"Rendering frame {frame_i}...")
            animation.AnimationTime = (
                animation.StartTime +
                time_per_frame_in_M * animation_window_frame_i)
            pv.Render()
            pv.SaveScreenshot(frame_file)
            logger.info(f"Rendered frame {frame_i}.")

    logger.info(
        f"Rendering done. Total time: {time.time() - render_start_time:.2f}s")
Пример #19
0
import pv_utils as utils
import paraview.simple as pv

# Render the colored surface of a Tecplot dataset and save it to an image.
TEC_FILE = 'data/rhoda.tec'

# Geometric center of the dataset, used as the camera focal point below.
dataset_center = utils.GetCenter(tecFile=TEC_FILE)

# White-background render view sized to the output image.
view = pv.CreateRenderView(ViewSize=[700, 500], Background=[1.0, 1.0, 1.0])

# Color the surface, focus the camera on the dataset, and orient it.
utils.ColorSurface(tecFile=TEC_FILE, view=view, opacity=1.0)
utils.SetCameraFocus(tecFile=TEC_FILE, view=view, camFoc=dataset_center)
utils.SetOrientation(view=view, camPosDir=[-1.0, -0.5, 0.3])

pv.SaveScreenshot('surface.png', magnification=1, quality=100, view=view)
Пример #20
0
    def __init__(self, filepath='.'):
        """Build the ParFlow visualization pipeline.

        Creates two render views (surface and subsurface), reads the ParFlow
        dataset, and constructs derived pipelines: water-table-depth glyphs,
        a water-balance-over-time plot, and a subsurface subset extract.
        Finishes by resetting both cameras and synchronizing the animation
        scene with the data's time steps.

        :param filepath: path handed to ``simple.ParFlowReader``
            (default: current working directory).
        """
        self.filepath = filepath
        self.time = 0.0  # current animation time
        # Color-mode flags: 0 selects the "local" (current-time-step) range.
        self.surfaceColorMode = 0  # Local range
        self.subSurfaceColorMode = 0  # Local range

        # Surface view: 2D interaction with parallel projection, light-gray
        # background, orientation axes hidden, no render-on-interaction.
        self.viewSurface = simple.CreateRenderView(True)
        self.viewSurface.EnableRenderOnInteraction = 0
        self.viewSurface.OrientationAxesVisibility = 0
        self.viewSurface.Background = [0.9, 0.9, 0.9]
        self.viewSurface.InteractionMode = '2D'
        self.viewSurface.CameraParallelProjection = 1

        # SubSurface view: configured identically to the surface view.
        self.viewSubSurface = simple.CreateRenderView(True)
        self.viewSubSurface.EnableRenderOnInteraction = 0
        self.viewSubSurface.OrientationAxesVisibility = 0
        self.viewSubSurface.Background = [0.9, 0.9, 0.9]
        self.viewSubSurface.InteractionMode = '2D'
        self.viewSubSurface.CameraParallelProjection = 1

        # Read dataset. As wired below, output port 1 carries the surface
        # and port 0 the subsurface.
        self.reader = simple.ParFlowReader(FileName=filepath, DeflectTerrain=1)
        self.readerSurface = simple.OutputPort(self.reader, 1)
        self.readerSubSurface = simple.OutputPort(self.reader, 0)

        # Water table depth: compute the depth field, take cell centers, then
        # build a vector array 'wtdVect' whose k-component is the depth
        # (i/j components are constant 1) to drive per-component glyph scaling.
        self.waterTableDepth = simple.WaterTableDepth(
            Subsurface=self.readerSubSurface, Surface=self.readerSurface)
        self.cellCenter = simple.CellCenters(Input=self.waterTableDepth)
        self.wtdVectCalc = simple.Calculator(Input=self.cellCenter)
        self.wtdVectCalc.ResultArrayName = 'wtdVect'
        self.wtdVectCalc.Function = 'iHat + jHat + kHat * water table depth'

        # Cylinder glyphs scaled per-component by 'wtdVect'. The 90° rotation
        # about x presumably aligns the cylinder axis with the vertical
        # (depth) direction — TODO confirm against the rendered output.
        self.waterTableDepthGlyph = simple.Glyph(
            Input=self.wtdVectCalc,
            GlyphType='Cylinder',
            ScaleFactor=500,
            GlyphMode='All Points',
            GlyphTransform='Transform2',
            ScaleArray=['POINTS', 'wtdVect'],
            VectorScaleMode='Scale by Components',
        )
        self.waterTableDepthGlyph.GlyphTransform.Rotate = [90.0, 0.0, 0.0]
        self.waterTableDepthGlyph.GlyphType.Resolution = 12
        self.waterTableDepthGlyph.GlyphType.Radius = 0.25
        self.waterTableRepresentation = simple.Show(self.waterTableDepthGlyph,
                                                    self.viewSubSurface)
        # Glyphs start hidden until explicitly enabled.
        self.waterTableRepresentation.Visibility = 0

        # Water balance, plotted over all time steps.
        self.waterBalance = simple.WaterBalance(
            Subsurface=self.readerSubSurface, Surface=self.readerSurface)
        self.waterBalanceOverTime = simple.PlotGlobalVariablesOverTime(
            Input=self.waterBalance)

        # Surface representation, with its scalar bar shown.
        self.surfaceRepresentation = simple.Show(self.readerSurface,
                                                 self.viewSurface)
        self.surfaceRepresentation.SetScalarBarVisibility(
            self.viewSurface, True)

        # SubSurface representation + slice extract. Update the pipeline first
        # so the dataset extent is available for the subset filter.
        self.reader.UpdatePipeline()
        self.voi = self.reader.GetClientSideObject().GetOutputDataObject(
            0).GetExtent()
        self.extractSubset = simple.ExtractSubset(Input=self.readerSubSurface)
        self.subSurfaceRepresentation = simple.Show(self.extractSubset,
                                                    self.viewSubSurface)
        self.subSurfaceRepresentation.Representation = 'Surface'

        # Reset camera + center of rotation so rotation pivots on the data.
        simple.Render(self.viewSurface)
        simple.ResetCamera(self.viewSurface)
        self.viewSurface.CenterOfRotation = self.viewSurface.CameraFocalPoint
        simple.Render(self.viewSubSurface)
        simple.ResetCamera(self.viewSubSurface)
        self.viewSubSurface.CenterOfRotation = self.viewSubSurface.CameraFocalPoint

        # Time management: drive the animation from the data's time steps.
        self.animationScene = simple.GetAnimationScene()
        self.animationScene.UpdateAnimationUsingDataTimeSteps()