Example #1
def sample(dataDir, outputDir):
    convert(os.path.join(dataDir, 'Data/bot2.wrl'), outputDir, True, True)
    convert(os.path.join(dataDir, 'Data/can.ex2'), outputDir)
    convert(os.path.join(dataDir, 'Data/can.ex2'), outputDir, True, True,
            'can_MS.ex2')
    convert(os.path.join(dataDir, 'Data/can.ex2'), outputDir, True, False,
            'can_M.ex2')
    convert(os.path.join(dataDir, 'Data/can.ex2'), outputDir, False, True,
            'can_S.ex2')
    convert(os.path.join(dataDir, 'Data/disk_out_ref.ex2'), outputDir, True,
            False, 'disk_out_ref_M.ex2')
    convert(os.path.join(dataDir, 'Data/disk_out_ref.ex2'), outputDir)
    convert(os.path.join(dataDir, 'Data/RectGrid2.vtk'), outputDir)

    # Create image data based on the Wavelet source
    wavelet = simple.Wavelet()
    wavelet.UpdatePipeline()
    imageData = wavelet.GetClientSideObject().GetOutputDataObject(0)
    writeDataSet('Wavelet.vti', imageData, outputDir)

    # Create a table based on the disk_out_ref
    diskout = simple.ExtractSurface(
        simple.MergeBlocks(
            simple.OpenDataFile(os.path.join(dataDir,
                                             'Data/disk_out_ref.ex2'))))
    diskout.UpdatePipeline()
    unstructuredGrid = diskout.GetClientSideObject().GetOutputDataObject(0)
    table = vtkTable()
    _nbFields = unstructuredGrid.GetPointData().GetNumberOfArrays()
    for i in range(_nbFields):
        table.AddColumn(unstructuredGrid.GetPointData().GetArray(i))
    writeDataSet('table', table, outputDir)
    def GetMapper(self, obj, var=None, scale=[1., 1., 1.], lut=None):
        '''
        Map the ParaView object to a vtkPolyDataMapper by extracting the
        surface. Generally, points inside of the mesh (volume) are not
        important.
        '''
        # Scale the object
        tsfm = pv.Transform(Input=obj)
        tsfm.Transform = 'Transform'
        tsfm.Transform.Scale = scale
        # Convert cell data to point data
        cd2pd = pv.CellDatatoPointData(Input=tsfm)
        # Fetch the data from the proxy
        vtkObj = pv.servermanager.Fetch(cd2pd)
        # In case of a vtkMultiBlockDataSet, extract the first block
        if vtkObj.IsA('vtkMultiBlockDataSet'):
            vtkObj = vtkObj.GetBlock(0)
        # A vtkPolyData is needed; if not found, extract the surface only
        if not vtkObj.IsA('vtkPolyData'):
            vtkObj = pv.servermanager.Fetch(pv.ExtractSurface(Input=cd2pd))
        # A vtkPolyData should have been obtained by now
        if not vtkObj.IsA('vtkPolyData'):
            raise ValueError('Cannot obtain a vtkPolyData')
        # Map the user-defined variable as the scalar array
        if var is not None:
            vtkObj.GetPointData().SetScalars(
                vtkObj.GetPointData().GetArray(var))
        # Create the PolyDataMapper
        vtkObjMapper = vtk.vtkPolyDataMapper()
        vtkObjMapper.SetInputData(vtkObj)
        # Add the lookup table if needed
        if lut is not None:
            vtkObjMapper.SetLookupTable(self.GetLUT(lut))
        return vtkObjMapper
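
The pattern this method relies on, fetching a proxy's output client-side and forcing a polygonal surface before mapping, also works outside a class. Below is a minimal standalone sketch, assuming a pvpython environment; the Sphere source is only a placeholder input:

import paraview.simple as pv
import vtk

# Placeholder source proxy; any ParaView pipeline object would do.
source = pv.Sphere(Radius=1.0)
# Guarantee polygonal output, then bring the data to the client side.
surface = pv.ExtractSurface(Input=source)
polyData = pv.servermanager.Fetch(surface)
# Wrap the client-side vtkPolyData in a standard VTK mapper.
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polyData)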
Example #3
def add_scene_item(scene, name, proxy, view):
    hasNormal = False
    hasColor = False
    colors = {}
    representation = {}
    rep = simple.GetRepresentation(proxy, view)

    # Skip hidden object or volume
    if not rep.Visibility or rep.Representation == 'Volume':
        return

    for prop in ['Representation']:
        representation[prop] = rep.GetProperty(prop).GetData()

    pdInfo = proxy.GetPointDataInformation()
    numberOfPointArrays = pdInfo.GetNumberOfArrays()
    for idx in range(numberOfPointArrays):
        array = pdInfo.GetArray(idx)
        rangeValues = array.GetRange(-1)
        if array.Name == 'Normals':
            hasNormal = True
        if array.Name not in ['vtkValidPointMask', 'Normals']:
            hasColor = True
            if rangeValues[0] == rangeValues[1]:
                colors[array.Name] = {'constant': rangeValues[0]}
            else:
                colors[array.Name] = {
                    'location': 'POINT_DATA',
                    'range': [i for i in rangeValues]
                }

    # Get information about cell data arrays
    cdInfo = proxy.GetCellDataInformation()
    numberOfCellArrays = cdInfo.GetNumberOfArrays()
    for idx in range(numberOfCellArrays):
        array = cdInfo.GetArray(idx)
        hasColor = True
        colors[array.Name] = {
            'location': 'CELL_DATA',
            'range': array.GetRange(-1)
        }

    # Make sure Normals are available if lighting by normals
    source = proxy
    if not hasColor or rep.Representation == 'Outline':
        colors = {'solid': {'constant': 0}}
    elif 'normal' in scene['light'] and not hasNormal:
        rep.Visibility = 0
        surface = simple.ExtractSurface(Input=proxy)
        surfaceWithNormals = simple.GenerateSurfaceNormals(Input=surface)
        source = surfaceWithNormals

    scene['scene'].append({
        'name': name,
        'source': source,
        'colors': colors,
        'representation': representation
    })
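
A hypothetical driver for add_scene_item, assuming a pvpython environment; the data file, object name, and light settings below are placeholders rather than values taken from the original script:

from paraview import simple

view = simple.GetActiveViewOrCreate('RenderView')
reader = simple.OpenDataFile('disk_out_ref.ex2')  # placeholder dataset
simple.Show(reader, view)

scene = {'light': ['intensity', 'normal'], 'scene': []}
add_scene_item(scene, 'disk_out_ref', reader, view)
print(scene['scene'][0]['colors'])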
Example #4
        class Pipeline:

            # Create data source "input" (provides simulation fields)
            simData = coprocessor.CreateProducer(datadescription, "input")

            # Extract the grid patches from the AMR grid and convert to
            # VTK polydata - this is required as a workaround for live
            # visualisation, which does not currently support AMR container
            # grid formats
            extractLevel = pvs.ExtractLevel(Input=simData)
            extractLevel.Levels = [0]
            extractSurface = pvs.ExtractSurface(Input=extractLevel)
Example #5
def convert(inputFile, outputDir, merge=False, extract=False, newName=None):
    print(inputFile, outputDir)
    reader = simple.OpenDataFile(inputFile)
    activeSource = reader

    if merge:
        activeSource = simple.MergeBlocks(activeSource)

    if extract:
        activeSource = simple.ExtractSurface(activeSource)

    activeSource.UpdatePipeline()
    dataObject = activeSource.GetClientSideObject().GetOutputDataObject(0)

    if 'TimestepValues' in reader.ListProperties():
        if len(reader.TimestepValues) == 0:
            writeDataSet(inputFile, dataObject, outputDir, newName)
        else:
            writeTimeDataSource(inputFile, reader, activeSource, outputDir,
                                newName)
    else:
        writeDataSet(inputFile, dataObject, outputDir, newName)
Example #6
    def reload_models(self):
        self._clean_vizard_models()

        # Apply clip filter
        data = self._apply_clip(self.vtk_data)

        # Iterate over every material
        material_range = self.vtk_data_local.GetCellData().GetArray(
            cfg.material_name).GetRange()
        for material in range(int(material_range[0]),
                              int(material_range[1]) + 1):
            # Separate the data by material
            material_data = pv.Threshold(Input=data)
            self._filters += [material_data]
            # Settings are mostly generated by the ParaView Trace function
            material_data.Scalars = ['CELLS', cfg.material_name]
            material_data.ThresholdRange = [material, material]

            # Generate cloud
            local = pv.servermanager.Fetch(material_data)
            self.cloud_materials[material] = self.generate_cloud(
                local, cfg.coloring_name)
            self.cloud_materials[material].setParent(self.cloud_node)

            # Apply ExtractSurface and Triangulate filter
            extractSurface = pv.ExtractSurface(Input=material_data)
            self._filters += [extractSurface]
            # Settings are generated by the ParaView Trace function
            extractSurface.PieceInvariant = 1
            extractSurface.NonlinearSubdivisionLevel = 1
            triangulate = pv.Triangulate(Input=extractSurface)
            self._filters += [triangulate]

            # Generate surface
            local = pv.servermanager.Fetch(triangulate)
            self.surface_materials[material] = self.generate_surface(
                local, cfg.coloring_name)
            self.surface_materials[material].setParent(self.surface_node)
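
The per-material loop above reduces to a Threshold, ExtractSurface/Triangulate, and Fetch chain. Here is a minimal standalone sketch, assuming a pvpython environment; the Wavelet source, scalar name, and threshold range stand in for the simulation data and cfg values used above:

import paraview.simple as pv

source = pv.Wavelet()                         # placeholder for self.vtk_data
threshold = pv.Threshold(Input=source)
threshold.Scalars = ['POINTS', 'RTData']      # placeholder scalar field
threshold.ThresholdRange = [100.0, 200.0]     # placeholder value range
surface = pv.ExtractSurface(Input=threshold)
triangulated = pv.Triangulate(Input=surface)
local = pv.servermanager.Fetch(triangulated)  # client-side vtkPolyData
print(local.GetNumberOfCells())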
Example #7
            "range": [34, 38]
        }
    }
}

# -----------------------------------------------------------------------------

from paraview import simple
from paraview.web.dataset_builder import *

# -----------------------------------------------------------------------------
# Pipeline creation
# -----------------------------------------------------------------------------

core = simple.OpenDataFile(earthCore)
coreSurface = simple.ExtractSurface(Input=core)
coreWithNormals = simple.GenerateSurfaceNormals(Input=coreSurface)

reader = simple.OpenDataFile(inputFile % time[0])
reader.CellArrayStatus = ['temperature', 'salinity']

dataCleanUp = simple.Threshold(Input=reader,
                               Scalars=['CELLS', 'temperature'],
                               ThresholdRange=[-1000.0, 50.0])
dataToPoints = simple.CellDatatoPointData(Input=dataCleanUp)

sceneDescription = {
    'size': [500, 500],
    'light': ['intensity', 'normal'],
    'camera': {
        'CameraViewUp': [0.0, 0.0, 1.0],
Example #8
    def writeData(self, time=0):
        if not self.dataHandler.can_write:
            return

        currentScene = []
        for data in self.config['scene']:
            currentData = {'name': data['name'], 'fields': {}}
            currentScene.append(currentData)
            if self.surfaceExtract:
                self.merge.Input = data['source']
            else:
                self.merge = simple.MergeBlocks(Input=data['source'],
                                                MergePoints=0)
                self.surfaceExtract = simple.ExtractSurface(Input=self.merge)

            # Extract surface
            self.surfaceExtract.UpdatePipeline(time)
            ds = self.surfaceExtract.SMProxy.GetClientSideObject(
            ).GetOutputDataObject(0)
            originalDS = data['source'].SMProxy.GetClientSideObject(
            ).GetOutputDataObject(0)

            originalPoints = ds.GetPoints()

            # Points
            points = vtkFloatArray()
            nbPoints = originalPoints.GetNumberOfPoints()
            points.SetNumberOfComponents(3)
            points.SetNumberOfTuples(nbPoints)
            for idx in range(nbPoints):
                coord = originalPoints.GetPoint(idx)
                points.SetTuple3(idx, coord[0], coord[1], coord[2])

            pBuffer = buffer(points)
            pMd5 = hashlib.md5(pBuffer).hexdigest()
            pPath = os.path.join(self.dataHandler.getBasePath(), 'points',
                                 "%s.Float32Array" % pMd5)
            currentData['points'] = 'points/%s.Float32Array' % pMd5
            with open(pPath, 'wb') as f:
                f.write(pBuffer)

            # Polys
            poly = ds.GetPolys()
            nbCells = poly.GetNumberOfCells()
            cellLocation = 0
            idList = vtkIdList()
            topo = vtkTypeUInt32Array()
            topo.Allocate(poly.GetData().GetNumberOfTuples())

            for cellIdx in range(nbCells):
                poly.GetCell(cellLocation, idList)
                cellSize = idList.GetNumberOfIds()
                cellLocation += cellSize + 1
                if cellSize == 3:
                    topo.InsertNextValue(idList.GetId(0))
                    topo.InsertNextValue(idList.GetId(1))
                    topo.InsertNextValue(idList.GetId(2))
                elif cellSize == 4:
                    topo.InsertNextValue(idList.GetId(0))
                    topo.InsertNextValue(idList.GetId(1))
                    topo.InsertNextValue(idList.GetId(3))
                    topo.InsertNextValue(idList.GetId(1))
                    topo.InsertNextValue(idList.GetId(2))
                    topo.InsertNextValue(idList.GetId(3))
                else:
                    print "Cell size of", cellSize, "not supported"

            iBuffer = buffer(topo)
            iMd5 = hashlib.md5(iBuffer).hexdigest()
            iPath = os.path.join(self.dataHandler.getBasePath(), 'index',
                                 "%s.Uint32Array" % iMd5)
            currentData['index'] = 'index/%s.Uint32Array' % iMd5
            with open(iPath, 'wb') as f:
                f.write(iBuffer)

            # Grow object side
            self.objSize[data['name']]['points'] = max(
                self.objSize[data['name']]['points'], nbPoints)
            self.objSize[data['name']]['index'] = max(
                self.objSize[data['name']]['index'], topo.GetNumberOfTuples())

            # Colors / FIXME
            for fieldName, fieldInfo in data['colors'].iteritems():
                array = ds.GetPointData().GetArray(fieldName)
                tupleSize = array.GetNumberOfComponents()
                arraySize = array.GetNumberOfTuples()
                outputField = vtkFloatArray()
                outputField.SetNumberOfTuples(arraySize)
                if tupleSize == 1:
                    for i in range(arraySize):
                        outputField.SetValue(i, array.GetValue(i))
                else:
                    # compute magnitude
                    tupleIdxs = range(tupleSize)
                    for i in range(arraySize):
                        magnitude = 0
                        for j in tupleIdxs:
                            magnitude += math.pow(
                                array.GetValue(i * tupleSize + j), 2)

                        outputField.SetValue(i, math.sqrt(magnitude))

                fBuffer = buffer(outputField)
                fMd5 = hashlib.md5(fBuffer).hexdigest()
                fPath = os.path.join(self.dataHandler.getBasePath(), 'fields',
                                     "%s_%s.Float32Array" % (fieldName, fMd5))
                with open(fPath, 'wb') as f:
                    f.write(fBuffer)

                currentData['fields'][
                    fieldName] = 'fields/%s_%s.Float32Array' % (fieldName,
                                                                fMd5)

        # Write scene
        with open(self.dataHandler.getDataAbsoluteFilePath('scene'), 'w') as f:
            f.write(json.dumps(currentScene, indent=4))
# -----------------------------------------------------------------------------

from paraview import simple
from paraview.web.dataset_builder import *

# -----------------------------------------------------------------------------
# Pipeline creation
# -----------------------------------------------------------------------------

reader = simple.OpenDataFile(inputFile)
reader.PointVariables = ['Temp', 'V', 'Pres', 'AsH3', 'GaMe3', 'CH4', 'H2']

clip = simple.Clip(Input=reader)
clip.ClipType.Normal = [0.0, 1.0, 0.0]
clipSurface = simple.ExtractSurface(Input=clip)
clipWithNormals = simple.GenerateSurfaceNormals(Input=clipSurface)

streamLines = simple.StreamTracer(Input=reader,
                                  SeedType="High Resolution Line Source",
                                  Vectors=['POINTS', 'V'],
                                  MaximumStreamlineLength=20.16)
streamLines.SeedType.Point2 = [5.75, 5.75, 10.15999984741211]
streamLines.SeedType.Point1 = [-5.75, -5.75, -10.0]
streamTubes = simple.Tube(Input=streamLines, Radius=0.2)
streamSurface = simple.ExtractSurface(Input=streamTubes)
streamWithNormals = simple.GenerateSurfaceNormals(Input=streamSurface)

sceneDescription = {
    'size': [500, 500],
    'light': ['intensity', 'normal'],  # 'normal'
Example #10
def render_frames(
    scene,
    frames_dir=None,
    frame_window=None,
    render_missing_frames=False,
    save_state_to_file=None,
    no_render=False,
    show_preview=False,
    show_progress=False,
    job_id=None,
):
    # Validate scene
    if scene["View"]["ViewSize"][0] % 16 != 0:
        logger.warning(
            "The view width should be a multiple of 16 to be compatible with"
            " QuickTime.")
    if scene["View"]["ViewSize"][1] % 2 != 0:
        logger.warning(
            "The view height should be even to be compatible with QuickTime.")

    render_start_time = time.time()

    # Setup layout
    layout = pv.CreateLayout("Layout")

    # Setup view
    if "Background" in scene["View"]:
        bg_config = scene["View"]["Background"]
        del scene["View"]["Background"]
        if isinstance(bg_config, list):
            if isinstance(bg_config[0], list):
                assert len(bg_config) == 2, (
                    "When 'Background' is a list of colors, it must have 2"
                    " entries.")
                bg_config = dict(
                    BackgroundColorMode="Gradient",
                    Background=parse_as.color(bg_config[0]),
                    Background2=parse_as.color(bg_config[1]),
                )
            else:
                bg_config = dict(
                    BackgroundColorMode="Single Color",
                    Background=parse_as.color(bg_config),
                )
            bg_config["UseColorPaletteForBackground"] = 0
            scene["View"].update(bg_config)
            bg_config = None
    else:
        bg_config = None
    view = pv.CreateRenderView(**scene["View"])
    pv.AssignViewToLayout(view=view, layout=layout, hint=0)

    # Set spherical background texture
    if bg_config is not None:
        bg_config["BackgroundColorMode"] = "Texture"
        skybox_datasource = bg_config["Datasource"]
        del bg_config["Datasource"]
        background_texture = pvserver.rendering.ImageTexture(
            FileName=parse_as.path(scene["Datasources"][skybox_datasource]))
        background_sphere = pv.Sphere(Radius=bg_config["Radius"],
                                      ThetaResolution=100,
                                      PhiResolution=100)
        background_texture_map = pv.TextureMaptoSphere(Input=background_sphere)
        pv.Show(
            background_texture_map,
            view,
            Texture=background_texture,
            BackfaceRepresentation="Cull Frontface",
            Ambient=1.0,
        )

    # Load the waveform data file
    waveform_h5file, waveform_subfile = parse_as.file_and_subfile(
        scene["Datasources"]["Waveform"])
    waveform_data = WaveformDataReader(FileName=waveform_h5file,
                                       Subfile=waveform_subfile)
    pv.UpdatePipeline()

    # Generate volume data from the waveform. Also sets the available time range.
    # TODO: Pull KeepEveryNthTimestep out of datasource
    waveform_to_volume_configs = scene["WaveformToVolume"]
    if isinstance(waveform_to_volume_configs, dict):
        waveform_to_volume_configs = [{
            "Object": waveform_to_volume_configs,
        }]
        if "VolumeRepresentation" in scene:
            waveform_to_volume_configs[0]["VolumeRepresentation"] = scene[
                "VolumeRepresentation"]
    waveform_to_volume_objects = []
    for waveform_to_volume_config in waveform_to_volume_configs:
        volume_data = WaveformToVolume(
            WaveformData=waveform_data,
            SwshCacheDirectory=parse_as.path(
                scene["Datasources"]["SwshCache"]),
            **waveform_to_volume_config["Object"],
        )
        if "Modes" in waveform_to_volume_config["Object"]:
            volume_data.Modes = waveform_to_volume_config["Object"]["Modes"]
        if "Polarizations" in waveform_to_volume_config["Object"]:
            volume_data.Polarizations = waveform_to_volume_config["Object"][
                "Polarizations"]
        waveform_to_volume_objects.append(volume_data)

    # Compute timing and frames information
    time_range_in_M = (
        volume_data.TimestepValues[0],
        volume_data.TimestepValues[-1],
    )
    logger.debug(f"Full available data time range: {time_range_in_M} (in M)")
    if "FreezeTime" in scene["Animation"]:
        frozen_time = scene["Animation"]["FreezeTime"]
        logger.info(f"Freezing time at {frozen_time}.")
        view.ViewTime = frozen_time
        animation = None
    else:
        if "Crop" in scene["Animation"]:
            time_range_in_M = scene["Animation"]["Crop"]
            logger.debug(f"Cropping time range to {time_range_in_M} (in M).")
        animation_speed = scene["Animation"]["Speed"]
        frame_rate = scene["Animation"]["FrameRate"]
        num_frames = animate.num_frames(
            max_animation_length=time_range_in_M[1] - time_range_in_M[0],
            animation_speed=animation_speed,
            frame_rate=frame_rate,
        )
        animation_length_in_seconds = num_frames / frame_rate
        animation_length_in_M = animation_length_in_seconds * animation_speed
        time_per_frame_in_M = animation_length_in_M / num_frames
        logger.info(f"Rendering {animation_length_in_seconds:.2f}s movie with"
                    f" {num_frames} frames ({frame_rate} FPS or"
                    f" {animation_speed:.2e} M/s or"
                    f" {time_per_frame_in_M:.2e} M/frame)...")
        if frame_window is not None:
            animation_window_num_frames = frame_window[1] - frame_window[0]
            animation_window_time_range = (
                time_range_in_M[0] + frame_window[0] * time_per_frame_in_M,
                time_range_in_M[0] +
                (frame_window[1] - 1) * time_per_frame_in_M,
            )
            logger.info(
                f"Restricting rendering to {animation_window_num_frames} frames"
                f" (numbers {frame_window[0]} to {frame_window[1] - 1}).")
        else:
            animation_window_num_frames = num_frames
            animation_window_time_range = time_range_in_M
            frame_window = (0, num_frames)

        # Setup animation so that sources can retrieve the `UPDATE_TIME_STEP`
        animation = pv.GetAnimationScene()
        # animation.UpdateAnimationUsingDataTimeSteps()
        # Since the data can be evaluated at arbitrary times we define the time steps
        # here by setting the number of frames within the full range
        animation.PlayMode = "Sequence"
        animation.StartTime = animation_window_time_range[0]
        animation.EndTime = animation_window_time_range[1]
        animation.NumberOfFrames = animation_window_num_frames
        logger.debug(
            f"Animating from scene time {animation.StartTime} to"
            f" {animation.EndTime} in {animation.NumberOfFrames} frames.")

        def scene_time_from_real(real_time):
            return (real_time / animation_length_in_seconds *
                    animation_length_in_M)

        # For some reason the keyframe time for animations is expected to be within
        # (0, 1) so we need to transform back and forth from this "normalized" time
        def scene_time_from_normalized(normalized_time):
            return animation.StartTime + normalized_time * (
                animation.EndTime - animation.StartTime)

        def normalized_time_from_scene(scene_time):
            return (scene_time - animation.StartTime) / (animation.EndTime -
                                                         animation.StartTime)

        # Set up progress measurement here already so that computing the
        # volume data for the initial frame is included
        if show_progress and not no_render:
            logging.getLogger().handlers = [TqdmLoggingHandler()]
            animation_window_frame_range = tqdm.trange(
                animation_window_num_frames,
                desc="Rendering",
                unit="frame",
                miniters=1,
                position=job_id,
            )
        else:
            animation_window_frame_range = range(animation_window_num_frames)

        # Set the initial time step
        animation.GoToFirst()

    # Display the volume data. This will trigger computing the volume data at the
    # current time step.
    for volume_data, waveform_to_volume_config in zip(
            waveform_to_volume_objects, waveform_to_volume_configs):
        vol_repr = (waveform_to_volume_config["VolumeRepresentation"]
                    if "VolumeRepresentation" in waveform_to_volume_config else
                    {})
        volume_color_by = config_color.extract_color_by(vol_repr)
        if (vol_repr["VolumeRenderingMode"] == "GPU Based"
                and len(volume_color_by) > 2):
            logger.warning(
                "The 'GPU Based' volume renderer doesn't support multiple"
                " components.")
        volume = pv.Show(volume_data, view, **vol_repr)
        pv.ColorBy(volume, value=volume_color_by)

    if "Slices" in scene:
        for slice_config in scene["Slices"]:
            slice_obj_config = slice_config.get("Object", {})
            slice = pv.Slice(Input=volume_data)
            slice.SliceType = "Plane"
            slice.SliceOffsetValues = [0.0]
            slice.SliceType.Origin = slice_obj_config.get(
                "Origin", [0.0, 0.0, -0.3])
            slice.SliceType.Normal = slice_obj_config.get(
                "Normal", [0.0, 0.0, 1.0])
            slice_rep = pv.Show(slice, view,
                                **slice_config.get("Representation", {}))
            pv.ColorBy(slice_rep, value=volume_color_by)

    # Display the time
    if "TimeAnnotation" in scene:
        time_annotation = pv.AnnotateTimeFilter(volume_data,
                                                **scene["TimeAnnotation"])
        pv.Show(time_annotation, view, **scene["TimeAnnotationRepresentation"])

    # Add spheres
    if "Spheres" in scene:
        for sphere_config in scene["Spheres"]:
            sphere = pv.Sphere(**sphere_config["Object"])
            pv.Show(sphere, view, **sphere_config["Representation"])

    # Add trajectories and objects that follow them
    if "Trajectories" in scene:
        for trajectory_config in scene["Trajectories"]:
            trajectory_name = trajectory_config["Name"]
            radial_scale = (trajectory_config["RadialScale"]
                            if "RadialScale" in trajectory_config else 1.0)
            # Load the trajectory data
            traj_data_reader = TrajectoryDataReader(
                RadialScale=radial_scale,
                **scene["Datasources"]["Trajectories"][trajectory_name],
            )
            # Make sure the data is loaded so we can retrieve timesteps.
            # TODO: This should be fixed in `TrajectoryDataReader` by
            # communicating time range info down the pipeline, but we had issues
            # with that (see also `WaveformDataReader`).
            traj_data_reader.UpdatePipeline()
            if "Objects" in trajectory_config:
                with animate.restore_animation_state(animation):
                    follow_traj = FollowTrajectory(
                        TrajectoryData=traj_data_reader)
                for traj_obj_config in trajectory_config["Objects"]:
                    for traj_obj_key in traj_obj_config:
                        if traj_obj_key in [
                                "Representation",
                                "Visibility",
                                "TimeShift",
                                "Glyph",
                        ]:
                            continue
                        traj_obj_type = getattr(pv, traj_obj_key)
                        traj_obj_glyph = traj_obj_type(
                            **traj_obj_config[traj_obj_key])
                    follow_traj.UpdatePipeline()
                    traj_obj = pv.Glyph(Input=follow_traj,
                                        GlyphType=traj_obj_glyph)
                    # Can't set this in the constructor for some reason
                    traj_obj.ScaleFactor = 1.0
                    for glyph_property in (traj_obj_config["Glyph"] if "Glyph"
                                           in traj_obj_config else []):
                        setattr(
                            traj_obj,
                            glyph_property,
                            traj_obj_config["Glyph"][glyph_property],
                        )
                    traj_obj.UpdatePipeline()
                    if "TimeShift" in traj_obj_config:
                        traj_obj = animate.apply_time_shift(
                            traj_obj, traj_obj_config["TimeShift"])
                    pv.Show(traj_obj, view,
                            **traj_obj_config["Representation"])
                    if "Visibility" in traj_obj_config:
                        animate.apply_visibility(
                            traj_obj,
                            traj_obj_config["Visibility"],
                            normalized_time_from_scene,
                            scene_time_from_real,
                        )
            if "Tail" in trajectory_config:
                with animate.restore_animation_state(animation):
                    traj_tail = TrajectoryTail(TrajectoryData=traj_data_reader)
                if "TimeShift" in trajectory_config:
                    traj_tail = animate.apply_time_shift(
                        traj_tail, trajectory_config["TimeShift"])
                tail_config = trajectory_config["Tail"]
                traj_color_by = config_color.extract_color_by(tail_config)
                if "Visibility" in tail_config:
                    tail_visibility_config = tail_config["Visibility"]
                    del tail_config["Visibility"]
                else:
                    tail_visibility_config = None
                tail_rep = pv.Show(traj_tail, view, **tail_config)
                pv.ColorBy(tail_rep, value=traj_color_by)
                if tail_visibility_config is not None:
                    animate.apply_visibility(
                        traj_tail,
                        tail_visibility_config,
                        normalized_time_from_scene=normalized_time_from_scene,
                        scene_time_from_real=scene_time_from_real,
                    )
            if "Move" in trajectory_config:
                move_config = trajectory_config["Move"]
                logger.debug(
                    f"Animating '{move_config['guiName']}' along trajectory.")
                with h5py.File(trajectory_file, "r") as traj_data_file:
                    trajectory_data = np.array(
                        traj_data_file[trajectory_subfile])
                if radial_scale != 1.0:
                    trajectory_data[:, 1:] *= radial_scale
                logger.debug(f"Trajectory data shape: {trajectory_data.shape}")
                animate.follow_path(
                    gui_name=move_config["guiName"],
                    trajectory_data=trajectory_data,
                    num_keyframes=move_config["NumKeyframes"],
                    scene_time_range=time_range_in_M,
                    normalized_time_from_scene=normalized_time_from_scene,
                )

    # Add non-spherical horizon shapes (instead of spherical objects following
    # trajectories)
    if "Horizons" in scene:
        for horizon_config in scene["Horizons"]:
            with animate.restore_animation_state(animation):
                horizon = pv.PVDReader(FileName=scene["Datasources"]
                                       ["Horizons"][horizon_config["Name"]])
                if horizon_config.get("InterpolateTime", False):
                    horizon = pv.TemporalInterpolator(
                        Input=horizon, DiscreteTimeStepInterval=0)
            if "TimeShift" in horizon_config:
                horizon = animate.apply_time_shift(horizon,
                                                   horizon_config["TimeShift"],
                                                   animation)
            # Try to make horizon surfaces smooth. At low angular resolution
            # they still show artifacts, so perhaps more can be done.
            horizon = pv.ExtractSurface(Input=horizon)
            horizon = pv.GenerateSurfaceNormals(Input=horizon)
            horizon_rep_config = horizon_config.get("Representation", {})
            if "Representation" not in horizon_rep_config:
                horizon_rep_config["Representation"] = "Surface"
            if "AmbientColor" not in horizon_rep_config:
                horizon_rep_config["AmbientColor"] = [0.0, 0.0, 0.0]
            if "DiffuseColor" not in horizon_rep_config:
                horizon_rep_config["DiffuseColor"] = [0.0, 0.0, 0.0]
            if "Specular" not in horizon_rep_config:
                horizon_rep_config["Specular"] = 0.2
            if "SpecularPower" not in horizon_rep_config:
                horizon_rep_config["SpecularPower"] = 10
            if "SpecularColor" not in horizon_rep_config:
                horizon_rep_config["SpecularColor"] = [1.0, 1.0, 1.0]
            if "ColorBy" in horizon_rep_config:
                horizon_color_by = config_color.extract_color_by(
                    horizon_rep_config)
            else:
                horizon_color_by = None
            horizon_rep = pv.Show(horizon, view, **horizon_rep_config)
            if horizon_color_by is not None:
                pv.ColorBy(horizon_rep, value=horizon_color_by)
            # Animate visibility
            if "Visibility" in horizon_config:
                animate.apply_visibility(
                    horizon,
                    horizon_config["Visibility"],
                    normalized_time_from_scene=normalized_time_from_scene,
                    scene_time_from_real=scene_time_from_real,
                )
            if "Contours" in horizon_config:
                for contour_config in horizon_config["Contours"]:
                    contour = pv.Contour(Input=horizon,
                                         **contour_config["Object"])
                    contour_rep = pv.Show(contour, view,
                                          **contour_config["Representation"])
                    pv.ColorBy(contour_rep, None)
                    if "Visibility" in horizon_config:
                        animate.apply_visibility(
                            contour,
                            horizon_config["Visibility"],
                            normalized_time_from_scene=
                            normalized_time_from_scene,
                            scene_time_from_real=scene_time_from_real,
                        )

    # Configure transfer functions
    if "TransferFunctions" in scene:
        for tf_config in scene["TransferFunctions"]:
            colored_field = tf_config["Field"]
            transfer_fctn = pv.GetColorTransferFunction(colored_field)
            opacity_fctn = pv.GetOpacityTransferFunction(colored_field)
            tf.configure_transfer_function(transfer_fctn, opacity_fctn,
                                           tf_config["TransferFunction"])

    # Save state file before configuring camera keyframes.
    # TODO: Make camera keyframes work with statefile
    if save_state_to_file is not None:
        pv.SaveState(save_state_to_file + ".pvsm")

    # Camera shots
    # TODO: Make this work with freezing time while the camera is swinging
    if animation is None:
        for i, shot in enumerate(scene["CameraShots"]):
            if (i == len(scene["CameraShots"]) - 1 or
                (shot["Time"] if "Time" in shot else 0.0) >= view.ViewTime):
                camera_motion.apply(shot)
                break
    else:
        camera_motion.apply_swings(
            scene["CameraShots"],
            scene_time_range=time_range_in_M,
            scene_time_from_real=scene_time_from_real,
            normalized_time_from_scene=normalized_time_from_scene,
        )

    # Report time
    if animation is not None:
        report_time_cue = pv.PythonAnimationCue()
        report_time_cue.Script = """
def start_cue(self): pass

def tick(self):
    import paraview.simple as pv
    import logging
    logger = logging.getLogger('Animation')
    scene_time = pv.GetActiveView().ViewTime
    logger.info(f"Scene time: {scene_time}")

def end_cue(self): pass
"""
        animation.Cues.append(report_time_cue)

    if show_preview and animation is not None:
        animation.PlayMode = "Real Time"
        animation.Duration = 10
        animation.Play()
        animation.PlayMode = "Sequence"

    if no_render:
        logger.info("No rendering requested. Total time:"
                    f" {time.time() - render_start_time:.2f}s")
        return

    if frames_dir is None:
        raise RuntimeError("Trying to render but `frames_dir` is not set.")
    if os.path.exists(frames_dir):
        logger.warning(
            f"Output directory '{frames_dir}' exists, files may be overwritten."
        )
    else:
        os.makedirs(frames_dir)

    if animation is None:
        pv.Render()
        pv.SaveScreenshot(os.path.join(frames_dir, "frame.png"))
    else:
        # Iterate over frames manually to support filling in missing frames.
        # If `pv.SaveAnimation` would support that, here's how it could be
        # invoked:
        # pv.SaveAnimation(
        #     os.path.join(frames_dir, 'frame.png'),
        #     view,
        #     animation,
        #     FrameWindow=frame_window,
        #     SuffixFormat='.%06d')
        # Note that `FrameWindow` appears to be buggy, so we set up the
        # `animation` according to the `frame_window` above so the frame files
        # are numbered correctly.
        for animation_window_frame_i in animation_window_frame_range:
            frame_i = frame_window[0] + animation_window_frame_i
            frame_file = os.path.join(frames_dir, f"frame.{frame_i:06d}.png")
            if render_missing_frames and os.path.exists(frame_file):
                continue
            logger.debug(f"Rendering frame {frame_i}...")
            animation.AnimationTime = (
                animation.StartTime +
                time_per_frame_in_M * animation_window_frame_i)
            pv.Render()
            pv.SaveScreenshot(frame_file)
            logger.info(f"Rendered frame {frame_i}.")

    logger.info(
        f"Rendering done. Total time: {time.time() - render_start_time:.2f}s")