Example #1
    def test_XDMF_shape(self, tmp_path, single_phase):
        os.chdir(tmp_path)

        single_phase.export_XDMF()
        fname = os.path.splitext(os.path.basename(
            single_phase.fname))[0] + '.xdmf'
        reader_xdmf = vtk.vtkXdmfReader()
        reader_xdmf.SetFileName(fname)
        reader_xdmf.Update()
        dim_xdmf = reader_xdmf.GetOutput().GetDimensions()
        bounds_xdmf = reader_xdmf.GetOutput().GetBounds()

        single_phase.view('increments', 0).export_VTK()
        fname = os.path.splitext(os.path.basename(
            single_phase.fname))[0] + '_inc00.vti'
        for i in range(10):  # waiting for parallel IO
            reader_vti = vtk.vtkXMLImageDataReader()
            reader_vti.SetFileName(fname)
            reader_vti.Update()
            dim_vti = reader_vti.GetOutput().GetDimensions()
            bounds_vti = reader_vti.GetOutput().GetBounds()
            if dim_vti == dim_xdmf and bounds_vti == bounds_xdmf:
                return
            time.sleep(.5)

        assert False, 'VTI file never matched XDMF dimensions/bounds'
Example #2
File: mesh_io.py  Project: nschloe/nosh
def read(filenames, timestep=None):
    '''Reads an unstructured mesh with added data.

    :param filenames: The file(s) to read from.
    :type filenames: str or list of str
    :param timestep: Time step to read from, in case of an Exodus input mesh.
    :type timestep: int, optional
    :returns: The VTK mesh data object.
    '''
    if isinstance(filenames, (list, tuple)) and len(filenames) == 1:
        filenames = filenames[0]

    if isinstance(filenames, str):
        filename = filenames
        # serial files
        extension = os.path.splitext(filename)[1]

        import re
        # setup the reader
        # TODO Most readers have CanReadFile() -- use that.
        if extension == '.vtu':
            from vtk import vtkXMLUnstructuredGridReader
            reader = vtkXMLUnstructuredGridReader()
            vtk_mesh = _read_vtk_mesh(reader, filename)
        elif extension == '.vtk':
            from vtk import vtkUnstructuredGridReader
            reader = vtkUnstructuredGridReader()
            vtk_mesh = _read_vtk_mesh(reader, filename)
        elif extension == '.xmf':
            from vtk import vtkXdmfReader
            reader = vtkXdmfReader()
            vtk_mesh = _read_vtk_mesh(reader, filename)
        elif extension in ['.ex2', '.exo', '.e']:
            from vtk import vtkExodusIIReader
            reader = vtkExodusIIReader()
            reader.SetFileName(filename)
            vtk_mesh = _read_exodusii_mesh(reader, timestep=timestep)
        elif re.match(r'[^.]*\.e\.\d+\.\d+', filename):
            # Parallel Exodus files.
            # TODO handle with vtkPExodusIIReader
            from vtk import vtkExodusIIReader
            reader = vtkExodusIIReader()
            reader.SetFileName(filename)
            vtk_mesh = _read_exodusii_mesh(reader, timestep=timestep)
        else:
            raise RuntimeError('Unknown file extension \'%s\'.' % extension)
    else:
        # Parallel files.
        # Assume Exodus format as we don't know anything else yet.
        from vtk import vtkPExodusIIReader
        # TODO Guess the file pattern or whatever.
        reader = vtkPExodusIIReader()
        reader.SetFileNames(filenames)
        vtk_mesh = _read_exodusii_mesh(reader, timestep=timestep)

    return vtk_mesh
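
A hypothetical call sketch for the read() helper above (file names are placeholders; the helpers _read_vtk_mesh and _read_exodusii_mesh are assumed to be defined elsewhere in mesh_io.py):

mesh = read('mesh.vtu')                     # serial XML unstructured grid
mesh = read('run.e', timestep=3)            # Exodus II file, third time step
mesh = read(['run.e.4.0', 'run.e.4.1',
             'run.e.4.2', 'run.e.4.3'])     # parallel Exodus pieces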
Example #3
def to_vtk_files(filepath):
    reader = vtk.vtkXdmfReader()
    reader.SetFileName(filepath)
    reader.Update()

    info = reader.GetOutputInformation(0)
    timestamps = info.Get(vtk.vtkCompositeDataPipeline.TIME_STEPS())

    grid = reader.GetOutput()

    writer = vtk.vtkXMLUnstructuredGridWriter()

    dir_name = 'proteus_vtu'

    if not os.path.exists(dir_name):
        os.makedirs(dir_name)

    for index, timestamp in enumerate(timestamps):
        reader.UpdateTimeStep(timestamp)

        unstructured_grid = grid.GetBlock(0).GetBlock(0)

        # Clean useless data for visualization
        cell_data = unstructured_grid.GetCellData()

        cell_data.RemoveArray('CellMapL2G')
        cell_data.RemoveArray('elementMaterialTypes')

        point_data = unstructured_grid.GetPointData()

        point_data.RemoveArray('pInit')
        point_data.RemoveArray('phi_sp0')
        point_data.RemoveArray('pInc')
        point_data.RemoveArray('quantDOFs_for_clsvof0')
        point_data.RemoveArray('vof0')
        point_data.RemoveArray('nodeMaterialTypes')
        point_data.RemoveArray('NodeMapL2G')

        writer.SetInputData(unstructured_grid)
        writer.SetFileName('{}/proteus_{}.vtu'.format(dir_name, index))

        writer.Write()
Example #4
    def test_XDMF_shape(self, tmp_path, single_phase):
        os.chdir(tmp_path)

        single_phase.export_XDMF()
        fname = os.path.splitext(os.path.basename(
            single_phase.fname))[0] + '.xdmf'
        reader_xdmf = vtk.vtkXdmfReader()
        reader_xdmf.SetFileName(fname)
        reader_xdmf.Update()
        dim_xdmf = reader_xdmf.GetOutput().GetDimensions()
        bounds_xdmf = reader_xdmf.GetOutput().GetBounds()

        single_phase.view(increments=0).export_VTK(parallel=False)
        fname = os.path.splitext(os.path.basename(
            single_phase.fname))[0] + '_inc00.vti'
        reader_vti = vtk.vtkXMLImageDataReader()
        reader_vti.SetFileName(fname)
        reader_vti.Update()
        dim_vti = reader_vti.GetOutput().GetDimensions()
        bounds_vti = reader_vti.GetOutput().GetBounds()
        assert dim_vti == dim_xdmf and bounds_vti == bounds_xdmf
Example #5
def read(filetype, filename):
    import vtk
    from vtk.util import numpy_support

    def _read_data(data):
        """Extract numpy arrays from a VTK data set.
        """
        # Go through all arrays, fetch data.
        out = {}
        for k in range(data.GetNumberOfArrays()):
            array = data.GetArray(k)
            if array:
                array_name = array.GetName()
                out[array_name] = numpy.copy(
                    vtk.util.numpy_support.vtk_to_numpy(array))
        return out

    def _read_cells(vtk_mesh):
        data = numpy.copy(
            vtk.util.numpy_support.vtk_to_numpy(vtk_mesh.GetCells().GetData()))
        offsets = numpy.copy(
            vtk.util.numpy_support.vtk_to_numpy(
                vtk_mesh.GetCellLocationsArray()))
        types = numpy.copy(
            vtk.util.numpy_support.vtk_to_numpy(vtk_mesh.GetCellTypesArray()))

        # `data` is a one-dimensional vector with the layout
        # [num_points_0, p0_0, ..., p0_k, num_points_1, p1_0, ..., p1_k, ...].
        # Translate it into the cells dictionary.
        cells = {}
        for vtk_type, meshio_type in vtk_to_meshio_type.items():
            # Get all offsets for vtk_type
            offs = offsets[numpy.argwhere(types == vtk_type).transpose()[0]]
            num_cells = len(offs)
            if num_cells > 0:
                if meshio_type == "polygon":
                    for idx_cell in range(num_cells):
                        num_pts = data[offs[idx_cell]]
                        cell = data[offs[idx_cell] + 1:
                                    offs[idx_cell] + 1 + num_pts]
                        key = meshio_type + str(num_pts)
                        if key in cells:
                            cells[key] = numpy.vstack([cells[key], cell])
                        else:
                            cells[key] = cell
                else:
                    num_pts = data[offs[0]]
                    # instantiate the array
                    arr = numpy.empty((num_cells, num_pts), dtype=int)
                    # store the num_pts entries after the offsets into the
                    # columns of arr
                    for k in range(num_pts):
                        arr[:, k] = data[offs + k + 1]
                    cells[meshio_type] = arr

        return cells

    if filetype in ["vtk", "vtk-ascii", "vtk-binary"]:
        reader = vtk.vtkUnstructuredGridReader()
        reader.SetFileName(filename)
        reader.SetReadAllNormals(1)
        reader.SetReadAllScalars(1)
        reader.SetReadAllTensors(1)
        reader.SetReadAllVectors(1)
        reader.Update()
        vtk_mesh = reader.GetOutput()
    elif filetype in ["vtu", "vtu-ascii", "vtu-binary"]:
        reader = vtk.vtkXMLUnstructuredGridReader()
        reader.SetFileName(filename)
        reader.Update()
        vtk_mesh = reader.GetOutput()
    elif filetype in ["xdmf", "xdmf2"]:
        reader = vtk.vtkXdmfReader()
        reader.SetFileName(filename)
        reader.SetReadAllColorScalars(1)
        reader.SetReadAllFields(1)
        reader.SetReadAllNormals(1)
        reader.SetReadAllScalars(1)
        reader.SetReadAllTCoords(1)
        reader.SetReadAllTensors(1)
        reader.SetReadAllVectors(1)
        reader.Update()
        vtk_mesh = reader.GetOutputDataObject(0)
    elif filetype == "xdmf3":
        reader = vtk.vtkXdmf3Reader()
        reader.SetFileName(filename)
        reader.SetReadAllColorScalars(1)
        reader.SetReadAllFields(1)
        reader.SetReadAllNormals(1)
        reader.SetReadAllScalars(1)
        reader.SetReadAllTCoords(1)
        reader.SetReadAllTensors(1)
        reader.SetReadAllVectors(1)
        reader.Update()
        vtk_mesh = reader.GetOutputDataObject(0)
    else:
        assert filetype == "exodus", "Unknown file type '{}'.".format(filetype)
        reader = vtk.vtkExodusIIReader()
        reader.SetFileName(filename)
        vtk_mesh = _read_exodusii_mesh(reader)

    # Explicitly extract points, cells, point data, field data
    points = numpy.copy(
        numpy_support.vtk_to_numpy(vtk_mesh.GetPoints().GetData()))
    cells = _read_cells(vtk_mesh)

    point_data = _read_data(vtk_mesh.GetPointData())
    field_data = _read_data(vtk_mesh.GetFieldData())

    cell_data = _read_data(vtk_mesh.GetCellData())
    # split cell_data by the cell type
    cd = {}
    index = 0
    for cell_type in cells:
        num_cells = len(cells[cell_type])
        cd[cell_type] = {}
        for name, array in cell_data.items():
            cd[cell_type][name] = array[index:index + num_cells]
        index += num_cells
    cell_data = cd

    return Mesh(points,
                cells,
                point_data=point_data,
                cell_data=cell_data,
                field_data=field_data)
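
The _read_cells helper above decodes the legacy VTK cell layout, in which each cell is stored as its point count followed by its point ids. A minimal sketch of that decoding on a hand-built array (pure numpy, no VTK required):

import numpy

# Two triangles in the flat legacy layout: [n, p0, ..., p(n-1), n, ...]
data = numpy.array([3, 0, 1, 2, 3, 1, 2, 3])
offsets = numpy.array([0, 4])          # index of each cell's count entry
num_pts = data[offsets[0]]             # 3 points per triangle
arr = numpy.empty((len(offsets), num_pts), dtype=int)
for k in range(num_pts):
    arr[:, k] = data[offsets + k + 1]  # k-th point id of every cell
print(arr)                             # [[0 1 2]
                                       #  [1 2 3]]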
Example #6
def main():
    colors = vtk.vtkNamedColors()
    ifn, index = get_program_parameters()

    # Prepare to read the file.
    readerVolume = vtk.vtkXdmfReader()
    readerVolume.SetFileName(ifn)
    readerVolume.Update()

    # Extract the region of interest.
    voi = vtk.vtkExtractVOI()
    voi.SetInputConnection(readerVolume.GetOutputPort())
    voi.SetVOI(0, 1023, 0, 1023, 0, 1023)
    voi.SetSampleRate(1, 1, 1)
    voi.Update()  # Necessary for GetScalarRange().
    srange = voi.GetOutput().GetScalarRange()  # Needs Update() before!
    print("Range", srange)

    # Prepare surface generation.
    contour = vtk.vtkDiscreteMarchingCubes()  # For label images.
    contour.SetInputConnection(voi.GetOutputPort())
    # contour.ComputeNormalsOn()

    print("Doing label", index)

    contour.SetValue(0, index)
    contour.Update()  # Needed for GetNumberOfPolys()!!!

    print("Done contour")

    smoother = vtk.vtkWindowedSincPolyDataFilter()
    smoother.SetInputConnection(contour.GetOutputPort())
    smoother.SetNumberOfIterations(20)  # This has little effect on the error!
    smoother.BoundarySmoothingOff()
    smoother.FeatureEdgeSmoothingOff()
    # smoother.SetFeatureAngle(120.0)
    smoother.SetPassBand(.001)  # This increases the error a lot!
    smoother.NonManifoldSmoothingOn()
    smoother.NormalizeCoordinatesOn()
    smoother.GenerateErrorScalarsOn()
    # smoother.GenerateErrorVectorsOn()
    smoother.Update()

    smoothed_polys = smoother.GetOutput()
    print(smoothed_polys)
    smoother_error = smoothed_polys.GetPointData().GetScalars()

    writer = vtk.vtkXMLDataSetWriter()
    writer.SetFileName("out.vtp")
    writer.SetInputData(smoothed_polys)
    writer.Write()

    # Find min and max z.
    se_range = smoother_error.GetRange()
    print("Smoother error range:", se_range)
    minz = se_range[0]  # min(smoother_error)
    maxz = se_range[1]  # max(smoother_error)
    if maxz > 1:
        print("Big smoother error: min/max:", minz, maxz)
    # minz = 0.3  # This way colours of different particles are comparable.
    # maxz = 1
    minz = 0.3
    maxz = 0.6

    #smoothed_polys = contour.GetOutput()

    # Create the color map.
    colorLookupTable = vtk.vtkLookupTable()
    colorLookupTable.SetTableRange(
        minz,
        maxz)  # This does nothing, use mapper.SetScalarRange(minz, maxz).
    colorLookupTable.SetHueRange(2 / 3.0, 1)
    # colorLookupTable.SetSaturationRange(0, 0)
    # colorLookupTable.SetValueRange(1, 0)
    # colorLookupTable.SetNumberOfColors(256) #256 default
    colorLookupTable.Build()

    # Calculate cell normals.
    triangleCellNormals = vtk.vtkPolyDataNormals()
    triangleCellNormals.SetInputData(smoothed_polys)
    triangleCellNormals.ComputeCellNormalsOn()
    triangleCellNormals.ComputePointNormalsOff()
    triangleCellNormals.ConsistencyOn()
    triangleCellNormals.AutoOrientNormalsOn()
    triangleCellNormals.Update()  # Creates vtkPolyData.

    mapper = vtk.vtkPolyDataMapper()
    # mapper.SetInput(smoothed_polys) # This has no normals.
    mapper.SetInputConnection(
        triangleCellNormals.GetOutputPort())  # this is better for vis;-)
    mapper.ScalarVisibilityOn()  # Show colour.
    mapper.SetScalarRange(minz, maxz)
    # mapper.SetScalarModeToUseCellData() # Contains the label eg. 31
    mapper.SetScalarModeToUsePointData()  # The smoother error relates to the verts.
    mapper.SetLookupTable(colorLookupTable)

    # Take the isosurface data and create geometry.
    actor = vtk.vtkLODActor()
    actor.SetNumberOfCloudPoints(100000)
    actor.SetMapper(mapper)

    # Create the renderer.
    ren = vtk.vtkRenderer()
    ren.SetBackground(colors.GetColor3d("DimGray"))
    ren.AddActor(actor)

    # Create a window for the renderer of size 600x600.
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(ren)
    renWin.SetSize(600, 600)

    # Set a user interface interactor for the render window.
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)

    # Start the initialization and rendering.
    iren.Initialize()
    renWin.Render()

    ren.GetActiveCamera().SetPosition(255, 255, -128)
    ren.GetActiveCamera().SetFocalPoint(255, 255, 256)
    ren.GetActiveCamera().SetViewUp(0.844464, 0.227883, 0.484716)
    ren.ResetCameraClippingRange()
    renWin.Render()

    iren.Start()
Example #7
def read(filetype, filename):
    import vtk
    from vtk.util import numpy_support

    def _read_data(data):
        '''Extract numpy arrays from a VTK data set.
        '''
        # Go through all arrays, fetch data.
        out = {}
        for k in range(data.GetNumberOfArrays()):
            array = data.GetArray(k)
            if array:
                array_name = array.GetName()
                out[array_name] = vtk.util.numpy_support.vtk_to_numpy(array)

        return out

    def _read_cells(vtk_mesh):
        data = vtk.util.numpy_support.vtk_to_numpy(
            vtk_mesh.GetCells().GetData())
        offsets = vtk.util.numpy_support.vtk_to_numpy(
            vtk_mesh.GetCellLocationsArray())
        types = vtk.util.numpy_support.vtk_to_numpy(
            vtk_mesh.GetCellTypesArray())

        vtk_to_meshio_type = {
            vtk.VTK_VERTEX: 'vertex',
            vtk.VTK_LINE: 'line',
            vtk.VTK_TRIANGLE: 'triangle',
            vtk.VTK_QUAD: 'quad',
            vtk.VTK_TETRA: 'tetra',
            vtk.VTK_HEXAHEDRON: 'hexahedron',
            vtk.VTK_WEDGE: 'wedge',
            vtk.VTK_PYRAMID: 'pyramid'
        }

        # `data` is a one-dimensional vector with the layout
        # [num_points_0, p0_0, ..., p0_k, num_points_1, p1_0, ..., p1_k, ...].
        # Translate it into the cells dictionary.
        cells = {}
        for vtk_type, meshio_type in vtk_to_meshio_type.items():
            # Get all offsets for vtk_type
            offs = offsets[numpy.argwhere(types == vtk_type).transpose()[0]]
            num_cells = len(offs)
            if num_cells > 0:
                num_pts = data[offs[0]]
                # instantiate the array
                arr = numpy.empty((num_cells, num_pts), dtype=int)
                # store the num_pts entries after the offsets into the
                # columns of arr
                for k in range(num_pts):
                    arr[:, k] = data[offs + k + 1]
                cells[meshio_type] = arr

        return cells

    if filetype == 'vtk':
        reader = vtk.vtkUnstructuredGridReader()
        reader.SetFileName(filename)
        reader.Update()
        vtk_mesh = reader.GetOutput()
    elif filetype == 'vtu':
        reader = vtk.vtkXMLUnstructuredGridReader()
        reader.SetFileName(filename)
        reader.Update()
        vtk_mesh = reader.GetOutput()
    elif filetype == 'xdmf':
        reader = vtk.vtkXdmfReader()
        reader.SetFileName(filename)
        reader.Update()
        vtk_mesh = reader.GetOutputDataObject(0)
    elif filetype == 'exodus':
        reader = vtk.vtkExodusIIReader()
        reader.SetFileName(filename)
        vtk_mesh = _read_exodusii_mesh(reader)
    else:
        raise RuntimeError('Unknown file type \'%s\'.' % filetype)

    # Explicitly extract points, cells, point data, field data
    points = vtk.util.numpy_support.vtk_to_numpy(
        vtk_mesh.GetPoints().GetData())
    cells = _read_cells(vtk_mesh)
    point_data = _read_data(vtk_mesh.GetPointData())
    cell_data = _read_data(vtk_mesh.GetCellData())
    field_data = _read_data(vtk_mesh.GetFieldData())

    return points, cells, point_data, cell_data, field_data
Example #8
def process_one_chunk(filename, out_filename):
    print(f"Processing file {filename}")
    xdmf_template = "chunk_template.xdmf"
    _, basename = os.path.split(filename)
    xdmf_out = f"xdmf/{basename}.xdmf"

    copy_and_replace(xdmf_template, xdmf_out, replacement='../' + filename)

    with h5py.File(filename, 'r') as cube:
        neuron_ids = np.array(cube['neuron_ids'])

    # Prepare to read the file.
    readerVolume = vtk.vtkXdmfReader()
    readerVolume.SetFileName(xdmf_out)
    readerVolume.Update()

    # Extract the region of interest.
    # voi = vtk.vtkExtractVOI()
    # voi.SetInputConnection(readerVolume.GetOutputPort())
    # voi.SetVOI(0, 1023, 0, 1023, 0, 1023)
    # voi.SetSampleRate(1, 1, 1)
    # voi.Update()  # Necessary for GetScalarRange().

    for index in neuron_ids:
        full_out_name = out_filename % index
        if os.path.exists(full_out_name):
            print("Skipping neuron %d" % index)
            continue
        print("Processing neuron %d" % index)

        # Prepare surface generation.
        contour = vtk.vtkDiscreteMarchingCubes()  # For label images.
        contour.SetInputConnection(readerVolume.GetOutputPort())
        contour.SetValue(0, index)
        contour.Update()  # Needed for GetNumberOfPolys()!!!

        smoother = vtk.vtkWindowedSincPolyDataFilter()
        smoother.SetInputConnection(contour.GetOutputPort())
        smoother.SetNumberOfIterations(15)
        smoother.BoundarySmoothingOff()
        smoother.FeatureEdgeSmoothingOff()
        smoother.SetPassBand(.01)
        smoother.NonManifoldSmoothingOn()
        smoother.NormalizeCoordinatesOn()
        smoother.GenerateErrorScalarsOn()
        smoother.Update()

        decimate = vtk.vtkDecimatePro()
        decimate.SetInputData(smoother.GetOutput())
        decimate.SetTargetReduction(.8)
        decimate.PreserveTopologyOn()
        decimate.BoundaryVertexDeletionOff()
        decimate.Update()

        smoothed_polys = decimate.GetOutput()

        writer = vtk.vtkXMLDataSetWriter()
        writer.SetFileName(full_out_name)
        writer.SetInputData(smoothed_polys)
        writer.Write()

    return 1
Example #9
File: vtk_io.py  Project: nschloe/meshio
def read(filetype, filename):
    import vtk
    from vtk.util import numpy_support

    def _read_data(data):
        '''Extract numpy arrays from a VTK data set.
        '''
        # Go through all arrays, fetch data.
        out = {}
        for k in range(data.GetNumberOfArrays()):
            array = data.GetArray(k)
            if array:
                array_name = array.GetName()
                out[array_name] = vtk.util.numpy_support.vtk_to_numpy(array)

        return out

    def _read_cells(vtk_mesh):
        data = vtk.util.numpy_support.vtk_to_numpy(
                vtk_mesh.GetCells().GetData()
                )
        offsets = vtk.util.numpy_support.vtk_to_numpy(
                vtk_mesh.GetCellLocationsArray()
                )
        types = vtk.util.numpy_support.vtk_to_numpy(
                vtk_mesh.GetCellTypesArray()
                )

        vtk_to_meshio_type = {
            vtk.VTK_VERTEX: 'vertex',
            vtk.VTK_LINE: 'line',
            vtk.VTK_TRIANGLE: 'triangle',
            vtk.VTK_QUAD: 'quad',
            vtk.VTK_TETRA: 'tetra',
            vtk.VTK_HEXAHEDRON: 'hexahedron',
            vtk.VTK_WEDGE: 'wedge',
            vtk.VTK_PYRAMID: 'pyramid'
            }

        # `data` is a one-dimensional vector with the layout
        # [num_points_0, p0_0, ..., p0_k, num_points_1, p1_0, ..., p1_k, ...].
        # Translate it into the cells dictionary.
        cells = {}
        for vtk_type, meshio_type in vtk_to_meshio_type.items():
            # Get all offsets for vtk_type
            offs = offsets[numpy.argwhere(types == vtk_type).transpose()[0]]
            num_cells = len(offs)
            if num_cells > 0:
                num_pts = data[offs[0]]
                # instantiate the array
                arr = numpy.empty((num_cells, num_pts), dtype=int)
                # store the num_pts entries after the offsets into the
                # columns of arr
                for k in range(num_pts):
                    arr[:, k] = data[offs + k + 1]
                cells[meshio_type] = arr

        return cells

    if filetype == 'vtk':
        reader = vtk.vtkUnstructuredGridReader()
        reader.SetFileName(filename)
        reader.Update()
        vtk_mesh = reader.GetOutput()
    elif filetype == 'vtu':
        reader = vtk.vtkXMLUnstructuredGridReader()
        reader.SetFileName(filename)
        reader.Update()
        vtk_mesh = reader.GetOutput()
    elif filetype == 'xdmf':
        reader = vtk.vtkXdmfReader()
        reader.SetFileName(filename)
        reader.Update()
        vtk_mesh = reader.GetOutputDataObject(0)
    elif filetype == 'exodus':
        reader = vtk.vtkExodusIIReader()
        reader.SetFileName(filename)
        vtk_mesh = _read_exodusii_mesh(reader)
    else:
        raise RuntimeError('Unknown file type \'%s\'.' % filetype)

    # Explicitly extract points, cells, point data, field data
    points = vtk.util.numpy_support.vtk_to_numpy(
            vtk_mesh.GetPoints().GetData()
            )
    cells = _read_cells(vtk_mesh)
    point_data = _read_data(vtk_mesh.GetPointData())
    cell_data = _read_data(vtk_mesh.GetCellData())
    field_data = _read_data(vtk_mesh.GetFieldData())

    return points, cells, point_data, cell_data, field_data
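
A hypothetical usage of the tuple-returning read() above (file name is a placeholder):

points, cells, point_data, cell_data, field_data = read('vtu', 'mesh.vtu')
print(points.shape)        # (num_points, 3)
print(sorted(cells))       # cell types present, e.g. ['tetra', 'triangle']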
Example #10
class TestResult:
    def test_self_report(self, default):
        print(default)

    def test_view_all(self, default):
        a = default.view(increments=True).get('F')

        assert dict_equal(a, default.view(increments='*').get('F'))
        assert dict_equal(
            a,
            default.view(increments=default.increments_in_range(
                0,
                np.iinfo(int).max)).get('F'))

        assert dict_equal(a, default.view(times=True).get('F'))
        assert dict_equal(a, default.view(times='*').get('F'))
        assert dict_equal(
            a,
            default.view(times=default.times_in_range(0.0, np.inf)).get('F'))

    @pytest.mark.parametrize('what',
                             ['increments', 'times', 'phases', 'fields']
                             )  # ToDo: discuss homogenizations
    def test_view_none(self, default, what):
        n0 = default.view(what, False)
        n1 = default.view(what, [])

        label = 'increments' if what == 'times' else what

        assert n0.get('F') is n1.get('F') is None and \
               len(n0.visible[label]) == len(n1.visible[label]) == 0

    @pytest.mark.parametrize('what',
                             ['increments', 'times', 'phases', 'fields']
                             )  # ToDo: discuss homogenizations
    def test_view_more(self, default, what):
        empty = default.view(what, False)

        a = empty.view_more(what, '*').get('F')
        b = empty.view_more(what, True).get('F')

        assert dict_equal(a, b)

    @pytest.mark.parametrize('what',
                             ['increments', 'times', 'phases', 'fields']
                             )  # ToDo: discuss homogenizations
    def test_view_less(self, default, what):
        full = default.view(what, True)

        n0 = full.view_less(what, '*')
        n1 = full.view_less(what, True)

        label = 'increments' if what == 'times' else what

        assert n0.get('F') is n1.get('F') is None and \
               len(n0.visible[label]) == len(n1.visible[label]) == 0

    def test_view_invalid(self, default):
        with pytest.raises(AttributeError):
            default.view('invalid', True)

    def test_add_invalid(self, default):
        default.add_absolute('xxxx')

    def test_add_absolute(self, default):
        default.add_absolute('F_e')
        in_memory = np.abs(default.place('F_e'))
        in_file = default.place('|F_e|')
        assert np.allclose(in_memory, in_file)

    @pytest.mark.parametrize('mode', [
        'direct',
        pytest.param('function',
                     marks=pytest.mark.xfail(sys.platform == 'darwin',
                                             reason='n/a'))
    ])
    def test_add_calculation(self, default, tmp_path, mode):

        if mode == 'direct':
            default.add_calculation('2.0*np.abs(#F#)-1.0', 'x', '-',
                                    'my notes')
        else:
            with open(tmp_path / 'f.py', 'w') as f:
                f.write(
                    "import numpy as np\ndef my_func(field):\n  return 2.0*np.abs(field)-1.0\n"
                )
            sys.path.insert(0, str(tmp_path))
            import f
            default.enable_user_function(f.my_func)
            default.add_calculation('my_func(#F#)', 'x', '-', 'my notes')

        in_memory = 2.0 * np.abs(default.place('F')) - 1.0
        in_file = default.place('x')
        assert np.allclose(in_memory, in_file)

    def test_add_calculation_invalid(self, default):
        default.add_calculation('np.linalg.norm(#F#,axis=0)', 'wrong_dim')
        assert default.get('wrong_dim') is None

    def test_add_stress_Cauchy(self, default):
        default.add_stress_Cauchy('P', 'F')
        in_memory = mechanics.stress_Cauchy(default.place('P'),
                                            default.place('F'))
        in_file = default.place('sigma')
        assert np.allclose(in_memory, in_file)

    def test_add_determinant(self, default):
        default.add_determinant('P')
        in_memory = np.linalg.det(default.place('P'))
        in_file = default.place('det(P)')
        assert np.allclose(in_memory, in_file)

    def test_add_deviator(self, default):
        default.add_deviator('P')
        in_memory = tensor.deviatoric(default.place('P'))
        in_file = default.place('s_P')
        assert np.allclose(in_memory, in_file)

    @pytest.mark.parametrize('eigenvalue,function', [('max', np.amax),
                                                     ('min', np.amin)])
    def test_add_eigenvalue(self, default, eigenvalue, function):
        default.add_stress_Cauchy('P', 'F')
        default.add_eigenvalue('sigma', eigenvalue)
        in_memory = function(tensor.eigenvalues(default.place('sigma')),
                             axis=1)
        in_file = default.place(f'lambda_{eigenvalue}(sigma)')
        assert np.allclose(in_memory, in_file)

    @pytest.mark.parametrize('eigenvalue,idx', [('max', 2), ('mid', 1),
                                                ('min', 0)])
    def test_add_eigenvector(self, default, eigenvalue, idx):
        default.add_stress_Cauchy('P', 'F')
        default.add_eigenvector('sigma', eigenvalue)
        in_memory = tensor.eigenvectors(default.place('sigma'))[:, idx]
        in_file = default.place(f'v_{eigenvalue}(sigma)')
        assert np.allclose(in_memory, in_file)

    @pytest.mark.parametrize('d', [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    def test_add_IPF_color(self, default, d):
        default.add_IPF_color(d, 'O')
        qu = default.place('O')
        crystal_structure = qu.dtype.metadata['lattice']
        c = Orientation(rotation=qu, lattice=crystal_structure)
        in_memory = np.uint8(c.IPF_color(np.array(d)) * 255)
        in_file = default.place('IPFcolor_({} {} {})'.format(*d))
        assert np.allclose(in_memory, in_file)

    def test_add_maximum_shear(self, default):
        default.add_stress_Cauchy('P', 'F')
        default.add_maximum_shear('sigma')
        in_memory = mechanics.maximum_shear(default.place('sigma'))
        in_file = default.place('max_shear(sigma)')
        assert np.allclose(in_memory, in_file)

    def test_add_Mises_strain(self, default):
        t = ['V', 'U'][np.random.randint(0, 2)]
        m = np.random.random() * 2.0 - 1.0
        default.add_strain('F', t, m)
        label = f'epsilon_{t}^{m}(F)'
        default.add_equivalent_Mises(label)
        in_memory = mechanics.equivalent_strain_Mises(default.place(label))
        in_file = default.place(label + '_vM')
        assert np.allclose(in_memory, in_file)

    def test_add_Mises_stress(self, default):
        default.add_stress_Cauchy('P', 'F')
        default.add_equivalent_Mises('sigma')
        in_memory = mechanics.equivalent_stress_Mises(default.place('sigma'))
        in_file = default.place('sigma_vM')
        assert np.allclose(in_memory, in_file)

    def test_add_Mises_invalid(self, default):
        default.add_stress_Cauchy('P', 'F')
        default.add_calculation('#sigma#', 'sigma_y', unit='y')
        default.add_equivalent_Mises('sigma_y')
        assert default.get('sigma_y_vM') is None

    def test_add_Mises_stress_strain(self, default):
        default.add_stress_Cauchy('P', 'F')
        default.add_calculation('#sigma#', 'sigma_y', unit='y')
        default.add_calculation('#sigma#', 'sigma_x', unit='x')
        default.add_equivalent_Mises('sigma_y', kind='strain')
        default.add_equivalent_Mises('sigma_x', kind='stress')
        assert not np.allclose(default.place('sigma_y_vM'),
                               default.place('sigma_x_vM'))

    @pytest.mark.parametrize('ord', [1, 2])
    @pytest.mark.parametrize('dataset,axis', [('F', (1, 2)), ('xi_sl', (1, ))])
    def test_add_norm(self, default, ord, dataset, axis):
        default.add_norm(dataset, ord)
        in_memory = np.linalg.norm(default.place(dataset),
                                   ord=ord,
                                   axis=axis,
                                   keepdims=True)
        in_file = default.place(f'|{dataset}|_{ord}')
        assert np.allclose(in_memory, in_file)

    def test_add_stress_second_Piola_Kirchhoff(self, default):
        default.add_stress_second_Piola_Kirchhoff('P', 'F')
        in_memory = mechanics.stress_second_Piola_Kirchhoff(
            default.place('P'), default.place('F'))
        in_file = default.place('S')
        assert np.allclose(in_memory, in_file)

    @pytest.mark.parametrize('options', [{
        'uvw': [1, 0, 0],
        'with_symmetry': False
    }, {
        'hkl': [0, 1, 1],
        'with_symmetry': True
    }])
    def test_add_pole(self, default, options):
        default.add_pole(**options)
        rot = default.place('O')
        in_memory = Orientation(
            rot, lattice=rot.dtype.metadata['lattice']).to_pole(**options)
        brackets = ['[[]', '[]]'] if 'uvw' in options else ['(', ')']  # escape fnmatch
        label = '{}{} {} {}{}'.format(brackets[0],
                                      *(list(options.values())[0]),
                                      brackets[1])
        in_file = default.place(f'p^{label}')
        print(in_file - in_memory)
        assert np.allclose(in_memory, in_file)

    def test_add_rotation(self, default):
        default.add_rotation('F')
        in_memory = mechanics.rotation(default.place('F')).as_matrix()
        in_file = default.place('R(F)')
        assert np.allclose(in_memory, in_file)

    def test_add_spherical(self, default):
        default.add_spherical('P')
        in_memory = tensor.spherical(default.place('P'), False)
        in_file = default.place('p_P')
        assert np.allclose(in_memory, in_file)

    def test_add_strain(self, default):
        t = ['V', 'U'][np.random.randint(0, 2)]
        m = np.random.random() * 2.0 - 1.0
        default.add_strain('F', t, m)
        label = f'epsilon_{t}^{m}(F)'
        in_memory = mechanics.strain(default.place('F'), t, m)
        in_file = default.place(label)
        assert np.allclose(in_memory, in_file)

    def test_add_stretch_right(self, default):
        default.add_stretch_tensor('F', 'U')
        in_memory = mechanics.stretch_right(default.place('F'))
        in_file = default.place('U(F)')
        assert np.allclose(in_memory, in_file)

    def test_add_stretch_left(self, default):
        default.add_stretch_tensor('F', 'V')
        in_memory = mechanics.stretch_left(default.place('F'))
        in_file = default.place('V(F)')
        assert np.allclose(in_memory, in_file)

    def test_add_invalid_dataset(self, default):
        with pytest.raises(TypeError):
            default.add_calculation('#invalid#*2')

    def test_add_generic_grid_invalid(self, ref_path):
        result = Result(ref_path / '4grains2x4x3_compressionY.hdf5')
        with pytest.raises(NotImplementedError):
            result.add_curl('F')

    @pytest.mark.parametrize('shape', ['vector', 'tensor'])
    def test_add_curl(self, default, shape):
        if shape == 'vector':
            default.add_calculation('#F#[:,:,0]', 'x', '1', 'just a vector')
        if shape == 'tensor':
            default.add_calculation('#F#[:,:,:]', 'x', '1', 'just a tensor')
        x = default.place('x')
        default.add_curl('x')
        in_file = default.place('curl(x)')
        in_memory = grid_filters.curl(
            default.size, x.reshape(tuple(default.cells) +
                                    x.shape[1:])).reshape(in_file.shape)
        assert (in_file == in_memory).all()

    @pytest.mark.parametrize('shape', ['vector', 'tensor'])
    def test_add_divergence(self, default, shape):
        if shape == 'vector':
            default.add_calculation('#F#[:,:,0]', 'x', '1', 'just a vector')
        if shape == 'tensor':
            default.add_calculation('#F#[:,:,:]', 'x', '1', 'just a tensor')
        x = default.place('x')
        default.add_divergence('x')
        in_file = default.place('divergence(x)')
        in_memory = grid_filters.divergence(
            default.size, x.reshape(tuple(default.cells) +
                                    x.shape[1:])).reshape(in_file.shape)
        assert (in_file == in_memory).all()

    @pytest.mark.parametrize('shape', ['scalar', 'pseudo_scalar', 'vector'])
    def test_add_gradient(self, default, shape):
        if shape == 'pseudo_scalar':
            default.add_calculation('#F#[:,0,0:1]', 'x', '1',
                                    'a pseudo scalar')
        if shape == 'scalar':
            default.add_calculation('#F#[:,0,0]', 'x', '1', 'just a scalar')
        if shape == 'vector':
            default.add_calculation('#F#[:,:,1]', 'x', '1', 'just a vector')
        x = default.place('x').reshape((np.prod(default.cells), -1))
        default.add_gradient('x')
        in_file = default.place('gradient(x)')
        in_memory = grid_filters.gradient(
            default.size, x.reshape(tuple(default.cells) +
                                    x.shape[1:])).reshape(in_file.shape)
        assert (in_file == in_memory).all()

    @pytest.mark.parametrize('overwrite', ['off', 'on'])
    def test_add_overwrite(self, default, overwrite):
        last = default.view(increments=-1)

        last.add_stress_Cauchy()

        created_first = last.place('sigma').dtype.metadata['created']
        created_first = datetime.strptime(created_first, '%Y-%m-%d %H:%M:%S%z')

        if overwrite == 'on':
            last = last.view(protected=False)
        else:
            last = last.view(protected=True)

        time.sleep(2.)
        try:
            last.add_calculation('#sigma#*0.0+311.', 'sigma',
                                 'not the Cauchy stress')
        except ValueError:
            pass

        created_second = last.place('sigma').dtype.metadata['created']
        created_second = datetime.strptime(created_second,
                                           '%Y-%m-%d %H:%M:%S%z')

        if overwrite == 'on':
            assert created_first < created_second and np.allclose(
                last.place('sigma'), 311.)
        else:
            assert created_first == created_second and not np.allclose(
                last.place('sigma'), 311.)

    @pytest.mark.parametrize('allowed', ['off', 'on'])
    def test_rename(self, default, allowed):
        if allowed == 'on':
            F = default.place('F')
            default = default.view(protected=False)
            default.rename('F', 'new_name')
            assert np.all(F == default.place('new_name'))
            default = default.view(protected=True)

        with pytest.raises(PermissionError):
            default.rename('P', 'another_new_name')

    @pytest.mark.parametrize('allowed', ['off', 'on'])
    def test_remove(self, default, allowed):
        if allowed == 'on':
            unsafe = default.view(protected=False)
            unsafe.remove('F')
            assert unsafe.get('F') is None
        else:
            with pytest.raises(PermissionError):
                default.remove('F')

    @pytest.mark.parametrize('mode', ['cell', 'node'])
    def test_coordinates(self, default, mode):
        if mode == 'cell':
            a = grid_filters.coordinates0_point(default.cells, default.size,
                                                default.origin)
            b = default.coordinates0_point.reshape(tuple(default.cells) +
                                                   (3, ),
                                                   order='F')
        elif mode == 'node':
            a = grid_filters.coordinates0_node(default.cells, default.size,
                                               default.origin)
            b = default.coordinates0_node.reshape(tuple(default.cells + 1) +
                                                  (3, ),
                                                  order='F')
        assert np.allclose(a, b)

    @pytest.mark.parametrize('output', ['F', '*', ['P'], ['P', 'F']],
                             ids=range(4))
    @pytest.mark.parametrize('fname', ['12grains6x7x8_tensionY.hdf5'],
                             ids=range(1))
    @pytest.mark.parametrize('inc', [4, 0], ids=range(2))
    @pytest.mark.xfail(int(vtk.vtkVersion.GetVTKVersion().split('.')[0]) < 9,
                       reason='missing "Direction" attribute')
    def test_vtk(self, request, tmp_path, ref_path, update,
                 patch_execution_stamp, patch_datetime_now, output, fname,
                 inc):
        result = Result(ref_path / fname).view(increments=inc)
        os.chdir(tmp_path)
        result.export_VTK(output, parallel=False)
        fname = fname.split(
            '.')[0] + f'_inc{(inc if type(inc) == int else inc[0]):0>2}.vti'
        v = VTK.load(tmp_path / fname)
        v.set_comments('n/a')
        v.save(tmp_path / fname, parallel=False)
        with open(fname) as f:
            cur = hashlib.md5(f.read().encode()).hexdigest()
        if update:
            with open((ref_path / 'export_VTK' /
                       request.node.name).with_suffix('.md5'), 'w') as f:
                f.write(cur + '\n')
        with open((ref_path / 'export_VTK' /
                   request.node.name).with_suffix('.md5')) as f:
            assert cur == f.read().strip('\n')

    @pytest.mark.parametrize('mode', ['point', 'cell'])
    @pytest.mark.parametrize('output', [False, True])
    def test_vtk_marc(self, tmp_path, ref_path, mode, output):
        os.chdir(tmp_path)
        result = Result(ref_path / 'check_compile_job1.hdf5')
        result.export_VTK(output, mode)

    def test_marc_coordinates(self, ref_path):
        result = Result(ref_path /
                        'check_compile_job1.hdf5').view(increments=-1)
        c_n = result.coordinates0_node + result.get('u_n')
        c_p = result.coordinates0_point + result.get('u_p')
        assert len(c_n) > len(c_p)

    @pytest.mark.parametrize('mode', ['point', 'cell'])
    def test_vtk_mode(self, tmp_path, single_phase, mode):
        os.chdir(tmp_path)
        single_phase.export_VTK(mode=mode)

    def test_vtk_invalid_mode(self, single_phase):
        with pytest.raises(ValueError):
            single_phase.export_VTK(mode='invalid')

    def test_XDMF_datatypes(self, tmp_path, single_phase, update, ref_path):
        for shape in [('scalar', ()), ('vector', (3, )), ('tensor', (3, 3)),
                      ('matrix', (12, ))]:
            for dtype in [
                    'f4', 'f8', 'i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'
            ]:
                single_phase.add_calculation(
                    f"np.ones(np.shape(#F#)[0:1]+{shape[1]},'{dtype}')",
                    f'{shape[0]}_{dtype}')
        fname = os.path.splitext(os.path.basename(
            single_phase.fname))[0] + '.xdmf'
        os.chdir(tmp_path)
        single_phase.export_XDMF()
        if update:
            shutil.copy(tmp_path / fname, ref_path / fname)

        assert sorted(open(tmp_path / fname).read()) == sorted(
            open(ref_path / fname).read())  # XML is not ordered

    @pytest.mark.skipif(not (hasattr(vtk, 'vtkXdmfReader')
                             and hasattr(vtk.vtkXdmfReader(), 'GetOutput')),
                        reason='https://discourse.vtk.org/t/2450')
    def test_XDMF_shape(self, tmp_path, single_phase):
        os.chdir(tmp_path)

        single_phase.export_XDMF()
        fname = os.path.splitext(os.path.basename(
            single_phase.fname))[0] + '.xdmf'
        reader_xdmf = vtk.vtkXdmfReader()
        reader_xdmf.SetFileName(fname)
        reader_xdmf.Update()
        dim_xdmf = reader_xdmf.GetOutput().GetDimensions()
        bounds_xdmf = reader_xdmf.GetOutput().GetBounds()

        single_phase.view(increments=0).export_VTK(parallel=False)
        fname = os.path.splitext(os.path.basename(
            single_phase.fname))[0] + '_inc00.vti'
        reader_vti = vtk.vtkXMLImageDataReader()
        reader_vti.SetFileName(fname)
        reader_vti.Update()
        dim_vti = reader_vti.GetOutput().GetDimensions()
        bounds_vti = reader_vti.GetOutput().GetBounds()
        assert dim_vti == dim_xdmf and bounds_vti == bounds_xdmf

    def test_XDMF_invalid(self, default):
        with pytest.raises(TypeError):
            default.export_XDMF()

    @pytest.mark.parametrize(
        'view,output,flatten,prune',
        [({}, ['F', 'P', 'F', 'L_p', 'F_e', 'F_p'], True, True),
         ({
             'increments': 3
         }, 'F', True, True),
         ({
             'increments': [1, 8, 3, 4, 5, 6, 7]
         }, ['F', 'P'], True, True),
         ({
             'phases': ['A', 'B']
         }, ['F', 'P'], True, True),
         ({
             'phases': ['A', 'C'],
             'homogenizations': False
         }, ['F', 'P', 'O'], True, True),
         ({
             'phases': False,
             'homogenizations': False
         }, ['F', 'P', 'O'], True, True),
         ({
             'phases': False
         }, ['Delta_V'], True, True), ({}, ['u_p', 'u_n'], False, False)],
        ids=list(range(8)))
    def test_get(self, update, request, ref_path, view, output, flatten,
                 prune):
        result = Result(ref_path / '4grains2x4x3_compressionY.hdf5')
        for key, value in view.items():
            result = result.view(key, value)

        fname = request.node.name
        cur = result.get(output, flatten, prune)
        if update:
            with bz2.BZ2File((ref_path / 'get' / fname).with_suffix('.pbz2'),
                             'w') as f:
                pickle.dump(cur, f)

        with bz2.BZ2File((ref_path / 'get' / fname).with_suffix('.pbz2')) as f:
            ref = pickle.load(f)
            assert cur is None if ref is None else dict_equal(cur, ref)

    @pytest.mark.parametrize(
        'view,output,flatten,constituents,prune',
        [({}, ['F', 'P', 'F', 'L_p', 'F_e', 'F_p'], True, True, None),
         ({
             'increments': 3
         }, 'F', True, True, [0, 1, 2, 3, 4, 5, 6, 7]),
         ({
             'increments': [1, 8, 3, 4, 5, 6, 7]
         }, ['F', 'P'], True, True, 1),
         ({
             'phases': ['A', 'B']
         }, ['F', 'P'], True, True, [1, 2]),
         ({
             'phases': ['A', 'C'],
             'homogenizations': False
         }, ['F', 'P', 'O'], True, True, [0, 7]),
         ({
             'phases': False,
             'homogenizations': False
         }, ['F', 'P', 'O'], True, True, [1, 2, 3, 4]),
         ({
             'phases': False
         }, ['Delta_V'], True, True, [1, 2, 4]),
         ({}, ['u_p', 'u_n'], False, False, None)],
        ids=list(range(8)))
    def test_place(self, update, request, ref_path, view, output, flatten,
                   prune, constituents):
        result = Result(ref_path / '4grains2x4x3_compressionY.hdf5')
        for key, value in view.items():
            result = result.view(key, value)

        fname = request.node.name
        cur = result.place(output, flatten, prune, constituents)
        if update:
            with bz2.BZ2File((ref_path / 'place' / fname).with_suffix('.pbz2'),
                             'w') as f:
                pickle.dump(cur, f)

        with bz2.BZ2File(
            (ref_path / 'place' / fname).with_suffix('.pbz2')) as f:
            ref = pickle.load(f)
            assert cur is None if ref is None else dict_equal(cur, ref)

    @pytest.mark.parametrize('fname', [
        '4grains2x4x3_compressionY.hdf5',
        '6grains6x7x8_single_phase_tensionY.hdf5'
    ])
    @pytest.mark.parametrize('output', ['material.yaml', '*'])
    @pytest.mark.parametrize('overwrite', [True, False])
    def test_export_setup(self, ref_path, tmp_path, fname, output, overwrite):
        os.chdir(tmp_path)
        r = Result(ref_path / fname)
        r.export_setup(output, overwrite)
        r.export_setup(output, overwrite)
Example #11
    def operateInternal(self):
        # Access the DEFORM point and element files
        point_file = self.parameters().find('point-file').value(0)
        element_file = self.parameters().find('element-file').value(0)

        # Access the timestep to process
        timestep = self.parameters().find('timestep').value(0)

        # Access the Dream3D PipelineRunner executable
        pipeline_executable = self.parameters().find(
            'pipeline-executable').value(0)

        # Access the name of the attribute to use for zoning
        attribute = self.parameters().find('attribute').value(0)

        # Access the microscale statistics parameters
        stats = self.parameters().find('stats')

        # Access the Dream3D output file
        output_file = self.parameters().find('output-file').value(0)

        # Create a resource and session
        resource = smtk.session.multiscale.Resource.create()
        session = smtk.session.multiscale.Session.create()
        resource.setLocation(point_file + '.smtk')
        resource.setSession(session)

        # The location of the template pipeline is hard-coded w.r.t. the AFRL
        # directory
        template_pipeline_file = AFRLDir.description.replace('\n', '') + \
            '/Dream3DPipelines/Pipelines/DREAM3D_Phase1_Pipeline.json'

        # Extract the parameters into python lists
        mu = []
        sigma = []
        min_cutoff = []
        max_cutoff = []
        params = {'mu': mu, 'sigma': sigma,
                  'min_cutoff': min_cutoff, 'max_cutoff': max_cutoff}

        for i in range(stats.numberOfGroups()):
            for name, values in params.items():
                values.append(stats.find(i, name).value(0))

        # Ensure that the executable is, in fact, an executable
        pipeline = Dream3DPipeline.which(pipeline_executable)
        if pipeline is None:
            print("Cannot find PipelineRunner at '%s'" % pipeline_executable)
            return self.createResult(smtk.operation.Operation.Outcome.FAILED)

        # Generate the Dream3D pipeline for this operation
        pipeline_file = \
            Dream3DPipeline.generate_pipeline(template_pipeline_file,
                                              point_file, timestep,
                                              element_file,
                                              attribute, mu, sigma,
                                              min_cutoff, max_cutoff,
                                              output_file)
        # Execute the Dream3D pipeline
        pipelineargs = [pipeline, '-p', os.path.abspath(pipeline_file)]
        subprocess.call(pipelineargs)

        # Remove the pipeline file
#        os.remove(pipeline_file)

        # Check for the resulting xdmf file
        if not os.path.isfile(output_file):
            smtk.ErrorMessage(
                smtk.io.Logger.instance(), "No DREAM3D pipeline output")
            return self.createResult(smtk.operation.Operation.Outcome.FAILED)

        # Read DREAM3D Xdmf file as a VTK data object
        xdmfReader = vtk.vtkXdmfReader()
        xdmfReader.SetFileName(os.path.splitext(output_file)[0] + '.xdmf')
        xdmfReader.Update()

        dataNames = [xdmfReader.GetOutputDataObject(0).GetMetaData(i)
                     .Get(vtk.vtkCompositeDataSet.NAME()) for i in
                     range(xdmfReader.GetNumberOfGrids())]

        volumeDataContainer = xdmfReader.GetOutputDataObject(0).GetBlock(
            dataNames.index("VolumeDataContainer"))

        # Import the vtk data object as an SMTK mesh
        cnvrt = smtk.io.vtk.ImportVTKData()
        meshResource = cnvrt(volumeDataContainer, resource.meshes(), 'ZoneIds')

        # Ensure that the import succeeded
        if not meshResource or not meshResource.isValid():
            return self.createResult(smtk.operation.Operation.Outcome.FAILED)

        # Assign its model manager to the one associated with this session
        meshResource.modelResource = resource
        meshResource.name("DEFORM mesh")

        # Construct the topology
        session.addTopology(smtk.session.mesh.Topology(meshResource))

        # Our mesh resources will already have a UUID, so here we create a model
        # given the model manager and UUID
        model = resource.insertModel(
            meshResource.entity(), 2, 2, "DEFORM model")

        # Declare the model as "dangling" so it will be transcribed
        session.declareDanglingEntity(model)

        # Set the model's session to point to the current session
        model.setSession(smtk.model.SessionRef(resource, session.sessionId()))

        meshResource.associateToModel(model.entity())

        # If we don't call "transcribe" ourselves, it never gets called.
        session.transcribe(model, smtk.model.SESSION_EVERYTHING, False)

        result = self.createResult(smtk.operation.Operation.Outcome.SUCCEEDED)

        created = result.findResource("resource")
        created.setValue(resource)

        resultModels = result.findComponent("model")
        resultModels.setValue(model.component())

        created = result.findComponent("created")
        created.setNumberOfValues(1)
        created.setValue(model.component())
        created.setIsEnabled(True)

        result.findComponent("mesh_created").setValue(model.component())

        # Return with success
        return result