    def set_mesh(self, V, T, **kwargs):
        self.V = V
        self.T = T
        self._k = np.zeros(V.shape[0], dtype=np.int32)

        model_poly_data = numpy_to_vtkPolyData(V, faces.faces_to_cell_array(T))

        idFilter = vtk.vtkIdFilter()
        idFilter.SetInput(model_poly_data)
        idFilter.SetIdsArrayName('i')
        idFilter.Update()

        labelled_poly_data = idFilter.GetOutput()

        self.pipeline['color_points'].SetInput(model_poly_data)
        self.pipeline['color_faces'].SetInput(model_poly_data)
        self.pipeline['visible'].SetInput(labelled_poly_data)

        enorm = lambda V: np.sqrt(np.sum(V * V, axis=-1))
        edge_lengths = np.r_[enorm(V[T[:, 0]] - V[T[:, 1]]),
                             enorm(V[T[:, 1]] - V[T[:, 2]]),
                             enorm(V[T[:, 2]] - V[T[:, 0]])]

        length = 0.2 * np.mean(edge_lengths)
        self.pipeline['source'].SetXLength(length)
        self.pipeline['source'].SetYLength(length)
        self.pipeline['source'].SetZLength(length)

        self.pipeline['vertices_actor'].VisibilityOn()
        self.pipeline['model_actor'].VisibilityOn()

        self.pipeline['ren'].ResetCamera()

        self.update()
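The `numpy_to_vtkPolyData` and `faces_to_cell_array` helpers are imported from elsewhere and not shown in these examples. A minimal sketch of what `faces_to_cell_array` presumably produces, assuming VTK's legacy flat cell-array layout:

import numpy as np

def faces_to_cell_array(T):
    # Hypothetical sketch (not the project's implementation): VTK's legacy
    # cell-array layout stores each triangle as [3, i0, i1, i2], flattened
    # into a single integer array.
    T = np.asarray(T, dtype=np.int64)
    counts = np.full((T.shape[0], 1), T.shape[1], dtype=np.int64)
    return np.hstack((counts, T)).ravel()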
Example 2
def main():
    if not HAS_VTK:
        return

    V, T = box_model(5, 10, 1.0, 1.0)
    poly_data = numpy_to_vtkPolyData(V, faces_to_cell_array(T))
    view_vtkPolyData(poly_data)
Example 3
    def get_arap_solver(self):
        if not hasattr(self, '_adjW'):
            self._adjW = weights(self._s.V,
                                 faces.faces_to_cell_array(self.T),
                                 weights_type='uniform')

        adj, W = self._adjW
        return ARAPVertexSolver(adj, W, self._s.V.copy())
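`weights` is also defined outside this listing. A minimal sketch of what `weights_type='uniform'` plausibly returns (per-vertex adjacency lists plus unit edge weights), assuming the flat [3, i, j, k, ...] cell-array layout:

import numpy as np

def uniform_weights(V, cell_array):
    # Hypothetical sketch: build one-ring adjacency from the flat cell array
    # and assign weight 1.0 to every edge.
    adj = [set() for _ in xrange(V.shape[0])]
    cells = np.asarray(cell_array).ravel()
    p = 0
    while p < cells.shape[0]:
        m = int(cells[p])
        face = cells[p + 1:p + 1 + m]
        for a in xrange(m):
            i, j = int(face[a]), int(face[(a + 1) % m])
            adj[i].add(j)
            adj[j].add(i)
        p += m + 1
    adj = [sorted(s) for s in adj]
    W = [np.ones(len(s), dtype=np.float64) for s in adj]
    return adj, W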
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('input', type=str)
    parser.add_argument('-c', dest='camera_actions', type=str, default=[],
                        action='append', help='camera actions')

    args = parser.parse_args()
    z = np.load(args.input)

    vis = VisualiseMesh()

    # V0 (purple)
    V0 = z['V0']
    vis.add_mesh(V0, z['T'], actor_name='V0')
    lut = vis.actors['V0'].GetMapper().GetLookupTable()
    lut.SetTableValue(0, *int2dbl(255, 0, 255))

    # V0 w/ X (green)
    T_ = faces_to_cell_array(z['T'])
    adj, W = weights.weights(V0, T_, weights_type='cotan')
    solveV0_X = ARAPVertexSolver(adj, W, V0)

    rotM = lambda x: quat.rotationMatrix(quat.quat(x))
    V0_X = solveV0_X(map(rotM, z['X']))
    V0_X += register.displacement(V0_X, z['V'])

    vis.add_mesh(V0_X, z['T'], actor_name='V0_X')
    lut = vis.actors['V0_X'].GetMapper().GetLookupTable()
    lut.SetTableValue(0, *int2dbl(0, 255, 0))

    # V0 w/ Xg (yellow)
    if 'Xg' in z.keys():
        Xg = z['Xg'][0]
        qg = quat.quat(Xg)
        Q = [quat.quatMultiply(quat.quat(x), qg) for x in z['X']]
        V0_Xg = solveV0_X([quat.rotationMatrix(q) for q in Q])
        V0_Xg += register.displacement(V0_Xg, z['V'])
        vis.add_mesh(V0_Xg, z['T'], actor_name='V0_Xg')
        lut = vis.actors['V0_Xg'].GetMapper().GetLookupTable()
        lut.SetTableValue(0, *int2dbl(255, 255, 0))

    # V (blue)
    vis.add_mesh(z['V'], z['T'], actor_name='V')

    # input frame
    vis.add_image(z['input_frame'])

    # projection constraints
    vis.add_projection(z['C'], z['P'])

    # apply camera actions sequentially
    for action in args.camera_actions:
        method, tup, save_after = parse_camera_action(action)
        print '%s(*%s), save_after=%s' % (method, tup, save_after)
        vis.camera_actions((method, tup))

    vis.execute()
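`int2dbl` converts 8-bit colour components into the [0, 1] doubles expected by `vtkLookupTable.SetTableValue`; a one-line sketch of the assumed helper:

def int2dbl(*rgb):
    # Assumed helper (not the original definition): map 0-255 integer
    # components to 0.0-1.0 doubles.
    return tuple(c / 255.0 for c in rgb)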
    def _add_sectioned_arap(self):
        if self.core_V is None:
            return

        # calculate `Xi`
        ki = self['ki']
        Xb = self['Xb']
        y = self['y']
        X = self['X']

        N = self.core_V.shape[0]
        Xi = np.zeros((N, 3), dtype=np.float64)

        iter_ki = iter(ki)

        for i in xrange(N):
            n = next(iter_ki)
            if n == 0:
                # count 0: no rotation for this vertex
                pass
            elif n < 0:
                # negative count: the next entry indexes directly into `X`
                Xi[i, :] = X[next(iter_ki)]
            else:
                # positive count `n`: read `n` (basis index, coefficient index)
                # pairs (the assumed stream layout is illustrated after this
                # method)
                yi, Xbi = [], []
                for j in xrange(n):
                    Xbi.append(Xb[next(iter_ki)])
                    yi.append(y[next(iter_ki)])

                # FIXME Potential wrong order and will need to check when
                # multiple local basis rotations
                Xi[i, :] = reduce(add, map(mul, yi, Xbi))

        # setup `solve_V`
        T = self['T'] 
        core_V = self.core_V.copy()
        adj, W = weights(core_V, faces_to_cell_array(T), weights_type='uniform')

        solve_V = ARAPVertexSolver(adj, W, core_V)

        # solve for `V1`
        rotM = lambda x: rotationMatrix(quat(x))
        V1 = solve_V(map(rotM, Xi))

        # apply global rotation and scale
        A = self['s'] * rotM(np.ravel(self['Xg']))

        V1 = np.dot(V1, np.transpose(A))

        # register by translation to the instance vertices
        V1 += register.displacement(V1, self[self.vertices_key])

        # add as orange actor
        self.vis.add_mesh(V1, T, actor_name='arap', 
                          is_base=False, 
                          compute_normals=self.compute_normals, 
                          color=(255, 170, 0))
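`ki` is consumed above as a per-vertex variable-length code: a count of 0 means no rotation, a negative count is followed by a direct index into `X`, and a positive count `n` is followed by `n` (basis index, coefficient index) pairs blended as a weighted sum. A hypothetical three-vertex stream under that reading:

# Hypothetical `ki` stream (values invented for illustration):
#   ki = [ 0,              # vertex 0: no rotation
#         -1, 4,           # vertex 1: use X[4] directly
#          2, 0, 1, 1, 3]  # vertex 2: y[1] * Xb[0] + y[3] * Xb[1]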
    def _add_deprecated_sectioned_arap(self):
        if self.core_V is None:
            return

        # calculate `Xi`
        K = self['K']
        Xb = self['Xb']
        X = self['X']
        y = self['y']

        N = self.core_V.shape[0]
        Xi = np.zeros((N, 3), dtype=np.float64)
        for i in xrange(N):
            if K[i, 0] == 0:
                pass
            elif K[i, 0] < 0:
                Xi[i,:] = X[K[i, 1]]
            else:
                Xi[i,:] = axScale(y[K[i, 0] - 1], Xb[K[i, 1]])

        # setup `solve_V`
        T = self['T'] 
        core_V = self.core_V.copy()
        adj, W = weights(core_V, faces_to_cell_array(T), weights_type='uniform')

        solve_V = ARAPVertexSolver(adj, W, core_V)

        # solve for `V1`
        rotM = lambda x: rotationMatrix(quat(x))
        V1 = solve_V(map(rotM, Xi))

        # apply global rotation and scale
        A = self['s'] * rotM(np.ravel(self['Xg']))

        V1 = np.dot(V1, np.transpose(A))

        # register by translation to the instance vertices
        V1 += register.displacement(V1, self[self.vertices_key])

        # add as orange actor
        self.vis.add_mesh(V1, T, actor_name='arap', 
                          is_base=False, 
                          color=(255, 170, 0))
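`axScale` scales an axis-angle rotation; assuming it simply scales the rotation vector (the axis stays fixed and only the angle changes), a sketch:

import numpy as np

def axScale(s, x):
    # Assumed behaviour (not the original definition): scale the rotation
    # angle of the axis-angle vector `x` by `s`.
    return s * np.asarray(x, dtype=np.float64)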
    def _add_regular_arap(self):
        if self['y'] is not None or self.core_V is None:
            return

        T = self['T'] 
        core_V = self.core_V.copy()
        adj, W = weights(core_V, faces_to_cell_array(T), weights_type='uniform')

        solve_V = ARAPVertexSolver(adj, W, core_V)

        rotM = lambda x: rotationMatrix(quat(x))
        V1 = solve_V(map(rotM, self['X']))

        xg = np.ravel(self['Xg'])
        A = self['s'] * rotM(xg)

        V1 = np.dot(V1, np.transpose(A))
        V1 += register.displacement(V1, self[self.vertices_key])

        self.vis.add_mesh(V1, T, actor_name='arap', is_base=False)

        lut = self.vis.actors['arap'].GetMapper().GetLookupTable()
        lut.SetTableValue(0, 1., 0.667, 0.)
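`register.displacement(A, B)` is always applied as `V1 += register.displacement(V1, target)`, so it presumably returns the translation that aligns `A` to `B`; a centroid-difference sketch under that assumption:

import numpy as np

def displacement(A, B):
    # Assumed behaviour: translation moving the centroid of `A` onto the
    # centroid of `B`.
    return np.mean(B, axis=0) - np.mean(A, axis=0)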
    def _add_basis_arap(self):
        if self.core_V is None:
            return

        X = self['X']
        y = self['y']
        if not isinstance(y, np.ndarray) or len(X) != y.shape[0]:
            return

        T = self['T'] 
        core_V = self.core_V.copy()
        adj, W = weights(core_V, faces_to_cell_array(T), weights_type='uniform')

        solve_V = ARAPVertexSolver(adj, W, core_V)

        scaled_X = []
        for y_, X_ in izip(y, X):
            scaled_X.append(map(lambda x: axScale(y_, x), X_))

        N = core_V.shape[0]
        X = []
        for i in xrange(N):
            X.append(reduce(axAdd, [X_[i] for X_ in scaled_X]))

        rotM = lambda x: rotationMatrix(quat(x))
        V1 = solve_V(map(rotM, X))

        xg = np.ravel(self['Xg'])
        A = self['s'] * rotM(xg)

        V1 = np.dot(V1, np.transpose(A))

        V1 += register.displacement(V1, self[self.vertices_key])

        self.vis.add_mesh(V1, T, actor_name='arap', is_base=False)
        lut = self.vis.actors['arap'].GetMapper().GetLookupTable()
        lut.SetTableValue(0, 1., 0., 1.)
def test_vtkInteractorStyleRubberBandPick():
    ren = vtk.vtkRenderer()
    ren.SetBackground(1.0, 1.0, 1.0)

    V, T = box_model(5, 10, 1.0, 1.0)
    poly_data = numpy_to_vtkPolyData(V, faces.faces_to_cell_array(T))

    idFilter = vtk.vtkIdFilter()
    idFilter.SetInput(poly_data)
    idFilter.SetIdsArrayName('i')
    idFilter.Update()

    poly_data = idFilter.GetOutput()

    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInput(poly_data)
    mapper.SetScalarVisibility(False)

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(0.0, 0.6, 0.3)

    ren.AddActor(actor)

    visible = vtk.vtkSelectVisiblePoints()
    visible.SetInput(poly_data)
    visible.SetRenderer(ren)

    # highlight
    sphere = vtk.vtkSphereSource()
    sphere.SetRadius(0.2)

    highlight_poly_data = vtk.vtkPolyData()

    highlight_glyph = vtk.vtkGlyph3D()
    highlight_glyph.SetInput(highlight_poly_data)
    highlight_glyph.SetSourceConnection(sphere.GetOutputPort())

    highlight_mapper = vtk.vtkPolyDataMapper()
    highlight_mapper.SetInputConnection(highlight_glyph.GetOutputPort())

    highlight_actor = vtk.vtkActor()
    highlight_actor.SetMapper(highlight_mapper)
    highlight_actor.GetProperty().SetColor(1.0, 0.0, 0.0)
    highlight_actor.VisibilityOff()
    highlight_actor.PickableOff()
    ren.AddActor(highlight_actor)

    # render window
    render_window = vtk.vtkRenderWindow()
    render_window.AddRenderer(ren)
    render_window.SetSize(400, 400)

    picker = vtk.vtkAreaPicker()

    def pickCallback(obj, event):
        props = obj.GetProp3Ds()
        if props.GetNumberOfItems() <= 0:
            return

        # extract the geometry that falls inside the pick frustum
        extract_geometry = vtk.vtkExtractGeometry()
        extract_geometry.SetImplicitFunction(picker.GetFrustum())
        extract_geometry.SetInput(props.GetLastProp3D().GetMapper().GetInput())
        extract_geometry.Update()

        unstructured_grid = extract_geometry.GetOutput()

        if unstructured_grid.GetPoints().GetNumberOfPoints() <= 0:
            return

        visible.Update()
        if visible.GetOutput().GetPoints().GetNumberOfPoints() <= 0:
            return

        # point ids that are both inside the pick frustum and currently visible
        i = np.intersect1d(
            vtk_to_numpy(unstructured_grid.GetPointData().GetArray('i')),
            vtk_to_numpy(visible.GetOutput().GetPointData().GetArray('i')))

        if i.shape[0] <= 0:
            return

        vtk_points = vtk.vtkPoints()
        vtk_points.SetNumberOfPoints(i.shape[0])
        vtk_points_data = vtk_to_numpy(vtk_points.GetData())
        vtk_points_data.flat = np.require(V[i], np.float32, 'C')

        highlight_poly_data.SetPoints(vtk_points)
        highlight_actor.VisibilityOn()

    picker.AddObserver('EndPickEvent', pickCallback)

    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(render_window)
    iren.SetPicker(picker)

    ren.ResetCamera()

    render_window.Render()

    style = vtk.vtkInteractorStyleRubberBandPick()
    style.SetCurrentRenderer(ren)
    iren.SetInteractorStyle(style)

    iren.Initialize()
    iren.Start()
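With `vtkInteractorStyleRubberBandPick`, pressing the 'r' key toggles selection mode; a left-button drag then draws the rubber band, and the `vtkAreaPicker` attached to the interactor fires `EndPickEvent`, which is what drives `pickCallback` above.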
Example 10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('input', type=str)
    parser.add_argument('-c',
                        dest='camera_actions',
                        type=str,
                        default=[],
                        action='append',
                        help='camera actions')
    parser.add_argument('--mean_centre', action='store_true', default=False)

    args = parser.parse_args()
    print 'args.input:', args.input
    files = filter(lambda s: s.endswith('.npz'), os.listdir(args.input))

    def sort_key(filename):
        root, ext = os.path.splitext(filename)
        try:
            return int(root)
        except ValueError:
            return -1

    files = map(lambda f: os.path.join(args.input, f),
                sorted(files, key=sort_key))

    print 'input:'
    pprint(files)

    # `V0` taken from first frame
    print 'V0 <- ', files[0]
    z = np.load(files[0])
    V0 = z['V']
    if args.mean_centre:
        V0 -= np.mean(V0, axis=0)
    T = z['T']

    vis = VisualiseMesh()

    # V0 (purple)
    vis.add_mesh(V0, T, actor_name='V0')
    lut = vis.actors['V0'].GetMapper().GetLookupTable()
    lut.SetTableValue(0, *int2dbl(255, 0, 255))

    # setup solveV0_X
    T_ = faces_to_cell_array(z['T'])
    adj, W = weights.weights(V0, T_, weights_type='cotan')
    solveV0_X = ARAPVertexSolver(adj, W, V0)

    # setup color map
    cmap = cm.jet(np.linspace(0., 1., len(files) - 1))

    # function to translate axis-angle to rotation matrices
    rotM = lambda x: quat.rotationMatrix(quat.quat(x))

    # add each actor
    for i, file_ in enumerate(files[1:]):
        # load instance file
        print 'V0_X[%d] <- ' % i, file_
        z = np.load(file_)

        # output scale and global rotation (axis-angle)
        def safe_print(var):
            try:
                v = z[var].squeeze()
            except KeyError:
                return

            print ' `%s`:' % var, np.around(v, decimals=3)

        safe_print('s')
        safe_print('Xg')

        # solve for the new coordinates and mean centre
        V0_X = solveV0_X(map(rotM, z['X']))
        if args.mean_centre:
            V0_X -= np.mean(V0_X, axis=0)

        # add actor and adjust lookup table for visualisation
        actor_name = 'V0_X_%d' % i
        vis.add_mesh(V0_X, z['T'], actor_name=actor_name)

        lut = vis.actors[actor_name].GetMapper().GetLookupTable()
        lut.SetTableValue(0, *cmap[i, :3])

    # apply camera actions sequentially
    for action in args.camera_actions:
        method, tup, save_after = parse_camera_action(action)
        print '%s(*%s), save_after=%s' % (method, tup, save_after)
        vis.camera_actions((method, tup))

    vis.execute()

    return

    # NOTE: everything below this point is unreachable because of the
    # `return` above.
    # V0 w/ X (green)

    V0_X = solveV0_X([quat.rotationMatrix(quat.quat(x)) for x in z['X']])
    vis.add_mesh(V0_X, z['T'], actor_name='V0_X')
    lut = vis.actors['V0_X'].GetMapper().GetLookupTable()
    lut.SetTableValue(0, *int2dbl(0, 255, 0))

    # V0 w/ Xg (yellow)
    qg = quat.quat(z['Xg'][0])
    Q = [quat.quatMultiply(quat.quat(x), qg) for x in z['X']]
    V0_Xg = solveV0_X([quat.rotationMatrix(q) for q in Q])
    vis.add_mesh(V0_Xg, z['T'], actor_name='V0_Xg')
    lut = vis.actors['V0_Xg'].GetMapper().GetLookupTable()
    lut.SetTableValue(0, *int2dbl(255, 255, 0))

    # V (blue)
    vis.add_mesh(z['V'], z['T'], actor_name='V')

    # input frame
    vis.add_image(z['input_frame'])

    # projection constraints
    vis.add_projection(z['C'], z['P'])
Example 11
def main():
    # Input
    lambdas = np.r_[1e0, 1e0].astype(np.float64)
    V, T = box_model(3, 20, 1.0, 1.0)

    # Setup `solve_V_X`
    adj, W = weights.weights(V,
                             faces.faces_to_cell_array(T),
                             weights_type='cotan')
    solve_V_X = ARAPVertexSolver(adj, W, V)

    # Setup `solve_single`
    def solve_single(C, P):
        X = np.zeros_like(V)
        Xg = np.zeros((1, 3), dtype=np.float64)
        s = np.ones((1, 1), dtype=np.float64)
        V1 = V.copy()

        status = solve_single_rigid_arap_proj(V,
                                              T,
                                              X,
                                              Xg,
                                              s,
                                              V1,
                                              C,
                                              P,
                                              lambdas,
                                              uniformWeights=False,
                                              maxIterations=100,
                                              updateThreshold=1e-6,
                                              gradientThreshold=1e-6,
                                              improvementThreshold=1e-6)

        return X, V1

    # Setup `C`, `P1` and `P2`
    heights = np.unique(V[:, 2])

    C1 = np.argwhere(V[:, 2] >= heights[-3]).flatten()
    L1 = C1.shape[0]
    C2 = np.argwhere(V[:, 2] <= heights[2]).flatten()
    L2 = C2.shape[0]
    C = np.r_[C1, C2]

    P1 = V[C, :2].copy()
    P1[:L1, 1] += 10.0

    P2 = V[C, :2].copy()
    P2[:L1, 0] += 10.0

    # Solve for `X1` and `X2`
    X1, V1 = solve_single(C, P1)
    X2, V2 = solve_single(C, P2)

    # Setup `V_from_X1_X2` which "interpolates" `X1` and `X2`
    def V_from_X1_X2(a, b):
        X12 = map(lambda x: axis_angle.axMakeInterpolated(a, x[0], b, x[1]),
                  izip(X1, X2))

        V12 = solve_V_X(map(lambda x: quat.rotationMatrix(quat.quat(x)), X12))

        # Register bottom fixed layers
        A, d = right_multiply_rigid_transform(V12[C[L1:]], V[C[L1:]])
        V12 = np.dot(V12, A) + d

        return V12

    # Visualise the interpolation of the distortions
    t = np.linspace(-2., 2., 5, endpoint=True)
    N = t.shape[0] * t.shape[0]
    cmap = cm.jet(np.linspace(0., 1., N, endpoint=True))

    vis = VisualiseMesh()

    for i, (u, v) in enumerate(product(t, t)):
        actor_name = 'V12_%d' % i

        V12 = V_from_X1_X2(u, v)
        vis.add_mesh(V12, T, actor_name=actor_name)

        # Change color of actor
        lut = vis.actors[actor_name].GetMapper().GetLookupTable()
        lut.SetTableValue(0, *cmap[i, :3])

        # Change actor to dense surface (instead of default wireframe)
        vis.actor_properties(actor_name, ('SetRepresentation', (3, )))

    vis.camera_actions(('SetParallelProjection', (True, )))
    vis.execute(magnification=4)
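`right_multiply_rigid_transform(P, Q)` returns `A, d` used as `P.dot(A) + d`, i.e. a rigid alignment of `P` onto `Q` in row-vector (right-multiply) form; a Kabsch-style sketch, assuming a least-squares fit:

import numpy as np

def right_multiply_rigid_transform(P, Q):
    # Hypothetical sketch: rotation `A` (applied on the right of row vectors)
    # and translation `d` that best align `P` onto `Q` in the least-squares
    # sense (orthogonal Procrustes / Kabsch).
    Pc, Qc = P.mean(axis=0), Q.mean(axis=0)
    U, _, Vt = np.linalg.svd(np.dot((P - Pc).T, Q - Qc))
    S = np.diag([1.0, 1.0, np.sign(np.linalg.det(np.dot(U, Vt)))])
    A = np.dot(np.dot(U, S), Vt)
    d = Qc - np.dot(Pc, A)
    return A, d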
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('input', type=str)
    parser.add_argument('aux_scales', type=str)
    parser.add_argument('-c',
                        dest='camera_actions',
                        type=str,
                        default=[],
                        action='append',
                        help='camera actions')
    parser.add_argument('-o',
                        dest='output_directory',
                        type=str,
                        default=None,
                        help='output directory')
    parser.add_argument('--rotations_index',
                        type=int,
                        default=None,
                        help='rotations index')
    parser.add_argument('--magnification',
                        type=int,
                        default=1,
                        help='magnification')
    parser.add_argument('--rigidly_register',
                        action='store_true',
                        default=False)
    parser.add_argument('--normalise_rotations',
                        action='store_true',
                        default=False)

    args = parser.parse_args()

    print ctime()
    print 'args:'
    pprint(args.__dict__)

    z = np.load(args.input)

    vis = VisualiseMesh()

    # V0 (purple)
    V0 = z['V']
    vis.add_mesh(V0, z['T'], actor_name='V0')
    lut = vis.actors['V0'].GetMapper().GetLookupTable()
    lut.SetTableValue(0, *int2dbl(255, 0, 255))

    # setup `solveV0_X`
    T_ = faces_to_cell_array(z['T'])
    adj, W = weights.weights(V0, T_, weights_type='cotan')
    solveV0_X = ARAPVertexSolver(adj, W, V0)

    # load X
    base_X = z['X']
    if args.rotations_index is not None:
        base_X = base_X[args.rotations_index]
    if args.normalise_rotations:
        max_ = np.amax(np.sqrt(np.sum(base_X * base_X, axis=1)))
        base_X /= max_

    # additional scales to visualise, parsed from the command line with
    # eval (e.g. "[0.5, 1.0, 2.0]")
    aux_scales = eval(args.aux_scales)
    print 'aux_scales: ', aux_scales

    rotM = lambda x: quat.rotationMatrix(quat.quat(x))

    N = len(aux_scales)
    cmap = cm.jet(np.linspace(0., 1., N, endpoint=True))

    for i, scale in enumerate(aux_scales):
        print 'scale:', scale
        X = map(lambda x: axScale(scale, x), base_X)
        V0_X = solveV0_X(map(rotM, X))

        if args.rigidly_register:
            A, d = right_multiply_rigid_uniform_scale_transform(V0_X, V0)
            V0_X = np.dot(V0_X, A) + d

        actor_name = 'V0_X_%d' % i
        print 'actor_name:', actor_name
        vis.add_mesh(V0_X, z['T'], actor_name=actor_name)
        lut = vis.actors[actor_name].GetMapper().GetLookupTable()
        lut.SetTableValue(0, *cmap[i, :3])
        vis.actor_properties(actor_name, ('SetRepresentation', (3, )))

    # input frame
    try:
        input_frame = z['input_frame']
    except KeyError:
        pass
    else:
        vis.add_image(input_frame)

    # interactive visualisation, or render to file?
    interactive_session = args.output_directory is None

    # setup output directory
    if not interactive_session and not os.path.exists(args.output_directory):
        print 'Creating directory: ', args.output_directory
        os.makedirs(args.output_directory)

    # n is the index for output files
    n = count(0)

    # apply camera actions sequentially
    for action in args.camera_actions:
        method, tup, save_after = parse_camera_action(action)
        print '%s(*%s), save_after=%s' % (method, tup, save_after)

        vis.camera_actions((method, tup))

        # save if required
        if not interactive_session and save_after:
            full_path = os.path.join(args.output_directory, '%d.png' % next(n))
            print 'Output: ', full_path
            vis.write(full_path, magnification=args.magnification)

    # show if interactive
    if interactive_session:
        print 'Interactive'
        vis.execute(magnification=args.magnification)