def arcball(x, y, w, h):
    r = (w + h) / 2.0
    x = -(2.0 * x - w) / r
    y = (2.0 * y - h) / r
    h = math.hypot(x, y)
    if h > 1:
        return Quaternion(0, x / h, y / h, 0)
    else:
        return Quaternion(0, x, y, math.sqrt(1 - h**2))
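# A minimal usage sketch for arcball() above (an illustration, not from the original
# source; the canvas size and drag coordinates are made up). Composing the quaternions
# for the previous and the current mouse position, as the on_mouse_move handlers below
# do, accumulates the drag rotation.
from vispy.util.quaternion import Quaternion

w, h = 400, 400                      # hypothetical canvas size
x0, y0, x1, y1 = 100, 100, 120, 90   # hypothetical drag from (x0, y0) to (x1, y1)

rotation = Quaternion()              # identity orientation
rotation = rotation * arcball(x0, y0, w, h) * arcball(x1, y1, w, h)
model = rotation.get_matrix()        # 4x4 matrix suitable for a 'u_model' uniform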
def sample_quat(rx, ry, rz, degrees=False):
    """Create a quaternion from the given Euler angles."""
    if degrees:
        rx, ry, rz = np.radians([rx, ry, rz])
    # Obtain quaternions
    qx = Quaternion(np.cos(rx / 2), np.sin(rx / 2), 0, 0)
    qy = Quaternion(np.cos(ry / 2), 0, np.sin(ry / 2), 0)
    qz = Quaternion(np.cos(rz / 2), 0, 0, np.sin(rz / 2))
    # Almost done
    return qx * qy * qz
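# A minimal usage sketch for sample_quat() above (assumes vispy and numpy are
# importable and sample_quat is in scope): apply a small yaw step every frame and
# renormalize, as getOrientation() further below does in its stand-alone branch.
import numpy as np
from vispy.util.quaternion import Quaternion

orientation = Quaternion()                    # identity
step = sample_quat(0, 0.5, 0, degrees=True)   # 0.5 degree rotation about the y axis
orientation = (step * orientation).normalize()
print(orientation.get_matrix())               # 4x4 matrix for a model/view uniform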
def test_quaternion2euler(qtbot):
    """Test quaternion to euler angle conversion."""
    # Test roundtrip degrees
    angles = (12, 53, 92)
    q = Quaternion.create_from_euler_angles(*angles, degrees=True)
    ea = quaternion2euler(q, degrees=True)
    np.testing.assert_allclose(ea, angles)

    # Test roundtrip radians
    angles = (0.1, -0.2, 1.2)
    q = Quaternion.create_from_euler_angles(*angles)
    ea = quaternion2euler(q)
    np.testing.assert_allclose(ea, angles)
def transformCube(self, t, q):
    quaternion = Quaternion(*q)
    self._transform.reset()
    self._transform.matrix = quaternion.get_matrix()
    self._transform.translate(t)
    x, y, z = t
    self._xPos = np.append(self._xPos, x)
    self._yPos = np.append(self._yPos, y)
    self._zPos = np.append(self._zPos, z)
    self._plot.set_data((self._xPos.transpose(), self._yPos.transpose(),
                         self._zPos.transpose()), symbol=None)
class Canvas(app.Canvas):
    def __init__(self):
        app.Canvas.__init__(self, 'Cube', keys='interactive', size=(400, 400))
        self.cube = CubeVisual((1.0, 0.5, 0.25), color='red', edge_color='black')
        self.quaternion = Quaternion()

        # Create a TransformSystem that will tell the visual how to draw
        self.cube_transform = transforms.AffineTransform()
        self.cube_transform.scale((100, 100, 0.001))
        self.cube_transform.translate((200, 200))
        self.tr_sys = transforms.TransformSystem(self)
        self.tr_sys.visual_to_document = self.cube_transform
        self.show()

    def on_draw(self, event):
        gloo.set_viewport(0, 0, *self.physical_size)
        gloo.clear('white')
        self.tr_sys.auto_configure()
        self.cube.draw(self.tr_sys)

    def on_mouse_move(self, event):
        if event.button == 1 and event.last_event is not None:
            x0, y0 = event.last_event.pos
            x1, y1 = event.pos
            w, h = self.size
            self.quaternion = (self.quaternion *
                               Quaternion(*_arcball(x0, y0, w, h)) *
                               Quaternion(*_arcball(x1, y1, w, h)))
            self.cube_transform.matrix = self.quaternion.get_matrix()
            self.cube_transform.scale((100, 100, 0.001))
            self.cube_transform.translate((200, 200))
            self.update()
class Canvas(app.Canvas):
    def __init__(self):
        app.Canvas.__init__(self, 'Cube', keys='interactive', size=(400, 400))
        self.cube = BoxVisual(1.0, 0.5, 0.25, color='red', edge_color='black')
        self.cube.transform = transforms.MatrixTransform()
        self.cube.transform.scale((100, 100, 0.001))
        self.cube.transform.translate((200, 200))
        self.quaternion = Quaternion()
        self.show()

    def on_resize(self, event):
        vp = (0, 0, self.physical_size[0], self.physical_size[1])
        self.context.set_viewport(*vp)
        self.cube.transforms.configure(canvas=self, viewport=vp)

    def on_draw(self, event):
        self.context.clear('white')
        self.cube.draw()

    def on_mouse_move(self, event):
        if event.button == 1 and event.last_event is not None:
            x0, y0 = event.last_event.pos
            x1, y1 = event.pos
            w, h = self.size
            self.quaternion = (self.quaternion *
                               Quaternion(*_arcball(x0, y0, w, h)) *
                               Quaternion(*_arcball(x1, y1, w, h)))
            self.cube.transform.matrix = self.quaternion.get_matrix()
            self.cube.transform.scale((100, 100, 0.001))
            self.cube.transform.translate((200, 200))
            self.update()
class Canvas(app.Canvas):
    def __init__(self):
        app.Canvas.__init__(self, 'Cube', keys='interactive', size=(400, 400))
        self.cube = CubeVisual((1.0, 0.5, 0.25), color='red', edge_color='black')
        self.cube.transform = transforms.MatrixTransform()
        self.cube.transform.scale((100, 100, 0.001))
        self.cube.transform.translate((200, 200))
        self.quaternion = Quaternion()
        self.show()

    def on_resize(self, event):
        vp = (0, 0, self.physical_size[0], self.physical_size[1])
        self.context.set_viewport(*vp)
        self.cube.transforms.configure(canvas=self, viewport=vp)

    def on_draw(self, event):
        self.context.clear('white')
        self.cube.draw()

    def on_mouse_move(self, event):
        if event.button == 1 and event.last_event is not None:
            x0, y0 = event.last_event.pos
            x1, y1 = event.pos
            w, h = self.size
            self.quaternion = (self.quaternion *
                               Quaternion(*_arcball(x0, y0, w, h)) *
                               Quaternion(*_arcball(x1, y1, w, h)))
            self.cube.transform.matrix = self.quaternion.get_matrix()
            self.cube.transform.scale((100, 100, 0.001))
            self.cube.transform.translate((200, 200))
            self.update()
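# The Canvas variants above are all started the same way. A minimal, hypothetical
# entry point (not part of the original examples):
if __name__ == '__main__':
    from vispy import app
    win = Canvas()
    app.run()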
def update(ev):
    global y, p, tx, ty, tz, q1, q2, signx, signy, signz
    q1 = Quaternion.create_from_axis_angle(y, 0, 0, 1, degrees=True)
    q2 = Quaternion.create_from_axis_angle(p, 0, 1, 0, degrees=True)
    q = q1 * q2
    mp.transformCube((tx, ty, tz), (q.w, q.x, q.y, q.z))
    mp.update()
    y += 1
    p += 3
    tx += signx * 0.08
    ty += signy * 0.05
    tz += signz * 0.02  # use the z-direction sign here, not signy
    if abs(tx) > 3:
        signx *= -1
    if abs(ty) > 3:
        signy *= -1
    if abs(tz) > 3:
        signz *= -1
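# How update() above is typically driven (a hypothetical sketch; `mp`, the
# MotionPlotter instance, is created in the __main__ fragment later in this
# collection): attach the callback to a vispy timer and start the event loop.
from vispy import app

timer = app.Timer(interval=0.016, connect=update, start=True)
app.run()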
def test_quaternion2euler(angles, degrees):
    """Test quaternion to euler angle conversion."""
    # Test for degrees
    q = Quaternion.create_from_euler_angles(*angles, degrees)
    ea = quaternion2euler(q, degrees=degrees)
    q_p = Quaternion.create_from_euler_angles(*ea, degrees=degrees)

    # We now compare the corresponding quaternions; they should be equal or
    # opposite (they are already unit quaternions).
    q_values = np.array([q.w, q.x, q.y, q.z])
    q_p_values = np.array([q_p.w, q_p.x, q_p.y, q_p.z])
    nn_zero_ind = np.argmax((q_values != 0) & (q_p_values != 0))
    q_values *= np.sign(q_values[nn_zero_ind])
    q_p_values *= np.sign(q_p_values[nn_zero_ind])

    np.testing.assert_allclose(q_values, q_p_values)
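# Why the sign normalization above is needed (a small self-contained check, assuming
# only vispy and numpy): q and -q describe the same rotation, so their rotation
# matrices are identical even though their components differ in sign.
import numpy as np
from vispy.util.quaternion import Quaternion

q = Quaternion.create_from_euler_angles(12, 53, 92, degrees=True)
q_neg = Quaternion(-q.w, -q.x, -q.y, -q.z)
np.testing.assert_allclose(q.get_matrix(), q_neg.get_matrix(), atol=1e-12)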
def on_mouse_move(self, event):
    if event.button == 1 and event.last_event is not None:
        x0, y0 = event.last_event.pos
        x1, y1 = event.pos
        w, h = self.size
        self.quaternion = (self.quaternion *
                           Quaternion(*_arcball(x0, y0, w, h)) *
                           Quaternion(*_arcball(x1, y1, w, h)))
        self.model = self.quaternion.get_matrix()
        self.program['u_model'] = self.model
        self.update()
    elif event.button == 2 and event.last_event is not None:
        x0, y0 = event.last_event.pos
        x1, y1 = event.pos
        self.translate += (y1 - y0)
        self.translate = max(-1, self.translate)
        self.view = translate((0, 0, -self.translate), dtype=np.float32)
        self.program['u_view'] = self.view
        self.update()
def set_view(self, view):
    """(Re-)set camera position.

    Parameters
    ----------
    view :      'XY' | 'XZ' | 'YZ' | Quaternion

    """
    if isinstance(view, Quaternion):
        q = view
    elif view == 'XY':
        q = Quaternion(w=1, x=.4, y=0, z=0)
    elif view == 'XZ':
        q = Quaternion(w=1, x=-.4, y=0, z=0)
    elif view == 'YZ':
        q = Quaternion(w=.6, x=0.5, y=0.5, z=-.4)
    else:
        raise TypeError(f'Unable to set view from {type(view)}')

    self.camera3d._quaternion = q
    # This is necessary to force a redraw
    self.camera3d.set_range()
def handle_mouse(self, last_pos, cur_pos):
    va = _get_arcball_vector(*cur_pos, *self.size)
    vb = _get_arcball_vector(*last_pos, *self.size)
    angle = min(np.arccos(min(1.0, np.dot(va, vb))) * self.rotate_speed,
                self.max_speed)
    axis_in_camera_coord = np.cross(va, vb)
    cam_to_world = self.view_mat()[:3, :3].T
    axis_in_world_coord = cam_to_world.dot(axis_in_camera_coord)
    rotation_quat = Quaternion.create_from_axis_angle(angle, *axis_in_world_coord)
    self.position = rotation_quat.rotate_point(self.position)
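# The helper _get_arcball_vector used above is not shown here; a common
# implementation (an assumption, not the original code) maps a pixel to a point on
# the unit arcball sphere:
import numpy as np

def _get_arcball_vector(x, y, width, height):
    # Map pixel coordinates to [-1, 1], with y pointing up.
    p = np.array([2.0 * x / width - 1.0, 1.0 - 2.0 * y / height, 0.0])
    d = p[0] ** 2 + p[1] ** 2
    if d <= 1.0:
        p[2] = np.sqrt(1.0 - d)    # inside the silhouette: lift onto the sphere
    else:
        p /= np.sqrt(d)            # outside: project onto the equator
    return p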
def __init__(self, pdbdata, mode='cpk'):
    # Startup
    app.Canvas.__init__(self, keys='interactive', size=(W, H))

    # Loading shaders
    self.program = gloo.Program(vertex, fragment)

    # Analyze pdb file
    self.parser = PDBParser(QUIET=True, PERMISSIVE=True)
    self.structure = self.parser.get_structure('model', pdbdata)

    # DSSP prediction
    self.pmodel = self.structure[0]
    self.dssp = DSSP(self.pmodel, pdbdata)

    # Mode selection
    if mode not in Canvas.visualization_modes:
        raise Exception('Not recognized visualization mode %s' % mode)
    self.mode = mode

    # Camera settings
    self.translate = 50
    self.translate = max(-1, self.translate)
    self.view = translate((0, 0, -self.translate), dtype=np.float32)
    self.model = np.eye(4, dtype=np.float32)
    self.projection = np.eye(4, dtype=np.float32)
    self.program['u_projection'] = self.projection
    self.quaternion = Quaternion()

    # Load data depending on the mode
    self.apply_zoom()
    self.atom_information()
    self.load_data()

    self.show()
def getOrientation(self):
    if self.QuadState is None:
        # if not connected to gs-control.py, being run as a stand-alone script
        self.orientation_quat = (vis_util.sample_quat(0, 0.5, 0, degrees=True) *
                                 self.orientation_quat).normalize()
        # self.orientation_quat = Quaternion(0.9659258262890683, -0.25881904510252074, 0, 0) * self.orientation_quat
        return self.orientation_quat
    else:
        hh = self.QuadState.heading
        gg = Quaternion()
        gg.w = hh.w
        gg.x = hh.z
        gg.y = hh.y
        gg.z = -hh.x
        return gg.normalize()
def Quadcopter(self, nr, nc, r, l):
    quad_frame, colors = vis_util.create_quad_frame(rows=nr, cols=nc, radius=r,
                                                    length=l, offset=True)
    quad_mesh = vis_util.MyMeshData()
    quad_mesh.set_vertices(quad_frame.get_vertices())
    quad_mesh.set_faces(quad_frame.get_faces())
    quad_mesh.set_vertex_colors(colors)
    vertices, filled, outline = quad_mesh.get_glTriangles()

    # make vertex array, etc
    self.filled_buf = gloo.IndexBuffer(filled)
    self.outline_buf = gloo.IndexBuffer(outline)
    self.vertices_buff = gloo.VertexBuffer(vertices)

    # orientation
    if self.QuadState is None:
        self.orientation_quat = Quaternion(1, 0, 0, 0)
def estimate(quat_packet, ns_qstate):
    """
    quat_packet: list of 8 raw bytes
    convert to signed int16; converted to float32;
    update ns_qstate.heading;
    """
    for i in range(0, 8, 2):
        data[i // 2] = (quat_packet[i] << 8) | quat_packet[i + 1]
        if data[i // 2] & 0x8000:
            data[i // 2] = data[i // 2] - 0x10000

    qq = np.array([float(data[0]) / 16384.0,
                   float(data[1]) / 16384.0,
                   float(data[2]) / 16384.0,
                   float(data[3]) / 16384.0], dtype=np.float32)

    # quat_packet -> data -> quaternion
    # The order is weird, right?
    # That's because of the way GroundStation and Quadcopter Coordinate Systems are aligned.
    # This weird order just converts rotation in Quad-system to GS-system.
    ns_qstate.heading = Quaternion(qq[0], -qq[1], -qq[3], qq[2])
    ns_qstate.rpy = getRPY(ns_qstate.heading)

    # print(ns_qstate.heading)
    print(ns_qstate.rpy * 180.0 / np.pi)
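# A quick check of the byte handling in estimate() above (plain Python; `data` is
# assumed to be a module-level buffer of four integer slots in the original module):
# two raw bytes are combined big-endian into a signed int16 and scaled by 1/16384 to
# a unit-quaternion component, matching the scaling used above.
hi, lo = 0xC0, 0x00           # hypothetical raw byte pair
val = (hi << 8) | lo
if val & 0x8000:              # sign-extend the 16-bit value
    val -= 0x10000
print(val, val / 16384.0)     # -16384 -1.0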
def __init__(self, vertices, colors, K, nH, nW, faces=0, image_grid=0, labels=0,
             nframes=1, edge_thresh=1000):
    app.Canvas.__init__(self, keys='interactive', size=(nW, nH))

    numPnts = vertices.shape[0] // nframes  # integer number of points per frame
    self.frame = 0
    self.nframes = nframes
    self.npoints = numPnts
    self.has_labels = 0
    self.per_frame_label = 0
    self.faces_buf = {}

    if isinstance(labels, np.ndarray):
        self.per_frame_label = 1
        self.has_labels = 1
        seg_colors = np.zeros([np.prod(labels.shape), 4]).astype(np.float32)
        cmap = cmx.rainbow(np.linspace(0, 1, np.maximum(np.max(labels) + 1,
                                                        len(np.unique(labels)))))
        # cmap = np.array([[1.0000, 0.2857, 0, 1.0],
        #                  [1.0000, 0.5714, 0, 1.0],
        #                  [1.0000, 0.8571, 0, 1.0],
        #                  [0.8571, 1.0000, 0, 1.0],
        #                  [0.5714, 1.0000, 0, 1.0],
        #                  [     0, 1.0000, 0.8571, 1.0],
        #                  [     0, 0.8571, 1.0000, 1.0],
        #                  [     0, 0.5714, 1.0000, 1.0],
        #                  [     0, 0.2857, 1.0000, 1.0],
        #                  [     0,      0, 1.0000, 1.0],
        #                  [0.2857,      0, 1.0000, 1.0],
        #                  [0.8571,      0, 1.0000, 1.0]])
        for i in np.unique(labels):
            seg_colors[:, 3] = 1.0
            if i != -1:
                mask = labels == i
                seg_colors[mask.reshape(-1), :] = cmap[i, :]
        self.seg_colors = seg_colors

    self.has_faces = 0
    self.draw_points = 1
    if isinstance(faces, dict):
        self.has_faces = 1
        for f in range(nframes):
            self.faces = faces[f]
            vertices_f = vertices[f * numPnts:(f + 1) * numPnts, :]
            mask = self.remove_long_edges(self.faces, vertices_f, edge_thresh)
            faces_temp = self.faces[mask, :]
            self.faces_buf[f] = gloo.IndexBuffer(faces_temp)
    elif image_grid:
        self.has_faces = 1
        ### we have two types of edges
        indices = np.array(range(nH * nW)).astype(np.uint32).reshape((-1, 1))
        triangles_type1 = np.hstack((indices, indices + 1, indices + nW))
        triangles_type2 = np.hstack((indices, indices + nW - 1, indices + nW))
        mask1 = np.ones((nH, nW), dtype=bool)
        mask2 = np.ones((nH, nW), dtype=bool)
        mask1[:, -1] = False
        mask1[-1, :] = False
        mask2[:, 0] = False
        mask2[-1, :] = False
        self.faces = np.vstack((triangles_type1[mask1.reshape(-1), :],
                                triangles_type2[mask2.reshape(-1), :]))

        ## all the frames use the same faces
        if self.has_labels:
            if not self.per_frame_label:
                # remove connections between different labels
                labels_temp = labels.reshape(-1)
                mask01 = labels_temp[self.faces[:, 0]] == labels_temp[self.faces[:, 1]]
                mask02 = labels_temp[self.faces[:, 0]] == labels_temp[self.faces[:, 2]]
                mask12 = labels_temp[self.faces[:, 1]] == labels_temp[self.faces[:, 2]]
                mask = np.logical_and(mask01, mask02)
                mask = np.logical_and(mask, mask12)
                self.faces = self.faces[mask, :]

        for f in range(nframes):
            vertices_f = vertices[f * numPnts:(f + 1) * numPnts, :]
            mask = self.remove_long_edges(self.faces, vertices_f, edge_thresh)
            faces_temp = self.faces[mask, :]
            self.faces_buf[f] = gloo.IndexBuffer(faces_temp)

        # different frames have different faces
        if self.has_labels:
            if self.per_frame_label:
                for f in range(nframes):
                    labels_temp = labels[f * numPnts:(f + 1) * numPnts].reshape(-1)
                    mask01 = labels_temp[self.faces[:, 0]] == labels_temp[self.faces[:, 1]]
                    mask02 = labels_temp[self.faces[:, 0]] == labels_temp[self.faces[:, 2]]
                    mask12 = labels_temp[self.faces[:, 1]] == labels_temp[self.faces[:, 2]]
                    mask = np.logical_and(mask01, mask02)
                    mask = np.logical_and(mask, mask12)
                    faces_temp = self.faces[mask, :]
                    vertices_f = vertices[f * numPnts:(f + 1) * numPnts, :]
                    mask = self.remove_long_edges(faces_temp, vertices_f, edge_thresh)
                    faces_temp = faces_temp[mask, :]
                    self.faces_buf[f] = gloo.IndexBuffer(faces_temp)

    self.vertices = vertices
    self.colors = np.hstack((colors, np.ones((colors.shape[0], 1)).astype(np.float32)))
    self.update_color = 0
    if colors.shape[0] == numPnts * nframes:
        self.update_color = 1

    # data contains the data to render
    self.data = np.zeros(numPnts, [('a_position', np.float32, 3),
                                   ('a_color', np.float32, 4),
                                   ('a_seg_color', np.float32, 4)])
    self.data['a_position'] = self.vertices[0:numPnts, :]
    self.data['a_color'] = self.colors[0:numPnts, :]
    if self.has_labels:
        self.data['a_seg_color'] = self.seg_colors[0:numPnts, :]
    else:
        self.data['a_seg_color'] = self.colors[0:numPnts, :]

    self.center = np.mean(vertices, axis=0)

    self.program = gloo.Program(vert, frag)
    self.program.bind(gloo.VertexBuffer(self.data))
    # self.program['a_color'] = self.data['a_color']
    # self.program['a_position'] = self.data['a_position']
    # self.program['a_seg_color'] = self.data['a_seg_color']

    self.view = translate((0, 0, 0))
    self.model = np.eye(4, dtype=np.float32)

    gloo.set_viewport(0, 0, self.physical_size[0], self.physical_size[1])
    # gloo.set_viewport(0, 0, nW, nH)

    if isinstance(K, np.ndarray):
        projection = projection_from_intrinsics(K, nH, nW)
    else:
        projection = ortho_projection(nH, nW)
    # print(projection)

    self.projection = projection.T
    # self.projection = np.ascontiguousarray(projection)
    # self.projection = np.ascontiguousarray(projection.T)
    # self.projection = perspective(45.0, self.size[0] /
    #                               float(self.size[1]), 2.0, 1000.0)

    self.program['u_projection'] = self.projection
    self.program['u_model'] = self.model
    self.program['u_view'] = self.view
    self.program['u_color'] = 1, 1, 1, 1
    self.program['u_plot_seg'] = 0
    self.program['u_point_size'] = 5

    self.nH = nH
    self.nW = nW
    self.last_x = 0
    self.last_y = 0

    self.substract_center = translate((-self.center[0], -self.center[1], -self.center[2]))
    self.add_center = translate((self.center[0], self.center[1], self.center[2]))
    self.translate_X = self.center[0]
    self.translate_Y = self.center[1]
    self.translate_Z = self.center[2]
    self.scale = -self.center[2] / 100
    self.rot_X = 0
    self.rot_Y = 0
    self.rot_Z = 0
    self.quaternion = Quaternion()
    self.theta = 0
    self.phi = 0

    # gloo.set_clear_color('black')
    gloo.set_clear_color('white')
    gloo.set_state('opaque')
    gloo.set_state(depth_test=True)
    # gloo.set_polygon_offset(1, 1)

    self.show()
class InteractiveViewportMixin(object):
    """ Canvas mixin for interactive rotation, translation and zooming

    This class implements on_resize, on_mouse_move, on_mouse_press,
    on_mouse_release and on_mouse_wheel in such a way that the view matrix
    and the projection matrix change in response to user input.

    The derived class should implement on_update_view and
    on_update_projection to update the matrices in the shader.
    """

    def __init__(self, rview, zview=None, zoom_speed=0.1, move_speed=0.01):
        """ Constructor

        Parameters
        ----------
        rview : float
            Radius of the inscribed sphere within the field of view.
        zview : float
            Distance to the z-clip plane from the origin.
        zoom_speed : float
            How sensitive the field of view is to mouse wheel rolling.
        move_speed : float
            How sensitive the translation of the view is to mouse dragging.
        """
        self.rview = rview
        self.zview = zview if zview else rview
        self.zoom_speed = zoom_speed
        self.move_speed = move_speed
        self._translation = (0.0, 0.0, 0.0)
        self._quaternion = Quaternion()

    def on_update_view(self, view):
        """ Handle view matrix change """
        pass

    def on_update_projection(self, proj):
        """ Handle projection matrix change """
        pass

    def get_view_matrix(self):
        """ Return current view matrix """
        return np.dot(self._quaternion.get_matrix(),
                      transforms.translate(self._translation))

    def get_projection_matrix(self):
        """ Return current projection matrix """
        # Determine the field of view of the orthographic projection so that
        # the sphere of radius rview centered at the origin inscribes the
        # field of view.
        width, height = self.physical_size
        aspect = float(width) / height
        if aspect > 1:
            top = self.rview
            right = self.rview * aspect
        else:
            top = self.rview / aspect
            right = self.rview
        assert min(top, right) == self.rview
        left = -right
        bottom = -top
        near = self.zview
        far = -self.zview
        left -= 1e-6
        right += 1e-6
        bottom -= 1e-6
        top += 1e-6
        near -= 1e-6
        far += 1e-6
        return transforms.ortho(left, right, bottom, top, near, far)

    def _update_view(self):
        self.on_update_view(self.get_view_matrix())

    def _update_projection(self):
        self.on_update_projection(self.get_projection_matrix())

    def on_resize(self, event):
        gloo.set_viewport(0, 0, *event.physical_size)
        self._update_projection()

    def on_mouse_move(self, event):
        """ Left drag to rotate, right drag to translate """
        if event.is_dragging and event.last_event:
            x0, y0 = event.last_event.pos
            x1, y1 = event.pos
            w, h = self.size
            if event.button == 1:
                self._quaternion *= arcball(x0, y0, w, h) * arcball(x1, y1, w, h)
                self._update_view()
            if event.button == 2:
                tx, ty, tz = self._translation
                tx += self.move_speed * (x1 - x0)
                ty += -self.move_speed * (y1 - y0)
                self._translation = (tx, ty, tz)
                self._update_view()

    def on_mouse_wheel(self, event):
        """ Roll mouse wheel to zoom in/out """
        roll = event.delta[1]
        self.rview *= math.exp(-self.zoom_speed * roll)
        self._update_projection()

    def on_mouse_press(self, event):
        if event.button == 1:
            cursor = QtGui.QCursor(QtCore.Qt.ClosedHandCursor)
            QtGui.QApplication.setOverrideCursor(cursor)
        if event.button == 2:
            cursor = QtGui.QCursor(QtCore.Qt.SizeAllCursor)
            QtGui.QApplication.setOverrideCursor(cursor)

    def on_mouse_release(self, event):
        QtGui.QApplication.restoreOverrideCursor()
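# Hypothetical use of the mixin above (a sketch; the shader source here is made up
# and only exists to give the u_view/u_projection uniforms something to land in):
from vispy import app, gloo

_VERT = """
attribute vec3 a_position;
uniform mat4 u_view;
uniform mat4 u_projection;
void main() {
    gl_Position = u_projection * u_view * vec4(a_position, 1.0);
}
"""
_FRAG = "void main() { gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0); }"

class ViewportCanvas(InteractiveViewportMixin, app.Canvas):
    def __init__(self):
        app.Canvas.__init__(self, keys='interactive', size=(640, 480))
        InteractiveViewportMixin.__init__(self, rview=2.0)
        self.program = gloo.Program(_VERT, _FRAG)

    def on_update_view(self, view):
        self.program['u_view'] = view

    def on_update_projection(self, proj):
        self.program['u_projection'] = proj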
# Serial objects are not pickle-able, hence they cannot be a part of a
# Manager.Namespace, and thus the serial port is a global.
arduino = at_talk.radio(args.port, 19200)

# There are multiple namespaces for flexibility
# {ns_comms, ns_qstate, ns_vis, ns_cfg}
mgr = multiprocessing.Manager()

ns_comms = mgr.Namespace()
ns_comms.name = ("Communications:\n"
                 "\tMode that rxControl operates in(comms_mode)\n"
                 "\tReceived packet from mpu(quat_packet)\n"
                 "\tReceived packet for GS(gs_packet)")
ns_comms.quat_packet = None
ns_comms.gs_packet = None
ns_comms.mode = 'att_est'

ns_qstate = mgr.Namespace()
ns_qstate.name = ("QuadState:\n"
                  "\tAttitude Quaternion(heading)\n"
                  "\tRPY calc. from the heading(rpy)")
ns_qstate.heading = Quaternion()
ns_qstate.rpy = (0.0, 0.0, 0.0)

ns_cfg = mgr.Namespace()
ns_cfg.a_scale = 16384.0        # Accel 2g
ns_cfg.g_scale = 16.384         # Gyro 2000 dps
ns_cfg.TIME_INTERVAL = 0.02     # ms
ns_cfg.comms_active = False
ns_cfg.this_is_v2 = sys.version_info[0] == 2

# Queues
vis_canvas = visual.Canvas(800, 600, ns_qstate)
sorter = rxControl.Sorter(ns_comms, ns_cfg, ns_qstate, arduino)

ns_cfg.comms_active = True
class Script:
    def __init__(self, path_to_script=None):
        """Standard __init__ method.

        Parameters
        ----------
        path_to_script : str
            path to script

        Attributes
        ----------
        command_series : list
            list of script commands
        end : int
            max frame
        command_list : list
            list of dictionaries, each element representing one operation.
            Keys are:
                'start': int, start frame of operation
                'end': int, end frame of operation
                'operation': str, type of operation, e.g. 'zoom', 'rotate'
                'params': list, parameters needed for the operation,
                    e.g. [2] for 'zoom'
        states_dict : list
            list of dictionaries defining napari viewer states for each frame.
            Dictionaries have keys:
                'frame': int, frame
                'rotate': vispy quaternion, camera rotation
                'translate': tuple, camera center
                'zoom': float, camera zooming factor
                'vis': list of booleans, visibility of layers
                'time': int, time-point
        key_frames : list
            same as states_dict but only elements containing a change are
            conserved
        """
        if path_to_script is None:
            raise TypeError('You need to pass a string with a path to a script')
        else:
            self.path_to_script = path_to_script

        # instantiate Quaternion object
        self.q = Quaternion()

    def read_script(self):
        """Read the script and create a list with groups of commands belonging
        to a given unit"""

        # read all lines
        commands = []
        with open(self.path_to_script) as f:
            commands = f.readlines()

        # group commands belonging together, e.g. those belonging to a
        # "From frame ..." statement
        command_series = []
        line = 0
        while line < len(commands):
            main_line = commands[line]
            line += 1
            if main_line[0] == '#':
                continue
            if len(re.findall('.*to frame \d+(\n)', main_line)) == 1:
                temp_lines = []
                while commands[line][0] == '-':
                    temp_lines.append(commands[line])
                    line += 1
                    if line == len(commands):
                        break
                command_series.append([main_line, temp_lines])
            else:
                command_series.append([main_line, [main_line]])

        end = [re.findall('(At frame |to frame )(\d+).*', x) for x in commands]
        end = np.max([int(x[0][1]) for x in end if x])

        self.command_series = command_series
        self.end = end

    def create_commandlist(self):
        """Create a dictionary list of each operation. Each operation becomes
        one dictionary. Operations belonging to groups have the same start/end
        frames"""

        # go through all commands and parse the information
        command_list = []
        for c in self.command_series:
            # get start and end frames. For "At frame..." statements end == start
            if c[0].split()[0] == 'From':
                start = int(re.findall('From frame (\d+) to*', c[0])[0])
                end = int(re.findall('to frame (\d+) *', c[0])[0])
            else:
                start = int(re.findall('At frame (\d+).*', c[0])[0])
                end = int(re.findall('At frame (\d+).*', c[0])[0])

            # For each group of statements parse the commands
            for c2 in c[1]:
                parsed = self.parse_command(c2)
                # if parsing returns a list, it means that the operation has
                # been split into parts, mainly to handle large rotations
                if type(parsed) is list:
                    interm_steps = np.linspace(start, end, len(parsed) + 1).astype(int)
                    for i in range(len(interm_steps) - 1):
                        command_list.append([interm_steps[i], interm_steps[i + 1], parsed[i]])
                else:
                    command_list.append([start, end, parsed])

        # sort commands by time
        command_list = np.array(command_list)
        command_list = command_list[np.argsort(command_list[:, 0]), :]

        # create list of dictionaries
        command_list = [{'start': x[0], 'end': x[1], 'operation': x[2][0],
                         'params': x[2][1:]} for x in command_list]
        self.command_list = command_list

    def parse_command(self, command):
        """Given a command line, parse the content

        Returns
        -------
        result : tuple
            tuple with the type of operation and the corresponding parameters,
            e.g. ('zoom', 2)
        """

        # check operation type
        mod_type = re.findall('.*(rotate|translate|zoom|make|time).*', command)[0]

        # for each operation type recover the necessary parameters
        if mod_type == 'rotate':
            angle = int(re.findall('.*rotate by (\d+).*', command)[0])
            axis = list(map(int, re.findall('.*around \((\d+)\,(\d+)\,(\d+).*', command)[0]))
            # if the rotation angle is large, split it into 3 to ensure the
            # rotation is accomplished fully
            if angle >= 180:
                new_q = self.q.create_from_axis_angle(angle / 3 * 2 * np.pi / 360,
                                                      axis[0], axis[1], axis[2],
                                                      degrees=False)
                result = [(mod_type, new_q), (mod_type, new_q), (mod_type, new_q)]
            else:
                new_q = self.q.create_from_axis_angle(angle * 2 * np.pi / 360,
                                                      axis[0], axis[1], axis[2],
                                                      degrees=False)
                result = (mod_type, new_q)

        elif mod_type == 'zoom':
            factor = float(re.findall('.*factor of (\d*\.*\d+).*', command)[0])
            result = (mod_type, factor)

        elif mod_type == 'translate':
            translate = np.array(list(map(int, re.findall('.*by \((\-*\d+)\,(\-*\d+)\,(\-*\d+).*', command)[0])))
            result = (mod_type, translate)

        elif mod_type == 'make':
            layer = int(re.findall('.*make layer (\d+).*', command)[0])
            vis_status = command.split()[-1]
            if vis_status == 'invisible':
                result = ('vis', layer, False)
            else:
                result = ('vis', layer, True)

        elif mod_type == 'time':
            time_shift = int(re.findall('.*by (\-*\d+).*', command)[0])
            result = (mod_type, time_shift)

        return result

    def create_frame_commandlist(self, movie):
        """Go through the list of operations and create for each frame a
        dictionary with modifications to be operated. Only frames with an
        operation are filled, the others are interpolated later"""

        states_dict = [dict(zip(('frame', 'rotate', 'translate', 'zoom', 'vis', 'time'),
                                (a, [], [], [], [], [])))
                       for a in np.arange(self.end + 1)]

        # initialize state with the current view. This first point can be
        # adjusted by using a series of "At frame 0 ..." commands
        current_state = copy.deepcopy(movie.myviewer.window.qt_viewer.view.camera.get_state())
        states_dict[0]['rotate'] = current_state['_quaternion']
        states_dict[0]['zoom'] = current_state['scale_factor']
        states_dict[0]['translate'] = current_state['center']
        states_dict[0]['vis'] = [x.visible for x in movie.myviewer.layers]
        if len(movie.myviewer.dims.point) == 4:
            states_dict[0]['time'] = movie.myviewer.dims.point[0]

        # fill the states_dict at the start/end positions by compounding
        # operations over frames containing changes
        old_state = copy.deepcopy(states_dict[0])
        for c in self.command_list:
            if c['operation'] == 'rotate':
                states_dict[c['start']]['rotate'] = copy.deepcopy(old_state['rotate'])
                states_dict[c['end']]['rotate'] = copy.deepcopy(old_state['rotate'] * c['params'][0])
                old_state['rotate'] = copy.deepcopy(states_dict[c['end']]['rotate'])
            elif c['operation'] == 'translate':
                states_dict[c['start']]['translate'] = copy.deepcopy(old_state['translate'])
                states_dict[c['end']]['translate'] = copy.deepcopy(
                    tuple(np.array(old_state['translate']) + c['params'][0]))
                old_state['translate'] = copy.deepcopy(states_dict[c['end']]['translate'])
            elif c['operation'] == 'zoom':
                states_dict[c['start']]['zoom'] = copy.deepcopy(old_state['zoom'])
                states_dict[c['end']]['zoom'] = copy.deepcopy(old_state['zoom'] * c['params'][0])
                old_state['zoom'] = copy.deepcopy(states_dict[c['end']]['zoom'])
            elif c['operation'] == 'vis':
                states_dict[c['start']]['vis'] = copy.deepcopy(old_state['vis'])
                states_dict[c['end']]['vis'] = copy.deepcopy(old_state['vis'])
                states_dict[c['end']]['vis'][c['params'][0]] = c['params'][1]
                old_state['vis'] = copy.deepcopy(states_dict[c['end']]['vis'])
            elif c['operation'] == 'time':
                states_dict[c['start']]['time'] = copy.deepcopy(old_state['time'])
                states_dict[c['end']]['time'] = copy.deepcopy(old_state['time'] + c['params'][0])
                old_state['time'] = copy.deepcopy(states_dict[c['end']]['time'])

        old_state['frame'] = states_dict[-1]['frame']
        states_dict[-1] = copy.deepcopy(old_state)

        self.states_dict = states_dict

    def make_keyframes(self):
        """In the states_dict list of dictionaries, conserve only elements
        where change is happening"""

        props = ['rotate', 'translate', 'zoom', 'vis', 'time']
        states_copy = copy.deepcopy(self.states_dict)
        key_frames = [y for y in states_copy if np.any([y[x] for x in props])]
        self.key_frames = key_frames
        self._yPos = np.append(self._yPos, y)
        self._zPos = np.append(self._zPos, z)
        self._plot.set_data((self._xPos.transpose(), self._yPos.transpose(),
                             self._zPos.transpose()), symbol=None)

    def resetTransform(self):
        self._xPos = np.array([0], dtype=np.float32)
        self._yPos = np.array([0], dtype=np.float32)
        self._zPos = np.array([0], dtype=np.float32)


if __name__ == '__main__':
    import sys

    mp = MotionPlotter()
    q1 = Quaternion()
    q2 = Quaternion()
    y = 0
    p = 0
    tx = 0
    ty = 0
    tz = 0
    signx = -1
    signy = -1
    signz = -1

    def update(ev):
        global y, p, tx, ty, tz, q1, q2, signx, signy, signz
        q1 = Quaternion.create_from_axis_angle(y, 0, 0, 1, degrees=True)
        q2 = Quaternion.create_from_axis_angle(p, 0, 1, 0, degrees=True)
        q = q1 * q2
class Canvas(app.Canvas):

    visualization_modes = ['cpk', 'backbone', 'aminoacid', 'dssp']

    def __init__(self, pdbdata, mode='cpk'):
        # Startup
        app.Canvas.__init__(self, keys='interactive', size=(W, H))

        # Loading shaders
        self.program = gloo.Program(vertex, fragment)

        # Analyze pdb file
        self.parser = PDBParser(QUIET=True, PERMISSIVE=True)
        self.structure = self.parser.get_structure('model', pdbdata)

        # Mode selection
        if mode not in Canvas.visualization_modes:
            raise Exception('Not recognized visualization mode %s' % mode)
        self.mode = mode

        # Get the data of our atoms
        self.atom_information()

        # Camera settings
        self.translate = max(abs(np.concatenate(self.coordinates))) + 40
        self.translate = max(-1, self.translate)
        self.view = translate((0, 0, -self.translate), dtype=np.float32)
        self.model = np.eye(4, dtype=np.float32)
        self.projection = np.eye(4, dtype=np.float32)
        self.program['u_projection'] = self.projection
        self.quaternion = Quaternion()

        # Load data depending on the mode
        self.apply_zoom()
        self.load_data()
        # self.lines = visuals.LinePlotVisual(self.chain_coords[1], width=5.0,
        #                                     marker_size=0.0,
        #                                     color=self.chain_colors[1])

        gloo.set_state(depth_test=True, clear_color='white')
        self.show()

    def atom_information(self):
        """Determines the coordinates, colors and sizes of the atoms depending
        on the mode"""

        if self.mode == 'cpk':
            # list of atoms
            self.atoms = [atom for atom in self.structure.get_atoms()]
            self.natoms = len(self.atoms)
            # atom coordinates
            self.coordinates = np.array([atom.coord for atom in self.atoms])
            self.center = centroid(self.coordinates)
            self.coordinates -= self.center
            # atom color
            self.color = [np.array(colorrgba(atom.get_id())[0:3])
                          for atom in self.atoms]
            # atom radius
            self.radius = [vrad(atom.get_id()) for atom in self.atoms]

        elif self.mode == 'aminoacid':
            # list of atoms
            self.atoms = [atom for atom in self.structure.get_atoms()
                          if atom.get_parent().resname != 'HOH']
            self.natoms = len(self.atoms)
            # atom coordinates
            self.coordinates = np.array([atom.coord for atom in self.atoms])
            self.center = centroid(self.coordinates)
            self.coordinates -= self.center
            # atom color
            self.color = [colorrgba(restype(atom.get_parent().resname))[0:3]
                          for atom in self.atoms]
            # atom radius
            self.radius = [vrad(atom.get_id()) for atom in self.atoms]

        elif self.mode == 'backbone':
            # list of atoms
            self.atoms = [atom for atom in self.structure.get_atoms()
                          if atom.get_name() == 'CA' or atom.get_name() == 'N']
            self.natoms = len(self.atoms)
            # atom coordinates
            self.coordinates = np.array([atom.coord for atom in self.atoms])
            self.center = centroid(self.coordinates)
            self.coordinates -= self.center
            # atom color
            self.color = []
            self.chains = []
            self.chain_coords = []
            self.chain_colors = []
            for chain in self.structure.get_chains():
                self.chains.append(chain)
                self.can_coord = np.array([atom.coord
                                           for atom in chain.get_atoms()
                                           if atom.get_name() == 'CA'
                                           or atom.get_name() == 'N'])
                self.can_coord -= self.center
                self.chain_coords.append(self.can_coord)
                self.chain_length = len(self.can_coord)
                self.chain_color = np.random.rand(1, 3)
                self.chain_colors.append(self.chain_color)
                self.color.append(np.tile(self.chain_color,
                                          (self.chain_length, 1)))
            if len(self.chains) > 1:
                self.color = np.concatenate(self.color)
            # atom radius
            self.radius = [vrad(atom.get_id()) for atom in self.atoms]

    def load_data(self):
        """Make an array with all the data and load it into VisPy Gloo"""

        data = np.zeros(self.natoms, [('a_position', np.float32, 3),
                                      ('a_color', np.float32, 3),
                                      ('a_radius', np.float32, 1)])

        data['a_position'] = self.coordinates
        data['a_color'] = self.color
        data['a_radius'] = self.radius  # * self.pixel_scale

        self.program.bind(gloo.VertexBuffer(data))

        self.program['u_model'] = self.model
        self.program['u_view'] = self.view
        self.program['u_light_position'] = 0., 0., 2.
        self.program['u_light_spec_position'] = -5., 5., -5.

        print('Data loaded')

    def on_resize(self, event):
        width, height = event.size

    def apply_zoom(self):
        """Determine the projection of the canvas"""
        width, height = self.physical_size
        gloo.set_viewport(0, 0, width, height)
        self.projection = perspective(95.0, width / float(height), 1.0, 400.0)
        self.program['u_projection'] = self.projection

    def on_draw(self, event):
        gloo.clear()
        self.program.draw('points')
        # draw separated chains
        # if self.mode in ['backbone', 'dssp']:
        #     for i in range(len(self.chains)):
        #         self.lines = visuals.LinePlotVisual(self.chain_coords[i],
        #                                             width=5.0, marker_size=0.0,
        #                                             color=self.chain_colors[i])
        #         self.lines.draw()

    def on_mouse_move(self, event):
        # Mouse rotation
        if event.button == 1 and event.last_event is not None:
            x0, y0 = event.last_event.pos
            x1, y1 = event.pos
            w, h = self.size
            self.quaternion = (self.quaternion *
                               Quaternion(*_arcball(x0, y0, w, h)) *
                               Quaternion(*_arcball(x1, y1, w, h)))
            self.model = self.quaternion.get_matrix()
            self.program['u_model'] = self.model
            self.update()
        # Mouse zoom
        elif event.button == 2 and event.last_event is not None:
            x0, y0 = event.last_event.pos
            x1, y1 = event.pos
            self.translate += (y1 - y0)
            self.translate = max(-1, self.translate)
            self.view = translate((0, 0, -self.translate), dtype=np.float32)
            self.program['u_view'] = self.view
            self.update()

    def on_mouse_wheel(self, event):
        self.translate -= event.delta[1]
        self.translate = max(-1, self.translate)
        self.view = translate((0, 0, -self.translate))
        self.program['u_view'] = self.view
        self.update()
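# A minimal, hypothetical entry point for the molecular Canvas above (the pdb file
# name is made up; W, H, the shader strings and helpers such as centroid/colorrgba
# are assumed to be defined in the same module, and `app` is the vispy app module
# already used by the class):
if __name__ == '__main__':
    canvas = Canvas('protein.pdb', mode='backbone')
    app.run()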
class RGBDMultiView(app.Canvas):
    """ """

    def __init__(self, sensor_type="OrbbecAstra", sensor_scale=1.0):
        app.Canvas.__init__(self, keys='interactive', size=(1280, 1024))
        self.title = 'RGBDMultiView'
        self.quaternion = Quaternion()  # for mouse movement
        self.cam_params = GetCameraParameters(sensor_type, sensor_scale)
        self.fov = 30
        ps = self.pixel_scale
        self.playbackSpeed = 1

        # view and model control variables
        self.tshift = 0.05       # the amount to translate by
        self.rshift = 0.05       # amount to increase rotation by
        self.scaleFactor = 1000  # the amount to scale the input model by
        self.default_camera_view()

        self.view = np.dot(self.rotMat, self.transMat)
        self.model = np.eye(4, dtype=np.float32)
        self.projection = np.eye(4, dtype=np.float32)

        self.GL_progs = {}  # name : data for a single frame

        self.apply_zoom()
        gloo.set_state('translucent', clear_color='gray')
        self.timer = app.Timer('auto', connect=self.on_timer, start=False)

    def on_timer(self, event):
        if self.nframe_curr < self.nframe_total - self.playbackSpeed:
            # if we still have images, keep loading them
            self.nframe_curr += self.playbackSpeed
            self.set_frame(self.nframe_curr)
        else:
            # no more images means we're at the end, so stop the timer; the
            # video can then be reversed or restarted via the spacebar
            if self.timer.running:
                self.timer.stop()
        self.update()

    def on_mouse_move(self, event):
        if event.button == 1 and event.last_event is not None:
            x0, y0 = event.last_event.pos
            x1, y1 = event.pos
            w, h = self.size
            self.quaternion = (self.quaternion *
                               Quaternion(*_arcball(x0, y0, w, h)) *
                               Quaternion(*_arcball(x1, y1, w, h)))
            self.rotMat = self.quaternion.get_matrix()
            self.view = np.dot(self.rotMat, self.transMat)
            for k, prog in self.GL_progs.items():
                prog['u_view'] = self.view
                prog['u_model'] = self.model
            self.update()

    def default_camera_view(self):
        """ Set the view matrices to their default values """
        self.offX = 0.0   # actual amounts translated thus far
        self.offY = -0.6
        self.zoomAmount = 2.5
        self.centerCoord = (0, 0, 0)  # center of the current point cloud frame
        self.rotX = 9.3
        self.rotY = 6.3
        self.rotZ = 0.0
        self.transMat = translate((self.offX, self.offY, -self.zoomAmount))
        self.rotMat = rotationMatrix((self.rotX, self.rotY, self.rotZ))

        q = quaternion_from_matrix(self.rotMat)  # just for the mouse arcball
        self.quaternion.w = q[0]
        self.quaternion.x = q[1]
        self.quaternion.y = q[2]
        self.quaternion.z = q[3]

    def print_camera_view(self):
        print(self.offX, self.offY, self.zoomAmount)
        print(self.rotX, self.rotY, self.rotZ)
        print()

    def on_key_press(self, event):
        ### pause and play
        if event.text == ' ':
            if self.timer.running:
                self.timer.stop()
            else:
                self.timer.start()
        ### manually advance through the frames
        elif event.text == ',' and self.nframe_curr >= self.playbackSpeed:
            self.nframe_curr -= self.playbackSpeed
            self.set_frame(self.nframe_curr)
            self.update()
        elif event.text == '.':
            if self.nframe_curr < self.nframe_total - self.playbackSpeed:
                self.nframe_curr += self.playbackSpeed
            elif self.nframe_curr < self.nframe_total - 1:
                self.nframe_curr += 1
            self.set_frame(self.nframe_curr)
            self.update()

        ### camera manipulation
        if event.text in ['w', 'a', 's', 'd']:
            # planar translations
            if event.text == 'w':
                self.offY += self.tshift
            elif event.text == 'a':
                self.offX -= self.tshift
            elif event.text == 's':
                self.offY -= self.tshift
            elif event.text == 'd':
                self.offX += self.tshift
            self.transMat = translate((self.offX, self.offY, -self.zoomAmount))

        if event.text in ['q', 'e', 'z', 'c', 'r', 'v']:
            # rotation about the axes
            if event.text == 'q':
                self.rotX += self.rshift
            elif event.text == 'e':
                self.rotX -= self.rshift
            elif event.text == 'z':
                self.rotY += self.rshift
            elif event.text == 'c':
                self.rotY -= self.rshift
            elif event.text == 'r':
                self.rotZ += self.rshift
            elif event.text == 'v':
                self.rotZ -= self.rshift
            self.rotMat = rotationMatrix((self.rotX, self.rotY, self.rotZ))

        ### reset camera to the original default orientation
        if event.text == 'x':
            self.print_camera_view()
            self.default_camera_view()

        if event.text == 'p':
            strTopFolder = "F:/data/deep-learning/additive_depth/98_vr_calib/"
            strFolderName = "test01_img_seq"
            strMultiviewFolder = os.path.join(strTopFolder, strFolderName)
            strPosesFile = os.path.join(strMultiviewFolder, "poses.txt")
            self.sensor_props = ReadManualCalibPoses(strPosesFile)
            for s in self.sensor_props:
                s.load_image_files(strMultiviewFolder)

        self.view = np.dot(self.rotMat, self.transMat)
        for k, prog in self.GL_progs.items():
            prog['u_view'] = self.view
            prog['u_model'] = self.model
        self.update()

    def on_resize(self, event):
        self.apply_zoom()

    def on_mouse_wheel(self, event):
        self.zoomAmount -= event.delta[1] / 2.0
        self.transMat = translate((self.offX, self.offY, -self.zoomAmount))
        self.view = np.dot(self.rotMat, self.transMat)
        for k, prog in self.GL_progs.items():
            prog['u_view'] = self.view
        self.update()

    def on_draw(self, event):
        gloo.clear()
        for k, prog in self.GL_progs.items():
            prog.draw('points')

    def apply_zoom(self):
        gloo.set_viewport(0, 0, self.physical_size[0], self.physical_size[1])
        self.projection = perspective(self.fov,
                                      self.size[0] / float(self.size[1]),
                                      0.01, 10000.0)
        for k, prog in self.GL_progs.items():
            prog['u_projection'] = self.projection

    def load_frame_data(self, frame_n):
        '''
        load_frame_data

        Arguments:
            frame_n: int
                the frame number to load from the stream

        Return:
            frame_data: dict
                one entry per camera, each holding [nVertices, xyz, rgb]
        '''
        frame_data = {}
        for s in self.sensor_props:
            # load frames and compute point cloud
            depthImg = cv2.imread(s.imgs_depth[frame_n], -1)
            clrImg = cv2.imread(s.imgs_color[frame_n], -1)
            ptcloud, nVertices = image_fusion(self.cam_params, depthImg, clrImg)
            transformedPtcloud = transform_pointcloud_vectorized(
                ptcloud, s.rotationMatrix(), s.translationMatrix())
            xyz, rgb = np.hsplit(transformedPtcloud, 2)
            ## TODO: extra post-processing to convert points close to the camera to a specific color
            frame_data[s.name] = [nVertices, xyz, rgb]
        return frame_data

    def load_frame_to_stl(self, frame_n):
        '''
        load_frame_to_stl

        Arguments:
            frame_n: int
                the frame number to load from the stream

        Return:
            frame_data: dict
                one entry per camera, each holding [nVertices, xyz, rgb]
        '''
        data = np.zeros(6, dtype=mesh.Mesh.dtype)
        frame_data = {}
        xyzpcd, rgbpcd = [], []
        for i, s in enumerate(self.sensor_props):
            # load frames and compute point cloud
            depthImg = cv2.imread(s.imgs_depth[frame_n], -1)
            clrImg = cv2.imread(s.imgs_color[frame_n], -1)
            ptcloud, nVertices = image_fusion(self.cam_params, depthImg, clrImg)
            transformedPtcloud = transform_pointcloud_vectorized(
                ptcloud, s.rotationMatrix(), s.translationMatrix())
            xyz, rgb = np.hsplit(transformedPtcloud, 2)
            xyzpcd.insert(i, xyz)
            rgbpcd.insert(i, rgb)
            ## TODO: extra post-processing to convert points close to the camera to a specific color
            frame_data[s.name] = [nVertices, xyz, rgb]

        rgbpcd = np.vstack((rgbpcd[0], rgbpcd[1], rgbpcd[2]))
        xyzpcd = np.vstack((xyzpcd[0], xyzpcd[1], xyzpcd[2]))
        print(rgbpcd.shape, xyzpcd.shape)
        return xyzpcd, rgbpcd, frame_data

    def set_frame(self, frame_number):
        self.nframe_curr = frame_number
        frame_data = self.load_frame_data(self.nframe_curr)
        for k, prog in self.GL_progs.items():
            nVertices, xyz, rgb = frame_data[k]
            ## load and bind the current frame data to the GL program
            data = np.zeros(nVertices, [('a_position', np.float32, 3),
                                        ('a_bg_color', np.float32, 4),
                                        ('a_fg_color', np.float32, 4),
                                        ('a_size', np.float32, 1)])
            data['a_position'] = np.array(xyz / self.scaleFactor)
            # make sure rgb is between [0, 1] and append an extra 1 to each
            # row for the alpha value
            data['a_bg_color'] = np.c_[rgb / 255, np.ones(nVertices)]
            data['a_fg_color'] = 0, 0, 0, 1
            data['a_size'] = 4
            prog['u_model'] = self.model
            prog.bind(gloo.VertexBuffer(data))
        self.update()

    def set_multiview_source(self, sensor_props):
        self.nframe_total = sys.maxsize
        self.sensor_props = sensor_props
        for s in self.sensor_props:
            gl_prog = gloo.Program(VERTEX_SHADER, FRAGMENT_SHADER)
            gl_prog['u_linewidth'] = 0
            gl_prog['u_antialias'] = 0
            gl_prog['u_model'] = self.model
            gl_prog['u_view'] = self.view
            gl_prog['u_size'] = 1
            self.GL_progs[s.name] = gl_prog
            self.nframe_total = min(self.nframe_total, len(s.imgs_color))
            self.nframe_total = min(self.nframe_total, len(s.imgs_depth))
        print(self.nframe_total)

        self.apply_zoom()
        self.set_frame(0)
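# A hypothetical driver for RGBDMultiView (a sketch; ReadManualCalibPoses and
# load_image_files are the helpers already used in on_key_press above, while the
# file and folder names here are made up):
if __name__ == '__main__':
    sensor_props = ReadManualCalibPoses('poses.txt')
    for s in sensor_props:
        s.load_image_files('multiview_img_seq')
    canvas = RGBDMultiView(sensor_type="OrbbecAstra", sensor_scale=1.0)
    canvas.set_multiview_source(sensor_props)
    canvas.show()
    app.run()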