def processData(data):
    """Coerce *data* into a Vector4.

    An existing Vector4 passes through unchanged; a 4-element list is
    wrapped; anything else yields a default-constructed Vector4.
    """
    if isinstance(data, Vector4):
        return data
    if isinstance(data, list) and len(data) == 4:
        return Vector4(data)
    return Vector4()
def calculateMouseRay(self, projectionMatrix, viewMAtrix, screenX, screenY, mouse_x, mouse_y):
    """Build a world-space picking ray from a mouse position.

    Converts the pixel position to normalized device coordinates,
    un-projects through the inverse projection matrix, then rotates the
    resulting eye-space direction into world space.

    NOTE(review): the ``viewMAtrix`` parameter name contains a typo but
    is part of the public interface (keyword callers), so it is kept.
    """
    ndc_x = (2.0 * mouse_x) / screenX - 1.0
    ndc_y = 1.0 - (2.0 * mouse_y) / screenY
    clip = Vector4.from_vector3(Vector3([ndc_x, ndc_y, -1.0]), w=1.0)
    eye = projectionMatrix.inverse * clip
    # Keep only xy, point the ray forwards, and make it a direction (w = 0).
    eye = Vector4([eye.x, eye.y, -1.0, 0.0])
    world = (viewMAtrix.inverse * eye).xyz
    return Vector3(world)
def on_render(self, gl_area, gl_context):
    """Per-frame render callback for the Gtk.GLArea.

    Renders the scene three times — into the water reflection FBO
    (with the camera mirrored beneath the water plane), into the
    refraction FBO, and finally into the default framebuffer — then
    draws the water, particles, GUI and text overlays, and schedules
    the next frame.
    """
    if self.FBO_initialized is False:
        self.default_FBO = glGetIntegerv(
            GL_FRAMEBUFFER_BINDING
        )  # GLArea does not seem to use FBO 0 as the default.
        self.FBO.initializeFramebuffer(self.default_FBO, self.window_size)
        self.FBO_initialized = True
    self.renderer.processMovement(self.delta)
    # NOTE(review): presumably skips particle spawning on stalled frames
    # (delta >= 1 second) — confirm the intent.
    if self.delta < 1:
        self.system.generateParticles((1.0, 1.0, 1.0), self.delta)
    ParticleMaster.update(self.delta, self.renderer.getCamera())
    # Clip plane 0 cuts geometry above/below the water surface for the
    # reflection and refraction passes.
    glEnable(GL_CLIP_DISTANCE0)
    self.FBO.bindReflectionFrameBuffer()
    # Mirror the camera below the water plane for the reflection pass.
    distance = 2 * (self.renderer.getCamera().getPosition().y - self.water.getHeight())
    self.renderer.getCamera().setCameraHeight(
        self.renderer.getCamera().getPosition().y - distance, True)
    self.renderer.renderScene(
        self.entities, self.normalMapEntities, self.terrainTiles,
        self.lights, self.running_seconds_from_start,
        # Clip below the water; the +0.5 looks like a bias to hide edge
        # artifacts at the waterline — TODO confirm.
        Vector4((0, 1, 0, -self.water.getHeight() + 0.5)))
    # Restore the camera to its original height.
    self.renderer.getCamera().setCameraHeight(
        self.renderer.getCamera().getPosition().y + distance)
    self.FBO.bindRefractionFrameBuffer()
    # Refraction pass: clip everything above the water surface.
    self.renderer.renderScene(self.entities, self.normalMapEntities,
                              self.terrainTiles, self.lights,
                              self.running_seconds_from_start,
                              Vector4((0, -1, 0, self.water.getHeight())))
    glDisable(GL_CLIP_DISTANCE0)
    self.FBO.unbindCurrentFrameBuffer()
    # Main pass to the default framebuffer. Clipping is disabled here, so
    # the plane at height 15 presumably has no visible effect — verify.
    self.renderer.renderScene(self.entities, self.normalMapEntities,
                              self.terrainTiles, self.lights,
                              self.running_seconds_from_start,
                              Vector4((0, -1, 0, 15)))
    self.waterRenderer.render(self.delta, self.water, self.sun)
    ParticleMaster.renderParticles()
    self.guiRenderer.render(self.guis)
    TextMaster.render()
    self.queue_draw()  # Schedules a redraw for Gtk.GLArea
def rotateCam(self, axis, dtheta):
    """Rotate the camera look-at point by *dtheta* about *axis*,
    pivoting around the camera position."""
    rot = matrix44.create_from_axis_rotation(axis, dtheta)
    # Work relative to the camera so the rotation pivots on camPos.
    self.camLookAt -= self.camPos
    rotated = rot.dot(Vector4.from_vector3(self.camLookAt, 1.0))
    # from_vector4 returns (vector3, w); keep only the vector part.
    self.camLookAt = Vector3.from_vector4(rotated)[0]
    self.camLookAt += self.camPos
def __respawnParticle(particle, object_, offset=Vector3([0., 0., 0.])):
    """Reset a dead particle near *object_* with random jitter and a grey tint."""
    # Scalar jitter in [-5.0, 4.9] applied to every position component.
    jitter = (np.random.randint(100) - 50) / 10.
    # Grey level in [0.5, 1.49).
    grey = 0.5 + np.random.randint(100) / 100.
    particle.Position = object_.pos + jitter + offset
    particle.Color = Vector4([grey, grey, grey, 1.0])
    particle.Life = 1.0
    particle.Velocity = object_.velocity * 0.1
def __calculateMouseRay(self):
    """Cast a ray from the current cursor position into world space."""
    # Position of mouse in viewport space
    cursor_x, cursor_y = self.__inputEvents.getCursorPosition()
    ndc = self.getNormalizedDeviceCoords(cursor_x, cursor_y)
    # Clip-space ray pointing into the screen (z = -1).
    clip = Vector4((ndc[0], ndc[1], -1, 1))
    eye = self.toEyeCoords(clip)
    return self.toWorldCoords(eye)
def setData(self, data):
    """Store *data* as a Vector4 and propagate it to the base pin widget.

    Falls back to the pin's default value for anything that is not a
    Vector4 or a 4-element list.
    """
    if isinstance(data, Vector4):
        value = data
    elif isinstance(data, list) and len(data) == 4:
        value = Vector4(data)
    else:
        value = self.defaultValue()
    self._data = value
    PinWidgetBase.setData(self, self._data)
def unproject(self, v: tuple) -> Vector3: """ Unproject a vector from the world coordinates. """ # Only take xy coords ray_world = (self.matrix.T * self.proj_matrix.inverse * Vector4([v[0], v[1], -1, 1])).vector3[0] ray_world.normalize() return ray_world
def __init__(self):
    # Camera position as a homogeneous vector (w = 0).
    self._pos = Vector4([0, 1, 0, 0])
    # Orientation angles; presumably degrees (fovY=45 suggests degrees)
    # — TODO confirm against the projection code.
    self.pitch = 0.0
    self.yaw = 10.0
    self.roll = 0.0
    # Distance from the orbit target.
    self.dist = 4.0
    # Vertical field of view.
    self.fovY = 45
    self.aspect_ratio = 1
    # True => perspective projection; False presumably orthographic.
    self.perspective = True
def __init__(self, Position=None, Velocity=None, Color=None, Life=0.):
    """Initialize a particle.

    Args:
        Position: starting position; defaults to a fresh Vector3 origin.
        Velocity: starting velocity; defaults to a fresh zero Vector3.
        Color: RGBA color; defaults to opaque white Vector4.
        Life: remaining lifetime (0. = dead).

    The previous signature used mutable default arguments
    (``Vector3([0, 0, 0])`` etc.), which are evaluated once at function
    definition and shared by every particle created without explicit
    arguments — mutating one particle's default vector would silently
    change all of them. ``None`` sentinels create a fresh vector per call.
    """
    self.Position = Vector3([0, 0, 0]) if Position is None else Position
    self.Velocity = Vector3([0, 0, 0]) if Velocity is None else Velocity
    self.Color = Vector4([1., 1., 1., 1.]) if Color is None else Color
    self.Life = Life
def provide_static_transformation(tf2_publisher):
    """Build and publish a static transform "base" -> "python_static".

    NOTE(review): Python 2 print statements — this module requires
    Python 2.
    """
    # Translation of +1 along x and -1 along z (w = 0).
    # NOTE(review): the original comment claimed "along the x axis" only.
    translation = Vector4([1., 0., -1., 0.])
    # Rotate 90 degrees about the z axis.
    # NOTE(review): the original comment said "around x", but the code
    # calls from_z_rotation.
    rotation = Quaternion.from_z_rotation((np.pi / 2))
    # Unit (identity) scaling.
    scale = Vector4([1., 1., 1., 1.])
    # Combine translation/rotation/scale into one affine transform.
    af_s = rct.Affine3d(translation, rotation, scale)
    # Stamp with frame names and the current wall-clock time.
    t_s = rct.Transform(af_s, "base", "python_static", time.time())
    print ">> providing STATIC: "
    print "YRP : ", t_s.transformation.yrp
    print "translation : ", t_s.transformation.translation
    print "rotation : ", t_s.transformation.rotation_quaternion
    tf2_publisher.send_transform(t_s, rct.TransformType.STATIC)
def get_near_far_from_view(self, view: Matrix44) -> Tuple[float, float]:
    """Return the (nearest, farthest) view-space z of the cached extend points.

    View-space z is negative in front of the camera, so "nearest" is the
    maximum z and "farthest" the minimum. The near value is clamped to 0
    so the near plane never ends up behind the camera.

    Args:
        view: the view matrix applied to each homogeneous extend point.
    """
    # Seed with infinities instead of the previous +/-1000000 magic
    # numbers, so extents larger than a million units still work.
    nearest_view_z: float = float("-inf")
    farthest_view_z: float = float("inf")
    for pos in self.extends:
        view_position = Vector4(matrix44.apply_to_vector(view, pos))
        nearest_view_z = max(nearest_view_z, view_position.z)
        farthest_view_z = min(farthest_view_z, view_position.z)
    if nearest_view_z > 0:
        nearest_view_z = 0
    return nearest_view_z, farthest_view_z
def convert_tf_to_transform(self, transform_stamped):
    """Convert a ROS TransformStamped message into an rct-style Transform.

    Copies the translation into a Vector4 (w = 0), the rotation into a
    Quaternion, applies unit scale, and stamps the frame ids and time
    from the message header.
    """
    # Translation as a direction-style homogeneous vector (w = 0).
    position_vector = Vector4()
    position_vector.x = transform_stamped.transform.translation.x
    position_vector.y = transform_stamped.transform.translation.y
    position_vector.z = transform_stamped.transform.translation.z
    position_vector.w = 0.
    # Copy the quaternion component-wise.
    rotation_quaterniond = Quaternion()
    rotation_quaterniond.w = transform_stamped.transform.rotation.w
    rotation_quaterniond.x = transform_stamped.transform.rotation.x
    rotation_quaterniond.y = transform_stamped.transform.rotation.y
    rotation_quaterniond.z = transform_stamped.transform.rotation.z
    # TF carries no scale information; use the identity scale.
    scale = Vector4([1., 1., 1., 1.])
    affine3d = Affine3d(position_vector, rotation_quaterniond, scale)
    transform = Transform(affine3d, transform_stamped.header.frame_id,
                          transform_stamped.child_frame_id,
                          transform_stamped.header.stamp.to_time(),
                          authority="")
    return transform
def test_m44_q_equivalence(self):
    """Test for equivalence of matrix and quaternion rotations.

    Create a matrix and quaternion, rotate each by the same values
    then convert matrix<->quaternion and check the results are the
    same.
    """
    m = Matrix44.from_x_rotation(np.pi / 2.)
    mq = Quaternion.from_matrix(m)
    q = Quaternion.from_x_rotation(np.pi / 2.)
    qm = Matrix44.from_quaternion(q)
    # The x axis is invariant under an x rotation, so every transform
    # path must map (1, 0, 0, 1) onto itself.
    self.assertTrue(
        np.allclose(np.dot([1., 0., 0., 1.], m), [1., 0., 0., 1.]))
    self.assertTrue(
        np.allclose(np.dot([1., 0., 0., 1.], qm), [1., 0., 0., 1.]))
    self.assertTrue(
        np.allclose(q * Vector4([1., 0., 0., 1.]), [1., 0., 0., 1.]))
    self.assertTrue(
        np.allclose(mq * Vector4([1., 0., 0., 1.]), [1., 0., 0., 1.]))
    # Converted forms should match their directly-built counterparts.
    np.testing.assert_almost_equal(q, mq, decimal=5)
    np.testing.assert_almost_equal(m, qm, decimal=5)
def provide_dynamic_transformation(tf2_publisher):
    """Continuously publish a dynamic transform "python_static" -> "python_dynamic".

    NOTE(review): Python 2 print statements — this module requires
    Python 2. Loops at roughly 50 Hz until the module-level ``doRun``
    flag is cleared.
    """
    # construct the dynamic transformation: 90 degrees about z
    rotation = Quaternion.from_z_rotation(np.pi / 2)
    # with normal (unit) scaling
    scale = Vector4([1., 1., 1., 1.])
    # Translate -1 along y and +2 along z (w = 0).
    # NOTE(review): the original comment said "translate again along x".
    position = Vector4([0., -1., 2., 0.])
    af_d = rct.Affine3d(position, rotation, scale)
    t_d = rct.Transform(af_d, "python_static", "python_dynamic", time.time())
    print ">> providing DYNAMIC: "
    print "YRP : ", t_d.transformation.yrp
    print "translation : ", t_d.transformation.translation
    print "rotation : ", t_d.transformation.rotation_quaternion
    # Quieten rct logging before entering the publish loop.
    logging.getLogger('rct').setLevel(logging.WARNING)
    # actually send it until doRun (module-level flag) goes False
    while (doRun):
        tf2_publisher.send_transform(t_d, rct.TransformType.DYNAMIC)
        time.sleep(0.019)
def test_operators(self):
    """Smoke-test that pyrr's operator overloads all execute without error."""
    from pyrr import Quaternion, Matrix44, Matrix33, Vector3, Vector4
    import numpy as np
    # matrix multiplication
    m = Matrix44() * Matrix33()
    m = Matrix44() * Quaternion()
    m = Matrix33() * Quaternion()
    # matrix inverse
    m = ~Matrix44.from_x_rotation(np.pi)
    # quaternion multiplication
    q = Quaternion() * Quaternion()
    q = Quaternion() * Matrix44()
    q = Quaternion() * Matrix33()
    # quaternion inverse (conjugate)
    q = ~Quaternion()
    # quaternion dot product
    d = Quaternion() | Quaternion()
    # vector operations
    v = Vector3() + Vector3()
    v = Vector4() - Vector4()
    # vector transform
    v = Quaternion() * Vector3()
    v = Matrix44() * Vector3()
    v = Matrix44() * Vector4()
    v = Matrix33() * Vector3()
    # dot and cross products
    dot = Vector3() | Vector3()
    cross = Vector3() ^ Vector3()
def test_conversions(self):
    """Exercise pyrr's cross-type conversion constructors and properties."""
    from pyrr import Quaternion, Matrix33, Matrix44, Vector3, Vector4
    # Vector3 <-> Vector4 round trip (w is split out on the way back).
    v3 = Vector3([1.,0.,0.])
    v4 = Vector4.from_vector3(v3, w=1.0)
    v3, w = Vector3.from_vector4(v4)
    # Matrix/quaternion conversions via constructors...
    m44 = Matrix44()
    q = Quaternion(m44)
    m33 = Matrix33(q)
    # ...and via conversion properties.
    m33 = Matrix44().matrix33
    m44 = Matrix33().matrix44
    q = Matrix44().quaternion
    q = Matrix33().quaternion
    m33 = Quaternion().matrix33
    m44 = Quaternion().matrix44
def test_conversions(self):
    """Exercise pyrr's cross-type conversion constructors and properties."""
    from pyrr import Quaternion, Matrix33, Matrix44, Vector3, Vector4
    # Vector3 <-> Vector4 round trip (w is split out on the way back).
    v3 = Vector3([1., 0., 0.])
    v4 = Vector4.from_vector3(v3, w=1.0)
    v3, w = Vector3.from_vector4(v4)
    # Matrix/quaternion conversions via constructors...
    m44 = Matrix44()
    q = Quaternion(m44)
    m33 = Matrix33(q)
    # ...and via conversion properties.
    m33 = Matrix44().matrix33
    m44 = Matrix33().matrix44
    q = Matrix44().quaternion
    q = Matrix33().quaternion
    m33 = Quaternion().matrix33
    m44 = Quaternion().matrix44
def project_to_img_frame(self, vector, viewMatrix):
    """Project a world-space point into image pixel coordinates (top-left origin).

    Returns [-1, -1] when the point lies at or beyond the far plane
    (NDC z >= 1).
    """
    clip = self.projection * (viewMatrix * Vector4.from_vector3(vector, w=1.0))
    if clip.w != 0:
        ndc = Vector3(clip.xyz) / clip.w
    else:
        # Degenerate w (clipped) — skip the perspective divide.
        ndc = clip.xyz
    if ndc.z >= 1:
        return [-1, -1]
    viewOffset = 0
    pixel = ((np.array(ndc.xy) + 1.0) / 2.0) * np.array([self.width, self.height]) + viewOffset
    # Flip y: NDC is bottom-left origin, the image frame is top-left.
    pixel[1] = self.height - pixel[1]
    return pixel
def __init__(self,
             grid_cell_size: Vector3,
             bounding_volume: Tuple[Vector3, Vector3],
             layer_distance: float,
             extend_by: float = 1.0):
    # NOTE(review): extend_by was annotated `int` but its default is the
    # float 1.0 — annotation corrected to float.
    self.grid_cell_size: Vector3 = grid_cell_size
    self.layer_distance: float = layer_distance
    self.bounding_volume: Tuple[Vector3, Vector3] = bounding_volume
    # Normalize each axis so bounding_volume[0] <= bounding_volume[1],
    # padding both ends by extend_by.
    if self.bounding_volume[0].x > self.bounding_volume[1].x:
        self.bounding_volume[0].x, self.bounding_volume[1].x = bounding_volume[1].x - extend_by, \
            bounding_volume[0].x + extend_by
    else:
        self.bounding_volume[0].x, self.bounding_volume[1].x = bounding_volume[0].x - extend_by, \
            bounding_volume[1].x + extend_by
    if self.bounding_volume[0].y > self.bounding_volume[1].y:
        self.bounding_volume[0].y, self.bounding_volume[1].y = bounding_volume[1].y - extend_by, \
            bounding_volume[0].y + extend_by
    else:
        self.bounding_volume[0].y, self.bounding_volume[1].y = bounding_volume[0].y - extend_by, \
            bounding_volume[1].y + extend_by
    # The z axis additionally extends upward by layer_distance.
    # NOTE(review): unlike x/y, both z bounds here derive from the same
    # source component ([1].z or [0].z), so the volume's z depth becomes
    # layer_distance + 2*extend_by regardless of the input span — confirm
    # this is intended.
    if self.bounding_volume[0].z > self.bounding_volume[1].z:
        self.bounding_volume[0].z, self.bounding_volume[1].z = bounding_volume[1].z - extend_by, \
            bounding_volume[1].z + layer_distance + extend_by
    else:
        self.bounding_volume[0].z, self.bounding_volume[1].z = bounding_volume[0].z - extend_by, \
            bounding_volume[0].z + layer_distance + extend_by
    # Number of cells along each axis (+1 so partial cells are included).
    self.grid_cell_count: List[int] = [
        int((self.bounding_volume[1].x - self.bounding_volume[0].x) / self.grid_cell_size.x) + 1,
        int((self.bounding_volume[1].y - self.bounding_volume[0].y) / self.grid_cell_size.y) + 1,
        int((self.bounding_volume[1].z - self.bounding_volume[0].z) / self.grid_cell_size.z) + 1
    ]
    self.grid_cell_count_overall: int = self.grid_cell_count[
        0] * self.grid_cell_count[1] * self.grid_cell_count[2]
    # All eight corners of the (extended) bounding box as homogeneous
    # points — the two extreme corners first, then the remaining six.
    self.extends: List[Vector4] = [
        vector4.create_from_vector3(self.bounding_volume[0], 1.0),
        vector4.create_from_vector3(self.bounding_volume[1], 1.0)
    ]
    self.extends.extend([
        Vector4([
            self.bounding_volume[1].x, self.bounding_volume[1].y,
            self.bounding_volume[0].z, 1.0
        ]),
        Vector4([
            self.bounding_volume[1].x, self.bounding_volume[0].y,
            self.bounding_volume[0].z, 1.0
        ]),
        Vector4([
            self.bounding_volume[0].x, self.bounding_volume[1].y,
            self.bounding_volume[0].z, 1.0
        ]),
        Vector4([
            self.bounding_volume[1].x, self.bounding_volume[1].y,
            self.bounding_volume[1].z, 1.0
        ]),
        Vector4([
            self.bounding_volume[1].x, self.bounding_volume[0].y,
            self.bounding_volume[1].z, 1.0
        ]),
        Vector4([
            self.bounding_volume[0].x, self.bounding_volume[1].y,
            self.bounding_volume[1].z, 1.0
        ])
    ])
def getEyeSpacePosition(self, light, viewMatrix):
    """Return the light's world position transformed into eye (view) space."""
    world = light.getPosition()
    # Promote to a homogeneous point (w = 1) so translation applies.
    homogeneous = Vector4((world[0], world[1], world[2], 1.0))
    transformed = matrix44.apply_to_vector(viewMatrix, homogeneous)
    return Vector3((transformed[0], transformed[1], transformed[2]))
points = np.array([ [1, 0, 0], [0, 1, 0], [0, 0, 1], ]) # Calculate normals deltaVec1, deltaVec2 = points[0] - points[1], points[0] - points[2] normal = np.cross(deltaVec1, deltaVec2) endPoints = points + normal # Compute new values worldPositions = [] in_normal = None for i in range(len(points)): in_position = Vector4(np.concatenate([points[i], [1]])) in_normal = Vector4(normal.tolist() + [1]) #GLSL worldMatrix = wV.translationMatrix * wV.rotationMatrix * wV.scaleMatrix modelMatrix = mV.translationMatrix * mV.scaleMatrix * mV.rotationMatrix matrix = worldMatrix * modelMatrix in_normal = (matrix.inverse.transpose() * in_normal) worldPosition = matrix * in_position #SAVE GLSL worldPositions.append(worldPosition.xyz) in_normal = in_normal.xyz # Plot original points and normals ax.plot_trisurf(points[:, 0],
def renderSceneFromEye(self, fbo_curr, eEyePos):
    """Render the whole sample scene into *fbo_curr* for one eye.

    Applies a per-eye translation (IPD offset), an animated z rotation,
    then draws each named object with its flat color; the billboard
    image is the only textured draw.
    """
    fbo_curr.use()
    fbo_curr.clear(1.0, 1.0, 1.0, 1.0)
    '''
    if self.fbo_left == fbo_curr:
        fbo_curr.clear(0.32, 0.0, 0.0, 1.0)
    else:
        fbo_curr.clear(0.0, 0.0, 0.32, 1.0)
    '''
    # Per-eye offset matrix; +/-5.0 along y for left/right, identity otherwise.
    # NOTE(review): an inter-pupillary offset along y (not x) is unusual —
    # confirm the coordinate convention.
    ipd_translate = None
    if eyePosition.LEFT == eEyePos:
        ipd_translate = Matrix44.from_translation(Vector4([
            0.0, 5.0, 0.0, 1.0]))
    elif eyePosition.RIGHT == eEyePos:
        ipd_translate = Matrix44.from_translation(Vector4([
            0.0, -5.0, 0.0, 1.0]))
    else:
        ipd_translate = Matrix44.from_translation(Vector4([
            0.0, 0.0, 0.0, 1.0]))
    self.ctx.enable(moderngl.DEPTH_TEST)
    '''
    self.mvp_sample.write((self.proj_sample * self.lookat_sample).astype('f4').tobytes())
    self.texture_sample.use()
    self.vao_sample.render(moderngl.TRIANGLE_STRIP)
    '''
    # NOTE(review): time.clock() was removed in Python 3.8; this code
    # requires an older interpreter (or a port to time.perf_counter()).
    time_sample = time.clock() - self.start_time
    # Oscillating rotation driven by elapsed time.
    rotate = Matrix44.from_z_rotation(np.sin(time_sample*10.0) * 0.5 + 0.2)
    self.bUseTexture_sample.value = False
    self.light_sample.value = (67.69, -8.14, 52.49)
    # Stereo mode uses its own projection matrix.
    if visualMode.STEREO == self.eVisualMode:
        self.mvp_sample.write((self.proj_sample_stereo * self.lookat_sample * ipd_translate * rotate).astype('f4').tobytes())
    else:
        self.mvp_sample.write((self.proj_sample * self.lookat_sample * ipd_translate * rotate).astype('f4').tobytes())
    #self.mvp_sample.write((self.proj_sample * self.lookat_sample * ipd_translate).astype('f4').tobytes())
    #self.mvp_sample.write((self.proj_sample * self.lookat_sample).astype('f4').tobytes())
    #self.mvp_sample.write((self.proj_sample * ipd_translate * rotate).astype('f4').tobytes())
    # Flat-colored geometry passes.
    self.color_sample.value = (0.67, 0.49, 0.29)
    self.objects['ground'].render()
    self.color_sample.value = (0.46, 0.67, 0.29)
    self.objects['grass'].render()
    self.color_sample.value = (1.0, 1.0, 1.0)
    self.objects['billboard'].render()
    self.color_sample.value = (0.2, 0.2, 0.2)
    self.objects['billboard-holder'].render()
    # Textured pass for the billboard image.
    self.bUseTexture_sample.value = True
    self.texture_sample.use()
    self.objects['billboard-image'].render()
if __name__ == '__main__':
    # NOTE(review): Python 2 print statements — this script requires
    # Python 2.
    # Configure logging
    logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
    logging.getLogger('rsb').setLevel(logging.WARNING)
    logging.getLogger('rct').setLevel(logging.INFO)
    print "\n>> Gathering transformation info ..."
    # Give the subscriber a moment to receive transform broadcasts.
    time.sleep(1)
    print ">> Currently available transformations:\n"
    print "\n\t".join(
        ("\t" + tf2_subscriber.all_frames_as_string()).split('\n'))
    # a point in space we want to transform
    source_point4 = Vector4([1., 1., 1., 1.])
    print "Base point: ", source_point4
    print "--------------------------------\n"
    # automatic search for trafos... (kept for reference, currently disabled)
    # available_trafos = tf2_subscriber.all_frames_as_string().split('\n')
    # mappings_to_do = []
    # for x in available_trafos:
    #     if len(x) > 3:
    #         x = x.replace("Frame ", "")
    #         x = x.replace(" exists with parent ", ",")
    #         x = x[:-1]
    #         res = x.split(",")
    #         mappings_to_do.append([res[1], res[0]])
def pinDataTypeHint():
    """Return the pin's type name paired with its default Vector4 value."""
    hint = ('FloatVector4Pin', Vector4())
    return hint
def toEyeCoords(self, clipCoords):
    """Transform clip-space coordinates into an eye-space ray direction."""
    inv_proj = pyrr.matrix44.inverse(self.__projectionMatrix)
    eye = pyrr.matrix44.apply_to_vector(inv_proj, clipCoords)
    # Keep xy only, force a forward-pointing direction (z = -1, w = 0).
    return Vector4((eye[0], eye[1], -1, 0))
def pinDataTypeHint():
    """Return the pin's DataTypes tag paired with a default Vector4."""
    hint = (DataTypes.FloatVector4, Vector4())
    return hint
def mvps(self, pos):
    """Run *pos* through the model-view-projection chain and map to screen coords."""
    local = Vector4([pos[0], pos[1], pos[2], 1.])
    # Apply model, then view, then projection (same grouping as before).
    clip = self.projection_matrix * (self.view_matrix * (self.model_matrix * local))
    # Perspective divide before handing off to the screen mapping.
    ndc = Vector3([clip.x, clip.y, clip.z]) / clip.w
    return self.to_screen(ndc)
def pan(self, dx, dy):
    """Pan the camera position by a screen-space mouse delta."""
    # Scale the deltas; x is negated so dragging feels natural.
    delta = Vector4([dx * -0.01, dy * 0.01, 0.0, 0.0])
    # Undo the current rotation so the pan happens in the view plane.
    self._pos += ~self.rot() * delta
def __init__(self, name, parent, dataType, direction, **kwargs):
    """Create a float Vector4 pin and seed it with a default Vector4()."""
    super(FloatVector4Pin, self).__init__(name, parent, dataType, direction,
                                          **kwargs)
    self.setDefaultValue(Vector4())
def object_hook(self, vec4Dict):
    """JSON object hook that decodes dicts keyed by "Vector4" into Vector4.

    json invokes ``object_hook`` for *every* decoded JSON object, so the
    previous unconditional ``vec4Dict[Vector4.__name__]`` raised KeyError
    for any dict that was not a serialized Vector4. Unrelated dicts are
    now passed through unchanged.
    """
    if Vector4.__name__ in vec4Dict:
        return Vector4(vec4Dict[Vector4.__name__])
    return vec4Dict