def mouseMoveEvent(self, event): pos = event.pos() # compute point on sphere under pointer (w, h) = self.viewport t = (2*self.old_pos.x() - w) / float(w) u = -(2*self.old_pos.y() - h) / float(h) # compute inverse of view transform ignoring rotation m = Matrix44.from_translation(Vector3([0, 0, -self.zoom])) * self.projTransform m = matrix44.inverse(m) rayOri = m * Vector3([t, u, -1]) rayEnd = m * Vector3([t, u, 1]) rayDir = rayEnd - rayOri self.picked = intersectRayUnitSphere(rayOri, rayDir) # rotate on left-drag if event.buttons() & QtCore.Qt.LeftButton > 0: # the rotation vector is the displacement vector rotated by 90 degrees dx = pos.x() - self.old_pos.x() dy = pos.y() - self.old_pos.y() if dx == 0 and dy == 0: return v = Vector3([dy, dx, 0]) # update the current orientation self.layers.multiplyOrientation(Quaternion.from_axis_rotation( -v.normalised, -v.length * 0.002, )) elif event.buttons() & QtCore.Qt.RightButton > 0: dz = pos.y() - self.old_pos.y() self.zoom = max(0, self.zoom + dz / 100.0) self.old_pos = pos self.update()
def test_operators_matrix44(self):
    """Check Matrix44 operator overloads against the matrix44 module."""
    m1 = Matrix44.identity()
    m2 = Matrix44.from_x_rotation(0.5)
    # + is element-wise addition
    self.assertTrue(
        np.array_equal(
            m1 + m2,
            matrix44.create_identity() + matrix44.create_from_x_rotation(0.5)))
    # - is element-wise subtraction
    self.assertTrue(
        np.array_equal(
            m1 - m2,
            matrix44.create_identity() - matrix44.create_from_x_rotation(0.5)))
    # * maps onto matrix44.multiply
    self.assertTrue(
        np.array_equal(
            m1 * m2,
            matrix44.multiply(
                matrix44.create_identity(),
                matrix44.create_from_x_rotation(0.5))))
    # / is not a supported matrix operation
    self.assertRaises(ValueError, lambda: m1 / m2)
    # ~ is the matrix inverse
    self.assertTrue(
        np.array_equal(
            ~m2,
            matrix44.inverse(matrix44.create_from_x_rotation(0.5))))
def test_operators_matrix44(self):
    """Check Matrix44 arithmetic, inverse and equality operators."""
    m1 = Matrix44.identity()
    m2 = Matrix44.from_x_rotation(0.5)
    # addition
    self.assertTrue(np.array_equal(
        m1 + m2,
        matrix44.create_identity() + matrix44.create_from_x_rotation(0.5)))
    # subtraction
    self.assertTrue(np.array_equal(
        m1 - m2,
        matrix44.create_identity() - matrix44.create_from_x_rotation(0.5)))
    # multiplication
    self.assertTrue(np.array_equal(
        m1 * m2,
        matrix44.multiply(
            matrix44.create_from_x_rotation(0.5),
            matrix44.create_identity())))
    # division is rejected
    self.assertRaises(ValueError, lambda: m1 / m2)
    # inversion via ~
    self.assertTrue(np.array_equal(
        ~m2, matrix44.inverse(matrix44.create_from_x_rotation(0.5))))
    # equality: identical matrices compare equal, differing ones do not
    self.assertTrue(Matrix44() == Matrix44())
    self.assertFalse(Matrix44() == Matrix44([1. for n in range(16)]))
    # inequality is the exact complement
    self.assertTrue(Matrix44() != Matrix44([1. for n in range(16)]))
    self.assertFalse(Matrix44() != Matrix44())
def model_view(self):
    """The camera's model-view matrix.

    Computed as the inverse of the camera's world matrix; intended to
    seed the model-view matrix for rendering. Accessed as a property.

    :rtype: numpy.array
    :return: The camera's model-view matrix.
    """
    # the model-view transform undoes the camera's world transform
    world = self.world_transform.matrix
    return matrix44.inverse(world)
def model_view( self ):
    """The camera's model-view matrix.

    This is the inverse of the camera's world matrix and seeds the
    model-view matrix used for rendering. Accessed as a property.

    :rtype: numpy.array
    :return: The camera's model-view matrix.
    """
    # invert the camera's world transform to get the view transform
    world_matrix = self.world_transform.matrix
    return matrix44.inverse( world_matrix )
def test_inverse(self):
    """Inverting a rotation by pi equals rotating by -pi."""
    rotation = matrix44.create_from_y_rotation(np.pi)
    inverted = matrix44.inverse(rotation)
    expected = matrix44.create_from_y_rotation(-np.pi)
    self.assertTrue(np.allclose(inverted, expected))
def test_inverse(self):
    """The inverse of a y-rotation is the opposite y-rotation."""
    forward = matrix44.create_from_y_rotation(np.pi)
    backward = matrix44.create_from_y_rotation(-np.pi)
    self.assertTrue(np.allclose(matrix44.inverse(forward), backward))
def test_inverse(self):
    """Matrix44.inverse agrees with the module-level matrix44.inverse."""
    combined = Matrix44.identity() * Matrix44.from_x_rotation(0.5)
    self.assertTrue(
        np.array_equal(combined.inverse, matrix44.inverse(combined)))
def generate_joint_matrix(joint):
    """Build the inverse of a joint's bind matrix.

    Combines the joint's orientation (quaternion) and position
    (translation) into a single matrix, then returns its inverse.

    :param joint: object exposing ``orientation`` and ``position``
        attributes understood by pyrr's matrix44 factories.
    :return: the inverted joint matrix (numpy array).
    """
    # convert joint position and orientation to a matrix
    rotation = matrix44.create_from_quaternion(joint.orientation)
    translation = matrix44.create_from_translation(joint.position)
    bind_matrix = matrix44.multiply(rotation, translation)
    return matrix44.inverse(bind_matrix)
float(atari_height) * zoom, 1.0])) atari_screen2_translation = pyrr.matrix44.create_from_translation( pyrr.Vector3([0, 0, -3])) atari_screen_2 = pyrr.matrix44.multiply(atari_screen2_scale, atari_screen2_translation) translation_cut = matrix44.create_from_translation(Vector3([0.0, -34.0, 0.0])) scale_cut = matrix44.create_from_scale(Vector3([160, 210, 1.0])) cutout_model = pyrr.matrix44.multiply(scale_cut, translation_cut) minime_translate = matrix44.create_from_translation( Vector3([0.0, -34.0 / 210.0, 0.0])) minime_scale = matrix44.create_from_scale( Vector3([32.0, (210.0 / (168.0 - 34.0)) * 32.0, 1.0])) minime_model = matrix44.multiply(minime_translate, minime_scale) minime_inv_model = matrix44.inverse(minime_model) transporter_scale = matrix44.create_from_scale(Vector3([32.0, 32.0, 1.0])) bbox_scale_factor = 16.0 / 0.9 bbox1_model = pyrr.matrix44.create_from_scale( pyrr.Vector3([bbox_scale_factor, bbox_scale_factor, 1.0])) model_loc = glGetUniformLocation(shader, "model") proj_loc = glGetUniformLocation(shader, "projection") color_model_loc = glGetUniformLocation(white_shader, "model") color_proj_loc = glGetUniformLocation(white_shader, "projection") color_loc = glGetUniformLocation(white_shader, "uColor") # init env env = gym.make('Pong-v0')