Example #1
0
def load_blender_gltf_light(g0, n0):
    """Build a Light from a Blender-exported glTF node and its child.

    The child node carries the KHR_lights_punctual reference; the outer
    node supplies name, scale and translation. Rotations of both nodes
    are composed into a single quaternion.
    """
    child = g0.model.nodes[n0.children[0]]

    light_index = child.extensions.get('KHR_lights_punctual')['light']
    light_def = g0.model.extensions['KHR_lights_punctual']['lights'][light_index]

    child_rot = glm.mat4_cast(glm.quat(xyzw2wxyz(child.rotation)))
    if n0.rotation:
        parent_rot = glm.mat4_cast(glm.quat(xyzw2wxyz(n0.rotation)))
    else:
        # Missing rotation on the outer node means identity.
        parent_rot = glm.mat4(1.0)
    combined_rotation = glm.quat_cast(parent_rot * child_rot)

    # Per-type intensity scaling applied to the exported intensity.
    attenuation = {
        'spot': 1 / 10,
        'point': 1 / 10,
        'directional': 5 / 4,
    }

    ambient = 0.001

    spot_params = light_def.get('spot', {})
    light = Light(
        n0.name,
        combined_rotation,
        n0.scale,
        n0.translation,
        light_def['type'],
        light_def['color'],
        light_def['intensity'] * attenuation.get(light_def['type'], 1.),
        ambient,
        spot_params.get('outerConeAngle', math.pi / 4),
        spot_params.get('innerConeAngle', math.pi / 4 * 0.9),
    )
    light._source = n0

    return light
Example #2
0
	def load_animations(self, file, correction):
		"""Load keyframe animations from a COLLADA (.dae) document.

		Parameters
		----------
		file : path or file object accepted by ``xml.parse``
		correction : bool
			When true, rotate the root joint's keyframes by +90 degrees
			around X (Z-up to Y-up fix-up for Blender exports).

		Returns an ``Animation(duration, keyframes)``.
		"""
		root = xml.parse(file).getroot()

		def parse_floats(node):
			# A <float_array> payload may span several lines; flatten the
			# whitespace before splitting into numbers.
			return [float(x)
					for x in ' '.join(node.find('float_array').text.split('\n')).split(' ')
					if x != '']

		anim = root.find('library_animations').find('animation')
		root_ptr = self.get_child_with_attribute(
			root.find('library_visual_scenes').find('visual_scene'),
			'node',
			'id',
			'Armature'
		)

		# Prefer the first child of the 'Armature' node as the root joint;
		# fall back to the scene's first node when no armature is present.
		if root_ptr is not None:
			root_joint = root_ptr.find('node').attrib['id']
		else:
			root_joint = root.find('library_visual_scenes').find('visual_scene').find('node').attrib['id']

		times = parse_floats(anim.find('source'))
		duration = times[-1]

		keyframes = [KeyFrame(time) for time in times]

		# The root correction matrix is loop-invariant; build it once.
		if correction:
			corr = glm.rotate(glm.mat4(1.0), glm.radians(90), glm.vec3(1, 0, 0))
		else:
			corr = glm.mat4(1.0)

		for joint_node in root.find('library_animations').findall('animation'):

			joint_name_id = joint_node.find('channel').attrib['target'].split('/')[0]

			data_id = self.get_child_with_attribute(
				joint_node.find('sampler'),
				'input',
				'semantic',
				'OUTPUT'
			).attrib['source'][1:]  # strip the leading '#'

			transforms = parse_floats(self.get_child_with_attribute(
				joint_node,
				'source',
				'id',
				data_id
			))

			for i, time in enumerate(times):
				# COLLADA stores row-major 4x4 matrices; glm is column-major.
				matrix = glm.transpose(glm.mat4(transforms[i * 16:(i + 1) * 16]))

				if joint_name_id == root_joint:
					matrix = corr * matrix

				position = glm.vec3(matrix[3].x, matrix[3].y, matrix[3].z)
				rotation = glm.quat_cast(matrix)
				keyframes[i].pose[joint_name_id] = JointTransform(position, rotation)

		return Animation(duration, keyframes)
Example #3
0
def load_blender_gltf_camera(g0, n0):
    """Build a Camera from a Blender-exported glTF node and its child.

    The child node carries the camera reference; the outer node supplies
    name, scale and translation. Rotations of both nodes are composed.
    """
    nc = g0.model.nodes[n0.children[0]]
    c0 = g0.model.cameras[nc.camera]

    m0 = glm.mat4_cast(glm.quat(xyzw2wxyz(nc.rotation)))
    # A glTF node may omit its rotation entirely (identity). Guard against
    # None here the same way load_blender_gltf_light does, instead of
    # crashing inside xyzw2wxyz.
    m1 = glm.mat4_cast(glm.quat(xyzw2wxyz(n0.rotation))) if n0.rotation else glm.mat4(1.0)
    qq = glm.quat_cast(m1 * m0)

    camera = Camera(
        n0.name,
        qq,
        n0.scale,
        n0.translation,
        c0.type,
        # Exactly one of perspective/orthographic is set per glTF spec.
        **vars(c0.perspective or c0.orthographic)
    )
    camera._source = n0

    return camera
Example #4
0
def decompose_matrix(
    mat: glm.mat4
) -> Tuple[Optional[glm.vec3], Optional[glm.quat], Optional[glm.vec3]]:
    """Split an affine matrix into (translation, rotation, scale).

    Any component equal to its default-constructed value (zero
    translation, identity rotation, zero scale) is returned as None.
    """
    scales = [glm.length(glm.vec3(mat[axis])) for axis in range(3)]
    # A negative determinant means the transform mirrors one axis;
    # fold the flip into the X scale.
    if glm.determinant(mat) < 0.0:
        scales[0] = -scales[0]

    translation = glm.vec3(mat[3])
    scale = glm.vec3(scales[0], scales[1], scales[2])

    # Divide the scale out of the upper-left 3x3 to isolate rotation.
    rot_mat = copy.copy(mat)
    for col in range(3):
        inv = 1.0 / scales[col]
        for row in range(3):
            rot_mat[col][row] *= inv
    rot_mat[3] = glm.vec4(0.0, 0.0, 0.0, 1.0)
    rotation = glm.normalize(glm.quat_cast(rot_mat))

    if translation == glm.vec3():
        translation = None
    if rotation == glm.quat():
        rotation = None
    if scale == glm.vec3():
        scale = None

    return (translation, rotation, scale)
def DIF(data, data_root, filename):
    '''
    convert global feature to DIF feature

    Re-expresses joint positions and rotations relative to a per-frame,
    ground-projected root frame, writing the results back into `data`.

    Assumed layout (inferred from the indexing below -- TODO confirm
    against the caller):
      - data:      (frames, channels, actors); each of 25 joints uses 7
                   consecutive channels: x, y, z position then a
                   w, x, y, z quaternion.
      - data_root: (frames, 4, actors) root quaternion as w, x, y, z.
      - filename:  unused here; presumably kept for a uniform interface.
    '''
    id_root = 0  # NOTE(review): never used below
    id_leftfoot = 19
    id_right_foot = 16
    # Heuristic: if the second actor's channels are all near zero, the
    # clip contains a single actor.
    if (np.sum(np.abs(data[:, :, 1])) < 0.001):
        num_actor = 1
    else:
        num_actor = 2
    print("num_actor:{}".format(num_actor))
    for actor in range(num_actor):
        for i in range(data.shape[0]):
            # Root orientation for this frame (glm.quat takes w first).
            quat_root = glm.quat(data_root[i, 0, actor], data_root[i, 1,
                                                                   actor],
                                 data_root[i, 2, actor], data_root[i, 3,
                                                                   actor])
            R_root = glm.mat4_cast(quat_root)
            x_axis = R_root * glm.vec4(1, 0, 0, 1)
            # Project the root's X axis onto the ground plane (y = 0) to
            # build a yaw-only reference frame.
            x_axis_new = np.array([x_axis[0], 0, x_axis[2]])
            x_axis_new /= np.linalg.norm(x_axis_new)
            y_axis_new = np.array([0, 1, 0])
            z_axis_new = np.cross(x_axis_new, y_axis_new)

            # R: yaw-only rotation; T: translation to the root's ground
            # position, with height set to the mean of the two feet.
            # NOTE(review): T reads 3-channels-per-joint style indices
            # (j*3+1) while the joint loop below uses 7 per joint --
            # verify which layout `data` actually has.
            R = np.eye(4)
            R[0:3, 0] = x_axis_new
            R[0:3, 1] = y_axis_new
            R[0:3, 2] = z_axis_new
            T = np.eye(4)
            T[0, 3] = data[i, 0, actor]
            T[2, 3] = data[i, 2, actor]
            T[1, 3] = 0.5 * (data[i, id_leftfoot * 3 + 1, actor] +
                             data[i, id_right_foot * 3 + 1, actor])

            # M maps world coordinates into the new root-relative frame.
            M = np.linalg.inv(np.dot(T, R))

            rotation_matrix = glm.mat3([
                x_axis_new.tolist(),
                y_axis_new.tolist(),
                z_axis_new.tolist()
            ])
            rotation_matrix = glm.mat4(rotation_matrix)

            for j in range(25):
                #embed()
                # Joint position (homogeneous) -> root-relative position.
                pos = np.array([
                    data[i, j * 7, actor], data[i, 7 * j + 1, actor],
                    data[i, 7 * j + 2, actor], 1
                ])
                pos = np.dot(M, pos)

                # Joint rotation, conjugated into the root-relative frame.
                quat = glm.quat(data[i, 7 * j + 3, actor], data[i, 7 * j + 4,
                                                                actor],
                                data[i, 7 * j + 5, actor], data[i, 7 * j + 6,
                                                                actor])
                rotation_camera = glm.mat4_cast(quat)
                rotation_local = glm.inverse(
                    rotation_matrix) * rotation_camera * rotation_matrix
                quat_local = glm.quat_cast(rotation_local)
                # make sure the W part of quaternion is larger than 0
                # (glm indexes quat components x,y,z,w, so [3] is w)
                if (quat_local[3] < 0):
                    quat_local = -quat_local
                #print(np.linalg.norm(data[i,7*j+3:7*j+7]))
                for k in range(3):
                    data[i, 7 * j + k, actor] = pos[k]
                # Write back as w,x,y,z: index (3+k)%4 walks w,x,y,z.
                for k in range(4):
                    data[i, 7 * j + 3 + k, actor] = quat_local[(3 + k) % 4]

        #print("###")
    return data