def normalize_indices_group(indices, data):
    """Slice out the range of `data` actually referenced by `indices` and rebase the indices to it.

    :param indices: numpy integer array of indices into `data`
    :param data: sequence of points; only data[min(indices):max(indices) + 1] is kept
    :returns: PointCollection over the sliced points, with indices shifted to start at 0
    """
    minimum = indices.min()
    maximum = indices.max()
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; use the
    # explicit scalar type np.float64 (same meaning) so this runs on modern NumPy.
    ret = np.array(data[minimum:maximum + 1], np.float64)
    return PointCollection(ret, indices - minimum)
def decode_polygon(polygon, influences=None): """ Decodes an mdl0 polygon :returns geometry """ # build the decoder_string decoder if influences is None: influences = polygon.parent.get_influences() pos_matrix_index = polygon.get_weight_index() vertex_index = polygon.get_vertex_index() vertices = polygon.get_vertex_group() normals = polygon.get_normal_group() normal_index = polygon.get_normal_index() colors = polygon.get_color_group() color_index = polygon.get_color0_index() texcoords = [] texcoord_index = polygon.get_uv_index(0) for i in range(polygon.count_uvs()): texcoords.append(polygon.get_uv_group(i)) face_point_indices, weights = decode_indices(polygon, polygon.encode_str) face_point_indices = np.array(face_point_indices, dtype=np.uint) face_point_indices[:, [0, 1]] = face_point_indices[:, [1, 0]] g_verts = PointCollection(vertices.get_decoded(), face_point_indices[:, :, vertex_index]) linked_bone = polygon.get_linked_bone() if pos_matrix_index >= 0: # apply influences to vertices influence_collection = decode_pos_mtx_indices( influences, weights, g_verts, face_point_indices[:, :, pos_matrix_index] // 3) else: influence = influences[linked_bone.weight_id] g_verts.apply_affine_matrix(np.array( linked_bone.get_transform_matrix()), apply=True) influence_collection = InfluenceCollection({0: influence}) from abmatt.converters.geometry import Geometry geometry = Geometry(polygon.name, polygon.get_material().name, g_verts, triangles=None, influences=influence_collection, linked_bone=linked_bone) # create the point collections if normals: geometry.normals = PointCollection( normals.get_decoded(), face_point_indices[:, :, normal_index]) if colors: geometry.colors = ColorCollection( colors.get_decoded(), face_point_indices[:, :, color_index]) for tex in texcoords: pc = PointCollection(tex.get_decoded(), face_point_indices[:, :, texcoord_index], tex.minimum, tex.maximum) geometry.texcoords.append(pc) texcoord_index += 1 return geometry
def __decode_input(self, input, triangles):
    """Decode one collada triangle <input> into its point/color collection.

    :param input: <input> xml element carrying 'semantic' and 'offset' attributes
    :param triangles: index array of shape (tris, 3, n_offsets)
    :returns: (semantic, collection) where collection is a ColorCollection for
        COLOR inputs or a PointCollection for VERTEX/NORMAL/TEXCOORD inputs
    :raises ValueError: on any other semantic
    """
    semantic = input.attrib['semantic']
    # Chase indirection until an actual <source> element is reached.
    source = self.get_referenced_element(input, 'source')
    while source.tag != 'source':
        source = self.get_referenced_element(first(source, 'input'), 'source')
    accessor = first(first(source, 'technique_common'), 'accessor')
    stride = accessor.attrib['stride']
    raw = self.get_referenced_element(accessor, 'source')
    points = np.array([float(value) for value in raw.text.split()])
    if stride:
        # stride is still the attribute string here; reshape flat floats
        # into rows of `stride` components.
        points = points.reshape((-1, int(stride)))
    offset = int(input.attrib['offset'])
    face_indices = triangles[:, :, offset]
    if semantic == 'COLOR':
        return semantic, ColorCollection(points, face_indices, normalize=True)
    if semantic in ('VERTEX', 'NORMAL', 'TEXCOORD'):
        return semantic, PointCollection(points, face_indices)
    raise ValueError('Unknown collada input {}'.format(semantic))
def decode_polygon(polygon, influences):
    """Decode an mdl0 polygon into a Geometry.

    :param polygon: mdl0 polygon; supplies vertex/normal/color/uv groups and
        their face-point index positions within the encoded data
    :param influences: bone influence collection (required in this variant)
    :returns: Geometry with vertices and any present normals/colors/uvs
    """
    # build the decoder_string decoder
    # Per-attribute column positions inside each decoded face point.
    pos_matrix_index = polygon.get_weight_index()
    tex_matrix_index = polygon.get_uv_matrix_index(0)
    vertex_index = polygon.get_vertex_index()
    vertices = polygon.get_vertex_group()
    normals = polygon.get_normal_group()
    normal_index = polygon.get_normal_index()
    colors = polygon.get_color_group()
    color_index = polygon.get_color0_index()
    texcoords = []
    texcoord_index = polygon.get_uv_index(0)
    for i in range(polygon.count_uvs()):
        texcoords.append(polygon.get_uv_group(i))
    face_point_indices, weights = decode_indices(polygon, polygon.encode_str)
    face_point_indices = np.array(face_point_indices, dtype=np.uint)
    # Swap the first two face points of every triangle — presumably flips
    # winding order to the target convention; TODO confirm.
    face_point_indices[:, [0, 1]] = face_point_indices[:, [1, 0]]
    g_verts = PointCollection(decode_geometry_group(vertices),
                              face_point_indices[:, :, vertex_index])
    linked_bone = polygon.get_bone()
    if pos_matrix_index >= 0:  # apply influences to vertices
        # Weighted geometry: position-matrix indices are stored premultiplied
        # by 3, hence the // 3.
        influence_collection = decode_pos_mtx_indices(
            influences, weights, g_verts,
            face_point_indices[:, :, pos_matrix_index] // 3)
    else:
        influence = influences[linked_bone.weight_id]
        rotation_matrix = get_rotation_matrix(
            np.array(linked_bone.get_inv_transform_matrix(), dtype=float))
        decoded_verts = influence.apply_to_all(g_verts.points, decode=True)
        if not np.allclose(rotation_matrix, np.identity(3)):
            for i in range(len(decoded_verts)):
                decoded_verts[i] = np.dot(rotation_matrix, decoded_verts[i])
        # NOTE(review): decoded_verts is computed above but never written back
        # to g_verts or the returned geometry — confirm whether apply_to_all
        # mutates g_verts.points in place or this result is dropped.
        influence_collection = InfluenceCollection({0: influence})
    if tex_matrix_index > 0:
        for x in polygon.has_tex_matrix:
            if x:
                # NOTE(review): `indices` is assigned but never used —
                # looks like unfinished texture-matrix handling; verify.
                indices = face_point_indices[:, :, tex_matrix_index]
                tex_matrix_index += 1
    geometry = Geometry(polygon.name, polygon.get_material().name, g_verts,
                        triangles=face_point_indices[:, :, vertex_index:],
                        influences=influence_collection,
                        linked_bone=linked_bone)
    # create the point collections
    if normals:
        geometry.normals = PointCollection(
            decode_geometry_group(normals),
            face_point_indices[:, :, normal_index])
    if colors:
        geometry.colors = ColorCollection(
            ColorCollection.decode_data(colors),
            face_point_indices[:, :, color_index])
    # UV sets occupy consecutive index columns starting at texcoord_index.
    for tex in texcoords:
        x = decode_geometry_group(tex)
        pc = PointCollection(x, face_point_indices[:, :, texcoord_index],
                             tex.minimum, tex.maximum)
        pc.flip_points()
        geometry.texcoords.append(pc)
        texcoord_index += 1
    return geometry
def decode_geometry(self, xml_geometry, material_name=None):
    """Decode a collada <geometry> element into a Geometry.

    :param xml_geometry: <geometry> xml element containing a <mesh> with a
        <triangles> node
    :param material_name: material to attach; defaults to the <triangles>
        node's 'material' attribute
    :returns: Geometry built from the POSITION/NORMAL/COLOR/TEXCOORD inputs
    :raises ValueError: if the index data does not match the declared
        triangle count, or an input has an unknown semantic
    """
    mesh = first(xml_geometry, 'mesh')
    tri_node = first(mesh, 'triangles')
    if not material_name:
        material_name = tri_node.attrib['material']
    inputs = []
    stride = 0
    uniqueOffsets = []
    indices = []
    # Collect one <input> per distinct offset; inputs sharing an offset
    # share index data, so duplicates are skipped.
    for input in tri_node.iter('input'):
        offset = int(input.attrib['offset'])
        if offset not in uniqueOffsets:
            uniqueOffsets.append(offset)
            inputs.append(input)
    # Flatten all <p> index lists into one stream.
    for x in tri_node.iter('p'):
        indices.extend([int(index) for index in x.text.split()])
    vertices = normals = colors = None
    texcoords = []
    data_inputs = []
    data_types = []
    offsets = []
    for input in inputs:
        offset = int(input.attrib['offset'])
        source = self.get_referenced_element(input, 'source')
        if source.tag != 'source':
            # Indirect reference (e.g. a <vertices> node): expand each of its
            # child <input>s, all sharing this offset.
            for x in source.iter('input'):
                source = self.get_referenced_element(x, 'source')
                data_inputs.append(self.__decode_source(source))
                data_types.append(x.attrib['semantic'])
                offsets.append(offset)
                stride += 1
        else:
            data_inputs.append(self.__decode_source(source))
            data_types.append(input.attrib['semantic'])
            offsets.append(offset)
            stride += 1
    # Indices are interleaved per face point: one column per unique offset,
    # 3 face points per triangle.
    triangles = np.array(indices, np.uint16).reshape(
        (-1, 3, len(uniqueOffsets)))
    count = tri_node.attrib.get('count')
    if count is not None and int(count) != triangles.shape[0]:
        raise ValueError(
            'Failed to parse {} triangles of unexpected shape, expected {} and got {}'
            .format(material_name, count, triangles.shape[0]))
    # Route each decoded source to its slot by semantic.
    for i in range(len(data_inputs)):
        decode_type = data_types[i]
        face_indices = np.copy(triangles[:, :, offsets[i]])
        if decode_type == 'TEXCOORD':
            texcoords.append(PointCollection(data_inputs[i], face_indices))
        elif decode_type == 'POSITION':
            vertices = PointCollection(data_inputs[i], face_indices)
        elif decode_type == 'NORMAL':
            normals = PointCollection(data_inputs[i], face_indices)
        elif decode_type == 'COLOR':
            colors = ColorCollection(data_inputs[i], face_indices,
                                     normalize=True)
        else:
            raise ValueError('Unknown semantic {}'.format(decode_type))
    name = xml_geometry.attrib.get('name')
    if not name:
        name = get_id(xml_geometry)
    geometry = Geometry(name, material_name, vertices=vertices,
                        texcoords=texcoords, normals=normals, colors=colors,
                        triangles=None)
    return geometry