def create_shader_materials(self, m, mesh):
    """Create one Blender material per shader material found on *m* and append it to *mesh*.

    Known texture/scalar properties are routed through a PrincipledBSDFWrapper;
    every other property is stored as a custom property on the material so no
    data is lost on round-trip.

    :param m: imported model data; ``m.shaderMaterials`` is iterated and
        ``m.header.meshName`` is used to name the new materials.
    :param mesh: target mesh data-block; new materials are appended to
        ``mesh.materials``.
    """
    for material in m.shaderMaterials:
        mat = bpy.data.materials.new(m.header.meshName + ".ShaderMaterial")
        mat.use_nodes = True
        principled = PrincipledBSDFWrapper(mat, is_readonly=False)
        for prop in material.properties:
            if prop.name == "DiffuseTexture":
                tex = load_texture(self, prop.value)
                # FIX: compare against None with `is not` (PEP 8), not `!=`.
                if tex is not None:
                    principled.base_color_texture.image = tex
            elif prop.name == "NormalMap":
                tex = load_texture(self, prop.value)
                if tex is not None:
                    principled.normalmap_texture.image = tex
            elif prop.name == "BumpScale":
                principled.normalmap_strength = prop.value
            # Color type
            elif prop.type == 5:
                mat[prop.name] = rgba_to_vector(prop)
            else:
                # Unhandled properties are preserved as custom properties.
                mat[prop.name] = prop.value
        mesh.materials.append(mat)
def process_next_chunk(context, file, previous_chunk, importedObjects, IMAGE_SEARCH, KEYFRAME):
    """Recursively parse and import all sub-chunks of *previous_chunk* from a 3DS file.

    Builds meshes, materials, lamps and cameras as the binary chunk stream is
    consumed, links them into the active collection and appends every created
    object to *importedObjects*.

    :param context: Blender context (scene/view-layer access).
    :param file: open binary file object positioned at the first sub-chunk.
    :param previous_chunk: parent Chunk whose payload is being consumed.
    :param importedObjects: list collecting every created object (mutated in place).
    :param IMAGE_SEARCH: recursively search sub-directories for texture images.
    :param KEYFRAME: apply keyframe (first-frame) transform tracks when True.
    """
    from bpy_extras.image_utils import load_image

    contextObName = None
    contextLamp = None
    contextCamera = None
    contextMaterial = None
    contextWrapper = None
    contextMatrix = None
    contextMesh_vertls = None
    contextMesh_facels = None
    contextMeshMaterials = []
    contextMesh_smooth = None
    contextMeshUV = None

    TEXTURE_DICT = {}   # material name -> loaded image
    MATDICT = {}        # material name -> Blender material

    # Localspace variable names, faster.
    SZ_FLOAT = struct.calcsize('f')
    SZ_2FLOAT = struct.calcsize('2f')
    SZ_3FLOAT = struct.calcsize('3f')
    SZ_4FLOAT = struct.calcsize('4f')
    SZ_U_SHORT = struct.calcsize('H')
    SZ_4U_SHORT = struct.calcsize('4H')
    SZ_4x3MAT = struct.calcsize('ffffffffffff')

    object_list = []    # for hierarchy
    object_parent = []  # index of parent in hierarchy, 0xFFFF = no parent
    pivot_list = []     # pivots with hierarchy handling

    def putContextMesh(context, myContextMesh_vertls, myContextMesh_facels,
                       myContextMeshMaterials, myContextMesh_smooth):
        # Flush the accumulated vertex/face/material/smoothing state into a
        # real Blender mesh object and link it into the scene.
        # FIX: the smoothing-group parameter was named `myContextMeshSmooth`
        # while the body read `myContextMesh_smooth` (NameError whenever a
        # mesh carried smoothing groups); the names are now unified.
        bmesh = bpy.data.meshes.new(contextObName)

        if myContextMesh_facels is None:
            myContextMesh_facels = []

        if myContextMesh_vertls:
            bmesh.vertices.add(len(myContextMesh_vertls) // 3)
            bmesh.vertices.foreach_set("co", myContextMesh_vertls)

            nbr_faces = len(myContextMesh_facels)
            bmesh.polygons.add(nbr_faces)
            bmesh.loops.add(nbr_faces * 3)
            eekadoodle_faces = []
            # Rotate any triangle whose last index is 0: Blender loops must
            # not end on vertex index 0 ("eekadoodle" fix).
            for v1, v2, v3 in myContextMesh_facels:
                eekadoodle_faces.extend((v3, v1, v2) if v3 == 0 else (v1, v2, v3))
            bmesh.polygons.foreach_set("loop_start", range(0, nbr_faces * 3, 3))
            bmesh.polygons.foreach_set("loop_total", (3, ) * nbr_faces)
            bmesh.loops.foreach_set("vertex_index", eekadoodle_faces)

            if bmesh.polygons and contextMeshUV:
                bmesh.uv_layers.new()
                uv_faces = bmesh.uv_layers.active.data[:]
            else:
                uv_faces = None

            for mat_idx, (matName, faces) in enumerate(myContextMeshMaterials):
                if matName is None:
                    bmat = None
                else:
                    bmat = MATDICT.get(matName)
                    # in rare cases no materials defined.
                    if bmat:
                        img = TEXTURE_DICT.get(bmat.name)
                    else:
                        print(" warning: material %r not defined!" % matName)
                        bmat = MATDICT[matName] = bpy.data.materials.new(matName)
                        img = None

                bmesh.materials.append(bmat)  # can be None

                if uv_faces and img:
                    for fidx in faces:
                        bmesh.polygons[fidx].material_index = mat_idx
                        # TODO: How to restore this?
                        # uv_faces[fidx].image = img
                else:
                    for fidx in faces:
                        bmesh.polygons[fidx].material_index = mat_idx

            if uv_faces:
                uvl = bmesh.uv_layers.active.data[:]
                for fidx, pl in enumerate(bmesh.polygons):
                    face = myContextMesh_facels[fidx]
                    v1, v2, v3 = face
                    # eekadoodle — same index rotation as above so UVs stay
                    # aligned with the reordered loops.
                    if v3 == 0:
                        v1, v2, v3 = v3, v1, v2
                    uvl[pl.loop_start].uv = contextMeshUV[v1 * 2:(v1 * 2) + 2]
                    uvl[pl.loop_start + 1].uv = contextMeshUV[v2 * 2:(v2 * 2) + 2]
                    uvl[pl.loop_start + 2].uv = contextMeshUV[v3 * 2:(v3 * 2) + 2]
                    # always a tri

        bmesh.validate()
        bmesh.update()

        ob = bpy.data.objects.new(contextObName, bmesh)
        object_dictionary[contextObName] = ob
        context.view_layer.active_layer_collection.collection.objects.link(ob)
        importedObjects.append(ob)

        if myContextMesh_smooth:
            # A nonzero smoothing-group mask marks the face smooth-shaded.
            for f, pl in enumerate(bmesh.polygons):
                smoothface = myContextMesh_smooth[f]
                if smoothface > 0:
                    bmesh.polygons[f].use_smooth = True

        if contextMatrix:
            ob.matrix_local = contextMatrix
            object_matrix[ob] = contextMatrix.copy()

    # a spare chunk
    new_chunk = Chunk()
    temp_chunk = Chunk()

    CreateBlenderObject = False
    CreateLightObject = False
    CreateCameraObject = False

    def read_float_color(temp_chunk):
        # Read an RGB color stored as three little-endian floats.
        temp_data = file.read(SZ_3FLOAT)
        temp_chunk.bytes_read += SZ_3FLOAT
        return [float(col) for col in struct.unpack('<3f', temp_data)]

    def read_float(temp_chunk):
        temp_data = file.read(SZ_FLOAT)
        temp_chunk.bytes_read += SZ_FLOAT
        return struct.unpack('<f', temp_data)[0]

    def read_short(temp_chunk):
        temp_data = file.read(SZ_U_SHORT)
        temp_chunk.bytes_read += SZ_U_SHORT
        return struct.unpack('<H', temp_data)[0]

    def read_byte_color(temp_chunk):
        # Read an RGB color stored as three bytes, normalized to 0..1.
        temp_data = file.read(struct.calcsize('3B'))
        temp_chunk.bytes_read += 3
        return [float(col) / 255 for col in struct.unpack('<3B', temp_data)]

    def read_texture(new_chunk, temp_chunk, name, mapto):
        # Parse one texture-map chunk (file path, scale/offset, tiling flags,
        # angle, tint) and wire the image into the Principled wrapper channel
        # selected by *mapto*.
        uscale, vscale, uoffset, voffset, angle = 1.0, 1.0, 0.0, 0.0, 0.0
        contextWrapper.use_nodes = True
        tintcolor = None
        extend = 'wrap'
        alpha = False
        pct = 50
        # FIX: guard against texture chunks with no MAT_MAP_FILEPATH
        # sub-chunk — `img` was previously unbound at the final `if img:`.
        img = None

        contextWrapper.emission_color = contextMaterial.line_color[:3]
        contextWrapper.base_color = contextMaterial.diffuse_color[:3]
        contextWrapper.specular = contextMaterial.specular_intensity
        contextWrapper.roughness = contextMaterial.roughness
        contextWrapper.metallic = contextMaterial.metallic
        contextWrapper.alpha = contextMaterial.diffuse_color[3]

        while (new_chunk.bytes_read < new_chunk.length):
            read_chunk(file, temp_chunk)
            if temp_chunk.ID == PERCENTAGE_SHORT:
                pct = read_short(temp_chunk)

            elif temp_chunk.ID == MAT_MAP_FILEPATH:
                texture_name, read_str_len = read_string(file)
                img = TEXTURE_DICT[contextMaterial.name] = load_image(
                    texture_name, dirname, recursive=IMAGE_SEARCH)
                temp_chunk.bytes_read += read_str_len  # plus one for the null character that gets removed

            elif temp_chunk.ID == MAT_MAP_USCALE:
                uscale = read_float(temp_chunk)
            elif temp_chunk.ID == MAT_MAP_VSCALE:
                vscale = read_float(temp_chunk)
            elif temp_chunk.ID == MAT_MAP_UOFFSET:
                uoffset = read_float(temp_chunk)
            elif temp_chunk.ID == MAT_MAP_VOFFSET:
                voffset = read_float(temp_chunk)

            elif temp_chunk.ID == MAT_MAP_TILING:
                # Bit flags; note only the first matching flag wins because
                # of the elif chain (matches the original behavior).
                tiling = read_short(temp_chunk)
                if tiling & 0x1:
                    extend = 'decal'
                elif tiling & 0x2:
                    extend = 'mirror'
                elif tiling & 0x8:
                    extend = 'invert'
                elif tiling & 0x10:
                    extend = 'noWrap'
                elif tiling & 0x20:
                    alpha = 'sat'
                elif tiling & 0x40:
                    alpha = 'alpha'
                elif tiling & 0x80:
                    tint = 'tint'
                elif tiling & 0x100:
                    tint = 'noAlpha'
                elif tiling & 0x200:
                    tint = 'RGBtint'

            elif temp_chunk.ID == MAT_MAP_ANG:
                angle = read_float(temp_chunk)
                print("\nwarning: UV angle mapped to z-rotation")

            elif temp_chunk.ID == MAT_MAP_COL1:
                tintcolor = read_byte_color(temp_chunk)

            skip_to_end(file, temp_chunk)
            new_chunk.bytes_read += temp_chunk.bytes_read

        # add the map to the material in the right channel
        if img:
            add_texture_to_material(img, contextWrapper, pct, extend, alpha,
                                    (uscale, vscale, 1), (uoffset, voffset, 0),
                                    angle, tintcolor, mapto)

    dirname = os.path.dirname(file.name)

    # loop through all the data for this chunk (previous chunk) and see what it is
    while (previous_chunk.bytes_read < previous_chunk.length):
        read_chunk(file, new_chunk)

        # is it a Version chunk?
        if new_chunk.ID == VERSION:
            # read in the version of the file
            temp_data = file.read(struct.calcsize('I'))
            version = struct.unpack('<I', temp_data)[0]
            new_chunk.bytes_read += 4  # read the 4 bytes for the version number
            # this loader works with version 3 and below, but may not with 4 and above
            if version > 3:
                print('\tNon-Fatal Error: Version greater than 3, may not load correctly: ', version)

        # is it an object info chunk?
        elif new_chunk.ID == OBJECTINFO:
            process_next_chunk(context, file, new_chunk, importedObjects, IMAGE_SEARCH, KEYFRAME)
            # keep track of how much we read in the main chunk
            # NOTE(review): temp_chunk has not been touched by the recursion
            # (it uses its own locals), so this add looks like a no-op at
            # best — kept as-is to preserve byte accounting behavior.
            new_chunk.bytes_read += temp_chunk.bytes_read

        # is it an object chunk?
        elif new_chunk.ID == OBJECT:
            if CreateBlenderObject:
                # A previous mesh was being accumulated; flush it first.
                putContextMesh(context, contextMesh_vertls, contextMesh_facels,
                               contextMeshMaterials, contextMesh_smooth)
                contextMesh_vertls = []
                contextMesh_facels = []
                contextMeshMaterials = []
                contextMesh_smooth = None
                contextMeshUV = None
                # Reset matrix
                contextMatrix = None

            CreateBlenderObject = True
            contextObName, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len

        # is it a material chunk?
        elif new_chunk.ID == MATERIAL:
            contextMaterial = bpy.data.materials.new('Material')
            contextWrapper = PrincipledBSDFWrapper(contextMaterial, is_readonly=False, use_nodes=False)

        elif new_chunk.ID == MAT_NAME:
            material_name, read_str_len = read_string(file)
            # plus one for the null character that ended the string
            new_chunk.bytes_read += read_str_len
            contextMaterial.name = material_name.rstrip()  # remove trailing whitespace
            MATDICT[material_name] = contextMaterial

        elif new_chunk.ID == MAT_AMBIENT:
            read_chunk(file, temp_chunk)
            # only available color is emission color
            if temp_chunk.ID == MAT_FLOAT_COLOR:
                contextMaterial.line_color[:3] = read_float_color(temp_chunk)
            elif temp_chunk.ID == MAT_24BIT_COLOR:
                contextMaterial.line_color[:3] = read_byte_color(temp_chunk)
            else:
                skip_to_end(file, temp_chunk)
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == MAT_DIFFUSE:
            read_chunk(file, temp_chunk)
            if temp_chunk.ID == MAT_FLOAT_COLOR:
                contextMaterial.diffuse_color[:3] = read_float_color(temp_chunk)
            elif temp_chunk.ID == MAT_24BIT_COLOR:
                contextMaterial.diffuse_color[:3] = read_byte_color(temp_chunk)
            else:
                skip_to_end(file, temp_chunk)
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == MAT_SPECULAR:
            read_chunk(file, temp_chunk)
            # Specular color is available
            if temp_chunk.ID == MAT_FLOAT_COLOR:
                contextMaterial.specular_color = read_float_color(temp_chunk)
            elif temp_chunk.ID == MAT_24BIT_COLOR:
                contextMaterial.specular_color = read_byte_color(temp_chunk)
            else:
                skip_to_end(file, temp_chunk)
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == MAT_SHINESS:
            # Shininess percentage maps inversely to roughness.
            read_chunk(file, temp_chunk)
            if temp_chunk.ID == PERCENTAGE_SHORT:
                temp_data = file.read(SZ_U_SHORT)
                temp_chunk.bytes_read += SZ_U_SHORT
                contextMaterial.roughness = 1 - (float(struct.unpack('<H', temp_data)[0]) / 100)
            elif temp_chunk.ID == PERCENTAGE_FLOAT:
                temp_data = file.read(SZ_FLOAT)
                temp_chunk.bytes_read += SZ_FLOAT
                contextMaterial.roughness = 1 - float(struct.unpack('f', temp_data)[0])
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == MAT_SHIN2:
            read_chunk(file, temp_chunk)
            if temp_chunk.ID == PERCENTAGE_SHORT:
                temp_data = file.read(SZ_U_SHORT)
                temp_chunk.bytes_read += SZ_U_SHORT
                contextMaterial.specular_intensity = (float(struct.unpack('<H', temp_data)[0]) / 100)
            elif temp_chunk.ID == PERCENTAGE_FLOAT:
                temp_data = file.read(SZ_FLOAT)
                temp_chunk.bytes_read += SZ_FLOAT
                contextMaterial.specular_intensity = float(struct.unpack('f', temp_data)[0])
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == MAT_SHIN3:
            read_chunk(file, temp_chunk)
            if temp_chunk.ID == PERCENTAGE_SHORT:
                temp_data = file.read(SZ_U_SHORT)
                temp_chunk.bytes_read += SZ_U_SHORT
                contextMaterial.metallic = (float(struct.unpack('<H', temp_data)[0]) / 100)
            elif temp_chunk.ID == PERCENTAGE_FLOAT:
                temp_data = file.read(SZ_FLOAT)
                temp_chunk.bytes_read += SZ_FLOAT
                contextMaterial.metallic = float(struct.unpack('f', temp_data)[0])
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == MAT_TRANSPARENCY:
            # Transparency percentage maps inversely to diffuse alpha.
            read_chunk(file, temp_chunk)
            if temp_chunk.ID == PERCENTAGE_SHORT:
                temp_data = file.read(SZ_U_SHORT)
                temp_chunk.bytes_read += SZ_U_SHORT
                contextMaterial.diffuse_color[3] = 1 - (float(struct.unpack('<H', temp_data)[0]) / 100)
            elif temp_chunk.ID == PERCENTAGE_FLOAT:
                temp_data = file.read(SZ_FLOAT)
                temp_chunk.bytes_read += SZ_FLOAT
                contextMaterial.diffuse_color[3] = 1 - float(struct.unpack('f', temp_data)[0])
            else:
                print("Cannot read material transparency")
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == MAT_TEXTURE_MAP:
            read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR")

        elif new_chunk.ID == MAT_SPECULAR_MAP:
            read_texture(new_chunk, temp_chunk, "Specular", "SPECULARITY")

        elif new_chunk.ID == MAT_OPACITY_MAP:
            contextMaterial.blend_method = 'BLEND'
            read_texture(new_chunk, temp_chunk, "Opacity", "ALPHA")

        elif new_chunk.ID == MAT_REFLECTION_MAP:
            read_texture(new_chunk, temp_chunk, "Reflect", "METALLIC")

        elif new_chunk.ID == MAT_BUMP_MAP:
            read_texture(new_chunk, temp_chunk, "Bump", "NORMAL")

        elif new_chunk.ID == MAT_BUMP_PERCENT:
            temp_data = file.read(SZ_U_SHORT)
            new_chunk.bytes_read += SZ_U_SHORT
            contextWrapper.normalmap_strength = (float(struct.unpack('<H', temp_data)[0]) / 100)
            # NOTE(review): temp_chunk is stale here (last used by an earlier
            # branch); adding its bytes_read looks dubious — kept to preserve
            # the original byte accounting.
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == MAT_SHIN_MAP:
            read_texture(new_chunk, temp_chunk, "Shininess", "ROUGHNESS")

        elif new_chunk.ID == MAT_SELFI_MAP:
            read_texture(new_chunk, temp_chunk, "Emit", "EMISSION")

        elif new_chunk.ID == MAT_TEX2_MAP:
            read_texture(new_chunk, temp_chunk, "Tex", "TEXTURE")

        elif contextObName and new_chunk.ID == OBJECT_LIGHT:
            # Basic lamp support.
            # no lamp in dict that would be confusing
            # ...why not? just set CreateBlenderObject to False
            newLamp = bpy.data.lights.new("Lamp", 'POINT')
            contextLamp = bpy.data.objects.new(contextObName, newLamp)
            context.view_layer.active_layer_collection.collection.objects.link(contextLamp)
            importedObjects.append(contextLamp)
            temp_data = file.read(SZ_3FLOAT)
            contextLamp.location = struct.unpack('<3f', temp_data)
            new_chunk.bytes_read += SZ_3FLOAT
            contextMatrix = None  # Reset matrix
            CreateBlenderObject = False
            CreateLightObject = True

        elif CreateLightObject and new_chunk.ID == MAT_FLOAT_COLOR:  # color
            temp_data = file.read(SZ_3FLOAT)
            contextLamp.data.color = struct.unpack('<3f', temp_data)
            new_chunk.bytes_read += SZ_3FLOAT

        elif CreateLightObject and new_chunk.ID == OBJECT_LIGHT_MULTIPLIER:  # intensity
            temp_data = file.read(SZ_FLOAT)
            contextLamp.data.energy = float(struct.unpack('f', temp_data)[0])
            new_chunk.bytes_read += SZ_FLOAT

        elif CreateLightObject and new_chunk.ID == OBJECT_LIGHT_SPOT:  # spotlight
            # Derive Euler rotation from the lamp->target vector by
            # triangulating in the XY plane, then read hotspot/beam angles.
            temp_data = file.read(SZ_3FLOAT)
            contextLamp.data.type = 'SPOT'
            spot = mathutils.Vector(struct.unpack('<3f', temp_data))
            aim = contextLamp.location + spot
            hypo = math.copysign(math.sqrt(pow(aim[1], 2) + pow(aim[0], 2)), aim[1])
            track = math.copysign(math.sqrt(pow(hypo, 2) + pow(spot[2], 2)), aim[1])
            angle = math.radians(90) - math.copysign(math.acos(hypo / track), aim[2])
            contextLamp.rotation_euler[0] = -1 * math.copysign(angle, aim[1])
            contextLamp.rotation_euler[2] = -1 * (math.radians(90) - math.acos(aim[0] / hypo))
            new_chunk.bytes_read += SZ_3FLOAT
            temp_data = file.read(SZ_FLOAT)  # hotspot
            hotspot = float(struct.unpack('f', temp_data)[0])
            new_chunk.bytes_read += SZ_FLOAT
            temp_data = file.read(SZ_FLOAT)  # angle
            beam_angle = float(struct.unpack('f', temp_data)[0])
            contextLamp.data.spot_size = math.radians(beam_angle)
            contextLamp.data.spot_blend = (1.0 - (hotspot / beam_angle)) * 2
            new_chunk.bytes_read += SZ_FLOAT

        elif CreateLightObject and new_chunk.ID == OBJECT_LIGHT_ROLL:  # roll
            temp_data = file.read(SZ_FLOAT)
            contextLamp.rotation_euler[1] = float(struct.unpack('f', temp_data)[0])
            new_chunk.bytes_read += SZ_FLOAT

        elif contextObName and new_chunk.ID == OBJECT_CAMERA and CreateCameraObject is False:
            # Basic camera support
            camera = bpy.data.cameras.new("Camera")
            contextCamera = bpy.data.objects.new(contextObName, camera)
            context.view_layer.active_layer_collection.collection.objects.link(contextCamera)
            # FIX: was `imported_objects.append(...)` — an undefined name that
            # raised NameError on every camera chunk; the list is importedObjects.
            importedObjects.append(contextCamera)
            temp_data = file.read(SZ_3FLOAT)
            contextCamera.location = struct.unpack('<3f', temp_data)
            new_chunk.bytes_read += SZ_3FLOAT
            temp_data = file.read(SZ_3FLOAT)
            target = mathutils.Vector(struct.unpack('<3f', temp_data))
            cam = contextCamera.location + target
            focus = math.copysign(math.sqrt(pow(cam[1], 2) + pow(cam[0], 2)), cam[1])
            new_chunk.bytes_read += SZ_3FLOAT
            temp_data = file.read(SZ_FLOAT)  # triangulating camera angles
            direction = math.copysign(math.sqrt(pow(focus, 2) + pow(target[2], 2)), cam[1])
            pitch = math.radians(90) - math.copysign(math.acos(focus / direction), cam[2])
            contextCamera.rotation_euler[0] = -1 * math.copysign(pitch, cam[1])
            contextCamera.rotation_euler[1] = float(struct.unpack('f', temp_data)[0])
            contextCamera.rotation_euler[2] = -1 * (math.radians(90) - math.acos(cam[0] / focus))
            new_chunk.bytes_read += SZ_FLOAT
            temp_data = file.read(SZ_FLOAT)
            contextCamera.data.lens = (float(struct.unpack('f', temp_data)[0]) * 10)
            new_chunk.bytes_read += SZ_FLOAT
            contextMatrix = None  # Reset matrix
            CreateBlenderObject = False
            CreateCameraObject = True

        elif new_chunk.ID == OBJECT_MESH:
            pass

        elif new_chunk.ID == OBJECT_VERTICES:
            # Worldspace vertex locations
            temp_data = file.read(SZ_U_SHORT)
            num_verts = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2
            contextMesh_vertls = struct.unpack('<%df' % (num_verts * 3), file.read(SZ_3FLOAT * num_verts))
            new_chunk.bytes_read += SZ_3FLOAT * num_verts
            # dummyvert is not used atm!

        elif new_chunk.ID == OBJECT_FACES:
            temp_data = file.read(SZ_U_SHORT)
            num_faces = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2
            temp_data = file.read(SZ_4U_SHORT * num_faces)
            new_chunk.bytes_read += SZ_4U_SHORT * num_faces  # 4 short ints x 2 bytes each
            contextMesh_facels = struct.unpack('<%dH' % (num_faces * 4), temp_data)
            # Keep only the 3 vertex indices of each face; the 4th short is
            # the face-flags word and is discarded.
            contextMesh_facels = [contextMesh_facels[i - 3:i]
                                  for i in range(3, (num_faces * 4) + 3, 4)]

        elif new_chunk.ID == OBJECT_MATERIAL:
            material_name, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len  # remove 1 null character.
            temp_data = file.read(SZ_U_SHORT)
            num_faces_using_mat = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += SZ_U_SHORT
            temp_data = file.read(SZ_U_SHORT * num_faces_using_mat)
            new_chunk.bytes_read += SZ_U_SHORT * num_faces_using_mat
            temp_data = struct.unpack("<%dH" % (num_faces_using_mat), temp_data)
            contextMeshMaterials.append((material_name, temp_data))
            # look up the material in all the materials

        elif new_chunk.ID == OBJECT_SMOOTH:
            # One 32-bit smoothing-group mask per face; relies on num_faces
            # from the preceding OBJECT_FACES chunk.
            temp_data = file.read(struct.calcsize('I') * num_faces)
            smoothgroup = struct.unpack('<%dI' % (num_faces), temp_data)
            new_chunk.bytes_read += struct.calcsize('I') * num_faces
            contextMesh_smooth = smoothgroup

        elif new_chunk.ID == OBJECT_UV:
            temp_data = file.read(SZ_U_SHORT)
            num_uv = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2
            temp_data = file.read(SZ_2FLOAT * num_uv)
            new_chunk.bytes_read += SZ_2FLOAT * num_uv
            contextMeshUV = struct.unpack('<%df' % (num_uv * 2), temp_data)

        elif new_chunk.ID == OBJECT_TRANS_MATRIX:
            # How do we know the matrix size? 54 == 4x4 48 == 4x3
            temp_data = file.read(SZ_4x3MAT)
            data = list(struct.unpack('<ffffffffffff', temp_data))
            new_chunk.bytes_read += SZ_4x3MAT
            contextMatrix = mathutils.Matrix(
                (data[:3] + [0], data[3:6] + [0], data[6:9] + [0], data[9:] + [1])).transposed()

        elif (new_chunk.ID == MAT_MAP_FILEPATH):
            texture_name, read_str_len = read_string(file)
            if contextMaterial.name not in TEXTURE_DICT:
                TEXTURE_DICT[contextMaterial.name] = load_image(
                    texture_name, dirname, place_holder=False, recursive=IMAGE_SEARCH)
            new_chunk.bytes_read += read_str_len  # plus one for the null character that gets removed

        elif new_chunk.ID == EDITKEYFRAME:
            pass

        elif new_chunk.ID == KFDATA_KFSEG:
            temp_data = file.read(struct.calcsize('I'))
            start = struct.unpack('<I', temp_data)[0]
            new_chunk.bytes_read += 4
            context.scene.frame_start = start
            temp_data = file.read(struct.calcsize('I'))
            stop = struct.unpack('<I', temp_data)[0]
            new_chunk.bytes_read += 4
            context.scene.frame_end = stop

        # including these here means their EK_OB_NODE_HEADER are scanned
        elif new_chunk.ID in {KFDATA_AMBIENT, KFDATA_CAMERA, KFDATA_OBJECT,
                              KFDATA_TARGET, KFDATA_LIGHT, KFDATA_L_TARGET, }:
            # another object is being processed
            child = None

        elif new_chunk.ID == OBJECT_NODE_HDR:
            object_name, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len
            temp_data = file.read(SZ_U_SHORT * 2)  # flags1/flags2, unused
            new_chunk.bytes_read += 4
            temp_data = file.read(SZ_U_SHORT)
            hierarchy = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2
            child = object_dictionary.get(object_name)
            if child is None and object_name != '$AMBIENT$':
                child = bpy.data.objects.new(object_name, None)  # create an empty object
                context.view_layer.active_layer_collection.collection.objects.link(child)
                importedObjects.append(child)
            object_list.append(child)
            object_parent.append(hierarchy)
            pivot_list.append(mathutils.Vector((0.0, 0.0, 0.0)))

        elif new_chunk.ID == OBJECT_INSTANCE_NAME:
            object_name, read_str_len = read_string(file)
            if child.name == '$$$DUMMY':
                child.name = object_name
            else:
                child.name += "." + object_name
            object_dictionary[object_name] = child
            new_chunk.bytes_read += read_str_len

        elif new_chunk.ID == OBJECT_PIVOT:  # pivot
            temp_data = file.read(SZ_3FLOAT)
            pivot = struct.unpack('<3f', temp_data)
            new_chunk.bytes_read += SZ_3FLOAT
            pivot_list[len(pivot_list) - 1] = mathutils.Vector(pivot)

        elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG:  # translation
            # Track header: 5 shorts of flags/unknowns, then nkeys + a pad short.
            new_chunk.bytes_read += SZ_U_SHORT * 5
            temp_data = file.read(SZ_U_SHORT * 5)
            temp_data = file.read(SZ_U_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(SZ_U_SHORT)
            new_chunk.bytes_read += SZ_U_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(SZ_U_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += SZ_U_SHORT
                temp_data = file.read(SZ_U_SHORT * 2)  # per-key flags, unused
                new_chunk.bytes_read += SZ_U_SHORT * 2
                temp_data = file.read(SZ_3FLOAT)
                loc = struct.unpack('<3f', temp_data)
                new_chunk.bytes_read += SZ_3FLOAT
                if nframe == 0:
                    # Only the first frame's value is applied.
                    child.location = loc

        elif KEYFRAME and new_chunk.ID == ROT_TRACK_TAG and child.type == 'MESH':  # rotation
            new_chunk.bytes_read += SZ_U_SHORT * 5
            temp_data = file.read(SZ_U_SHORT * 5)
            temp_data = file.read(SZ_U_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(SZ_U_SHORT)
            new_chunk.bytes_read += SZ_U_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(SZ_U_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += SZ_U_SHORT
                temp_data = file.read(SZ_U_SHORT * 2)
                new_chunk.bytes_read += SZ_U_SHORT * 2
                temp_data = file.read(SZ_4FLOAT)
                rad, axis_x, axis_y, axis_z = struct.unpack("<4f", temp_data)
                new_chunk.bytes_read += SZ_4FLOAT
                if nframe == 0:
                    child.rotation_euler = mathutils.Quaternion(
                        (axis_x, axis_y, axis_z), -rad).to_euler()  # why negative?

        elif KEYFRAME and new_chunk.ID == SCL_TRACK_TAG and child.type == 'MESH':  # scale
            new_chunk.bytes_read += SZ_U_SHORT * 5
            temp_data = file.read(SZ_U_SHORT * 5)
            temp_data = file.read(SZ_U_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(SZ_U_SHORT)
            new_chunk.bytes_read += SZ_U_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(SZ_U_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += SZ_U_SHORT
                temp_data = file.read(SZ_U_SHORT * 2)
                new_chunk.bytes_read += SZ_U_SHORT * 2
                temp_data = file.read(SZ_3FLOAT)
                sca = struct.unpack('<3f', temp_data)
                new_chunk.bytes_read += SZ_3FLOAT
                if nframe == 0:
                    child.scale = sca

        elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and child.type == 'LIGHT':  # color
            new_chunk.bytes_read += SZ_U_SHORT * 5
            temp_data = file.read(SZ_U_SHORT * 5)
            temp_data = file.read(SZ_U_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(SZ_U_SHORT)
            new_chunk.bytes_read += SZ_U_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(SZ_U_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += SZ_U_SHORT
                temp_data = file.read(SZ_U_SHORT * 2)
                new_chunk.bytes_read += SZ_U_SHORT * 2
                temp_data = file.read(SZ_3FLOAT)
                rgb = struct.unpack('<3f', temp_data)
                new_chunk.bytes_read += SZ_3FLOAT
                if nframe == 0:
                    child.data.color = rgb

        elif new_chunk.ID == FOV_TRACK_TAG and child.type == 'CAMERA':  # Field of view
            new_chunk.bytes_read += SZ_U_SHORT * 5
            temp_data = file.read(SZ_U_SHORT * 5)
            temp_data = file.read(SZ_U_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(SZ_U_SHORT)
            new_chunk.bytes_read += SZ_U_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(SZ_U_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += SZ_U_SHORT
                temp_data = file.read(SZ_U_SHORT * 2)
                new_chunk.bytes_read += SZ_U_SHORT * 2
                temp_data = file.read(SZ_FLOAT)
                fov = struct.unpack('<f', temp_data)[0]
                new_chunk.bytes_read += SZ_FLOAT
                if nframe == 0:
                    child.data.angle = math.radians(fov)

        elif new_chunk.ID == ROLL_TRACK_TAG and child.type == 'CAMERA':  # Roll angle
            new_chunk.bytes_read += SZ_U_SHORT * 5
            temp_data = file.read(SZ_U_SHORT * 5)
            temp_data = file.read(SZ_U_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(SZ_U_SHORT)
            new_chunk.bytes_read += SZ_U_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(SZ_U_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += SZ_U_SHORT
                temp_data = file.read(SZ_U_SHORT * 2)
                new_chunk.bytes_read += SZ_U_SHORT * 2
                temp_data = file.read(SZ_FLOAT)
                roll = struct.unpack('<f', temp_data)[0]
                new_chunk.bytes_read += SZ_FLOAT
                if nframe == 0:
                    child.rotation_euler[1] = math.radians(roll)

        else:
            # Unrecognized chunk: skip its remaining payload.
            buffer_size = new_chunk.length - new_chunk.bytes_read
            binary_format = "%ic" % buffer_size
            temp_data = file.read(struct.calcsize(binary_format))
            new_chunk.bytes_read += buffer_size

        # update the previous chunk bytes read
        previous_chunk.bytes_read += new_chunk.bytes_read

    # FINISHED LOOP
    # There will be a number of objects still not added
    if CreateBlenderObject:
        putContextMesh(context, contextMesh_vertls, contextMesh_facels,
                       contextMeshMaterials, contextMesh_smooth)

    # Assign parents to objects
    # check _if_ we need to assign first because doing so recalcs the depsgraph
    for ind, ob in enumerate(object_list):
        parent = object_parent[ind]
        if parent == ROOT_OBJECT:
            if ob.parent is not None:
                ob.parent = None
        else:
            if ob.parent != object_list[parent]:
                if ob == object_list[parent]:
                    print(' warning: Cannot assign self to parent ', ob)
                else:
                    ob.parent = object_list[parent]
            # pivot_list[ind] += pivot_list[parent]  # XXX, not sure this is correct, should parent space matrix be applied before combining?

    # fix pivots
    for ind, ob in enumerate(object_list):
        if ob.type == 'MESH':
            pivot = pivot_list[ind]
            pivot_matrix = object_matrix.get(ob, mathutils.Matrix())  # unlikely to fail
            # NOTE(review): the line above is immediately overwritten below
            # (dead assignment, kept from the original); the commented-out
            # variant suggests the object matrix was meant to be factored in.
            pivot_matrix = mathutils.Matrix.Translation(-1 * pivot)
            # pivot_matrix = mathutils.Matrix.Translation(pivot_matrix.to_3x3() @ -pivot)
            ob.data.transform(pivot_matrix)