def __init__(self, pth):
        self.name = os.path.basename(pth)
        stream = bStream(path=pth)
        stream.seek(1)  # skip the leading version byte
        name = stream.readString(len=11)
        self.offsets = [stream.readUInt32() for offset in range(21)]

        stream.seek(self.offsets[2])

        v_count = int((self.offsets[3] - self.offsets[2]) / 6)  # 3 Int16s (6 bytes) per vertex
        tc_count = int((self.offsets[10] - self.offsets[6]) / 8)  # 2 floats (8 bytes) per texcoord
        shader_count = int((self.offsets[11] - self.offsets[10]) / 0x28)  # 0x28 bytes per shader entry

        self.verts = []
        for x in range(v_count):
            t = [stream.readInt16(), stream.readInt16(), stream.readInt16()]
            self.verts.append([t[0], -t[2], t[1]])

        self.materials = [
            self.readShader(stream, x) for x in range(shader_count)
        ]
        stream.seek(self.offsets[6])
        self.texcoords = [[stream.readFloat(),
                           stream.readFloat()] for x in range(tc_count)]

        self.readGraphObjects(stream, 0, None, name)
Example #2
def rgb5A3_from_blender(image):
    img_data = [[
        image.pixels[(y * image.size[0] + x) *
                     4:((y * image.size[0] + x) * 4) + 4]
        for x in range(image.size[0])
    ] for y in range(image.size[1])]
    img_out = bStream()

    for ty in range(0, image.size[1], 4):
        for tx in range(0, image.size[0], 4):
            for by in range(4):
                for bx in range(4):
                    pixel = img_data[(image.size[1] - 1) - (ty + by)][tx + bx]
                    pixel = [int(p * 255) for p in pixel]

                    if (pixel[3] == 255):  # fully opaque: RGB555 mode (top bit set)
                        img_out.writeUInt16(0x8000 | ((pixel[0] & 0xF8) << 7)
                                            | ((pixel[1] & 0xF8) << 2)
                                            | ((pixel[2] & 0xF8) >> 3))
                    else:  # translucent: A3RGB4 mode (top bit clear, 3-bit alpha in bits 12-14)
                        img_out.writeUInt16(((pixel[3] & 0xE0) << 7)
                                            | ((pixel[0] & 0xF0) << 4)
                                            | (pixel[1] & 0xF0)
                                            | (pixel[2] >> 4))

    img_out.seek(0)
    return (0x05, image.size[0], image.size[1], img_out.fhandle.read())
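For sanity-checking the packing above, here is a minimal decode sketch. It is not part of the original tool; it simply assumes the standard GameCube RGB5A3 layout (bit 15 set means RGB555, otherwise 3-bit alpha in bits 12-14 with 4-bit channels) and undoes both branches.

def rgb5a3_to_rgba(value):
    # Hypothetical helper: expand one 16-bit RGB5A3 texel back to 8-bit RGBA.
    if value & 0x8000:  # RGB555 mode, fully opaque
        r = (value >> 10) & 0x1F
        g = (value >> 5) & 0x1F
        b = value & 0x1F
        return ((r << 3) | (r >> 2), (g << 3) | (g >> 2), (b << 3) | (b >> 2), 255)
    # A3RGB4 mode: 3-bit alpha, 4-bit colour channels
    a = (value >> 12) & 0x7
    r = (value >> 8) & 0xF
    g = (value >> 4) & 0xF
    b = value & 0xF
    return ((r << 4) | r, (g << 4) | g, (b << 4) | b, (a << 5) | (a << 2) | (a >> 1))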
def import_model(pth):
    stream = bStream(path=pth)
    stream.endian = '<'

    cmb_chunk = read_cmb_chunk(stream)
    stream.seek(cmb_chunk['skl_chunk_offset'])

    skl_chunk = read_skl_chunk(stream)

    stream.seek(cmb_chunk['vatr_chunk_offset'])
    vatr_chunk = read_vatr_chunk(stream, cmb_chunk['vatr_chunk_offset'])

    amt = bpy.data.armatures.new(f"{cmb_chunk['name']}_skl")
    cmb_object = bpy.data.objects.new(cmb_chunk['name'], amt)

    scene = bpy.context.scene
    scene.collection.objects.link(cmb_object)
    bpy.context.view_layer.objects.active = cmb_object

    ## Mesh setup
    mesh = bpy.data.meshes.new(f"{cmb_chunk['name']}_mesh_data")
    mesh.from_pydata(vatr_chunk['positions'], [], [])
    mesh.update()

    mesh_obj = bpy.data.objects.new(f"{cmb_chunk['name']}_mesh", mesh)
    mesh_obj.parent = cmb_object
    bpy.context.scene.collection.objects.link(mesh_obj)

    # Resume adding the skeleton in edit mode
    bpy.ops.object.mode_set(mode='EDIT')

    for bone in skl_chunk['bones']:
        blender_bone = amt.edit_bones.new(f"bone_{bone['id']}")
        parent = amt.edit_bones.get(f"bone_{bone['parent']}")
        print(
            f"Added bone 'bone_{bone['id']}', parent id is {bone['parent']}, found {parent.name if parent is not None else '(None)'} in armature"
        )

        blender_bone.parent = parent
        offset_from = (parent.head if parent is not None else [0, 0, 0])
        blender_bone.head = [
            bone['translation'][0] + offset_from[0],
            (-bone['translation'][2]) + offset_from[1],
            bone['translation'][1] + offset_from[2]
        ]
        blender_bone.tail = [
            bone['translation'][0] + offset_from[0],
            (-bone['translation'][2]) + offset_from[1],
            bone['translation'][1] + offset_from[2]
        ]
        blender_bone.tail[2] += 1

    bpy.ops.object.mode_set(mode='OBJECT')
Example #4
    def write(self, stream):
        batch_headers = bStream()
        primitive_buffer = bStream()

        batch_headers.pad((0x18 * len(self.batches)))
        primitives_start = batch_headers.tell()
        batch_headers.seek(0)

        for batch in self.batches:
            list_start = primitive_buffer.tell()
            primitive_buffer.write(batch.primitives.fhandle.read())
            list_end = primitive_buffer.tell()
            batch.writeHeader(batch_headers,
                              math.ceil((list_end - list_start) / 32),
                              list_start + primitives_start)

        batch_headers.seek(0)
        primitive_buffer.seek(0)
        stream.write(batch_headers.fhandle.read())
        stream.write(primitive_buffer.fhandle.read())
        batch_headers.close()
        primitive_buffer.close()
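Because each batch's display list was already padded to a 32-byte boundary when it was built (see the batch constructors elsewhere in this listing), math.ceil((list_end - list_start) / 32) records the list length in whole 32-byte units for the header. A quick, purely illustrative check of the rounding:

import math

# an unpadded 70-byte list rounds up to 3 units; the padded 96-byte list is exactly 3
assert math.ceil(70 / 32) == 3
assert math.ceil(96 / 32) == 96 // 32 == 3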
Example #5
def ConvertTexture(tex_path):
    img = Image.open(tex_path)
    img_data = img.load()

    img_out = bStream()

    for ty in range(0, img.height, 8):
        for tx in range(0, img.width, 8):
            for by in range(0, 8, 4):
                for bx in range(0, 8, 4):
                    img_out.write(CompressBlock(img, img_data, tx, ty, bx, by))

    img_out.seek(0)
    return (img.width, img.height, img_out.read())
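The nested loops above emit compressed blocks in CMPR order: the image is walked in 8x8 tiles, and each tile holds a 2x2 group of 4x4 blocks. A tiny illustration of the resulting block origins for a hypothetical 16x16 image:

origins = []
for ty in range(0, 16, 8):
    for tx in range(0, 16, 8):
        for by in range(0, 8, 4):
            for bx in range(0, 8, 4):
                origins.append((tx + bx, ty + by))
print(origins)
# [(0, 0), (4, 0), (0, 4), (4, 4), (8, 0), (12, 0), (8, 4), (12, 4), ...]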
def save_anim(pth):
    stream = bStream(path=pth)
    if(bpy.context.scene.camera is not None):
        cam = bpy.context.scene.camera
        if(cam.parent is None or cam.parent.type != 'EMPTY' or len(cam.parent.children) != 2):
            return False

        stream.writeUInt16(int(bpy.context.scene.frame_end))
        stream.writeUInt16(0)
        group_definitions = stream.fhandle.tell()
        stream.pad(60)  # reserve space for the group table; it is filled in after the key data below
        stream.writeFloat(1.18)
        
        cam_curves = cam.animation_data.action.fcurves
        target_curves = cam.parent.children[1].animation_data.action.fcurves
        ocam_curves = bpy.data.cameras[cam.name.split('.')[0]].animation_data.action.fcurves

        XGroup = CMNWriteGroupData(stream, cam_curves[0])
        YGroup = CMNWriteGroupData(stream, cam_curves[2])
        ZGroup = CMNWriteGroupData(stream, cam_curves[1], invert=True)

        TXGroup = CMNWriteGroupData(stream, target_curves[0])
        TYGroup = CMNWriteGroupData(stream, target_curves[2])
        TZGroup = CMNWriteGroupData(stream, target_curves[1], invert=True)
        
        # These groups are handled specially because they are either not animatable in Blender or their purpose is unknown
        UnkGroup = CMNWriteGroupData(stream, None, dummy=0.0)
        FOVGroup = CMNWriteGroupData(stream, ocam_curves[0])

        ZNearGroup = CMNWriteGroupData(stream, ocam_curves[1])
        ZFarGroup = CMNWriteGroupData(stream, ocam_curves[2])

        stream.seek(group_definitions)

        CMNWriteGroup(stream, XGroup)
        CMNWriteGroup(stream, YGroup)
        CMNWriteGroup(stream, ZGroup)

        CMNWriteGroup(stream, TXGroup)
        CMNWriteGroup(stream, TYGroup)
        CMNWriteGroup(stream, TZGroup)

        CMNWriteGroup(stream, UnkGroup)
        CMNWriteGroup(stream, FOVGroup)
        CMNWriteGroup(stream, ZNearGroup)
        CMNWriteGroup(stream, ZFarGroup)

        return True
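save_anim uses a reserve-then-backfill pattern: the 60-byte group table is padded out up front, each CMNWriteGroupData call appends its key data and returns the group record, and the stream is finally rewound to group_definitions so CMNWriteGroup can fill in the table. A hypothetical usage sketch (the output path is a placeholder; the rig layout matches what load_anim below creates):

# hypothetical usage: export the active scene camera's animation to a .cmn file
if not save_anim("/path/to/camera_anim.cmn"):
    print("Expected the scene camera to be parented to an empty with exactly two children (camera and target)")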
Example #7
    def __init__(self, shape, start, nbt):
        # Model must be triangulated
        if (len(shape.mesh.indices) % 3 != 0):
            raise ValueError("Model is not triangles or triangle strips!")

        self.face_count = math.ceil(len(shape.mesh.indices) / 3)
        self.attributes = (0 | 1 << 9 | 1 << 10 | 1 << 13)
        self.primitives = bStream()
        # SpaceCats: I don't like this; NBT should only be on where it's used.
        # TODO: Find a way to enable NBT only on meshes that use it
        self.useNBT = nbt
        GeneratePrimitives(shape.mesh, self.primitives, self.useNBT)
        self.primitives.padTo32(self.primitives.tell())
        self.primitives.seek(0)
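The attribute word (0 | 1 << 9 | 1 << 10 | 1 << 13) is a per-batch bitmask of vertex attributes. Assuming the standard GX attribute indices (position = 9, normal = 10, tex coord 0 = 13; these constants are not defined in the original source), it can be built more readably from named values:

# Assumed GX vertex-attribute indices
GX_VA_POS = 9
GX_VA_NRM = 10
GX_VA_TEX0 = 13

attributes = (1 << GX_VA_POS) | (1 << GX_VA_NRM) | (1 << GX_VA_TEX0)
assert attributes == (0 | 1 << 9 | 1 << 10 | 1 << 13)  # same value as the literal used above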
Example #8
    def writeTextures(self, stream):
        header_section = bStream()
        data_section = bStream()
        header_size = bStream.padTo32Delta(
            0xC * len(self.textures)) + (0xC * len(self.textures))

        texture_offsets = []
        for texture in self.textures:
            texture_offsets.append(data_section.tell())
            data_section.write(texture[3])

        for x in range(0, len(texture_offsets)):
            header_section.write(
                struct.pack(">HHBBHI", self.textures[x][1],
                            self.textures[x][2], self.textures[x][0], 0, 0,
                            texture_offsets[x] + header_size))

        header_section.padTo32(header_section.tell())
        header_section.seek(0)
        data_section.seek(0)
        stream.write(header_section.fhandle.read())
        stream.write(data_section.fhandle.read())
        header_section.close()
        data_section.close()
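bStream.padTo32Delta isn't shown in this listing; from the way it is combined with 0xC * len(self.textures) to compute header_size, it presumably returns the number of bytes needed to round a size up to the next 32-byte boundary. A minimal sketch under that assumption:

def padTo32Delta(size):
    # Assumed behaviour: bytes required to reach the next 32-byte boundary (0 if already aligned)
    return (32 - (size % 32)) % 32

# header_size = aligned size of the 0xC-byte-per-texture header table
header_size = padTo32Delta(0xC * 3) + (0xC * 3)
assert header_size == 64  # 36 bytes of headers rounded up to 64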
Example #9
def cmpr_from_blender(image):
    start = time.time()
    img_data = [[
        image.pixels[(y * image.size[0] + x) *
                     4:((y * image.size[0] + x) * 4) + 4]
        for x in range(image.size[0])
    ] for y in range(image.size[1])]
    img_out = bStream()

    # Walk the image in 8x8 tiles of 4x4 blocks; the coverage mask below keeps
    # partially-filled edge blocks from compressing garbage data.

    for ty in range(0, image.size[1], 8):
        for tx in range(0, image.size[0], 8):
            for by in range(0, 8, 4):
                for bx in range(0, 8, 4):
                    rgba = [0 for x in range(64)]
                    mask = 0

                    for y in range(4):
                        if (ty + by + y < len(img_data)):
                            for x in range(4):
                                if (tx + bx + x < len(img_data[0])):
                                    index = (y * 4) + x
                                    mask |= (1 << index)
                                    localIndex = 4 * index
                                    pixel = img_data[(image.size[1] - 1) - (ty + by + y)][tx + bx + x]

                                    if (type(pixel) != int):
                                        rgba[localIndex + 0] = int(pixel[0] * 255)
                                        rgba[localIndex + 1] = int(pixel[1] * 255)
                                        rgba[localIndex + 2] = int(pixel[2] * 255)
                                        # default to opaque when the image has no alpha channel
                                        rgba[localIndex + 3] = int(pixel[3] * 255) if len(pixel) == 4 else 0xFF

                    img_out.write(
                        squish.compressMasked(bytes(rgba), mask, squish.DXT1))

    img_out.seek(0)
    end = time.time()
    print(f"{image.name} compressed in {end-start} seconds")
    return (0x0E, image.size[0], image.size[1], img_out.fhandle.read())
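Each of the *_from_blender converters returns the same (format, width, height, data) tuple, so a caller can choose the encoder per image and hand the results to the texture writer above. An illustrative, hypothetical dispatch helper (assuming all three converters live in the same module):

def encode_image(image, fmt='CMPR'):
    # hypothetical helper mapping a format name to the converters defined in this listing
    encoders = {
        'CMPR': cmpr_from_blender,
        'RGB565': rgb565_from_blender,
        'RGB5A3': rgb5A3_from_blender,
    }
    return encoders[fmt](image)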
Example #10
    def __init__(self, mesh, nbt, mesh_data, use_normals, use_positions,
                 use_tristrips):

        self.face_count = len(mesh.polygons)  # not used by the game, so the exact count isn't important
        self.attributes = (0 | 1 << 9 | 1 << 10 | 1 << 13)
        self.primitives = bStream()
        self.useNBT = nbt
        self.use_normals = use_normals
        self.use_positions = use_positions

        if (use_tristrips):
            GenerateTristripPrimitives(mesh, self.primitives, self.useNBT,
                                       mesh_data)
        else:
            GeneratePrimitives(mesh, self.primitives, self.useNBT, mesh_data)

        self.primitives.padTo32(self.primitives.tell())
        self.primitives.seek(0)
Example #11
def rgb565_from_blender(image):
    img_data = [[
        image.pixels[(y * image.size[0] + x) *
                     4:((y * image.size[0] + x) * 4) + 4]
        for x in range(image.size[0])
    ] for y in range(image.size[1])]
    img_out = bStream()

    for ty in range(0, image.size[1], 4):
        for tx in range(0, image.size[0], 4):
            for by in range(4):
                for bx in range(4):
                    pixel = img_data[(image.size[1] - 1) - (ty + by)][tx + bx]
                    pixel = [int(p * 255) for p in pixel]

                    img_out.writeUInt16(((pixel[0] & 0xF8) << 8)
                                        | ((pixel[1] & 0xFC) << 3)
                                        | ((pixel[2] & 0xF8) >> 3))

    img_out.seek(0)
    return (0x04, image.size[0], image.size[1], img_out.fhandle.read())
def load_model(pth):
    stream = bStream(path=pth)
    stream.seek(0x24)
    vertex_offset = stream.readUInt32()
    normal_offset = stream.readUInt32()
    triangle_data_offset = stream.readUInt32()
    triangle_group_offset = stream.readUInt32()
    gridIndex_data_offset = stream.readUInt32()
    gridIndex_data_offset_dupe = stream.readUInt32()
    unknown_data_offset = stream.readUInt32()

    stream.seek(vertex_offset)  #vertex data always here
    vertices = [[stream.readFloat(),
                 stream.readFloat(),
                 stream.readFloat()]
                for x in range(int((normal_offset - vertex_offset) / 0xC))]

    for v in vertices:
        t = v[1]
        v[1] = -v[2]
        v[2] = t

    # Normals aren't needed for the collision mesh, so they are skipped:
    #stream.seek(normal_offset)
    #normals = [[stream.readFloat(), stream.readFloat(), stream.readFloat()] for x in range(int((triangle_data_offset - normal_offset) / 0xC))]

    #Read triangle data
    stream.seek(triangle_data_offset)
    triangles = [
        readTriangle(stream) for x in range(
            int((triangle_group_offset - triangle_data_offset) / 0x18))
    ]

    mesh = bpy.data.meshes.new('col.mp')
    mesh.from_pydata(vertices, [], triangles)
    mesh.update()

    col_obj = bpy.data.objects.new('col.mp', mesh)
    bpy.context.scene.collection.objects.link(col_obj)
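readTriangle isn't shown in this listing; given the 0x18-byte record stride and the fact that its results go straight into from_pydata as faces, a plausible sketch is that the first three UInt16s are vertex indices and the rest of the record is skipped. This is an assumption about the collision format, not the original helper:

def readTriangle(stream):
    # Assumed layout: three UInt16 vertex indices at the start of each 0x18-byte record;
    # the remaining 0x12 bytes (collision properties) are skipped here.
    indices = [stream.readUInt16(), stream.readUInt16(), stream.readUInt16()]
    stream.fhandle.read(0x12)
    return indices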
    def __init__(self, pth, use_tristrips, compat):
        root = bpy.context.selected_objects[0]

        self.meshes_used = []
        self.materials_used = []
        self.get_used_meshes(root)

        self.textures = TextureManager(self.materials_used)
        self.shaders = ShaderManager(self.textures.material_indices,
                                     self.materials_used)
        self.batches = BatchManager(self.meshes_used, use_tristrips)

        print(f"Meshes being used are {self.meshes_used}")

        graph_nodes = []

        self.generate_scenegraph(root, graph_nodes, 0, -1, -1)
        print(graph_nodes)

        offsets = [0 for x in range(21)]
        out = bStream(path=pth)
        out.writeUInt8(0x02)

        if (len(root.name) < 11):
            out.writeString(root.name + (" " * (11 - len(root.name))))
        else:
            out.writeString(root.name[0:11])

        out.writeUInt32List(offsets)

        offsets[0] = out.tell()
        self.textures.writeTextures(out)
        if (compat):  #pad after textures
            out.padTo32(out.tell())

        offsets[1] = out.tell()
        self.textures.writeMaterials(out)
        if (compat):  # pad after materials
            out.padTo32(out.tell())

        offsets[2] = out.tell()
        for vertex in self.batches.mesh_data['vertex']:
            out.writeInt16(int(vertex[0]))
            out.writeInt16(int(vertex[2]))
            out.writeInt16(int(-vertex[1]))

        if (compat):  # pad after vertices
            out.padTo32(out.tell())

        offsets[3] = out.tell()
        for normal in self.batches.mesh_data['normal']:
            out.writeFloat(normal[0])
            out.writeFloat(normal[1])
            out.writeFloat(normal[2])

        if (compat):  #pad after normals
            out.padTo32(out.tell())

        offsets[6] = out.tell()
        for uv in self.batches.mesh_data['uv']:
            out.writeFloat(uv[0])
            out.writeFloat(-uv[1])

        if (compat):  #pad after uvs
            out.padTo32(out.tell())

        offsets[10] = out.tell()
        self.shaders.writeShaders(out)
        if (compat):  #pad after shaders
            out.padTo32(out.tell())

        offsets[11] = out.tell()
        self.batches.write(out)
        if (compat):  #pad after batches
            out.padTo32(out.tell())

        offsets[12] = out.tell()

        out.pad(0x8C * len(graph_nodes))
        for node in graph_nodes:
            node['part_offset'] = out.tell() - offsets[12]
            for part in node['parts']:
                print(
                    f"Writing Node {node['my_index']} Part: {part} at offset {out.tell():x}"
                )
                out.writeInt16(part[1])
                out.writeInt16(part[0])

        if (compat):  #Pad after scenegraph
            out.padTo32(out.tell())

        out.seek(offsets[12])
        for node in graph_nodes:

            out.writeInt16(node['parent_index'])
            out.writeInt16(node['child_index'])
            out.writeInt16(node['next_index'])
            out.writeInt16(node['prev_index'])
            out.writeUInt8(node['render_flags'])
            out.writeUInt8(node['render_flags'])
            out.writeUInt16(0)
            for v in node['scale']:
                out.writeFloat(v)

            for v in node['rotation']:
                out.writeFloat(v)

            for v in node['position']:
                out.writeFloat(v)

            for v in range(6):  # no bounding box for now, easy addition later
                out.writeFloat(0)

            out.writeFloat(0)  # unknown
            out.writeUInt16(node['part_count'])
            out.writeUInt16(0)
            out.writeUInt32(node['part_offset'])
            out.writeUInt32(0)
            out.writeUInt32List([0 for x in range(13)])

        out.seek(0x0C)
        out.writeUInt32List(offsets)
        out.close()
Example #14
nbt_data = []

try:
    batch_section = BatchManager(shapes, materials, use_bump)
    if (use_bump):
        nbt_data = BatchManager.CalculateTangentSpace(shapes, materials,
                                                      attrib)
except ValueError as error:
    print(error)

textures = TextureManager(materials)
shaders = ShaderManager(materials, textures)

offsets = [0 for x in range(21)]

model = bStream(path=bin_pth)
model.writeUInt8(0x02)
model.writeString("NewBinModel")
model.writeUInt32List(offsets)

# Write each section independently
position_section = bStream()
normal_section = bStream()
texcoord0_section = bStream()

for vertex in attrib.vertices:
    position_section.writeInt16(int(vertex))

if (use_bump):
    for normal in nbt_data:
        normal_section.writeFloat(normal)
    def __init__(self, pth):
        stream = bStream(path=pth)

        if (stream.readUInt32() != 0x04B40000):
            return

        self.face_count = stream.readUInt16()
        stream.readUInt16()  # 2 byte padding
        self.graph_node_count = stream.readUInt16()
        self.packet_count = stream.readUInt16()
        self.weight_count = stream.readUInt16()
        self.joint_count = stream.readUInt16()
        self.position_count = stream.readUInt16()
        self.normal_count = stream.readUInt16()
        self.color_count = stream.readUInt16()
        self.texcoord_count = stream.readUInt16()
        stream.fhandle.read(8)  # 8 bytes padding
        self.texture_count = stream.readUInt16()
        stream.readUInt16()  # 2 bytes padding
        self.sampler_count = stream.readUInt16()
        self.draw_element_count = stream.readUInt16()
        self.material_count = stream.readUInt16()
        self.shape_count = stream.readUInt16()
        stream.readUInt32()  # 4 bytes padding

        self.graph_node_offset = stream.readUInt32()
        self.packet_offset = stream.readUInt32()
        self.matrix_offset = stream.readUInt32()
        self.weight_offset = stream.readUInt32()
        self.joint_index_offset = stream.readUInt32()
        self.weight_table_offset = stream.readUInt32()
        self.position_offset = stream.readUInt32()
        self.normal_offset = stream.readUInt32()
        self.color_offset = stream.readUInt32()
        self.texcoord_offset = stream.readUInt32()
        stream.fhandle.read(8)  # 8 bytes padding
        self.texture_array_offset = stream.readUInt32()
        stream.readUInt32()  # 4 bytes padding
        self.material_offset = stream.readUInt32()
        self.sampler_offset = stream.readUInt32()
        self.shape_offset = stream.readUInt32()
        self.draw_element_offset = stream.readUInt32()
        stream.fhandle.read(8)  # 8 bytes padding

        stream.seek(self.position_offset)
        self.raw_vertices = [
            stream.readVec3() for pos in range(self.position_count)
        ]

        stream.seek(self.normal_offset)
        self.raw_normals = [
            stream.readVec3() for norm in range(self.normal_count)
        ]

        stream.seek(self.color_offset)
        self.raw_colors = [[
            stream.readUInt8(),
            stream.readUInt8(),
            stream.readUInt8(),
            stream.readUInt8()
        ] for col in range(self.color_count)]

        stream.seek(self.texcoord_offset)
        self.raw_texcoords = [[stream.readFloat(),
                               stream.readFloat()]
                              for coord in range(self.texcoord_count)]

        mesh = bpy.data.meshes.new(pth.split('/')[-1].split('.')[0])  # add a new mesh named after the file
        mesh.from_pydata(self.raw_vertices, [], [])
        bm = bmesh.new()
        bm.from_mesh(mesh)

        # make the bmesh the object's mesh
        bm.to_mesh(mesh)
        bm.free()  # always do this when finished

        mesh.update()

        mdl_obj = bpy.data.objects.new('mdl_obj', mesh)

        bpy.context.scene.collection.objects.link(mdl_obj)
        bpy.context.view_layer.objects.active = mdl_obj
        bpy.ops.object.mode_set(mode='EDIT')
        #bpy.ops.mesh.delete_loose()
        bpy.ops.object.mode_set(mode='OBJECT')
def load_anim(pth):
    stream = bStream(path=pth)
    cam_anim = bpy.data.objects.new(f"{os.path.basename(pth).split('.')[0]}", None)
    cmn_name = f"{os.path.basename(pth).split('.')[0]}_CAM"
    cmntarget_name = f"{os.path.basename(pth).split('.')[0]}_TGT"
    
    cam_action = bpy.data.actions.new(f"{cmn_name}_ACN")
    target_action = bpy.data.actions.new(f"{cmntarget_name}_ACN")

    cam = bpy.data.cameras.new(cmn_name)
    cam_obj = bpy.data.objects.new(cmn_name, cam)

    cam_target = bpy.data.objects.new(cmntarget_name, None)
    track = cam_obj.constraints.new("TRACK_TO")
    track.target = cam_target
    track.track_axis = 'TRACK_NEGATIVE_Z'
    track.up_axis = 'UP_Y'
    track.use_target_z = True

    cam_obj.parent = cam_anim
    cam_target.parent = cam_anim

    # Start loading animation

    frame_count = stream.readUInt16()
    print(frame_count)
    stream.readUInt16() #Padding
    frames = {
        'x':[],
        'y':[],
        'z':[],
        'tx':[],
        'ty':[],
        'tz':[],
        'unk':[],
        'fov':[],
        'znear':[],
        'zfar':[]
    }
    
    XGroup = CMNLoadGroup(stream)
    YGroup = CMNLoadGroup(stream)
    ZGroup = CMNLoadGroup(stream)

    TXGroup = CMNLoadGroup(stream)
    TYGroup = CMNLoadGroup(stream)
    TZGroup = CMNLoadGroup(stream)

    UnkGroup = CMNLoadGroup(stream)
    FOVGroup = CMNLoadGroup(stream)
    ZNearGroup = CMNLoadGroup(stream)
    ZFarGroup = CMNLoadGroup(stream)

    #Load Frame Data
    CMNLoadGroupData(stream, XGroup, 'x', frames)
    CMNLoadGroupData(stream, YGroup, 'y', frames)
    CMNLoadGroupData(stream, ZGroup, 'z', frames)

    CMNLoadGroupData(stream, TXGroup, 'tx', frames)
    CMNLoadGroupData(stream, TYGroup, 'ty', frames)
    CMNLoadGroupData(stream, TZGroup, 'tz', frames)

    CMNLoadGroupData(stream, UnkGroup, 'unk', frames)
    CMNLoadGroupData(stream, FOVGroup, 'fov', frames)
    CMNLoadGroupData(stream, ZNearGroup, 'znear', frames)
    CMNLoadGroupData(stream, ZFarGroup, 'zfar', frames)

    #Set Frame Data
    bpy.context.scene.frame_end = frame_count
    cam_obj.animation_data_clear()
    cam_target.animation_data_clear()
    cam_anim_data = cam_obj.animation_data_create()
    cam_target_anim_data = cam_target.animation_data_create()


    GenerateFCurves(cam_action, 'x', 0, frames['x'])
    GenerateFCurves(cam_action, 'y', 1, frames['z'], invert=True)
    GenerateFCurves(cam_action, 'z', 2, frames['y'])

    GenerateFCurves(target_action, 'x', 0, frames['tx'])
    GenerateFCurves(target_action, 'y', 1, frames['tz'], invert=True)
    GenerateFCurves(target_action, 'z', 2, frames['ty'])

    # FOV can't be keyframed on the camera object in Blender, so keyframe the camera data-block properties instead
    GenerateKeyframes(cam, "lens", frames['fov'])
    GenerateKeyframes(cam, "clip_start", frames['znear'])
    GenerateKeyframes(cam, "clip_end", frames['zfar'])


    cam_anim_data.action = cam_action
    cam_target_anim_data.action = target_action

    bpy.context.scene.collection.objects.link(cam_anim)
    bpy.context.scene.collection.objects.link(cam_obj)
    bpy.context.scene.collection.objects.link(cam_target)
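GenerateFCurves and GenerateKeyframes are referenced above but not shown. Judging only from the call sites, the former builds a location F-Curve on an action for one axis (optionally negating the values) and the latter keyframes a single property on the camera data-block, one key per frame. A minimal sketch under those assumptions (the per-frame key layout is assumed, not taken from the original code):

def GenerateFCurves(action, axis, index, frames, invert=False):
    # Assumed behaviour: one keyframe per entry in 'frames' on location[index]
    curve = action.fcurves.new(data_path="location", index=index, action_group=axis)
    curve.keyframe_points.add(len(frames))
    for i, value in enumerate(frames):
        curve.keyframe_points[i].co = (i, -value if invert else value)

def GenerateKeyframes(cam, prop, frames):
    # Assumed behaviour: keyframe a camera data property (e.g. 'lens') frame by frame
    for i, value in enumerate(frames):
        setattr(cam, prop, value)
        cam.keyframe_insert(data_path=prop, frame=i)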