def quaternion_cleanup(object, prevent_flips=True, prevent_inverts=True):
    """fixes signs in quaternion fcurves swapping from one frame to another

    Walks every WXYZ quaternion fcurve quadruple on `object`, bakes a LINEAR
    keyframe on every whole frame of the animated range, then repairs two
    artifacts of quaternion interpolation:
      * prevent_flips  - a near-180-degree jump between consecutive frames
                         (interpolation would take the long way round).
      * prevent_inverts - a q -> -q sign swap (same rotation, opposite signs),
                          detected by a large component-wise change.

    NOTE(review): `get_all_quaternion_curves` is defined elsewhere; assumed to
    yield 4-tuples of fcurves ordered (w, x, y, z) — confirm against caller.
    Mutates the fcurves in place; returns None.
    """
    for curves in get_all_quaternion_curves(object):
        # Animated range: earliest first key to latest last key across all 4 curves.
        start = int(min((curves[i].keyframe_points[0].co.x for i in range(4))))
        end = int(max((curves[i].keyframe_points[-1].co.x for i in range(4))))
        # Bake a key on every whole frame so all 4 curves share identical timestamps.
        for curve in curves:
            for i in range(start, end):
                curve.keyframe_points.insert(i, curve.evaluate(i)).interpolation = 'LINEAR'
        # Lock-step view over the 4 channels: zipped[frame][channel] is a keyframe point.
        zipped = list(zip(
            curves[0].keyframe_points,
            curves[1].keyframe_points,
            curves[2].keyframe_points,
            curves[3].keyframe_points))
        for i in range(1, len(zipped)):
            if prevent_flips:
                rot_prev = Quaternion((zipped[i-1][j].co.y for j in range(4)))
                rot_cur = Quaternion((zipped[i][j].co.y for j in range(4)))
                diff = rot_prev.rotation_difference(rot_cur)
                # A difference angle within 0.5 rad of pi means the path flips
                # nearly all the way around; rotate by pi about the difference
                # axis to take the short path instead.
                if abs(diff.angle - pi) < 0.5:
                    rot_cur.rotate(Quaternion(diff.axis, pi))
                    for j in range(4):
                        zipped[i][j].co.y = rot_cur[j]
            if prevent_inverts:
                # Sum of absolute per-component deltas; > 1.0 on unit
                # quaternions indicates a sign inversion rather than motion.
                change_amount = 0.0
                for j in range(4):
                    change_amount += abs(zipped[i-1][j].co.y - zipped[i][j].co.y)
                if change_amount > 1.0:
                    for j in range(4):
                        zipped[i][j].co.y *= -1.0
def debug_boundmesh(self, subsets, skeldata):
    """Create one debug mesh object per bone, holding that bone-subset's
    vertices transformed into bone-local space (vertices only, no faces).

    NOTE(review): uses the pre-2.8 Blender API (`Quaternion * Vector`,
    `scene.objects.link/active`) — this file targets Blender <= 2.79.
    NOTE(review): `vert_offset` and `faces` are never used; debug leftovers.
    """
    vert_offset = 0
    for set, bone in zip(subsets, skeldata):
        # Skip subsets with no geometry.
        if not set.faces:
            continue
        # Negate trans to account for flipped axes
        trans = Vector(bone.position)
        trans.negate()
        # Rotate rot quaternion to account for flipped axes
        rot = Quaternion(bone.rotation)
        rot.rotate( Quaternion((0,0,0,1)) )
        # Bone origin expressed in the rotated frame.
        relative_zero = rot * trans
        verts = []
        faces = []
        for vert in set.vertices:
            # Relative vertex position: Absolute position - bone position
            # I guess?
            verts.append( rot * Vector(vert) + relative_zero )
        # Create object and mesh
        mesh = bpy.data.meshes.new(bone.name)
        object = bpy.data.objects.new(bone.name, mesh)
        bpy.context.scene.objects.link(object)
        bpy.context.scene.objects.active = object
        # Load vertices and faces
        mesh.from_pydata(verts, [], [])
def transform_rot(keyframes, matrix):
    """Rotate every quaternion keyframe in-place by `matrix`.

    Each value is decomposed to axis/angle, only the axis is rotated,
    and the quaternion is rebuilt and normalized before being stored
    back as a plain tuple.
    """
    for frame, value in keyframes.items():
        rot_axis, rot_angle = Quaternion(value).to_axis_angle()
        rot_axis.rotate(matrix)
        rebuilt = Quaternion(rot_axis, rot_angle)
        rebuilt.normalize()
        keyframes[frame] = tuple(rebuilt)
def resetRoll(self, context: bpy.types.Context):
    """Remove camera roll by rebuilding the view rotation from pitch/yaw only."""
    sv3d, rv3d = getViews3D(context)
    viewPos, rot, viewDir = prepareCameraTransformation(sv3d, rv3d)
    # Current camera "up" in world space.
    worldUp = Vector((0, 1, 0))
    worldUp.rotate(rot)
    angles = getPitchYaw(viewDir, worldUp)
    pitch = angles[0]
    # Pitch about X, then (unless looking straight ahead) yaw about Z.
    newRot = Quaternion(Vector((1, 0, 0)), pitch)
    if pitch != 0:
        newRot.rotate(Quaternion(Vector((0, 0, 1)), angles[1]))
    applyCameraTranformation(sv3d, rv3d, viewPos, newRot)
def import_skeleton(self, data):
    """Build a Blender armature object from Sims 2 skeleton data.

    Creates an 'Armature' object in edit mode, adds one edit bone per
    BoneData entry (head/tail reversed relative to Blender convention),
    stores the raw translate/rotation on each bone as custom properties
    for later export, and returns the armature object in object mode.
    """
    skeldata = BoneData.build_bones(data)
    # Create armature and object
    name = 'Armature'
    bpy.ops.object.add(type='ARMATURE', enter_editmode=True, location=(0, 0, 0))
    # Armature object
    ob = bpy.context.active_object
    ob.show_in_front = True
    ob.name = name
    # Armature
    amt = ob.data
    amt.display_type = 'STICK'
    # Create bones from skeleton data
    # Sims 2 bones seem to be reversed head to tail
    for bonedata in skeldata:
        bone = amt.edit_bones.new(bonedata.name)
        # Negate trans to account for flipped axes
        trans = Vector(bonedata.position)
        trans.negate()
        # Rotate rot quaternion to account for flipped axes
        rot = Quaternion(bonedata.rotation)
        rot.rotate(Quaternion((0, 0, 0, 1)))
        bone.head = rot @ trans
        if bonedata.parent is not None:
            parent = amt.edit_bones[bonedata.parent]
            bone.parent = parent
            bone.tail = parent.head
        # BUGFIX: the original computed `bone.tail.length - bone.head.length`
        # (difference of distances from the origin), which is not the bone's
        # length and misses genuinely zero-length bones whose endpoints are
        # far from the origin. Use the actual head-to-tail distance.
        bonelen = (bone.tail - bone.head).length
        if bonelen > -0.0005 and bonelen < 0.0005:
            # Blender does not support 0 length bones
            bone.head += Vector((0, 0, 0.00005))
        # Enter custom properties for exporting later
        #
        # Translate Vector and Rotation Quaternion
        bone["translate"] = [
            bonedata.position[0],
            bonedata.position[1],
            bonedata.position[2]
        ]
        bone["rotation"] = [
            bonedata.rotation[0],
            bonedata.rotation[1],
            bonedata.rotation[2],
            bonedata.rotation[3]
        ]
    # Go back to Object mode
    bpy.ops.object.mode_set(mode='OBJECT')
    # Return the Armature object
    return ob
def rand_rot(v, mean_branch_angle):
    """Return two random branch rotations (as Eulers) around direction `v`.

    The branch angle is drawn from a Gaussian centred on
    `mean_branch_angle` (degrees, sigma from module-level `branch_sigma`);
    both branches tilt away from `v` by that angle about an axis
    perpendicular to `v`, then are spun about `v` itself at a uniform
    random phase, pi radians apart so the two branches oppose each other.

    NOTE(review): if v.y == v.z the perpendicular axis degenerates to the
    zero vector — assumed not to occur for the inputs used; confirm.
    """
    # Any vector not parallel to v yields a perpendicular via the cross product.
    v_temp = Vector((v.x, v.z, v.y))
    cross1 = v.cross(v_temp)
    # BUGFIX: removed unused local `cross2 = v.cross(cross1)` (dead computation).
    branch_angle = random.gauss(mean_branch_angle, branch_sigma)
    q1 = Quaternion(cross1, radians(branch_angle))
    q2 = Quaternion(q1)
    # Spin both tilts about v, half a turn apart.
    angle = random.uniform(0.0, math.pi)
    q1.rotate(Quaternion(v, angle))
    q2.rotate(Quaternion(v, angle + math.pi))
    return q1.to_euler(), q2.to_euler()
def load_camera(dmx_camera_path):
    """Import a Source-engine DMX camera file as an animated Blender camera.

    Reads position/orientation/FOV channels from the DMX channels clip and
    writes location, rotation_quaternion and lens keyframes on a newly
    created camera object linked to the current scene collection.

    NOTE(review): `load` is a project DMX parser defined elsewhere; the
    value axis remaps below assume Source's coordinate convention — confirm
    against the exporter. `aperture` is read but never used.
    """
    camera_data = load(dmx_camera_path)
    scene = camera_data.root
    camera_info = scene['camera']
    fov = camera_info['fieldOfView']
    aperture = camera_info['aperture']
    # Newer files store the animation directly; older ones nest it in a list.
    if 'channelsClip' in scene:
        camera_clip = scene['channelsClip']
    else:
        camera_clip = scene['animationList']['animations'][0]
    fps = camera_clip['frameRate']
    camera = bpy.data.cameras.new(name=camera_info.name)
    camera_obj = bpy.data.objects.new(camera_info.name, camera)
    bpy.context.scene.collection.objects.link(camera_obj)
    # Static (rest) transform from the DMX scene.
    camera_obj.location = Vector(camera_info['transform']['position'])
    camera_obj.rotation_quaternion = Quaternion(
        camera_info['transform']['orientation'])
    # Horizontal FOV -> focal length for a 36mm-wide sensor.
    camera.lens = 0.5 * 36 / math.tan(math.radians(fov) / 2)
    camera_obj.rotation_mode = 'QUATERNION'
    # Channel names end in a suffix identifying what they animate:
    # '_p' = position, '_o' = orientation, 'fieldOfView' = zoom.
    for channel in camera_clip['channels']:
        value_log = channel['log']
        value_layer = value_log['layers'][0]
        if channel.name.endswith('_p'):
            for time, value in zip(value_layer['times'], value_layer['values']):
                # DMX times are seconds; convert to the clip's frame rate.
                frame = math.ceil(time * fps)
                # Axis remap from Source to Blender space.
                pos = Vector([value[1], -value[0], value[2]])
                camera_obj.location = pos
                camera_obj.keyframe_insert(data_path="location", frame=frame)
        elif channel.name.endswith('_o'):
            for time, value in zip(value_layer['times'], value_layer['values']):
                frame = math.ceil(time * fps)
                # Component reorder plus a fixed Euler offset to match Blender.
                quat = Quaternion([value[1], value[3], value[0], value[2]])
                quat.rotate(Euler([-math.pi / 2, 0, -math.pi]))
                camera_obj.rotation_quaternion = quat
                camera_obj.keyframe_insert(data_path="rotation_quaternion",
                                           frame=frame)
        elif channel.name.endswith('_fieldOfView') or channel.name.endswith(
                'fieldOfView'):
            for time, value in zip(value_layer['times'], value_layer['values']):
                frame = math.ceil(time * fps)
                camera.lens = 0.5 * 36 / math.tan(math.radians(value) / 2)
                camera.keyframe_insert(data_path="lens", frame=frame)
        pass
def pan3dView(self, rv3d: bpy.types.RegionView3D, delta: Vector, sensitivity):
    """Rotate the 3D view by a mouse delta while keeping the view position fixed.

    delta[0] yaws about the world Z axis, delta[1] pitches about the view's
    local X axis; both are scaled from degrees to radians times `sensitivity`.
    """
    if sensitivity == 0:
        return
    viewPos, _viewDir = getViewPos(rv3d)
    degToRad = 0.017453292
    rot = Quaternion(rv3d.view_rotation)
    # Yaw: world-up axis, opposite sign to horizontal mouse movement.
    yawRot = Quaternion(Vector((0, 0, 1)), -delta[0] * degToRad * sensitivity)
    # Pitch: view-local X axis (world X carried through the current rotation).
    pitchAxis = Vector((1, 0, 0))
    pitchAxis.rotate(rot)
    pitchRot = Quaternion(pitchAxis, delta[1] * degToRad * sensitivity)
    rot.rotate(pitchRot)
    rot.rotate(yawRot)
    rv3d.view_rotation = rot
    # Restore the pivot so rotation happens around the camera, not the target.
    setViewPos(rv3d, viewPos)
def create_armature(hskn: HSKN):
    """Create and return a Blender armature object from HSKN skeleton data.

    Pass 1 (edit mode) creates the bone hierarchy with placeholder tails;
    pass 2 (pose mode) poses each bone from the file's pos/rot data and
    bakes that pose in as the rest pose via `armature_apply`.
    """
    model_name = Path(hskn.name).stem
    armature = bpy.data.armatures.new(f"{model_name}_ARM_DATA")
    armature_obj = bpy.data.objects.new(f"{model_name}_ARM", armature)
    armature_obj.show_in_front = True
    bpy.context.scene.collection.objects.link(armature_obj)
    armature_obj.select_set(True)
    bpy.context.view_layer.objects.active = armature_obj
    bpy.ops.object.mode_set(mode='EDIT')
    bl_bones = []
    for n, bone in enumerate(hskn.bones):
        parent_id = hskn.bone_parents[n]
        bl_bone = armature.edit_bones.new(bone.name)
        # Give every bone a small non-zero length (Blender drops 0-length bones).
        bl_bone.tail = (Vector([0, 0, 0.1])) + bl_bone.head
        bl_bones.append(bl_bone)
        # Bone 0 is the root; everything else parents by index lookup.
        if n != 0:
            bl_bone.parent = armature.edit_bones.get(
                hskn.bones[parent_id].name)
    bl_bones.clear()
    bpy.ops.object.mode_set(mode='POSE')
    for n, bone in enumerate(hskn.bones):
        bone_pos_rot = hskn.pos_rot_data[n]
        bl_bone = armature_obj.pose.bones.get(bone.name)
        x, y, z = bone_pos_rot.pos
        pos = Vector([x, y, z])
        # File stores (w, x, y, z); build Blender quat with flipped y/z
        # then a 180-degree Y correction for the coordinate handedness.
        w, x, y, z = bone_pos_rot.rot
        rot = Quaternion([x, -y, -z, w])
        rot.rotate(Euler([math.radians(0), math.radians(180), math.radians(0)]))
        mat = Matrix.Translation(pos) @ rot.to_matrix().to_4x4()
        # Transforms are parent-relative; accumulate down the chain.
        bl_bone.matrix_basis.identity()
        bl_bone.matrix = bl_bone.parent.matrix @ mat if bl_bone.parent else mat
    # Bake the posed transforms in as the armature's rest pose.
    bpy.ops.pose.armature_apply()
    bpy.ops.object.mode_set(mode='OBJECT')
    return armature_obj
def operation_rotate(self, q, p):
    """Return a copy of quaternion `q` rotated by `p`; `q` is left untouched."""
    rotated = Quaternion(q)
    rotated.rotate(p)
    return rotated
def convert_source_rotation(rot: List[float]):
    """Convert a Source-engine rotation quaternion to Blender's convention.

    Reorders/negates the components into mathutils (w, x, y, z) order and
    applies a -90 degree correction about Y for the axis convention change.
    """
    import math  # local import keeps this fix self-contained
    qrot = Quaternion([rot[0], rot[1], -rot[3], rot[2]])
    # BUGFIX: mathutils.Euler takes angles in RADIANS; the original passed
    # the literal -90, i.e. -90 radians, instead of a quarter turn.
    qrot.rotate(Euler([0, math.radians(-90), 0]))
    return qrot
def extract(properties, *args, **kargs):
    """Read a COLMAP workspace directory into a motion-tracking data dict.

    Locates the cameras/images/points3D files (.bin preferred, then .txt),
    resolves the image directory (project.ini override, else ../images),
    and returns {'resolution', 'cameras', 'trackers'} where each camera
    carries its focal length, pose (R, t) converted to Blender's camera
    convention, and per-image 2D tracker observations keyed by point id.

    Raises AttributeError on a missing/invalid workspace.
    """
    dirpath = bpy.path.abspath(properties.dirpath)
    if not dirpath:
        raise AttributeError(
            'COLMAP Workspace Directory must be provided.\nCameras, images and points3D files must exist.'
        )
    # find the requisite files in either format
    requisites = ['cameras', 'images', 'points3D']
    # trailing None acts as a sentinel: neither extension matched all 3 files
    extensions = ['.bin', '.txt', None]
    for ext in extensions:
        if not ext:
            raise AttributeError(
                'COLMAP Workspace Directory must contain:\ncameras, images and points3D files.\nThese files must be in either .BIN or .TXT format.'
            )
        elif len([
                f for f in os.listdir(dirpath) for c in requisites
                if f == f'{c}{ext}'
        ]) == 3:
            # found the correct set of files with this extension
            break
    # check for a project.ini as it may contain an alternate image path setting
    try:
        # https://stackoverflow.com/a/25493615
        with open(os.path.join(dirpath, 'project.ini'), 'r') as f:
            config_string = f'[DEFAULT]\n{f.read()}'
        config = ConfigParser()
        config.read_string(config_string)
        image_path = config['DEFAULT']['image_path']
    except Exception:
        # BUGFIX: narrowed from a bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt). Any failure here still falls back
        # to the conventional image directory — deliberate best-effort.
        image_path = os.path.join(dirpath, '..', 'images')
    cameras = {}
    trackers = {}
    data = {'trackers': trackers, 'cameras': cameras}
    # https://colmap.github.io/format.html
    try:
        ccameras, images, points3D = read_model(dirpath, ext=ext)
    except Exception as ex:
        raise AttributeError(
            f'Error when reading COLMAP workspace directory:\n{str(ex)}')
    model = list(ccameras.values())[0]
    resolution = (model.width, model.height)
    data.setdefault('resolution', resolution)

    def shift(co):
        # COLMAP uses the convention that the upper left image corner has coordinate (0, 0)
        # and the center of the upper left most pixel has coordinate (0.5, 0.5).
        # Translate the point to the center of the image.
        return (co[0] - resolution[0] / 2.0 + 0.5,
                co[1] - resolution[1] / 2.0 + 0.5)

    for idx, i in images.items():
        camera = ccameras[i.camera_id]
        f, cx, cy = parse_camera_param_list(camera)
        filename = i.name.strip()
        if not os.path.isabs(filename) or not os.path.isfile(filename):
            filename = os.path.join(image_path, filename)
        # The coordinates of the projection/camera center are given by -R^t * T,
        # where R^t is the inverse/transpose of the 3x3 rotation matrix composed
        # from the quaternion and T is the translation vector. The local camera
        # coordinate system of an image is defined in a way that the X axis points
        # to the right, the Y axis to the bottom, and the Z axis to the front as
        # seen from the image.
        R = Quaternion(i.qvec).to_matrix()
        R.transpose()
        # c = -R^T t
        T = Vector(i.tvec)
        c = -1 * R @ T
        # t = -R * c  (after flipping to Blender's camera orientation)
        R.transpose()
        R.rotate(Euler((pi, 0, 0)))
        t = -1 * R @ c
        cameras.setdefault(
            idx, {
                'filename': filename,
                'f': f,
                'k': (0, 0, 0),
                't': tuple(t),
                'principal': (cx, cy),
                'R': tuple(map(tuple, tuple(R))),
                # Only keep observations matched to a reconstructed 3D point.
                'trackers': {
                    i.point3D_ids[tidx]: shift(i.xys[tidx])
                    for tidx in range(len(i.xys)) if i.point3D_ids[tidx] >= 0
                },
            })
    for idx, p in points3D.items():
        trackers.setdefault(idx, {
            'co': tuple(p.xyz),
            'rgb': tuple(p.rgb),
            'error': p.error,
        })
    return data
def execute(self, context):
    """Export the scene's meshes and armature bones to an MDL-style text file.

    Collects geosets (vertices, normals, UVs, faces, vertex-group/bone
    weights) from every MESH object, then writes version, model, texture,
    material, geoset, bone and pivot-point chunks to `self.properties.filepath`.

    Returns {'FINISHED'} as required by Blender operators.

    NOTE(review): `mesh.transform(obj.matrix_world)` bakes the world matrix
    into the mesh datablock in place — repeated exports will re-apply it.
    NOTE(review): the bone-writing loop reuses `armature` left over from the
    last mesh's modifier scan; fails with NameError if no armature was found.
    """
    print("Export", self.properties.filepath)
    version = Version()
    model = Model()
    texture = Textures()
    material = Materials()
    geosets = []
    bones = []
    pivotpoints = model.PivotPoints()
    meshcount = 0
    # BUGFIX: the original opened the file and never closed it (leaked
    # handle, possibly unflushed data). `with` guarantees close on all paths.
    with open(self.properties.filepath, 'w') as file:
        for obj in bpy.context.scene.objects:
            if obj.type == 'MESH':
                # depsgraph = context.evaluated_depsgraph_get()
                # mesh = bpy.data.meshes.new_from_object(obj.evaluated_get(depsgraph), preserve_all_data_layers=True, depsgraph=depsgraph)
                mesh = obj.data
                mesh.transform(obj.matrix_world)
                geoset = model.Geoset()
                mesh.calc_loop_triangles()
                # Armature modifiers provide the bone set for vertex groups.
                armatures = []
                for modifier in obj.modifiers:
                    if modifier.type == 'ARMATURE' and modifier.use_vertex_groups:
                        armatures.append(modifier.object)
                for armature in armatures:
                    loadarmature(armature, bones, pivotpoints)
                # bone_names = set(b.name for b in armature.object.data.bones)
                for vertex in mesh.vertices:
                    # Positions are scaled x20 into MDL units.
                    geoset.Vertices.append([vertex.co[0]*20, vertex.co[1]*20, vertex.co[2]*20])
                    geoset.Normals.append([vertex.normal[0], vertex.normal[1], vertex.normal[2]])
                    # Strongest-weighted groups first; keep up to 3 above 0.25.
                    vgroups = sorted(vertex.groups[:], key=lambda x: x.weight, reverse=True)
                    if len(vgroups):
                        group = list(list(filter(lambda b: b.Name == obj.vertex_groups[vg.group].name, bones))[0].ObjectId for vg in vgroups if vg.weight > 0.25)[:3]
                    else:
                        group = [0]
                    if group not in geoset.Groups:
                        geoset.Groups.append(group)
                    geoset.VertexGroup.append(geoset.Groups.index(group))
                for group in geoset.Groups:
                    for g in group:
                        bones[g].GeosetId = meshcount
                # Placeholder UVs; safe despite the shared inner list because
                # entries below are replaced, never mutated.
                geoset.TVertices = [[0.0, 0.0]] * len(geoset.Vertices)
                for tri in mesh.loop_triangles:
                    geoset.Faces.append((tri.vertices[0], tri.vertices[1], tri.vertices[2]))
                    for i in range(3):
                        # V is flipped: MDL uses a top-left UV origin.
                        geoset.TVertices[mesh.loops[tri.loops[i]].vertex_index] = [mesh.uv_layers.active.data[tri.loops[i]].uv[0], 1 - mesh.uv_layers.active.data[tri.loops[i]].uv[1]]
                geosets.append(geoset)
                meshcount += 1
        version.FormatVersion = 800
        model.Name = "test"
        model.NumGeosets = meshcount
        model.BlendTime = 150
        model.NumBones = len(bones)
        version.write(file)
        model.write(file)
        texture.write(file)
        material.write(file)
        for geoset in geosets:
            geoset.write(file)
        for bone in bones:
            # Resolve parent name to exported ObjectId.
            if bone.ParentName:
                bone.Parent = list(filter(lambda b: b.Name == bone.ParentName, bones))[0].ObjectId
            for k, frame in bone.Translation.items():
                bone.Translation[k] = list(armature.matrix_world @ Vector(frame))
            for k, frame in bone.Rotation.items():
                # Rotate the axis into world space, then re-pack as (x, y, z, w).
                axis, angle = Quaternion(frame).to_axis_angle()
                axis.rotate(armature.matrix_world)
                quat = Quaternion(axis, angle)
                quat.normalize()
                bone.Rotation[k] = [quat[1], quat[2], quat[3], quat[0]]
            bone.write(file)
        pivotpoints.write(file)
    return {'FINISHED'}
def extract(properties, *args, **kargs):
    """Read a COLMAP sparse reconstruction into a motion-tracking data dict.

    Locates the cameras/images/points3D files (single .bin or .txt set),
    resolves the image directory (project.ini override, else the workspace
    directory itself), and returns {'resolution', 'cameras', 'trackers'}
    where each camera carries focal length, pose (R, t) converted to
    Blender's camera convention, and 2D tracker observations.

    Raises Exception if the requisite files are missing or mixed-format.
    """
    dirpath = bpy.path.abspath(properties.dirpath)
    # find the requisite files in either format
    requisites = ['cameras', 'images', 'points3D']
    extensions = ['.bin', '.txt']
    files = [
        f for f in os.listdir(dirpath) for c in requisites if f.startswith(c)
    ]
    ext = set(extensions).intersection(
        [os.path.splitext(r)[1].lower() for r in files])
    if len(files) != 3 or len(ext) != 1:
        raise Exception(
            'COLMAP sparse reconstruction must contain a cameras, images and points3D file in .BIN or .TXT format'
        )
    # check for a project.ini as it may contain an alternate image path setting
    try:
        # https://stackoverflow.com/a/25493615
        with open(os.path.join(dirpath, 'project.ini'), 'r') as f:
            config_string = f'[DEFAULT]\n{f.read()}'
        config = ConfigParser()
        config.read_string(config_string)
        image_path = config['DEFAULT']['image_path']
    except Exception:
        # BUGFIX: narrowed from a bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt). Failure still falls back to the
        # workspace directory — deliberate best-effort.
        image_path = dirpath
    cameras = {}
    trackers = {}
    data = {'trackers': trackers, 'cameras': cameras}
    # https://colmap.github.io/format.html
    ccameras, images, points3D = read_model(dirpath, ext=ext.pop())
    model = list(ccameras.values())[0]
    resolution = (model.width, model.height)
    data.setdefault('resolution', resolution)
    for idx, i in images.items():
        camera = ccameras[i.camera_id]
        f, cx, cy = parse_camera_param_list(camera)
        filename = i.name.strip()
        if not os.path.isabs(filename) or not os.path.isfile(filename):
            filename = os.path.join(image_path, filename)
        # The coordinates of the projection/camera center are given by -R^t * T,
        # where R^t is the inverse/transpose of the 3x3 rotation matrix composed
        # from the quaternion and T is the translation vector. The local camera
        # coordinate system of an image is defined in a way that the X axis points
        # to the right, the Y axis to the bottom, and the Z axis to the front as
        # seen from the image.
        R = Quaternion(i.qvec).to_matrix()
        R.transpose()
        # c = -R^T t
        T = Vector(i.tvec)
        c = -1 * R @ T
        # t = -R * c  (after flipping to Blender's camera orientation)
        R.transpose()
        R.rotate(Euler((pi, 0, 0)))
        t = -1 * R @ c
        cameras.setdefault(
            idx, {
                'filename': filename,
                'f': f,
                'k': (0, 0, 0),
                't': tuple(t),
                'principal': (cx, cy),
                'R': tuple(map(tuple, tuple(R))),
                'trackers': {},
            })
    for idx, p in points3D.items():
        trackers.setdefault(idx, {'co': tuple(p.xyz), 'rgb': tuple(p.rgb)})
        # COLMAP uses the convention that the upper left image corner has coordinate (0, 0)
        # and the center of the upper left most pixel has coordinate (0.5, 0.5).
        for image_idx, image_id in enumerate(p.image_ids):
            # COLMAP db format allows more dimensions, but sparse file format may not reperesent it
            # to be safe, truncated coords to 2 dimensions
            co = list(images[image_id].xys[p.point2D_idxs[image_idx]])[:2]
            co[0] = co[0] - resolution[0] / 2.0 + 0.5
            co[1] = co[1] - resolution[1] / 2.0 + 0.5
            # BUGFIX: the original stored every observation on
            # cameras[p.image_ids[0]], discarding all but the first image's
            # coordinates despite computing them per image. Register each
            # observation with the camera/image that actually saw it.
            cameras[image_id]['trackers'].setdefault(idx, co)
    return data