def test_expmap_axis_normalization(self):
    q = Quaternion((1, 1, 0), 2)
    e = q.to_exponential_map()
    self.assertAlmostEqual(e.x, 2 * math.sqrt(0.5), 6)
    self.assertAlmostEqual(e.y, 2 * math.sqrt(0.5), 6)
    self.assertAlmostEqual(e.z, 0)

def rotateCam(rift):
    cont = G.getCurrentController()
    owner = cont.owner
    scene = G.getCurrentScene()

    rift.poll()
    rotation = Quaternion((rift.rotation[0], rift.rotation[1],
                           rift.rotation[2], rift.rotation[3]))
    eu = rotation.to_euler()

    # active cam
    fix = Euler((-1.57, 0, 3 * 1.57), 'XYZ')
    rot = Euler((-eu.z, eu.y, -eu.x), 'XYZ')

    # owner
    # fix = Euler((0, 2*-1.57, -1.57), 'XYZ')
    # rot = Euler((-eu.x, eu.z, eu.y), 'XYZ')

    rot.rotate(fix)

    # cam = scene.active_camera
    cam = scene.cameras["Camera"]
    cam.localOrientation = rot

def _create_bones(self, bone, parent):
    abone = self._armature.edit_bones.new(bone.name)
    abone.tail = Vector([0, 1, 0])
    if parent:
        abone.parent = parent

    rot_part = Quaternion(bone.rotation).inverted().to_matrix()
    pos_part = Vector(bone.position)

    transform = Matrix.Translation(bone.position) * rot_part.to_4x4()
    if parent:
        transform = parent.matrix * transform

    rot_part = transform.to_3x3()
    pos_part = transform.to_translation()

    # abone.transform(transform)
    abone.transform(rot_part)
    abone.translate(pos_part)

    nrm_mtx = transform.to_3x3()
    nrm_mtx.invert()
    nrm_mtx.transpose()

    for vi in range(0, bone.vertex_count):
        vt_ind = vi + bone.vertex_index
        self._file.vertices[vt_ind] = list(transform * Vector(self._file.vertices[vt_ind]))
        self._file.normals[vt_ind] = list(nrm_mtx * Vector(self._file.normals[vt_ind]))

    for child in bone.children:
        self._create_bones(child, abone)

def loadTPose(rig, filename):
    if filename:
        filepath = os.path.join(os.path.dirname(__file__), filename)
        filepath = os.path.normpath(filepath)
        print("Loading %s" % filepath)
        struct = loadJson(filepath)
        rig.McpTPoseFile = filename
    else:
        return False

    unit = Matrix()
    for pb in rig.pose.bones:
        pb.matrix_basis = unit

    for name, value in struct:
        bname = getBoneName(rig, name)
        try:
            pb = rig.pose.bones[bname]
        except KeyError:
            continue
        quat = Quaternion(value)
        pb.matrix_basis = quat.to_matrix().to_4x4()
        setBoneTPose(pb, quat)

    rig.McpTPoseLoaded = True
    rig.McpRestTPose = False
    return True

def test_to_expmap(self):
    q = Quaternion((0, 0, 1), math.radians(90))
    e = q.to_exponential_map()
    self.assertAlmostEqual(e.x, 0)
    self.assertAlmostEqual(e.y, 0)
    self.assertAlmostEqual(e.z, math.radians(90), 6)

def execute(self, context):
    scn = context.scene
    if 'localgrid_menu_items_strings' in scn and \
       'localgrid_menu_items_float' in scn:
        strings_dict = scn['localgrid_menu_items_strings']
        float_dict = scn['localgrid_menu_items_float']
        for i in range(len(strings_dict.keys())):
            strings = strings_dict[str(i)]
            icon, name = strings.split(',', 1)
            ls = float_dict[str(i)]
            orig = Vector([ls[0], ls[1], ls[2]])
            quat = Quaternion([ls[3], ls[4], ls[5], ls[6]])
            item = scn.local_grid.items.add()
            item.item_name = name
            item.icon = icon
            item.orig = orig
            item.quat = quat.inverted()
    if 'localgrid_menu_items_strings' in scn:
        del(scn['localgrid_menu_items_strings'])
    if 'localgrid_menu_items_float' in scn:
        del(scn['localgrid_menu_items_float'])
    if hasattr(scn, 'localgrid_menu_items_strings'):
        del(scn.localgrid_menu_items_strings)
    if hasattr(scn, 'localgrid_menu_items_float'):
        del(scn.localgrid_menu_items_float)
    return {'FINISHED'}

def execute(self, context):
    selected = bpy.context.selected_objects
    obj = selected[-1]
    surf = bpy.context.scene.objects['surface']
    loc = bpy.context.scene.cursor_location

    bvh = BVHTree.FromObject(surf, bpy.context.scene)
    loc = surf.matrix_world.inverted() * loc
    (loc, normal, index, dist) = bvh.find_nearest(loc)
    if self.use_smooth:
        normal = smooth_normal(surf, loc, index)
    loc = surf.matrix_world * loc

    bpy.ops.object.duplicate()
    new_obj = bpy.context.selected_objects[-1]

    (unused, surf_rot, unused) = surf.matrix_world.decompose()
    (unused, obj_rot, scale) = obj.matrix_world.decompose()

    normal = surf_rot * normal
    vec = obj_rot * Vector((0.0, 0.0, 1.0))
    q = vec.rotation_difference(normal)
    q = Quaternion().slerp(q, self.align_with_normal)

    mat_scale = Matrix()
    for i in range(3):
        mat_scale[i][i] = scale[i]

    new_obj.matrix_world = (Matrix.Translation(loc) *
                            q.to_matrix().to_4x4() *
                            obj_rot.to_matrix().to_4x4() *
                            mat_scale)

    bpy.context.scene.objects.active = new_obj
    return {'FINISHED'}

def loadMhpFile(context, filepath):
    ob = context.object
    rig = ob.parent
    scn = context.scene
    if rig and rig.type == 'ARMATURE':
        (pname, ext) = os.path.splitext(filepath)
        mhppath = pname + ".mhp"

        fp = open(mhppath, "rU")
        for line in fp:
            words = line.split()
            if len(words) < 5:
                continue
            elif words[1] == "quat":
                q = Quaternion((float(words[2]), float(words[3]),
                                float(words[4]), float(words[5])))
                mat = q.to_matrix().to_4x4()
                pb = rig.pose.bones[words[0]]
                pb.matrix_basis = mat
            elif words[1] == "gquat":
                q = Quaternion((float(words[2]), float(words[3]),
                                float(words[4]), float(words[5])))
                mat = q.to_matrix().to_4x4()
                maty = mat[1].copy()
                matz = mat[2].copy()
                mat[1] = -matz
                mat[2] = maty
                pb = rig.pose.bones[words[0]]
                pb.matrix_basis = pb.bone.matrix_local.inverted() * mat
        fp.close()
        print("Mhp file %s loaded" % mhppath)

def calc_pose_mats(iqmodel, iqpose, bone_axis):
    loc_pose_mat = [None] * len(iqmodel.bones)
    abs_pose_mat = [None] * len(iqmodel.bones)
    recalc = False

    # convert pose to local matrix and compute absolute matrix
    for n in range(len(iqmodel.bones)):
        iqbone = iqmodel.bones[n]

        pose_pos = iqpose[n].translate
        pose_rot = iqpose[n].rotate
        pose_scale = iqpose[n].scale

        local_pos = Vector(pose_pos)
        local_rot = Quaternion((pose_rot[3], pose_rot[0], pose_rot[1], pose_rot[2]))
        local_scale = Vector(pose_scale)

        mat_pos = Matrix.Translation(local_pos)
        mat_rot = local_rot.to_matrix().to_4x4()
        mat_scale = Matrix.Scale(local_scale.x, 3).to_4x4()
        loc_pose_mat[n] = mat_pos * mat_rot * mat_scale

        if iqbone.parent >= 0:
            abs_pose_mat[n] = abs_pose_mat[iqbone.parent] * loc_pose_mat[n]
        else:
            abs_pose_mat[n] = loc_pose_mat[n]

    # Remove negative scaling from bones.
    # Due to numerical instabilities in blender's matrix <-> head/tail/roll math
    # this isn't always stable when the bones are in the X axis. If the bones
    # end up rotated 90 degrees from what they should be, that's the reason.
    for n in range(len(iqmodel.bones)):
        if abs_pose_mat[n].is_negative:
            if not hasattr(iqmodel, 'abs_bind_mat'):
                print("warning: removing negative scale in bone", iqmodel.bones[n].name)
            abs_pose_mat[n] = abs_pose_mat[n] * Matrix.Scale(-1, 4)
            recalc = True

    # flip bone axis (and recompute local matrix if needed)
    if bone_axis == 'X':
        axis_flip = Matrix.Rotation(math.radians(-90), 4, 'Z')
        abs_pose_mat = [m * axis_flip for m in abs_pose_mat]
        recalc = True
    if bone_axis == 'Z':
        axis_flip = Matrix.Rotation(math.radians(-90), 4, 'X')
        abs_pose_mat = [m * axis_flip for m in abs_pose_mat]
        recalc = True

    if recalc:
        inv_pose_mat = [m.inverted() for m in abs_pose_mat]
        for n in range(len(iqmodel.bones)):
            iqbone = iqmodel.bones[n]
            if iqbone.parent >= 0:
                loc_pose_mat[n] = inv_pose_mat[iqbone.parent] * abs_pose_mat[n]
            else:
                loc_pose_mat[n] = abs_pose_mat[n]

    return loc_pose_mat, abs_pose_mat

def quats_to_matrix(qx, qy, qz, qw, tx, ty, tz):  # pylint: disable=invalid-name
    """Converts the quaternion and the translation into a 4x4 matrix."""
    # this is straight up math, nothing to "grasp" or "understand"; var names are practical
    # pylint: disable=invalid-name
    mat = Quaternion((qx, qy, qz, qw)).to_matrix().to_4x4()
    mat.translation = Vector((tx, ty, tz))
    return mat

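# Hedged usage sketch for quats_to_matrix above (not part of the snippet's source;
# assumes Blender's mathutils is importable): it round-trips the result through
# Matrix.decompose() as a sanity check.  Note that mathutils.Quaternion((a, b, c, d))
# treats the first component as w, so the argument order (qx, qy, qz, qw) only gives
# the intended rotation if the caller's "qx" actually holds the scalar part.
from mathutils import Vector

def _check_quats_to_matrix():
    # identity rotation, translation (1, 2, 3)
    mat = quats_to_matrix(1.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0)
    loc, rot, _scale = mat.decompose()
    assert (loc - Vector((1.0, 2.0, 3.0))).length < 1e-6
    assert abs(rot.angle) < 1e-6
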
def test_from_expmap(self):
    e = Vector((1, 1, 0))
    q = Quaternion(e)
    axis, angle = q.to_axis_angle()
    self.assertAlmostEqual(angle, math.sqrt(2), 6)
    self.assertAlmostEqual(axis.x, math.sqrt(0.5), 6)
    self.assertAlmostEqual(axis.y, math.sqrt(0.5), 6)
    self.assertAlmostEqual(axis.z, 0)

def change_to_scs_quaternion_coordinates(rot):
    """Transposes quaternion rotation from Blender to SCS game engine.

    :param rot: Blender quaternion (or four floats)
    :type rot: Quaternion | list | tuple
    :return: Transposed quaternion rotation
    :rtype: Quaternion
    """
    quat = Quaternion((rot[0], rot[1], rot[2], rot[3]))
    return (scs_to_blend_matrix().inverted() *
            quat.to_matrix().to_4x4() *
            scs_to_blend_matrix()).to_quaternion()

def angle_between_nor(nor_orig, nor_result):
    angle = math.acos(nor_orig.dot(nor_result))
    axis = nor_orig.cross(nor_result).normalized()

    q = Quaternion()
    q.x = axis.x * math.sin(angle / 2)
    q.y = axis.y * math.sin(angle / 2)
    q.z = axis.z * math.sin(angle / 2)
    q.w = math.cos(angle / 2)

    return q

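# Hedged alternative sketch (not part of the snippet's source): mathutils can build
# the same axis-angle rotation directly, and clamping the dot product guards
# math.acos() against floating-point values slightly outside [-1, 1], which
# angle_between_nor above does not do.  Vector.orthogonal() is used as a fallback
# axis when the normals are (anti)parallel and the cross product vanishes.
import math
from mathutils import Quaternion, Vector

def angle_between_nor_alt(nor_orig, nor_result):
    dot = max(-1.0, min(1.0, nor_orig.dot(nor_result)))
    angle = math.acos(dot)
    axis = nor_orig.cross(nor_result)
    if axis.length < 1e-8:  # rotation axis ill-defined; any perpendicular axis works
        axis = nor_orig.orthogonal()
    return Quaternion(axis.normalized(), angle)
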
def set_LRS(self, context, obj, LRS, rotation_mode='QUATERNION'):
    L, R, S = LRS

    L_mode, R_mode, S_mode, to_m, persp = self.calc_matrix(context, obj)

    mL = (L is not None) and (L_mode != 'BASIS')
    mR = (R is not None) and (R_mode != 'BASIS')
    mS = (S is not None) and (S_mode != 'BASIS')

    if mL or mR or mS:
        in_m = matrix_inverted_safe(to_m) * BlUtil.Object.matrix_world(obj)

        if not mL:
            in_L = in_m.to_translation()
        else:
            L = Vector(L)  # make sure it's a Vector
            if L_mode in ('CAMERA', 'VIEW'):
                L = Vector((L.x / persp.x, L.y / persp.y, -L.z)).lerp(
                    Vector((L.x * L.z / persp.x, L.y * L.z / persp.y, -L.z)), persp.z)
            in_L = L
            L = None

        if not mR:
            in_R = in_m.to_quaternion()
            if not R:
                rotation_mode = obj.rotation_mode
        else:
            if rotation_mode == 'QUATERNION':
                in_R = Quaternion(R)
            elif rotation_mode == 'AXIS_ANGLE':
                in_R = Quaternion(R[1:], R[0])
            else:
                if (len(R) == 4):
                    R = R[1:]
                in_R = Euler(R).to_quaternion()
            R = None

        if not mS:
            in_S = in_m.to_scale()
        else:
            in_S = Vector(S)
            S = None

        x, y, z = in_R.normalized().to_matrix().col
        in_m = matrix_compose(x*in_S.x, y*in_S.y, z*in_S.z, in_L)

        BlUtil.Object.matrix_world_set(obj, to_m * in_m)

        if (not mL) and (not L):
            L = Vector(obj.location)
        if (not mR) and (not R):
            R = BlUtil.Object.rotation_convert(obj.rotation_mode, obj.rotation_quaternion,
                obj.rotation_axis_angle, obj.rotation_euler, rotation_mode)
        if (not mS) and (not S):
            S = Vector(obj.scale)

    if L:
        obj.location = Vector(L)
    if R:
        BlUtil.Object.rotation_apply(obj, R, rotation_mode)
    if S:
        obj.scale = Vector(S)

def compute_pose(self, world_poses, local_rotations, joint_offsets, index):
    """Compute a pose for an index. Returns the resulting mat4."""
    parent_quat = Quaternion([1, 0, 0, 0])
    parent_pose = Matrix()
    if index > 0:
        parent_quat = local_rotations[index - 1]
        parent_pose = world_poses[index - 1]

    local_pose = (parent_quat.inverted() * local_rotations[index]).to_matrix()
    local_pose.resize_4x4()

    return parent_pose * joint_offsets[index] * local_pose

def setTPose(context):
    rig = context.object
    scn = context.scene
    if not rig.McpHasTPose:
        print("%s has no defined T-pose" % rig)

    quat = Quaternion((1, 0, 0, 0))
    mat = quat.to_matrix().to_4x4()
    for pb in rig.pose.bones:
        try:
            qw = pb["McpRestW"]
        except KeyError:
            continue
        pb.matrix_basis = mat

    print("Set T-pose")

def clearTPose(context):
    rig = context.object
    scn = context.scene
    if not rig.McpHasTPose:
        print("%s has no defined T-pose" % rig)

    for pb in rig.pose.bones:
        try:
            qw = pb["McpRestW"]
            qx = pb["McpRestX"]
            qy = pb["McpRestY"]
            qz = pb["McpRestZ"]
        except KeyError:
            continue
        quat = Quaternion((qw, qx, qy, qz))
        pb.matrix_basis = quat.to_matrix().to_4x4()

    print("Cleared T-pose")

def bake_path_offsets(context, cu_path, ob, action, specials): """ bake path offsets into an action """ channels = get_bone_channels(action) channels = topmost_level(channels, ob, specials) limits = (int(action.frame_range[0]), 2 + int(action.frame_range[1])) values = evaluate_curves(channels, limits) zero_offset = get_path_offset(context, cu_path, ob, 0).copy() for bone, groups in channels.items(): for data_path, curves in groups.items(): data = [(cu.data_path, cu.array_index, cu.group.name) for cu in curves] while curves: cu = curves.pop(-1) action.fcurves.remove(cu) for datum in data: cu = action.fcurves.new(datum[0], datum[1], datum[2]) curves.append(cu) for frame in range(limits[0], limits[1]): context.scene.frame_set(frame) current_offset = ob.matrix_world print(ob.name, current_offset.to_translation() , zero_offset.to_translation()) for bone, groups in channels.items(): for transforms in 'location', 'rotation_quaternion': if 'location' in groups: old_loc = values[bone]['location'][frame - limits[0]] else: old_loc = Vector((0,0,0)) if 'rotation_quaternion' in groups: old_rot = Quaternion(values[bone]['rotation_quaternion'][frame - limits[0]]) else: old_rot = Quaternion((1, 0, 0, 0)) old_trans = Matrix.Translation(old_loc).to_4x4() * old_rot.to_matrix().to_4x4() rest_mat = ob.data.bones[bone].matrix_local old_trans_world = current_offset * rest_mat * old_trans new_trans =\ rest_mat.inverted() * zero_offset.inverted() * old_trans_world new_loc, new_rot, sca = new_trans.decompose() for group, curves in groups.items(): for array_index, curve in enumerate(curves): if curve.data_path.endswith('location'): insert_keyframe_curve( curve, frame, new_loc[array_index], 'LINEAR') else: insert_keyframe_curve( curve, frame, new_rot[array_index], 'LINEAR')
def createTPose(context):
    rig = context.object
    scn = context.scene
    if rig.McpHasTPose:
        setTPose(context)
        return

    filepath = os.path.join(os.path.dirname(__file__), "t_pose.json")
    struct = loadJson(filepath)

    for name, value in struct:
        pb = rig.pose.bones[name]
        quat = Quaternion(value)
        pb.matrix_basis = quat.to_matrix().to_4x4()
        rest = quat.inverted()
        pb["McpRestW"] = rest.w
        pb["McpRestX"] = rest.x
        pb["McpRestY"] = rest.y
        pb["McpRestZ"] = rest.z

    children = []
    for ob in scn.objects:
        if ob.type != 'MESH':
            continue
        for mod in ob.modifiers:
            if (mod.type == 'ARMATURE' and mod.object == rig):
                children.append((ob, mod.name))
                scn.objects.active = ob
                bpy.ops.object.modifier_apply(apply_as='SHAPE', modifier=mod.name)
                ob.data.shape_keys.key_blocks[mod.name].value = 1

    scn.objects.active = rig
    bpy.ops.pose.armature_apply()

    for ob, name in children:
        scn.objects.active = ob
        mod = ob.modifiers.new(name, 'ARMATURE')
        mod.object = rig
        mod.use_vertex_groups = True
        bpy.ops.object.modifier_move_up(modifier=name)
        setShapeKey(ob, name, 1.0)

    scn.objects.active = rig
    rig.McpHasTPose = True
    print("Created T-pose")

def applied_influence(matrix, group_matrix=None):
    q = matrix.to_3x3().to_quaternion()
    loc = matrix.to_translation()
    if self.mode == 'A_TO_B':
        q0 = Quaternion([1, 0, 0, 0])
        loc0 = Vector()
    else:
        q0 = group_matrix.to_quaternion()
        if self.transform_mode == 'TRANSLATE':
            q = q0.copy()
        if self.transform_mode in {'ALL', 'TRANSLATE'}:
            loc0 = group_matrix.to_translation()
        else:
            loc0 = Vector()

    q = q * self.influence + q0 * (1.0 - self.influence)
    loc = loc * self.influence + loc0 * (1.0 - self.influence)
    mat = q.to_matrix().to_4x4()
    mat.col[3][:3] = loc
    return mat

def getmatrix(self):
    # Rotating / panning / zooming 3D view is handled here.
    # Get matrix.
    if self.selobj.rotation_mode == 'AXIS_ANGLE':
        # rotation mode is axis-angle
        angle, x, y, z = self.selobj.rotation_axis_angle
        self.matrix = Matrix.Rotation(-angle, 4, Vector((x, y, z)))
    elif self.selobj.rotation_mode == 'QUATERNION':
        # rotation mode is quaternion
        w, x, y, z = self.selobj.rotation_quaternion
        x = -x
        y = -y
        z = -z
        quat = Quaternion([w, x, y, z])
        self.matrix = quat.to_matrix()
        self.matrix.resize_4x4()
    else:
        # rotation mode is euler
        ax, ay, az = self.selobj.rotation_euler
        mat_rotX = Matrix.Rotation(-ax, 4, 'X')
        mat_rotY = Matrix.Rotation(-ay, 4, 'Y')
        mat_rotZ = Matrix.Rotation(-az, 4, 'Z')

        if self.selobj.rotation_mode == 'XYZ':
            self.matrix = mat_rotX * mat_rotY * mat_rotZ
        elif self.selobj.rotation_mode == 'XZY':
            self.matrix = mat_rotX * mat_rotZ * mat_rotY
        elif self.selobj.rotation_mode == 'YXZ':
            self.matrix = mat_rotY * mat_rotX * mat_rotZ
        elif self.selobj.rotation_mode == 'YZX':
            self.matrix = mat_rotY * mat_rotZ * mat_rotX
        elif self.selobj.rotation_mode == 'ZXY':
            self.matrix = mat_rotZ * mat_rotX * mat_rotY
        elif self.selobj.rotation_mode == 'ZYX':
            self.matrix = mat_rotZ * mat_rotY * mat_rotX

    # handle object scaling
    sx, sy, sz = self.selobj.scale
    mat_scX = Matrix.Scale(sx, 4, Vector([1, 0, 0]))
    mat_scY = Matrix.Scale(sy, 4, Vector([0, 1, 0]))
    mat_scZ = Matrix.Scale(sz, 4, Vector([0, 0, 1]))
    self.matrix = mat_scX * mat_scY * mat_scZ * self.matrix

def exportParticles(context, emitter, psys, oct_t):
    """Exports a particle system for the specified emitter"""
    octane = context.scene.octane_render
    export_path = bpath.abspath(octane.path)

    pset = psys.settings
    infostr = "Exporting PS '%s' (%s) on emitter '%s'" % (psys.name, pset.type, emitter.name)
    particles = [p for p in psys.particles] if pset.type == 'HAIR' else \
                [p for p in psys.particles if p.alive_state == 'ALIVE']

    if pset.render_type == "OBJECT":
        dupli_ob = pset.dupli_object
        if dupli_ob is not None and octane.instances_write_dupli:
            info(infostr + " with %i instances of '%s' objects" % (len(particles), dupli_ob.name))
            filepath = "".join([bpath.abspath(octane.path), dupli_ob.name])
            info("Writing dupli object to file '%s'" % (filepath + ".obj"))
            dupli_world = dupli_ob.matrix_world.copy()
            transl_inv = Matrix.Translation(-dupli_world.translation)
            dupli_ob.matrix_world = transl_inv * dupli_ob.matrix_world
            writeDupliObjects(context, [dupli_ob], filepath)
            dupli_ob.matrix_world = dupli_world
    # elif pset.render_type == "GROUP":
    #     duplig = pset.dupli_group
    #     if duplig is not None:
    #         objects = duplig.objects
    #         infostr += " with %i instances from group '%s'" % (len(particles), duplig.name)
    #         info(infostr + " {0}".format([o.name for o in objects]))
    #         # TODO: separate group scatter per object
    else:
        warning("Invalid PS visualization type '%s'" % pset.render_type)
        return

    if not pset.use_rotation_dupli:
        warning("'Use object rotation' should be on. Rotations won't conform to the Blender viewport")

    try:
        fh = open(export_path + psys.name + ".csv", "w")
        for p in particles:
            # if pset.type == 'HAIR' or not p.alive_state == 'DEAD':
            if (pset.type == "HAIR"):
                loc = Matrix.Translation(p.hair_keys[0].co)
                scale = Matrix.Scale(p.size, 4) * Matrix.Scale(pset.hair_length, 4)
            else:
                loc = Matrix.Translation(p.location)
                scale = Matrix.Scale(p.size, 4)

            rot = Quaternion.to_matrix(p.rotation).to_4x4()
            t = loc * rot * scale
            t = emitter.matrix_world * t if pset.type == "HAIR" else t
            t = oct_t[0] * t * oct_t[1]
            writeTransform(t, fh)
        fh.close()
    except IOError as err:
        msg = "IOError during file handling '{0}'".format(err)
        error(msg)
        raise ExportException(msg)

def extract_current_pose():
    """Convert current object's pose to OpenCV's "rvec" and "tvec"."""
    ob = bpy.context.object

    if ob.rotation_mode == "QUATERNION":
        q = ob.rotation_quaternion
    elif ob.rotation_mode == "AXIS_ANGLE":
        q = Quaternion(ob.rotation_axis_angle[1:4], ob.rotation_axis_angle[0])
    else:
        assert ob.rotation_mode in ("XYZ", "XZY", "YXZ", "YZX", "ZXY", "ZYX")
        q = ob.rotation_euler.to_quaternion()

    # Rotate 180 deg around local X because a Blender camera has Y and Z axes opposite to OpenCV's
    q *= Quaternion((1.0, 0.0, 0.0), radians(180.0))

    aa = q.to_axis_angle()
    rvec = [c * -aa[1] for c in aa[0]]
    tvec = list(q.inverted() * (-ob.location))
    return rvec, tvec

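# Hedged inverse sketch (not part of the snippet's source): rebuilds a Blender
# object pose from the OpenCV-style rvec/tvec produced by extract_current_pose
# above.  It assumes the object has no parent, and the 180-degree flip about
# local X mirrors the camera-axis convention noted in that function's comment.
from math import radians
from mathutils import Quaternion, Vector

def apply_pose_from_rvec_tvec(ob, rvec, tvec):
    r = Vector(rvec)
    angle = r.length
    axis = r.normalized() if angle > 1e-12 else Vector((1.0, 0.0, 0.0))
    q = Quaternion(axis, angle).inverted()   # rvec encodes the world-to-camera rotation
    ob.rotation_mode = "QUATERNION"
    ob.rotation_quaternion = q * Quaternion((1.0, 0.0, 0.0), radians(180.0))
    ob.location = -(q * Vector(tvec))        # undo tvec = q.inverted() * (-location)
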
def importSkeletonPiece(skel, tree, piece, parentName, depth=0):
    bone = skel.edit_bones.new(piece.name)
    track = piece.trackRef.Reference
    trans = Vector(track.translation())
    rot = Quaternion(track.rotation())
    mTrans = Matrix.Translation(trans)
    mRot = rot.to_matrix()
    mRot.resize_4x4()

    if parentName:
        parent = skel.edit_bones[parentName]
        bone.parent = parent
        bone.head = parent.tail
        bone.use_connect = False
        m = parent.matrix.copy()
    else:
        bone.head = (0, 0, 0)
        m = Matrix()

    m = m * mTrans
    m = m * mRot
    bone.tail = transform(m, bone.head)

    # recursively import the children bones
    for childID in piece.children:
        importSkeletonPiece(skel, tree, tree[childID], piece.name, depth + 1)

def draw_arrow(nockx, nocky, headx, heady, headlength=10, \
               headangle=math.radians(70), headonly=False):
    '''
    nockx, nocky: nock (arrow tail)
    headx, heady: arrow head
    headangle: 0 <= headangle <= 180
    headlength: distance between nock and head along the shaft
    '''
    if nockx == headx and nocky == heady or headonly and headlength == 0:
        return

    angle = max(min(math.pi / 2, headangle / 2), 0)  # angle against the shaft
    vn = Vector((nockx, nocky))
    vh = Vector((headx, heady))
    '''if headonly:
        vh = vh + (vh - vn).normalized() * headlength
        headx, heady = vh
    '''

    bgl.glBegin(bgl.GL_LINES)
    # shaft
    if not headonly:
        bgl.glVertex2f(nockx, nocky)
        bgl.glVertex2f(headx, heady)
    # head
    if headlength:
        length = headlength / math.cos(angle)
        vec = (vn - vh).normalized() * length
        vec.resize_3d()

        q = Quaternion((0, 0, 0, -1))
        q.angle = angle
        v = vec * q
        bgl.glVertex2f(headx, heady)
        bgl.glVertex2f(headx + v[0], heady + v[1])

        q.angle = -angle
        v = vec * q
        bgl.glVertex2f(headx, heady)
        bgl.glVertex2f(headx + v[0], heady + v[1])
    bgl.glEnd()

def drawBone2(p1, p2, radiuses, material):
    length = dist(p1, p2)
    print('length :', length)
    v = Vector(diffv(p1, p2))
    up = Vector((0, 0, 1))
    if v != -up:
        rot = up.rotation_difference(v)
    else:
        rot = Quaternion((1, 0, 0), math.pi)

    s1 = drawEllipsoid((0, 0, -0.5 * length), radiuses, material)
    s2 = drawEllipsoid((0, 0, 0.5 * length), radiuses, material)
    c1 = drawCylinder(zero, radiuses, length, materials.blue)

    s1.select = True
    s2.select = True
    c1.select = True

    # bpy.ops.transform.translate(value=(0,0,length/2))
    # bpy.ops.object.editmode_toggle()
    bpy.ops.transform.rotate(value=rot.angle, axis=rot.axis)
    # bpy.ops.object.editmode_toggle()
    # bpy.ops.transform.translate(value=Vector((0,0,-0.5*length))*rot.to_matrix())
    rot.normalize()
    bpy.ops.transform.translate(value=Vector((0, 0, 0.5 * length)) * rot.to_matrix())
    bpy.ops.transform.translate(value=p1)
    return (s1, s2, c1)

def getmatrix(self, selobj):
    # Rotating / panning / zooming 3D view is handled here.
    # Creates a matrix.
    if selobj.rotation_mode == 'AXIS_ANGLE':
        # object rotation mode is axis-angle
        ang, x, y, z = selobj.rotation_axis_angle
        matrix = Matrix.Rotation(-ang, 4, Vector((x, y, z)))
    elif selobj.rotation_mode == 'QUATERNION':
        # object rotation mode is quaternion
        w, x, y, z = selobj.rotation_quaternion
        x = -x
        y = -y
        z = -z
        self.quat = Quaternion([w, x, y, z])
        matrix = self.quat.to_matrix()
        matrix.resize_4x4()
    else:
        # object rotation mode is euler
        ax, ay, az = selobj.rotation_euler
        mat_rotX = Matrix.Rotation(-ax, 4, 'X')
        mat_rotY = Matrix.Rotation(-ay, 4, 'Y')
        mat_rotZ = Matrix.Rotation(-az, 4, 'Z')

        if selobj.rotation_mode == 'XYZ':
            matrix = mat_rotX * mat_rotY * mat_rotZ
        elif selobj.rotation_mode == 'XZY':
            matrix = mat_rotX * mat_rotZ * mat_rotY
        elif selobj.rotation_mode == 'YXZ':
            matrix = mat_rotY * mat_rotX * mat_rotZ
        elif selobj.rotation_mode == 'YZX':
            matrix = mat_rotY * mat_rotZ * mat_rotX
        elif selobj.rotation_mode == 'ZXY':
            matrix = mat_rotZ * mat_rotX * mat_rotY
        elif selobj.rotation_mode == 'ZYX':
            matrix = mat_rotZ * mat_rotY * mat_rotX

    # handle object scaling
    sx, sy, sz = selobj.scale
    mat_scX = Matrix.Scale(sx, 4, Vector([1, 0, 0]))
    mat_scY = Matrix.Scale(sy, 4, Vector([0, 1, 0]))
    mat_scZ = Matrix.Scale(sz, 4, Vector([0, 0, 1]))
    matrix = mat_scX * mat_scY * mat_scZ * matrix

    return matrix

def _updateMatrix(self):
    from mathutils import Quaternion, Matrix
    import json
    try:
        self._websocket.send('n')
        result = json.loads(self._websocket.recv())
        self._matrix = Quaternion((result[7], result[4], result[5], result[6])).to_matrix().to_4x4()
        position = Matrix.Translation((result[1], result[2], result[3]))
        self._matrix = position * self._matrix
        self._matrix.invert()
    except Exception as err:
        self.logger.log_traceback(err)

def _arc_segment(v_1, v_2):
    ELorigin = bpy.context.scene.objects['ELorigin']
    ELground = bpy.context.scene.objects['ELground']

    v = v_2 - v_1
    d = v.length

    ELorigin.location = Vector((0, 0, 0))
    ELground.location = Vector((0, 0, -d))

    v_L = ELground.location - ELorigin.location
    q = Quaternion()
    c = Vector.cross(v_L, v)
    q.x = c.x
    q.y = c.y
    q.z = c.z
    q.w = sqrt((v_L.length ** 2) * (v.length ** 2)) + \
        Vector.dot(v_L, v)
    q.normalize()
    euler = q.to_euler()

    bpy.ops.object.runfslg_operator()

    laALL = bpy.context.scene.objects['laALL']
    laALL.name = 'lARC'
    laALL.rotation_euler = euler
    laALL.location = v_1

    bpy.context.active_object.select = False
    laALL.select = True
    bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
    laALL.select = False
    bpy.context.active_object.select = True

    return laALL

def create(gltf, scene_idx): """Scene creation.""" if scene_idx is not None: pyscene = gltf.data.scenes[scene_idx] list_nodes = pyscene.nodes # Create a new scene only if not already exists in .blend file # TODO : put in current scene instead ? if pyscene.name not in [scene.name for scene in bpy.data.scenes]: # TODO: There is a bug in 2.8 alpha that break CLEAR_KEEP_TRANSFORM # if we are creating a new scene scene = bpy.context.scene scene.render.engine = "BLENDER_EEVEE" gltf.blender_scene = scene.name else: gltf.blender_scene = pyscene.name # Switch to newly created main scene bpy.context.window.scene = bpy.data.scenes[gltf.blender_scene] else: # No scene in glTF file, create all objects in current scene scene = bpy.context.scene scene.render.engine = "BLENDER_EEVEE" gltf.blender_scene = scene.name list_nodes = BlenderScene.get_root_nodes(gltf) # Create Yup2Zup empty obj_rotation = bpy.data.objects.new("Yup2Zup", None) obj_rotation.rotation_mode = 'QUATERNION' obj_rotation.rotation_quaternion = Quaternion( (sqrt(2) / 2, sqrt(2) / 2, 0.0, 0.0)) bpy.data.scenes[gltf.blender_scene].collection.objects.link( obj_rotation) if list_nodes is not None: for node_idx in list_nodes: BlenderNode.create(gltf, node_idx, None) # None => No parent # Now that all mesh / bones are created, create vertex groups on mesh if gltf.data.skins: for skin_id, skin in enumerate(gltf.data.skins): if hasattr(skin, "node_ids"): BlenderSkin.create_vertex_groups(gltf, skin_id) for skin_id, skin in enumerate(gltf.data.skins): if hasattr(skin, "node_ids"): BlenderSkin.assign_vertex_groups(gltf, skin_id) for skin_id, skin in enumerate(gltf.data.skins): if hasattr(skin, "node_ids"): BlenderSkin.create_armature_modifiers(gltf, skin_id) if gltf.data.animations: gltf.animation_managed = [] for anim_idx, anim in enumerate(gltf.data.animations): gltf.current_animation_names = {} if list_nodes is not None: for node_idx in list_nodes: BlenderAnimation.anim(gltf, anim_idx, node_idx) for an in gltf.current_animation_names.values(): gltf.animation_managed.append(an) # Parent root node to rotation object if list_nodes is not None: exclude_nodes = [] for node_idx in list_nodes: if gltf.data.nodes[node_idx].is_joint: # Do not change parent if root node is already parented (can be the case for skinned mesh) if not bpy.data.objects[gltf.data.nodes[node_idx]. blender_armature_name].parent: bpy.data.objects[ gltf.data.nodes[node_idx]. blender_armature_name].parent = obj_rotation else: exclude_nodes.append(node_idx) else: # Do not change parent if root node is already parented (can be the case for skinned mesh) if not bpy.data.objects[ gltf.data.nodes[node_idx].blender_object].parent: bpy.data.objects[gltf.data.nodes[node_idx]. blender_object].parent = obj_rotation else: exclude_nodes.append(node_idx) if gltf.animation_object is False: for node_idx in list_nodes: if node_idx in exclude_nodes: continue # for root node that are parented by the process # for example skinned meshes for obj_ in bpy.context.scene.objects: obj_.select_set(False) if gltf.data.nodes[node_idx].is_joint: bpy.data.objects[gltf.data.nodes[ node_idx].blender_armature_name].select_set(True) bpy.context.view_layer.objects.active = bpy.data.objects[ gltf.data.nodes[node_idx].blender_armature_name] else: bpy.data.objects[gltf.data.nodes[node_idx]. 
blender_object].select_set(True) bpy.context.view_layer.objects.active = bpy.data.objects[ gltf.data.nodes[node_idx].blender_object] bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM') # remove object bpy.context.scene.collection.objects.unlink(obj_rotation) bpy.data.objects.remove(obj_rotation)
def calibrate_rotation(_):
    global calibration, raw
    calibration = Quaternion([raw.w, raw.x, raw.y, raw.z])

class SelProject(bpy.types.Operator): bl_idname = "mesh.selproject" bl_label = "SelProject" bl_description = "Use object projection as selection tool" bl_options = {'REGISTER', 'UNDO'} def invoke(self, context, event): global started started = True self.area = context.area self.area.header_text_set(text="SelProject : Enter to confirm - ESC to exit") self.init_selproject(context) context.window_manager.modal_handler_add(self) self._handle = bpy.types.SpaceView3D.draw_handler_add(self.redraw, (), 'WINDOW', 'POST_PIXEL') return {'RUNNING_MODAL'} def modal(self, context, event): global started if event.type in {'RET', 'NUMPAD_ENTER'}: self.area.header_text_set() if self.obhide != None: bpy.ops.object.select_all(action = 'DESELECT') self.obF.select = True bpy.context.scene.objects.active = self.obF bpy.ops.object.delete() self.obhide.hide = False bpy.ops.object.select_all(action = 'DESELECT') self.empt.select = True bpy.context.scene.objects.active = self.empt bpy.ops.object.delete() self.obT.select = True bpy.context.scene.objects.active = self.obT started = False for v in self.vsellist: v.select = True for e in self.esellist: e.select = True for f in self.fsellist: f.select = True self.obF.location = self.originobF self.obT.location = self.originobT self.bmT.select_flush(1) self.bmT.to_mesh(self.meT) self.meT.update() self.bmF.free() self.bmT.free() bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW') bpy.ops.object.editmode_toggle() return {'FINISHED'} elif event.type == 'ESC': self.area.header_text_set() if self.obhide != None: bpy.ops.object.select_all(action = 'DESELECT') self.obF.select = True bpy.context.scene.objects.active = self.obF bpy.ops.object.delete() self.obhide.hide = False bpy.ops.object.select_all(action = 'DESELECT') self.empt.select = True bpy.context.scene.objects.active = self.empt bpy.ops.object.delete() started = False self.obF.location = self.originobF self.obT.location = self.originobT self.bmF.free() self.bmT.free() for obj in self.oldobjlist: obj.select = True self.scn.objects.active = self.oldobj bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW') if self.oldmode == 'EDIT': bpy.ops.object.editmode_toggle() return {'CANCELLED'} elif event.type in {'LEFTMOUSE', 'MIDDLEMOUSE', 'RIGHTMOUSE', 'WHEELDOWNMOUSE', 'WHEELUPMOUSE', 'G', 'S', 'R', 'X', 'Y', 'Z', 'MOUSEMOVE'}: context.region.tag_redraw() return {'PASS_THROUGH'} return {'RUNNING_MODAL'} def getmatrix(self, selobj): # Rotating / panning / zooming 3D view is handled here. # Creates a matrix. 
if selobj.rotation_mode == 'AXIS_ANGLE': # object rotation_quaternionmode axisangle ang, x, y, z = selobj.rotation_axis_angle matrix = Matrix.Rotation(-ang, 4, Vector((x, y, z))) elif selobj.rotation_mode == 'QUATERNION': # object rotation_quaternionmode euler w, x, y, z = selobj.rotation_quaternion x = -x y = -y z = -z self.quat = Quaternion([w, x, y, z]) matrix = self.quat.to_matrix() matrix.resize_4x4() else: # object rotation_quaternionmode euler ax, ay, az = selobj.rotation_euler mat_rotX = Matrix.Rotation(-ax, 4, 'X') mat_rotY = Matrix.Rotation(-ay, 4, 'Y') mat_rotZ = Matrix.Rotation(-az, 4, 'Z') if selobj.rotation_mode == 'XYZ': matrix = mat_rotX * mat_rotY * mat_rotZ elif selobj.rotation_mode == 'XZY': matrix = mat_rotX * mat_rotZ * mat_rotY elif selobj.rotation_mode == 'YXZ': matrix = mat_rotY * mat_rotX * mat_rotZ elif selobj.rotation_mode == 'YZX': matrix = mat_rotY * mat_rotZ * mat_rotX elif selobj.rotation_mode == 'ZXY': matrix = mat_rotZ * mat_rotX * mat_rotY elif selobj.rotation_mode == 'ZYX': matrix = mat_rotZ * mat_rotY * mat_rotX # handle object scaling sx, sy, sz = selobj.scale mat_scX = Matrix.Scale(sx, 4, Vector([1, 0, 0])) mat_scY = Matrix.Scale(sy, 4, Vector([0, 1, 0])) mat_scZ = Matrix.Scale(sz, 4, Vector([0, 0, 1])) matrix = mat_scX * mat_scY * mat_scZ * matrix return matrix def getscreencoords(self, vector): # calculate screencoords of given Vector vector = vector * self.matrixT vector = vector + self.obT.location svector = bpy_extras.view3d_utils.location_3d_to_region_2d(self.region, self.rv3d, vector) if svector == None: return [0, 0] else: return [svector[0], svector[1]] def checksel(self): self.selverts = [] self.matrixT = self.getmatrix(self.obT) self.matrixF = self.getmatrix(self.obF).inverted() direc1 = (self.obF.location - self.empt.location) * self.matrixF direc2 = (self.obF.location - self.empt.location) * self.matrixT.inverted() direc2.length = 10000 for v in self.bmT.verts: vno1 = v.normal vno1.length = 0.0001 vco1 = v.co + vno1 hit1 = self.obT.ray_cast(vco1, vco1 + direc2) vno2 = -v.normal vno2.length = 0.0001 vco2 = v.co + vno2 hit2 = self.obT.ray_cast(vco2, vco2 + direc2) if hit1[2] == -1 or hit2[2] == -1: vco = ((v.co * self.matrixT + self.obT.location) - self.obF.location) * self.matrixF hit = self.obF.ray_cast(vco, vco + direc1) if hit[2] != -1: v.select = True self.selverts.append(v) def init_selproject(self, context): self.obhide = None # main operation self.scn = context.scene self.region = context.region self.rv3d = context.space_data.region_3d self.oldobjlist = list(self.scn.objects) self.oldobj = context.active_object self.oldmode = self.oldobj.mode mesh = self.oldobj.data if self.scn.UseSel and context.mode == 'EDIT_MESH': self.obhide = context.active_object me = self.obhide.data bmundo = bmesh.new() bmundo.from_mesh(me) objlist = [] for obj in self.scn.objects: objlist.append(obj) bpy.ops.mesh.separate(type = 'SELECTED') for obj in self.scn.objects: if not(obj in objlist): self.obF = obj bmundo.to_mesh(me) bmundo.free() self.obhide.hide = True else: self.obF = bpy.data.objects.get(self.scn.FromObject) if context.mode == 'EDIT_MESH': bpy.ops.object.editmode_toggle() self.obF.select = True self.scn.objects.active = self.obF self.originobF = self.obF.location bpy.ops.object.origin_set(type = 'ORIGIN_GEOMETRY') self.meF = self.obF.to_mesh(self.scn, 1, 'PREVIEW') self.bmF = bmesh.new() self.bmF.from_mesh(self.meF) self.obT = bpy.data.objects.get(self.scn.ToObject) self.obT.select = True self.scn.objects.active = self.obT self.originobT = 
self.obT.location bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY') self.meT = self.obT.data self.bmT = bmesh.new() self.bmT.from_mesh(self.meT) self.vsellist = [] for v in self.bmT.verts: if v.select: self.vsellist.append(v) self.esellist = [] for e in self.bmT.edges: if e.select: self.esellist.append(e) self.fsellist = [] for f in self.bmT.faces: if f.select: self.fsellist.append(f) bpy.ops.object.add(type='EMPTY', location=(self.obF.location + self.obT.location) / 2) self.empt = context.active_object self.empt.name = "SelProject_dir_empty" self.selverts = [] def redraw(self): if started: self.checksel() glColor3f(1.0, 1.0, 0) for v in self.selverts: glBegin(GL_QUADS) x, y = self.getscreencoords(v.co) glVertex2f(x-2, y-2) glVertex2f(x-2, y+2) glVertex2f(x+2, y+2) glVertex2f(x+2, y-2) glEnd()
def make_transform_matrix(loc, rot):
    mat_loc = Matrix.Translation(loc)
    mat_rot = Quaternion(rot).to_matrix().to_4x4()
    return mat_loc @ mat_rot

def recursive_node_traverse(self, blender_object, blender_bone, parent_uuid, parent_coll_matrix_world, armature_uuid=None, dupli_world_matrix=None): node = VExportNode() node.uuid = str(uuid.uuid4()) node.parent_uuid = parent_uuid node.set_blender_data(blender_object, blender_bone) # add to parent if needed if parent_uuid is not None: self.add_children(parent_uuid, node.uuid) else: self.roots.append(node.uuid) # Set blender type if blender_bone is not None: node.blender_type = VExportNode.BONE self.nodes[armature_uuid].bones[blender_bone.name] = node.uuid node.use_deform = blender_bone.id_data.data.bones[ blender_bone.name].use_deform elif blender_object.type == "ARMATURE": node.blender_type = VExportNode.ARMATURE elif blender_object.type == "CAMERA": node.blender_type = VExportNode.CAMERA elif blender_object.type == "LIGHT": node.blender_type = VExportNode.LIGHT elif blender_object.instance_type == "COLLECTION": node.blender_type = VExportNode.COLLECTION else: node.blender_type = VExportNode.OBJECT # For meshes with armature modifier (parent is armature), keep armature uuid if node.blender_type == VExportNode.OBJECT: modifiers = {m.type: m for m in blender_object.modifiers} if "ARMATURE" in modifiers and modifiers[ "ARMATURE"].object is not None: if parent_uuid is None or not self.nodes[ parent_uuid].blender_type == VExportNode.ARMATURE: # correct workflow is to parent skinned mesh to armature, but ... # all users don't use correct workflow print( "WARNING: Armature must be the parent of skinned mesh") print( "Armature is selected by its name, but may be false in case of instances" ) # Search an armature by name, and use the first found # This will be done after all objects are setup node.armature_needed = modifiers["ARMATURE"].object.name else: node.armature = parent_uuid # For bones, store uuid of armature if blender_bone is not None: node.armature = armature_uuid # for bone/bone parenting, store parent, this will help armature tree management if parent_uuid is not None and self.nodes[ parent_uuid].blender_type == VExportNode.BONE and node.blender_type == VExportNode.BONE: node.parent_bone_uuid = parent_uuid # Objects parented to bone if parent_uuid is not None and self.nodes[ parent_uuid].blender_type == VExportNode.BONE and node.blender_type != VExportNode.BONE: node.parent_bone_uuid = parent_uuid # World Matrix # Store World Matrix for objects if dupli_world_matrix is not None: node.matrix_world = dupli_world_matrix elif node.blender_type in [ VExportNode.OBJECT, VExportNode.COLLECTION, VExportNode.ARMATURE, VExportNode.CAMERA, VExportNode.LIGHT ]: # Matrix World of object is expressed based on collection instance objects are # So real world matrix is collection world_matrix @ "world_matrix" of object node.matrix_world = parent_coll_matrix_world @ blender_object.matrix_world.copy( ) if node.blender_type == VExportNode.CAMERA and self.export_settings[ gltf2_blender_export_keys.CAMERAS]: correction = Quaternion((2**0.5 / 2, -2**0.5 / 2, 0.0, 0.0)) node.matrix_world @= correction.to_matrix().to_4x4() elif node.blender_type == VExportNode.LIGHT and self.export_settings[ gltf2_blender_export_keys.LIGHTS]: correction = Quaternion((2**0.5 / 2, -2**0.5 / 2, 0.0, 0.0)) node.matrix_world @= correction.to_matrix().to_4x4() elif node.blender_type == VExportNode.BONE: node.matrix_world = self.nodes[ node.armature].matrix_world @ blender_bone.matrix axis_basis_change = Matrix( ((1.0, 0.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0), (0.0, -1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0))) node.matrix_world = 
node.matrix_world @ axis_basis_change # Force empty ? # For duplis, if instancer is not display, we should create an empty if blender_object.is_instancer is True and blender_object.show_instancer_for_render is False: node.force_as_empty = True # Storing this node self.add_node(node) ###### Manage children ###### # standard children if blender_bone is None and blender_object.is_instancer is False: for child_object in blender_object.children: if child_object.parent_bone: # Object parented to bones # Will be manage later continue else: # Classic parenting self.recursive_node_traverse(child_object, None, node.uuid, parent_coll_matrix_world) # Collections if blender_object.instance_type == 'COLLECTION' and blender_object.instance_collection: for dupli_object in blender_object.instance_collection.objects: if dupli_object.parent is not None: continue self.recursive_node_traverse(dupli_object, None, node.uuid, node.matrix_world) # Armature : children are bones with no parent if blender_object.type == "ARMATURE" and blender_bone is None: for b in [ b for b in blender_object.pose.bones if b.parent is None ]: self.recursive_node_traverse(blender_object, b, node.uuid, parent_coll_matrix_world, node.uuid) # Bones if blender_object.type == "ARMATURE" and blender_bone is not None: for b in blender_bone.children: self.recursive_node_traverse(blender_object, b, node.uuid, parent_coll_matrix_world, armature_uuid) # Object parented to bone if blender_bone is not None: for child_object in [ c for c in blender_object.children if c.parent_bone is not None and c.parent_bone == blender_bone.name ]: self.recursive_node_traverse(child_object, None, node.uuid, parent_coll_matrix_world) # Duplis if blender_object.is_instancer is True and blender_object.instance_type != 'COLLECTION': depsgraph = bpy.context.evaluated_depsgraph_get() for (dupl, mat) in [(dup.object.original, dup.matrix_world.copy()) for dup in depsgraph.object_instances if dup.parent and id(dup.parent.original) == id(blender_object)]: self.recursive_node_traverse(dupl, None, node.uuid, parent_coll_matrix_world, dupli_world_matrix=mat)
def __init__(self):
    self.node_index = 0
    self.name = ''
    self.rotation = Quaternion()
    self.location = Vector()

def create_object(mu, muobj, parent): obj = None mesh = None if (not mu.create_colliders and (hasattr(muobj, "shared_mesh") and not hasattr(muobj, "renderer"))): return None name = muobj.transform.name xform = None if hasattr(muobj, "bone") else muobj.transform if hasattr(muobj, "shared_mesh") and hasattr(muobj, "renderer"): mesh = create_mesh(mu, muobj.shared_mesh, name) for poly in mesh.polygons: poly.use_smooth = True obj = create_data_object(name, mesh, xform) attach_material(mesh, muobj.renderer, mu) elif hasattr(muobj, "skinned_mesh_renderer"): smr = muobj.skinned_mesh_renderer mesh = create_mesh(mu, smr.mesh, name) for poly in mesh.polygons: poly.use_smooth = True obj = create_data_object(name, mesh, xform) create_vertex_groups(obj, smr.bones, smr.mesh.boneWeights) create_armature_modifier(obj, mu) attach_material(mesh, smr, mu) if not obj: data = None if hasattr(muobj, "light"): data = create_light(mu, muobj.light, name) elif hasattr(muobj, "camera"): data = create_camera(mu, muobj.camera, name) if data: obj = create_data_object(name, data, xform) # Blender points spotlights along local -Z, unity along local +Z # which is Blender's +Y, so rotate 90 degrees around local X to # go from Unity to Blender rot = Quaternion((0.5**0.5, 0.5**0.5, 0, 0)) obj.rotation_quaternion @= rot if hasattr(muobj, "bone"): #FIXME skinned_mesh_renderer double transforms? Not yet sure this is a #problem, but if so will need to not import the whole hierarchy as one #armature. if obj: obj.parent = mu.armature_obj obj.parent_type = 'BONE' obj.parent_bone = muobj.bone obj.matrix_parent_inverse[1][3] = -BONE_LENGTH pbone = mu.armature_obj.pose.bones[muobj.bone] pbone.scale = muobj.transform.localScale else: if not obj: if mu.create_colliders and hasattr(muobj, "collider"): obj = create_collider(mu, muobj) set_transform(obj, xform) else: obj = create_data_object(name, None, xform) if name[:5] == "node_": print(name, name[:5]) obj.empty_display_type = 'SINGLE_ARROW' print(obj.empty_display_type) # Blender's empties use the +Z axis for single-arrow # display, so that is the most natural orientation for # nodes in blender. # However, KSP uses the transform's +Z (Unity) axis which # is Blender's +Y, so rotate -90 degrees around local X to # go from KSP to Blender print(obj.rotation_quaternion) rot = Quaternion((0.5**0.5, -(0.5**0.5), 0, 0)) obj.rotation_quaternion @= rot print(obj.rotation_quaternion) obj.parent = parent if obj: #FIXME will lose properties from any empty objects that have properties #set when using an armature. Maybe create an empty? Put properties on #bones? mu.collection.objects.link(obj) if hasattr(muobj, "tag_and_layer"): obj.muproperties.tag = muobj.tag_and_layer.tag obj.muproperties.layer = muobj.tag_and_layer.layer if mu.create_colliders and hasattr(muobj, "collider"): if obj.data: cobj = create_collider(mu, muobj) set_transform(cobj, None) mu.collection.objects.link(cobj) cobj.parent = obj muobj.bobj = obj for child in muobj.children: create_object(mu, child, obj) if hasattr(muobj, "animation"): for clip in muobj.animation.clips: create_action(mu, muobj.path, clip) return obj
def pitch_down(self, angle):
    """Pitch the turtle down about the right axis"""
    self.dir.rotate(Quaternion(self.right, radians(-angle)))
    self.dir.normalize()

def loadAnimation(self, path): data = self.teaSerializer.read(path) selectedObj = bpy.context.active_object #Walk up object tree to amature walkObj = selectedObj while not walkObj.name.endswith('-amt') and walkObj.parent: walkObj = walkObj.parent if not walkObj.name.endswith('-amt'): self.log.warning("Amature object nof found for: {}".format( obj.name)) return armatureObj = walkObj obj = walkObj.children[0] bones = armatureObj.pose.bones bpy.context.scene.frame_set(1) for frame in data: bpy.context.scene.objects.active = obj if frame.translation: translation = frame.translation obj.location = (translation.x, translation.y, translation.z) obj.keyframe_insert(data_path='location') if frame.rotation: rotation = frame.rotation obj.rotation_quaternion = (rotation.w, rotation.x, rotation.y, rotation.z) obj.keyframe_insert(data_path='rotation_quaternion') #bpy.ops.anim.keyframe_insert(type='LocRotScale', confirm_success=False) bpy.context.scene.objects.active = armatureObj bpy.ops.object.mode_set(mode='POSE') if len(bones) != len(frame.groups): #raise InconsistentStateException("Bones in amature must match animation groups, existing {} new {}".format(len(bones), len(frame['groups']))) log.warning( "Bones in amature must match animation groups, existing {} new {}" .format(len(bones), len(frame.groups))) #break # This seems to be required to handle mismatched data maxBone = min(len(bones), len(frame.groups)) for groupIndex in range(maxBone - 1, -1, -1): # group index = bone index group = frame.groups[groupIndex] bone = bones[groupIndex] location = Vector( (group.translate.x, group.translate.y, group.translate.z)) #self.log.info("moving bone to %s" % str(group.translate)) #bone.location = location rotation = Quaternion((group.Quaternion.w, group.Quaternion.x, group.Quaternion.y, group.Quaternion.z)) #self.log.info("rotating bone to %s" % str(rotation)) bone.rotation_quaternion = rotation scale = Vector((group.zoom.x, group.zoom.y, group.zoom.z)) #self.log.info("scaling bone to %s" % str(group.zoom)) #bone.scale = scale bone.keyframe_insert(data_path="location") bone.keyframe_insert(data_path="rotation_quaternion") bone.keyframe_insert(data_path="scale") #bpy.ops.anim.keyframe_insert(type='LocRotScale', confirm_success=False) # FIXME translation and zoom are both 0,0,0 # TODO keyframes of rotation are glitchy. probably cause im not doing it right. keyframe has to be set via bpy.ops.anim.keyframe_insert but i dont know how to select the bone. bpy.ops.object.mode_set(mode='OBJECT') bpy.context.scene.frame_set(bpy.context.scene.frame_current + frame.duration) #self.log.info("Loaded Frame") bpy.context.scene.frame_end = bpy.context.scene.frame_current
def run(self, objects, set_location_func, set_rotation_func): apply_location_dict = {} apply_rotation_dict = {} receive = True sync = False try: data = self.sock.recv( 1024 ) except: data = None if self.next_sync: apply_location_dict = self.next_location_dict.copy() apply_rotation_dict = self.next_rotation_dict.copy() self.next_location_dict = {} self.next_rotation_dict = {} self.next_sync = False sync = True receive = False trash = data while(receive): data = trash decoded = OSC.decodeOSC(data) ob_name = str(decoded[0], "utf-8") try: if (ob_name.startswith("@")): # Something to play with: # values that begin with a @ are python expressions, # and there is one parameter after the address in the OSC message # if you set something such as # bpy.data.objects"['Cube']".location.x= {V} # into a OSC path for, say, a face shape smile controller # you can move an object by smiling to_evaluate = ob_name[1:] to_evaluate += str(decoded[2]) try: print(exec(to_evaluate)) except Exception as e: print(to_evaluate) print(str(e)) elif (ob_name.startswith("?")): # This one could be used for something such as mapping # "thumbs up" gesture for rendering # Add the following path to a gesture controller OSC path # ?bpy.ops.render.render() to_evaluate = ob_name[1:] try: print(exec(to_evaluate)) except Exception as e: print(to_evaluate) print(str(e)) elif len(decoded) == 3: #one value if ob_name == "NI_mate_sync": if sync: self.next_sync = True else: sync = True apply_location_dict = self.location_dict.copy() apply_rotation_dict = self.rotation_dict.copy() self.location_dict = {} self.rotation_dict = {} self.next_location_dict = {} self.next_rotation_dict = {} else: if sync: self.next_location_dict[ob_name] = Vector([decoded[2], 0, 0]) else: self.location_dict[ob_name] = Vector([decoded[2], 0, 0]) elif len(decoded) == 5: #location if sync: self.next_location_dict[ob_name] = Vector([decoded[2], -decoded[4], decoded[3]]) else: self.location_dict[ob_name] = Vector([decoded[2], -decoded[4], decoded[3]]) elif len(decoded) == 6: #quaternion if sync: self.next_rotation_dict[ob_name] = Quaternion((-decoded[2], decoded[3], -decoded[5], decoded[4])) else: self.rotation_dict[ob_name] = Quaternion((-decoded[2], decoded[3], -decoded[5], decoded[4])) elif len(decoded) == 9: #location & quaternion if sync: self.next_location_dict[ob_name] = Vector([decoded[2], -decoded[4], decoded[3]]) self.next_rotation_dict[ob_name] = Quaternion((-decoded[5], decoded[6], -decoded[8], decoded[7])) else: self.location_dict[ob_name] = Vector([decoded[2], -decoded[4], decoded[3]]) self.rotation_dict[ob_name] = Quaternion((-decoded[5], decoded[6], -decoded[8], decoded[7])) except: print("NI mate Tools error parsing OSC message: " + str(decoded)) pass try: trash = self.sock.recv(1024) except: break if sync: for key, value in apply_location_dict.items(): set_location_func(objects, key, value, self.original_locations) for key, value in apply_rotation_dict.items(): set_rotation_func(objects, key, value, self.original_rotations) self.location_dict = {} self.rotation_dict = {} else: for key, value in self.location_dict.items(): set_location_func(objects, key, value, self.location_dict) for key, value in self.rotation_dict.items(): set_rotation_func(objects, key, value, self.rotation_dict)
def ReadQuaternion(self):
    return Quaternion((self.ReadFloat(), self.ReadFloat(),
                       self.ReadFloat(), self.ReadFloat()))

def QuaternionLookRotation(fro, to, up):
    vector = (fro - to).normalized()
    vector2 = up.normalized().cross(vector).normalized()
    vector3 = vector.cross(vector2)

    m00 = vector2.x
    m01 = vector2.y
    m02 = vector2.z
    m10 = vector3.x
    m11 = vector3.y
    m12 = vector3.z
    m20 = vector.x
    m21 = vector.y
    m22 = vector.z

    num8 = (m00 + m11) + m22
    q = Quaternion()
    if num8 > 0:
        num = sqrt(num8 + 1)
        q.w = num * 0.5
        num = 0.5 / num
        q.x = (m12 - m21) * num
        q.y = (m20 - m02) * num
        q.z = (m01 - m10) * num
    elif m00 >= m11 and m00 >= m22:
        num7 = sqrt(((1 + m00) - m11) - m22)
        num4 = 0.5 / num7
        q.x = 0.5 * num7
        q.y = (m01 + m10) * num4
        q.z = (m02 + m20) * num4
        q.w = (m12 - m21) * num4
    elif m11 > m22:
        num6 = sqrt(((1 + m11) - m00) - m22)
        num3 = 0.5 / num6
        q.x = (m10 + m01) * num3
        q.y = 0.5 * num6
        q.z = (m21 + m12) * num3
        q.w = (m20 - m02) * num3
    else:
        num5 = sqrt(((1 + m22) - m00) - m11)
        num2 = 0.5 / num5
        q.x = (m20 + m02) * num2
        q.y = (m21 + m12) * num2
        q.z = 0.5 * num5
        q.w = (m01 - m10) * num2
    return q

def __init__(self):
    self.location = Vector()
    self.rotation = Quaternion((1, 0, 0, 0))

def main(): #!!!!!!!!!!!!!!!!!!!!!!!! #Object animation offsetting is implemented with a hack: references to bones are overridden to point to the active object. #!!!!!!!!!!!!!!!!!!!!!!!! #scene properties: # Assign a collection class VectorPropertyItem(bpy.types.PropertyGroup): value = bpy.props.FloatVectorProperty() bpy.utils.register_class(VectorPropertyItem) class QuaternionPropertyItem(bpy.types.PropertyGroup): value = bpy.props.FloatVectorProperty(size=4) bpy.utils.register_class(QuaternionPropertyItem) #offseted transform bpy.types.Scene.OffLocs = \ bpy.props.CollectionProperty(type=VectorPropertyItem) bpy.types.Scene.OffRotsQ = \ bpy.props.CollectionProperty(type=QuaternionPropertyItem) bpy.types.Scene.OffRotsE = \ bpy.props.CollectionProperty(type=VectorPropertyItem) bpy.types.Scene.OffSca = \ bpy.props.CollectionProperty(type=VectorPropertyItem) #transform difference bpy.types.Scene.DiffLocs = \ bpy.props.CollectionProperty(type=VectorPropertyItem) bpy.types.Scene.DiffRotsQ = \ bpy.props.CollectionProperty(type=QuaternionPropertyItem) bpy.types.Scene.DiffRotsE = \ bpy.props.CollectionProperty(type=VectorPropertyItem) bpy.types.Scene.DiffScas = \ bpy.props.CollectionProperty(type=VectorPropertyItem) scene = context.scene blend_falloff = scene.GYAZ_OffsetAnimFalloff s1 = int(scene.GYAZ_OffsetAnimFalloffStrength_1) #s2 = int (scene.GYAZ_OffsetAnimFalloffStrength_2) s2 = 0 blend_falloff_strength = float(str(s1) + '.' + str(s2)) ######################################################################### ######################################################################### obj = bpy.context.active_object scene = bpy.context.scene current_frame = scene.frame_current first_frame = scene.frame_start last_frame = scene.frame_end OffLocs = scene.OffLocs OffRotsQ = scene.OffRotsQ OffRotsE = scene.OffRotsE OffScas = scene.OffSca DiffLocs = scene.DiffLocs DiffRotsQ = scene.DiffRotsQ DiffRotsE = scene.DiffRotsE DiffScas = scene.DiffScas #markers markers = scene.timeline_markers m1 = 0 m2 = 0 m3 = 0 m4 = 0 if self.Mode == 'SIMPLE_LOCAL' or self.Mode == 'LOCAL_2': if len(markers) != 0: markers = [markers[0].frame, markers[1].frame] sorted_markers = sorted(markers) m1 = sorted_markers[0] m2 = sorted_markers[1] if self.Mode == 'LOCAL_4': if len(markers) != 0: markers = [ markers[0].frame, markers[1].frame, markers[2].frame, markers[3].frame ] sorted_markers = sorted(markers) m1 = sorted_markers[0] m2 = sorted_markers[1] m3 = sorted_markers[2] m4 = sorted_markers[3] #get selected bones if obj.type == 'ARMATURE': selected_bones = [] bones = obj.data.bones for bone in bones: if bone.select == True: selected_bones.append(bone.name) #get active action's name action_name = (obj.animation_data.action.name if obj.animation_data is not None and obj.animation_data.action is not None else "") if action_name != None: if bpy.context.mode == 'POSE': pbones = obj.pose.bones #HACK: object animation if bpy.context.mode == 'OBJECT': selected_bones = [obj] pbone = obj for index, bone in enumerate(selected_bones): if bpy.context.mode == 'POSE': pbone = pbones[bone] #offseted transform item = OffLocs.add() item.value = pbone.location if pbone.rotation_mode == 'QUATERNION': item = OffRotsQ.add() item.value = pbone.rotation_quaternion else: item = OffRotsE.add() item.value = pbone.rotation_euler item = OffScas.add() item.value = pbone.scale #delete offset scene.frame_set(current_frame + 1) scene.frame_set(current_frame) #go on for index, bone in enumerate(selected_bones): if bpy.context.mode == 'POSE': pbone = 
pbones[bone] #HACK: object animation if bpy.context.mode == 'OBJECT': selected_bones = [obj] pbone = obj #initial transform IniLoc = pbone.location if pbone.rotation_mode == 'QUATERNION': IniRotQ = pbone.rotation_quaternion else: IniRotE = pbone.rotation_euler IniSca = pbone.scale #transform difference DiffLoc = Vector((OffLocs[index].value[:])) - IniLoc if pbone.rotation_mode == 'QUATERNION': DiffRotQ = Quaternion( (OffRotsQ[index].value[:])) - IniRotQ else: DiffRotE = Vector( (OffRotsE[index].value[:])) - Vector(IniRotE) DiffSca = Vector((OffScas[index].value[:])) - IniSca #function: offset fcurves def offset_transform_fcurves(attribute, offset_value, m1, m2, m3, m4): if attribute in fc.data_path: keys = fc.keyframe_points for key in keys: def adjust_fcurve_all(offset_value): def adjust_fcurve(i): if fc.array_index == i: key.co.y += offset_value[i] key.handle_left.y += offset_value[ i] key.handle_right.y += offset_value[ i] adjust_fcurve(0) adjust_fcurve(1) adjust_fcurve(2) adjust_fcurve(3) def falloff_selector(): if blend_falloff == 'LINEAR': new_percentage = lerp( interpolation_start, interpolation_end, blend_percentage) elif blend_falloff == 'SMOOTH': new_percentage = smooth_lerp( interpolation_start, interpolation_end, blend_percentage, blend_falloff_strength) elif blend_falloff == 'EASE_IN': new_percentage = ease_in_lerp( interpolation_start, interpolation_end, blend_percentage, blend_falloff_strength) elif blend_falloff == 'EASE_OUT': new_percentage = ease_out_lerp( interpolation_start, interpolation_end, blend_percentage, blend_falloff_strength) return new_percentage if self.Mode == 'GLOBAL': adjust_fcurve_all(offset_value) elif self.Mode == 'SIMPLE_LOCAL': frame = key.co.x if m1 <= frame <= m2: adjust_fcurve_all(offset_value) elif self.Mode == 'LOCAL_2': frame = key.co.x frame_offset = 0 #blend in if m1 != current_frame: frame_offset = 1 blend_start = m1 blend_end = current_frame interpolation_start = 0 interpolation_end = 1 blend_length = blend_end - blend_start if blend_start <= frame <= blend_end: current_blend_frame = frame - blend_start blend_percentage = current_blend_frame / blend_length adjust_fcurve_all( offset_value * falloff_selector()) #blend out if m2 != current_frame: blend_start = current_frame + frame_offset blend_end = m2 interpolation_start = 1 interpolation_end = 0 interpolation_strength = 2 blend_length = blend_end - blend_start if blend_start <= frame <= blend_end: current_blend_frame = frame - blend_start blend_percentage = current_blend_frame / blend_length adjust_fcurve_all( offset_value * falloff_selector()) elif self.Mode == 'LOCAL_4': frame = key.co.x #blend in blend_start = m1 blend_end = m2 interpolation_start = 0 interpolation_end = 1 interpolation_strength = 2 blend_length = blend_end - blend_start if blend_start <= frame <= blend_end: current_blend_frame = frame - blend_start blend_percentage = current_blend_frame / blend_length adjust_fcurve_all(offset_value * falloff_selector()) #keep offset if m2 + 1 <= frame <= m3: adjust_fcurve_all(offset_value) #blend out blend_start = m3 + 1 blend_end = m4 interpolation_start = 1 interpolation_end = 0 interpolation_strength = 2 blend_length = blend_end - blend_start if blend_start <= frame <= blend_end: current_blend_frame = frame - blend_start blend_percentage = current_blend_frame / blend_length new_percentage = smooth_lerp( interpolation_start, interpolation_end, blend_percentage, interpolation_strength) adjust_fcurve_all(offset_value * falloff_selector()) #filter f-curves for bone name action = 
bpy.data.actions[action_name] fcurves = action.fcurves for fc in fcurves: if bpy.context.mode == 'POSE': if 'pose.bones["' + bone + '"].location' in fc.data_path: offset_transform_fcurves( 'location', DiffLoc, m1, m2, m3, m4) if 'pose.bones["' + bone + '"].scale' in fc.data_path: offset_transform_fcurves( 'scale', DiffSca, m1, m2, m3, m4) if pbone.rotation_mode == 'QUATERNION': if 'pose.bones["' + bone + '"].rotation_quaternion' in fc.data_path: offset_transform_fcurves( 'rotation_quaternion', DiffRotQ, m1, m2, m3, m4) else: if 'pose.bones["' + bone + '"].rotation_euler' in fc.data_path: offset_transform_fcurves( 'rotation_euler', DiffRotE, m1, m2, m3, m4) #OBJECT ANIMATION if bpy.context.mode == 'OBJECT': if fc.data_path.startswith('location'): offset_transform_fcurves( 'location', DiffLoc, m1, m2, m3, m4) if fc.data_path.startswith('scale'): offset_transform_fcurves( 'scale', DiffSca, m1, m2, m3, m4) if pbone.rotation_mode == 'QUATERNION': if fc.data_path.startswith( 'rotation_quaternion'): offset_transform_fcurves( 'rotation_quaternion', DiffRotQ, m1, m2, m3, m4) else: if fc.data_path.startswith('rotation_euler'): offset_transform_fcurves( 'rotation_euler', DiffRotE, m1, m2, m3, m4) #################################################################### #################################################################### #CLEAN UP #clear collections OffLocs.clear() del OffLocs DiffLocs.clear() del DiffLocs if pbone.rotation_mode == 'QUATERNION': OffRotsQ.clear() del OffRotsQ DiffRotsQ.clear() del DiffRotsQ else: OffRotsE.clear() del OffRotsE DiffRotsE.clear() del DiffRotsE OffScas.clear() del OffScas DiffScas.clear() del DiffScas #delete markers scene.timeline_markers.clear() #force viewport update scene.frame_set(current_frame + 1) scene.frame_set(current_frame)
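The offset operator above calls interpolation helpers (lerp, smooth_lerp, ease_in_lerp, ease_out_lerp) that are defined elsewhere in the add-on. The sketch below shows one plausible shape for them, assuming each maps a blend percentage in [0, 1] between two values with an optional strength exponent; it is illustrative, not the add-on's actual implementation.

def lerp(a, b, t):
    # plain linear interpolation between a and b
    return a + (b - a) * t

def ease_in_lerp(a, b, t, strength):
    # slow start, fast finish; strength > 1 exaggerates the effect
    return lerp(a, b, t ** strength)

def ease_out_lerp(a, b, t, strength):
    # fast start, slow finish
    return lerp(a, b, 1.0 - (1.0 - t) ** strength)

def smooth_lerp(a, b, t, strength):
    # ease in over the first half, ease out over the second
    s = t ** strength / (t ** strength + (1.0 - t) ** strength)
    return lerp(a, b, s)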
def main(): # time logging global start_time start_time = time.time() import argparse # parse commandline arguments log_message(sys.argv) parser = argparse.ArgumentParser( description='Generate synth dataset images.') parser.add_argument('--idx', type=int, help='idx of the requested sequence') parser.add_argument('--name', type=str, help='name of the requested sequence') parser.add_argument('--ishape', type=int, help='requested cut, according to the stride') parser.add_argument('--stride', type=int, help='stride amount, default 50') parser.add_argument('--direction', type=str, help='subject direction, default forward') parser.add_argument('--subject_id', type=int, help='local subject id, default 0') args = parser.parse_args(sys.argv[sys.argv.index("---") + 1:]) idx = args.idx name = args.name ishape = args.ishape stride = args.stride direction = args.direction subject_id = args.subject_id log_message("input idx: %d" % idx) log_message("input name: %s" % name) log_message("input ishape: %d" % ishape) log_message("input stride: %d" % stride) log_message("Subject direction: %s" % direction) log_message("Local subject id: %d" % subject_id) if idx == None: exit(1) if ishape == None: exit(1) if stride == None: log_message("WARNING: stride not specified, using default value 50") stride = 50 # import idx info (name, split) idx_info = load(open("pkl/idx_info.pickle", 'rb')) # get runpass (runpass, idx) = divmod(idx, len(idx_info)) log_message("runpass: %d" % runpass) log_message("output idx: %d" % idx) for dic in idx_info: if dic['name'] == name: idx_info = dic break else: idx_info = idx_info[idx] log_message("sequence: %s" % idx_info['name']) log_message("nb_frames: %f" % idx_info['nb_frames']) #log_message("use_split: %s" % idx_info['use_split']) # import configuration log_message("Importing configuration") import config params = config.load_file('config', 'SYNTH_DATA') smpl_data_folder = params['smpl_data_folder'] smpl_data_filename = params['smpl_data_filename'] bg_path = params['bg_path'] resy = params['resy'] resx = params['resx'] clothing_option = params['clothing_option'] # grey, nongrey or all tmp_path = params['tmp_path'] output_path = params['output_path'] output_types = params['output_types'] stepsize = params['stepsize'] clipsize = params['clipsize'] openexr_py2_path = params['openexr_py2_path'] # compute number of cuts nb_ishape = max( 1, int(np.ceil( (idx_info['nb_frames'] - (clipsize - stride)) / stride))) log_message("Max ishape: %d" % (nb_ishape - 1)) if ishape == None: exit(1) assert (ishape < nb_ishape) # name is set given idx name = idx_info['name'] output_path = join(output_path, 'run%d' % runpass, name.replace(" ", "")) params['output_path'] = output_path tmp_path = join( tmp_path, 'run%d_%s_c%04d' % (runpass, name.replace(" ", ""), (ishape + 1))) params['tmp_path'] = tmp_path # check if already computed # + clean up existing tmp folders if any if exists(tmp_path) and tmp_path != "" and tmp_path != "/": os.system('rm -rf %s' % tmp_path) rgb_vid_filename = "%s_c%04d.mp4" % (join(output_path, name.replace( ' ', '')), (ishape + 1)) # create tmp directory if not exists(tmp_path): mkdir_safe(tmp_path) # >> don't use random generator before this point << # initialize RNG with seeds from sequence id import hashlib s = "synth_data:%d:%d:%d" % (idx, runpass, ishape) seed_number = int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16) % (10** 8) log_message("GENERATED SEED %d from string '%s'" % (seed_number, s)) random.seed(seed_number) np.random.seed(seed_number) if 
(output_types['vblur']): vblur_factor = np.random.normal(0.5, 0.5) params['vblur_factor'] = vblur_factor log_message("Setup Blender") # create copy-spher.harm. directory if not exists sh_dir = join(tmp_path, 'spher_harm') if not exists(sh_dir): mkdir_safe(sh_dir) sh_dst = join(sh_dir, 'sh_%02d_%05d.osl' % (runpass, idx)) os.system('cp spher_harm/sh.osl %s' % sh_dst) genders = {0: 'male', 1: 'female'} # pick random gender gender = genders[sum(divmod(subject_id, 2)) % 2] #genders[subject_id % 2]#choice(genders) scene = bpy.data.scenes['Scene'] scene.render.engine = 'CYCLES' bpy.data.materials['Material'].use_nodes = True scene.cycles.shading_system = True scene.use_nodes = True log_message("Listing background images") #bg_names = join(bg_path, '%s_img.txt' % idx_info['use_split']) bg_names = join(bg_path, 'bg.txt') nh_txt_paths = [] with open(bg_names) as f: for line in f: nh_txt_paths.append(join(bg_path, line)) # grab clothing names log_message("clothing: %s" % clothing_option) with open(join(smpl_data_folder, 'textures', '%s_train.txt' % gender)) as f: txt_paths = f.read().splitlines() # if using only one source of clothing if clothing_option == 'nongrey': txt_paths = [k for k in txt_paths if 'nongrey' in k] elif clothing_option == 'grey': txt_paths = [k for k in txt_paths if 'nongrey' not in k] # random clothing texture cloth_img_name = choice( txt_paths) #txt_paths[subject_id]#choice(txt_paths) cloth_img_name = join(smpl_data_folder, cloth_img_name) cloth_img = bpy.data.images.load(cloth_img_name) # random background bg_img_name = choice(nh_txt_paths)[:-1] bg_img = bpy.data.images.load(bg_img_name) log_message("Loading parts segmentation") beta_stds = np.load(join(smpl_data_folder, ('%s_beta_stds.npy' % gender))) log_message("Building materials tree") mat_tree = bpy.data.materials['Material'].node_tree create_sh_material(mat_tree, sh_dst, cloth_img) res_paths = create_composite_nodes(scene.node_tree, params, img=bg_img, idx=idx) log_message("Loading smpl data") smpl_data = np.load(join(smpl_data_folder, smpl_data_filename)) log_message("Initializing scene") camera_distance = 11.0 #np.random.normal(8.0, 1) params['camera_distance'] = camera_distance ob, obname, arm_ob, cam_ob = init_scene(scene, params, gender) setState0() ob.select = True bpy.context.scene.objects.active = ob segmented_materials = True #True: 0-24, False: expected to have 0-1 bg/fg log_message("Creating materials segmentation") # create material segmentation if segmented_materials: materials = create_segmentation(ob, params) prob_dressed = { 'leftLeg': .5, 'leftArm': .9, 'leftHandIndex1': .01, 'rightShoulder': .8, 'rightHand': .01, 'neck': .01, 'rightToeBase': .9, 'leftShoulder': .8, 'leftToeBase': .9, 'rightForeArm': .5, 'leftHand': .01, 'spine': .9, 'leftFoot': .9, 'leftUpLeg': .9, 'rightUpLeg': .9, 'rightFoot': .9, 'head': .01, 'leftForeArm': .5, 'rightArm': .5, 'spine1': .9, 'hips': .9, 'rightHandIndex1': .01, 'spine2': .9, 'rightLeg': .5 } else: materials = {'FullBody': bpy.data.materials['Material']} prob_dressed = {'FullBody': .6} orig_pelvis_loc = None random_zrot = get_zrot(name, direction) if direction == 'forward': orig_pelvis_loc = ( arm_ob.matrix_world.copy() * arm_ob.pose.bones[obname + '_Pelvis'].head.copy()) - Vector( (-1., 0.75, -1.3)) elif direction == 'backward': orig_pelvis_loc = ( arm_ob.matrix_world.copy() * arm_ob.pose.bones[obname + '_Pelvis'].head.copy()) - Vector( (-1., 0.75, 3.1)) orig_cam_loc = cam_ob.location.copy() print("CAM LOC:", orig_cam_loc, type(orig_cam_loc)) # unblocking both the 
pose and the blendshape limits for k in ob.data.shape_keys.key_blocks.keys(): bpy.data.shape_keys["Key"].key_blocks[k].slider_min = -10 bpy.data.shape_keys["Key"].key_blocks[k].slider_max = 10 log_message("Loading body data") cmu_parms, fshapes, name = load_body_data(smpl_data, ob, obname, name, gender=gender) log_message("Loaded body data for %s" % name) nb_fshapes = len(fshapes) #if idx_info['use_split'] == 'train': # fshapes = fshapes[:int(nb_fshapes*0.8)] #elif idx_info['use_split'] == 'test': # fshapes = fshapes[int(nb_fshapes*0.8):] # pick random real body shape shape = fshapes[ subject_id % nb_fshapes] #+random_shape(.5)#choice(fshapes) #+random_shape(.5) can add noise #shape = random_shape(3.) # random body shape ndofs = 10 scene.objects.active = arm_ob orig_trans = np.asarray(arm_ob.pose.bones[obname + '_Pelvis'].location).copy() # create output directory if not exists(output_path): mkdir_safe(output_path) # spherical harmonics material needs a script to be loaded and compiled scs = [] for mname, material in materials.items(): scs.append(material.node_tree.nodes['Script']) scs[-1].filepath = sh_dst scs[-1].update() rgb_dirname = name.replace(" ", "") + '_c%04d.mp4' % (ishape + 1) rgb_path = join(tmp_path, rgb_dirname) data = cmu_parms[name] data = cut_sequence(name, data) fbegin = ishape * stepsize * stride fend = min(ishape * stepsize * stride + stepsize * clipsize, len(data['poses'])) log_message("Computing how many frames to allocate") N = len(data['poses'][fbegin:fend:stepsize]) log_message("Allocating %d frames in mat file" % N) # force recomputation of joint angles unless shape is all zeros curr_shape = np.zeros_like(shape) nframes = len(data['poses'][::stepsize]) matfile_info = join( output_path, name.replace(" ", "") + "_c%04d_info.mat" % (ishape + 1)) log_message('Working on %s' % matfile_info) # allocate dict_info = {} dict_info['bg'] = np.zeros((N, ), dtype=np.object) # background image path dict_info['camLoc'] = np.empty(3) # (1, 3) dict_info['clipNo'] = ishape + 1 dict_info['cloth'] = np.zeros( (N, ), dtype=np.object) # clothing texture image path dict_info['gender'] = np.empty(N, dtype='uint8') # 0 for male, 1 for female dict_info['joints2D'] = np.empty( (2, 24, N), dtype='float32') # 2D joint positions in pixel space dict_info['joints3D'] = np.empty( (3, 24, N), dtype='float32') # 3D joint positions in world coordinates dict_info['light'] = np.empty((9, N), dtype='float32') dict_info['pose'] = np.empty( (data['poses'][0].size, N), dtype='float32') # joint angles from SMPL (CMU) dict_info['sequence'] = name.replace(" ", "") + "_c%04d" % (ishape + 1) dict_info['shape'] = np.empty((ndofs, N), dtype='float32') dict_info['zrot'] = np.empty(N, dtype='float32') dict_info['camDist'] = camera_distance dict_info['stride'] = stride if name.replace(" ", "").startswith('h36m'): dict_info['source'] = 'h36m' else: dict_info['source'] = 'cmu' if (output_types['vblur']): dict_info['vblur_factor'] = np.empty(N, dtype='float32') # for each clipsize'th frame in the sequence get_real_frame = lambda ifr: ifr reset_loc = False batch_it = 0 curr_shape = reset_joint_positions(orig_trans, shape, ob, arm_ob, obname, scene, cam_ob, smpl_data['regression_verts'], smpl_data['joint_regressor']) arm_ob.animation_data_clear() cam_ob.animation_data_clear() # create a keyframe animation with pose, translation, blendshapes and camera motion # LOOP TO CREATE 3D ANIMATION for seq_frame, (pose, trans) in enumerate( zip(data['poses'][fbegin:fend:stepsize], data['trans'][fbegin:fend:stepsize])): iframe = 
seq_frame scene.frame_set(get_real_frame(seq_frame)) # Change shape if is_arbitrary_shape and iframe % 2 == 0: shape = choice(fshapes) shape += np.random.normal(0, .1, shape.shape) # apply the translation, pose and shape to the character apply_trans_pose_shape(Vector(trans), pose, shape, ob, arm_ob, obname, scene, cam_ob, get_real_frame(seq_frame)) dict_info['shape'][:, iframe] = shape[:ndofs] dict_info['pose'][:, iframe] = pose dict_info['gender'][iframe] = list(genders)[list( genders.values()).index(gender)] if (output_types['vblur']): dict_info['vblur_factor'][iframe] = vblur_factor arm_ob.pose.bones[obname + '_root'].rotation_quaternion = Quaternion( Euler((0, 0, random_zrot), 'XYZ')) arm_ob.pose.bones[obname + '_root'].keyframe_insert( 'rotation_quaternion', frame=get_real_frame(seq_frame)) dict_info['zrot'][iframe] = random_zrot scene.update() # Bodies centered only in each minibatch of clipsize frames if seq_frame == 0 or reset_loc: reset_loc = False new_pelvis_loc = arm_ob.matrix_world.copy() * arm_ob.pose.bones[ obname + '_Pelvis'].head.copy() cam_ob.location = orig_cam_loc.copy() + (new_pelvis_loc.copy() - orig_pelvis_loc.copy()) cam_ob.keyframe_insert('location', frame=get_real_frame(seq_frame)) dict_info['camLoc'] = np.array(cam_ob.location) scene.node_tree.nodes['Image'].image = bg_img for part, material in materials.items(): material.node_tree.nodes['Vector Math'].inputs[1].default_value[:2] = ( 0, 0) # random light sh_coeffs = .7 * (2 * np.random.rand(9) - 1) sh_coeffs[0] = .5 + .9 * np.random.rand( ) # Ambient light (first coeff) needs a minimum is ambient. Rest is uniformly distributed, higher means brighter. sh_coeffs[1] = -.7 * np.random.rand() for ish, coeff in enumerate(sh_coeffs): for sc in scs: sc.inputs[ish + 1].default_value = coeff # iterate over the keyframes and render # LOOP TO RENDER for seq_frame, (pose, trans) in enumerate( zip(data['poses'][fbegin:fend:stepsize], data['trans'][fbegin:fend:stepsize])): scene.frame_set(get_real_frame(seq_frame)) iframe = seq_frame dict_info['bg'][iframe] = bg_img_name dict_info['cloth'][iframe] = cloth_img_name dict_info['light'][:, iframe] = sh_coeffs img_path = join(rgb_path, 'Image%04d.png' % get_real_frame(seq_frame)) scene.render.use_antialiasing = False scene.render.filepath = img_path log_message("Rendering frame %d" % seq_frame) # disable render output logfile = '/dev/null' open(logfile, 'a').close() old = os.dup(1) sys.stdout.flush() os.close(1) os.open(logfile, os.O_WRONLY) # Render bpy.ops.render.render(write_still=True) # disable output redirection os.close(1) os.dup(old) os.close(old) # bone locations should be saved after rendering so that the bones are updated bone_locs_2D, bone_locs_3D = get_bone_locs(obname, arm_ob, scene, cam_ob) dict_info['joints2D'][:, :, iframe] = np.transpose(bone_locs_2D) dict_info['joints3D'][:, :, iframe] = np.transpose(bone_locs_3D) #Draw skeleton if is_visualization: draw_skeleton(img_path, dict_info['joints2D'][:, :, iframe]) reset_loc = (bone_locs_2D.max(axis=-1) > 256).any() or (bone_locs_2D.min(axis=0) < 0).any() arm_ob.pose.bones[obname + '_root'].rotation_quaternion = Quaternion( (1, 0, 0, 0)) # save a .blend file for debugging: # bpy.ops.wm.save_as_mainfile(filepath=join(tmp_path, 'pre.blend')) # save RGB data with ffmpeg (if you don't have h264 codec, you can replace with another one and control the quality with something like -q:v 3) cmd_ffmpeg = 'ffmpeg -y -r 25 -i ' '%s' ' -c:v h264 -pix_fmt yuv420p -crf 23 ' '%s_c%04d.mp4' '' % ( join(rgb_path, 'Image%04d.png'), 
join(output_path, name.replace(' ', '')), (ishape + 1)) log_message("Generating RGB video (%s)" % cmd_ffmpeg) os.system(cmd_ffmpeg) if (output_types['vblur']): cmd_ffmpeg_vblur = 'ffmpeg -y -r 25 -i ' '%s' ' -c:v h264 -pix_fmt yuv420p -crf 23 -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" ' '%s_c%04d.mp4' '' % ( join(res_paths['vblur'], 'Image%04d.png'), join(output_path, name.replace(' ', '') + '_vblur'), (ishape + 1)) log_message("Generating vblur video (%s)" % cmd_ffmpeg_vblur) os.system(cmd_ffmpeg_vblur) if (output_types['fg']): cmd_ffmpeg_fg = 'ffmpeg -y -r 25 -i ' '%s' ' -c:v h264 -pix_fmt yuv420p -crf 23 ' '%s_c%04d.mp4' '' % ( join(res_paths['fg'], 'Image%04d.png'), join(output_path, name.replace(' ', '') + '_fg'), (ishape + 1)) log_message("Generating fg video (%s)" % cmd_ffmpeg_fg) os.system(cmd_ffmpeg_fg) cmd_tar = 'tar -czvf %s/%s.tar.gz -C %s %s' % (output_path, rgb_dirname, tmp_path, rgb_dirname) log_message("Tarballing the images (%s)" % cmd_tar) os.system(cmd_tar) # save annotation excluding png/exr data to _info.mat file import scipy.io scipy.io.savemat(matfile_info, dict_info, do_compression=True)
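The render loop above silences Blender's console output around bpy.ops.render.render() by duplicating and swapping file descriptor 1. A reusable context-manager wrapper for the same trick (a sketch; the name suppress_stdout is not from the source) could look like this:

import os
import sys
from contextlib import contextmanager

@contextmanager
def suppress_stdout(logfile='/dev/null'):
    # redirect fd 1 to the logfile for the duration of the block
    open(logfile, 'a').close()
    sys.stdout.flush()
    old = os.dup(1)
    os.close(1)
    os.open(logfile, os.O_WRONLY)  # becomes the new fd 1
    try:
        yield
    finally:
        os.close(1)
        os.dup(old)  # restore the original fd 1
        os.close(old)

# hypothetical usage:
#     with suppress_stdout():
#         bpy.ops.render.render(write_still=True)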
def test_roundtrip_only_needed_keyframes(self): animation = get_compressed_animation_empty() animation.header.flavor = 0 channel = TimeCodedAnimationChannel(num_time_codes=5, vector_len=1, pivot=1, type=1, time_codes=[ TimeCodedDatum(time_code=0, value=3.0), TimeCodedDatum(time_code=1, value=3.0), TimeCodedDatum(time_code=2, value=3.0), TimeCodedDatum(time_code=3, value=3.0), TimeCodedDatum(time_code=4, value=3.0) ]) channel_q = TimeCodedAnimationChannel( num_time_codes=7, vector_len=4, pivot=1, type=6, time_codes=[ TimeCodedDatum(time_code=0, value=Quaternion((0.1, 0.73, 0.7, 0.2))), TimeCodedDatum(time_code=1, value=Quaternion((0.1, 0.73, 0.7, 0.2))), TimeCodedDatum(time_code=2, value=Quaternion((0.1, 0.73, 0.7, 0.1))), TimeCodedDatum(time_code=3, value=Quaternion((0.1, 0.73, 0.7, 0.1))), TimeCodedDatum(time_code=4, value=Quaternion((0.1, 0.73, 0.7, 0.1))), TimeCodedDatum(time_code=5, value=Quaternion((0.2, 0.73, 0.7, 0.1))), TimeCodedDatum(time_code=6, value=Quaternion((0.2, 0.73, 0.7, 0.1))) ]) animation.time_coded_channels = [channel, channel_q] hierarchy = get_hierarchy() hierarchy.pivots = [ get_roottransform(), HierarchyPivot(name='bone', parent_id=0) ] self.filepath = self.outpath() + 'output' create_data(self, [], None, hierarchy, [], None, animation) channel = TimeCodedAnimationChannel(num_time_codes=2, vector_len=1, pivot=1, type=1, time_codes=[ TimeCodedDatum(time_code=0, value=3.0), TimeCodedDatum(time_code=4, value=3.0) ]) animation.time_coded_channels = [channel, channel_q] self.compare_data([], None, None, [], None, animation)
def __set(self, value):
    if not self.use_camera_axes:
        value = value * Quaternion((1, 0, 0), math.pi * 0.5)
    if self.is_region_3d or (not self.quadview_lock):
        self.region_data.view_rotation = value.copy()  # .normalized()
        self.region_data.update()
def convert_quaternion(q):
    """Convert a glTF quaternion to a Blender quaternion."""
    # xyzw -> wxyz
    return Quaternion([q[3], q[0], q[1], q[2]])
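mathutils.Quaternion stores components as (w, x, y, z) while glTF serializes rotations as (x, y, z, w), so the function above is only an index shuffle. A minimal sanity check (illustrative, using the identity rotation):

from mathutils import Quaternion

def _check_convert_quaternion():
    gltf_identity = [0.0, 0.0, 0.0, 1.0]          # glTF order: x, y, z, w
    q = convert_quaternion(gltf_identity)
    assert q == Quaternion((1.0, 0.0, 0.0, 0.0))  # Blender order: w, x, y, z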
def invoke(self, context, event): context.scene.view_straighten_is_running = True # state 0 allows hold, 1 key released, 2 held, 3 snapping, 4 at head badkeys = {'NONE', 'LEFTMOUSE', 'RIGHTMOUSE', 'MIDDLEMOUSE'} self.hotkey = event.type if event.type not in badkeys else '' self.state = 0 if self.hotkey != '' else 1 self.interrupt = False self.persptoggle = False self.mpos_start = (event.mouse_region_x, event.mouse_region_y) self.mpos_cur = self.mpos_start self.now = 0.0 self.start = 0.0 self.factor = 0.0 self.near = Quaternion((0.0, 0.0, 0.0, 0.0)) self.nearname = '' self.directions = { "Front": (Quaternion((0.7071, 0.7071, 0.0000, 0.0000)), Quaternion((0.5000, 0.5000, -0.5000, 0.5000)), Quaternion((0.0000, 0.0000, -0.7071, 0.7071)), Quaternion((-0.5000, -0.5000, -0.5000, 0.5000))), "Right": (Quaternion((0.5000, 0.5000, 0.5000, 0.5000)), Quaternion((0.0000, 0.7071, 0.0000, 0.7071)), Quaternion((-0.5000, 0.5000, -0.5000, 0.5000)), Quaternion((-0.7071, 0.0000, -0.7071, 0.0000))), "Back": (Quaternion((0.0000, 0.0000, 0.7071, 0.7071)), Quaternion((-0.5000, 0.5000, 0.5000, 0.5000)), Quaternion((-0.7071, 0.7071, 0.0000, 0.0000)), Quaternion((-0.5000, 0.5000, -0.5000, -0.5000))), "Left": (Quaternion((0.5000, 0.5000, -0.5000, -0.5000)), Quaternion((0.7071, 0.0000, -0.7071, 0.0000)), Quaternion((0.5000, -0.5000, -0.5000, 0.5000)), Quaternion((0.0000, -0.7071, 0.0000, 0.7071))), "Top": (Quaternion((1.0000, 0.0000, 0.0000, 0.0000)), Quaternion((0.7071, 0.0000, 0.0000, 0.7071)), Quaternion((0.0000, 0.0000, 0.0000, 1.0000)), Quaternion((-0.7071, 0.0000, 0.0000, 0.7071))), "Bottom": (Quaternion((0.0000, 1.0000, 0.0000, 0.0000)), Quaternion((0.0000, 0.7071, -0.7071, 0.0000)), Quaternion((0.0000, 0.0000, -1.0000, 0.0000)), Quaternion((0.0000, -0.7071, -0.7071, 0.0000))) } view3d = context.space_data.region_3d self.startangle = view3d.view_rotation self.rotation = 0 neardot = 0.0 for side in self.directions: i = 0 for angle in self.directions[side]: dot = abs(angle.dot(view3d.view_rotation)) if dot > neardot: neardot = dot self.nearname = side self.near = angle self.rotation = i i = i + 1 if i < 3 else 0 if neardot >= 0.99997 and self.secondcall == 'TOGGLE': if self.hold != 'IGNORE': self.persptoggle = True self.status = 2 else: view3d.view_perspective = 'PERSP' if not view3d.is_perspective else 'ORTHO' view3d.update() context.scene.view_straighten_is_running = False return {'CANCELLED'} if self.text: self.textcolor = context.user_preferences.themes[ 'Default'].view_3d.space.text_hi self._handle = context.region.callback_add(draw_callback_px, (self, context), 'POST_PIXEL') if self.hold != 'IGNORE': # neighbors are ordered NESW self.neighbors = { "Front": ("Top", "Right", "Bottom", "Left"), "Right": ("Top", "Back", "Bottom", "Front"), "Back": ("Top", "Left", "Bottom", "Right"), "Left": ("Top", "Front", "Bottom", "Back"), "Top": ("Back", "Right", "Front", "Left"), "Bottom": ("Front", "Right", "Back", "Left") } context.window_manager.modal_handler_add(self) self._timer = context.window_manager.event_timer_add( 0.01, context.window) return {'RUNNING_MODAL'}
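The self.directions table above hard-codes four roll variants of each axis view. For reference, each entry can be reproduced from the view's Euler orientation plus quarter-turn rolls about the view axis; the helper below is a sketch derived from those values, not part of the operator.

import math
from mathutils import Euler, Quaternion

def view_quaternion(rx_deg, rz_deg, roll_step=0):
    # base orientation of the view, then roll_step quarter turns about the view axis
    base = Euler((math.radians(rx_deg), 0.0, math.radians(rz_deg)), 'XYZ').to_quaternion()
    return base * Quaternion((0.0, 0.0, 1.0), roll_step * math.pi / 2.0)

# view_quaternion(90, 0)   -> (0.7071, 0.7071, 0.0, 0.0)   # "Front", first entry
# view_quaternion(90, 90)  -> (0.5, 0.5, 0.5, 0.5)         # "Right", first entry
# view_quaternion(0, 0)    -> (1.0, 0.0, 0.0, 0.0)         # "Top", first entry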
import CostumePy import time from Adafruit_BNO055 import BNO055 from mathutils import Quaternion bno = BNO055.BNO055() if not bno.begin(): raise RuntimeError('Failed to initialize BNO055! Is the sensor connected?') def calibrate_rotation(_): global calibration, raw calibration = Quaternion([raw.w, raw.x, raw.y, raw.z]) calibration = Quaternion([1, 0, 0, 0]) raw = Quaternion([1, 0, 0, 0]) corrected = Quaternion([1, 0, 0, 0]) node = CostumePy.new_node("localiser") node.ui.add_button("calibrate_rotation", "Calibrate", "calibrate_rotation") node.ui.add_text("orientation") node.ui.add_text("temperature") node.ui.update() node.listen("calibrate_rotation", calibrate_rotation) if __name__ == "__main__": while node.running:
def __gather_children(blender_object, blender_scene, export_settings): children = [] # standard children for _child_object in blender_object.children: if _child_object.parent_bone: # this is handled further down, # as the object should be a child of the specific bone, # not the Armature object continue child_object = _child_object.proxy if _child_object.proxy else _child_object node = gather_node( child_object, child_object.library.name if child_object.library else None, blender_scene, None, export_settings) if node is not None: children.append(node) # blender dupli objects if blender_object.instance_type == 'COLLECTION' and blender_object.instance_collection: for dupli_object in blender_object.instance_collection.objects: if dupli_object.parent is not None: continue if dupli_object.type == "ARMATURE": continue # There is probably a proxy node = gather_node( dupli_object, dupli_object.library.name if dupli_object.library else None, blender_scene, blender_object.name, export_settings) if node is not None: children.append(node) # blender bones if blender_object.type == "ARMATURE": root_joints = [] if export_settings["gltf_def_bones"] is False: bones = blender_object.pose.bones else: bones, _, _ = gltf2_blender_gather_skins.get_bone_tree( None, blender_object) bones = [blender_object.pose.bones[b.name] for b in bones] for blender_bone in bones: if not blender_bone.parent: joint = gltf2_blender_gather_joints.gather_joint( blender_object, blender_bone, export_settings) children.append(joint) root_joints.append(joint) # handle objects directly parented to bones direct_bone_children = [ child for child in blender_object.children if child.parent_bone ] def find_parent_joint(joints, name): for joint in joints: if joint.name == name: return joint parent_joint = find_parent_joint(joint.children, name) if parent_joint: return parent_joint return None for child in direct_bone_children: # find parent joint parent_joint = find_parent_joint(root_joints, child.parent_bone) if not parent_joint: continue child_node = gather_node(child, None, None, None, export_settings) if child_node is None: continue blender_bone = blender_object.pose.bones[parent_joint.name] # fix rotation if export_settings[gltf2_blender_export_keys.YUP]: rot = child_node.rotation if rot is None: rot = [0, 0, 0, 1] rot_quat = Quaternion(rot) axis_basis_change = Matrix( ((1.0, 0.0, 0.0, 0.0), (0.0, 0.0, -1.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0))) mat = child.matrix_parent_inverse @ child.matrix_basis mat = mat @ axis_basis_change _, rot_quat, _ = mat.decompose() child_node.rotation = [ rot_quat[1], rot_quat[2], rot_quat[3], rot_quat[0] ] # fix translation (in blender bone's tail is the origin for children) trans, _, _ = child.matrix_local.decompose() if trans is None: trans = [0, 0, 0] # bones go down their local y axis if blender_bone.matrix.to_scale()[1] >= 1e-6: bone_tail = [ 0, blender_bone.length / blender_bone.matrix.to_scale()[1], 0 ] else: bone_tail = [0, 0, 0] # If scale is 0, tail == head child_node.translation = [ trans[idx] + bone_tail[idx] for idx in range(3) ] parent_joint.children.append(child_node) return children
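For reference, the rotation written into child_node.rotation above is the inverse of the import-side reorder: a mathutils (w, x, y, z) quaternion flattened back into glTF's (x, y, z, w) layout. A tiny illustrative helper (not part of the exporter):

def quat_to_gltf(rot_quat):
    # mathutils order (w, x, y, z) -> glTF order (x, y, z, w)
    return [rot_quat[1], rot_quat[2], rot_quat[3], rot_quat[0]]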
def _add_bone_to_scene(self, scene_node, armature): # Let's get all the data collection out of the way joint_index = scene_node.Attribute('JOINTINDEX', int) joint_binding_data = self.mesh_binding_data[ 'JointBindings'][joint_index] inv_bind_matrix = joint_binding_data['InvBindMatrix'] inv_bind_matrix = Matrix([inv_bind_matrix[:4], inv_bind_matrix[4:8], inv_bind_matrix[8:12], inv_bind_matrix[12:]]) inv_bind_matrix.transpose() bind_trans = joint_binding_data['BindTranslate'] bind_rot = joint_binding_data['BindRotate'] bind_sca = joint_binding_data['BindScale'] # Assign the bind matrix so we can do easy lookup of it later for # applying animations. # Ironically, the inverse bind matrix is strored uninverted, and the # bind matrix is stored inverted... self.inv_bind_matrices[scene_node.Name] = inv_bind_matrix # Let's create the bone now # All changes to Bones have to be in EDIT mode or _bad things happen_ with edit_object(armature) as data: bone = data.edit_bones.new(scene_node.Name) bone.use_inherit_rotation = True bone.use_inherit_scale = True self.scn.objects[scene_node.Name]['bind_data'] = ( Vector(bind_trans[:3]), Quaternion((bind_rot[3], bind_rot[0], bind_rot[1], bind_rot[2])), Vector(bind_sca[:3])) """ self.bind_matrices[scene_node.Name] = (Vector(bind_trans[:3]), Quaternion((bind_rot[3], bind_rot[0], bind_rot[1], bind_rot[2])), Vector(bind_sca[:3])) """ if scene_node.parent.Type == 'JOINT': bone.matrix = self.inv_bind_matrices[scene_node.parent.Name] bone.tail = inv_bind_matrix.inverted().to_translation() if bone.length == 0: bone.tail = bone.head + Vector([0, 10 ** (-4), 0]) if scene_node.parent.Type == 'JOINT': bone.parent = armature.data.edit_bones[scene_node.parent.Name] bone.use_connect = True # NMS defines some bones used in animations with 0 transform, eg. # Toy Cube. # This causes bone creation to fail, we need to move the tail # slightly. # Note that MMD Tools would have to deal with this too. while scene_node: if scene_node.Transform['Trans'] != (0.0, 0.0, 0.0): break bone.tail += Vector([0, 0, 10 ** (-4)]) scene_node = scene_node.parent
def create_cam_trajectory(name, locations, quaternions, start_frame=1, framenrs=None, no_keyframe_highlighting=False, select_ob=True, goto_last_keyframe=False): """ "name" : name of Camera to be created "locations" : list of cam center positions for each trajectory node "quaternions" : list of cam orientation (quaternion (qx, qy, qz, qw)) for each trajectory node "start_frame" : start frame of the trajectory "framenrs" : list of frame numbers for each trajectory node (should be in increasing order) "no_keyframe_highlighting" : if True, don't show framenumbers for each keyframe along trajectory "select_ob" : select the camera object (and trajectory) "goto_last_keyframe" : go to the last keyframe of the generated trajectory """ if not select_ob: ob_selection_backup = backup_ob_selection() anim_state_backup = list(backup_anim_state()) # Create the camera if bpy.context.mode != "OBJECT": bpy.ops.object.mode_set() # switch to object mode if name in bpy.data.objects and bpy.data.objects[name].type == "CAMERA": ob = bpy.data.objects[name] bpy.ops.object.select_all(action="DESELECT") ob.select = True bpy.context.scene.objects.active = ob bpy.context.object.animation_data_clear( ) # clear all previous keyframes bpy.context.scene.layers = bpy.context.object.layers # only activate object's layers to insert keyframes else: bpy.ops.object.camera_add() ob = bpy.context.object ob.name = name # Unhide object ob_hide_backup = ob.hide ob.hide = False ob.rotation_mode = "QUATERNION" # Create path of the camera for i, (location, quaternion) in enumerate(zip(locations, quaternions)): bpy.context.scene.frame_current = framenrs[ i] if framenrs != None else i + 1 ob.location = list(location) qx, qy, qz, qw = quaternion ob.rotation_quaternion = [qw, qx, qy, qz] # We assume the TUM format uses the OpenCV cam convention (Z-axis in direction of view, Y-axis down) # so we'll have to convert, since Blender follows OpenGL convention ob.rotation_quaternion *= Quaternion((1.0, 0.0, 0.0), radians(180.0)) bpy.ops.anim.keyframe_insert_menu(type="BUILTIN_KSI_LocRot") # Visualize path ob.animation_visualization.motion_path.show_keyframe_highlight = \ (framenrs != None and not no_keyframe_highlighting) if framenrs != None: bpy.ops.object.paths_calculate(start_frame=framenrs[0], end_frame=framenrs[-1]) else: bpy.ops.object.paths_calculate(start_frame=start_frame, end_frame=start_frame + len(locations)) # Restore hide-state ob.hide = ob_hide_backup if goto_last_keyframe: anim_state_backup[0] = bpy.context.scene.frame_current restore_anim_state(*anim_state_backup) if not select_ob: restore_ob_selection(*ob_selection_backup)
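The convention fix used above (rotating each pose 180 degrees about the camera's local X axis) converts from the OpenCV camera frame (+Z forward, +Y down) to Blender's OpenGL-style camera (-Z forward, +Y up). Factored out as a standalone sketch (the helper name is illustrative):

from math import radians
from mathutils import Quaternion

def opencv_to_blender_cam_rotation(qx, qy, qz, qw):
    # TUM/OpenCV quaternions are given as (qx, qy, qz, qw); mathutils wants w first
    q = Quaternion((qw, qx, qy, qz))
    return q * Quaternion((1.0, 0.0, 0.0), radians(180.0))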
def set_convert_functions(gltf): if bpy.app.debug_value != 100: # Unit conversion factor in (Blender units) per meter u = 1.0 / bpy.context.scene.unit_settings.scale_length # glTF Y-Up space --> Blender Z-up space # X,Y,Z --> X,-Z,Y def convert_loc(x): return u * Vector([x[0], -x[2], x[1]]) def convert_quat(q): return Quaternion([q[3], q[0], -q[2], q[1]]) def convert_scale(s): return Vector([s[0], s[2], s[1]]) def convert_matrix(m): return Matrix([ [m[0], -m[8], m[4], m[12] * u], [-m[2], m[10], -m[6], -m[14] * u], [m[1], -m[9], m[5], m[13] * u], [m[3] / u, -m[11] / u, m[7] / u, m[15]], ]) # Batch versions operate in place on a numpy array def convert_locs_batch(locs): # x,y,z -> x,-z,y locs[:, [1, 2]] = locs[:, [2, 1]] locs[:, 1] *= -1 # Unit conversion if u != 1: locs *= u def convert_normals_batch(ns): ns[:, [1, 2]] = ns[:, [2, 1]] ns[:, 1] *= -1 # Correction for cameras and lights. # glTF: right = +X, forward = -Z, up = +Y # glTF after Yup2Zup: right = +X, forward = +Y, up = +Z # Blender: right = +X, forward = -Z, up = +Y # Need to carry Blender --> glTF after Yup2Zup gltf.camera_correction = Quaternion( (2**0.5 / 2, 2**0.5 / 2, 0.0, 0.0)) else: def convert_loc(x): return Vector(x) def convert_quat(q): return Quaternion([q[3], q[0], q[1], q[2]]) def convert_scale(s): return Vector(s) def convert_matrix(m): return Matrix([m[0::4], m[1::4], m[2::4], m[3::4]]) def convert_locs_batch(_locs): return def convert_normals_batch(_ns): return # Same convention, no correction needed. gltf.camera_correction = None gltf.loc_gltf_to_blender = convert_loc gltf.locs_batch_gltf_to_blender = convert_locs_batch gltf.quaternion_gltf_to_blender = convert_quat gltf.normals_batch_gltf_to_blender = convert_normals_batch gltf.scale_gltf_to_blender = convert_scale gltf.matrix_gltf_to_blender = convert_matrix
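A hedged usage sketch for the converters registered above, applying one glTF node's TRS to a Blender object; the function name and call site are illustrative, only the gltf.*_gltf_to_blender attributes come from the source.

def apply_node_trs(gltf, obj, translation, rotation, scale):
    # translation/rotation/scale are the raw glTF lists for one node
    obj.location = gltf.loc_gltf_to_blender(translation)
    obj.rotation_mode = 'QUATERNION'
    obj.rotation_quaternion = gltf.quaternion_gltf_to_blender(rotation)
    obj.scale = gltf.scale_gltf_to_blender(scale)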
def roll_left(self, angle): """Roll the turtle left about the direction it is facing""" self.right.rotate(Quaternion(self.dir, radians(-angle))) self.right.normalize()
def __get(self):
    value = self.region_data.view_rotation.copy()  # .normalized()
    if not self.use_camera_axes:
        value = value * Quaternion((1, 0, 0), -math.pi * 0.5)
    return value
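__get and __set apply opposite 90-degree corrections about local X when use_camera_axes is off, so a get followed by a set should round-trip the view rotation. A small check (illustrative only):

import math
from mathutils import Quaternion

def _roundtrip_ok(view_rot, tol=1e-6):
    to_camera = view_rot * Quaternion((1, 0, 0), -math.pi * 0.5)  # as in __get
    restored = to_camera * Quaternion((1, 0, 0), math.pi * 0.5)   # as in __set
    return max(abs(a - b) for a, b in zip(restored, view_rot)) < tol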
def convert_quat(q): return Quaternion([q[3], q[0], -q[2], q[1]])
def angle_axis_to_quat(angle, axis): w = math.cos(angle / 2.0) xyz = axis.normalized() * math.sin(angle / 2.0) return Quaternion((w, xyz.x, xyz.y, xyz.z))
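angle_axis_to_quat builds the quaternion by hand; mathutils can do the same with its axis-angle constructor, which also expects a normalized axis and an angle in radians. A quick equivalence check (sketch, not from the source):

import math
from mathutils import Vector, Quaternion

def _check_angle_axis_to_quat():
    axis = Vector((0.0, 0.0, 2.0))   # deliberately unnormalized
    angle = math.radians(90.0)
    q_manual = angle_axis_to_quat(angle, axis)
    q_builtin = Quaternion(axis.normalized(), angle)
    assert all(abs(a - b) < 1e-6 for a, b in zip(q_manual, q_builtin))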
def primitive_Torus_ME(radius_main=2.0, radius_minor=0.5, seg_main=24, seg_minor=12, sec_from=0.0, sec_to=2 * PI, smoothed=True): # Prepare empty lists verts = [] edges = [] faces = [] loops = [] # Set minimums if seg_main < 3: seg_main = 3 if seg_minor < 3: seg_minor = 3 if sec_from > sec_to: sec_from, sec_to = sec_to, sec_from # Create the loops seg_angle = (sec_to - sec_from) / seg_main quatRight = Quaternion((-1, 0, 0), PI / 2) vecOffset = Vector((radius_main, 0, 0)) for i in range(seg_main): quat = Quaternion((0, 0, 1), (i * seg_angle) + sec_from) newVerts, loop = circ_V(radius_minor, seg_minor, len(verts)) rot_V(newVerts, quatRight) move_V(newVerts, vecOffset) rot_V(newVerts, quat) verts.extend(newVerts) loops.append(loop) # Close the shape if sec_to - sec_from < 2 * PI: quat = Quaternion((0, 0, 1), sec_to) newVerts, loop = circ_V(radius_minor, seg_minor, len(verts)) rot_V(newVerts, quatRight) move_V(newVerts, vecOffset) rot_V(newVerts, quat) verts.extend(newVerts) loops.append(loop) verts.append(quat * vecOffset) quat = Quaternion((0, 0, 1), sec_from) verts.append(quat * vecOffset) # Close caps faces.extend(fanClose(loops[0], len(verts) - 1, flipped=True)) faces.extend(fanClose(loops[-1], len(verts) - 2)) else: faces.extend(bridgeLoops(loops[-1], loops[0], True)) # Bridge all loops for i in range(1, len(loops)): faces.extend(bridgeLoops(loops[i - 1], loops[i], True)) return verts, edges, faces
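primitive_Torus_ME only returns geometry lists; turning them into a scene object still needs a mesh datablock. A usage sketch assuming the same pre-2.8 API style used in the function (scene.objects.link, '*' for quaternion math); the helper name is illustrative:

import bpy

def add_torus_object(name="Torus_ME"):
    verts, edges, faces = primitive_Torus_ME(radius_main=2.0, radius_minor=0.5,
                                             seg_main=24, seg_minor=12)
    mesh = bpy.data.meshes.new(name)
    mesh.from_pydata(verts, edges, faces)
    mesh.update()
    obj = bpy.data.objects.new(name, mesh)
    bpy.context.scene.objects.link(obj)
    return obj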
def load(context, filepath ): with ProgressReport(context.window_manager) as progress: progress.enter_substeps(1, "Importing ADT OBJ %r..." % filepath) csvpath = filepath.replace('.obj', '_ModelPlacementInformation.csv') # Coordinate setup ## WoW coordinate sytem # Max Size: 51200 / 3 = 17066,66666666667 # Map Size: Max Size * 2 = 34133,33333333333 # ADT Size: Map Size / 64 = 533,3333333333333 max_size = 51200 / 3 map_size = max_size * 2 adt_size = map_size / 64 base_folder, adtname = os.path.split(filepath) adtsplit = adtname.split("_") mapname = adtsplit[0] map_x = int(adtsplit[1]) map_y = int(adtsplit[2].replace(".obj", "")) print(mapname) print(map_x) print(map_y) offset_x = adt_size * map_x offset_y = adt_size * map_y print(offset_x) print(offset_y) # Import ADT bpy.ops.import_scene.obj(filepath=filepath) bpy.ops.object.add(type='EMPTY') doodadparent = bpy.context.active_object doodadparent.parent = bpy.data.objects[mapname + '_' + str(map_x) + '_' + str(map_y)] doodadparent.name = "Doodads" doodadparent.rotation_euler = [0, 0, 0] doodadparent.rotation_euler.x = radians(-90) bpy.ops.object.add(type='EMPTY') wmoparent = bpy.context.active_object wmoparent.parent = bpy.data.objects[mapname + '_' + str(map_x) + '_' + str(map_y)] wmoparent.name = "WMOs" wmoparent.rotation_euler = [0, 0, 0] wmoparent.rotation_euler.x = radians(-90) # Make object active # bpy.context.scene.objects.active = obj # Read doodad definitions file with open(csvpath) as csvfile: reader = csv.DictReader(csvfile, delimiter=';') for row in reader: doodad_path, doodad_filename = os.path.split(filepath) newpath = os.path.join(doodad_path, row['ModelFile']) if row['Type'] == 'wmo': bpy.ops.object.add(type='EMPTY') parent = bpy.context.active_object parent.name = row['ModelFile'] parent.parent = wmoparent parent.location = (17066 - float(row['PositionX']), (17066 - float(row['PositionZ'])) * -1, float(row['PositionY'])) parent.rotation_euler = [0, 0, 0] #obj.rotation_euler.x += (radians(90 + float(row['RotationX']))) # TODO #obj.rotation_euler.y -= radians(float(row['RotationY'])) # TODO parent.rotation_euler.z = radians((-90 + float(row['RotationY']))) if row['ScaleFactor']: parent.scale = (float(row['ScaleFactor']), float(row['ScaleFactor']), float(row['ScaleFactor'])) bpy.ops.import_scene.obj(filepath=newpath) obj_objects = bpy.context.selected_objects[:] # Put ADT rotations in here for obj in obj_objects: obj.parent = parent wmocsvpath = newpath.replace('.obj', '_ModelPlacementInformation.csv') # Read WMO doodads definitions file with open(wmocsvpath) as wmocsvfile: wmoreader = csv.DictReader(wmocsvfile, delimiter=';') for wmorow in wmoreader: wmodoodad_path, wmodoodad_filename = os.path.split(filepath) wmonewpath = os.path.join(wmodoodad_path, wmorow['ModelFile']) # Import the doodad if(os.path.exists(wmonewpath)): bpy.ops.import_scene.obj(filepath=wmonewpath) # Select the imported doodad wmoobj_objects = bpy.context.selected_objects[:] for wmoobj in wmoobj_objects: # Prepend name wmoobj.name = "(" + wmorow['DoodadSet'] + ") " + wmoobj.name # Set parent wmoobj.parent = parent # Set position wmoobj.location = (float(wmorow['PositionX']) * -1, float(wmorow['PositionY']) * -1, float(wmorow['PositionZ'])) # Set rotation rotQuat = Quaternion((float(wmorow['RotationW']), float(wmorow['RotationX']), float(wmorow['RotationY']), float(wmorow['RotationZ']))) rotEul = rotQuat.to_euler() rotEul.x += radians(90); rotEul.z += radians(180); wmoobj.rotation_euler = rotEul # Set scale if wmorow['ScaleFactor']: wmoobj.scale = 
(float(wmorow['ScaleFactor']), float(wmorow['ScaleFactor']), float(wmorow['ScaleFactor'])) # Duplicate material removal script by Kruithne # Merge all duplicate materials for obj in bpy.context.scene.objects: if obj.type == 'MESH': i = 0 for mat_slot in obj.material_slots: mat = mat_slot.material obj.material_slots[i].material = bpy.data.materials[mat.name.split('.')[0]] i += 1 # Cleanup unused materials for img in bpy.data.images: if not img.users: bpy.data.images.remove(img) else: if(os.path.exists(newpath)): bpy.ops.import_scene.obj(filepath=newpath) obj_objects = bpy.context.selected_objects[:] for obj in obj_objects: # Set parent obj.parent = doodadparent # Set location obj.location.x = (17066 - float(row['PositionX'])) obj.location.y = (17066 - float(row['PositionZ'])) * -1 obj.location.z = float(row['PositionY']) obj.rotation_euler.x += radians(float(row['RotationZ'])) obj.rotation_euler.y += radians(float(row['RotationX'])) obj.rotation_euler.z = radians(90 + float(row['RotationY'])) # okay # Set scale if row['ScaleFactor']: obj.scale = (float(row['ScaleFactor']), float(row['ScaleFactor']), float(row['ScaleFactor'])) # Set doodad and WMO parent to 0 wmoparent.location = (0, 0, 0) doodadparent.location = (0, 0, 0) print("Deduplicating and cleaning up materials!") # Duplicate material removal script by Kruithne # Merge all duplicate materials for obj in bpy.context.scene.objects: if obj.type == 'MESH': i = 0 for mat_slot in obj.material_slots: mat = mat_slot.material obj.material_slots[i].material = bpy.data.materials[mat.name.split('.')[0]] i += 1 # Cleanup unused materials for img in bpy.data.images: if not img.users: bpy.data.images.remove(img) progress.leave_substeps("Finished importing: %r" % filepath) return {'FINISHED'}
def _read_quaternion(self, f): x, y, z, w = unpack('4f', f) return Quaternion((w, x, y, z))
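The unpack() call above appears to be an importer helper that reads directly from the file handle. A self-contained equivalent using the standard struct module (assuming little-endian floats stored on disk as x, y, z, w):

import struct
from mathutils import Quaternion

def read_quaternion(f):
    # 4 little-endian floats stored as x, y, z, w; mathutils wants (w, x, y, z)
    x, y, z, w = struct.unpack('<4f', f.read(16))
    return Quaternion((w, x, y, z))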