def scale_verts_by_bone(self, pbone, armature, mesh_object, vert_co, weight=1.0):
    # verts = mesh_object.data.vertices
    bone = armature.data.bones[pbone.name]
    bone_head = self.init_bone_positions[bone.name]["head"]
    bone_tail = self.init_bone_positions[bone.name]["tail"]
    bone_axis_x = (bone_tail - bone_head).normalized().xz
    bone_axis_y = bone_axis_x.orthogonal().normalized()
    world_axis_x = Vector((bone_axis_x.dot(Vector((1, 0))), bone_axis_y.dot(Vector((1, 0)))))
    world_axis_y = Vector((bone_axis_x.dot(Vector((0, 1))), bone_axis_y.dot(Vector((0, 1)))))
    bone_system_origin = (mesh_object.matrix_world.inverted() * (armature.matrix_world * bone_head)).xz
    bone_scale = pbone.matrix.to_scale()
    bone_scale_2d = Vector((self.lerp(1.0, bone_scale.y, weight), self.lerp(1.0, bone_scale.x, weight)))
    vert_delta_co = vert_co.xz
    vert_delta_co -= bone_system_origin
    vert_delta_co = Vector((bone_axis_x.dot(vert_delta_co), bone_axis_y.dot(vert_delta_co)))
    vert_delta_co = Vector((vert_delta_co.x * bone_scale_2d.x, vert_delta_co.y * bone_scale_2d.y))
    vert_delta_co = Vector((world_axis_x.dot(vert_delta_co), world_axis_y.dot(vert_delta_co)))
    vert_delta_co += bone_system_origin
    scaled_vert_co = Vector((vert_delta_co.x, 0, vert_delta_co.y))
    return scaled_vert_co
def unmirror_sym(obj_list):
    '''Unmirror symmetrical elements.'''
    for object in obj_list:
        mesh = object.data
        # remove the mirror modifier: set the object active first
        bpy.context.scene.objects.active = object
        bpy.ops.object.modifier_remove(modifier='Mirror')
        # the first vertex gives us the coordinates for the back-transformation
        v = Vector((mesh.vertices[0].co[0], mesh.vertices[0].co[1], mesh.vertices[0].co[2]))
        # back-transformation
        mesh.transform(Matrix.Translation(-v))
        # recalculate !!!!!!! odd behaviour if not done !!!!!!!
        mesh.update()
        # set the location point back (adaptation for the FG CSYS)
        if bpy.context.scene.csys == '1':
            object.location = v
        elif bpy.context.scene.csys == '0':
            u = v.copy()
            u.x = -u.x
            u.y = -u.y
            object.location = u
def __init__(self, d=3, p=None, v=None, p0=None, p1=None, z_axis=None):
    """
    d=3 use 3d coords, d=2 use 2d pixel coords
    Init by either
        p: Vector or tuple - origin
        v: Vector or tuple - size and direction
    or
        p0: Vector or tuple - 1st point location
        p1: Vector or tuple - 2nd point location
    Will convert any into a 3d Vector; both pairs are optional
    """
    if p is not None and v is not None:
        self.p = Vector(p)
        self.v = Vector(v)
    elif p0 is not None and p1 is not None:
        self.p = Vector(p0)
        self.v = Vector(p1) - self.p
    else:
        self.p = Vector((0, 0, 0))
        self.v = Vector((0, 0, 0))
    if z_axis is not None:
        self.z_axis = z_axis
    else:
        self.z_axis = Vector((0, 0, 1))
    GlBaseLine.__init__(self, d)
def moveup(self):
    bfvec = Vector((0, 0, 1))
    bfvec.length = self.addonprefs.Speed * self.addonprefs.Scale * self.runmulti / self.divi
    if self.scn.FPS_Walk:
        self.addonprefs.Height += bfvec.length * self.addonprefs.Scale
    else:
        self.rv3d.view_location += bfvec
def __init__(self, context=None, event=None, recalcDPBU=True, dpf=200, expnames=('Dist {exp}',)):
    self.shift = None  # *0.1. type:Vector. Affects relative.
    self.lock = None  # lock direction. type:Vector. Affects relative.
    self.snap = False  # type:Bool
    self.origin = Vector()  # changed with the R key
    self.current = Vector()  # (event.mouse_region_x, event.mouse_region_y, 0)
    self.relative = Vector()  # takes shift and lock into account
    self.dpbu = 1.0  # updated on init, and on update() when specified
    self.unit_pow = 1.0  # same as above
    self.dist = 0.0  # takes relative snap into account
    self.fac = 0.0
    self.inputexp = False
    self.exp = InputExpression(names=expnames)
    #self.finaldist = 0.0  # final value taking exp etc. into account
    self.exptargets = {}
    self.shortcuts = []
    if event:
        self.origin = Vector((event.mouse_region_x, event.mouse_region_y, 0.0))
    self.dpf = dpf  # dots per fac
    self.update(context, event, recalcDPBU)
def region_2d_to_vector_3d(region, rv3d, coord):
    """
    Return a direction vector from the viewport at the specific 2d region
    coordinate.

    :arg region: region of the 3D viewport, typically bpy.context.region.
    :type region: :class:`bpy.types.Region`
    :arg rv3d: 3D region data, typically bpy.context.space_data.region_3d.
    :type rv3d: :class:`bpy.types.RegionView3D`
    :arg coord: 2d coordinates relative to the region:
       (event.mouse_region_x, event.mouse_region_y) for example.
    :type coord: 2d vector
    :return: normalized 3d vector.
    :rtype: :class:`mathutils.Vector`
    """
    from mathutils import Vector

    viewinv = rv3d.view_matrix.inverted()
    if rv3d.is_perspective:
        persinv = rv3d.perspective_matrix.inverted()
        out = Vector(((2.0 * coord[0] / region.width) - 1.0,
                      (2.0 * coord[1] / region.height) - 1.0,
                      -0.5))
        w = out.dot(persinv[3].xyz) + persinv[3][3]
        return ((persinv * out) / w) - viewinv.translation
    else:
        return viewinv.col[2].xyz.normalized()
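# Usage sketch (not part of the original source): inside a modal operator
# running in a 3D View, the helper above gives the view-ray direction under
# the mouse. `context.region` / `context.space_data.region_3d` are the usual
# names; the surrounding operator and event are assumptions.
def mouse_view_direction(context, event):
    region = context.region
    rv3d = context.space_data.region_3d
    return region_2d_to_vector_3d(region, rv3d,
                                  (event.mouse_region_x, event.mouse_region_y))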
def extrusion_to_matrix(entity):
    """
    Converts an extrusion vector to a rotation matrix that denotes the
    transformation between world coordinate system and the entity's own
    coordinate system (described by the extrusion vector).
    """
    def arbitrary_x_axis(extrusion_normal):
        world_y = Vector((0, 1, 0))
        world_z = Vector((0, 0, 1))
        if abs(extrusion_normal[0]) < 1 / 64 and abs(extrusion_normal[1]) < 1 / 64:
            a_x = world_y.cross(extrusion_normal)
        else:
            a_x = world_z.cross(extrusion_normal)
        a_x.normalize()
        return a_x, extrusion_normal.cross(a_x)

    az = Vector(entity.extrusion)
    ax, ay = arbitrary_x_axis(az)
    ax4 = ax.to_4d()
    ay4 = ay.to_4d()
    az4 = az.to_4d()
    ax4[3] = 0
    ay4[3] = 0
    az4[3] = 0
    translation = Vector((0, 0, 0, 1))
    if hasattr(entity, "elevation"):
        if type(entity.elevation) is tuple:
            translation = Vector(entity.elevation).to_4d()
        else:
            translation = (az * entity.elevation).to_4d()
    return Matrix((ax4, ay4, az4, translation)).transposed()
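# Hedged usage sketch: apply the OCS matrix above to bring a point expressed in
# an entity's own coordinate system into world coordinates. `entity` is assumed
# to expose `extrusion` (and optionally `elevation`) like the DXF entities the
# importer reads; the snippet's old-style `*` matrix multiplication is kept.
from mathutils import Vector

def ocs_to_world(entity, point):
    m = extrusion_to_matrix(entity)
    return (m * Vector(point).to_4d()).to_3d()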
def add_torus(self, majSeg, minSeg, majRad, minRad): lv = [] circ = math.pi*2 majCirc = circ/majSeg minCirc = circ/minSeg index = 0 rings = [] for maj in range(majSeg): majTheta = majCirc*maj dx = math.cos(majTheta) * majRad dy = math.sin(majTheta) * majRad n = Vector((dx, dy, 0)) minorRing = [] for min in range(minSeg): minTheta = minCirc*min dn = math.cos(minTheta) * minRad dz = math.sin(minTheta) * minRad co = n + n.normalized() * dn + Vector((0, 0, dz)) co = co.to_tuple() lv.append(self.new_vertex((Vector((co))))) minorRing.append(index) index += 1 rings.append(minorRing) for ri in range(len(rings)-1): ring = rings[ri] nextRing = rings[ri+1] for i in range(len(ring)-1): self.new_face([lv[ring[i]], lv[nextRing[i]], lv[nextRing[i+1]], lv[ring[i+1]]]) self.new_face([lv[ring[0]], lv[ring[len(ring)-1]], lv[nextRing[len(nextRing)-1]], lv[nextRing[0]]]) ring = rings[len(rings)-1] nextRing = rings[0] for i in range(len(ring)-1): self.new_face([lv[ring[i]], lv[nextRing[i]], lv[nextRing[i+1]], lv[ring[i+1]]]) self.new_face([lv[ring[0]], lv[ring[len(ring)-1]], lv[nextRing[len(nextRing)-1]], lv[nextRing[0]]])
def proj_z(self, t, dz0, next=None, dz1=0): """ length of projection along crossing line / circle deformation unit vector for profil in z axis at line / line intersection so f(y) = position of point in yz plane """ return Vector((0, 1)), 1 """ NOTE (to myself): In theory this is how it has to be done so sections follow path, but in real world results are better when sections are z-up. So return a dumb 1 so f(y) = y """ if next is None: dz = dz0 / self.length else: dz = (dz1 + dz0) / (self.length + next.length) return Vector((0, 1)), sqrt(1 + dz * dz) # 1 / sqrt(1 + (dz0 / self.length) * (dz0 / self.length)) if next is None: return Vector((-dz0, self.length)).normalized(), 1 v0 = Vector((self.length, dz0)) v1 = Vector((next.length, dz1)) direction = Vector((-dz0, self.length)).normalized() + Vector((-dz1, next.length)).normalized() adj = v0 * v1 hyp = (v0.length * v1.length) c = min(1, max(-1, adj / hyp)) size = -cos(pi - 0.5 * acos(c)) return direction.normalized(), size
def generate(self): """ Generate the rig. Do NOT modify any of the original bones, except for adding constraints. The main armature should be selected and active before this is called. """ ctrl_bones = self.fk_limb.generate() thigh = ctrl_bones[0] shin = ctrl_bones[1] foot = ctrl_bones[2] foot_mch = ctrl_bones[3] # Position foot control bpy.ops.object.mode_set(mode='EDIT') eb = self.obj.data.edit_bones foot_e = eb[foot] vec = Vector(eb[self.org_bones[3]].vector) vec.normalize() foot_e.tail = foot_e.head + (vec * foot_e.length) foot_e.roll = eb[self.org_bones[3]].roll bpy.ops.object.mode_set(mode='OBJECT') # Create foot widget ob = create_widget(self.obj, foot) if ob is not None: verts = [(0.7, 1.5, 0.0), (0.7, -0.25, 0.0), (-0.7, -0.25, 0.0), (-0.7, 1.5, 0.0), (0.7, 0.723, 0.0), (-0.7, 0.723, 0.0), (0.7, 0.0, 0.0), (-0.7, 0.0, 0.0)] edges = [(1, 2), (0, 3), (0, 4), (3, 5), (4, 6), (1, 6), (5, 7), (2, 7)] mesh = ob.data mesh.from_pydata(verts, edges, []) mesh.update() mod = ob.modifiers.new("subsurf", 'SUBSURF') mod.levels = 2 return [thigh, shin, foot, foot_mch]
def viewrotate_apply(self, context, event):
    # FIXME
    vod = self.vod

    x, y = event.x, event.y

    if context.user_preferences.inputs.view_rotate_method == 'TRACKBALL':
        newvec = calctrackballvec(context.region, event.x, event.y)
        dvec = newvec - vod.trackvec

        angle = (dvec.length / (2.0 * TRACKBALLSIZE)) * math.pi
        angle = angle_wrap_rad(angle)

        axis = vod.trackvec.cross(newvec)
        q1 = Quaternion(axis, angle)
        vod.viewquat = q1 * vod.oldquat

        self.viewrotate_apply_dyn_ofs(vod.viewquat)
    else:
        zvec_global = Vector([0, 0, 1])
        sensitivity = 0.007

        m = vod.viewquat.to_matrix()
        m_inv = m.inverted()

        if (zvec_global - m_inv.col[2]).length > 0.001:
            xaxis = zvec_global.cross(m_inv.col[0])  # was 'closs', which is not a Vector method
            if xaxis.dot(m_inv.col[0]) < 0:
                xaxis.negate()
            fac = zvec_global.angle(m_inv.col[2]) / math.pi
            fac = abs(fac - 0.5) * 2
            fac *= fac
            xaxis = xaxis.lerp(m_inv.col[0], fac)
        else:
            xaxis = m_inv[0].copy()

        quat_local_x = Quaternion(xaxis, sensitivity * - (y - vod.oldy))
        quat_local_x = vod.viewquat * quat_local_x

        def axis_angle_to_quat_single(axis, angle):
            angle_half = angle * 0.5
            angle_cos = math.cos(angle_half)
            angle_sin = math.sin(angle_half)
            axis_index = ['X', 'Y', 'Z'].index(axis)
            q = Quaternion([angle_cos, 0, 0, 0])
            q[axis_index + 1] = angle_sin
            return q

        quat_global_z = axis_angle_to_quat_single(
            'Z', sensitivity * vod.reverse * (x - vod.oldx))

        vod.viewquat = quat_local_x * quat_global_z

        self.viewrotate_apply_dyn_ofs(vod.viewquat)

    vod.viewquat.normalize()
    context.region_data.view_rotation = vod.viewquat.inverted()
    if vod.axis_snap:
        self.viewrotate_apply_snap(vod)
    vod.oldx = x
    vod.oldy = y

    ED_view3d_camera_lock_sync(vod.v3d, context.region_data)

    context.region.tag_redraw()
def test_orthogonal(self):
    angle_90d = math.pi / 2.0
    for v in vector_data:
        v = Vector(v)
        if v.length_squared != 0.0:
            self.assertAlmostEqual(v.angle(v.orthogonal()), angle_90d)
def draw(self, scene=bpy.context.scene, maxdensity=None, matrix_world=None): """ draws the reflection plane in the scene """ base = self.rnor * self.roff #rme = bpy.data.meshes.new('rNormal') #normalverts = [base, base + self.rnor] #normaledge = [[0, 1]] #rme.from_pydata(normalverts,normaledge,[]) #ob_normal = bpy.data.objects.new("rNormal", rme) #scene.objects.link(ob_normal) n = Vector() # self rotation in (phi,theta,0) n.xyz = (-self.co.x, -self.co.y, 0) mesh = bpy.ops.mesh.primitive_plane_add( radius=2, location = base, rotation=n.zyx) obj = bpy.context.active_object obj.hide = True if matrix_world: obj.matrix_world = matrix_world * obj.matrix_world if maxdensity: material = bpy.data.materials.new('color') material.diffuse_color = (self.weight/maxdensity, 1 - self.weight/maxdensity, 1 - self.weight/maxdensity) mesh = obj.data mesh.materials.append(material)
def vecscorrect(vecs, mats): out = [] lengthve = len(vecs)-1 for i, m in enumerate(mats): out_ = [] k = i if k > lengthve: k = lengthve vec_c = Vector((0, 0, 0)) for v in vecs[k]: vec = v*m out_.append(vec) vec_c += vec vec_c = vec_c / len(vecs[k]) v = out_[1]-out_[0] w = out_[2]-out_[0] A = v.y*w.z - v.z*w.y B = -v.x*w.z + v.z*w.x C = v.x*w.y - v.y*w.x #D = -out_[0].x*A - out_[0].y*B - out_[0].z*C norm = Vector((A, B, C)).normalized() vec0 = Vector((0, 0, 1)) mat_rot_norm = vec0.rotation_difference(norm).to_matrix().to_4x4() out_pre = [] for v in out_: v_out = (v-vec_c) * mat_rot_norm out_pre.append(v_out[:]) out.append(out_pre) return out
def do_update_heat_map(node_list, nodes):
    """
    Create a heat map for the node tree.
    Needs development.
    """
    if not nodes.id_data.sv_user_colors:
        color_data = {node.name: (node.color[:], node.use_custom_color) for node in nodes}
        nodes.id_data.sv_user_colors = str(color_data)

    times = do_update_general(node_list, nodes)
    if not times:
        return
    t_max = max(times)
    addon_name = data_structure.SVERCHOK_NAME
    addon = bpy.context.user_preferences.addons.get(addon_name)
    if addon:
        # wrap in Vector() to use Vector.lerp
        cold = Vector(addon.preferences.heat_map_cold)
        hot = addon.preferences.heat_map_hot
    else:
        error("Cannot find preferences")
        cold = Vector((1, 1, 1))
        hot = (.8, 0, 0)
    for name, t in zip(node_list, times):
        nodes[name].use_custom_color = True
        # linear scale.
        nodes[name].color = cold.lerp(hot, t / t_max)
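# Illustration of the Vector.lerp() call that drives the heat-map colour above
# (a standalone sketch; only mathutils is assumed): blend from the cold colour
# towards the hot one by a normalized 0..1 factor, exactly as the loop does.
from mathutils import Vector

cold = Vector((1.0, 1.0, 1.0))
hot = (0.8, 0.0, 0.0)
print(cold.lerp(hot, 0.5))  # colour halfway between cold and hot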
def _is_flat_face(normal):
    a = Vector(normal[0])
    for n in normal[1:]:
        dp = a.dot(Vector(n))
        if dp < 0.99999 or dp > 1.00001:
            return False
    return True
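# Quick sanity check for _is_flat_face (a sketch; only mathutils is assumed,
# and the normals are expected to be unit length, as in the caller):
from mathutils import Vector

assert _is_flat_face([Vector((0.0, 0.0, 1.0)), Vector((0.0, 0.0, 1.0))])
assert not _is_flat_face([Vector((0.0, 0.0, 1.0)), Vector((1.0, 0.0, 0.0))])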
def __get(self): # in object axes world_x = Vector((1, 0, 0)) world_z = Vector((0, 0, 1)) x = self.right # right y = self.forward # forward z = self.up # up if abs(y.z) > (1 - 1e-12): # sufficiently close to vertical roll = 0.0 xdir = x.copy() else: xdir = y.cross(world_z) rollPos = angle_signed(-y, x, xdir, 0.0) rollNeg = angle_signed(-y, x, -xdir, 0.0) if abs(rollNeg) < abs(rollPos): roll = rollNeg xdir = -xdir else: roll = rollPos xdir = Vector((xdir.x, xdir.y, 0)).normalized() yaw = angle_signed(-world_z, xdir, world_x, 0.0) zdir = xdir.cross(y).normalized() pitch = angle_signed(-xdir, zdir, world_z, 0.0) return Euler((pitch, roll, yaw), 'YXZ')
def ctx_camera_setup(context, location=(0.0, 0.0, 0.0), lookat=(0.0, 0.0, 0.0), # most likely the following vars can be left as defaults up=(0.0, 0.0, 1.0), lookat_axis='-Z', up_axis='Y', ): camera = bpy.data.cameras.new(whoami()) obj = bpy.data.objects.new(whoami(), camera) scene = context.scene scene.objects.link(obj) scene.camera = obj from mathutils import Vector, Matrix # setup transform view_vec = Vector(lookat) - Vector(location) rot_mat = view_vec.to_track_quat(lookat_axis, up_axis).to_matrix().to_4x4() tra_mat = Matrix.Translation(location) obj.matrix_world = tra_mat * rot_mat ctx_viewport_camera(context) return obj
def add_f_curve_modifiers(armature_object, strength, speed): wind_vector = Vector((1, 0, 0)) * strength fcurves = armature_object.animation_data.action.fcurves for f in fcurves: for m in f.modifiers: f.modifiers.remove(m) bones = organize_bones(armature_object) for b in bones: mass = b.bone.tail_radius ** 2 * b.length barycenter = b.tail * mass for c in b.children: mass += c["mass"] barycenter += Vector(c["barycenter"]) b["mass"] = mass b["barycenter"] = barycenter barycenter /= mass b.rotation_mode = 'XYZ' b.keyframe_insert('rotation_euler', frame=0, index=0) b.keyframe_insert('rotation_euler', frame=0, index=2) fcurves = armature_object.animation_data.action.fcurves for i in range(len(bones)): f0 = fcurves[2 * i] f1 = fcurves[2 * i + 1] b = bones[i] i_base = b.matrix.to_3x3().inverted() bone_vector = b.tail - b.head inertia_moment = bone_vector.length ** 2 * bones[i]["mass"] / 10000 damping = 0.5 * b.bone.tail_radius stiffness = b.bone.tail_radius ** 2 / b.length * 800 if b.parent is not None and len(b.parent.children) > 1: stiffness *= 2 # torque /= 3 # else: # torque = Vector((0, 0, 0)) torque = i_base * wind_vector.cross(bone_vector) / (b.bone.tail_radius) / 1000 f = sqrt(abs(damping ** 2 - 4 * inertia_moment * stiffness)) / (5*b.bone.tail_radius) * speed x_amplitude = torque.x z_amplitude = torque.z m0 = f0.modifiers.new(type='FNGENERATOR') m1 = f1.modifiers.new(type='FNGENERATOR') m0.function_type = 'SIN' m1.function_type = 'SIN' m0.amplitude = x_amplitude m1.amplitude = z_amplitude m0.phase_multiplier = f m1.phase_multiplier = f m0.value_offset = x_amplitude * 3 m1.value_offset = z_amplitude * 3
def rounded_primitive(cls, verts, radius, resolution=2.0): if not verts: return if len(verts) == 1: yield from cls.arc(verts[0], radius, resolution, skip_end=1) elif len(verts) == 2: v0, v1 = verts dv = v1 - v0 angle = Vector((0,1)).angle_signed(Vector((-dv.y, dv.x)), 0.0) yield from cls.arc(v0, radius, resolution, angle-math.pi, angle) yield from cls.arc(v1, radius, resolution, angle, angle+math.pi) elif radius == 0: yield from verts # exactly the same else: vref = Vector((0,1)) count = len(verts) for i0 in range(count): v0 = verts[i0] v1 = verts[(i0 + 1) % count] v2 = verts[(i0 + 2) % count] dv10 = v1 - v0 dv21 = v2 - v1 angle10 = vref.angle_signed(Vector((-dv10.y, dv10.x)), 0.0) angle21 = vref.angle_signed(Vector((-dv21.y, dv21.x)), 0.0) angle21 = angle10 + clamp_angle(angle21 - angle10) yield from cls.arc(v1, radius, resolution, angle10, angle21)
def vec_roll_to_mat3(axis, roll):
    """Computes 3x3 Matrix from rotation axis and its roll.

    :param axis: Rotation
    :type axis: Vector
    :param roll: Roll
    :type roll: float
    :return: 3x3 Matrix
    :rtype: Matrix
    """
    nor = axis.normalized()
    target = Vector((0, 1, 0))
    axis = target.cross(nor)

    if axis.dot(axis) > 1.0e-9:
        axis.normalize()
        theta = _math_utils.angle_normalized_v3v3(target, nor)
        b_matrix = Matrix.Rotation(theta, 4, axis)
    else:
        if target.dot(nor) > 0:
            up_or_down = 1.0
        else:
            up_or_down = -1.0

        b_matrix = Matrix()
        b_matrix[0] = (up_or_down, 0, 0, 0)
        b_matrix[1] = (0, up_or_down, 0, 0)
        b_matrix[2] = (0, 0, 1, 0)
        b_matrix[3] = (0, 0, 0, 1)

    roll_matrix = Matrix.Rotation(roll, 4, nor)
    return (roll_matrix * b_matrix).to_3x3()
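# Hedged usage sketch for vec_roll_to_mat3: rebuild a bone's 3x3 orientation
# from its Y axis and roll. Only mathutils is assumed; the helper's own call to
# _math_utils.angle_normalized_v3v3 comes from its module.
from mathutils import Vector

bone_y_axis = Vector((0.0, 0.0, 1.0))   # a bone pointing straight up
m = vec_roll_to_mat3(bone_y_axis, 0.0)
print(m.col[1])                         # column 1 equals the normalized input axis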
def va(vx, vz, iang, sang, n):  # shortcut Verts.append
    for i in range(n):
        v = Vector((vx, 0, vz))
        ai = sang + iang * i
        E_rot = Euler((0, 0, ai), 'XYZ')
        v.rotate(E_rot)
        Verts.append((v.x, v.y, v.z))
def __init__(self, position=(0, 0, 0), orientation=(1, 0, 0), vitesse=1, angle=radians(90)):
    self.position = Vector(position)
    self.orientation = Vector(orientation).normalized()
    self.vitesse = vitesse
    self.angle = angle
    self.memoireEtat = []
    self.comportement_initialisation()
def shape_circle(context, orientation): center = context.scene.cursor_location active = context.active_object zed = active.location[2] base_dir = active.location.xy - center.xy if orientation == 'XY': zero_dir = get_xy(base_dir).resized(3) else: zero_dir = base_dir.xy.resized(3) num_objects = len(context.selected_objects) delta_angle = 2 * math.pi / num_objects # sort objects based on angle to center sorted_objects = sorted(context.selected_objects, key=lambda ob: get_angle(base_dir, ob, center)) for i in range(num_objects): angle = delta_angle * i euler = Euler((0, 0, -angle)) direction = Vector(zero_dir) direction.rotate(euler) sorted_objects[i].location = center + direction sorted_objects[i].location[2] = zed
def write_camera(self, camera, name="Active Camera"): pos, target, up = camera.GetOrientation() bpy.ops.object.add(type='CAMERA', location=pos) ob = self.context.object ob.name = name z = (Vector(pos) - Vector(target)) x = Vector(up).cross(z) y = z.cross(x) x.normalize() y.normalize() z.normalize() ob.matrix_world.col[0] = x.resized(4) ob.matrix_world.col[1] = y.resized(4) ob.matrix_world.col[2] = z.resized(4) cam = ob.data aspect_ratio = camera.aspect_ratio fov = camera.fov if aspect_ratio == False: # we seem to be using dynamic / screen aspect ratio sketchupLog("CAMERA {} uses dynamic / screen aspect ratio ".format(name)) aspect_ratio = self.aspect_ratio if fov == False: sketchupLog("CAMERA {} is ortho ".format(name)) cam.type = 'ORTHO' else: cam.angle = (pi * fov / 180 ) * aspect_ratio cam.clip_end = self.prefs.camera_far_plane cam.name = name
def by_edge_dir(self, vertices, edges, faces): percent = self.inputs['Percent'].sv_get(default=[1.0])[0][0] direction = self.inputs['Direction'].sv_get()[0][0] dirvector = Vector(direction) dirlength = dirvector.length if dirlength <= 0: raise ValueError("Direction vector must have nonzero length!") values = [] for i, j in edges: u = vertices[i] v = vertices[j] edge = Vector(u) - Vector(v) if edge.length > 0: value = abs(edge.dot(dirvector)) / (edge.length * dirlength) else: value = 0 values.append(value) threshold = self.map_percent(values, percent) out_edges_mask = [(value >= threshold) for value in values] out_edges = [edge for (edge, mask) in zip (edges, out_edges_mask) if mask] out_verts_mask = self.select_verts_by_faces(out_edges, vertices) out_faces_mask = self.select_faces_by_verts(out_verts_mask, faces) return out_verts_mask, out_edges_mask, out_faces_mask
def stroke_normal(it):
    """
    Compute the 2D normal at the stroke vertex pointed by the iterator
    'it'.  It is noted that Normal2DF0D computes normals based on
    underlying FEdges instead, which is inappropriate for strokes when
    they have already been modified by stroke geometry modifiers.
    """
    # first stroke segment
    it_next = it.incremented()
    if it.is_begin:
        e = it_next.object.point_2d - it.object.point_2d
        n = Vector((e[1], -e[0]))
        return n.normalized()
    # last stroke segment
    it_prev = it.decremented()
    if it_next.is_end:
        e = it.object.point_2d - it_prev.object.point_2d
        n = Vector((e[1], -e[0]))
        return n.normalized()
    # two subsequent stroke segments
    e1 = it_next.object.point_2d - it.object.point_2d
    e2 = it.object.point_2d - it_prev.object.point_2d
    n1 = Vector((e1[1], -e1[0])).normalized()
    n2 = Vector((e2[1], -e2[0])).normalized()
    n = (n1 + n2)
    return n.normalized()
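# The 2D-normal construction used above, shown on plain vectors (a sketch, not
# the Freestyle iterator API): a segment direction (x, y) is rotated to (y, -x)
# and normalized, and interior vertices average the two adjacent segment normals.
from mathutils import Vector

e = Vector((1.0, 0.0))                   # segment direction
n = Vector((e[1], -e[0])).normalized()   # its 2D normal
print(n)                                 # Vector((0.0, -1.0))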
def getVector(self, point):
    vect = Vector((0.0, 0.0, 0.0))
    for n in range(0, len(self.guides)):
        guide = self.guides[n]
        weight = self.weights[n]
        vect += guide.getVector(point).normalized() * weight
    return vect.normalized()
def focus_view_on(region_3d, location): r3d = region_3d a = r3d.view_location.copy() b = location mm = r3d.view_matrix.inverted() vr = mm.to_3x3() loc = mm.translation n = (a-loc).cross(b-loc).normalized() alp = math.acos( max(-1.0,min(1.0, (a-loc).normalized().dot( (b-loc).normalized() ) ))) zero = Vector() u0,v0,w0 = vr.transposed() u = rot_on( zero, n, alp, u0 ) v = rot_on( zero, n, alp, v0 ) w = rot_on( zero, n, alp, w0 ) if bpy.context.user_preferences.inputs.view_rotate_method == 'TURNTABLE': ez = Vector((0,0,1)) u2 = ez.cross(w) v2 = w.cross(u2) u,v = u2,v2 vr2 = Matrix((u,v,w)).transposed() mm2 = vr2.to_4x4() mm2[0][3] = loc[0] mm2[1][3] = loc[1] mm2[2][3] = loc[2] dist0 = (loc-location).length r3d.view_distance = dist0 r3d.view_matrix = mm2.inverted()
def _apply_planer_map(bm, uv_layer, size, offset, rotation, tex_aspect): scale = 1.0 / size sx = 1.0 * scale sy = 1.0 * scale ofx = offset[0] ofy = offset[1] rz = rotation * pi / 180.0 aspect = tex_aspect sel_faces = [f for f in bm.faces if f.select] # calculate average of normal n_ave = Vector((0.0, 0.0, 0.0)) for f in sel_faces: n_ave = n_ave + f.normal q = n_ave.rotation_difference(Vector((0.0, 0.0, 1.0))) # update UV coordinate for f in sel_faces: for l in f.loops: co = compat.matmul(q, l.vert.co) x = co.x * sx y = co.y * sy u = x * cos(rz) - y * sin(rz) + ofx v = -x * aspect * sin(rz) - y * aspect * cos(rz) + ofy l[uv_layer].uv = Vector((u, v))
def get_marker_coordinates_in_pixels(clip_size, track, frame_number):
    marker = track.markers.find_frame(frame_number)
    return Vector((marker.co[0] * clip_size[0], marker.co[1] * clip_size[1]))
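# Hedged usage sketch: run from a Movie Clip Editor with a clip loaded and a
# track selected; `space_data.clip` and the active track are assumptions about
# the caller's context, not part of the helper above.
import bpy

clip = bpy.context.space_data.clip
track = clip.tracking.tracks.active
frame = bpy.context.scene.frame_current
print(get_marker_coordinates_in_pixels(clip.size, track, frame))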
def InitGlobals(): CurvesIntersector.ResetGlobals() global algoPOV global algoDIR algo = bpy.context.scene.curvetools.IntersectCurvesAlgorithm if algo == 'From View': regionView3D = Util.GetFirstRegionView3D() if regionView3D is None: print("### ERROR: regionView3D is None. Stopping.") return viewPerspective = regionView3D.view_perspective print("--", "viewPerspective:", viewPerspective) if viewPerspective == 'ORTHO': viewMatrix = regionView3D.view_matrix print("--", "viewMatrix:") print(viewMatrix) algoDIR = Vector((viewMatrix[2][0], viewMatrix[2][1], viewMatrix[2][2])) print("--", "algoDIR:", algoDIR) # ## TODO: doesn't work properly if viewPerspective == 'PERSP': viewMatrix = regionView3D.view_matrix print("--", "viewMatrix:") print(viewMatrix) algoPOV = regionView3D.view_location.copy() print("--", "algoPOV:", algoPOV) otherPOV = Vector((viewMatrix[0][3], viewMatrix[1][3], viewMatrix[2][3])) print("--", "otherPOV:", otherPOV) localPOV = Vector((0, 0, 0)) globalPOV = viewMatrix * localPOV print("--", "globalPOV:", globalPOV) perspMatrix = regionView3D.perspective_matrix print("--", "perspMatrix:") print(perspMatrix) globalPOVPersp = perspMatrix * localPOV print("--", "globalPOVPersp:", globalPOVPersp) if viewPerspective == 'CAMERA': camera = bpy.context.scene.camera if camera is None: print("### ERROR: camera is None. Stopping.") return print("--", "camera:", camera) cameraData = camera.data print("--", "cameraData.type:", cameraData.type) cameraMatrix = camera.matrix_world print("--", "cameraMatrix:") print(cameraMatrix) if cameraData.type == 'ORTHO': cameraMatrix = camera.matrix_world # algoDIR = Vector((cameraMatrix[2][0], cameraMatrix[2][1], cameraMatrix[2][2])) algoDIR = Vector((- cameraMatrix[0][2], - cameraMatrix[1][2], - cameraMatrix[2][2])) print("--", "algoDIR:", algoDIR) if cameraData.type == 'PERSP': algoPOV = camera.location.copy() print("--", "algoPOV:", algoPOV)
fullPath = os.path.join( folder, imgPath ) img = bpy.data.images.load(fullPath) img = bpy.data.images.get(imgPath) allimages.append(img) bpy.context.scene.render.resolution_x = img.size[0]*n bpy.context.scene.render.resolution_y = img.size[1]*n bpy.context.scene.use_nodes = True tree = bpy.context.scene.node_tree links = tree.links chunks = [allimages[x:x+n*n] for x in range(0, len(allimages), n*n)] for i, images in enumerate(chunks): for every_node in tree.nodes: tree.nodes.remove(every_node) image_location = Vector((0,0)) offset = Vector((0,-35)) comp_node = tree.nodes.new('CompositorNodeComposite') comp_node.location = 600,0 count = 0 for im in images: image_node = tree.nodes.new(type='CompositorNodeImage') image_node.image = im image_location += offset image_node.location = image_location image_node.hide = True image_node.width_hidden = 60 translate_node = tree.nodes.new(type='CompositorNodeTranslate') translate_node.location = image_location + Vector((170,0)) translate_node.hide = True translate_node.width_hidden = 60
def get_dimensions(obs):
    minx, miny, minz, maxx, maxy, maxz = get_bounds_snappable(obs)
    bbmin = Vector((minx, miny, minz))
    bbmax = Vector((maxx, maxy, maxz))
    dim = Vector((maxx - minx, maxy - miny, maxz - minz))
    return dim, bbmin, bbmax
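# Hedged usage sketch: measure the combined bounds of the current selection.
# get_bounds_snappable() is assumed to come from the same module as the helper.
import bpy

dim, bbmin, bbmax = get_dimensions(bpy.context.selected_objects)
print("size:", dim, "min corner:", bbmin, "max corner:", bbmax)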
import pytest
from mathutils import Vector

from bpypolyskel import bpypolyskel

verts = [
    Vector((4.0, 10.0, 0.0)),
    Vector((4.0, 3.0, 0.0)),
    Vector((0.0, 3.0, 0.0)),
    Vector((0.0, 0.0, 0.0)),
    Vector((12.0, 0.0, 0.0)),
    Vector((12.0, 3.0, 0.0)),
    Vector((8.0, 3.0, 0.0)),
    Vector((8.0, 10.0, 0.0))
]
unitVectors = None
holesInfo = None
firstVertIndex = 0
numPolygonVerts = 8
faces = []


@pytest.mark.dependency()
@pytest.mark.timeout(10)
def test_polygonize():
    global faces
    faces = bpypolyskel.polygonize(verts, firstVertIndex, numPolygonVerts, holesInfo, 0.0, 0.5, None, unitVectors)
def _setupNodes(self, context): if not self._needSetupNodes(context): # compositor nodes were already setup or even changes already # do nothing to prevent nodes damage return # Enable backdrop for all compositor spaces def setup_space(space): space.show_backdrop = True CLIP_spaces_walk(context, True, 'NODE_EDITOR', 'NODE_EDITOR', setup_space) sc = context.space_data scene = context.scene scene.use_nodes = True tree = scene.node_tree clip = sc.clip need_stabilization = False # Remove all the nodes if they came from default node setup. # This is simplest way to make it so final node setup is # is correct. self._wipeDefaultNodes(tree) # create nodes rlayer_fg = self._findOrCreateNode(tree, 'CompositorNodeRLayers') rlayer_bg = tree.nodes.new(type='CompositorNodeRLayers') composite = self._findOrCreateNode(tree, 'CompositorNodeComposite') movieclip = tree.nodes.new(type='CompositorNodeMovieClip') distortion = tree.nodes.new(type='CompositorNodeMovieDistortion') if need_stabilization: stabilize = tree.nodes.new(type='CompositorNodeStabilize2D') scale = tree.nodes.new(type='CompositorNodeScale') invert = tree.nodes.new(type='CompositorNodeInvert') add_ao = tree.nodes.new(type='CompositorNodeMixRGB') add_shadow = tree.nodes.new(type='CompositorNodeMixRGB') mul_shadow = tree.nodes.new(type='CompositorNodeMixRGB') mul_image = tree.nodes.new(type='CompositorNodeMixRGB') vector_blur = tree.nodes.new(type='CompositorNodeVecBlur') alphaover = tree.nodes.new(type='CompositorNodeAlphaOver') viewer = tree.nodes.new(type='CompositorNodeViewer') # setup nodes movieclip.clip = clip distortion.clip = clip distortion.distortion_type = 'UNDISTORT' if need_stabilization: stabilize.clip = clip scale.space = 'RENDER_SIZE' rlayer_bg.scene = scene rlayer_bg.layer = "Background" rlayer_fg.scene = scene rlayer_fg.layer = "Foreground" add_ao.blend_type = 'ADD' add_ao.show_preview = False add_shadow.blend_type = 'ADD' add_shadow.show_preview = False mul_shadow.blend_type = 'MULTIPLY' mul_shadow.inputs["Fac"].default_value = 0.8 mul_shadow.show_preview = False mul_image.blend_type = 'MULTIPLY' mul_image.inputs["Fac"].default_value = 0.8 mul_image.show_preview = False vector_blur.factor = 0.75 # create links tree.links.new(movieclip.outputs["Image"], distortion.inputs["Image"]) if need_stabilization: tree.links.new(distortion.outputs["Image"], stabilize.inputs["Image"]) tree.links.new(stabilize.outputs["Image"], scale.inputs["Image"]) else: tree.links.new(distortion.outputs["Image"], scale.inputs["Image"]) tree.links.new(rlayer_bg.outputs["Alpha"], invert.inputs["Color"]) tree.links.new(invert.outputs["Color"], add_shadow.inputs[1]) tree.links.new(rlayer_bg.outputs["Shadow"], add_shadow.inputs[2]) tree.links.new(invert.outputs["Color"], add_ao.inputs[1]) tree.links.new(rlayer_bg.outputs["AO"], add_ao.inputs[2]) tree.links.new(add_ao.outputs["Image"], mul_shadow.inputs[1]) tree.links.new(add_shadow.outputs["Image"], mul_shadow.inputs[2]) tree.links.new(scale.outputs["Image"], mul_image.inputs[1]) tree.links.new(mul_shadow.outputs["Image"], mul_image.inputs[2]) tree.links.new(rlayer_fg.outputs["Image"], vector_blur.inputs["Image"]) tree.links.new(rlayer_fg.outputs["Z"], vector_blur.inputs["Z"]) tree.links.new(rlayer_fg.outputs["Speed"], vector_blur.inputs["Speed"]) tree.links.new(mul_image.outputs["Image"], alphaover.inputs[1]) tree.links.new(vector_blur.outputs["Image"], alphaover.inputs[2]) tree.links.new(alphaover.outputs["Image"], composite.inputs["Image"]) tree.links.new(alphaover.outputs["Image"], 
viewer.inputs["Image"]) # place nodes movieclip.location = Vector((-300.0, 350.0)) distortion.location = movieclip.location distortion.location += Vector((200.0, 0.0)) if need_stabilization: stabilize.location = distortion.location stabilize.location += Vector((200.0, 0.0)) scale.location = stabilize.location scale.location += Vector((200.0, 0.0)) else: scale.location = distortion.location scale.location += Vector((200.0, 0.0)) rlayer_bg.location = movieclip.location rlayer_bg.location -= Vector((0.0, 350.0)) invert.location = rlayer_bg.location invert.location += Vector((250.0, 50.0)) add_ao.location = invert.location add_ao.location[0] += 200 add_ao.location[1] = rlayer_bg.location[1] add_shadow.location = add_ao.location add_shadow.location -= Vector((0.0, 250.0)) mul_shadow.location = add_ao.location mul_shadow.location += Vector((200.0, -50.0)) mul_image.location = mul_shadow.location mul_image.location += Vector((300.0, 200.0)) rlayer_fg.location = rlayer_bg.location rlayer_fg.location -= Vector((0.0, 500.0)) vector_blur.location[0] = mul_image.location[0] vector_blur.location[1] = rlayer_fg.location[1] alphaover.location[0] = vector_blur.location[0] + 350 alphaover.location[1] = \ (vector_blur.location[1] + mul_image.location[1]) / 2 composite.location = alphaover.location composite.location += Vector((200.0, -100.0)) viewer.location = composite.location composite.location += Vector((0.0, 200.0)) # ensure no nodes were creates on position of existing node self._offsetNodes(tree) scene.render.alpha_mode = 'TRANSPARENT' if hasattr(scene, 'cycles'): scene.cycles.film_transparent = True
def _offsetNodes(tree):
    for a in tree.nodes:
        for b in tree.nodes:
            if a != b and a.location == b.location:
                b.location += Vector((40.0, 20.0))
def vec_abs(v1):
    """ componentwise absolute value for vectors """
    return Vector(abs(e1) for e1 in v1)
def vec_div(v1, v2):
    """ componentwise division for vectors """
    return Vector(e1 / e2 for e1, e2 in zip(v1, v2))
def vec_round(v1, precision=0):
    """ componentwise rounding for vectors """
    return Vector(round(e1, precision) for e1 in v1)
def read_tet_file(self): """Read the .tet file. """ data = list() # Open the file file = open(self.tet_file, 'r') # Read every line for line in file: # If line is a comment, ignore if '#' in line: continue # Get the number of vertices and tets if 'tet' in line: data_count = line continue data.append(line.strip('\n')) # Close the file file.close() # Get the number of vertices and faces from the data_count data_count = data_count.split(' ') number_vertices = int(data_count[1]) faces = int(data_count[2]) nmv.logger.log('The volumetric mesh has [%d] vertices and [%d] faces' % (number_vertices, faces)) # Get the vertices vertices = list() for i in range(number_vertices): vertex_data = data[i].split() vertex = Vector((float(vertex_data[0]), float(vertex_data[1]), float(vertex_data[2]))) vertices.append(vertex) # Get the faces faces = list() for i in range(number_vertices, len(data)): face_data = data[i].split() face = [ int(face_data[0]), int(face_data[1]), int(face_data[2]), int(face_data[3]) ] faces.append(face) # Create the tetrahedrons tetrahedrons = list() for i, face in enumerate(faces): tetrahedron = self.Tetrahedron() tetrahedron.index = i tetrahedron.face = face tetrahedrons.append(tetrahedron) # Create the tetrahedral mesh data structure tetrahedral_mesh_data = self.TetrahedralMeshData() tetrahedral_mesh_data.vertices = vertices tetrahedral_mesh_data.tetrahedrons = tetrahedrons # Return the mesh data return tetrahedral_mesh_data
def vec_remainder(v1, v2):
    """ componentwise remainder for vectors """
    return Vector(e1 % e2 for e1, e2 in zip(v1, v2))
# reproducibility. If the noise should be randomized, call
# randomize_distance_bias
laser_noise = [
    0.015798891682948433, 0.030289711937446478, 0.044832263895615468,
    0.022628361223111119
]


## If the laser noise has to be truly randomized, call this function prior
## to every scan
def randomize_distance_bias(noise_mu=0.0, noise_sigma=0.04):
    for idx in range(len(laser_angles)):
        laser_noise[idx] = random.gauss(noise_mu, noise_sigma)


mirror = [Vector([0, 0, 0]), Vector([0, 0, 0]), Vector([0, 0, 0])]
norm_mirror = Vector([0, 1, 0])


#Create a Triangle that represents the 45° mirror
#rotated around the main axis by <angle>
#The mirror has a side length of 1 meter which is more
#than enough to fit the small square mirror inside this triangle
def createMirror(angle):
    mirror[0].xyz = [0, -1, -1]
    mirror[1].xyz = [-1, 1, 1]
    mirror[2].xyz = [1, 1, 1]

    mirror[0].rotate(Matrix.Rotation(angle, 4, norm_mirror))
    mirror[1].rotate(Matrix.Rotation(angle, 4, norm_mirror))
    mirror[2].rotate(Matrix.Rotation(angle, 4, norm_mirror))

    n = geometry.normal(mirror[2], mirror[1], mirror[0])
def vec_mult(v1, v2):
    """ componentwise multiplication for vectors """
    return Vector(e1 * e2 for e1, e2 in zip(v1, v2))
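# Tiny demo of the componentwise helpers above (vec_abs, vec_div, vec_round,
# vec_remainder, vec_mult); only mathutils is assumed.
from mathutils import Vector

a = Vector((3.0, -4.5, 9.0))
b = Vector((2.0, 3.0, 4.0))
print(vec_abs(a))           # Vector((3.0, 4.5, 9.0))
print(vec_div(a, b))        # Vector((1.5, -1.5, 2.25))
print(vec_mult(a, b))       # Vector((6.0, -13.5, 36.0))
print(vec_remainder(a, b))  # componentwise a % b, Python modulo semantics
print(vec_round(a))         # rounded componentwise (Python round-half-to-even)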
def extract(properties, *args, **kargs): filepath = bpy.path.abspath(properties.filepath) imagepaths = list( filter(None, bpy.path.abspath(properties.imagepath).split(';'))) if not os.path.exists(filepath): if not filepath: raise AttributeError(f'VisualSfM filepath must be provided') raise AttributeError(f'Unable to locate VisualSfM file:\n"{filepath}"') # # TODO: read list.txt to get image paths to detect image size # resolution_x = int(scene.render.resolution_x * (scene.render.resolution_percentage / 100)) with open(filepath, 'r') as f: lines = f.readlines() # TODO: read optional calibration from file (tuple stored against each camera as key 'principal' (x, y) floating-point pixels) if len(lines) == 0 or not lines[0].startswith('NVM_V3'): raise Exception('Not a valid NVM file') cameras = {} trackers = {} data = { 'trackers': trackers, 'cameras': cameras, } total_cameras = int(lines[2]) total_points = int(lines[4 + total_cameras]) # numbers: optional negative, digit(s), optional decimal point and following digit(s), optional scientific notation "e-0" num = r'-?\d+(?:\.\d+)?(?:e[+-]\d+)?' camera_re = re.compile( rf'^(?P<name>.*?)\s+(?P<f>{num})\s+(?P<QW>{num})\s+(?P<QX>{num})\s+(?P<QY>{num})\s+(?P<QZ>{num})\s+(?P<X>{num})\s+(?P<Y>{num})\s+(?P<Z>{num})\s+(?P<k1>{num})\s+{num}\s*$' ) for i in range(int(total_cameras)): # each camera uses 1 line match = camera_re.match(lines[3 + i]) if not match: raise Exception( f'Camera {i} did not match the format specification') # find any filename that exists filenames = [ fp for fp in [ os.path.join(*parts) for parts in zip(imagepaths, [ match.group('name'), ] * len(imagepaths)) ] if os.path.exists(fp) ] if not filenames: # wasn't in a root path, do we search sub directories? if properties.subdirs: for imagepath in imagepaths: for root, dirs, files in os.walk(imagepath): if match.group('name') in files: filenames = [ os.path.join(root, match.group('name')) ] # still didn't find file? if not filenames: raise AttributeError( f'VisualSfM image not found for camera {i}:\n"{match.group("name")}""' ) # create cameras q = Quaternion( tuple( map(float, [ match.group('QW'), match.group('QX'), match.group('QY'), match.group('QZ') ]))) """ https://github.com/SBCV/Blender-Addon-Photogrammetry-Importer/blob/75189215dffde50dad106144111a48f29b1fed32/photogrammetry_importer/file_handler/nvm_file_handler.py#L55 VisualSFM CAMERA coordinate system is the standard CAMERA coordinate system in computer vision (not the same as in computer graphics like in bundler, blender, etc.) That means the y axis in the image is pointing downwards (not upwards) the camera is looking along the positive z axis (points in front of the camera show a positive z value) The camera coordinate system in computer vision VISUALSFM uses camera matrices, which are rotated around the x axis by 180 degree i.e. 
the y and z axis of the CAMERA MATRICES are inverted """ R = q.to_matrix() R.rotate(Euler((pi, 0, 0))) R.transpose() c = Vector( tuple( map(float, [match.group('X'), match.group('Y'), match.group('Z')]))) t = -1 * R @ c R.transpose() # TODO: confirm whether the distortion coefficient needs inverting cameras.setdefault( i, { 'filename': filenames[0], 'f': float(match.group('f')), 'k': (float(match.group('k1')), 0, 0), 't': tuple(t), 'R': tuple(map(tuple, tuple(R))), 'trackers': {}, }) if 'resolution' not in data: data.setdefault('resolution', get_image_size(filenames[0])) marker_re = re.compile( rf'^(?P<X>{num})\s+(?P<Y>{num})\s+(?P<Z>{num})\s+(?P<R>\d+)\s+(?P<G>\d+)\s+(?P<B>\d+)\s+(?P<num_measurements>{num})\s+(?P<measurements>.*?)\s*$' ) measurement_re = re.compile( rf'^(?P<image_idx>\d+)\s+(?P<feature_idx>\d+)\s+(?P<X>{num})\s+(?P<Y>{num}).*' ) for i in range(int(total_points)): # each point uses a single line idx = 5 + int(total_cameras) + i match = marker_re.match(lines[idx]) if not match: raise AttributeError( f'VisualSfM marker {i} did not match the format specification') trackers.setdefault( i, { 'co': tuple( map(float, [match.group('X'), match.group('Y'), match.group('Z')])), 'rgb': tuple( map(int, [match.group('R'), match.group('G'), match.group('B')])), }) cur = match.group('measurements') for m in range(int(match.group('num_measurements'))): measurement_match = measurement_re.match(cur) if not measurement_match: raise AttributeError( f'VisualSfM marker {i} did not match measurement {m} format specification' ) # Let the measurement be (mx, my), which is relative to principal point (typically image center) # As for the image coordinate system, X-axis points right, and Y-axis points downward, so Z-axis points forward. cameras[int( measurement_match.group('image_idx'))]['trackers'].setdefault( i, (float(measurement_match.group('X')), -1 * float(measurement_match.group('Y')))) cur = cur[measurement_match.end(len(measurement_match.groups()) ):].strip() return data
def scan_advanced(scanner_object, rotation_speed=25.0, simulation_fps=24, angle_resolution=0.5, max_distance=90, evd_file=None, noise_mu=0.0, noise_sigma=0.03, start_angle=-35, end_angle=50, evd_last_scan=True, add_blender_mesh=False, add_noisy_blender_mesh=False, simulation_time=0.0, laser_mirror_distance=0.05, world_transformation=Matrix()): inv_scan_x = scanner_object.inv_scan_x inv_scan_y = scanner_object.inv_scan_y inv_scan_z = scanner_object.inv_scan_z start_time = time.time() current_time = simulation_time delta_rot = angle_resolution * math.pi / 180 evd_storage = evd.evd_file(evd_file) xaxis = Vector([1, 0, 0]) yaxis = Vector([0, 1, 0]) zaxis = Vector([0, 0, 1]) rays = [] ray_info = [] angles = end_angle - start_angle steps_per_rotation = angles / angle_resolution time_per_step = (1.0 / rotation_speed) / steps_per_rotation lines = (end_angle - start_angle) / angle_resolution for line in range(int(lines)): for laser_idx in range(len(laser_angles)): current_angle = start_angle + float(line) * angles / float(lines) [ray, origion, laser_angle] = calculateRay(laser_angles[laser_idx], deg2rad(current_angle), laser_mirror_distance) #TODO: Use the origin to cast the ray. Requires changes to the blender patch rot_angle = 1e-6 + current_angle + 180.0 timestamp = ( (rot_angle - 180.0) / angle_resolution) * time_per_step rot_angle = rot_angle % 360.0 ray_info.append([deg2rad(rot_angle), laser_angle, timestamp]) rays.extend([ray[0], ray[1], ray[2]]) returns = blensor.scan_interface.scan_rays(rays, max_distance, inv_scan_x=inv_scan_x, inv_scan_y=inv_scan_y, inv_scan_z=inv_scan_z) reusable_vector = Vector([0.0, 0.0, 0.0, 0.0]) for i in range(len(returns)): idx = returns[i][-1] reusable_vector.xyzw = [ returns[i][1], returns[i][2], returns[i][3], 1.0 ] vt = (world_transformation * reusable_vector).xyz v = [returns[i][1], returns[i][2], returns[i][3]] distance_noise = laser_noise[idx % len(laser_noise)] + random.gauss( noise_mu, noise_sigma) vector_length = math.sqrt(v[0]**2 + v[1]**2 + v[2]**2) norm_vector = [ v[0] / vector_length, v[1] / vector_length, v[2] / vector_length ] vector_length_noise = vector_length + distance_noise reusable_vector.xyzw = [ norm_vector[0] * vector_length_noise, norm_vector[1] * vector_length_noise, norm_vector[2] * vector_length_noise, 1.0 ] v_noise = (world_transformation * reusable_vector).xyz evd_storage.addEntry(timestamp=ray_info[idx][2], yaw=(ray_info[idx][0] + math.pi) % (2 * math.pi), pitch=ray_info[idx][1], distance=vector_length, distance_noise=vector_length_noise, x=vt[0], y=vt[1], z=vt[2], x_noise=v_noise[0], y_noise=v_noise[1], z_noise=v_noise[2], object_id=returns[i][4], color=returns[i][5]) current_angle = start_angle + float(float(int(lines)) * angle_resolution) if evd_file: evd_storage.appendEvdFile() if not evd_storage.isEmpty(): scan_data = numpy.array(evd_storage.buffer) additional_data = None if scanner_object.store_data_in_mesh: additional_data = evd_storage.buffer if add_blender_mesh: mesh_utils.add_mesh_from_points_tf(scan_data[:, 5:8], "Scan", world_transformation, buffer=additional_data) if add_noisy_blender_mesh: mesh_utils.add_mesh_from_points_tf(scan_data[:, 8:11], "NoisyScan", world_transformation, buffer=additional_data) bpy.context.scene.update() end_time = time.time() scan_time = end_time - start_time print("Elapsed time: %.3f" % (scan_time)) return True, current_angle, scan_time
def MINMAX_INIT():
    return (Vector((+FLT_MAX, +FLT_MAX, +FLT_MAX)),
            Vector((-FLT_MAX, -FLT_MAX, -FLT_MAX)))
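# Minimal sketch of how MINMAX_INIT() is typically used: fold points into a
# running (min, max) bounding pair. FLT_MAX is assumed to be defined in the
# helper's module (e.g. sys.float_info.max); only mathutils is assumed here.
from mathutils import Vector

def minmax_points(points):
    bb_min, bb_max = MINMAX_INIT()
    for p in points:
        p = Vector(p)
        bb_min = Vector((min(bb_min.x, p.x), min(bb_min.y, p.y), min(bb_min.z, p.z)))
        bb_max = Vector((max(bb_max.x, p.x), max(bb_max.y, p.y), max(bb_max.z, p.z)))
    return bb_min, bb_max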
def build_circuit_spines(morphology, blue_config, gid, material=None): """Builds all the spines on a spiny neuron using a BBP circuit. :param morphology: A given morphology. :param blue_config: BBP circuit configuration file. :param gid: Neuron gid. :param material: Spine material. :return: A list of all the reconstructed spines along the neuron. """ # Keep a list of all the spines objects spines_objects = [] # Import brain import brain # Load the circuit, silently please circuit = brain.Circuit(blue_config) # Get all the synapses for the corresponding gid. synapses = circuit.afferent_synapses({int(gid)}) # Load all the template spines and ignore the verbose messages of loading templates_spines_list = load_spines( nmv.consts.Paths.SPINES_MESHES_LQ_DIRECTORY) # Apply the shader for spine_object in templates_spines_list: # Apply the shader to each spine mesh nmv.shading.set_material_to_object(spine_object, material) # Get the local to global transforms local_to_global_transform = circuit.transforms({int(gid)})[0] # Local_to_global_transform transformation_matrix = Matrix() for i in range(4): transformation_matrix[i][:] = local_to_global_transform[i] # Invert the transformation matrix transformation_matrix = transformation_matrix.inverted() # Create a timer to report the performance building_timer = nmv.utilities.timer.Timer() nmv.logger.header('Building spines') building_timer.start() # Load the synapses from the file number_spines = len(synapses) for i, synapse in enumerate(synapses): # Show progress nmv.utilities.time_line.show_iteration_progress( 'Spines', i, number_spines) """ Ignore soma synapses """ # If the post-synaptic section id is zero, then revoke it, and continue post_section_id = synapse.post_section() if post_section_id == 0: continue # Get the pre-and post-positions in the global coordinates pre_position = synapse.pre_center_position() post_position = synapse.post_center_position() # Transform the spine positions to the circuit coordinates pre_position = Vector( (pre_position[0], pre_position[1], pre_position[2])) post_position = Vector( (post_position[0], post_position[1], post_position[2])) post_position = transformation_matrix * post_position pre_position = transformation_matrix * pre_position # Emanate a spine spine_object = emanate_a_spine(templates_spines_list, post_position, pre_position, i) # Append the spine to spines list spines_objects.append(spine_object) # Done nmv.utilities.time_line.show_iteration_progress('Spines', number_spines, number_spines, done=True) # Link the spines to the scene in a single step nmv.logger.info('Linking spines to the scene') for i in spines_objects: nmv.scene.link_object_to_scene(i) # Report the time building_timer.end() nmv.logger.info('Spines: [%f] seconds' % building_timer.duration()) # Delete the template spines nmv.scene.ops.delete_list_objects(templates_spines_list) # Return the spines objects list return spines_objects
def mode_callback(obj, data): for obj in set(bpy.context.selected_objects + [bpy.context.active_object]): if (not obj.data or not isinstance( obj.data, (bpy.types.Mesh, bpy.types.Curve, bpy.types.TextCurve)) or not obj.BIMObjectProperties.ifc_definition_id or not bpy.context.scene.BIMProjectProperties.is_authoring): return product = IfcStore.get_file().by_id( obj.BIMObjectProperties.ifc_definition_id) parametric = ifcopenshell.util.element.get_psets(product).get( "EPset_Parametric") if not parametric or parametric["Engine"] != "BlenderBIM.DumbProfile": return if obj.mode == "EDIT": IfcStore.edited_objs.add(obj) bm = bmesh.from_edit_mesh(obj.data) bmesh.ops.dissolve_limit(bm, angle_limit=pi / 180 * 1, verts=bm.verts, edges=bm.edges) bmesh.update_edit_mesh(obj.data) bm.free() else: material_usage = ifcopenshell.util.element.get_material(product) x, y = obj.dimensions[0:2] if not material_usage.CardinalPoint: new_origin = obj.matrix_world @ (Vector(obj.bound_box[0]) + (Vector((x, y, 0)) / 2)) elif material_usage.CardinalPoint == 1: new_origin = obj.matrix_world @ Vector(obj.bound_box[4]) elif material_usage.CardinalPoint == 2: new_origin = obj.matrix_world @ (Vector(obj.bound_box[0]) + (Vector((x, 0, 0)) / 2)) elif material_usage.CardinalPoint == 3: new_origin = obj.matrix_world @ Vector(obj.bound_box[0]) elif material_usage.CardinalPoint == 4: new_origin = obj.matrix_world @ (Vector(obj.bound_box[4]) + (Vector((0, y, 0)) / 2)) elif material_usage.CardinalPoint == 5: new_origin = obj.matrix_world @ (Vector(obj.bound_box[0]) + (Vector((x, y, 0)) / 2)) elif material_usage.CardinalPoint == 6: new_origin = obj.matrix_world @ (Vector(obj.bound_box[0]) + (Vector((0, y, 0)) / 2)) elif material_usage.CardinalPoint == 7: new_origin = obj.matrix_world @ Vector(obj.bound_box[7]) elif material_usage.CardinalPoint == 8: new_origin = obj.matrix_world @ (Vector(obj.bound_box[3]) + (Vector((x, 0, 0)) / 2)) elif material_usage.CardinalPoint == 9: new_origin = obj.matrix_world @ Vector(obj.bound_box[3]) if (obj.matrix_world.translation - new_origin).length < 0.001: return obj.data.transform( Matrix.Translation( (obj.matrix_world.inverted().to_quaternion() @ (obj.matrix_world.translation - new_origin)))) obj.matrix_world.translation = new_origin
def sendMesh(scene): context = bpy.context if bpy.context.edit_object is not None: currentObject = context.edit_object.name else: currentObject = context.active_object.name for name, target in Splash._targets.items(): currentTime = time.clock_gettime( time.CLOCK_REALTIME) - target._startTime worldMatrix = target._object.matrix_world normalMatrix = worldMatrix.copy() normalMatrix.invert() normalMatrix.transpose() if currentObject == name and bpy.context.edit_object is not None: if currentTime - target._frameTimeMesh < target._updatePeriodEdit: continue target._frameTimeMesh = currentTime mesh = bmesh.from_edit_mesh(target._object.data) bufferVert = bytearray() bufferPoly = bytearray() buffer = bytearray() vertNbr = 0 polyNbr = 0 uv_layer = mesh.loops.layers.uv.active if uv_layer is None: bpy.ops.uv.smart_project() uv_layer = mesh.loops.layers.uv.active for face in mesh.faces: polyNbr += 1 bufferPoly += struct.pack("i", len(face.verts)) for loop in face.loops: bufferPoly += struct.pack("i", vertNbr) v = loop.vert.co tmpVector = Vector((v[0], v[1], v[2], 1.0)) tmpVector = worldMatrix * tmpVector v = Vector((tmpVector[0], tmpVector[1], tmpVector[2])) n = loop.vert.normal tmpVector = Vector((n[0], n[1], n[2], 0.0)) tmpVector = normalMatrix * tmpVector n = Vector((tmpVector[0], tmpVector[1], tmpVector[2])) if uv_layer is None: uv = Vector((0, 0)) else: uv = loop[uv_layer].uv bufferVert += struct.pack("ffffffff", v[0], v[1], v[2], uv[0], uv[1], n[0], n[1], n[2]) vertNbr += 1 buffer += struct.pack("ii", vertNbr, polyNbr) buffer += bufferVert buffer += bufferPoly target._meshWriter.push(buffer, floor(currentTime * 1e9)) else: if currentTime - target._frameTimeMesh < target._updatePeriodObject: continue target._frameTimeMesh = currentTime if type(target._object.data) is bpy.types.Mesh: # Look for UV coords, create them if needed if len(target._object.data.uv_layers) == 0: bpy.ops.object.editmode_toggle() bpy.ops.uv.smart_project() bpy.ops.object.editmode_toggle() # Apply the modifiers to the object mesh = target._object.to_mesh(context.scene, True, 'PREVIEW') bufferVert = bytearray() bufferPoly = bytearray() buffer = bytearray() vertNbr = 0 polyNbr = 0 for poly in mesh.polygons: polyNbr += 1 bufferPoly += struct.pack("i", len(poly.loop_indices)) for idx in poly.loop_indices: bufferPoly += struct.pack("i", vertNbr) v = mesh.vertices[mesh.loops[idx].vertex_index].co tmpVector = Vector((v[0], v[1], v[2], 1.0)) tmpVector = worldMatrix * tmpVector v = Vector( (tmpVector[0], tmpVector[1], tmpVector[2])) n = mesh.vertices[ mesh.loops[idx].vertex_index].normal tmpVector = Vector((n[0], n[1], n[2], 0.0)) tmpVector = normalMatrix * tmpVector n = Vector( (tmpVector[0], tmpVector[1], tmpVector[2])) if len(mesh.uv_layers) != 0: uv = mesh.uv_layers[0].data[idx].uv else: uv = Vector((0, 0)) bufferVert += struct.pack("ffffffff", v[0], v[1], v[2], uv[0], uv[1], n[0], n[1], n[2]) vertNbr += 1 buffer += struct.pack("ii", vertNbr, polyNbr) buffer += bufferVert buffer += bufferPoly target._meshWriter.push(buffer, floor(currentTime * 1e9)) bpy.data.meshes.remove(mesh)
def OD_PasteFromExternal(_name, size): file = tempfile.gettempdir() + os.sep + "ODVertexData.txt" if os.path.exists(file): f = open(file) lines = f.readlines() f.close() else: print("Cannot find file") vertline = [] polyline = [] uvMaps = [] morphMaps = [] weightMaps = [] count = 0 #Parse File to see what Data we have here for line in lines: if line.startswith("VERTICES:"): vertline.append( [int(line.strip().split(":")[1].strip()), count]) if line.startswith("POLYGONS:"): polyline.append( [int(line.strip().split(":")[1].strip()), count]) if line.startswith("UV:"): uvMaps.append( [line.strip().split(":")[1:], count] ) # changed this to add the # of uv coordinates into the mix if line.startswith("MORPH"): morphMaps.append([line.split(":")[1].strip(), count]) if line.startswith("WEIGHT"): weightMaps.append([line.split(":")[1].strip(), count]) count += 1 #create Points for v in vertline: verts = [] for i in range(v[1] + 1, v[1] + v[0] + 1): x = lines[i].split(" ") pt = [ float(x[0].strip()), float(x[2].strip()) * -1, float(x[1].strip()) ] verts.append(pt) blenderMats = bpy.data.materials[:] blenderMatsNames = [] for bm in blenderMats: blenderMatsNames.append(bm.name) for polygons in polyline: faces = [] facesMat = [] objMats = [] for i in range(polygons[1] + 1, polygons[1] + polygons[0] + 1): pts = [] surf = (lines[i].split(";;")[1]).strip() for x in (lines[i].split(";;")[0]).strip().split(","): pts.append(int(x.strip())) faces.append(pts) if surf not in blenderMatsNames: blenderMatsNames.append(surf) bpy.data.materials.new(surf) #obj.data.materials.append(blenderSurf) if surf not in objMats: objMats.append(surf) facesMat.append(surf) #remove old object first obj = bpy.context.active_object if obj != None: me = obj.data bpy.ops.object.mode_set(mode='OBJECT') facesr = me.polygons for f in facesr: f.select = 1 bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.delete(type='FACE') bpy.ops.object.mode_set(mode='OBJECT') mesh = me mesh.from_pydata(verts, [], faces) mesh.update() mesh.update() else: # the rest keep like in this example # here the mesh data is constructed mesh = bpy.data.meshes.new(_name) mesh.from_pydata(verts, [], faces) mesh.update() mesh.update() # now generate an object to hold this data obj = bpy.data.objects.new(_name, mesh) # link the object to the scene (it is not visible so far!) 
bpy.context.scene.objects.link(obj) for i in range(len(obj.material_slots)): bpy.ops.object.material_slot_remove({'object': obj}) for mat in objMats: obj.data.materials.append(bpy.data.materials.get(mat)) for i in range(len(faces)): obj.data.polygons[i].material_index = objMats.index( facesMat[i]) # create vertex group lookup dictionary for names vgroup_names = { vgroup.index: vgroup.name for vgroup in obj.vertex_groups } # create dictionary of vertex group assignments per vertex vgroups = { v.index: [vgroup_names[g.group] for g in v.groups] for v in obj.data.vertices } for x in obj.vertex_groups: obj.vertex_groups.remove(x) #setup weightmaps for weightMap in weightMaps: vg = obj.vertex_groups.new(weightMap[0]) count = 0 for v in range(len(verts)): if lines[weightMap[1] + 1 + count].strip() != "None": vg.add([v], float(lines[weightMap[1] + 1 + count].strip()), "ADD") count += 1 if obj.data.shape_keys != None: bpy.ops.object.shape_key_remove(all=True) #create Base Shape Key if len(morphMaps) > 0: shapeKey = obj.shape_key_add(from_mix=False) #shapeKey.name = "Base" for vert in obj.data.vertices: shapeKey.data[vert.index].co = vert.co #Set Morph Map Values for morphMap in morphMaps: shapeKey = obj.shape_key_add(from_mix=False) shapeKey.name = morphMap[0] count = 0 for vert in obj.data.vertices: if lines[morphMap[1] + 1 + count].strip() != "None": x = float(lines[morphMap[1] + 1 + count].split(" ")[0]) y = float(lines[morphMap[1] + 1 + count].split(" ")[1]) z = float( lines[morphMap[1] + 1 + count].split(" ")[2]) * -1 newVert = Vector( (vert.co[0] + x, vert.co[1] + z, vert.co[2] + y)) shapeKey.data[vert.index].co = newVert count += 1 for x in mesh.uv_textures: mesh.uv_textures.remove(x) for uvMap in uvMaps: uv = mesh.uv_textures.new(uvMap[0][0]) bm = bmesh.new() bm.from_mesh(mesh) bm.faces.ensure_lookup_table() uv_layer = bm.loops.layers.uv[uv.name] count = 0 for i in range(int(uvMap[0][1])): line = lines[uvMap[1] + 1 + count] split = line.split(":") if len( split ) > 3: #check the format to see if it has a point and poly classifier, determining with that, whether the uv is discontinuous or continuous face = (bm.faces[int(split[2])].loops[count % (len( bm.faces[int(split[2])].loops))])[uv_layer].uv = [ float(split[0].split(" ")[0]), float(split[0].split(" ")[1]) ] else: pass count += 1 bm.to_mesh(mesh) bpy.context.scene.update() # return the object to the function caller for further stuff return obj
def importAnimations(self): lf = self.oid.oif trac = lf.getByPointer(self.link_TRAC) trac.getData() #trac.print() for i in bpy.context.scene.objects: i.select = False #deselect all objects bpy.context.scene.objects.active = None for anim in trac.data.anims: #anim = trac.data.anims[1] tram = lf.getByPointer(anim.link_TRAM) if tram.name != 'TRAMKONCOMrun_throw_fw': continue print(str(anim.weight) + ' - ' + tram.name + ':') tram.getData() #tram.print() tram.data.readBodyparts() tram.data.readPos() #print('bodyparts:') #print(tram.data.bodyparts) self.object.location = Vector(tram.data.pos) bpy.context.scene.frame_start = 0 bpy.context.scene.frame_end = tram.data.frames_num - 1 keyInterp = bpy.context.user_preferences.edit.keyframe_new_interpolation_type bpy.context.user_preferences.edit.keyframe_new_interpolation_type ='LINEAR' #self.object.animation_data_create() #self.object.animation_data.action = bpy.data.actions.new(name=tram.name) g = 3.1416/180 rotate = bpy.ops.transform.rotate for i in range(tram.data.bodyparts_num): #if i > 0: continue #bone = self.bones[i] bone = self.object.pose.bones[i] bpart = self.bodyparts[i] print(bpart) print(bone) #bpart.select = True #bpy.context.scene.objects.active = bpart trambp = tram.data.bodyparts[i] #obj = bpy.context.object #print(obj) #bone.animation_data_create() #bone.animation_data.action = bpy.data.actions.new(name=tram.name+"_"+BODYPARTS[i]) angles_sum = [0,0,0] frameN = 0 for frame in trambp.frames: frameN += frame.frames_num bpy.context.scene.frame_set(frameN) angles = [math.radians(a) for a in frame.angles] angles_delta = [self.getNearestAngle(a, b) for a,b in zip(angles_sum, angles)] angles_sum = [a+b for a,b in zip(angles_sum, angles_delta)] mode = 'ZYX' euler = Euler(angles_sum, mode) #bone.matrix = euler.to_matrix().to_4x4() bone.rotation_mode = mode bone.rotation_euler = euler bone.keyframe_insert(data_path='rotation_euler') #bone.rotation_mode = 'QUATERNION' #bone.rotation_quaternion = euler.to_quaternion() #bone.matrix = euler.to_quaternion().to_matrix().to_4x4() #bone.keyframe_insert(data_path='rotation_quaternion') #if i == 0: # print(frameN) # print(str(frame.angles) + ' - ' + str(bone.rotation_euler)) #bpart.select = False bpy.context.user_preferences.edit.keyframe_new_interpolation_type = keyInterp
def get_height(self, o):
    return (Vector(o.bound_box[1]) - Vector(o.bound_box[0])).length
def create_profile(self):
    # A cube
    verts = [
        Vector((-1, -1, -1)),
        Vector((-1, -1, 1)),
        Vector((-1, 1, -1)),
        Vector((-1, 1, 1)),
        Vector((1, -1, -1)),
        Vector((1, -1, 1)),
        Vector((1, 1, -1)),
        Vector((1, 1, 1)),
    ]
    edges = []
    faces = [
        [0, 2, 3, 1],
        [2, 3, 7, 6],
        [4, 5, 7, 6],
        [0, 1, 5, 4],
        [1, 3, 7, 5],
        [0, 2, 6, 4],
    ]
    ifc_classes = ifcopenshell.util.type.get_applicable_entities(self.relating_type.is_a(), self.file.schema)
    # Standard cases are deprecated, so let's cull them
    ifc_class = [c for c in ifc_classes if "StandardCase" not in c][0]
    mesh = bpy.data.meshes.new(name="Dumb Profile")
    mesh.from_pydata(verts, edges, faces)
    obj = bpy.data.objects.new(tool.Model.generate_occurrence_name(self.relating_type, ifc_class), mesh)
    obj.location = self.location
    if self.collection_obj and self.collection_obj.BIMObjectProperties.ifc_definition_id:
        obj.location[2] = self.collection_obj.location[2]
    self.collection.objects.link(obj)
    bpy.ops.bim.assign_class(obj=obj.name, ifc_class=ifc_class, should_add_representation=False)
    if self.relating_type.is_a() in ["IfcBeamType", "IfcMemberType"]:
        obj.rotation_euler[0] = math.pi / 2
        obj.rotation_euler[2] = math.pi / 2
    element = self.file.by_id(obj.BIMObjectProperties.ifc_definition_id)
    blenderbim.core.type.assign_type(tool.Ifc, tool.Type, element=tool.Ifc.get_entity(obj), type=self.relating_type)
    profile_set_usage = ifcopenshell.util.element.get_material(element)
    pset = ifcopenshell.api.run("pset.add_pset", self.file, product=element, name="EPset_Parametric")
    ifcopenshell.api.run("pset.edit_pset", self.file, pset=pset, properties={"Engine": "BlenderBIM.DumbProfile"})
    MaterialData.load(self.file)
    obj.select_set(True)
    return obj
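# create_profile() relies on Mesh.from_pydata() to turn the vert/face lists
# into geometry before any IFC bookkeeping happens. A minimal standalone
# sketch of just that step for a Blender 2.8+ scene; names are illustrative
# and no BlenderBIM or ifcopenshell is required.
import bpy
from mathutils import Vector

verts = [Vector((x, y, z)) for x in (-1, 1) for y in (-1, 1) for z in (-1, 1)]
faces = [[0, 2, 3, 1], [2, 3, 7, 6], [4, 5, 7, 6],
         [0, 1, 5, 4], [1, 3, 7, 5], [0, 2, 6, 4]]

mesh = bpy.data.meshes.new(name="Profile Sketch")
mesh.from_pydata(verts, [], faces)
mesh.update()
obj = bpy.data.objects.new("Profile Sketch", mesh)
bpy.context.collection.objects.link(obj)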
def get_linear_length(self, o):
    x = (Vector(o.bound_box[4]) - Vector(o.bound_box[0])).length
    y = (Vector(o.bound_box[3]) - Vector(o.bound_box[0])).length
    z = (Vector(o.bound_box[1]) - Vector(o.bound_box[0])).length
    return max(x, y, z)
def integrate(vecs, times):
    """Cumulative trapezoidal integration of sampled vectors, starting from the zero vector."""
    res = [Vector((0.0, 0.0, 0.0))]
    for i in range(len(times) - 1):
        res.append(res[-1] + (vecs[i + 1] + vecs[i]) / 2 * (times[i + 1] - times[i]))
    return res
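# Usage sketch for the integrate() helper above. It only needs mathutils,
# which is also available outside Blender as a standalone package. The sample
# velocities and timestamps below are illustrative data, not from the source.
from mathutils import Vector

velocities = [Vector((0.0, 0.0, 0.0)),
              Vector((1.0, 0.0, 0.0)),
              Vector((1.0, 1.0, 0.0))]
timestamps = [0.0, 0.5, 1.0]

positions = integrate(velocities, timestamps)
# positions[0] is always the origin; each later entry adds the trapezoid
# 0.5 * (v[i] + v[i+1]) * (t[i+1] - t[i]) to the previous one.
print([tuple(p) for p in positions])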
def invoke(self, context, event):
    if context.space_data.type == 'VIEW_3D':
        # print('name', __name__, __package__)
        preferences = context.user_preferences.addons[__name__].preferences
        create_new_obj = preferences.create_new_obj
        if context.mode == 'OBJECT' and (create_new_obj or context.object is None or context.object.type != 'MESH'):
            mesh = bpy.data.meshes.new("")
            obj = bpy.data.objects.new("", mesh)
            context.scene.objects.link(obj)
            context.scene.objects.active = obj

        # bgl.glEnable(bgl.GL_POINT_SMOOTH)
        self.is_editmode = bpy.context.object.data.is_editmode
        bpy.ops.object.mode_set(mode='EDIT')
        context.space_data.use_occlude_geometry = True

        self.scale = context.scene.unit_settings.scale_length
        self.unit_system = context.scene.unit_settings.system
        self.separate_units = context.scene.unit_settings.use_separate
        self.uinfo = get_units_info(self.scale, self.unit_system, self.separate_units)

        grid = context.scene.unit_settings.scale_length / context.space_data.grid_scale
        relative_scale = preferences.relative_scale
        self.scale = grid / relative_scale
        self.rd = bpy.utils.units.to_value(self.unit_system, 'LENGTH', str(1 / self.scale))

        incremental = preferences.incremental
        self.incremental = bpy.utils.units.to_value(self.unit_system, 'LENGTH', str(incremental))

        self.use_rotate_around_active = context.user_preferences.view.use_rotate_around_active
        context.user_preferences.view.use_rotate_around_active = True

        self.select_mode = context.tool_settings.mesh_select_mode[:]
        context.tool_settings.mesh_select_mode = (True, True, True)

        self.region = context.region
        self.rv3d = context.region_data
        self.rotMat = self.rv3d.view_matrix.copy()
        self.obj = bpy.context.active_object
        self.obj_matrix = self.obj.matrix_world.copy()
        self.obj_matinv = self.obj_matrix.inverted()
        # self.obj_glmatrix = bgl.Buffer(bgl.GL_FLOAT, [4, 4], self.obj_matrix.transposed())
        self.bm = bmesh.from_edit_mesh(self.obj.data)
        self.cache = SnapCache()

        self.location = Vector()
        self.list_verts = []
        self.list_verts_co = []
        self.bool_update = False
        self.vector_constrain = ()
        self.navigation_keys = NavigationKeys(context)
        self.keytab = False
        self.keyf8 = False
        self.type = 'OUT'
        self.len = 0
        self.length_entered = ""
        self.line_pos = 0

        self.out_color = preferences.out_color
        self.face_color = preferences.face_color
        self.edge_color = preferences.edge_color
        self.vert_color = preferences.vert_color
        self.center_color = preferences.center_color
        self.perpendicular_color = preferences.perpendicular_color
        self.constrain_shift_color = preferences.constrain_shift_color

        self.axis_x_color = tuple(context.user_preferences.themes[0].user_interface.axis_x)
        self.axis_y_color = tuple(context.user_preferences.themes[0].user_interface.axis_y)
        self.axis_z_color = tuple(context.user_preferences.themes[0].user_interface.axis_z)

        self.intersect = preferences.intersect
        self.create_face = preferences.create_face
        self.outer_verts = preferences.outer_verts
        self.snap_to_grid = preferences.increments_grid

        self._handle = bpy.types.SpaceView3D.draw_handler_add(self.draw_callback_px, (context,), 'WINDOW', 'POST_VIEW')
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}
    else:
        self.report({'WARNING'}, "Active space must be a View3d")
        return {'CANCELLED'}
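# invoke() above does a lot of setup, but its core lifecycle is the standard
# modal-operator pattern: register a draw callback and a modal handler in
# invoke(), then remove the callback when the operator finishes. A stripped
# down sketch of just that pattern; the operator name and class are
# illustrative, not part of the original add-on.
import bpy

class VIEW3D_OT_modal_sketch(bpy.types.Operator):
    bl_idname = "view3d.modal_sketch"
    bl_label = "Modal Sketch"

    def draw_callback_px(self, context):
        pass  # drawing code would go here

    def modal(self, context, event):
        context.area.tag_redraw()
        if event.type in {'RIGHTMOUSE', 'ESC'}:
            bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')
            return {'CANCELLED'}
        return {'RUNNING_MODAL'}

    def invoke(self, context, event):
        if context.space_data.type != 'VIEW_3D':
            self.report({'WARNING'}, "Active space must be a View3d")
            return {'CANCELLED'}
        self._handle = bpy.types.SpaceView3D.draw_handler_add(
            self.draw_callback_px, (context,), 'WINDOW', 'POST_VIEW')
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}

# bpy.utils.register_class(VIEW3D_OT_modal_sketch)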
def get_width(self, o):
    x = (Vector(o.bound_box[4]) - Vector(o.bound_box[0])).length
    y = (Vector(o.bound_box[3]) - Vector(o.bound_box[0])).length
    return min(x, y)
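# Note on the three bound_box helpers above (get_height, get_linear_length,
# get_width): Blender stores object.bound_box as 8 corners in local space,
# with index 0 at (xmin, ymin, zmin), index 1 at (xmin, ymin, zmax),
# index 3 at (xmin, ymax, zmin) and index 4 at (xmax, ymin, zmin), so the
# differences used above measure the local Z, Y and X extents respectively.
# A standalone sketch of the same idea; the helper name and the object name
# are illustrative.
import bpy
from mathutils import Vector

def get_local_dimensions(o):
    """Return the (x, y, z) extents of an object's local bounding box."""
    origin = Vector(o.bound_box[0])
    x = (Vector(o.bound_box[4]) - origin).length
    y = (Vector(o.bound_box[3]) - origin).length
    z = (Vector(o.bound_box[1]) - origin).length
    return x, y, z

# print(get_local_dimensions(bpy.data.objects["Cube"]))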
def modal(self, context, event):
    if self.modal_navigation(context, event):
        return {'RUNNING_MODAL'}

    context.area.tag_redraw()

    if event.ctrl and event.type == 'Z' and event.value == 'PRESS':
        bpy.ops.ed.undo()
        self.vector_constrain = None
        self.list_verts_co = []
        self.list_verts = []
        self.list_edges = []
        self.list_faces = []
        self.obj = bpy.context.active_object
        self.obj_matrix = self.obj.matrix_world.copy()
        self.bm = bmesh.from_edit_mesh(self.obj.data)
        return {'RUNNING_MODAL'}

    if event.type == 'MOUSEMOVE' or self.bool_update:
        if self.rv3d.view_matrix != self.rotMat:
            self.rotMat = self.rv3d.view_matrix.copy()
            self.bool_update = True
            self.cache.bedge = None
        else:
            self.bool_update = False

        mval = Vector((event.mouse_region_x, event.mouse_region_y))

        self.location, self.type, self.geom, self.len = snap_utilities(
            self.cache, context, self.obj_matrix, self.bm, mval,
            outer_verts=(self.outer_verts and not self.keytab),
            constrain=self.vector_constrain,
            previous_vert=(self.list_verts[-1] if self.list_verts else None),
            ignore_obj=self.obj,
            increment=self.incremental)

        if self.snap_to_grid and self.type == 'OUT':
            loc = self.location / self.rd
            self.location = Vector((round(loc.x), round(loc.y), round(loc.z))) * self.rd

        if self.keyf8 and self.list_verts_co:
            lloc = self.list_verts_co[-1]
            orig, view_vec = region_2d_to_orig_and_view_vector(self.region, self.rv3d, mval)
            location = intersect_point_line(lloc, orig, (orig + view_vec))
            vec = (location[0] - lloc)
            ax, ay, az = abs(vec.x), abs(vec.y), abs(vec.z)
            vec.x = ax > ay > az or ax > az > ay
            vec.y = ay > ax > az or ay > az > ax
            vec.z = az > ay > ax or az > ax > ay
            if vec == Vector():
                self.vector_constrain = None
            else:
                vc = lloc + vec
                try:
                    if vc != self.vector_constrain[1]:
                        type = 'X' if vec.x else 'Y' if vec.y else 'Z' if vec.z else 'shift'
                        self.vector_constrain = [lloc, vc, type]
                except:
                    type = 'X' if vec.x else 'Y' if vec.y else 'Z' if vec.z else 'shift'
                    self.vector_constrain = [lloc, vc, type]

    if event.value == 'PRESS':
        if self.list_verts_co and (event.ascii in CharMap.ascii or event.type in CharMap.type):
            CharMap.modal(self, context, event)

        elif event.type in self.constrain_keys:
            self.bool_update = True
            if self.vector_constrain and self.vector_constrain[2] == event.type:
                self.vector_constrain = ()
            else:
                if event.shift:
                    if isinstance(self.geom, bmesh.types.BMEdge):
                        if self.list_verts:
                            loc = self.list_verts_co[-1]
                            self.vector_constrain = (loc, loc + self.geom.verts[1].co - self.geom.verts[0].co, event.type)
                        else:
                            self.vector_constrain = [self.obj_matrix * v.co for v in self.geom.verts] + [event.type]
                else:
                    if self.list_verts:
                        loc = self.list_verts_co[-1]
                    else:
                        loc = self.location
                    self.vector_constrain = [loc, loc + self.constrain_keys[event.type]] + [event.type]

        elif event.type == 'LEFTMOUSE':
            point = self.obj_matinv * self.location
            # with a constraint, the intersection can land on a different element than the selected one
            if self.vector_constrain and self.geom:
                geom2 = get_closest_edge(self.bm, point, .001)
            else:
                geom2 = self.geom
            self.vector_constrain = None
            self.list_verts_co = draw_line(self, self.obj, self.bm, geom2, point)
            bpy.ops.ed.undo_push(message="Undo draw line*")

        elif event.type == 'TAB':
            self.keytab = self.keytab is False
            if self.keytab:
                context.tool_settings.mesh_select_mode = (False, False, True)
            else:
                context.tool_settings.mesh_select_mode = (True, True, True)

        elif event.type == 'F8':
            self.vector_constrain = None
            self.keyf8 = self.keyf8 is False

    elif event.value == 'RELEASE':
        if event.type in {'RET', 'NUMPAD_ENTER'}:
            if self.length_entered != "" and self.list_verts_co:
                try:
                    text_value = bpy.utils.units.to_value(self.unit_system, 'LENGTH', self.length_entered)
                    vector = (self.location - self.list_verts_co[-1]).normalized()
                    location = (self.list_verts_co[-1] + (vector * text_value))
                    G_location = self.obj_matinv * location
                    self.list_verts_co = draw_line(self, self.obj, self.bm, self.geom, G_location)
                    self.length_entered = ""
                    self.vector_constrain = None
                except:  # ValueError:
                    self.report({'INFO'}, "Operation not supported yet")

        elif event.type in {'RIGHTMOUSE', 'ESC'}:
            if self.list_verts_co == [] or event.type == 'ESC':
                bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')
                context.tool_settings.mesh_select_mode = self.select_mode
                context.area.header_text_set()
                context.user_preferences.view.use_rotate_around_active = self.use_rotate_around_active
                if not self.is_editmode:
                    bpy.ops.object.editmode_toggle()
                return {'FINISHED'}
            else:
                self.vector_constrain = None
                self.list_verts = []
                self.list_verts_co = []
                self.list_faces = []

    a = ""
    if self.list_verts_co:
        if self.length_entered:
            pos = self.line_pos
            a = 'length: ' + self.length_entered[:pos] + '|' + self.length_entered[pos:]
        else:
            length = self.len
            length = convert_distance(length, self.uinfo)
            a = 'length: ' + length

    context.area.header_text_set("hit: %.3f %.3f %.3f %s"
                                 % (self.location[0], self.location[1], self.location[2], a))

    return {'RUNNING_MODAL'}
def location_3d_to_region_2d(region, rv3d, coord):
    prj = rv3d.perspective_matrix * Vector((coord[0], coord[1], coord[2], 1.0))
    width_half = region.width / 2.0
    height_half = region.height / 2.0
    return Vector((width_half + width_half * (prj.x / prj.w),
                   height_half + height_half * (prj.y / prj.w),
                   prj.z / prj.w))
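# Usage sketch for location_3d_to_region_2d() above, assuming it is called
# from an operator or draw callback running in a 3D View. The input must be
# in world space, so an object-local coordinate is transformed by
# matrix_world first (2.7x-style '*' multiplication, matching the code
# above). Points behind the view are not guarded here (prj.w can be <= 0).
# The helper name below is illustrative.
import bpy

def vertex_to_screen(context, obj, vertex_index):
    region = context.region        # active 3D View region
    rv3d = context.region_data     # its RegionView3D
    world_co = obj.matrix_world * obj.data.vertices[vertex_index].co
    return location_3d_to_region_2d(region, rv3d, world_co)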