class MGTOOLSOverlayManager():

    draw_handle = None
    shader = None
    initialized = False

    # Shader ##########################################################

    vertex_shader_simple = '''
        in vec3 position;
        in vec4 color;
        uniform mat4 perspective_matrix;
        uniform mat4 object_matrix;
        uniform float point_size;
        uniform float cutoff_radius;
        uniform float global_alpha;
        out vec4 f_color;
        out float f_cutoff_radius;

        void main()
        {
            gl_Position = perspective_matrix * object_matrix * vec4(position, 1.0f);
            gl_PointSize = point_size;
            f_color = vec4(color[0], color[1], color[2], global_alpha);
            f_cutoff_radius = cutoff_radius;
        }
    '''

    fragment_shader_simple = '''
        in vec4 f_color;
        in float f_cutoff_radius;
        out vec4 fragColor;

        void main()
        {
            vec2 cxy = 2.0f * gl_PointCoord - 1.0f;
            float r = dot(cxy, cxy);
            if (r > f_cutoff_radius) {
                discard;
            }
            fragColor = f_color;
        }
    '''

    # Basics ##########################################################

    @classmethod
    def init(self):
        print("Init MGTOOLSOverlayManager")
        if self.initialized:
            return
        self.draw_handle = bpy.types.SpaceView3D.draw_handler_add(
            self.draw_callback, (), "WINDOW", "POST_VIEW")
        bpy.app.handlers.load_pre.append(watcher)
        self.initialized = True

    @classmethod
    def deinit(self):
        if self.draw_handle is not None:
            bpy.types.SpaceView3D.draw_handler_remove(self.draw_handle, 'WINDOW')
            self.draw_handle = None
        # NOTE: the source had `if True == watcher in bpy.app.handlers.load_pre:`,
        # which Python parses as a chained comparison and always evaluates False,
        # so the watcher was never removed.
        if watcher in bpy.app.handlers.load_pre:
            bpy.app.handlers.load_pre.remove(watcher)
        self.initialized = False

    # Drawing ##########################################################

    @classmethod
    def create_batch(self, meshobj_org):
        # checks ---------------------------
        if meshobj_org is None or meshobj_org.type != 'MESH':
            print("No valid mesh object")
            return
        # get the active vertex group - otherwise exit
        activeVG = meshobj_org.vertex_groups.active
        if activeVG is None:
            print("No active vertex group")
            return

        # prepare mesh ---------------------------
        # https://docs.blender.org/api/blender2.8/bpy.types.Depsgraph.html
        # evaluate dependency graph of selected object
        depsgraph = bpy.context.evaluated_depsgraph_get()
        # get object with dependency graph applied
        meshobj_eval = meshobj_org.evaluated_get(depsgraph)

        # get vertex positions ---------------------------
        vertPositions = [v.co for v in meshobj_eval.data.vertices]
        if len(vertPositions) <= 0:
            print("No vert positions")
            return

        # get weight colors ---------------------------
        vertWeights = MGTOOLS_functions_helper.get_weights(meshobj_eval.data, activeVG)
        vertWeightColors = [MGTOOLS_functions_helper.convertWeight2Color(w) for w in vertWeights]

        # sanity checks ---------------------------
        if len(vertWeightColors) != len(vertPositions):
            print("positions {}, weights {} and colors {} count differs".format(
                len(vertPositions), len(vertWeights), len(vertWeightColors)))
            return

        # prepare shader and draw ---------------------------
        self.shader = GPUShader(self.vertex_shader_simple, self.fragment_shader_simple)
        if self.shader is None:
            print("Shader is not valid")
            return
        self.batch = batch_for_shader(self.shader, 'POINTS', {
            "position": vertPositions,
            "color": vertWeightColors,
        })

    @classmethod
    def draw_callback(self):
        if not self.initialized:
            return
        # print("Drawing...> num selected objects: {}".format(len(bpy.context.selected_objects)))
        if len(bpy.context.selected_objects) <= 0:
            return

        # ----------------------------------
        # tmp: find first MESH-object - otherwise exit
        drawmesh = MGTOOLS_functions_macros.get_first_selected_mesh()
        if drawmesh is None:
            return

        # prepare mesh for drawing ----------------------------------
        # Note: we need this to take changes from modifiers or armature deformation into account
        # drawmesh = obj.to_mesh(preserve_all_data_layers=False, depsgraph=None)
        # evaluate dependency graph of selected object
        # depsgraph = bpy.context.evaluated_depsgraph_get()
        # get object with dependency graph applied
        # object_eval = obj.evaluated_get(depsgraph)
        # get mesh
        # drawmesh = object_eval.data

        # ----------------------------------
        # update the batch to show changed vertex weights and positions!
        self.create_batch(drawmesh)

        if self.shader is None:
            return
        self.shader.bind()

        # update shader parameters
        mgtools_props_obj = bpy.context.object.mgtools
        pm = bpy.context.region_data.perspective_matrix
        self.shader.uniform_float("perspective_matrix", pm)
        self.shader.uniform_float("object_matrix", drawmesh.matrix_world)
        self.shader.uniform_float("point_size", mgtools_props_obj.p_weightdisplay_point_size)
        self.shader.uniform_float("cutoff_radius", mgtools_props_obj.p_weightdisplay_point_radius)
        self.shader.uniform_float("global_alpha", mgtools_props_obj.p_weightdisplay_global_alpha)

        # draw
        self.batch.draw(self.shader)
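
# Hypothetical wiring sketch (an assumption, not part of the original module):
# the manager is a classmethod-based singleton, so an add-on would typically
# hook it into its register()/unregister() entry points.
def register():
    MGTOOLSOverlayManager.init()

def unregister():
    MGTOOLSOverlayManager.deinit()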
def execute(self, context):
    # import cProfile, pstats, io
    # pr = cProfile.Profile()
    # pr.enable()

    bgl.glEnable(bgl.GL_PROGRAM_POINT_SIZE)

    scene = context.scene
    render = scene.render
    image_settings = render.image_settings

    original_depth = image_settings.color_depth
    image_settings.color_depth = '8'

    scale = render.resolution_percentage / 100
    width = int(render.resolution_x * scale)
    height = int(render.resolution_y * scale)

    pcv = context.object.point_cloud_visualizer
    cloud = PCVManager.cache[pcv.uuid]
    cam = scene.camera
    if cam is None:
        self.report({'ERROR'}, "No camera found.")
        return {'CANCELLED'}

    render_suffix = pcv.render_suffix
    render_zeros = pcv.render_zeros

    offscreen = GPUOffScreen(width, height)
    offscreen.bind()
    # with offscreen.bind():
    try:
        gpu.matrix.load_matrix(Matrix.Identity(4))
        gpu.matrix.load_projection_matrix(Matrix.Identity(4))
        bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)

        o = cloud['object']
        vs = cloud['vertices']
        cs = cloud['colors']

        dp = pcv.display_percent
        l = int((len(vs) / 100) * dp)
        if dp >= 99:
            l = len(vs)
        vs = vs[:l]
        cs = cs[:l]

        # sort by depth
        mw = o.matrix_world
        depth = []
        for i, v in enumerate(vs):
            vw = mw @ Vector(v)
            depth.append(world_to_camera_view(scene, cam, vw)[2])
        zps = zip(depth, vs, cs)
        sps = sorted(zps, key=lambda a: a[0])
        # split and reverse
        vs = [a for _, a, b in sps][::-1]
        cs = [b for _, a, b in sps][::-1]

        shader = GPUShader(vertex_shader, fragment_shader)
        batch = batch_for_shader(shader, 'POINTS', {
            "position": vs,
            "color": cs,
        })
        shader.bind()

        view_matrix = cam.matrix_world.inverted()
        camera_matrix = cam.calc_matrix_camera(
            # the source used bpy.context.depsgraph, which was removed in Blender 2.80+
            bpy.context.evaluated_depsgraph_get(),
            x=render.resolution_x,
            y=render.resolution_y,
            scale_x=render.pixel_aspect_x,
            scale_y=render.pixel_aspect_y,
        )
        perspective_matrix = camera_matrix @ view_matrix

        shader.uniform_float("perspective_matrix", perspective_matrix)
        shader.uniform_float("object_matrix", o.matrix_world)
        # shader.uniform_float("point_size", pcv.point_size)
        shader.uniform_float("point_size", pcv.render_point_size)
        shader.uniform_float("alpha_radius", pcv.alpha_radius)
        batch.draw(shader)

        buffer = bgl.Buffer(bgl.GL_BYTE, width * height * 4)
        # bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)

    except Exception as e:
        self.report({'ERROR'}, str(e))
        return {'CANCELLED'}
    finally:
        offscreen.unbind()
        offscreen.free()

    # image from buffer
    image_name = "pcv_output"
    if image_name not in bpy.data.images:
        bpy.data.images.new(image_name, width, height)
    image = bpy.data.images[image_name]
    image.scale(width, height)
    image.pixels = [v / 255 for v in buffer]

    # save as image file
    save_render(self, scene, image, render_suffix, render_zeros)

    # restore
    image_settings.color_depth = original_depth

    # pr.disable()
    # s = io.StringIO()
    # sortby = 'cumulative'
    # ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
    # ps.print_stats()
    # print(s.getvalue())

    return {'FINISHED'}
def load_ply_to_cache(operator, context):
    pcv = context.object.point_cloud_visualizer
    filepath = pcv.filepath

    __t = time.time()

    log('load data..')
    _t = time.time()
    points = []
    try:
        points = BinPlyPointCloudReader(filepath).points
    except Exception as e:
        if operator is not None:
            operator.report({'ERROR'}, str(e))
        else:
            raise e
    if len(points) == 0:
        operator.report({'ERROR'}, "No vertices loaded from file at {}".format(filepath))
        return False
    _d = datetime.timedelta(seconds=time.time() - _t)
    log("completed in {}.".format(_d))

    log('shuffle data..')
    _t = time.time()
    np.random.shuffle(points)
    _d = datetime.timedelta(seconds=time.time() - _t)
    log("completed in {}.".format(_d))

    log('process data..')
    _t = time.time()
    if not set(('x', 'y', 'z')).issubset(points.dtype.names):
        # this is very unlikely..
        operator.report({'ERROR'}, "Loaded data seems to miss vertex locations.")
        return False
    # # normals are not needed yet
    # if(not set(('nx', 'ny', 'nz')).issubset(points.dtype.names)):
    #     operator.report({'ERROR'}, "Loaded data seems to miss vertex normals.")
    #     return False
    vcols = True
    if not set(('red', 'green', 'blue')).issubset(points.dtype.names):
        vcols = False

    vs = np.column_stack((points['x'], points['y'], points['z']))

    if vcols:
        cs = np.column_stack((
            points['red'] / 255,
            points['green'] / 255,
            points['blue'] / 255,
            np.ones(len(points), dtype=float),
        ))
        cs = cs.astype(np.float32)
    else:
        n = len(points)
        cs = np.column_stack((
            np.full(n, 0.75, dtype=np.float32),
            np.full(n, 0.75, dtype=np.float32),
            np.full(n, 0.75, dtype=np.float32),
            np.ones(n, dtype=np.float32),
        ))

    u = str(uuid.uuid1())
    o = context.object
    pcv.uuid = u

    d = PCVManager.new()
    d['uuid'] = u
    d['stats'] = len(vs)
    d['vertices'] = vs
    d['colors'] = cs
    d['length'] = len(vs)

    dp = pcv.display_percent
    l = int((len(vs) / 100) * dp)
    if dp >= 99:
        l = len(vs)
    d['display_percent'] = l
    d['current_display_percent'] = l

    shader = GPUShader(vertex_shader, fragment_shader)
    batch = batch_for_shader(shader, 'POINTS', {
        "position": vs[:l],
        "color": cs[:l],
    })

    d['shader'] = shader
    d['batch'] = batch
    d['ready'] = True
    d['object'] = o
    d['name'] = o.name
    PCVManager.add(d)

    _d = datetime.timedelta(seconds=time.time() - _t)
    log("completed in {}.".format(_d))

    log("-" * 50)
    __d = datetime.timedelta(seconds=time.time() - __t)
    log("load and process completed in {}.".format(__d))
    log("-" * 50)

    return True
def execute(self, context):
    bgl.glEnable(bgl.GL_PROGRAM_POINT_SIZE)

    scene = context.scene
    render = scene.render
    image_settings = render.image_settings

    original_depth = image_settings.color_depth
    image_settings.color_depth = '8'

    scale = render.resolution_percentage / 100
    width = int(render.resolution_x * scale)
    height = int(render.resolution_y * scale)

    pcv = context.object.point_cloud_visualizer
    cloud = PCVManager.cache[pcv.uuid]
    cam = scene.camera
    if cam is None:
        self.report({'ERROR'}, "No camera found.")
        return {'CANCELLED'}
    render_suffix = pcv.render_suffix
    render_zeros = pcv.render_zeros

    offscreen = GPUOffScreen(width, height)
    offscreen.bind()
    try:
        gpu.matrix.load_matrix(Matrix.Identity(4))
        gpu.matrix.load_projection_matrix(Matrix.Identity(4))
        bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)

        o = cloud['object']
        vs = cloud['vertices']
        cs = cloud['colors']
        ns = cloud['normals']

        dp = pcv.render_display_percent
        l = int((len(vs) / 100) * dp)
        if dp >= 99:
            l = len(vs)
        vs = vs[:l]
        cs = cs[:l]
        ns = ns[:l]

        # sort by depth
        mw = o.matrix_world
        depth = []
        for i, v in enumerate(vs):
            vw = mw @ Vector(v)
            depth.append(world_to_camera_view(scene, cam, vw)[2])
        zps = zip(depth, vs, cs, ns)
        sps = sorted(zps, key=lambda a: a[0])
        # split and reverse
        vs = [a for _, a, b, c in sps][::-1]
        cs = [b for _, a, b, c in sps][::-1]
        ns = [c for _, a, b, c in sps][::-1]

        shader = GPUShader(vertex_shader, fragment_shader)
        batch = batch_for_shader(shader, 'POINTS', {
            "position": vs,
            "color": cs,
            "normal": ns,
        })
        shader.bind()

        view_matrix = cam.matrix_world.inverted()
        camera_matrix = cam.calc_matrix_camera(
            # the source used bpy.context.depsgraph, which was removed in Blender 2.80+
            bpy.context.evaluated_depsgraph_get(),
            x=render.resolution_x,
            y=render.resolution_y,
            scale_x=render.pixel_aspect_x,
            scale_y=render.pixel_aspect_y,
        )
        perspective_matrix = camera_matrix @ view_matrix

        shader.uniform_float("perspective_matrix", perspective_matrix)
        shader.uniform_float("object_matrix", o.matrix_world)
        shader.uniform_float("point_size", pcv.render_point_size)
        shader.uniform_float("alpha_radius", pcv.alpha_radius)

        if pcv.light_enabled and pcv.has_normals:
            cm = Matrix((
                (-1.0, 0.0, 0.0, 0.0),
                (0.0, -0.0, 1.0, 0.0),
                (0.0, -1.0, -0.0, 0.0),
                (0.0, 0.0, 0.0, 1.0),
            ))
            _, obrot, _ = o.matrix_world.decompose()
            mr = obrot.to_matrix().to_4x4()
            mr.invert()
            direction = cm @ pcv.light_direction
            direction = mr @ direction
            shader.uniform_float("light_direction", direction)

            inverted_direction = direction.copy()
            inverted_direction.negate()

            c = pcv.light_intensity
            shader.uniform_float("light_intensity", (c, c, c))
            shader.uniform_float("shadow_direction", inverted_direction)
            c = pcv.shadow_intensity
            shader.uniform_float("shadow_intensity", (c, c, c))
            shader.uniform_float("show_normals", float(pcv.show_normals))
        else:
            z = (0, 0, 0)
            shader.uniform_float("light_direction", z)
            shader.uniform_float("light_intensity", z)
            shader.uniform_float("shadow_direction", z)
            shader.uniform_float("shadow_intensity", z)
            shader.uniform_float("show_normals", float(False))

        batch.draw(shader)

        buffer = bgl.Buffer(bgl.GL_BYTE, width * height * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)

    except Exception as e:
        self.report({'ERROR'}, str(e))
        return {'CANCELLED'}
    finally:
        offscreen.unbind()
        offscreen.free()

    # image from buffer
    image_name = "pcv_output"
    if image_name not in bpy.data.images:
        bpy.data.images.new(image_name, width, height)
    image = bpy.data.images[image_name]
    image.scale(width, height)
    image.pixels = [v / 255 for v in buffer]

    # save as image file
    save_render(self, scene, image, render_suffix, render_zeros)

    # restore
    image_settings.color_depth = original_depth

    return {'FINISHED'}
# NOTE: the opening of this vertex shader was truncated in the source; the
# declarations below are reconstructed from how the shader is used.
vertex_shader_3d_dotted_line = '''
    uniform mat4 u_ViewProjectionMatrix;

    in vec3 position;
    in float arcLength;

    void main()
    {
        gl_Position = u_ViewProjectionMatrix * vec4(position, 1.0f);
    }
'''

# NOTE: with a constant u_Scale the discard below is uniform across the whole
# line; a dotted pattern would normally factor in the per-vertex arcLength.
fragment_shader_3d_dotted_line = '''
    uniform float u_Scale;

    void main()
    {
        if (sin(u_Scale) > 0.5)
            discard;
        gl_FragColor = vec4(1.0);
    }
'''

shader_3d_dotted_line = GPUShader(vertex_shader_3d_dotted_line, fragment_shader_3d_dotted_line)
shader_3d_uniform_color = from_builtin('3D_UNIFORM_COLOR')


def Draw_3D_DottedLine(context, coords, arc_lengths, _shader=shader_3d_dotted_line):
    # `coords` and `arc_lengths` were free variables in the source; they are
    # taken as parameters here so the function is self-contained.
    batch = batch_for_shader(
        _shader, 'LINE_STRIP',
        {"position": coords, "arcLength": arc_lengths},
    )
    _shader.bind()
    _shader.uniform_float("u_ViewProjectionMatrix", context.region_data.perspective_matrix)
    _shader.uniform_float("u_Scale", 10)
    batch.draw(_shader)


def Draw_3D_Lines(coords=[(0, 0, 0), (1, 1, 1)], _shader=shader_3d_uniform_color):
    # Body truncated in the source; a minimal sketch consistent with the
    # built-in 3D_UNIFORM_COLOR shader ("pos" attribute, "color" uniform) is assumed.
    batch = batch_for_shader(_shader, 'LINES', {"pos": coords})
    _shader.bind()
    _shader.uniform_float("color", (1.0, 1.0, 1.0, 1.0))
    batch.draw(_shader)
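
# Usage sketch (an assumption, not from the source): hook the dotted-line draw
# into the viewport as a POST_VIEW handler; arc_lengths holds the cumulative
# distance along the strip and must match coords in length.
import bpy

def _draw_dotted():
    coords = [(0, 0, 0), (0, 0, 2), (2, 0, 2)]
    arc_lengths = [0.0, 2.0, 4.0]
    Draw_3D_DottedLine(bpy.context, coords, arc_lengths)

_handle = bpy.types.SpaceView3D.draw_handler_add(_draw_dotted, (), 'WINDOW', 'POST_VIEW')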
# NOTE: `vshader` is defined elsewhere in the source and is not shown here.
fshader = """
    void main()
    {
        float r = 0.0, delta = 0.0, alpha = 0.0;
        vec2 cxy = 2.0 * gl_PointCoord - 1.0;
        r = dot(cxy, cxy);
        if (r > 1.0) {
            discard;
        }
        gl_FragColor = vec4(1.0, 1.0, 0.0, 1);
    }
"""

shader = GPUShader(vshader, fshader)


def avg_edge_distance(bm):
    # average edge length of a BMesh
    return sum([e.calc_length() for e in bm.edges]) / len(bm.edges)


def draw(coords):
    glEnable(GL_BLEND)
    glPointSize(12)
    glDepthFunc(GL_ALWAYS)
    shader.bind()
    batch = batch_for_shader(shader, 'POINTS', {"pos": coords})
    batch.draw(shader)
    glDisable(GL_BLEND)
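
# Minimal hookup for draw() above (an assumption; the original snippet omits
# registration): gather worldspace vertex coords and register a POST_VIEW handler.
import bpy

obj = bpy.context.object
coords = [obj.matrix_world @ v.co for v in obj.data.vertices]
handle = bpy.types.SpaceView3D.draw_handler_add(draw, (coords,), 'WINDOW', 'POST_VIEW')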
class BaseDecorator:
    # base name of objects to decorate
    objecttype = "NOTDEFINED"

    DEF_GLSL = """
    #define PI 3.141592653589793
    #define MAX_POINTS 32
    #define CIRCLE_SEGS 12
    """

    LIB_GLSL = """
    #define matCLIP2WIN() vec4(winsize.x/2, winsize.y/2, 1, 1)
    #define matWIN2CLIP() vec4(2/winsize.x, 2/winsize.y, 1, 1)
    #define CLIP2WIN(v) (clip2win * v / v.w)
    #define WIN2CLIP(v) (win2clip * v * v.w)

    void arrow_head(in vec4 dir, in float size, in float angle, out vec4 head[3]) {
        vec4 nose = dir * size;
        float c = cos(angle), s = sin(angle);
        head[0] = nose;
        head[1] = vec4(mat2(c, -s, +s, c) * nose.xy, 0, 0);
        head[2] = vec4(mat2(c, +s, -s, c) * nose.xy, 0, 0);
    }

    void circle_head(in float size, out vec4 head[CIRCLE_SEGS]) {
        float angle_d = PI * 2 / CIRCLE_SEGS;
        for(int i = 0; i<CIRCLE_SEGS; i++) {
            float angle = angle_d * i;
            head[i] = vec4(cos(angle), sin(angle), 0, 0) * size;
        }
    }

    void cross_head(in vec4 dir, in float size, out vec4 head[3]) {
        vec4 nose = dir * size;
        float c = cos(PI/2), s = sin(PI/2);
        head[0] = nose;
        head[1] = vec4(mat2(c, -s, +s, c) * nose.xy, 0, 0);
        head[2] = vec4(mat2(c, +s, -s, c) * nose.xy, 0, 0);
    }
    """

    VERT_GLSL = """
    uniform mat4 viewMatrix;
    in vec3 pos;
    in uint topo;
    out vec4 gl_Position;
    out uint type;

    void main() {
        gl_Position = viewMatrix * vec4(pos, 1.0);
        type = topo;
    }
    """

    GEOM_GLSL = """
    uniform vec2 winsize;
    uniform float viewportDrawingScale;

    layout(lines) in;
    layout(line_strip, max_vertices=2) out;

    void main() {
        vec4 clip2win = matCLIP2WIN();
        vec4 win2clip = matWIN2CLIP();

        vec4 p0 = gl_in[0].gl_Position, p1 = gl_in[1].gl_Position;
        vec4 p0w = CLIP2WIN(p0), p1w = CLIP2WIN(p1);
        vec4 edge = p1w - p0w, dir = normalize(edge);
        vec4 gap = dir * 16.0;

        vec4 p;

        p = p0w + gap;
        gl_Position = WIN2CLIP(p);
        EmitVertex();

        p = p1w - gap;
        gl_Position = WIN2CLIP(p);
        EmitVertex();

        EndPrimitive();
    }
    """

    FRAG_GLSL = """
    uniform vec4 color;
    out vec4 fragColor;

    void main() {
        fragColor = color;
    }
    """

    def __init__(self):
        # NB: libcode param doesn't work
        self.shader = GPUShader(
            vertexcode=self.VERT_GLSL,
            fragcode=self.FRAG_GLSL,
            geocode=self.LIB_GLSL + self.GEOM_GLSL,
            defines=self.DEF_GLSL,
        )

        # Horrific prototype code to ensure bgl draws at drawing scales
        # https://blender.stackexchange.com/questions/16493/is-there-a-way-to-fit-the-viewport-to-the-current-field-of-view
        def is_landscape(render):
            return render.resolution_x > render.resolution_y

        def get_scale(camera):
            if camera.data.BIMCameraProperties.diagram_scale == "CUSTOM":
                human_scale, fraction = camera.data.BIMCameraProperties.custom_diagram_scale.split("|")
            else:
                human_scale, fraction = camera.data.BIMCameraProperties.diagram_scale.split("|")
            numerator, denominator = fraction.split("/")
            return float(numerator) / float(denominator)

        camera = bpy.context.scene.camera
        render = bpy.context.scene.render
        if is_landscape(render):
            camera_width_model = camera.data.ortho_scale
        else:
            camera_width_model = camera.data.ortho_scale / render.resolution_y * render.resolution_x
        self.camera_width_mm = get_scale(camera) * camera_width_model

        self.font_id = blf.load(
            os.path.join(bpy.context.scene.BIMProperties.data_dir, "fonts", "OpenGost Type B TT.ttf"))

    def camera_zoom_to_factor(self, zoom):
        return math.pow(((zoom / 50) + math.sqrt(2)) / 2, 2)

    def get_objects(self, collection):
        """find relevant objects using class.objecttype

        returns: iterable of blender objects
        """
        results = []
        for obj in collection.all_objects:
            element = tool.Ifc.get_entity(obj)
            if not element:
                continue
            if element.is_a("IfcAnnotation") and element.ObjectType == self.objecttype:
                results.append(obj)
        return results

    def get_path_geom(self, obj, topo=True):
        """Parses path geometry into line segments

        Args:
          obj: Blender object with data of type Curve
          topo: bool; if types of vertices are needed

        Returns:
          vertices: 3-tuples of coords
          indices: 2-tuples of each segment vertices' indices
          topology: types of vertices
            0: internal
            1: beginning
            2: ending
        """
        vertices = []
        indices = []
        topology = []

        idx = 0
        for spline in obj.data.splines:
            spline_points = spline.bezier_points if spline.bezier_points else spline.points
            if len(spline_points) < 2:
                continue
            points = [obj.matrix_world @ p.co for p in spline_points]
            cnt = len(points)
            vertices.extend(p[:3] for p in points)
            if topo:
                topology.append(1)
                topology.extend([0] * max(0, cnt - 2))
                topology.append(2)
            indices.extend((idx + i, idx + i + 1) for i in range(cnt - 1))
            idx += cnt

        return vertices, indices, topology

    def get_mesh_geom(self, obj):
        """Parses mesh geometry into line segments

        Args:
          obj: Blender object with data of type Mesh

        Returns:
          vertices: 3-tuples of coords
          indices: 2-tuples of each segment vertices' indices
        """
        vertices = [obj.matrix_world @ v.co for v in obj.data.vertices]
        indices = [e.vertices for e in obj.data.edges]
        return vertices, indices

    def get_editmesh_geom(self, obj):
        """Parses editmode mesh geometry into line segments"""
        mesh = bmesh.from_edit_mesh(obj.data)
        vertices = []
        indices = []
        idx = 0

        for edge in mesh.edges:
            vertices.extend(edge.verts)
            indices.append((idx, idx + 1))
            idx += 2
        vertices = [obj.matrix_world @ v.co for v in vertices]

        return vertices, indices

    def decorate(self, context, object):
        """perform actual drawing stuff"""
        raise NotImplementedError()

    def draw_lines(self, context, obj, vertices, indices, topology=None):
        region = context.region
        region3d = context.region_data
        color = context.scene.DocProperties.decorations_colour

        fmt = GPUVertFormat()
        fmt.attr_add(id="pos", comp_type="F32", len=3, fetch_mode="FLOAT")
        if topology:
            fmt.attr_add(id="topo", comp_type="U8", len=1, fetch_mode="INT")

        vbo = GPUVertBuf(len=len(vertices), format=fmt)
        vbo.attr_fill(id="pos", data=vertices)
        if topology:
            vbo.attr_fill(id="topo", data=topology)

        ibo = GPUIndexBuf(type="LINES", seq=indices)

        batch = GPUBatch(type="LINES", buf=vbo, elem=ibo)

        bgl.glEnable(bgl.GL_LINE_SMOOTH)
        bgl.glHint(bgl.GL_LINE_SMOOTH_HINT, bgl.GL_NICEST)
        bgl.glEnable(bgl.GL_BLEND)
        bgl.glBlendFunc(bgl.GL_SRC_ALPHA, bgl.GL_ONE_MINUS_SRC_ALPHA)

        self.shader.bind()
        self.shader.uniform_float("viewMatrix", region3d.perspective_matrix)
        self.shader.uniform_float("winsize", (region.width, region.height))
        self.shader.uniform_float("color", color)

        # Horrific prototype code
        factor = self.camera_zoom_to_factor(context.space_data.region_3d.view_camera_zoom)
        camera_width_px = factor * context.region.width
        mm_to_px = camera_width_px / self.camera_width_mm
        # 0.00025 is a magic constant number I visually discovered to get the right number.
        # It probably should be dynamically calculated using system.dpi or something.
        viewport_drawing_scale = 0.00025 * mm_to_px
        self.shader.uniform_float("viewportDrawingScale", viewport_drawing_scale)

        batch.draw(self.shader)

    def draw_label(self, context, text, pos, dir, gap=4, center=True, vcenter=False):
        """Draw text label

        Args:
          pos: bottom-center aligned and centered at segment middle
        """
        # 0 is the default font, but we're fancier than that
        font_id = self.font_id
        dpi = context.preferences.system.dpi
        color = context.scene.DocProperties.decorations_colour

        ang = -Vector((1, 0)).angle_signed(dir)
        cos = math.cos(ang)
        sin = math.sin(ang)

        # Horrific prototype code
        factor = self.camera_zoom_to_factor(context.space_data.region_3d.view_camera_zoom)
        camera_width_px = factor * context.region.width
        mm_to_px = camera_width_px / self.camera_width_mm
        # 0.004118616 is a magic constant number I visually discovered to get the right number.
        # In particular it works only for the OpenGOST font and produces a 2.5mm font size.
        # It probably should be dynamically calculated using system.dpi or something.
        # font_size = 16 <-- this is a good default
        font_size = int(0.004118616 * mm_to_px)

        blf.size(font_id, font_size, dpi)

        w, h = 0, 0
        if center or vcenter:
            w, h = blf.dimensions(font_id, text)

        if center:
            # horizontal centering
            pos -= Vector((cos, sin)) * w * 0.5

        if vcenter:
            # vertical centering
            pos -= Vector((-sin, cos)) * h * 0.5

        if gap:
            # side-shifting
            pos += Vector((-sin, cos)) * gap

        blf.enable(font_id, blf.ROTATION)
        blf.position(font_id, pos.x, pos.y, 0)
        blf.rotation(font_id, ang)
        blf.color(font_id, *color)
        # blf.enable(font_id, blf.SHADOW)
        # blf.shadow(font_id, 5, 0, 0, 0, 1)
        # blf.shadow_offset(font_id, 1, -1)
        blf.draw(font_id, text)
        blf.disable(font_id, blf.ROTATION)

    def format_value(self, context, value):
        unit_system = context.scene.unit_settings.system
        if unit_system == "IMPERIAL":
            precision = context.scene.BIMProperties.imperial_precision
            if precision == "NONE":
                precision = 256
            elif precision == "1":
                precision = 1
            elif "/" in precision:
                precision = int(precision.split("/")[1])
        elif unit_system == "METRIC":
            precision = 3
        else:
            return
        return bpy.utils.units.to_string(unit_system, "LENGTH", value, precision=precision, split_unit=True)
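
# Standalone illustration of the unit formatting that format_value() above
# delegates to; the exact output text depends on Blender's unit tables, so
# treat the printed results as indicative only.
import bpy

print(bpy.utils.units.to_string("METRIC", "LENGTH", 1.5, precision=3, split_unit=True))
print(bpy.utils.units.to_string("IMPERIAL", "LENGTH", 1.5, precision=16, split_unit=True))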
class ReconCamera():
    """Representation of a reconstructed camera.
    Contains information about:
       - image filename
       - focal length
       - rotation
       - position
       - radial distortion
    """

    FRAME_NUMBER_REGEX = re.compile(r'.*?([0-9]+)(/undistorted)*\.[a-zA-Z]+$')

    # camera display symbol vertices
    SYMBOL_VERTICES = tuple(map(
        lambda v: Matrix.Scale(0.10, 3) @ v,   # scale
        (
            # position
            Vector((0, 0, 0)),
            # viewport
            Vector((-0.5, +0.28, -1.)),
            Vector((+0.5, +0.28, -1.)),
            Vector((+0.5, -0.28, -1.)),
            Vector((-0.5, -0.28, -1.)),
            # up direction
            Vector((-0.35, +0.33, -1.)),
            Vector((0, +0.7, -1.)),
            Vector((+0.35, +0.33, -1.)))))

    # camera display symbol lines indices
    SYMBOL_INDICES = ((1, 2), (2, 3), (3, 4), (4, 1), (0, 1), (0, 2), (0, 3), (0, 4),
                      (5, 6), (6, 7), (7, 5))

    # camera display symbol vertex shader
    _vertex_shader = '''
        in vec3 position;
        uniform mat4 perspective_matrix;
        uniform mat4 object_matrix;
        uniform mat4 initial_centroid_matrix;
        uniform vec3 color;
        out vec4 f_color;

        void main()
        {
            gl_Position = perspective_matrix * object_matrix * initial_centroid_matrix * vec4(position, 1.0f);
            f_color = vec4(color[0], color[1], color[2], 1.0f);
        }
    '''

    # camera display symbol fragment shader
    _fragment_shader = '''
        in vec4 f_color;
        out vec4 fragColor;

        void main()
        {
            fragColor = f_color;
        }
    '''

    ################################################################################################
    # Properties
    #

    # ==============================================================================================
    @property
    def matrix_world(self) -> Matrix:
        """Worldspace transformation matrix."""
        return self._object_matrix @ self._initial_centroid_matrix @ self._recon_matrix_world

    # ==============================================================================================
    @property
    def position(self) -> Vector:
        """The camera position."""
        return self.matrix_world.to_translation()

    # ==============================================================================================
    @property
    def rotation(self) -> Vector:
        """The camera rotation quaternion."""
        return self.matrix_world.to_quaternion()

    # ==============================================================================================
    @property
    def scale(self) -> Vector:
        """The camera scale."""
        return self.matrix_world.to_scale()

    # ==============================================================================================
    @property
    def look_at(self) -> Vector:
        """The camera look-at direction Vector."""
        return self.rotation @ Vector((0.0, 0.0, -1.0))

    ################################################################################################
    # Constructor
    #

    def __init__(self, filename: str, focal_length: float, matrix_world: Matrix,
                 radial_distortion: float):
        """Initialize a reconstructed camera.
        Contains information about: image filename, focal length, rotation, position,
        and radial distortion

        Arguments:
            filename {str} -- filename of the associated image
            focal_length {float} -- focal length in millimeters
            matrix_world {Matrix} -- 4x4 world transformation matrix
            radial_distortion {float} -- radial distortion factor
        """
        self.filename = filename
        self.frame_number = int(ReconCamera.FRAME_NUMBER_REGEX.match(self.filename).group(1))
        self.focal_length = focal_length
        self.radial_distortion = radial_distortion
        self._recon_matrix_world = matrix_world

        # point cloud object matrix, to be set to UI control element world_matrix
        self._object_matrix = None

        # initial centroid translation matrix
        self._initial_centroid_matrix = None

        self._shader = None
        self._batch = None

        user_preferences = bpy.context.preferences
        addon_user_preferences_name = (__name__)[:__name__.index('.')]
        self._cam_color = user_preferences.addons[addon_user_preferences_name].preferences.recon_camera_color
        # NOTE: when running in dev mode the color reference is lost on add-on reload
        # (the color becomes random). A possible solution is to create a copy, but then
        # I would lose the ability to change the color of existing reconstructions
        # from the preferences dialog.

    ################################################################################################
    # Methods
    #

    # ==============================================================================================
    def show(self, object_matrix: Matrix, initial_centroid_matrix: Matrix) -> None:
        """Setup shaders and other required data to show the reconstructed cameras.

        Arguments:
            object_matrix {bpy.types.Object} -- user interface handle object matrix
            initial_centroid_matrix {Matrix} -- initial centroid matrix of the reconstruction
        """
        pos = list(map(lambda v: self._recon_matrix_world @ v, ReconCamera.SYMBOL_VERTICES))

        # setup shader
        self._shader = GPUShader(ReconCamera._vertex_shader, ReconCamera._fragment_shader)
        self._batch = batch_for_shader(self._shader, 'LINES', {"position": pos},
                                       indices=ReconCamera.SYMBOL_INDICES)
        self._object_matrix = object_matrix
        self._initial_centroid_matrix = initial_centroid_matrix

    # ==============================================================================================
    def draw(self, object_matrix: Matrix = None) -> None:
        """Camera symbol draw function, to be called by a {SpaceView3D} draw_handler.

        Keyword Arguments:
            object_matrix {Matrix} -- optional matrix_world of the UI empty (default: {None})
        """
        if object_matrix:
            self._object_matrix = object_matrix
        #
        # self._batch.draw(self._shader)
        self._shader.bind()
        self._shader.uniform_float("perspective_matrix", bpy.context.region_data.perspective_matrix)
        self._shader.uniform_float("object_matrix", self._object_matrix)
        self._shader.uniform_float("initial_centroid_matrix", self._initial_centroid_matrix)
        self._shader.uniform_float("color", self._cam_color)
        # draw once the uniforms are set (the final draw call is absent from this
        # extract; restoring it here is an assumption, but without it nothing is drawn)
        self._batch.draw(self._shader)

    # ==============================================================================================
    def evaluate(self, scene: bpy.types.Scene) -> Dict:
        """Given a scene, evaluate the camera pose w.r.t. the ground truth.

        Arguments:
            scene {scene} -- scene, includes the render camera that will be used as ground truth

        Returns:
            Dict -- evaluation result dictionary containing:
                'position_distance' {float}: position distance (measure unit depends on the scene's unit)
                'lookat_difference_rad' {float}: non-oriented angle between lookAt vectors, in radians
                'lookat_difference_deg' {float}: non-oriented angle between lookAt vectors, in degrees
                'rotation_difference_rad' {float}: angle to align reconstructed camera to gt, in radians
                'rotation_difference_deg' {float}: angle to align reconstructed camera to gt, in degrees
        """
        # get ground truth
        scene.frame_set(self.frame_number)
        gt_matrix_world = scene.camera.matrix_world
        gt_pos = gt_matrix_world.to_translation()
        gt_rotation = gt_matrix_world.to_quaternion()
        gt_lookat = get_camera_lookat(scene.camera)
        #
        # --- position evaluation
        pos_distance = euclidean_distance(gt_pos, self.position)
        logger.debug("Camera position distance: %f (GT=%s, recon=%s)",
                     pos_distance, gt_pos, self.position)
        #
        # --- look-at evaluation
        # compute the non-oriented angle between look-at vectors (gt and reconstructed)
        cos_theta = (gt_lookat @ self.look_at) / (gt_lookat.length * self.look_at.length)
        if cos_theta > 1.0 and cos_theta < 1.1:   # rounding error
            cos_theta = 1.0
        theta_rad = acos(cos_theta)
        theta_deg = degrees(theta_rad)
        logger.debug("Camera look-at: %f deg, %f rad. (GT=%s, recon=%s)",
                     theta_deg, theta_rad, gt_lookat, self.look_at)
        #
        # --- rotation evaluation
        # compute rotation angle to align reconstructed camera to gt
        rot_diff = self.rotation.conjugated() @ gt_rotation
        # rot_diff = self.rotation.rotation_difference(gt_rotation)
        rot_diff_rad = rot_diff.angle
        rot_diff_deg = degrees(rot_diff_rad)
        if rot_diff_deg > 180.0:   # angle in range 0-360, equal to +0-180 or -0-180
            rot_diff_deg = 360.0 - rot_diff_deg
        logger.debug("Camera rotation difference: %f deg (GT=%s, recon=%s)",
                     rot_diff_deg, gt_rotation, self.rotation)
        #
        results = {
            "position_distance": pos_distance,
            "lookat_difference_rad": theta_rad,
            "lookat_difference_deg": theta_deg,
            "rotation_difference_rad": rot_diff_rad,
            "rotation_difference_deg": rot_diff_deg
        }
        return results
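
# Tiny numeric check of the rotation metric used in evaluate() above (values
# are illustrative): a 0.35 rad yaw error should come back as roughly 20 degrees.
from math import degrees
from mathutils import Euler

gt = Euler((0.0, 0.0, 0.0)).to_quaternion()
recon = Euler((0.0, 0.0, 0.35)).to_quaternion()
rot_diff = recon.conjugated() @ gt
print(degrees(rot_diff.angle))  # ~20.05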
class BaseShader:
    """Wrapper for GPUShader

    To use for viewport decorations with geometry generated on the GPU side.

    The geometry shader works in clipping coords (after projection, before division
    and window scaling). To make window-scale geometry, vectors should be calculated
    in window space and then back-projected to clipping space. Provide the `winSize`
    uniform vector with the half-size of the window, and use the W2C and C2W macros.

    Replace the glsl code in derived classes.
    Beware of unused attributes and uniforms: the glsl compiler will optimize them
    out and Blender will fail with an exception.

    Vertex topology attribute to use with line segments:
    - 0 = inner
    - 1 = starting
    - 2 = ending
    - 3 = isolated
    """

    TYPE = None  # should be LINES|POINTS|etc

    DEF_GLSL = """
    #define PI 3.141592653589793
    """

    VERT_GLSL = """
    uniform mat4 viewMatrix;
    uniform mat4 modelMatrix;

    in vec3 pos;
    in uint topology;

    out vec4 gl_Position;
    out uint topo;

    void main() {
        gl_Position = viewMatrix * modelMatrix * vec4(pos, 1.0);
        topo = topology;
    }
    """

    # prepended to geom_glsl
    LIB_GLSL = """
    uniform vec2 winSize;

    // convert camera to window
    #define C2W(v) vec4(v.x * winSize.x / v.w, v.y * winSize.y / v.w, v.z / v.w, 1)
    // convert window to camera
    #define W2C(v) vec4(v.x * v.w / winSize.x, v.y * v.w / winSize.y, v.z * v.w, 1)

    void emitSegment(vec4 p0, vec4 p1) {
        gl_Position = p0;
        EmitVertex();
        gl_Position = p1;
        EmitVertex();
        EndPrimitive();
    }

    void emitTriangle(vec4 p1, vec4 p2, vec4 p3) {
        gl_Position = p1;
        EmitVertex();
        gl_Position = p2;
        EmitVertex();
        gl_Position = p3;
        EmitVertex();
        EndPrimitive();
    }
    """

    GEOM_GLSL = """
    """

    FRAG_GLSL = """
    uniform vec4 color;
    out vec4 fragColor;

    void main() {
        fragColor = color;
    }
    """

    def __init__(self):
        # NB: libcode arg doesn't work
        self.prog = GPUShader(
            vertexcode=self.VERT_GLSL,
            fragcode=self.FRAG_GLSL,
            geocode=self.LIB_GLSL + self.GEOM_GLSL,
            defines=self.DEF_GLSL,
        )

    def batch(self, indices=None, **data):
        """Returns automatic GPUBatch filled with provided parameters"""
        batch = batch_for_shader(self.prog, self.TYPE, data, indices=indices)
        batch.program_set(self.prog)
        return batch

    def bind(self):
        self.prog.bind()

    def glenable(self):
        bgl.glEnable(bgl.GL_BLEND)
        bgl.glBlendFunc(bgl.GL_SRC_ALPHA, bgl.GL_ONE_MINUS_SRC_ALPHA)
        bgl.glBlendEquation(bgl.GL_FUNC_ADD)
        # bgl.glEnable(bgl.GL_DEPTH_TEST)
        # bgl.glDepthFunc(bgl.GL_LEQUAL)
        # bgl.glDepthMask(True)

    def uniform_region(self, ctx):
        region = ctx.region
        region3d = ctx.region_data
        try:
            self.prog.uniform_float("viewMatrix", region3d.perspective_matrix)
        except ValueError:  # unused uniform
            pass
        try:
            self.prog.uniform_float("winSize", (region.width / 2, region.height / 2))
        except ValueError:  # unused uniform
            pass
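
# Minimal derived-class sketch (an illustration, not from the source): a subclass
# only picks TYPE and supplies a geometry stage; __init__ above prepends LIB_GLSL,
# so the C2W/W2C macros and emitSegment() are available. The 10-unit window-space
# shift is an arbitrary choice for demonstration.
class TickShader(BaseShader):
    TYPE = 'LINES'

    GEOM_GLSL = """
    layout(lines) in;
    layout(line_strip, max_vertices=2) out;

    void main() {
        // re-emit each segment shifted 10 window units to the right
        vec4 p0 = C2W(gl_in[0].gl_Position);
        vec4 p1 = C2W(gl_in[1].gl_Position);
        vec4 shift = vec4(10, 0, 0, 0);
        emitSegment(W2C((p0 + shift)), W2C((p1 + shift)));
    }
    """

# usage inside a draw callback (names assumed):
#   shader = TickShader()
#   batch = shader.batch(pos=verts, topology=topo)
#   shader.bind(); shader.uniform_region(context); batch.draw(shader.prog)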
class PointCloud():
    """Point cloud representation.
    Includes the actual 3D points and the methods to render them in the 3D view.
    """

    # point cloud vertex shader
    _vertex_shader = '''
        in vec3 position;
        in vec3 color;
        uniform mat4 perspective_matrix;
        uniform mat4 object_matrix;
        uniform mat4 initial_centroid_matrix;
        out vec4 f_color;

        void main()
        {
            gl_Position = perspective_matrix * object_matrix * initial_centroid_matrix * vec4(position, 1.0f);
            f_color = vec4(color[0], color[1], color[2], 1.0f);
        }
    '''

    # point cloud fragment shader
    _fragment_shader = '''
        in vec4 f_color;
        out vec4 fragColor;

        void main()
        {
            fragColor = f_color;
        }
    '''

    ################################################################################################
    # Properties
    #

    # ==============================================================================================
    @property
    def center(self) -> Vector:
        """The current centroid of the point cloud. Computed on-the-fly.

        Returns:
            Vector -- centroid 3D coordinates
        """
        return np.mean(self.vertices, axis=0)

    # ==============================================================================================
    @property
    def vertices_filtered(self) -> np.ndarray:
        """Get the filtered point cloud.

        Returns:
            np.ndarray -- filtered vertices
        """
        mask = np.ones(len(self.vertices), dtype=bool)
        mask[self._discard_vertices] = False
        return self.vertices[mask]

    # ==============================================================================================
    @property
    def colors_filtered(self) -> np.ndarray:
        """Get the filtered colors.

        Returns:
            np.ndarray -- filtered vertices colors
        """
        mask = np.ones(len(self.colors), dtype=bool)
        mask[self._discard_vertices] = False
        return self.colors[mask]

    ################################################################################################
    # Constructor
    #

    def __init__(self, point_count: int):
        # number of points in the cloud
        self.point_count = point_count   # type: int

        # the points
        self.vertices = np.empty((point_count, 3), dtype=np.float32)   # type: np.ndarray

        # the colors of the points, range [0-1]
        self.colors = np.empty((point_count, 3), dtype=np.float32)   # type: np.ndarray

        # indices of vertices discarded during point cloud filtering
        self._discard_vertices = []   # type: List[int]

        # point cloud filter distance threshold
        self._filter_distance = float('inf')   # type: float

        # last point set in the cloud
        self._last_point_set = -1   # type: int

        # point cloud object matrix, to be set to UI control element world_matrix
        self._object_matrix = None   # type: Matrix

        # initial centroid translation matrix
        self._initial_centroid_matrix = None   # type: Matrix

        self._shader = None   # type: GPUShader
        self._batch = None    # type: GPUBatch

        user_preferences = bpy.context.preferences
        addon_user_preferences_name = (__name__)[:__name__.index('.')]
        prefs = user_preferences.addons[addon_user_preferences_name].preferences   # type: AddonPreferences
        self.filtered_points_color = np.array(prefs.filtered_points_color)   # type: np.ndarray

    ################################################################################################
    # Methods
    #

    # ==============================================================================================
    def add_point(self, position: Vector, color: Color) -> None:
        """Add a reconstructed vertex to the cloud.

        Arguments:
            position {Vector} -- reconstructed point coordinates
            color {Color} -- reconstructed point color in range [0-1]

        Raises:
            RuntimeError: when trying to set more points than the cloud size
        """
        if self._last_point_set + 1 >= self.point_count:
            raise RuntimeError("Trying to set more points than cloud dimensions!")
        self._last_point_set += 1
        self.vertices[self._last_point_set, :] = position[0:3]   # load vertex coordinates
        self.colors[self._last_point_set, :] = color[0:3]        # load colors

    # ==============================================================================================
    def show(self, object_matrix: Matrix, initial_centroid_matrix: Matrix,
             filtering_display_mode: str) -> None:
        """Setup shaders and other required data to display the point cloud.

        Arguments:
            object_matrix {bpy.types.Object} -- user interface handle object matrix
            initial_centroid_matrix {Matrix} -- initial centroid matrix of the reconstruction
            filtering_display_mode {str} -- point cloud filtering display mode,
                from {sfm_flow.reconstruction.SFMFLOW_ReconstructionModelProperties}
        """
        if filtering_display_mode == "cloud_filter.color":
            # override colors for discarded points
            positions = self.vertices
            colors = self.colors.copy()
            colors[self._discard_vertices] = self.filtered_points_color
        elif filtering_display_mode == "cloud_filter.filtered":
            # show only points that are not discarded
            positions = self.vertices_filtered
            colors = self.colors_filtered
        else:   # default to "cloud_filter.all"
            # show all vertices with original colors
            positions = self.vertices
            colors = self.colors
        #
        # setup shader
        self._shader = GPUShader(PointCloud._vertex_shader, PointCloud._fragment_shader)
        self._batch = batch_for_shader(
            self._shader, 'POINTS',
            {
                "position": positions,
                "color": colors
            },
        )
        self._object_matrix = object_matrix
        self._initial_centroid_matrix = initial_centroid_matrix

    # ==============================================================================================
    def draw(self, object_matrix: Matrix = None) -> None:
        """Point cloud draw function, to be called by a {SpaceView3D} draw_handler.

        Keyword Arguments:
            object_matrix {Matrix} -- optional matrix_world of the UI empty (default: {None})
        """
        if object_matrix:
            self._object_matrix = object_matrix
        #
        # self._batch.draw(self._shader)
        self._shader.bind()
        self._shader.uniform_float("perspective_matrix", bpy.context.region_data.perspective_matrix)
        self._shader.uniform_float("object_matrix", self._object_matrix)
        self._shader.uniform_float("initial_centroid_matrix", self._initial_centroid_matrix)
        # draw once the uniforms are set (the final draw call is absent from this
        # extract; restoring it here is an assumption, but without it nothing is drawn)
        self._batch.draw(self._shader)

    # ==============================================================================================
    def _show_as_vertices_mesh(self, vertices: Union[np.array, List[Vector]] = None) -> None:
        """Show the point cloud as a mesh with only vertices. Used for debug purpose.

        Keyword Arguments:
            vertices {Union[np.array, List[Vector]]} -- optional list of vertices to show (default: {None})
        """
        mesh = bpy.data.meshes.new("pc_vertices_data")
        obj = bpy.data.objects.new("pc_vertices", mesh)
        bpy.context.scene.collection.objects.link(obj)
        #
        vts = self.vertices if vertices is None else [tuple(v[0:3]) for v in vertices]
        mesh.from_pydata(vts, [], [])
        mesh.update()

    # ==============================================================================================
    def filter_point_cloud(self, target_pc_kdtree: KDTree, initial_alignment: Matrix,
                           distance_threshold: float) -> np.array:
        """Get a filtered version of the point cloud. The filtered cloud is also stored
        for later use. Optionally apply an initial alignment.

        Arguments:
            target_pc_kdtree {KDTree} -- KDTree of the point cloud to align to
            initial_alignment {Matrix} -- initial manual alignment, usually from the UI control empty
            distance_threshold {float} -- maximum allowed distance from ground truth

        Returns:
            np.array -- the filtered point cloud
        """
        logger.info("Starting reconstructed point cloud filtering")
        src = np.copy(self.vertices)
        #
        # initial alignment
        src = PointCloud.transform(src, initial_alignment)
        #
        # filter distant points
        self._discard_vertices.clear()
        self._filter_distance = distance_threshold
        to_delete = []
        for i, v in enumerate(src):
            if target_pc_kdtree.find(v)[2] > distance_threshold:
                to_delete.append(i)
        src = np.delete(src, to_delete, axis=0)
        self._discard_vertices = to_delete
        logger.info("Reconstructed points filtered. Discarded %i points!", len(to_delete))
        if src.shape[0] == 0:
            logger.warning("Point cloud contains 0 points!")
        return self.vertices_filtered

    # ==============================================================================================
    def get_regsitration_to_target(self, target_pc: List[Vector], initial_alignment: Matrix,
                                   target_pc_kdtree: KDTree = None, max_iterations: int = 100,
                                   samples: int = 0,
                                   use_filtered_cloud: bool = True) -> Tuple[Matrix, float]:
        """Get the registration matrix to a target point cloud. Optionally apply an initial
        alignment. Implements a variant of the Iterative Closest Point algorithm.

        Arguments:
            target_pc {List[Vector]} -- the point cloud to align to
            initial_alignment {Matrix} -- initial manual alignment, usually from the UI control empty

        Keyword Arguments:
            target_pc_kdtree {KDTree} -- KDTree of the point cloud to align to, if {None} will be
                                         created internally starting from `target_pc` (default: {None})
            max_iterations {int} -- maximum iterations allowed to the algorithm (default: {100})
            samples {int} -- number of random vertices to be used for alignment,
                             if <= 0 use the whole cloud (default: {0})
            use_filtered_cloud {bool} -- if {True} the filtered point cloud is used to run the
                                         alignment, otherwise the full cloud is used (default: {True})

        Returns:
            Matrix -- the combined transformation matrix to align the point cloud
            float -- registration error
        """
        logger.info("Starting ICP, samples=%i, max_iterations=%i", samples, max_iterations)
        src_pc = self.vertices_filtered if use_filtered_cloud else self.vertices
        #
        target_pc = np.array(target_pc)
        src = np.ones((src_pc.shape[0], 4))
        target = np.ones((len(target_pc), 4))
        src[:, :3] = np.copy(src_pc)
        target[:, :3] = np.copy(target_pc)
        #
        # initial alignment
        src = PointCloud.transform(src, initial_alignment)
        #
        # build KDTree for target point cloud
        kdtree = target_pc_kdtree
        if kdtree is None:
            size = len(target_pc)
            kdtree = KDTree(size)
            for i, v in enumerate(target_pc):
                kdtree.insert(v, i)
            kdtree.balance()
        #
        # define samples
        if samples <= 0 or samples > src[:].shape[0]:
            logger.warning("Using %i points but were required %i!", src[:].shape[0], samples)
            samples = src[:].shape[0]
        #
        # randomize points
        indices = list(range(0, src[:].shape[0]))
        #
        current_iter = 0
        previous_error = float('inf')
        transforms = []
        while current_iter < max_iterations:
            shuffle(indices)
            s = list(zip(*[kdtree.find(src[i][0:3]) for i in indices[:samples]]))
            # s_vertices = s[0]
            s_indices = s[1]
            s_distances = s[2]
            #
            # get error
            mean_error = np.mean(s_distances)
            logger.info("ICP iteration %i, mean error: %f", current_iter, mean_error)
            if (previous_error - mean_error) < 0.0001:
                # best alignment reached
                break
            previous_error = mean_error
            #
            # find fit transform
            T = self.find_fit_transform(src[indices[:len(s_indices)]], target[s_indices, :])
            transforms.append(T)
            #
            # update the current source cloud
            src = PointCloud.transform(src, T)
            #
            current_iter += 1
        #
        # self._show_as_vertices_mesh(src)
        align_matrix = Matrix(reduce(lambda am, t: t @ am, transforms).tolist())   # aggregate transformations
        return align_matrix, previous_error

    # ==============================================================================================
    @staticmethod
    def find_fit_transform(src: np.array, trg: np.array) -> np.matrix:
        """Find the best fit transformation between two point clouds.

        Arguments:
            src {np.array} -- source point cloud, to be aligned
            trg {np.array} -- target point cloud, to align to

        Returns:
            np.matrix -- best alignment transform matrix
        """
        d = src.shape[1]
        #
        # align centroids
        centroid_trg = np.mean(trg, axis=0)
        centroid_src = np.mean(src, axis=0)
        src_c = src - centroid_src
        trg_c = trg - centroid_trg
        #
        # compute rotation
        H = src_c.T @ trg_c
        u, _, vh = np.linalg.svd(H)
        R = vh.T @ u.T
        if np.linalg.det(R) < 0:
            vh[d - 1, :] *= -1.
            R = vh.T @ u.T
        #
        # compute translation
        t = centroid_trg.T[0:3] - (R @ centroid_src.T)[0:3]
        #
        # build transformation matrix
        T = R
        T[:3, 3] = t
        return np.array(T)

    # ==============================================================================================
    @staticmethod
    def transform(vertices: np.array, m: Union[np.matrix, Matrix]) -> np.array:
        """Apply a transformation matrix to the given vertices.

        Arguments:
            vertices {np.array} -- vertices to be transformed, can be either of 3D or 4D coordinates.
            m {Union[np.matrix, Matrix]} -- 4x4 transformation matrix

        Raises:
            ValueError: if the transformation matrix is not of shape 4x4

        Returns:
            np.array -- the transformed vertices, 3D or 4D based on the input `vertices`,
                        in the 4D case the coordinate is normalized and w=1
        """
        assert vertices.shape[1] == 3 or vertices.shape[1] == 4
        #
        if isinstance(m, Matrix):   # convert to numpy if needed
            m = np.array(m)
        if m.shape != (4, 4):
            raise ValueError("Transformation matrix must be of shape 4x4! (given {})".format(m.shape))
        if vertices.shape[1] == 3:
            src = np.ones((vertices.shape[0], 4))
            src[:, :3] = np.copy(vertices)
        else:
            src = np.copy(vertices)
        #
        for i, v in enumerate(src):
            v_new = m @ v
            src[i, :-1] = np.array([c / v_new[-1] for c in v_new[:-1]])   # x/w, y/w, z/w
            src[i, -1] = 1.
        #
        # self._show_as_vertices_mesh()
        if vertices.shape[1] == 3:
            return src[:, :-1]   # go back to 3D vectors
        return src               # keep 4D vectors

    # ==============================================================================================
    def evaluate(self, target_pc_kdtree: KDTree, use_filtered_cloud: bool) -> Dict:
        """Evaluate the point cloud w.r.t. the target point cloud.
        The evaluation is done in terms of euclidean distance between the clouds' points.

        Arguments:
            target_pc_kdtree {KDTree} -- target (ground truth) point cloud KDTree
            use_filtered_cloud {bool} -- if {True} the filtered cloud is used for evaluation,
                                         the full one otherwise

        Returns:
            Dict -- evaluation result dictionary containing:
                'dist_mean' {float}: mean distance
                'dist_std' {float}: standard deviation
                'dist_min' {float}: minimum distance
                'dist_max' {float}: maximum distance
                'used_filtered_cloud' {bool}: if the evaluation used only the filtered cloud
                'filter_threshold' {float}: the distance threshold used to filter the point cloud
                'full_cloud_size' {int}: size of the whole reconstructed cloud
                'used_cloud_size' {int}: size of the cloud used for the evaluation
                'used_cloud_size_percent' {float}: percentage of cloud used for the evaluation (in range [0-1])
                'discarded_points' {int}: number of points not used in the evaluation
                'elapsed_time' {float}: elapsed time in seconds
            note that the measure unit depends on the unit set in the scene.
        """
        src_pc = self.vertices_filtered if use_filtered_cloud else self.vertices
        #
        # initial alignment
        src = PointCloud.transform(src_pc, self._object_matrix @ self._initial_centroid_matrix)
        #
        # get distances
        d = [euclidean_distance(v, target_pc_kdtree.find(v)[0]) for v in src]   # no need to normalize, points are 3D
        # d = [target_pc_kdtree.find(v)[2] for v in src]
        #
        # compute statistics
        d_mean = mean(d)
        d_std = stdev(d, d_mean) if len(d) > 1 else 0.
        d_min = min(d)
        d_max = max(d)
        #
        results = {
            "dist_mean": d_mean,
            "dist_std": d_std,
            "dist_min": d_min,
            "dist_max": d_max,
            "used_filtered_cloud": use_filtered_cloud,
            "filter_threshold": self._filter_distance if use_filtered_cloud else float('inf'),
            "full_cloud_size": len(self.vertices),
            "used_cloud_size": len(src),
            "used_cloud_size_percent": len(src) / len(self.vertices),
            "discarded_points": len(self.vertices) - len(src)
        }
        logger.debug("Point cloud eval end. mean=%.3f, std=%.3f, min=%.3f, max=%.3f.",
                     d_mean, d_std, d_min, d_max)
        return results

    # ==============================================================================================
    def clear_filtered_cloud(self) -> None:
        """Clear the list of points that were discarded due to distance threshold filtering."""
        self._discard_vertices.clear()
        self._filter_distance = float('inf')

    # ==============================================================================================
    def has_filtered_cloud(self) -> bool:
        """Check if the point cloud has an active distance threshold filter.

        Returns:
            bool -- {True} if there are filtered points, {False} otherwise
        """
        return (self._discard_vertices is not None) and (len(self._discard_vertices) != 0)
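
# Hypothetical end-to-end sketch of the workflow above (function and variable
# names are assumptions, not part of the source): build a KDTree over the
# ground-truth points, filter the reconstruction, then run the ICP registration.
from mathutils import Matrix
from mathutils.kdtree import KDTree

def align_to_ground_truth(cloud: PointCloud, gt_points, ui_matrix: Matrix):
    kdtree = KDTree(len(gt_points))
    for i, v in enumerate(gt_points):
        kdtree.insert(v, i)
    kdtree.balance()

    # drop reconstructed points farther than 0.5 scene units from the ground truth
    cloud.filter_point_cloud(kdtree, ui_matrix, distance_threshold=0.5)

    align_matrix, error = cloud.get_regsitration_to_target(
        gt_points, ui_matrix, target_pc_kdtree=kdtree, samples=1000)
    return align_matrix, error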