Example #1
def screenshot(x, y, w, h, mode=bgl.GL_FRONT, type=bgl.GL_BYTE):
    """スクリーンショットを撮ってRGBAのfloatバッファを返す
    :param x: Window.x
    :type x: int
    :param y: Window.y
    :type y: int
    :param w: Window.width
    :type w: int
    :param h: Window.height
    :type h: int
    :param mode: 読み込み先
    :type mode: int
    :param type: バッファの型。bgl.GL_BYTE, bgl.GL_INT, ...
    :type type: int
    :return: スクリーンショット。float RGBA
    :rtype: bgl.Buffer
    """
    buf = bgl.Buffer(type, w * h * 4)
    mode_bak = bgl.Buffer(bgl.GL_INT, 1)
    bgl.glGetIntegerv(bgl.GL_READ_BUFFER, mode_bak)
    bgl.glReadBuffer(mode)
    bgl.glReadPixels(x, y, w, h, bgl.GL_RGBA, type, buf)
    bgl.glFinish()
    bgl.glReadBuffer(mode_bak[0])
    return buf
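A minimal usage sketch (not part of the original add-on): reading with type=bgl.GL_FLOAT gives values already normalised to 0..1, so the result can be copied straight into a bpy image for inspection. The helper name and image name are assumptions.

# Hypothetical helper built on screenshot(); assumes it runs inside Blender
# with a valid GL context (e.g. from a draw handler or modal operator).
import bpy
import bgl

def screenshot_to_image(x, y, w, h, image_name="screenshot_preview"):
    # GL_FLOAT values are already in 0..1, which is what Image.pixels expects.
    buf = screenshot(x, y, w, h, type=bgl.GL_FLOAT)
    if image_name not in bpy.data.images:
        bpy.data.images.new(image_name, w, h)
    image = bpy.data.images[image_name]
    image.scale(w, h)
    image.pixels = buf[:]  # bgl.Buffer slices to a plain Python list
    return image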
Example #2
    def capture(self, mouse_x=None, mouse_y=None):
        time_diff()
        self.__update_size()
        time_diff("__update_size")
        bgl.glReadBuffer(bgl.GL_FRONT)
        time_diff("glReadBuffer")
        bgl.glReadPixels(
            0,
            0,
            self.width,
            self.height,
            bgl.GL_RGBA,
            bgl.GL_UNSIGNED_BYTE,
            self.buffer,
        )
        time_diff("glReadPixels")
        # self.image.pixels = [v / 255 for v in self.buffer]
        remapped_buffer = remap(self.buffer, self.remapping_indexes)
        time_diff("remap")
        self.image.pixels = [v / 255 for v in remapped_buffer]

        if mouse_x is not None and mouse_y is not None:
            draw_cursor(self.image, mouse_x / self.width * IMAGE_WIDTH,
                        mouse_y / self.height * IMAGE_HEIGHT)

        time_diff("image.pixels")
Example #4
def draw_scene(self, context, projection_matrix):
    bgl.glEnable(bgl.GL_DEPTH_TEST)
    bgl.glDepthFunc(bgl.GL_LESS)

    # Get List of Mesh Objects
    objs = []
    deps = bpy.context.view_layer.depsgraph
    for obj_int in deps.object_instances:
        obj = obj_int.object
        if obj.type == 'MESH' and obj.hide_render == False:

            mat = obj_int.matrix_world
            obj_eval = obj.evaluated_get(deps)
            mesh = obj_eval.to_mesh(preserve_all_data_layers=True,
                                    depsgraph=bpy.context.view_layer.depsgraph)
            mesh.calc_loop_triangles()
            tris = mesh.loop_triangles
            vertices = []
            indices = []

            for vert in mesh.vertices:
                # Multiply vertex position by the object's transform matrix
                vertices.append(mat @ vert.co)

            for tri in tris:
                indices.append(tri.vertices)

            #shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
            shader = gpu.types.GPUShader(Base_Shader_3D.vertex_shader,
                                         DepthOnlyFrag.fragment_shader)
            batch = batch_for_shader(shader,
                                     'TRIS', {"pos": vertices},
                                     indices=indices)
            batch.program_set(shader)
            batch.draw()
            gpu.shader.unbind()
            obj_eval.to_mesh_clear()

    #Write to Image for Debug
    debug = False
    if debug:
        scene = context.scene
        render_scale = scene.render.resolution_percentage / 100
        width = int(scene.render.resolution_x * render_scale)
        height = int(scene.render.resolution_y * render_scale)

        buffer = bgl.Buffer(bgl.GL_BYTE, width * height * 4)
        bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)
        bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA,
                         bgl.GL_UNSIGNED_BYTE, buffer)

        image_name = "measureit_arch_depth"
        if image_name not in bpy.data.images:
            bpy.data.images.new(image_name, width, height)

        image = bpy.data.images[image_name]
        image.scale(width, height)
        image.pixels = [v / 255 for v in buffer]

    bgl.glDisable(bgl.GL_DEPTH_TEST)
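Base_Shader_3D and DepthOnlyFrag come from elsewhere in the MeasureIt_ARCH add-on and are not shown above. A minimal sketch of what an equivalent depth-only shader pair could look like (an assumption, not the add-on's actual GLSL):

# Sketch only: stand-ins for the add-on's shader containers.
class Base_Shader_3D:
    vertex_shader = '''
        uniform mat4 ModelViewProjectionMatrix;
        in vec3 pos;
        void main()
        {
            gl_Position = ModelViewProjectionMatrix * vec4(pos, 1.0);
        }
    '''

class DepthOnlyFrag:
    fragment_shader = '''
        out vec4 fragColor;
        void main()
        {
            // Only the depth buffer matters for this pass; write a constant color.
            fragColor = vec4(0.0, 0.0, 0.0, 1.0);
        }
    '''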
Example #5
    def as_image(self, x, y, w, h, image_name):
        import bpy

        buffer = bgl.Buffer(bgl.GL_FLOAT, w * h)  # * 4)

        bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)
        check_error("glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)")

        bgl.glReadPixels(x, y, w, h, bgl.GL_RED_INTEGER, bgl.GL_UNSIGNED_INT,
                         buffer)
        check_error("glReadPixels as_image")

        if image_name not in bpy.data.images:
            image = bpy.data.images.new(image_name, w, h)
        else:
            image = bpy.data.images[image_name]
            #if image.size[:] != (w, h):
            image.scale(w, h)

        pix = []
        for v in buffer:
            # pix.extend([v / self._offset_cur, 0, 0, 1])
            pix.extend([v, 0, 0, 1])

        image.pixels = pix
        # image.pixels = [v / 255 for v in buffer]
        image.update()
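The per-value Python loop above can be slow for large reads. A sketch of a vectorised alternative, assuming numpy is available (not part of the original code); image.pixels can then be assigned from the returned array in one call.

import numpy as np

def red_buffer_to_rgba(buffer, w, h):
    # Expand the single-channel values into RGBA pixels: value in red,
    # green/blue zero, alpha one.
    red = np.asarray(buffer[:], dtype=np.float32)
    rgba = np.zeros((w * h, 4), dtype=np.float32)
    rgba[:, 0] = red
    rgba[:, 3] = 1.0
    return rgba.ravel()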
Example #6
def render_debug_cross(context, props: MasterProperties) -> (bgl.Buffer, int):
    """
    Render debug cross
    :returns buffer with image and draw call count
    """
    shaders = Shaders()

    offscreen = gpu.types.GPUOffScreen(props.resolution.resolution_x,
                                       props.resolution.resolution_y)
    draw_count = 0

    quad_batch = batch_quad(shaders.debug)

    with offscreen.bind():
        # black background
        bgl.glClearColor(0.0, 0.0, 0.0, 1.0)
        bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)
        bgl.glEnable(bgl.GL_BLEND)
        bgl.glBlendFunc(bgl.GL_SRC_ALPHA, bgl.GL_ONE)

        shaders.debug.bind()

        for position in props.positions:
            pos = Vector((position.manual_x, position.manual_y))

            # set position from object
            if position.variant == 'auto' and position.auto_object is not None:
                world_pos = position.auto_object.matrix_world.to_translation()
                pos = bpy_extras.object_utils.world_to_camera_view(
                    context.scene, context.scene.camera, world_pos)

            uniforms = {
                "flare_position":
                pos.xy,
                "aspect_ratio":
                props.resolution.resolution_x / props.resolution.resolution_y,
            }

            set_float_uniforms(shaders.debug, uniforms)

            quad_batch.draw(shaders.debug)
            draw_count += 1

        # copy rendered image to RAM
        buffer = bgl.Buffer(
            bgl.GL_FLOAT,
            props.resolution.resolution_x * props.resolution.resolution_y * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, props.resolution.resolution_x,
                         props.resolution.resolution_y, bgl.GL_RGBA,
                         bgl.GL_FLOAT, buffer)

    return buffer, draw_count
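A hypothetical usage sketch (not from the source): because the buffer was read back with GL_FLOAT, its values are already in 0..1 and can be assigned to a bpy image without the /255 rescaling used elsewhere. The image name is an assumption.

import bpy

def debug_cross_to_image(context, props, image_name="lens_flare_debug"):
    buffer, draw_count = render_debug_cross(context, props)
    w = props.resolution.resolution_x
    h = props.resolution.resolution_y
    if image_name not in bpy.data.images:
        bpy.data.images.new(image_name, w, h, float_buffer=True)
    image = bpy.data.images[image_name]
    image.scale(w, h)
    image.pixels = buffer[:]
    return image, draw_count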
Example #7
    def get_image(self):
        """
        sends serialized image, uint8 image

        """
        _buffer = bgl.Buffer(bgl.GL_INT, 4)
        bgl.glGetIntegerv(bgl.GL_VIEWPORT, _buffer)
        bgl.glReadBuffer(bgl.GL_FRONT)
        pix = bgl.Buffer(bgl.GL_INT, _buffer[2] * _buffer[3])
        bgl.glReadPixels(_buffer[0], _buffer[1], _buffer[2], _buffer[3], bgl.GL_LUMINANCE, bgl.GL_INT, pix)
        array = numpy.zeros((self.screen_w * self.screen_h), dtype=numpy.uint8)
        array[0:self.screen_w * self.screen_h] = pix
        self.get_data((self.screen_w, self.screen_h))
        for i in range(0, len(array), 400):
            self.get_data(array[i:i + 400])
Example #8
    def _read_buffer(self, mval):
        xmin = int(mval[0]) - self._dist_px
        ymin = int(mval[1]) - self._dist_px
        size_x = size_y = self.threshold

        if xmin < 0:
            #size_x += xmin
            xmin = 0

        if ymin < 0:
            #size_y += ymin
            ymin = 0

        bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)
        bgl.glReadPixels(xmin, ymin, size_x, size_y, bgl.GL_RED_INTEGER,
                         bgl.GL_UNSIGNED_INT, self._snap_buffer)
def capture_under_cursor(buffer, mouse_x=0, mouse_y=0, type_flg="i") -> list:
    """
    Return a flat list of RGBA (float) values.
    """
    # Creating and reading a GL_FLOAT buffer is absurdly slow, so GL_BYTE / GL_UNSIGNED_BYTE is used instead
    bgl.glReadBuffer(bgl.GL_FRONT)
    bgl.glReadPixels(
        mouse_x,
        mouse_y,
        1,
        1,
        bgl.GL_RGBA,
        bgl.GL_UNSIGNED_BYTE,
        buffer,
    )
    if type_flg == "i":
        return [value for value in buffer]
    elif type_flg == "f":
        return [value / 255 for value in buffer]
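A hypothetical usage sketch for capture_under_cursor(), e.g. from a modal operator: the 4-byte buffer is allocated once and reused, which is the point of the comment above about avoiding GL_FLOAT reads.

import bgl

_pick_buffer = bgl.Buffer(bgl.GL_BYTE, 4)  # reused between calls

def pick_color_under_mouse(event):
    # Returns the flat RGBA list for the pixel under the mouse cursor.
    return capture_under_cursor(_pick_buffer,
                                mouse_x=event.mouse_x,
                                mouse_y=event.mouse_y,
                                type_flg="f")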
Example #10
def gen_screenshot_texture(x, y, w, h, mode=None):
    scissor_is_enabled = bgl.Buffer(bgl.GL_BYTE, 1)
    bgl.glGetIntegerv(bgl.GL_SCISSOR_TEST, scissor_is_enabled)
    scissor_box = bgl.Buffer(bgl.GL_INT, 4)
    bgl.glGetIntegerv(bgl.GL_SCISSOR_BOX, scissor_box)
    bgl.glEnable(bgl.GL_SCISSOR_TEST)
    bgl.glScissor(x, y, w, h)

    mode_bak = bgl.Buffer(bgl.GL_INT, 1)
    bgl.glGetIntegerv(bgl.GL_READ_BUFFER, mode_bak)
    if mode is not None:
        bgl.glReadBuffer(mode)

    pixels = bgl.Buffer(bgl.GL_BYTE, 4 * w * h)
    # Must read as GL_RGBA, otherwise the rows come out skewed
    # Must read as GL_UNSIGNED_BYTE, otherwise the colors are slightly off
    bgl.glReadPixels(x, y, w, h, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, pixels)
    bgl.glFinish()

    if mode is not None:
        bgl.glReadBuffer(mode_bak[0])

    # Invert the colors (kept for debugging)
    # for i in range(4 * w * h):
    #     if (i % 4) != 3:
    #         pixels[i] = 255 - pixels[i]

    tex = gen_texture()
    bgl.glBindTexture(bgl.GL_TEXTURE_2D, tex)
    bgl.glTexImage2D(bgl.GL_TEXTURE_2D, 0, bgl.GL_RGBA, w, h, 0, bgl.GL_RGBA,
                     bgl.GL_UNSIGNED_BYTE, pixels)
    bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0)

    if not scissor_is_enabled[0]:
        bgl.glDisable(bgl.GL_SCISSOR_TEST)
    bgl.glScissor(*scissor_box)

    return tex
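gen_texture() is defined elsewhere in the add-on. A minimal sketch of what it might do (an assumption): allocate a single texture name and set basic filtering so the screenshot texture can be sampled later.

import bgl

def gen_texture():
    tex_ids = bgl.Buffer(bgl.GL_INT, 1)
    bgl.glGenTextures(1, tex_ids)
    tex = tex_ids[0]
    bgl.glBindTexture(bgl.GL_TEXTURE_2D, tex)
    bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_LINEAR)
    bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_LINEAR)
    bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0)
    return tex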
Example #11
def render_main(self, context, animation=False):
    # Save old info
    settings = bpy.context.scene.render.image_settings
    depth = settings.color_depth
    settings.color_depth = '8'

    # Get object list
    scene = context.scene
    objlist = context.scene.objects
    # --------------------
    # Get resolution
    # --------------------
    render_scale = scene.render.resolution_percentage / 100
    width = int(scene.render.resolution_x * render_scale)
    height = int(scene.render.resolution_y * render_scale)

    # --------------------------------------
    # Loop to draw all lines in Offscreen
    # --------------------------------------
    offscreen = gpu.types.GPUOffScreen(width, height)
    view_matrix = Matrix([
        [2 / width, 0, 0, -1],
        [0, 2 / height, 0, -1],
        [0, 0, 1, 0],
        [0, 0, 0, 1]])

    with offscreen.bind():
        bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)
        gpu.matrix.reset()
        gpu.matrix.load_matrix(view_matrix)
        gpu.matrix.load_projection_matrix(Matrix.Identity(4))

        # -----------------------------
        # Loop to draw all objects
        # -----------------------------
        for myobj in objlist:
            if myobj.visible_get() is True:
                if 'MeasureGenerator' in myobj:
                    op = myobj.MeasureGenerator[0]
                    draw_segments(context, myobj, op, None, None)
        # -----------------------------
        # Loop to draw all debug
        # -----------------------------
        if scene.measureit_debug is True:
            selobj = bpy.context.selected_objects
            for myobj in selobj:
                if scene.measureit_debug_objects is True:
                    draw_object(context, myobj, None, None)
                elif scene.measureit_debug_object_loc is True:
                    draw_object(context, myobj, None, None)
                if scene.measureit_debug_vertices is True:
                    draw_vertices(context, myobj, None, None)
                elif scene.measureit_debug_vert_loc is True:
                    draw_vertices(context, myobj, None, None)
                if scene.measureit_debug_edges is True:
                    draw_edges(context, myobj, None, None)
                if scene.measureit_debug_faces is True or scene.measureit_debug_normals is True:
                    draw_faces(context, myobj, None, None)
        # -----------------------------
        # Draw a rectangle frame
        # -----------------------------
        if scene.measureit_rf is True:
            rfcolor = scene.measureit_rf_color
            rfborder = scene.measureit_rf_border
            rfline = scene.measureit_rf_line

            bgl.glLineWidth(rfline)
            x1 = rfborder
            x2 = width - rfborder
            y1 = int(ceil(rfborder / (width / height)))
            y2 = height - y1
            draw_rectangle((x1, y1), (x2, y2), rfcolor)

        buffer = bgl.Buffer(bgl.GL_BYTE, width * height * 4)
        bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)
        bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)

    offscreen.free()

    # -----------------------------
    # Create image
    # -----------------------------
    image_name = "measureit_output"
    if not image_name in bpy.data.images:
        bpy.data.images.new(image_name, width, height)

    image = bpy.data.images[image_name]
    image.scale(width, height)
    image.pixels = [v / 255 for v in buffer]

    # Saves image
    if image is not None and (scene.measureit_render is True or animation is True):
        ren_path = bpy.context.scene.render.filepath
        filename = "mit_frame"
        if len(ren_path) > 0:
            if ren_path.endswith(path.sep):
                initpath = path.realpath(ren_path) + path.sep
            else:
                (initpath, filename) = path.split(ren_path)

        ftxt = "%04d" % scene.frame_current
        outpath = path.realpath(path.join(initpath, filename + ftxt + ".png"))
        save_image(self, outpath, image)

    # restore default value
    settings.color_depth = depth
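The `image.pixels = [v / 255 for v in buffer]` pattern used above (and in several other examples) is slow for large renders, and because the bgl.Buffer was created with GL_BYTE its elements read back as signed, so channel values above 127 appear negative. A sketch of a faster, sign-safe conversion, assuming numpy and a Blender version with foreach_set (2.83+):

import numpy as np

def buffer_to_image_pixels(image, buffer):
    # Mask with 0xFF to recover the unsigned byte value, then normalise to 0..1.
    arr = (np.asarray(buffer[:], dtype=np.int32) & 0xFF).astype(np.float32) / 255.0
    image.pixels.foreach_set(arr)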
    def execute(self, context):
        # import cProfile, pstats, io
        # pr = cProfile.Profile()
        # pr.enable()

        bgl.glEnable(bgl.GL_PROGRAM_POINT_SIZE)

        scene = context.scene
        render = scene.render
        image_settings = render.image_settings

        original_depth = image_settings.color_depth
        image_settings.color_depth = '8'

        scale = render.resolution_percentage / 100
        width = int(render.resolution_x * scale)
        height = int(render.resolution_y * scale)

        pcv = context.object.point_cloud_visualizer
        cloud = PCVManager.cache[pcv.uuid]
        cam = scene.camera
        if (cam is None):
            self.report({'ERROR'}, "No camera found.")
            return {'CANCELLED'}

        render_suffix = pcv.render_suffix
        render_zeros = pcv.render_zeros

        offscreen = GPUOffScreen(width, height)
        offscreen.bind()
        # with offscreen.bind():
        try:
            gpu.matrix.load_matrix(Matrix.Identity(4))
            gpu.matrix.load_projection_matrix(Matrix.Identity(4))

            bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)

            o = cloud['object']
            vs = cloud['vertices']
            cs = cloud['colors']

            dp = pcv.display_percent
            l = int((len(vs) / 100) * dp)
            if (dp >= 99):
                l = len(vs)
            vs = vs[:l]
            cs = cs[:l]

            # sort by depth
            mw = o.matrix_world
            depth = []
            for i, v in enumerate(vs):
                vw = mw @ Vector(v)
                depth.append(world_to_camera_view(scene, cam, vw)[2])
            zps = zip(depth, vs, cs)
            sps = sorted(zps, key=lambda a: a[0])
            # split and reverse
            vs = [a for _, a, b in sps][::-1]
            cs = [b for _, a, b in sps][::-1]

            shader = GPUShader(vertex_shader, fragment_shader)
            batch = batch_for_shader(shader, 'POINTS', {
                "position": vs,
                "color": cs,
            })
            shader.bind()

            view_matrix = cam.matrix_world.inverted()
            camera_matrix = cam.calc_matrix_camera(
                bpy.context.depsgraph,
                x=render.resolution_x,
                y=render.resolution_y,
                scale_x=render.pixel_aspect_x,
                scale_y=render.pixel_aspect_y,
            )
            perspective_matrix = camera_matrix @ view_matrix

            shader.uniform_float("perspective_matrix", perspective_matrix)
            shader.uniform_float("object_matrix", o.matrix_world)
            # shader.uniform_float("point_size", pcv.point_size)
            shader.uniform_float("point_size", pcv.render_point_size)
            shader.uniform_float("alpha_radius", pcv.alpha_radius)
            batch.draw(shader)

            buffer = bgl.Buffer(bgl.GL_BYTE, width * height * 4)
            # bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)
            bgl.glReadBuffer(bgl.GL_BACK)
            bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA,
                             bgl.GL_UNSIGNED_BYTE, buffer)

        except Exception as e:
            self.report({'ERROR'}, str(e))
            return {'CANCELLED'}

        finally:
            offscreen.unbind()
            offscreen.free()
        # offscreen.free()

        # image from buffer
        image_name = "pcv_output"
        if (not image_name in bpy.data.images):
            bpy.data.images.new(image_name, width, height)
        image = bpy.data.images[image_name]
        image.scale(width, height)
        image.pixels = [v / 255 for v in buffer]

        # save as image file
        save_render(
            self,
            scene,
            image,
            render_suffix,
            render_zeros,
        )

        # restore
        image_settings.color_depth = original_depth

        # pr.disable()
        # s = io.StringIO()
        # sortby = 'cumulative'
        # ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        # ps.print_stats()
        # print(s.getvalue())

        return {'FINISHED'}
Example #14
def encodeImageRGBMGPU(image, maxRange, outDir, quality):
    input_image = bpy.data.images[image.name]
    image_name = input_image.name

    offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1])

    image = input_image

    vertex_shader = '''

        uniform mat4 ModelViewProjectionMatrix;

        in vec2 texCoord;
        in vec2 pos;
        out vec2 texCoord_interp;

        void main()
        {
        //gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f);
        //gl_Position.z = 1.0;
        gl_Position = vec4(pos.xy, 100, 100);
        texCoord_interp = texCoord;
        }

    '''
    fragment_shader = '''
        in vec2 texCoord_interp;
        out vec4 fragColor;

        uniform sampler2D image;

        //Code from here: https://github.com/BabylonJS/Babylon.js/blob/master/src/Shaders/ShadersInclude/helperFunctions.fx

        const float PI = 3.1415926535897932384626433832795;
        const float HALF_MIN = 5.96046448e-08; // Smallest positive half.

        const float LinearEncodePowerApprox = 2.2;
        const float GammaEncodePowerApprox = 1.0 / LinearEncodePowerApprox;
        const vec3 LuminanceEncodeApprox = vec3(0.2126, 0.7152, 0.0722);

        const float Epsilon = 0.0000001;
        #define saturate(x)         clamp(x, 0.0, 1.0)

        float maxEps(float x) {
            return max(x, Epsilon);
        }

        float toLinearSpace(float color)
        {
            return pow(color, LinearEncodePowerApprox);
        }

        vec3 toLinearSpace(vec3 color)
        {
            return pow(color, vec3(LinearEncodePowerApprox));
        }

        vec4 toLinearSpace(vec4 color)
        {
            return vec4(pow(color.rgb, vec3(LinearEncodePowerApprox)), color.a);
        }

        vec3 toGammaSpace(vec3 color)
        {
            return pow(color, vec3(GammaEncodePowerApprox));
        }

        vec4 toGammaSpace(vec4 color)
        {
            return vec4(pow(color.rgb, vec3(GammaEncodePowerApprox)), color.a);
        }

        float toGammaSpace(float color)
        {
            return pow(color, GammaEncodePowerApprox);
        }

        float square(float value)
        {
            return value * value;
        }

        // Check if configurable value is needed.
        const float rgbdMaxRange = 255.0;

        vec4 toRGBM(vec3 color) {

            float maxRGB = maxEps(max(color.r, max(color.g, color.b)));
            float D      = max(rgbdMaxRange / maxRGB, 1.);
            D            = clamp(floor(D) / 255.0, 0., 1.);
            vec3 rgb = color.rgb * D;

            // Helps with png quantization.
            rgb = toGammaSpace(rgb);

            return vec4(rgb, D);
        }

        vec3 fromRGBD(vec4 rgbd) {
            // Helps with png quantization.
            rgbd.rgb = toLinearSpace(rgbd.rgb);

            // return rgbd.rgb * ((rgbdMaxRange / 255.0) / rgbd.a);

            return rgbd.rgb / rgbd.a;
        }

        void main()
        {

        fragColor = toRGBM(texture(image, texCoord_interp).rgb);

        }

    '''

    x_screen = 0
    off_x = -100
    off_y = -100
    y_screen_flip = 0
    sx = 200
    sy = 200

    vertices = (
                (x_screen + off_x, y_screen_flip - off_y), 
                (x_screen + off_x, y_screen_flip - sy - off_y), 
                (x_screen + off_x + sx, y_screen_flip - sy - off_y),
                (x_screen + off_x + sx, y_screen_flip - off_y))

    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'

    # Remove the .exr or .hdr extension
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]

    target_image = bpy.data.images.get(image_name + '_encoded')
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
                name = image_name + '_encoded',
                width = input_image.size[0],
                height = input_image.size[1],
                alpha = True,
                float_buffer = False
                )

    shader = gpu.types.GPUShader(vertex_shader, fragment_shader)
    batch = batch_for_shader(
        shader, 'TRI_FAN',
        {
            "pos": vertices,
            "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)),
        },
    )

    if image.gl_load():
        raise Exception()
    
    with offscreen.bind():
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode)

        shader.bind()
        shader.uniform_int("image", 0)
        batch.draw(shader)
        
        buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)

    offscreen.free()
    
    target_image.pixels = [v / 255 for v in buffer]
    input_image = target_image
    
    #Save encoded RGBM/RGBD image
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)
    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    #input_image.filepath_raw = outDir + "_encoded.png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    #input_image.save_render(filepath = input_image.filepath_raw, scene = bpy.context.scene)
    input_image.save()
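A hypothetical call site (image name, output directory, and quality are assumptions): encode a baked HDR lightmap to an 8-bit PNG with the shader above.

import bpy

img = bpy.data.images["bake_result.exr"]          # hypothetical image name
encodeImageRGBMGPU(img, 6.0, "/tmp/lightmaps", 95)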
Example #15
def encodeLogLuvGPU(image, outDir, quality):

    bpy.app.driver_namespace["logman"].append("Starting LogLuv encode for: " + str(image.name))

    input_image = bpy.data.images[image.name]
    image_name = input_image.name

    offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1])

    image = input_image

    vertex_shader = '''

        uniform mat4 ModelViewProjectionMatrix;

        in vec2 texCoord;
        in vec2 pos;
        out vec2 texCoord_interp;

        void main()
        {
        //gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f);
        //gl_Position.z = 1.0;
        gl_Position = vec4(pos.xy, 100, 100);
        texCoord_interp = texCoord;
        }

    '''
    fragment_shader = '''
        in vec2 texCoord_interp;
        out vec4 fragColor;

        uniform sampler2D image;
        
        const mat3 cLogLuvM = mat3( 0.2209, 0.3390, 0.4184, 0.1138, 0.6780, 0.7319, 0.0102, 0.1130, 0.2969 );
        vec4 LinearToLogLuv( in vec4 value )  {
            vec3 Xp_Y_XYZp = cLogLuvM * value.rgb;
            Xp_Y_XYZp = max( Xp_Y_XYZp, vec3( 1e-6, 1e-6, 1e-6 ) );
            vec4 vResult;
            vResult.xy = Xp_Y_XYZp.xy / Xp_Y_XYZp.z;
            float Le = 2.0 * log2(Xp_Y_XYZp.y) + 127.0;
            vResult.w = fract( Le );
            vResult.z = ( Le - ( floor( vResult.w * 255.0 ) ) / 255.0 ) / 255.0;
            return vResult;
            //return vec4(Xp_Y_XYZp,1);
        }
        
        const mat3 cLogLuvInverseM = mat3( 6.0014, -2.7008, -1.7996, -1.3320, 3.1029, -5.7721, 0.3008, -1.0882, 5.6268 );
        vec4 LogLuvToLinear( in vec4 value ) {
            float Le = value.z * 255.0 + value.w;
            vec3 Xp_Y_XYZp;
            Xp_Y_XYZp.y = exp2( ( Le - 127.0 ) / 2.0 );
            Xp_Y_XYZp.z = Xp_Y_XYZp.y / value.y;
            Xp_Y_XYZp.x = value.x * Xp_Y_XYZp.z;
            vec3 vRGB = cLogLuvInverseM * Xp_Y_XYZp.rgb;
            //return vec4( max( vRGB, 0.0 ), 1.0 );
            return vec4( max( Xp_Y_XYZp, 0.0 ), 1.0 );
        }

        void main()
        {
        //fragColor = LinearToLogLuv(pow(texture(image, texCoord_interp), vec4(0.454)));
        fragColor = LinearToLogLuv(texture(image, texCoord_interp));
        //fragColor = LogLuvToLinear(LinearToLogLuv(texture(image, texCoord_interp)));
        }

    '''

    x_screen = 0
    off_x = -100
    off_y = -100
    y_screen_flip = 0
    sx = 200
    sy = 200

    vertices = (
                (x_screen + off_x, y_screen_flip - off_y), 
                (x_screen + off_x, y_screen_flip - sy - off_y), 
                (x_screen + off_x + sx, y_screen_flip - sy - off_y),
                (x_screen + off_x + sx, y_screen_flip - off_y))

    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'

    # Remove the .exr or .hdr extension
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]

    target_image = bpy.data.images.get(image_name + '_encoded')
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
                name = image_name + '_encoded',
                width = input_image.size[0],
                height = input_image.size[1],
                alpha = True,
                float_buffer = False
                )

    shader = gpu.types.GPUShader(vertex_shader, fragment_shader)
    batch = batch_for_shader(
        shader, 'TRI_FAN',
        {
            "pos": vertices,
            "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)),
        },
    )

    if image.gl_load():
        raise Exception()
    
    with offscreen.bind():
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode)

        shader.bind()
        shader.uniform_int("image", 0)
        batch.draw(shader)
        
        buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)

    offscreen.free()
    
    target_image.pixels = [v / 255 for v in buffer]
    input_image = target_image
    
    #Save LogLuv
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)
    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    #input_image.filepath_raw = outDir + "_encoded.png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    #input_image.save_render(filepath = input_image.filepath_raw, scene = bpy.context.scene)
    input_image.save()
    def snap(self, mval):
        t = time.time()

        ret = None
        self.mval[:] = mval

        gpu_Indices_enable_state()
        self._offscreen.bind()

        proj_mat = self.rv3d.perspective_matrix.copy()

        if self.proj_mat != proj_mat:
            self.proj_mat = proj_mat
            GPU_Indices.set_ProjectionMatrix(self.proj_mat)
            self.update_all()

        ray_dir, ray_orig = self.get_ray(mval)
        for i, snap_obj in enumerate(self.snap_objects[self.drawn_count:],
                                     self.drawn_count):

            obj = snap_obj.data[0]

            # origins
            if obj.__class__.__name__ == 'list':
                in_threshold = (self._snap_mode & ORIGIN)
            else:
                # allow to hide some objects from snap, eg active object when moving
                if obj is None or obj.name in self._exclude:
                    # print("exclude %s" % obj.name)
                    continue

                bbmin = Vector(obj.bound_box[0])
                bbmax = Vector(obj.bound_box[6])

                # check objects under ray using bound box
                if bbmin != bbmax:
                    MVP = proj_mat @ snap_obj.mat
                    mat_inv = snap_obj.mat.inverted()
                    ray_orig_local = mat_inv @ ray_orig
                    ray_dir_local = ray_dir @ snap_obj.mat
                    in_threshold = intersect_boundbox_threshold(
                        self, MVP, ray_orig_local, ray_dir_local, bbmin, bbmax)

                else:
                    dist = self._max_pixel_dist(snap_obj.mat.translation)
                    in_threshold = dist < self._dist_px

            # print("ray_orig %s  ray_dir %s  %s in_threshold %s" % (ray_orig, ray_dir, obj.name, in_threshold))

            if in_threshold:

                # create shader and data for detail analysis
                if len(snap_obj.data) == 1:
                    # tim = time.time()

                    snap_obj.data.append(GPU_Indices(obj))
                    # print("create data %.4f" % (time.time() - tim))

                snap_obj.data[1].set_draw_mode(
                    (self._snap_mode &
                     (KNOT | SEGS | SEGS_CENTER | SEGS_PERPENDICULAR
                      | SEGS_PARALLEL)) > 0, (self._snap_mode & ORIGIN) > 0)
                snap_obj.data[1].set_ModelViewMatrix(snap_obj.mat)
                snap_obj.data[1].draw(self._offset_cur)

                self._offset_cur += snap_obj.data[1].get_tot_elems()

                self.snap_objects[self.drawn_count], self.snap_objects[i] = \
                    self.snap_objects[i], self.snap_objects[self.drawn_count]
                self.drawn_count += 1

        bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)
        bgl.glReadPixels(
            int(self.mval[0]) - self._dist_px,
            int(self.mval[1]) - self._dist_px, self.threshold, self.threshold,
            bgl.GL_RED_INTEGER, bgl.GL_UNSIGNED_INT, self._snap_buffer)
        bgl.glReadBuffer(bgl.GL_BACK)

        snap_obj, index = self._get_nearest_index()

        if snap_obj:
            ret = self._get_loc(snap_obj, index)

        # print(ret)

        # if dither_enabled:
        #     bgl.glEnable(bgl.GL_DITHER)  # dithering and AA break color coding, so disable #
        # if multisample_enabled:
        #     bgl.glEnable(bgl.GL_MULTISAMPLE)

        self._offscreen.unbind()
        gpu_Indices_restore_state()

        # print("curve snap %s %.4f" % (len(self.snap_objects), time.time() - t))

        return snap_obj, ret
Example #17
def render_lens_flare(context, props: MasterProperties) -> (bgl.Buffer, int):
    """
    Renders lens flare effect to buffer
    :returns buffer with effect
    """
    max_x = props.resolution.resolution_x
    max_y = props.resolution.resolution_y

    # render kinda circles
    blades = props.camera.blades
    if blades == 0:
        blades = 256

    shaders = Shaders()

    offscreen = gpu.types.GPUOffScreen(max_x, max_y)
    ghost_fb = gpu.types.GPUOffScreen(max_x, max_y)

    ghost_batch = batch_from_blades(blades, shaders.ghost)
    quad_batch = batch_quad(shaders.flare)

    draw_count = 0

    noise_tex = NoiseTexture()

    # clear framebuffer
    with offscreen.bind():
        # black background
        bgl.glClearColor(0.0, 0.0, 0.0, 1.0)
        bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)
        bgl.glEnable(bgl.GL_BLEND)
        bgl.glBlendFunc(bgl.GL_SRC_ALPHA, bgl.GL_ONE)

    for position in props.positions:
        pos = Vector((position.manual_x, position.manual_y))

        # set position from object
        if position.variant == 'auto' and position.auto_object is not None:
            world_pos = position.auto_object.matrix_world.to_translation()
            pos = bpy_extras.object_utils.world_to_camera_view(
                context.scene, context.scene.camera, world_pos)

        flare_vector = pos.xy - Vector((0.5, 0.5))
        flare_vector.normalize()

        # first render ghosts one by one
        for ghost in props.ghosts:
            # calculate position
            ghost_x = ((pos.x - 0.5) * 2.0) * ghost.offset
            ghost_y = ((pos.y - 0.5) * 2.0) * ghost.offset
            # add perpendicular offset
            ghost_x += flare_vector.y * ghost.perpendicular_offset
            ghost_y += -flare_vector.x * ghost.perpendicular_offset

            with ghost_fb.bind():
                render_ghost(props, ghost, shaders.ghost, ghost_batch,
                             flare_vector, pos)
                draw_count += 1

            with offscreen.bind():
                # now copy to final buffer
                bgl.glActiveTexture(bgl.GL_TEXTURE0)
                bgl.glBindTexture(bgl.GL_TEXTURE_2D, ghost_fb.color_texture)

                # disable wrapping
                bgl.glTexParameterf(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_S,
                                    bgl.GL_CLAMP_TO_BORDER)
                bgl.glTexParameterf(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_T,
                                    bgl.GL_CLAMP_TO_BORDER)

                border_color = bgl.Buffer(bgl.GL_FLOAT, 4,
                                          [0.0, 0.0, 0.0, 1.0])

                bgl.glTexParameterfv(bgl.GL_TEXTURE_2D,
                                     bgl.GL_TEXTURE_BORDER_COLOR, border_color)

                bgl.glActiveTexture(bgl.GL_TEXTURE2)
                bgl.glBindTexture(bgl.GL_TEXTURE_2D,
                                  props.spectrum_image.bindcode)

                bgl.glActiveTexture(bgl.GL_TEXTURE1)
                bgl.glBindTexture(bgl.GL_TEXTURE_2D, noise_tex.gl_code)

                copy_ghost(shaders.copy, quad_batch, ghost, props,
                           Vector((ghost_x, ghost_y)))
                draw_count += 1

        # finally render flare on top
        with offscreen.bind():
            bgl.glActiveTexture(bgl.GL_TEXTURE0)
            bgl.glBindTexture(bgl.GL_TEXTURE_2D, noise_tex.gl_code)

            render_flare(props, pos.xy, shaders.flare, quad_batch)
            draw_count += 1

    with offscreen.bind():
        # copy rendered image to RAM
        buffer = bgl.Buffer(bgl.GL_FLOAT, max_x * max_y * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, max_x, max_y, bgl.GL_RGBA, bgl.GL_FLOAT, buffer)

    offscreen.free()
    ghost_fb.free()
    noise_tex.free()

    return buffer, draw_count
def render_main_svg(self, context, animation=False):

    # Save old info
    scene = context.scene
    sceneProps= scene.MeasureItArchProps
    sceneProps.is_render_draw = True
    sceneProps.is_vector_draw = True

    clipdepth = context.scene.camera.data.clip_end
    path = scene.render.filepath
    objlist = context.view_layer.objects

    # --------------------
    # Get resolution
    # --------------------

    render_scale = scene.render.resolution_percentage / 100
    width = int(scene.render.resolution_x * render_scale)
    height = int(scene.render.resolution_y * render_scale)

    offscreen = gpu.types.GPUOffScreen(width, height)
    
    view_matrix_3d = scene.camera.matrix_world.inverted()
    projection_matrix = scene.camera.calc_matrix_camera(context.view_layer.depsgraph, x=width, y=height)

    # Render Depth Buffer
    with offscreen.bind():
        # Clear Depth Buffer, set clear depth to the camera's clip distance
        bgl.glClear(bgl.GL_DEPTH_BUFFER_BIT)
        bgl.glClearDepth(clipdepth)
        bgl.glEnable(bgl.GL_DEPTH_TEST)
        bgl.glDepthFunc(bgl.GL_LEQUAL)  

        gpu.matrix.reset()
        gpu.matrix.load_matrix(view_matrix_3d)
        gpu.matrix.load_projection_matrix(projection_matrix)

        texture_buffer = bgl.Buffer(bgl.GL_BYTE, width * height * 4)

        draw_scene(self, context, projection_matrix) 

        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, texture_buffer)

        if 'depthbuffer' in sceneProps:
            del sceneProps['depthbuffer']
        sceneProps['depthbuffer'] = texture_buffer
    offscreen.free()

    if True:
        if not str('test') in bpy.data.images:
            bpy.data.images.new(str('test'), width, height)
        image = bpy.data.images[str('test')]
        image.scale(width, height)
        image.pixels = [v / 255 for v in texture_buffer]



    # Setup Output Path
    ren_path = bpy.context.scene.render.filepath
    filename = "mit_vector"
    ftxt = "%04d" % scene.frame_current
    outpath = (ren_path + filename + ftxt + '.svg')

    # Setup basic svg
    svg = svgwrite.Drawing(
            outpath,
            debug=False,
            size=('{}mm'.format(width), '{}mm'.format(height)),
            viewBox=('0 0 {} {}'.format(width,height)),
            id='root',
        )



    # -----------------------------
    # Loop to draw all objects
    # -----------------------------
    for myobj in objlist:
        if myobj.visible_get() is True:
            mat = myobj.matrix_world
            if 'DimensionGenerator' in myobj:
                measureGen = myobj.DimensionGenerator[0]
                if 'alignedDimensions' in measureGen:
                    for linDim in measureGen.alignedDimensions:
                        draw_alignedDimension(context, myobj, measureGen,linDim,mat,svg=svg)
                if 'angleDimensions' in measureGen:
                    for dim in measureGen.angleDimensions:
                        draw_angleDimension(context, myobj, measureGen,dim,mat,svg=svg)
                if 'axisDimensions' in measureGen:
                    for dim in measureGen.axisDimensions:
                        draw_axisDimension(context, myobj, measureGen,dim,mat,svg=svg)
                if 'boundsDimensions' in measureGen:
                    for dim in measureGen.boundsDimensions:
                        draw_boundsDimension(context, myobj, measureGen,dim,mat,svg=svg)
                if 'arcDimensions' in measureGen:
                    for dim in measureGen.arcDimensions:
                        draw_arcDimension(context, myobj, measureGen,dim,mat,svg=svg)
                if 'areaDimensions' in measureGen:
                    for dim in measureGen.areaDimensions:
                        draw_areaDimension(context, myobj, measureGen,dim,mat,svg=svg)

            if 'LineGenerator' in myobj:
                # Draw Line Groups
                op = myobj.LineGenerator[0]
                draw_line_group(context, myobj, op, mat,svg=svg)
            
            #if 'AnnotationGenerator' in myobj:
            #    op = myobj.AnnotationGenerator[0]
            #    draw_annotation(context, myobj, op, mat)                
        
        if False:
            # Draw Instance 
            deps = bpy.context.view_layer.depsgraph
            for obj_int in deps.object_instances:
                if obj_int.is_instance:
                    myobj = obj_int.object
                    mat = obj_int.matrix_world

                    if 'LineGenerator' in myobj:
                        lineGen = myobj.LineGenerator[0]
                        draw_line_group(context,myobj,lineGen,mat)
                    
                    if sceneProps.instance_dims:
                        if 'AnnotationGenerator' in myobj:
                            annotationGen = myobj.AnnotationGenerator[0]
                            draw_annotation(context,myobj,annotationGen,mat)

                        if 'DimensionGenerator' in myobj:
                            DimGen = myobj.DimensionGenerator[0]
                            for alignedDim in DimGen.alignedDimensions:
                                draw_alignedDimension(context, myobj, DimGen, alignedDim,mat)
                            for angleDim in DimGen.angleDimensions:
                                draw_angleDimension(context, myobj, DimGen, angleDim,mat)
                            for axisDim in DimGen.axisDimensions:
                                draw_axisDimension(context,myobj,DimGen,axisDim,mat)
            
 

    svg.save(pretty=True)
    # restore default value
    sceneProps.is_render_draw = False
    sceneProps.is_vector_draw = False
    return True
def render(image, check_size):
    vertex_shader = '''
        in vec2 pos;
        in vec2 texCoord;

        out vec2 uvInterp;

        void main()
        {
            uvInterp = texCoord;
            gl_Position = vec4(pos, 0.0, 1.0);
        }
    '''

    fragment_shader = '''
        uniform sampler2D image;
        uniform vec4 pattern_color;
        uniform float check_size;

        in vec2 uvInterp;
        
        float dist(vec2 p0, vec2 pf)
        {
            return sqrt((pf.x-p0.x)*(pf.x-p0.x)+(pf.y-p0.y)*(pf.y-p0.y));
        }
        
        vec4 checker(vec2 uv, float check_size)
        {
          uv -= 0.5;
          
          float result = mod(floor(check_size * uv.x) + floor(check_size * uv.y), 2.0);
          float fin = sign(result);
          return vec4(fin, fin, fin, 1.0);
        }
        
        void main()
        {
            vec4 texture_color = texture(image, uvInterp);
            if(texture_color.w == 0.0){
                discard;
            }
            if(texture_color.xyz == vec3(1.0, 1.0, 1.0)) {
                discard;
            }
            
            vec2 res = vec2(512, 512);            
            vec4 final_color = pattern_color;
            float d = dist(res.xy*0.5, gl_FragCoord.xy)*0.001;
            final_color = mix(pattern_color, vec4(pattern_color.xyz*0.3, 1.0), d);

            texture_color = mix(checker(uvInterp, check_size), final_color, 0.9);
            
            gl_FragColor = texture_color;
        }
    '''

    shader = gpu.types.GPUShader(vertex_shader, fragment_shader)
    batch = batch_for_shader(
        shader,
        'TRI_FAN',
        {
            "pos": ((-1, -1), (1, -1), (1, 1), (-1, 1)),
            "texCoord": ((0, 0), (1, 0), (1, 1), (0, 1)),
        },
    )
    if image.gl_load():
        return

    offscreen = gpu.types.GPUOffScreen(PREVIEW_WIDTH, PREVIEW_HEIGHT)
    with offscreen.bind():
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode)
        shader.bind()
        shader.uniform_int("image", 0)
        shader.uniform_float("pattern_color", get_active_color())
        shader.uniform_float("check_size", check_size)
        batch.draw(shader)

        buffer = bgl.Buffer(bgl.GL_FLOAT, PREVIEW_WIDTH * PREVIEW_HEIGHT * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, PREVIEW_WIDTH, PREVIEW_HEIGHT, bgl.GL_RGBA,
                         bgl.GL_FLOAT, buffer)

    offscreen.free()
    image.gl_free()
    return buffer
Example #20
    def snap_get(self, mval, main_snap_obj = None):
        ret = None, None, None
        self.mval[:] = mval
        snap_vert = self._snap_mode & VERT != 0
        snap_edge = self._snap_mode & EDGE != 0
        snap_face = self._snap_mode & FACE != 0

        _Internal.gpu_Indices_enable_state()
        self._offscreen.bind()

        #bgl.glDisable(bgl.GL_DITHER) # dithering and AA break color coding, so disable #
        #multisample_enabled = bgl.glIsEnabled(bgl.GL_MULTISAMPLE)
        #bgl.glDisable(bgl.GL_MULTISAMPLE)
        bgl.glEnable(bgl.GL_DEPTH_TEST)

        proj_mat = self.rv3d.perspective_matrix.copy()
        if self.proj_mat != proj_mat:
            self.proj_mat = proj_mat
            _Internal.gpu_Indices_set_ProjectionMatrix(self.proj_mat)
            self.update_drawing()

        ray_dir, ray_orig = self.get_ray(mval)
        for i, snap_obj in enumerate(self.snap_objects[self.drawn_count:], self.drawn_count):
            obj = snap_obj.data[0]
            try:
                bbmin = Vector(obj.bound_box[0])
                bbmax = Vector(obj.bound_box[6])
            except ReferenceError:
                self.snap_objects.remove(snap_obj)
                continue

            if bbmin != bbmax:
                MVP = proj_mat @ snap_obj.mat
                mat_inv = snap_obj.mat.inverted()
                ray_orig_local = mat_inv @ ray_orig
                ray_dir_local = mat_inv.to_3x3() @ ray_dir
                in_threshold = _Internal.intersect_boundbox_threshold(
                        self, MVP, ray_orig_local, ray_dir_local, bbmin, bbmax)
            else:
                proj_co = _Internal.project_co_v3(self, snap_obj.mat.translation)
                dist = self.mval - proj_co
                in_threshold = abs(dist.x) < self._dist_px and abs(dist.y) < self._dist_px
                #snap_obj.data[1] = primitive_point

            if in_threshold:
                if len(snap_obj.data) == 1:
                    from .mesh_drawing import GPU_Indices_Mesh
                    is_bound = obj.display_type == 'BOUNDS'
                    draw_face = snap_face and not is_bound and obj.display_type != 'WIRE'
                    draw_edge = snap_edge and not is_bound
                    draw_vert = snap_vert and not is_bound
                    snap_obj.data.append(GPU_Indices_Mesh(self.depsgraph, obj, draw_face, draw_edge, draw_vert))

                snap_obj.data[1].set_draw_mode(snap_face, snap_edge, snap_vert)
                snap_obj.data[1].set_ModelViewMatrix(snap_obj.mat)

                if snap_obj == main_snap_obj:
                    snap_obj.data[1].Draw(self._offset_cur, -0.0001)
                else:
                    snap_obj.data[1].Draw(self._offset_cur)
                self._offset_cur += snap_obj.data[1].get_tot_elems()

                tmp = self.snap_objects[self.drawn_count]
                self.snap_objects[self.drawn_count] = self.snap_objects[i]
                self.snap_objects[i] = tmp

                self.drawn_count += 1

        bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)

        bgl.glReadPixels(
                int(self.mval[0]) - self._dist_px, int(self.mval[1]) - self._dist_px,
                self.threshold, self.threshold, bgl.GL_RED_INTEGER, bgl.GL_UNSIGNED_INT, self._snap_buffer)

        #bgl.glReadBuffer(bgl.GL_BACK)
        #import numpy as np
        #a = np.array(self._snap_buffer)
        #print(a)

        snap_obj, index = self._get_nearest_index()
        #print("index:", index)
        if snap_obj:
            ret = self._get_loc(snap_obj, index)

        bgl.glDisable(bgl.GL_DEPTH_TEST)

        self._offscreen.unbind()
        _Internal.gpu_Indices_restore_state()

        return (snap_obj, *ret)
Example #21
def render_main(self, context, animation=False):
    # Save old info
    settings = bpy.context.scene.render.image_settings
    depth = settings.color_depth
    settings.color_depth = '8'

    # Get object list
    scene = context.scene
    objlist = context.scene.objects
    # --------------------
    # Get resolution
    # --------------------
    render_scale = scene.render.resolution_percentage / 100
    width = int(scene.render.resolution_x * render_scale)
    height = int(scene.render.resolution_y * render_scale)

    # --------------------------------------
    # Loop to draw all lines in Offscreen
    # --------------------------------------
    offscreen = gpu.types.GPUOffScreen(width, height)
    view_matrix = Matrix([[2 / width, 0, 0, -1], [0, 2 / height, 0, -1],
                          [0, 0, 1, 0], [0, 0, 0, 1]])

    with offscreen.bind():
        bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)
        gpu.matrix.reset()
        gpu.matrix.load_matrix(view_matrix)
        gpu.matrix.load_projection_matrix(Matrix.Identity(4))

        # -----------------------------
        # Loop to draw all objects
        # -----------------------------
        for myobj in objlist:
            if myobj.visible_get() is True:
                if 'MeasureGenerator' in myobj:
                    op = myobj.MeasureGenerator[0]
                    draw_segments(context, myobj, op, None, None)
        # -----------------------------
        # Loop to draw all debug
        # -----------------------------
        if scene.measureit_debug is True:
            selobj = bpy.context.selected_objects
            for myobj in selobj:
                if scene.measureit_debug_objects is True:
                    draw_object(context, myobj, None, None)
                elif scene.measureit_debug_object_loc is True:
                    draw_object(context, myobj, None, None)
                if scene.measureit_debug_vertices is True:
                    draw_vertices(context, myobj, None, None)
                elif scene.measureit_debug_vert_loc is True:
                    draw_vertices(context, myobj, None, None)
                if scene.measureit_debug_edges is True:
                    draw_edges(context, myobj, None, None)
                if scene.measureit_debug_faces is True or scene.measureit_debug_normals is True:
                    draw_faces(context, myobj, None, None)
        # -----------------------------
        # Draw a rectangle frame
        # -----------------------------
        if scene.measureit_rf is True:
            rfcolor = scene.measureit_rf_color
            rfborder = scene.measureit_rf_border
            rfline = scene.measureit_rf_line

            bgl.glLineWidth(rfline)
            x1 = rfborder
            x2 = width - rfborder
            y1 = int(ceil(rfborder / (width / height)))
            y2 = height - y1
            draw_rectangle((x1, y1), (x2, y2), rfcolor)

        buffer = bgl.Buffer(bgl.GL_BYTE, width * height * 4)
        bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)
        bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA,
                         bgl.GL_UNSIGNED_BYTE, buffer)

    offscreen.free()

    # -----------------------------
    # Create image
    # -----------------------------
    image_name = "measureit_output"
    if not image_name in bpy.data.images:
        bpy.data.images.new(image_name, width, height)

    image = bpy.data.images[image_name]
    image.scale(width, height)
    image.pixels = [v / 255 for v in buffer]

    # Saves image
    if image is not None and (scene.measureit_render is True
                              or animation is True):
        ren_path = bpy.context.scene.render.filepath
        filename = "mit_frame"
        if len(ren_path) > 0:
            if ren_path.endswith(path.sep):
                initpath = path.realpath(ren_path) + path.sep
            else:
                (initpath, filename) = path.split(ren_path)

        ftxt = "%04d" % scene.frame_current
        outpath = path.realpath(path.join(initpath, filename + ftxt + ".png"))
        save_image(self, outpath, image)

    # restore default value
    settings.color_depth = depth
Example #22
    def _execute_inner(self, obs):
        dim = self.dim
        dimhalf = dim * .5
        offbuf = gpu.types.GPUOffScreen(dim, dim)
        sample = sample_sphere if self.dom == 'SPHERE' \
            else sample_hemisphere

        # Construct depthpass shader
        shader = gpu.types.GPUShader(
            vertexcode='''
            uniform mat4 mvp;
            in vec3 pos;
            void main() {
                gl_Position = mvp * vec4(pos, 1);
            }''',
            fragcode='''
            out vec4 col;
            void main() {
                col = vec4(0, 0, 1, 1);
            }'''
        )
        shader.bind()

        # Create batch from all objects in edit mode
        verts, indcs, geoinfo = combine_meshes(obs)
        batch = batch_for_shader(
            shader, 'TRIS',
            {"pos": verts},
            indices=indcs,
        )
        batch.program_set(shader)

        # Find the center and bounds of all objects to calculate the
        # encompassing radius of the (hemi-)sphere on which render
        # positions will be sampled
        bounds, centr = get_bounds_and_center(verts)
        rad = np.linalg.norm(bounds[:2]) * .5 + 1
        del indcs, bounds

        # Spawn debug sphere with calculated radius
        if self._debug_spawn_sphere:
            bpy.ops.mesh.primitive_uv_sphere_add(
                radius=rad,
                location=centr,
                )

        # Render the objects from several views and mark seen vertices
        visibl = np.zeros(len(verts), dtype=bool)
        for _ in range(self.samplecnt):
            # Generate random points on the chosen domain from which
            # to render the objects
            # Chose rotation so the 'camera' looks to the center
            samplepos, (theta, phi) = sample(rad)
            view_mat_inv = make_transf_mat(
                transl=samplepos + centr,
                rot=(phi, 0, theta + np.pi * .5),
                )

            # Spawn debug camera at sampled position
            if self._debug_spawn_cams:
                bpy.ops.object.camera_add()
                bpy.context.object.matrix_world = Matrix(view_mat_inv)

            # Build the Model View Projection matrix from chosen
            # render position and radius
            # The model matrix has already been applied to the vertices
            # before creating the batch
            mvp = make_proj_mat(
                fov=90,
                clip_start=rad * .25,
                clip_end=rad * 1.5,
                dimx=dim,
                dimy=dim,
                ) @ np.linalg.inv(view_mat_inv)
            shader.uniform_float("mvp", Matrix(mvp))
            del view_mat_inv, samplepos, theta, phi

            with offbuf.bind():
                # Render the selected objects into the offscreen buffer
                bgl.glDepthMask(bgl.GL_TRUE)
                bgl.glClear(bgl.GL_DEPTH_BUFFER_BIT)
                bgl.glEnable(bgl.GL_DEPTH_TEST)
                batch.draw()

                # Write texture back to CPU
                pxbuf = bgl.Buffer(bgl.GL_FLOAT, dim * dim)
                bgl.glReadBuffer(bgl.GL_BACK)
                bgl.glReadPixels(0, 0, dim, dim, bgl.GL_DEPTH_COMPONENT,
                                 bgl.GL_FLOAT, pxbuf)

            # Map depth values from [0, 1] to [-1, 1]
            pxbuf = np.asanyarray(pxbuf) * 2 - 1
            pxbuf.shape = (dim, dim)

            # Transform verts of active object to clip space
            tverts = mvp @ append_one(verts).T
            # Perspective divide to transform to NDCs [-1, 1]
            tverts /= tverts[3]

            # Find pixel coordinates of each vertex's projected position
            # by remapping x and y coordinates from NDCs to [0, dim]
            # Add .5 to make sure the flooring from conversion to int
            # is actually rounding
            uvs = tverts[:2] * dimhalf + (dimhalf + .5)
            uvs = uvs.astype(np.int32)

            # Map all vertices outside the view frustum to (0, 0)
            # so they don't sample the pixel array out of bounds
            invalid = np.any((uvs < 0) | (dim <= uvs), axis=0)
            uvs.T[invalid] = (0, 0)

            # For each vertex, get the depth at its projected pixel
            # and its distance to the render position
            imgdpth = pxbuf[(uvs[1], uvs[0])]
            camdist = tverts[2]
            # Set the distance of invalid vertices past [-1, 1] so they
            # won't be selected
            camdist[invalid] = 2

            # A vertex is visible if it's inside the view frustum
            # (valid) and not occluded by any face.
            # A vertex is occluded when its depth sampled from the
            # image is smaller than its distance to the camera.
            # A small error margin is added to prevent self-occlusion.
            # The result is logically or-ed with the result from other
            # render positions.
            visibl |= camdist <= (imgdpth + .001)

            # Create debug image of the rendered view
            if self._debug_create_img:
                # Grayscale to RGBA and [-1, 1] to [0, 1]
                pxbuf = np.repeat(pxbuf, 4) * .5 + .5
                pxbuf.shape = (dim, dim, 4)
                # Alpha channel is 1
                pxbuf[:, :, 3] = 1
                # Mark projected vertex positions in red
                pxbuf[(uvs[1], uvs[0])] = (1, 0, 0, 1)

                imgname = "Debug"
                if imgname not in bpy.data.images:
                    bpy.data.images.new(imgname, dim, dim)
                image = bpy.data.images[imgname]
                image.scale(dim, dim)
                image.pixels = pxbuf.ravel()

        # Split visible flag list back in original objects
        offbuf.free()
        start = 0
        for o, (end, _) in zip(obs, geoinfo):
            o.data.vertices.foreach_set('select', visibl[start:end])
            start = end
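
The occlusion test in the loop above boils down to comparing each vertex's NDC depth with the depth-buffer value sampled at its projected pixel, plus a small epsilon against self-occlusion. A minimal standalone sketch of just that comparison, with made-up values that are not part of the original add-on:

import numpy as np

# Depth sampled from the rendered depth image at each vertex's pixel,
# already remapped from [0, 1] to [-1, 1] as in the code above.
imgdpth = np.array([-0.200, 0.100, 0.700])
# Each vertex's own NDC depth after the perspective divide.
camdist = np.array([-0.2005, 0.500, 0.699])
# A vertex counts as visible when it is not behind the sampled surface,
# with a small error margin to avoid self-occlusion.
visible = camdist <= (imgdpth + .001)
print(visible)  # [ True False  True]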
Example #23
0
    def snap_get(self, mval):
        ret = None, None
        self.mval[:] = mval
        snap_vert = self._snap_mode & VERT != 0
        snap_edge = self._snap_mode & EDGE != 0
        snap_face = self._snap_mode & FACE != 0

        _Internal.gpu_Indices_enable_state()
        self._offscreen.bind()

        #bgl.glDisable(bgl.GL_DITHER) # dithering and AA break color coding, so disable #
        #multisample_enabled = bgl.glIsEnabled(bgl.GL_MULTISAMPLE)
        #bgl.glDisable(bgl.GL_MULTISAMPLE)
        bgl.glEnable(bgl.GL_DEPTH_TEST)

        proj_mat = self.rv3d.perspective_matrix.copy()
        if self.proj_mat != proj_mat:
            self.proj_mat = proj_mat
            _Internal.gpu_Indices_set_ProjectionMatrix(self.proj_mat)
            self.update_all()

        ray_dir, ray_orig = self.get_ray(mval)
        for i, snap_obj in enumerate(self.snap_objects[self.drawn_count:],
                                     self.drawn_count):
            obj = snap_obj.data[0]
            bbmin = Vector(obj.bound_box[0])
            bbmax = Vector(obj.bound_box[6])

            if bbmin != bbmax:
                MVP = proj_mat * snap_obj.mat
                mat_inv = snap_obj.mat.inverted()
                ray_orig_local = mat_inv * ray_orig
                ray_dir_local = mat_inv.to_3x3() * ray_dir
                in_threshold = _Internal.intersect_boundbox_threshold(
                    self, MVP, ray_orig_local, ray_dir_local, bbmin, bbmax)
            else:
                proj_co = _Internal.project_co_v3(self,
                                                  snap_obj.mat.translation)
                dist = self.mval - proj_co
                in_threshold = abs(dist.x) < self._dist_px and abs(
                    dist.y) < self._dist_px
                #snap_obj.data[1] = primitive_point

            if in_threshold:
                if len(snap_obj.data) == 1:
                    from .mesh_drawing import GPU_Indices_Mesh
                    snap_obj.data.append(
                        GPU_Indices_Mesh(obj, snap_face, snap_edge, snap_vert))
                snap_obj.data[1].set_draw_mode(snap_face, snap_edge, snap_vert)
                snap_obj.data[1].set_ModelViewMatrix(snap_obj.mat)
                snap_obj.data[1].Draw(self._offset_cur)
                self._offset_cur += snap_obj.data[1].get_tot_elems()

                self.snap_objects[self.drawn_count], self.snap_objects[
                    i] = self.snap_objects[i], self.snap_objects[
                        self.drawn_count]
                self.drawn_count += 1

        bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)
        bgl.glReadPixels(
            int(self.mval[0]) - self._dist_px,
            int(self.mval[1]) - self._dist_px, self.threshold, self.threshold,
            bgl.GL_RED_INTEGER, bgl.GL_UNSIGNED_INT, self._snap_buffer)
        bgl.glReadBuffer(bgl.GL_BACK)

        snap_obj, index = self._get_nearest_index()
        #print(index)
        if snap_obj:
            ret = self._get_loc(snap_obj, index)

        self._offscreen.unbind()
        _Internal.gpu_Indices_restore_state()

        return snap_obj, ret[0], ret[1]
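
The glReadPixels call above fills self._snap_buffer with a threshold x threshold block of GL_RED_INTEGER element indices around the mouse position, with 0 where nothing was drawn. _get_nearest_index is not shown in this example; the sketch below is only an assumption of how such a lookup could work on a tiny illustrative block, not the add-on's actual implementation:

import numpy as np

# Illustrative 3x3 block of color-coded element indices read back from the
# offscreen buffer; 0 means no element was drawn at that pixel.
snap_block = np.array([
    [0, 0, 0],
    [0, 0, 7],
    [0, 0, 0],
], dtype=np.uint32)

center = np.array(snap_block.shape) // 2
hits = np.argwhere(snap_block != 0)
if len(hits):
    # Pick the hit closest to the cursor (the block's center).
    nearest = hits[np.argmin(np.linalg.norm(hits - center, axis=1))]
    index = int(snap_block[tuple(nearest)])  # -> 7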
Example #24
0
    def modal(self, context, event):
        # if event.type in {'ESC'}:
        #     self.cancel(context)
        #     return {'CANCELLED'}

        if event.type == 'TIMER':

            if self.source not in bpy.data.texts:
                print(
                    f'File name {self.source} not found. Ready to create one')

                if self.file_exist(self.source):
                    print("It's a file")
                    bpy.ops.text.open(filepath=self.source)
                    self.fromFile = True
                else:
                    bpy.data.texts.new(self.source)
                    print("It's not a file, populate with the default shader")
                    bpy.data.texts[self.source].write(self.default_shader)

            recompile = False

            if not bpy.data.texts[self.source].is_in_memory and bpy.data.texts[
                    self.source].is_modified:
                print("Have been modify")
                #                bpy.ops.text.resolve_conflict(resolution='RELOAD')
                #                self.file_reload(self.source)
                text = bpy.data.texts[self.source]
                ctx = context.copy()
                #                ctx["edit_text"] = text
                #                bpy.ops.text.reload(ctx)
                # Ensure the context area is not None
                ctx['area'] = ctx['screen'].areas[0]
                oldAreaType = ctx['area'].type
                ctx['area'].type = 'TEXT_EDITOR'
                ctx['edit_text'] = text
                bpy.ops.text.resolve_conflict(ctx, resolution='RELOAD')
                #Restore context
                ctx['area'].type = oldAreaType

            if self.current_shader != bpy.data.texts[self.source].as_string():
                recompile = True

            if recompile:
                print("Recompile... ")

                fragment_shader = bpy.data.texts[self.source].as_string()
                self.current_shader = fragment_shader

                offscreen = gpu.types.GPUOffScreen(self.width, self.height)

                with offscreen.bind():
                    bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)

                    try:
                        shader = gpu.types.GPUShader(self.vertex_shader,
                                                     fragment_shader)
                    except Exception as Err:
                        print(Err)
                        recompile = False

                    if recompile:
                        batch = batch_for_shader(
                            shader,
                            'TRI_FAN',
                            {
                                'a_position':
                                ((-1, -1), (1, -1), (1, 1), (-1, 1))
                            },
                        )

                        shader.bind()

                        #                        try:
                        #                            shader.uniform_float('u_time', bpy.context.scene.frame_float/bpy.context.scene.render.fps)
                        #                        except ValueError:
                        #                            print('Uniform: u_time not used')

                        try:
                            shader.uniform_float('u_resolution',
                                                 (self.width, self.height))
                        except ValueError:
                            print('Uniform: u_resolution not used')

                        batch.draw(shader)

                        buffer = bgl.Buffer(bgl.GL_BYTE,
                                            self.width * self.height * 4)
                        bgl.glReadBuffer(bgl.GL_BACK)
                        bgl.glReadPixels(0, 0, self.width, self.height,
                                         bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE,
                                         buffer)

                offscreen.free()

                if recompile:
                    print("Success recompiling")
                    name = self.source.split(".")[0]

                    if name not in bpy.data.images:
                        bpy.data.images.new(name, self.width, self.height)
                    image = bpy.data.images[name]
                    image.scale(self.width, self.height)
                    image.pixels = [v / 255 for v in buffer]

        return {'PASS_THROUGH'}
Example #25
0
    def snap_get(self, mval):
        ret = None, None
        self.mval[:] = mval
        snap_vert = self._snap_mode & VERT != 0
        snap_edge = self._snap_mode & EDGE != 0
        snap_face = self._snap_mode & FACE != 0

        _Internal.gpu_Indices_enable_state()
        self._offscreen.bind()

        #bgl.glDisable(bgl.GL_DITHER) # dithering and AA break color coding, so disable #
        #multisample_enabled = bgl.glIsEnabled(bgl.GL_MULTISAMPLE)
        #bgl.glDisable(bgl.GL_MULTISAMPLE)
        bgl.glEnable(bgl.GL_DEPTH_TEST)

        proj_mat = self.rv3d.perspective_matrix.copy()
        if self.proj_mat != proj_mat:
            self.proj_mat = proj_mat
            _Internal.gpu_Indices_set_ProjectionMatrix(self.proj_mat)
            self.update_all()

        ray_dir, ray_orig = self.get_ray(mval)
        for i, snap_obj in enumerate(self.snap_objects[self.drawn_count:], self.drawn_count):
            obj = snap_obj.data[0]
            bbmin = Vector(obj.bound_box[0])
            bbmax = Vector(obj.bound_box[6])

            if bbmin != bbmax:
                MVP = proj_mat * snap_obj.mat
                mat_inv = snap_obj.mat.inverted()
                ray_orig_local = mat_inv * ray_orig
                ray_dir_local = mat_inv.to_3x3() * ray_dir
                in_threshold = _Internal.intersect_boundbox_threshold(self, MVP, ray_orig_local, ray_dir_local, bbmin, bbmax)
            else:
                proj_co = _Internal.project_co_v3(self, snap_obj.mat.translation)
                dist = self.mval - proj_co
                in_threshold = abs(dist.x) < self._dist_px and abs(dist.y) < self._dist_px
                #snap_obj.data[1] = primitive_point

            if in_threshold:
                if len(snap_obj.data) == 1:
                    from .mesh_drawing import GPU_Indices_Mesh
                    snap_obj.data.append(GPU_Indices_Mesh(obj, snap_face, snap_edge, snap_vert))
                snap_obj.data[1].set_draw_mode(snap_face, snap_edge, snap_vert)
                snap_obj.data[1].set_ModelViewMatrix(snap_obj.mat)
                snap_obj.data[1].Draw(self._offset_cur)
                self._offset_cur += snap_obj.data[1].get_tot_elems()

                self.snap_objects[self.drawn_count], self.snap_objects[i] = self.snap_objects[i], self.snap_objects[self.drawn_count]
                self.drawn_count += 1

        bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)
        bgl.glReadPixels(
                int(self.mval[0]) - self._dist_px, int(self.mval[1]) - self._dist_px,
                self.threshold, self.threshold, bgl.GL_RED_INTEGER, bgl.GL_UNSIGNED_INT, self._snap_buffer)
        bgl.glReadBuffer(bgl.GL_BACK)

        snap_obj, index = self._get_nearest_index()
        #print(index)
        if snap_obj:
            ret = self._get_loc(snap_obj, index)

        self._offscreen.unbind()
        _Internal.gpu_Indices_restore_state()

        return snap_obj, ret[0], ret[1]
Example #26
0
def make_equirectangular_from_sky(base_path, sky_name):
    textures = [
        sky_name + "_up", sky_name + "_dn", sky_name + "_ft", sky_name + "_bk",
        sky_name + "_lf", sky_name + "_rt"
    ]
    cube = [None for x in range(6)]

    biggest_h = 1
    biggest_w = 1

    for index, tex in enumerate(textures):
        image = Image.load_file(base_path + "/" + tex)

        if image != None:
            cube[index] = image
            if image.gl_load():
                raise Exception()
            if biggest_h < image.size[1]:
                biggest_h = image.size[1]
            if biggest_w < image.size[0]:
                biggest_w = image.size[0]

    equi_w = min(8192, biggest_w * 4)
    equi_h = min(4096, biggest_h * 2)

    offscreen = gpu.types.GPUOffScreen(equi_w, equi_h)
    with offscreen.bind():
        bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)
        with gpu.matrix.push_pop():
            # reset matrices -> use normalized device coordinates [-1, 1]
            gpu.matrix.load_matrix(Matrix.Identity(4))
            gpu.matrix.load_projection_matrix(Matrix.Identity(4))

            if cube[0] != None:
                bgl.glActiveTexture(bgl.GL_TEXTURE0)
                bgl.glBindTexture(bgl.GL_TEXTURE_2D, cube[0].bindcode)
            if cube[1] != None:
                bgl.glActiveTexture(bgl.GL_TEXTURE1)
                bgl.glBindTexture(bgl.GL_TEXTURE_2D, cube[1].bindcode)
            if cube[2] != None:
                bgl.glActiveTexture(bgl.GL_TEXTURE2)
                bgl.glBindTexture(bgl.GL_TEXTURE_2D, cube[2].bindcode)
            if cube[3] != None:
                bgl.glActiveTexture(bgl.GL_TEXTURE3)
                bgl.glBindTexture(bgl.GL_TEXTURE_2D, cube[3].bindcode)
            if cube[4] != None:
                bgl.glActiveTexture(bgl.GL_TEXTURE4)
                bgl.glBindTexture(bgl.GL_TEXTURE_2D, cube[4].bindcode)
            if cube[5] != None:
                bgl.glActiveTexture(bgl.GL_TEXTURE5)
                bgl.glBindTexture(bgl.GL_TEXTURE_2D, cube[5].bindcode)

            #now draw
            shader.bind()
            shader.uniform_int("tex_up", 0)
            shader.uniform_int("tex_dn", 1)
            shader.uniform_int("tex_ft", 2)
            shader.uniform_int("tex_bk", 3)
            shader.uniform_int("tex_lf", 4)
            shader.uniform_int("tex_rt", 5)
            shader.uniform_float("clamp_value", 1.0 / biggest_h)
            batch.draw(shader)

        buffer = bgl.Buffer(bgl.GL_FLOAT, equi_w * equi_h * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, equi_w, equi_h, bgl.GL_RGBA, bgl.GL_FLOAT,
                         buffer)

    offscreen.free()

    image = bpy.data.images.get(sky_name)
    if image == None:
        image = bpy.data.images.new(sky_name, width=equi_w, height=equi_h)
    image.scale(equi_w, equi_h)
    image.pixels = buffer
    image.pack()
    return image
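
An illustrative call to make_equirectangular_from_sky; the directory and sky name below are placeholders, and the six face images are expected to be loadable as "<sky>_up", "<sky>_dn", "<sky>_ft", "<sky>_bk", "<sky>_lf" and "<sky>_rt" inside base_path:

# Placeholder arguments; returns a packed equirectangular bpy.types.Image
# named after the sky, creating or rescaling the datablock as needed.
equirect = make_equirectangular_from_sky("/path/to/sky_textures", "sky")
print(equirect.name, equirect.size[0], equirect.size[1])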
def render_main(self, context, animation=False):

    # Save old info
    scene = context.scene
    sceneProps = scene.MeasureItArchProps
    sceneProps.is_render_draw = True
    bgl.glEnable(bgl.GL_MULTISAMPLE)

    clipdepth = context.scene.camera.data.clip_end
    objlist = context.view_layer.objects

    # --------------------
    # Get resolution
    # --------------------

    render_scale = scene.render.resolution_percentage / 100
    width = int(scene.render.resolution_x * render_scale)
    height = int(scene.render.resolution_y * render_scale)


    # --------------------------------------
    # Draw all lines in Offscreen
    # --------------------------------------
    offscreen = gpu.types.GPUOffScreen(width, height)
    
    view_matrix = Matrix([
        [2 / width, 0, 0, -1],
        [0, 2 / height, 0, -1],
        [0, 0, 1, 0],
        [0, 0, 0, 1]])

    view_matrix_3d = scene.camera.matrix_world.inverted()
    projection_matrix = scene.camera.calc_matrix_camera(context.view_layer.depsgraph, x=width, y=height)
    
    with offscreen.bind():
        # Set Clear Depth to the camera's clip distance, then clear the Depth Buffer
        bgl.glClearDepth(clipdepth)
        bgl.glClear(bgl.GL_DEPTH_BUFFER_BIT)
        bgl.glEnable(bgl.GL_DEPTH_TEST)
        bgl.glDepthFunc(bgl.GL_LEQUAL)  

        gpu.matrix.reset()
        gpu.matrix.load_matrix(view_matrix_3d)
        gpu.matrix.load_projection_matrix(projection_matrix)

        draw_scene(self, context, projection_matrix) 

        
        # Clear color, keep depth info
        bgl.glClearColor(0,0,0,0)
        bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)

        # -----------------------------
        # Loop to draw all objects
        # -----------------------------
        for myobj in objlist:
            if myobj.visible_get() is True:
                mat = myobj.matrix_world

                sheetGen = myobj.SheetGenerator
                for sheet_view in sheetGen.sheet_views:
                    draw_sheet_views(context,myobj,sheetGen,sheet_view,mat)

                if 'DimensionGenerator' in myobj:
                    measureGen = myobj.DimensionGenerator[0]
                    if 'alignedDimensions' in measureGen:
                        for linDim in measureGen.alignedDimensions:
                            draw_alignedDimension(context, myobj, measureGen,linDim,mat)
                    if 'angleDimensions' in measureGen:
                        for dim in measureGen.angleDimensions:
                            draw_angleDimension(context, myobj, measureGen,dim,mat)
                    if 'axisDimensions' in measureGen:
                        for dim in measureGen.axisDimensions:
                            draw_axisDimension(context, myobj, measureGen,dim,mat)
                    if 'boundsDimensions' in measureGen:
                        for dim in measureGen.boundsDimensions:
                            draw_boundsDimension(context, myobj, measureGen,dim,mat)
                    if 'arcDimensions' in measureGen:
                        for dim in measureGen.arcDimensions:
                            draw_arcDimension(context, myobj, measureGen,dim,mat)
                    if 'areaDimensions' in measureGen:
                        for dim in measureGen.areaDimensions:
                            draw_areaDimension(context, myobj, measureGen,dim,mat)

                if 'LineGenerator' in myobj:
                    # Set 3D Projection Matrix
                    gpu.matrix.reset()
                    gpu.matrix.load_matrix(view_matrix_3d)
                    gpu.matrix.load_projection_matrix(projection_matrix)

                    # Draw Line Groups
                    op = myobj.LineGenerator[0]
                    draw_line_group(context, myobj, op, mat)
             
                if 'AnnotationGenerator' in myobj:
                    # Set 3D Projection Matrix
                    gpu.matrix.reset()
                    gpu.matrix.load_matrix(view_matrix_3d)
                    gpu.matrix.load_projection_matrix(projection_matrix)

                    # Draw Annotations
                    op = myobj.AnnotationGenerator[0]
                    draw_annotation(context, myobj, op, mat)                
       
        # Draw Instances
        deps = bpy.context.view_layer.depsgraph
        for obj_int in deps.object_instances:
            if obj_int.is_instance:
                myobj = obj_int.object
                mat = obj_int.matrix_world

                if 'LineGenerator' in myobj:
                    lineGen = myobj.LineGenerator[0]
                    draw_line_group(context,myobj,lineGen,mat)
                
                if sceneProps.instance_dims:
                    if 'AnnotationGenerator' in myobj:
                        annotationGen = myobj.AnnotationGenerator[0]
                        draw_annotation(context,myobj,annotationGen,mat)

                    if 'DimensionGenerator' in myobj:
                        DimGen = myobj.DimensionGenerator[0]
                        for alignedDim in DimGen.alignedDimensions:
                            draw_alignedDimension(context, myobj, DimGen, alignedDim,mat)
                        for angleDim in DimGen.angleDimensions:
                            draw_angleDimension(context, myobj, DimGen, angleDim,mat)
                        for axisDim in DimGen.axisDimensions:
                            draw_axisDimension(context,myobj,DimGen,axisDim,mat)
        

        buffer = bgl.Buffer(bgl.GL_BYTE, width * height * 4)
        bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)
        bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)
    offscreen.free()

    
    # -----------------------------
    # Create image
    # -----------------------------
    image_name = "measureit_arch_output"
    if image_name not in bpy.data.images:
        bpy.data.images.new(image_name, width, height)

    image = bpy.data.images[image_name]
    image.scale(width, height)
    image.pixels = [v / 255 for v in buffer]

    # Saves image
    if image is not None and (scene.measureit_arch_render is True or animation is True):
        ren_path = bpy.context.scene.render.filepath
        filename = "mit_frame"
        ftxt = "%04d" % scene.frame_current
        outpath = (ren_path + filename + ftxt + '.png')
        save_image(self, outpath, image)

    # restore default value
    sceneProps.is_render_draw = False
    return True, buffer
Example #28
0
    def snap_get(self, mval, main_snap_obj = None):
        ret = None, None, None
        self.mval[:] = mval
        snap_vert = self._snap_mode & VERT != 0
        snap_edge = self._snap_mode & EDGE != 0
        snap_face = self._snap_mode & FACE != 0

        _Internal.gpu_Indices_enable_state()
        self._offscreen.bind()

        #bgl.glDisable(bgl.GL_DITHER) # dithering and AA break color coding, so disable #
        #multisample_enabled = bgl.glIsEnabled(bgl.GL_MULTISAMPLE)
        #bgl.glDisable(bgl.GL_MULTISAMPLE)
        bgl.glEnable(bgl.GL_DEPTH_TEST)

        proj_mat = self.rv3d.perspective_matrix.copy()
        if self.proj_mat != proj_mat:
            self.proj_mat = proj_mat
            _Internal.gpu_Indices_set_ProjectionMatrix(self.proj_mat)
            self.update_drawing()

        ray_dir, ray_orig = self.get_ray(mval)
        for i, snap_obj in enumerate(self.snap_objects[self.drawn_count:], self.drawn_count):
            obj = snap_obj.data[0]
            try:
                bbmin = Vector(obj.bound_box[0])
                bbmax = Vector(obj.bound_box[6])
            except ReferenceError:
                self.snap_objects.remove(snap_obj)
                continue

            if bbmin != bbmax:
                MVP = proj_mat @ snap_obj.mat
                mat_inv = snap_obj.mat.inverted()
                ray_orig_local = mat_inv @ ray_orig
                ray_dir_local = mat_inv.to_3x3() @ ray_dir
                in_threshold = _Internal.intersect_boundbox_threshold(
                        self, MVP, ray_orig_local, ray_dir_local, bbmin, bbmax)
            else:
                proj_co = _Internal.project_co_v3(self, snap_obj.mat.translation)
                dist = self.mval - proj_co
                in_threshold = abs(dist.x) < self._dist_px and abs(dist.y) < self._dist_px
                #snap_obj.data[1] = primitive_point

            if in_threshold:
                if len(snap_obj.data) == 1:
                    from .mesh_drawing import GPU_Indices_Mesh
                    is_bound = obj.display_type == 'BOUNDS'
                    draw_face = snap_face and not is_bound and obj.display_type != 'WIRE'
                    draw_edge = snap_edge and not is_bound
                    draw_vert = snap_vert and not is_bound
                    snap_obj.data.append(GPU_Indices_Mesh(self.depsgraph, obj, draw_face, draw_edge, draw_vert))

                snap_obj.data[1].set_draw_mode(snap_face, snap_edge, snap_vert)
                snap_obj.data[1].set_ModelViewMatrix(snap_obj.mat)

                if snap_obj == main_snap_obj:
                    snap_obj.data[1].Draw(self._offset_cur, -0.0001)
                else:
                    snap_obj.data[1].Draw(self._offset_cur)
                self._offset_cur += snap_obj.data[1].get_tot_elems()

                tmp = self.snap_objects[self.drawn_count]
                self.snap_objects[self.drawn_count] = self.snap_objects[i]
                self.snap_objects[i] = tmp

                self.drawn_count += 1

        bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)

        bgl.glReadPixels(
                int(self.mval[0]) - self._dist_px, int(self.mval[1]) - self._dist_px,
                self.threshold, self.threshold, bgl.GL_RED_INTEGER, bgl.GL_UNSIGNED_INT, self._snap_buffer)

        #bgl.glReadBuffer(bgl.GL_BACK)
        #import numpy as np
        #a = np.array(self._snap_buffer)
        #print(a)

        snap_obj, index = self._get_nearest_index()
        #print("index:", index)
        if snap_obj:
            ret = self._get_loc(snap_obj, index)

        bgl.glDisable(bgl.GL_DEPTH_TEST)

        self._offscreen.unbind()
        _Internal.gpu_Indices_restore_state()

        return (snap_obj, *ret)
Example #29
0
def get_pixel_data_from_current_back_buffer(width, height):
    buffer = bgl.Buffer(bgl.GL_BYTE, width * height * 4)
    bgl.glReadBuffer(bgl.GL_BACK)
    bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)
    return buffer
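
A minimal usage sketch for the helper above; the image name and size are illustrative, and the buffer-to-Image.pixels conversion follows the same pattern used throughout these examples:

import bpy

width, height = 256, 256  # illustrative; normally the viewport or render size
buf = get_pixel_data_from_current_back_buffer(width, height)

name = "back_buffer_capture"  # illustrative image name
if name not in bpy.data.images:
    bpy.data.images.new(name, width, height)
img = bpy.data.images[name]
img.scale(width, height)
# bgl returns unsigned bytes; Image.pixels expects floats in [0, 1]
img.pixels = [v / 255 for v in buf]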
def render_main(self, context, animation=False):

    # Save old info
    bgl.glEnable(bgl.GL_MULTISAMPLE)
    settings = bpy.context.scene.render.image_settings
    depth = settings.color_depth
    settings.color_depth = '16'

    scene = context.scene
    clipdepth = context.scene.camera.data.clip_end
    path = scene.render.filepath
    objlist = context.view_layer.objects

    # --------------------
    # Get resolution
    # --------------------

    render_scale = scene.render.resolution_percentage / 100
    width = int(scene.render.resolution_x * render_scale)
    height = int(scene.render.resolution_y * render_scale)


    # --------------------------------------
    # Draw all lines in Offscreen
    # --------------------------------------
    offscreen = gpu.types.GPUOffScreen(width, height)
    
    view_matrix = Matrix([
        [2 / width, 0, 0, -1],
        [0, 2 / height, 0, -1],
        [0, 0, 1, 0],
        [0, 0, 0, 1]])

    view_matrix_3d = scene.camera.matrix_world.inverted()
    projection_matrix = scene.camera.calc_matrix_camera(context.view_layer.depsgraph, x=width, y=height)
    scene.measureit_arch_is_render_draw = True
    with offscreen.bind():
        # Set Clear Depth to the camera's clip distance, then clear the Depth Buffer
        bgl.glClearDepth(clipdepth)
        bgl.glClear(bgl.GL_DEPTH_BUFFER_BIT)

        # Draw Scene If Necessary
        if scene.measureit_arch_use_depth_clipping is True:
            draw_scene(self, context, projection_matrix) 
        
        # Clear color, keep depth info
        bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)

        # -----------------------------
        # Loop to draw all objects
        # -----------------------------
        for myobj in objlist:
            if myobj.visible_get() is True:
                if 'DimensionGenerator' in myobj:
                    measureGen = myobj.DimensionGenerator[0]
                    for linDim in measureGen.alignedDimensions:
                        draw_alignedDimension(context, myobj, measureGen,linDim)
                    for dim in measureGen.angleDimensions:
                        draw_angleDimension(context, myobj, measureGen,dim)

                if 'LineGenerator' in myobj:
                    # Set 3D Projection Matrix
                    gpu.matrix.reset()
                    gpu.matrix.load_matrix(view_matrix_3d)
                    gpu.matrix.load_projection_matrix(projection_matrix)

                    # Draw Line Groups
                    op = myobj.LineGenerator[0]
                    draw_line_group(context, myobj, op)
             
                if 'AnnotationGenerator' in myobj:
                    # Set 3D Projection Matrix
                    gpu.matrix.reset()
                    gpu.matrix.load_matrix(view_matrix_3d)
                    gpu.matrix.load_projection_matrix(projection_matrix)

                    # Draw Annotations
                    op = myobj.AnnotationGenerator[0]
                    draw_annotation(context, myobj, op)                

        # -----------------------------
        # Loop to draw all debug
        # -----------------------------
        if scene.measureit_arch_debug is True:
            selobj = bpy.context.selected_objects
            for myobj in selobj:
                if scene.measureit_arch_debug_objects is True:
                    draw_object(context, myobj, None, None)
                elif scene.measureit_arch_debug_object_loc is True:
                    draw_object(context, myobj, None, None)
                if scene.measureit_arch_debug_vertices is True:
                    draw_vertices(context, myobj, None, None)
                elif scene.measureit_arch_debug_vert_loc is True:
                    draw_vertices(context, myobj, None, None)
                if scene.measureit_arch_debug_edges is True:
                    draw_edges(context, myobj, None, None)
                if scene.measureit_arch_debug_faces is True or scene.measureit_arch_debug_normals is True:
                    draw_faces(context, myobj, None, None)
        
        # -----------------------------
        # Draw a rectangle frame
        # -----------------------------
        if scene.measureit_arch_rf is True:
            rfcolor = scene.measureit_arch_rf_color
            rfborder = scene.measureit_arch_rf_border
            rfline = scene.measureit_arch_rf_line

            bgl.glLineWidth(rfline)
            x1 = rfborder
            x2 = width - rfborder
            y1 = int(ceil(rfborder / (width / height)))
            y2 = height - y1
            draw_rectangle((x1, y1), (x2, y2))

        buffer = bgl.Buffer(bgl.GL_BYTE, width * height * 4)
        bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)
        bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)
    offscreen.free()

    scene.measureit_arch_is_render_draw = False
    # -----------------------------
    # Create image
    # -----------------------------
    image_name = "measureit_arch_output"
    if image_name not in bpy.data.images:
        bpy.data.images.new(image_name, width, height)

    image = bpy.data.images[image_name]
    image.scale(width, height)
    image.pixels = [v / 255 for v in buffer]

    # Saves image
    if image is not None and (scene.measureit_arch_render is True or animation is True):
        ren_path = bpy.context.scene.render.filepath
        filename = "mit_frame"
        ftxt = "%04d" % scene.frame_current
        outpath = (ren_path + filename + ftxt + ".png")
        save_image(self, outpath, image)

    # restore default value
    settings.color_depth = depth
def color_to_KK(color, lut_name):
    width = 1
    height = 1

    # Some Sauce
    vertex_default = '''
    in vec2 a_position;

    void main() {
        gl_Position = vec4(a_position, 0.0, 1.0);
    }
    '''

    # The Secret Sauce
    current_code = '''
    uniform vec3 inputColor;
    uniform sampler2D lut;

    vec3 to_srgb(vec3 c){
        c.rgb = max( 1.055 * pow( c.rgb, vec3(0.416666667,0.416666667,0.416666667) ) - 0.055, 0 );
        return c;
    }

    void main() {
        vec3 color = inputColor / 255;
        
        const vec3 coord_scale = vec3(0.0302734375, 0.96875, 31.0);
        const vec3 coord_offset = vec3( 0.5/1024, 0.5/32, 0.0);
        const vec2 texel_height_X0 = vec2( 0.03125, 0.0 );
        
        vec3 coord = color * coord_scale + coord_offset;
        
        vec3 coord_frac = fract( coord );
        vec3 coord_floor = coord - coord_frac;
        vec2 coord_bot = coord.xy + coord_floor.zz * texel_height_X0;
        vec2 coord_top = coord_bot + texel_height_X0;

        vec3 lutcol_bot = texture( lut, coord_bot ).rgb;
        vec3 lutcol_top = texture( lut, coord_top ).rgb;
        
        vec3 lutColor = mix(lutcol_bot, lutcol_top, coord_frac.z);
        
        
        vec3 shaderColor = lutColor;
        
        gl_FragColor = vec4(shaderColor.rgb, 1);
    }
    '''

    # This object gives access to off screen buffers.
    offscreen = gpu.types.GPUOffScreen(width, height)

    # Context manager to ensure balanced bind calls, even in the case of an error.
    # Only run if valid
    with offscreen.bind():

        # Clear buffers to preset values
        bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)

        # Initialize the shader
        # GPUShader combines multiple GLSL shaders into a program used for drawing.
        # It must contain a vertex and fragment shaders, with an optional geometry shader.
        shader = gpu.types.GPUShader(vertex_default, current_code)

        # Initialize the shader batch
        # It makes sure that all the vertex attributes necessary for a specific shader are provided.
        batch = batch_for_shader(
            shader,
            'TRI_FAN',
            {'a_position': ((-1, -1), (1, -1), (1, 1), (-1, 1))},
        )

        # Bind the shader object. Required to be able to change uniforms of this shader.
        shader.bind()

        try:
            # Specify the value of a uniform variable for the current program object.
            # In this case, a color tuple.
            shader.uniform_float('inputColor', color)
        except ValueError:
            pass

        try:
            lut_image = bpy.data.images[lut_name]

            # Make sure image has a bindcode
            if lut_image.bindcode == 0:
                for i in range(0, 20):
                    lut_image.gl_load()
                    if lut_image.bindcode != 0:
                        break

            # https://docs.blender.org/api/current/bgl.html
            bgl.glBindTexture(bgl.GL_TEXTURE_2D, lut_image.bindcode)
            bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_S,
                                bgl.GL_CLAMP_TO_EDGE)
            bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_T,
                                bgl.GL_CLAMP_TO_EDGE)
            bgl.glTexParameterf(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER,
                                bgl.GL_LINEAR)
            bgl.glTexParameterf(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER,
                                bgl.GL_LINEAR)

            # Specify the value of a uniform variable for the current program object.
            # In this case, an image.
            shader.uniform_int("lut", 0)
        except ValueError:
            pass

        # Run the drawing program with the parameters assigned to the batch.
        batch.draw(shader)

        # The Buffer object is simply a block of memory that is delineated and initialized by the user.
        buffer = bgl.Buffer(bgl.GL_BYTE, width * height * 3)

        # Select a color buffer source for pixels.
        bgl.glReadBuffer(bgl.GL_BACK)

        # Read a block of pixels from the frame buffer.
        bgl.glReadPixels(0, 0, width, height, bgl.GL_RGB, bgl.GL_UNSIGNED_BYTE,
                         buffer)

    # Free the offscreen object. The framebuffer, texture and render objects will no longer be accessible.
    offscreen.free()

    # Get and return the pixels from the final buffer
    final_color = [v for v in buffer]
    final_color = np.array(final_color).reshape(width, height, -1)
    return final_color[0][0]

    # RGBA (1, 1, 1, 1) to RGB as int (255, 255, 255)
    def RGBA_to_RGB_int(rgba):
        rgba = np.round(rgba * 255).astype('int')
        rgba = np.delete(rgba, 3)
        return rgba

    # Make sure not to select a transparent pixel
    def filter_pixel(index_list, image):
        for i in index_list:
            if image[i][3] == 1:
                index = i
                break
        return index

    ########## SETUP ##########

    exposure = np.array([exposure[0], exposure[1], exposure[2], 1])

    # Init MC Texture
    mc_mask_data = np.array(mc_mask.pixels).reshape(-1, 4)
    mc_mask_data = mc_mask_data * exposure
    mc_mask_data = np.clip(mc_mask_data, 0, 1)

    # Init Main Texture
    bpy.data.images[texture.name].scale(mc_mask.size[0], mc_mask.size[1])
    texture_data = np.array(texture.pixels).reshape(-1, 4)

    # Init MC Color indexes
    red_index = -1
    green_index = -1
    blue_index = -1

    # Init MainTex Colors
    red_color = -1
    green_color = -1
    blue_color = -1

    # Init converted colors (light)
    red_converted_color_light = np.array([255, 0, 0, 255])
    green_converted_color_light = np.array([0, 255, 0, 255])
    blue_converted_color_light = np.array([0, 0, 255, 255])

    ########## FIND MC INDEXES ##########
    r, g, b = mc_mask_data[:, 0], mc_mask_data[:, 1], mc_mask_data[:, 2]
    r = r.max()
    g = g.max()
    b = b.max()

    # Red
    pixel_list = np.where(np.all(mc_mask_data == (r, 0, 0, 1), axis=-1))[0]
    if len(pixel_list) > 0:
        red_index = filter_pixel(pixel_list, texture_data)

    # Green
    pixel_list = np.where(np.all(mc_mask_data >= (0, g, 0, 1), axis=-1))[0]
    if len(pixel_list) > 0:
        green_index = filter_pixel(pixel_list, texture_data)
    else:
        # Green (Yellow)
        pixel_list = np.where(np.all(mc_mask_data == (r, g, 0, 1), axis=-1))[0]
        if len(pixel_list) > 0:
            green_index = filter_pixel(pixel_list, texture_data)

    # Blue
    pixel_list = np.where(np.all(mc_mask_data == (0, 0, b, 1), axis=-1))[0]
    if len(pixel_list) > 0:
        blue_index = filter_pixel(pixel_list, texture_data)
    else:
        # Blue (Cyan)
        pixel_list = np.where(np.all(mc_mask_data == (0, g, b, 1), axis=-1))[0]
        if len(pixel_list) > 0:
            blue_index = filter_pixel(pixel_list, texture_data)
        else:
            # Blue (Magenta)
            pixel_list = np.where(np.all(mc_mask_data == (r, 0, b, 1),
                                         axis=-1))[0]
            if len(pixel_list) > 0:
                blue_index = filter_pixel(pixel_list, texture_data)
            else:
                # Blue (White)
                pixel_list = np.where(
                    np.all(mc_mask_data == (r, g, b, 1), axis=-1))[0]
                if len(pixel_list) > 0:
                    blue_index = filter_pixel(pixel_list, texture_data)

    ########## SCALE INDEXES ##########

    # mc_w, mc_h = mc_mask.size
    # tex_w, tex_h = texture.size

    # scale = int((tex_w * tex_h) / (mc_w * mc_h))

    # red_index = red_index * scale
    # green_index = green_index * scale
    # blue_index = blue_index * scale

    ########## GET AND CONVERT COLORS FROM MAIN TEXTURE ##########

    if red_index >= 0:
        red_color = texture_data[red_index]
        red_color = RGBA_to_RGB_int(red_color)

        red_converted_color_light = color_to_KK(red_color, lut)

    if green_index >= 0:
        green_color = texture_data[green_index]
        green_color = RGBA_to_RGB_int(green_color)

        green_converted_color_light = color_to_KK(green_color, lut)

    if blue_index >= 0:
        blue_color = texture_data[blue_index]
        blue_color = RGBA_to_RGB_int(blue_color)

        blue_converted_color_light = color_to_KK(blue_color, lut)

    # print(red_index)
    # print(green_index)
    # print(blue_index)

    # print(red_converted_color_light)
    # print(green_converted_color_light)
    # print(blue_converted_color_light)

    return red_converted_color_light / 255, green_converted_color_light / 255, blue_converted_color_light / 255
lib = ctypes.WinDLL(
    'F:/seniorproject/Visual Studio/BlenderToDepthMapDLL/x64/Debug/BlenderToDepthMapDLL.dll'
)

CreateDepthBufMapFile = lib[1]
WriteDepthMapBufFile = lib[7]
UnmapDepthBufFile = lib[6]

CreateDepthBufMapFile.restype = ctypes.c_void_p
WriteDepthMapBufFile.argtypes = [
    ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int
]
HarambePointer = CreateDepthBufMapFile(0, 0)

bgl.glReadBuffer(bgl.GL_FRONT)

# allocate 4 integers to capture the box (x,y,width,height) of the GL_FRONT
b = bgl.Buffer(bgl.GL_INT, 4)

# capture the GL_FRONT bounding box
bgl.glGetIntegerv(bgl.GL_VIEWPORT, b)

pix = bgl.Buffer(bgl.GL_FLOAT, b[2] * b[3])
print((b[2], b[3]))
print((b[2] * b[3]))


def ReadOutDepthMap():
    global HarambePointer
    global b
Example #33
0
 def drawBoltGPU(self, lines, pixels, coord, w, h):
     vertexSource = '''
         in vec3 position;
         void main() 
         {
             gl_Position = vec4(position, 1.0);
         }
         '''
     geometrySource = '''
         layout(lines) in;
         layout(triangle_strip, max_vertices = 4) out;
         void main() 
         {
             float width = gl_in[1].gl_Position.z;
             vec2 line = gl_in[1].gl_Position.xy - gl_in[0].gl_Position.xy;
             vec2 normal = normalize(vec2(line.y, -line.x))*(width/2.0);
             for(int i=0; i<4; i++)
             {
                 vec2 coords = gl_in[i/2].gl_Position.xy+(1-2*(i%2))*normal;
                 gl_Position = vec4(coords, 0.0, 1.0);
                 EmitVertex();
             }
             EndPrimitive();
         }
         '''
     fragmentSource = '''
         out vec4 fragColor;
         void main()
         {
             fragColor = vec4(1.0,1.0,1.0,1.0);
         }
         '''
     positions = []
     for segment in lines.segments:
         pts = lines.getCoords(segment)[0]
         ptA = np.divide(pts[0], np.array([w, h])) * 2 - 1.0
         ptB = np.divide(pts[1], np.array([w, h])) * 2 - 1.0
         width = bl_math.clamp(
             self.thickness *
             (1.0 - (segment.level / (self.falloff * lines.getMaxLevel()))),
             0, self.thickness) / w
         positions.append((ptA[0], ptA[1], width))
         positions.append((ptB[0], ptB[1], width))
     offscreen = gpu.types.GPUOffScreen(w, h)
     shaders = gpu.types.GPUShader(vertexSource,
                                   fragmentSource,
                                   geocode=geometrySource)
     batch = batch_for_shader(shaders, 'LINES',
                              {"position": tuple(positions)})
     with offscreen.bind():
         bgl.glClearColor(0.0, 0.0, 0.0, 1.0)
         bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)
         with gpu.matrix.push_pop():
             gpu.matrix.load_matrix(mathutils.Matrix.Identity(4))
             gpu.matrix.load_projection_matrix(mathutils.Matrix.Identity(4))
             shaders.bind()
             batch.draw(shaders)
         buffer = bgl.Buffer(bgl.GL_FLOAT, w * h * 4)
         bgl.glReadBuffer(bgl.GL_BACK)
         bgl.glReadPixels(0, 0, w, h, bgl.GL_RGBA, bgl.GL_FLOAT, buffer)
     pixels.foreach_set(buffer)
     offscreen.free()
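
drawBoltGPU writes the rendered RGBA floats straight into `pixels` via foreach_set, so the caller is expected to pass something like an Image.pixels array of matching size. A rough sketch of preparing those arguments; the image name and size are placeholders, and `lines` has to come from the add-on's own bolt generator:

import bpy

w, h = 512, 512  # illustrative render size
img = bpy.data.images.new("bolt_render", w, h, float_buffer=True)
# The buffer read back in drawBoltGPU holds w * h * 4 floats, matching the
# RGBA pixel array of the image created above:
# self.drawBoltGPU(lines, img.pixels, (0, 0), w, h)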
def image_to_KK(image, lut_name):
    width = image.size[0]
    height = image.size[1]

    # Some Sauce
    vertex_default = '''
    in vec2 a_position;
    in vec2 a_texcoord;

    void main() {
        gl_Position = vec4(a_position, 0.0, 1.0);
    }
    '''

    # The Secret Sauce
    current_code = '''
    uniform sampler2D tex0;
    uniform sampler2D lut;
    uniform vec2    u_resolution;

    vec3 to_srgb(vec3 c){
        c.rgb = max( 1.055 * pow( c.rgb, vec3(0.416666667,0.416666667,0.416666667) ) - 0.055, 0 );
        return c;
    }
    
    vec3 apply_lut(vec3 color) {
        const vec3 coord_scale = vec3(0.0302734375, 0.96875, 31.0);
        const vec3 coord_offset = vec3( 0.5/1024, 0.5/32, 0.0);
        const vec2 texel_height_X0 = vec2( 0.03125, 0.0 );
        
        vec3 coord = color * coord_scale + coord_offset;
        
        vec3 coord_frac = fract( coord );
        vec3 coord_floor = coord - coord_frac;
        vec2 coord_bot = coord.xy + coord_floor.zz * texel_height_X0;
        vec2 coord_top = coord_bot + texel_height_X0;

        vec3 lutcol_bot = texture2D( lut, coord_bot ).rgb;
        vec3 lutcol_top = texture2D( lut, coord_top ).rgb;
        
        vec3 lutColor = mix(lutcol_bot, lutcol_top, coord_frac.z);
        
        return lutColor;
    }

    void main() {
        vec4 texRGBA = texture2D(tex0, gl_FragCoord.xy / u_resolution);

        vec3 texColor = to_srgb(texRGBA.rgb);

        vec3 newColor = apply_lut(texColor);

        newColor = to_srgb(newColor);
        
        gl_FragColor = vec4(newColor.rgb, texRGBA.a);
    }
    '''

    # This object gives access to off screen buffers.
    offscreen = gpu.types.GPUOffScreen(width, height)

    # Context manager to ensure balanced bind calls, even in the case of an error.
    # Only run if valid
    with offscreen.bind():

        # Clear buffers to preset values
        bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)

        # Initialize the shader
        # GPUShader combines multiple GLSL shaders into a program used for drawing.
        # It must contain a vertex and fragment shaders, with an optional geometry shader.
        shader = gpu.types.GPUShader(vertex_default, current_code)

        # Initialize the shader batch
        # It makes sure that all the vertex attributes necessary for a specific shader are provided.
        batch = batch_for_shader(
            shader,
            'TRI_FAN',
            {'a_position': ((-1, -1), (1, -1), (1, 1), (-1, 1))},
        )

        # Bind the shader object. Required to be able to change uniforms of this shader.
        shader.bind()

        bgl.glUniform1i(bgl.glGetUniformLocation(shader.program, "tex0"), 0)
        bgl.glUniform1i(bgl.glGetUniformLocation(shader.program, "lut"), 1)

        try:
            # Make sure image has a bindcode
            if image.bindcode == 0:
                for i in range(0, 20):
                    image.gl_load()
                    if image.bindcode != 0:
                        break

            # https://docs.blender.org/api/current/bgl.html
            bgl.glActiveTexture(bgl.GL_TEXTURE0)
            bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode)
            bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_S,
                                bgl.GL_CLAMP_TO_EDGE)
            bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_T,
                                bgl.GL_CLAMP_TO_EDGE)
            bgl.glTexParameterf(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER,
                                bgl.GL_LINEAR)
            bgl.glTexParameterf(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER,
                                bgl.GL_LINEAR)

            # Specify the value of a uniform variable for the current program object.
            # In this case, an image.
            shader.uniform_int("tex0", 0)
        except ValueError:
            pass

        try:
            lut_image = bpy.data.images[lut_name]

            # Make sure image has a bindcode
            if lut_image.bindcode == 0:
                for i in range(0, 20):
                    lut_image.gl_load()
                    if lut_image.bindcode != 0:
                        break

            # https://docs.blender.org/api/current/bgl.html
            bgl.glActiveTexture(bgl.GL_TEXTURE1)
            bgl.glBindTexture(bgl.GL_TEXTURE_2D, lut_image.bindcode)
            bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_S,
                                bgl.GL_CLAMP_TO_EDGE)
            bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_T,
                                bgl.GL_CLAMP_TO_EDGE)
            bgl.glTexParameterf(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER,
                                bgl.GL_LINEAR)
            bgl.glTexParameterf(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER,
                                bgl.GL_LINEAR)

            # Specify the value of a uniform variable for the current program object.
            # In this case, an image.
            shader.uniform_int("lut", 1)
        except ValueError:
            pass

        try:
            shader.uniform_float('u_resolution', (width, height))
        except ValueError:
            pass

        # Run the drawing program with the parameters assigned to the batch.
        batch.draw(shader)

        # The Buffer object is simply a block of memory that is delineated and initialized by the user.
        buffer = bgl.Buffer(bgl.GL_BYTE, width * height * 4)

        # Select a color buffer source for pixels.
        bgl.glReadBuffer(bgl.GL_BACK)

        # Read a block of pixels from the frame buffer.
        bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA,
                         bgl.GL_UNSIGNED_BYTE, buffer)

    # Free the offscreen object. The framebuffer, texture and render objects will no longer be accessible.
    offscreen.free()

    # Return the final buffer-pixels
    pixels = [v / 255 for v in buffer]
    return pixels, width, height
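
An illustrative call to image_to_KK; the source image and LUT names are placeholders, and the returned float pixels can be written straight into a new image datablock:

import bpy

src = bpy.data.images["MainTex"]              # placeholder source image
pixels, w, h = image_to_KK(src, "lut_light")  # placeholder LUT image name
converted = bpy.data.images.new("MainTex_KK", w, h)
converted.pixels = pixels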
Example #35
0
def render_map(self, context):
    import tempfile

    image_name = "Gem Map"
    width = self.width
    height = self.height
    ratio_w = width / self.region.width
    ratio_h = height / self.region.height
    padding = 30
    x = padding
    y = height - padding
    temp_filepath = os.path.join(tempfile.gettempdir(), "gem_map_temp.png")

    asset.render_preview(width,
                         height,
                         temp_filepath,
                         compression=15,
                         gamma=2.2)
    render_image = load_image(temp_filepath)
    render_image.gl_load()

    mat_offscreen = Matrix()
    mat_offscreen[0][0] = 2 / width
    mat_offscreen[0][3] = -1
    mat_offscreen[1][1] = 2 / height
    mat_offscreen[1][3] = -1

    shader = gpu.shader.from_builtin("2D_UNIFORM_COLOR")
    shader_img = gpu.shader.from_builtin("2D_IMAGE")
    offscreen = gpu.types.GPUOffScreen(width, height)

    with offscreen.bind():
        bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)

        with gpu.matrix.push_pop():
            gpu.matrix.load_matrix(mat_offscreen)
            gpu.matrix.load_projection_matrix(Matrix())

            # Background
            # --------------------------------

            shader.bind()
            shader.uniform_float("color", (1.0, 1.0, 1.0, 1.0))
            batch = batch_for_shader(
                shader, "TRI_FAN",
                {"pos": self.rect_coords(0, 0, width, height)})
            batch.draw(shader)

            # Render result
            # --------------------------------

            bgl.glEnable(bgl.GL_BLEND)

            bgl.glActiveTexture(bgl.GL_TEXTURE0)
            bgl.glBindTexture(bgl.GL_TEXTURE_2D, render_image.bindcode)

            shader_img.bind()
            shader_img.uniform_int("image", 0)

            args = {
                "pos": self.rect_coords(0, 0, width, height),
                "texCoord": self.rect_coords(0, 0, 1, 1),
            }

            batch = batch_for_shader(shader_img, "TRI_FAN", args)
            batch.draw(shader_img)

            # Gem map
            # --------------------------------

            self.draw_gems(context, ratio_w=ratio_w, ratio_h=ratio_h)
            self.onscreen_gem_table(x, y, color=(0.0, 0.0, 0.0, 1.0))

        buffer = bgl.Buffer(bgl.GL_BYTE, width * height * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA,
                         bgl.GL_UNSIGNED_BYTE, buffer)

    offscreen.free()

    if image_name not in bpy.data.images:
        bpy.data.images.new(image_name, width, height)

    image = bpy.data.images[image_name]
    image.scale(width, height)
    image.pixels = [v / 255 for v in buffer]

    if self.use_save and bpy.data.is_saved:
        filepath = bpy.data.filepath
        filename = os.path.splitext(os.path.basename(filepath))[0]
        save_path = os.path.join(os.path.dirname(filepath),
                                 filename + " Gem Map.png")

        image.filepath_raw = save_path
        image.file_format = "PNG"
        image.save()

    render_image.gl_free()
    bpy.data.images.remove(render_image)

    if os.path.exists(temp_filepath):
        os.remove(temp_filepath)

    # Restore OpenGL defaults
    # ----------------------------

    bgl.glDisable(bgl.GL_BLEND)

    # Show in a new window
    # ----------------------------

    asset.show_window(width, height, space_data={"image": image})
Example #36
0
    def execute(self, context):
        bgl.glEnable(bgl.GL_PROGRAM_POINT_SIZE)

        scene = context.scene
        render = scene.render
        image_settings = render.image_settings

        original_depth = image_settings.color_depth
        image_settings.color_depth = '8'

        scale = render.resolution_percentage / 100
        width = int(render.resolution_x * scale)
        height = int(render.resolution_y * scale)

        pcv = context.object.point_cloud_visualizer
        cloud = PCVManager.cache[pcv.uuid]
        cam = scene.camera
        if (cam is None):
            self.report({'ERROR'}, "No camera found.")
            return {'CANCELLED'}

        render_suffix = pcv.render_suffix
        render_zeros = pcv.render_zeros

        offscreen = GPUOffScreen(width, height)
        offscreen.bind()
        try:
            gpu.matrix.load_matrix(Matrix.Identity(4))
            gpu.matrix.load_projection_matrix(Matrix.Identity(4))

            bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)

            o = cloud['object']
            vs = cloud['vertices']
            cs = cloud['colors']
            ns = cloud['normals']

            dp = pcv.render_display_percent
            l = int((len(vs) / 100) * dp)
            if (dp >= 99):
                l = len(vs)
            vs = vs[:l]
            cs = cs[:l]
            ns = ns[:l]

            # sort by depth
            mw = o.matrix_world
            depth = []
            for i, v in enumerate(vs):
                vw = mw @ Vector(v)
                depth.append(world_to_camera_view(scene, cam, vw)[2])
            zps = zip(depth, vs, cs, ns)
            sps = sorted(zps, key=lambda a: a[0])
            # split and reverse
            vs = [a for _, a, b, c in sps][::-1]
            cs = [b for _, a, b, c in sps][::-1]
            ns = [c for _, a, b, c in sps][::-1]

            shader = GPUShader(vertex_shader, fragment_shader)
            batch = batch_for_shader(shader, 'POINTS', {
                "position": vs,
                "color": cs,
                "normal": ns,
            })
            shader.bind()

            view_matrix = cam.matrix_world.inverted()
            camera_matrix = cam.calc_matrix_camera(
                bpy.context.depsgraph,
                x=render.resolution_x,
                y=render.resolution_y,
                scale_x=render.pixel_aspect_x,
                scale_y=render.pixel_aspect_y,
            )
            perspective_matrix = camera_matrix @ view_matrix

            shader.uniform_float("perspective_matrix", perspective_matrix)
            shader.uniform_float("object_matrix", o.matrix_world)
            shader.uniform_float("point_size", pcv.render_point_size)
            shader.uniform_float("alpha_radius", pcv.alpha_radius)

            if (pcv.light_enabled and pcv.has_normals):
                cm = Matrix((
                    (-1.0, 0.0, 0.0, 0.0),
                    (0.0, -0.0, 1.0, 0.0),
                    (0.0, -1.0, -0.0, 0.0),
                    (0.0, 0.0, 0.0, 1.0),
                ))
                _, obrot, _ = o.matrix_world.decompose()
                mr = obrot.to_matrix().to_4x4()
                mr.invert()
                direction = cm @ pcv.light_direction
                direction = mr @ direction
                shader.uniform_float("light_direction", direction)

                inverted_direction = direction.copy()
                inverted_direction.negate()

                c = pcv.light_intensity
                shader.uniform_float("light_intensity", (
                    c,
                    c,
                    c,
                ))
                shader.uniform_float("shadow_direction", inverted_direction)
                c = pcv.shadow_intensity
                shader.uniform_float("shadow_intensity", (
                    c,
                    c,
                    c,
                ))
                shader.uniform_float("show_normals", float(pcv.show_normals))
            else:
                z = (0, 0, 0)
                shader.uniform_float("light_direction", z)
                shader.uniform_float("light_intensity", z)
                shader.uniform_float("shadow_direction", z)
                shader.uniform_float("shadow_intensity", z)
                shader.uniform_float("show_normals", float(False))

            batch.draw(shader)

            buffer = bgl.Buffer(bgl.GL_BYTE, width * height * 4)
            bgl.glReadBuffer(bgl.GL_BACK)
            bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA,
                             bgl.GL_UNSIGNED_BYTE, buffer)

        except Exception as e:
            self.report({'ERROR'}, str(e))
            return {'CANCELLED'}

        finally:
            offscreen.unbind()
            offscreen.free()

        # image from buffer
        image_name = "pcv_output"
        if (image_name not in bpy.data.images):
            bpy.data.images.new(image_name, width, height)
        image = bpy.data.images[image_name]
        image.scale(width, height)
        image.pixels = [v / 255 for v in buffer]

        # save as image file
        save_render(
            self,
            scene,
            image,
            render_suffix,
            render_zeros,
        )

        # restore
        image_settings.color_depth = original_depth

        return {'FINISHED'}