Example #1
    def do_gst_gl_filter_set_caps(self, in_caps, out_caps):
        in_info = GstVideo.VideoInfo()
        in_info.from_caps(in_caps)

        out_info = GstVideo.VideoInfo()
        out_info.from_caps(out_caps)

        in_ratio = in_info.width / in_info.height
        out_ratio = out_info.width / out_info.height

        if in_ratio > out_ratio:
            w = out_info.width
            h = out_info.width / in_ratio
            x = 0
            y = (out_info.height - h) / 2
        elif in_ratio < out_ratio:
            w = out_info.height * in_ratio
            h = out_info.height
            x = (out_info.width - w) / 2
            y = 0
        else:
            w = out_info.width
            h = out_info.height
            x = 0
            y = 0

        self.x = int(x)
        self.y = int(y)
        self.w = int(w)
        self.h = int(h)
        self.scale_x = self.w / out_info.width
        self.scale_y = self.h / out_info.height
        return True
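The branches above letterbox or pillarbox the input while preserving its aspect ratio. A quick standalone check of the letterbox arithmetic (plain Python; the frame sizes are hypothetical):

# Fitting a 1920x1080 (16:9) input into a 640x480 (4:3) output:
# in_ratio (1.78) > out_ratio (1.33), so the first branch letterboxes.
in_w, in_h, out_w, out_h = 1920, 1080, 640, 480
in_ratio = in_w / in_h
w = out_w                    # 640
h = out_w / in_ratio         # 360.0
y = (out_h - h) / 2          # 60.0 pixels of bar above and below
assert (int(w), int(h), int(y)) == (640, 360, 60)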
Example #2
 def __init__(self, buffer, caps=None):
     self.buffer = buffer
     self.caps = caps
     self.caps_str = self.caps.get_structure(0)
     self.video_meta = GstVideo.buffer_get_video_meta(buffer)
     if (not self.video_meta):
         self.video_meta = GstVideo.VideoInfo()
         self.video_meta.from_caps(self.caps)
     self.width = self.video_meta.width
     self.height = self.video_meta.height
     self.format_str = self.caps_str.get_string("format")
     self.channels = VideoFrame.FORMAT_CHANNELS[self.format_str]
Example #3
def event_probe(pad, info, pdata):
    event = info.get_event()
    evtype = GstVideo.navigation_event_get_type(event)
    if evtype == GstVideo.NavigationEventType.MOUSE_MOVE:
        foo = GstVideo.navigation_event_parse_mouse_move_event(event)
        print(foo)
    if evtype == GstVideo.NavigationEventType.KEY_PRESS or evtype == GstVideo.NavigationEventType.KEY_RELEASE:
        foo = GstVideo.navigation_event_parse_key_event(event)
        print(foo)
    if evtype == GstVideo.NavigationEventType.MOUSE_BUTTON_PRESS or evtype == GstVideo.NavigationEventType.MOUSE_BUTTON_RELEASE:
        foo = GstVideo.navigation_event_parse_mouse_button_event(event)
        print(foo)
    return Gst.PadProbeReturn.OK
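Navigation events travel upstream from the video sink, so a probe like the one above must listen for upstream events. A minimal wiring sketch (element and pad names are hypothetical):

# Hypothetical setup: catch navigation events on a source element's pad.
src = pipeline.get_by_name("src")
pad = src.get_static_pad("src")
pad.add_probe(Gst.PadProbeType.EVENT_UPSTREAM, event_probe, None)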
Example #4
 def do_transform_ip(self, frame_buf):
     self.render()
     if self.composition:
         # Note: Buffer IS writable (ref is 1 in native land). However gst-python
         # took an additional ref so it's now 2 and gst_buffer_is_writable
         # returns false. We can't modify the buffer without fiddling with refcount.
         if frame_buf.mini_object.refcount != 2:
             return Gst.FlowReturn.ERROR
         frame_buf.mini_object.refcount -= 1
         GstVideo.buffer_add_video_overlay_composition_meta(
             frame_buf, self.composition)
         frame_buf.mini_object.refcount += 1
     return Gst.FlowReturn.OK
Example #5
    def __init__(self, buffer: Gst.Buffer, video_info: GstVideo.VideoInfo = None, caps: Gst.Caps = None):
        self.__buffer = buffer
        self.__video_info = None

        if video_info:
            self.__video_info = video_info
        elif caps:
            self.__video_info = GstVideo.VideoInfo()
            self.__video_info.from_caps(caps)
        elif self.video_meta():
            self.__video_info = GstVideo.VideoInfo()
            self.__video_info.width = self.video_meta().width
            self.__video_info.height = self.video_meta().height
Example #6
    def requestKeyframe(self):
        print "request keyframe"
        self.pipelines['main'].send_event(
            GstVideo.video_event_new_upstream_force_key_unit(
                Gst.CLOCK_TIME_NONE, True, self.cnt))

        return True
Example #7
    def render_svg(self, svg, buf):
        meta = GstVideo.buffer_get_video_meta(buf)
        if meta:
            assert meta.n_planes == 1
            assert meta.width == self.width
            assert meta.height == self.height
            assert meta.stride[0] >= self.min_stride
            stride = meta.stride[0]
        else:
            stride = self.min_stride

        with _gst_buffer_map(buf, Gst.MapFlags.WRITE) as mapped:
            assert len(mapped) >= stride * self.height

            # Fill with transparency.
            ctypes.memset(ctypes.addressof(mapped), 0, ctypes.sizeof(mapped))

            surface = libcairo.cairo_image_surface_create_for_data(
                ctypes.addressof(mapped), int(cairo.FORMAT_ARGB32), self.width,
                self.height, stride)

            # Render the SVG overlay.
            data = svg.encode('utf-8')
            context = libcairo.cairo_create(surface)
            handle = librsvg.rsvg_handle_new_from_data(data, len(data), 0)
            librsvg.rsvg_handle_render_cairo(handle, context)
            librsvg.rsvg_handle_close(handle, 0)
            libgobject.g_object_unref(handle)
            libcairo.cairo_surface_flush(surface)
            libcairo.cairo_surface_destroy(surface)
            libcairo.cairo_destroy(context)
Example #8
def gst_sample_to_pixbuf(sample):
    '''Converts the image from a given GstSample to a GdkPixbuf'''
    caps = Gst.Caps.from_string("video/x-raw,format=RGBA")
    converted_sample = GstVideo.video_convert_sample(sample, caps, Gst.CLOCK_TIME_NONE)

    buffer = converted_sample.get_buffer()
    pixbuf = buffer.extract_dup(0, buffer.get_size())
    
    caps = converted_sample.get_caps()
    struct = caps.get_structure(0)
    
    colorspace = GdkPixbuf.Colorspace.RGB
    alpha = True
    bps = 8
    width_struct = struct.get_int("width")
    assert width_struct[0]
    height_struct = struct.get_int("height")
    assert height_struct[0]
    original_width = width_struct[1]
    original_height = height_struct[1]

    rowstride_struct = struct.get_int("stride")
    if rowstride_struct[0] == True:
        # The stride information might be hidden in the struct.
        # For now it doesn't work. I think it's the name of the field.
        rowstride = rowstride_struct[1]
    else:
        rowstride = bps / 8 * 4 * original_width

    gdkpixbuf = GdkPixbuf.Pixbuf.new_from_bytes(
        GLib.Bytes.new_take(pixbuf),
        colorspace, alpha, bps, original_width,
        original_height, rowstride)
        
    return gdkpixbuf
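A possible usage sketch for the converter above, assuming a sink element whose last-sample property is enabled (the element name is hypothetical):

# Grab the most recent frame from a sink and save it as a PNG.
sink = pipeline.get_by_name("sink")        # assumed element name
sample = sink.get_property("last-sample")  # needs enable-last-sample=true
if sample:
    pixbuf = gst_sample_to_pixbuf(sample)
    pixbuf.savev("frame.png", "png", [], [])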
Example #9
    def callback(app_sink, obj: 'ISource'):
        """
        This function will be called in a separate thread when our appsink
        says there is data for us. user_data has to be defined
        when calling g_signal_connect. It can be used to pass objects etc.
        from your other function to the callback.
        """
        sample = app_sink.emit("pull-sample")
        if sample:
            caps = sample.get_caps()
            gst_buffer = sample.get_buffer()
            try:
                (ret, buffer_map) = gst_buffer.map(Gst.MapFlags.READ)
                video_info = GstVideo.VideoInfo()
                video_info.from_caps(caps)

                # Assumes a tightly packed 3-byte format such as RGB.
                np_data = np.frombuffer(buffer_map.data, np.uint8).reshape(
                    (video_info.height, video_info.width, 3))
                with obj.lock:
                    buffer = obj.buffer
                    if buffer is None or buffer.shape != np_data.shape:
                        obj.buffer = np_data.copy()
                    else:
                        np.copyto(buffer, np_data)
            finally:
                gst_buffer.unmap(buffer_map)

        return Gst.FlowReturn.OK
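As the docstring notes, the extra argument is supplied when the signal is connected. A minimal sketch of that wiring (the element creation is an assumption; callback and obj are the names from the example):

# Hypothetical appsink setup for the callback above.
appsink = Gst.ElementFactory.make("appsink", None)
appsink.set_property("emit-signals", True)    # required for "new-sample"
appsink.connect("new-sample", callback, obj)  # obj arrives as user_data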
Example #10
 def request_iframe(self):
     src = self.pipe.get_by_name('iframe')
     event = GstVideo.video_event_new_downstream_force_key_unit(
         Gst.CLOCK_TIME_NONE, Gst.CLOCK_TIME_NONE, Gst.CLOCK_TIME_NONE,
         True, 0)
     src.send_event(event)
     src.unref()
Example #11
 def __init__(self,
              engine,
              src_size,
              save_every_n_frames=-1,
              print_stats=False):
     self.engine = engine
     self.src_size = src_size
     self.save_every_n_frames = save_every_n_frames
     self.print_stats = print_stats
     self.inf_q = queue.SimpleQueue()
     self.trash = queue.SimpleQueue()
     self.trash_lock = threading.RLock()
     self.vinfo = GstVideo.VideoInfo()
     self.glcontext = None
     self.pool = None
     self.fbo = None
     self.default_shader = None
     self.hm_shader = None
     self.hm_tex_id = 0  # Instantaneous heatmap
     self.vao_id = 0
     self.positions_buffer = 0
     self.texcoords_buffer = 0
     self.vbo_indices_buffer = 0
     self.frames = 0
     self.reset_display_toggles()
     self.inf_times = collections.deque(maxlen=100)
     self.agg_times = collections.deque(maxlen=100)
     self.frame_times = collections.deque(maxlen=100)
     self.running = True
     self.gc_thread = threading.Thread(target=self.gc_loop)
     self.gc_thread.start()
     self.last_frame_time = time.monotonic()
Example #12
    def do_keyframe(self, user_data):
        # Forces a keyframe on all video encoders
        event = GstVideo.video_event_new_downstream_force_key_unit(
            self.clock.get_time(), 0, 0, True, 0)
        self.pipeline.send_event(event)

        return True
Example #14
 def send_event(self):
     self.count = self.count + 1
     # if self.count > 2147483640:
     #     self.count = 0
     pushed = self.appsink_pad.push_event(
         GstVideo.video_event_new_upstream_force_key_unit(
             Gst.CLOCK_TIME_NONE, True, self.count))
     print "key unit event sent... {0}".format(pushed)
Example #15
    def render(self):
        if not self.svg:
            self.composition = None
            self.rendered_svg = None
            return

        if self.svg == self.rendered_svg:
            return

        overlay_size = self.render_size * self.scale_factor
        stride = libcairo.cairo_format_stride_for_width(
            int(cairo.FORMAT_ARGB32), overlay_size.width)
        overlay_buffer = Gst.Buffer.new_allocate(None,
                                                 stride * overlay_size.height)
        with _gst_buffer_map(overlay_buffer, Gst.MapFlags.WRITE) as mapped:
            # Fill with transparency and create surface from buffer.
            ctypes.memset(ctypes.addressof(mapped), 0, ctypes.sizeof(mapped))
            surface = libcairo.cairo_image_surface_create_for_data(
                ctypes.addressof(mapped), int(cairo.FORMAT_ARGB32),
                overlay_size.width, overlay_size.height, stride)

            # Render the SVG overlay.
            data = self.svg.encode('utf-8')
            context = libcairo.cairo_create(surface)
            libcairo.cairo_scale(context, self.scale_factor, self.scale_factor)
            handle = librsvg.rsvg_handle_new_from_data(data, len(data), 0)
            librsvg.rsvg_handle_render_cairo(handle, context)
            librsvg.rsvg_handle_close(handle, 0)
            libgobject.g_object_unref(handle)
            libcairo.cairo_surface_flush(surface)
            libcairo.cairo_surface_destroy(surface)
            libcairo.cairo_destroy(context)

            # Attach overlay to VideoOverlayComposition.
            GstVideo.buffer_add_video_meta(overlay_buffer,
                                           GstVideo.VideoFrameFlags.NONE,
                                           GstVideo.VideoFormat.BGRA,
                                           overlay_size.width,
                                           overlay_size.height)
            rect = GstVideo.VideoOverlayRectangle.new_raw(
                overlay_buffer, 0, 0, self.render_size.width,
                self.render_size.height,
                GstVideo.VideoOverlayFormatFlags.PREMULTIPLIED_ALPHA)
            self.composition = GstVideo.VideoOverlayComposition.new(rect)
            self.rendered_svg = self.svg
Example #16
    def force_keyframe(self):
        return

        # Disabled: everything below the early return is unreachable.
        clock = self.get_clock()
        now = clock.get_time()

        force_keyframe_event = GstVideo.video_event_new_downstream_force_key_unit(now, now, now, True, 0)
        self.source.send_event(force_keyframe_event)
Example #17
 def on_sync_message(self, bus, message):
     s = message.get_structure()
     logger.debug("sync message %s", s)
     if s is None:
         return True
     if GstVideo.is_video_overlay_prepare_window_handle_message(message):
         imagesink = message.src
         imagesink.set_property("force-aspect-ratio", True)
         self.reparent(self.xid, imagesink)
     return True
Example #18
    def blockOnNextKeyframe(self):

        tp = self.elements["srcQueue"].get_static_pad("sink")
        #         tp = self.elements["queue_preview"].get_static_pad("sink")
        self.pipelines['main'].send_event(
            GstVideo.video_event_new_upstream_force_key_unit(
                Gst.CLOCK_TIME_NONE, True, self.cnt))
        tp.add_probe(Gst.PadProbeType.BUFFER, blockActiveQueuePad, self)

        return True
Example #20
    def __init__(self):
        GstBase.BaseTransform.__init__(self)
        self.set_in_place(True)

        self.videoinfo = GstVideo.VideoInfo()

        self.last_gpio_ts = 0
        self.last_buf_ts = 0

        GPIO.setmode(GPIO.BOARD)
        channel = 16
        GPIO.setup(channel, GPIO.IN, pull_up_down=GPIO.PUD_OFF)
        GPIO.add_event_detect(channel, GPIO.FALLING, callback=self.gpio_event)
Example #21
    def do_set_caps(self, icaps, ocaps):
        in_info = GstAudio.AudioInfo()
        in_info.from_caps(icaps)
        out_info = GstVideo.VideoInfo()
        out_info.from_caps(ocaps)

        self.convert_info = GstAudio.AudioInfo()
        self.convert_info.set_format(GstAudio.AudioFormat.S32,
                                     in_info.rate,
                                     in_info.channels,
                                     in_info.position)
        self.converter = GstAudio.AudioConverter.new(GstAudio.AudioConverterFlags.NONE,
                                                     in_info,
                                                     self.convert_info,
                                                     None)

        self.fig = plt.figure()
        dpi = self.fig.get_dpi()
        self.fig.patch.set_alpha(0.3)
        self.fig.set_size_inches(out_info.width / float(dpi),
                                 out_info.height / float(dpi))
        self.ax = plt.Axes(self.fig, [0., 0., 1., 1.])
        self.fig.add_axes(self.ax)
        self.ax.set_axis_off()
        self.ax.set_ylim((GLib.MININT, GLib.MAXINT))
        self.agg = self.fig.canvas.switch_backends(FigureCanvasAgg)
        self.h = None

        samplesperwindow = int(in_info.rate * in_info.channels * self.window_duration)
        self.thinning_factor = max(int(samplesperwindow / out_info.width - 1), 1)

        cap = int(samplesperwindow / self.thinning_factor)
        self.ax.set_xlim([0, cap])
        self.ringbuffer = RingBuffer(capacity=cap)
        self.ringbuffer.extend([0.0] * cap)
        self.frame_duration = Gst.util_uint64_scale_int(Gst.SECOND,
                                                        out_info.fps_d,
                                                        out_info.fps_n)
        self.next_time = self.frame_duration

        self.agg.draw()
        self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)

        self.samplesperbuffer = Gst.util_uint64_scale_int(in_info.rate * in_info.channels,
                                                          out_info.fps_d,
                                                          out_info.fps_n)
        self.next_offset = self.samplesperbuffer
        self.cur_offset = 0
        self.buf_offset = 0

        return True
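A quick sanity check of the per-frame timing math above (plain arithmetic; only requires Gst to be importable):

# At 30/1 fps a frame lasts SECOND * fps_d / fps_n nanoseconds.
assert Gst.util_uint64_scale_int(Gst.SECOND, 1, 30) == 33333333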
Example #22
    def blockOnNextKeyframe(self):
        if self.selfTermination:
            if self.cnt >= self.selfTermination:
                self.exitByTimer = True
                self.window.destroy()

        tp = self.elements["srcQueue"].get_static_pad("sink")
        #         tp = self.elements["queue_preview"].get_static_pad("sink")
        self.pipelines['main'].send_event(GstVideo.video_event_new_upstream_force_key_unit(Gst.CLOCK_TIME_NONE, True, self.cnt))
        tp.add_probe(Gst.PadProbeType.BUFFER, blockActiveQueuePad, self)

        self.updateLight()

        return True
Example #23
def callback(appsink, user_data):
    """
    This function will be called in a separate thread when our appsink
    says there is data for us. user_data has to be defined
    when calling g_signal_connect. It can be used to pass objects etc.
    from your other function to the callback.
    """
    sample = appsink.emit("pull-sample")

    if sample:

        caps = sample.get_caps()

        gst_buffer = sample.get_buffer()

        try:
            (ret, buffer_map) = gst_buffer.map(Gst.MapFlags.READ)

            video_info = GstVideo.VideoInfo()
            video_info.from_caps(caps)

            stride = video_info.finfo.bits // 8

            pixel_offset = int(video_info.width / 2 * stride +
                               video_info.width * video_info.height / 2 *
                               stride)

            # this is only one pixel
            # when dealing with formats like BGRx
            # pixel_data will have to consist out of
            # pixel_offset   => B
            # pixel_offset+1 => G
            # pixel_offset+2 => R
            # pixel_offset+3 => x
            pixel_data = buffer_map.data[pixel_offset]
            timestamp = gst_buffer.pts

            global framecount

            output_str = "Captured frame {}, Pixel Value={} Timestamp={}".format(
                framecount, pixel_data, timestamp)

            print(output_str, end="\r")  # print with \r to rewrite line

            framecount += 1

        finally:
            gst_buffer.unmap(buffer_map)

    return Gst.FlowReturn.OK
Example #25
    def inference_loop(self):
        while True:
            with self.condition:
                while not self.gstbuffer and self.running:
                    self.condition.wait()
                if not self.running:
                    break
                gstbuffer = self.gstbuffer
                self.gstbuffer = None

            # Input tensor is expected to be tightly packed, that is,
            # width and stride in pixels are expected to be the same.
            # For the Coral devboard using GPU this will always be true,
            # but when using generic GStreamer CPU based elements the line
            # stride will always be a multiple of 4 bytes in RGB format.
            # In case of mismatch we have to copy the input line by line.
            # For best performance input tensor size should take this
            # into account when using CPU based elements.
            # TODO: Use padded posenet models to avoid this.
            meta = GstVideo.buffer_get_video_meta(gstbuffer)
            assert meta and meta.n_planes == 1
            bpp = 3  # bytes per pixel.
            buf_stride = meta.stride[0]  # 0 for first and only plane.
            inf_stride = meta.width * bpp

            if inf_stride == buf_stride:
                # Fast case, pass buffer as input tensor as is.
                input_tensor = gstbuffer
            else:
                # Slow case, need to pack lines tightly (copy).
                result, mapinfo = gstbuffer.map(Gst.MapFlags.READ)
                assert result
                data_view = memoryview(mapinfo.data)
                input_tensor = bytearray(inf_stride * meta.height)
                src_offset = dst_offset = 0
                for row in range(meta.height):
                    src_end = src_offset + inf_stride
                    dst_end = dst_offset + inf_stride
                    input_tensor[dst_offset:dst_end] = data_view[
                        src_offset:src_end]
                    src_offset += buf_stride
                    dst_offset += inf_stride
                input_tensor = bytes(input_tensor)
                gstbuffer.unmap(mapinfo)

            output = self.inf_callback(input_tensor)
            with self.condition:
                self.output = output
                self.condition.notify_all()
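For reference, the slow-path row copy above can also be written with numpy slicing; a sketch under the same assumptions (numpy is available and the mapped data holds at least meta.height * buf_stride bytes):

import numpy as np

def tightly_pack(mapped_data, height, buf_stride, inf_stride):
    # Keep only the first inf_stride bytes of each buf_stride-wide row.
    rows = np.frombuffer(mapped_data, np.uint8)[:height * buf_stride]
    return rows.reshape(height, buf_stride)[:, :inf_stride].tobytes()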
Example #26
    def on_file_change(self, bus, msg):
        if msg.type == Gst.MessageType.ELEMENT:
            structure = msg.get_structure()
            #logger.debug("Message received from:%s",msg.src.get_name())
            #logger.debug("Structure: %s",structure.to_string())
            #GST_LOG("structure is %" GST_PTR_FORMAT,structure)
            if msg.src.get_name() == "tsdemux":

                logger.debug("Structure:%s", structure.to_string())

            if msg.src.get_name() == "splitmuxsink":
                logger.debug("Structure:%s", structure.get_name())
                if structure.get_name() == "splitmuxsink-fragment-closed":
                    filename = os.path.basename(
                        structure.get_string("location"))
                    #(result,index)=structure.get_int("index")
                    index = 0
                    (result,
                     running_time) = structure.get_clock_time("running-time")
                    logger.debug(filename + " received. Running-time:" +
                                 str(convert_ns(running_time)))
                    self.playlist.append_segment({
                        "segment_name": self.urlroot + '/' + self.channel + '/' + filename,
                        "seq_num": index,
                    })
                    self.playlist.renderPlaylist()
            if msg.src.get_name() == "tsparse" and structure.get_name() == "sit":
                section = GstMpegts.message_parse_mpegts_section(msg)
                if section:
                    spliceinfo = section.get_scte_splice_info()
                    if spliceinfo.splice_command_type == 5:
                        logger.info(
                            "Splice Insert received. EventID:%#5.8x, In/~Out:%s, PCR:%s",
                            spliceinfo.splice_insert.splice_event_id,
                            spliceinfo.splice_insert.out_of_network_indicator,
                            convert_ns(spliceinfo.splice_insert.pts_time,
                                       90000))
                        force_key_unit_event = GstVideo.video_event_new_upstream_force_key_unit(
                            spliceinfo.splice_insert.pts_time, True,
                            spliceinfo.splice_insert.splice_event_id)
                        result = self.splitmuxsink.get_property(
                            "sink").send_event(force_key_unit_event)
                        logger.info(
                            "GstForceKeyUnit event sent downstream with result:%s.",
                            result)
Example #27
    def add_region(self,
                   x,
                   y,
                   w,
                   h,
                   label_id: int,
                   confidence: float = 0.0,
                   region_tensor: Gst.Structure = None,
                   normalized: bool = False) -> RegionOfInterest:
        if normalized:
            x = int(x * self.video_info().width)
            y = int(y * self.video_info().height)
            w = int(w * self.video_info().width)
            h = int(h * self.video_info().height)

        if not self.__is_bounded(x, y, w, h):
            x_init, y_init, w_init, h_init = x, y, w, h
            x, y, w, h = self.__clip(x, y, w, h)
            Gst.debug(
                "ROI coordinates [x, y, w, h] are out of image borders and will be clipped: [{}, {}, {}, {}] -> "
                "[{}, {}, {}, {}]".format(x_init, y_init, w_init, h_init, x, y,
                                          w, h))

        label = self.__get_label_by_label_id(region_tensor, label_id)

        video_roi_meta = GstVideo.buffer_add_video_region_of_interest_meta(
            self.__buffer, label, x, y, w, h)

        if not region_tensor:
            region_tensor = Gst.Structure.new_empty("detection")
        else:
            region_tensor.set_name(
                "detection")  # ensure the tensor structure we attach is named "detection"

        region_tensor.set_value('label_id', label_id)
        region_tensor.set_value('confidence', confidence)
        region_tensor.set_value('x_min', x / self.video_info().width)
        region_tensor.set_value('x_max', (x + w) / self.video_info().width)
        region_tensor.set_value('y_min', y / self.video_info().height)
        region_tensor.set_value('y_max', (y + h) / self.video_info().height)

        self.__regions.append(
            RegionOfInterest(
                ctypes.cast(
                    hash(video_roi_meta),
                    ctypes.POINTER(VideoRegionOfInterestMeta)).contents))
        self.__regions[-1].add_tensor(tensor=region_tensor)
        return self.__regions[-1]
Example #28
    def run_inference(self, inf_buf, inf_caps):
        start = time.monotonic()
        inference_time, data = self.engine.run_inference(inf_buf)

        # Underlying output tensor is owned by engine and if we want to
        # keep the data around while running another inference we have
        # to make our own copy.
        self.inf_q.put(data.copy())

        if self.save_every_n_frames > 0 and self.frames % self.save_every_n_frames == 0:
            meta = GstVideo.buffer_get_video_meta(inf_buf)
            result, mapinfo = inf_buf.map(Gst.MapFlags.READ)
            image = Image.frombytes('RGB', (meta.width, meta.height), mapinfo.data)
            image.save('inf_{:05d}.png'.format(self.frames))
            inf_buf.unmap(mapinfo)
        elapsed = time.monotonic() - start
        self.inf_times.append(elapsed)
Example #29
    def __init__(self, engine, save_frames=False, print_stats=False):
        self.engine = engine
        self.save_frames = save_frames
        self.print_stats = print_stats
        self.inf_q = queue.SimpleQueue()
        self.trash = queue.SimpleQueue()
        self.trash_lock = threading.RLock()
        self.vinfo = GstVideo.VideoInfo()
        self.glcontext = None
        self.pool = None
        self.fbo = None
        self.default_shader = None
        self.hm_shader = None
        self.hm_tex_id = 0  # Instantaneous heatmap
        self.agg_hm_tex_id = 0  # Agglomerated heatmap (people density)
        self.vao_id = 0
        self.positions_buffer = 0
        self.texcoords_buffer = 0
        self.vbo_indices_buffer = 0
        self.frames = 0
        self.reset_display_toggles()
        self.inf_times = collections.deque(maxlen=100)
        self.agg_times = collections.deque(maxlen=100)
        self.running = True
        self.gc_thread = threading.Thread(target=self.gc_loop)
        self.gc_thread.start()

        # Person count
        self.people_count_last_n = collections.deque(maxlen=60)
        self.people_count_log = collections.deque(maxlen=360)

        # This will hold the time-averaged people densities.
        self.heatmap_sum = None

        # If we have GPIO then init GPIO buttons
        try:
            import gpio
            self.ui = gpio.UI_EdgeTpuDevBoard()
            print("GPIO detected!")
        except Exception:
            print("Unable to load GPIO - Control modes using keyboard.")
            self.ui = None
Example #30
 def __init__(self, pool, glupload):
     self.glcontext = glupload.context
     res, self.dmabuf = pool.acquire_buffer()
     assert res == Gst.FlowReturn.OK
     assert GstAllocators.is_dmabuf_memory(self.dmabuf.peek_memory(0))
     with _gst_buffer_map(self.dmabuf, Gst.MapFlags.WRITE) as mapped:
         self.ptr = ctypes.addressof(mapped)
         self.len = ctypes.sizeof(mapped)
         self.clear()
     meta = GstVideo.buffer_get_video_meta(self.dmabuf)
     assert meta
     self.surface = libcairo.cairo_image_surface_create_for_data(
         self.ptr, int(cairo.FORMAT_ARGB32), meta.width, meta.height,
         meta.stride[0])
     self.cairo = libcairo.cairo_create(self.surface)
     res, self.gl_buffer = glupload.perform_with_buffer(self.dmabuf)
     assert res == GstGL.GLUploadReturn.DONE
     memory = self.gl_buffer.peek_memory(0)
     assert GstGL.is_gl_memory(memory)
     self.texture_id = libgstgl.gst_gl_memory_get_texture_id(hash(memory))
     self.sync = GstGL.buffer_add_gl_sync_meta(self.glcontext,
                                               self.gl_buffer)
Example #31
    def __init__(self):
        super(PySpinSrc, self).__init__()

        # Initialize properties before Base Class initialization
        self.info = GstVideo.VideoInfo()

        # Properties
        self.auto_exposure: bool = self.DEFAULT_AUTO_EXPOSURE
        self.auto_gain: bool = self.DEFAULT_AUTO_GAIN
        self.exposure_time: float = self.DEFAULT_EXPOSURE_TIME
        self.gain: float = self.DEFAULT_GAIN
        self.auto_wb: bool = self.DEFAULT_AUTO_WB
        self.wb_blue: float = self.DEFAULT_WB_BLUE
        self.wb_red: float = self.DEFAULT_WB_RED
        self.h_binning: int = self.DEFAULT_H_BINNING
        self.v_binning: int = self.DEFAULT_V_BINNING
        self.offset_x: int = self.DEFAULT_OFFSET_X
        self.offset_y: int = self.DEFAULT_OFFSET_Y
        self.center_x: int = self.DEFAULT_CENTER_X
        self.center_y: int = self.DEFAULT_CENTER_Y
        self.num_cam_buffers: int = self.DEFAULT_NUM_BUFFERS
        self.serial: str = self.DEFAULT_SERIAL_NUMBER
        self.user_set: str = self.DEFAULT_USER_SET

        # Camera capabilities
        self.camera_caps = None

        # Image Capture Device
        self.image_acquirer: ImageAcquirer = None

        # Buffer timing
        self.timestamp_offset: int = 0
        self.previous_timestamp: int = 0

        # Base class properties
        self.set_live(True)
        self.set_format(Gst.Format.TIME)
Example #32
    def add_region(self,
                   x,
                   y,
                   w,
                   h,
                   label: str = "",
                   confidence: float = 0.0,
                   normalized: bool = False) -> RegionOfInterest:
        if normalized:
            x = int(x * self.video_info().width)
            y = int(y * self.video_info().height)
            w = int(w * self.video_info().width)
            h = int(h * self.video_info().height)

        if not self.__is_bounded(x, y, w, h):
            x_init, y_init, w_init, h_init = x, y, w, h
            x, y, w, h = self.__clip(x, y, w, h)
            warn(
                "ROI coordinates [x, y, w, h] are out of image borders and will be clipped: [{}, {}, {}, {}] -> "
                "[{}, {}, {}, {}]".format(x_init, y_init, w_init, h_init, x, y,
                                          w, h),
                stacklevel=2)

        video_roi_meta = GstVideo.buffer_add_video_region_of_interest_meta(
            self.__buffer, label, x, y, w, h)
        roi = RegionOfInterest(
            ctypes.cast(hash(video_roi_meta),
                        ctypes.POINTER(VideoRegionOfInterestMeta)).contents)

        tensor = roi.add_tensor("detection")
        tensor['confidence'] = float(confidence)
        tensor['x_min'] = float(x / self.video_info().width)
        tensor['x_max'] = float((x + w) / self.video_info().width)
        tensor['y_min'] = float(y / self.video_info().height)
        tensor['y_max'] = float((y + h) / self.video_info().height)

        return roi
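A quick check of the normalized-coordinate branch above (plain arithmetic; the frame size is hypothetical):

# With a 640x360 frame, the normalized box (0.5, 0.5, 0.25, 0.25)
# becomes the pixel rectangle (320, 180, 160, 90).
width, height = 640, 360
x, y, w, h = 0.5, 0.5, 0.25, 0.25
assert (int(x * width), int(y * height), int(w * width), int(h * height)) == (320, 180, 160, 90)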
Example #34
	def probe_callback_appsrc(self,appsrc_pad,info):
		info_event = info.get_event()
		if GstVideo.video_event_is_force_key_unit(info_event):
			print "--> hlssink sent force key unit"
			self.sendMessage("force_key_unit")
		return Gst.PadProbeReturn.PASS	
Example #35
 def onSyncMessage(self, bus, message):
     if GstVideo.is_video_overlay_prepare_window_handle_message(message):
         message.src.set_property('force-aspect-ratio', True)
         message.src.set_window_handle(self.video_tab.GetHandle())
Example #36
	def send_event(self):
		self.count = self.count + 1
		# if self.count > 2147483640:
		#     self.count = 0
		pushed = self.hlssink_pad.push_event(GstVideo.video_event_new_upstream_force_key_unit(Gst.CLOCK_TIME_NONE,True,self.count))
		print "pushed... {0}".format(pushed)
Example #40
 def bus_sync_handler(bus, msg, pipeline):
     if not GstVideo.is_video_overlay_prepare_window_handle_message(msg):
         return Gst.BusSyncReply.PASS
     msg.src.set_window_handle(self.da.get_window().get_xid())
     return Gst.BusSyncReply.DROP
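Sync handlers run on the streaming thread, so a handler like the one above must be installed before the pipeline starts; note it also assumes self.da is in scope where it is defined. A minimal wiring sketch:

bus = pipeline.get_bus()
bus.set_sync_handler(bus_sync_handler, pipeline)  # pipeline becomes the 3rd arg
pipeline.set_state(Gst.State.PLAYING)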