def do_gst_gl_filter_set_caps(self, in_caps, out_caps):
    """Compute letterbox placement and scale for the negotiated caps.

    Fits the input aspect ratio inside the output frame, centered, with
    slack on whichever axis does not match. Results are cached as integer
    pixel coordinates (x, y, w, h) plus scale factors normalized to the
    output size.
    """
    in_info = GstVideo.VideoInfo()
    in_info.from_caps(in_caps)
    out_info = GstVideo.VideoInfo()
    out_info.from_caps(out_caps)

    # NOTE(review): from_caps results are unchecked; zero-sized caps would
    # raise ZeroDivisionError below — presumably caps are already validated.
    in_ratio = in_info.width / in_info.height
    out_ratio = out_info.width / out_info.height

    if in_ratio > out_ratio:
        # Input is wider than output: pin width, shrink height, pad top/bottom.
        w = out_info.width
        h = out_info.width / in_ratio
        x = 0
        y = (out_info.height - h) / 2
    elif in_ratio < out_ratio:
        # Input is taller than output: pin height, shrink width, pad left/right.
        w = out_info.height * in_ratio
        h = out_info.height
        x = (out_info.width - w) / 2
        y = 0
    else:
        # Aspect ratios match: fill the entire output frame.
        w, h = out_info.width, out_info.height
        x, y = 0, 0

    # Truncate to whole pixels before deriving the normalized scales.
    self.x, self.y = int(x), int(y)
    self.w, self.h = int(w), int(h)
    self.scale_x = self.w / out_info.width
    self.scale_y = self.h / out_info.height
    return True
def __init__(self, buffer: Gst.Buffer, video_info: GstVideo.VideoInfo = None, caps: Gst.Caps = None):
    """Wrap a Gst.Buffer, resolving its video info from the first available source.

    Resolution priority: an explicit ``video_info`` argument, then ``caps``
    (parsed into a fresh VideoInfo), then the buffer's attached video meta
    (only width/height are copied from it). If none is available,
    ``__video_info`` remains None.
    """
    self.__buffer = buffer
    self.__video_info = None
    if video_info:
        self.__video_info = video_info
    elif caps:
        self.__video_info = GstVideo.VideoInfo()
        self.__video_info.from_caps(caps)
    else:
        # Fix: the original called self.video_meta() twice (once to test,
        # once per field read); hoist it into a local instead.
        meta = self.video_meta()
        if meta:
            self.__video_info = GstVideo.VideoInfo()
            self.__video_info.width = meta.width
            self.__video_info.height = meta.height
def __init__(self, engine, src_size, save_every_n_frames=-1, print_stats=False):
    """Set up inference queues, GL placeholders, timing stats, and the GC thread."""
    self.engine = engine
    self.src_size = src_size
    self.save_every_n_frames = save_every_n_frames
    self.print_stats = print_stats

    # Work queues plus the lock guarding trash collection.
    self.inf_q = queue.SimpleQueue()
    self.trash = queue.SimpleQueue()
    self.trash_lock = threading.RLock()

    self.vinfo = GstVideo.VideoInfo()

    # GL resources are allocated later; None / 0 mean "not yet created".
    self.glcontext = None
    self.pool = None
    self.fbo = None
    self.default_shader = None
    self.hm_shader = None
    self.hm_tex_id = 0  # Instantaneous heatmap
    self.vao_id = 0
    self.positions_buffer = 0
    self.texcoords_buffer = 0
    self.vbo_indices_buffer = 0

    self.frames = 0
    self.reset_display_toggles()

    # Rolling windows (last 100 samples) for timing statistics.
    self.inf_times = collections.deque(maxlen=100)
    self.agg_times = collections.deque(maxlen=100)
    self.frame_times = collections.deque(maxlen=100)

    # Start the background garbage-collection loop.
    self.running = True
    self.gc_thread = threading.Thread(target=self.gc_loop)
    self.gc_thread.start()

    self.last_frame_time = time.monotonic()
def callback(app_sink, obj: 'ISource'):
    """Appsink "new-sample" handler: copy the pulled frame into obj.buffer.

    Runs on a GStreamer streaming thread. Pulls the sample, maps its buffer
    read-only, and copies the packed 3-channel bytes into the shared numpy
    array under obj.lock, reallocating only when the frame shape changes.
    Always returns Gst.FlowReturn.OK on success so the pipeline keeps flowing.
    """
    sample = app_sink.emit("pull-sample")
    if sample:
        caps = sample.get_caps()
        gst_buffer = sample.get_buffer()
        # Fix 1: map() was inside the try, so a failed/raising map left
        # buffer_map unbound when the finally clause ran unmap().
        (ret, buffer_map) = gst_buffer.map(Gst.MapFlags.READ)
        # Fix 2: the original ignored `ret`; reading buffer_map.data after
        # a failed map is undefined.
        if not ret:
            return Gst.FlowReturn.ERROR
        try:
            video_info = GstVideo.VideoInfo()
            video_info.from_caps(caps)
            # Assumes packed 3-channel frames (e.g. RGB/BGR) — the reshape
            # raises for any other layout. TODO confirm against the caps filter.
            np_data = np.frombuffer(buffer_map.data, np.uint8).reshape(
                (video_info.height, video_info.width, 3))
            with obj.lock:
                buffer = obj.buffer
                if buffer is None or buffer.shape != np_data.shape:
                    # First frame or resolution change: allocate a new copy.
                    obj.buffer = np_data.copy()
                else:
                    # Steady state: reuse the existing array in place.
                    np.copyto(buffer, np_data)
        finally:
            gst_buffer.unmap(buffer_map)
    return Gst.FlowReturn.OK
def __init__(self, buffer, caps=None):
    """Capture geometry and pixel format for one video buffer.

    Prefers the buffer's attached video meta; when absent, parses the caps
    into a VideoInfo instead (both expose width/height).
    """
    self.buffer = buffer
    self.caps = caps
    # NOTE(review): caps defaults to None but is dereferenced unconditionally
    # here — callers presumably always supply it; verify at call sites.
    self.caps_str = self.caps.get_structure(0)

    meta = GstVideo.buffer_get_video_meta(buffer)
    if not meta:
        # No meta attached: derive the same fields from the caps.
        meta = GstVideo.VideoInfo()
        meta.from_caps(self.caps)
    self.video_meta = meta

    self.width = meta.width
    self.height = meta.height
    self.format_str = self.caps_str.get_string("format")
    self.channels = VideoFrame.FORMAT_CHANNELS[self.format_str]
def __init__(self):
    """Initialize the in-place transform and arm GPIO pin 16 for edge events."""
    GstBase.BaseTransform.__init__(self)
    self.set_in_place(True)
    self.videoinfo = GstVideo.VideoInfo()

    # Timestamps of the most recent GPIO event and buffer, respectively.
    self.last_gpio_ts = 0
    self.last_buf_ts = 0

    # Board-numbered pin, no internal pull resistor; gpio_event fires on
    # each falling edge.
    GPIO.setmode(GPIO.BOARD)
    pin = 16
    GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_OFF)
    GPIO.add_event_detect(pin, GPIO.FALLING, callback=self.gpio_event)
def do_set_caps(self, icaps, ocaps):
    """Configure the audio-to-waveform renderer for the negotiated caps.

    Parses the incoming audio caps and outgoing video caps, sets up an
    S32 audio converter, builds a full-frame matplotlib Agg canvas sized
    to the output resolution, and precomputes the ring-buffer capacity
    and per-frame sample/offset bookkeeping. Returns True.
    """
    in_info = GstAudio.AudioInfo()
    in_info.from_caps(icaps)
    out_info = GstVideo.VideoInfo()
    out_info.from_caps(ocaps)

    # Convert incoming audio to signed 32-bit, keeping rate/channels/layout.
    self.convert_info = GstAudio.AudioInfo()
    self.convert_info.set_format(GstAudio.AudioFormat.S32, in_info.rate, in_info.channels, in_info.position)
    self.converter = GstAudio.AudioConverter.new(GstAudio.AudioConverterFlags.NONE, in_info, self.convert_info, None)

    # Size the figure in inches so the rendered canvas matches the output
    # video resolution exactly at the figure's DPI.
    self.fig = plt.figure()
    dpi = self.fig.get_dpi()
    self.fig.patch.set_alpha(0.3)
    self.fig.set_size_inches(out_info.width / float(dpi), out_info.height / float(dpi))

    # Axes span the whole figure with no decorations; y covers the full
    # signed 32-bit sample range.
    self.ax = plt.Axes(self.fig, [0., 0., 1., 1.])
    self.fig.add_axes(self.ax)
    self.ax.set_axis_off()
    self.ax.set_ylim((GLib.MININT, GLib.MAXINT))

    # Render off-screen via the Agg backend; self.h holds the plot line
    # (created lazily on first draw).
    self.agg = self.fig.canvas.switch_backends(FigureCanvasAgg)
    self.h = None

    # Thin the displayed window so roughly one sample maps to one pixel
    # column; `cap` is the ring-buffer capacity after thinning.
    samplesperwindow = int(in_info.rate * in_info.channels * self.window_duration)
    self.thinning_factor = max(int(samplesperwindow / out_info.width - 1), 1)
    cap = int(samplesperwindow / self.thinning_factor)
    self.ax.set_xlim([0, cap])
    self.ringbuffer = RingBuffer(capacity=cap)
    self.ringbuffer.extend([0.0] * cap)

    # One output frame lasts fps_d/fps_n seconds, in Gst clock units.
    self.frame_duration = Gst.util_uint64_scale_int(Gst.SECOND, out_info.fps_d, out_info.fps_n)
    self.next_time = self.frame_duration

    # Cache the empty-axes background for fast blitting on each frame.
    self.agg.draw()
    self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)

    # Number of audio samples consumed per video frame, plus running offsets.
    self.samplesperbuffer = Gst.util_uint64_scale_int(in_info.rate * in_info.channels, out_info.fps_d, out_info.fps_n)
    self.next_offset = self.samplesperbuffer
    self.cur_offset = 0
    self.buf_offset = 0
    return True
def callback(appsink, user_data):
    """Appsink "new-sample" handler: print the center pixel value and PTS.

    Runs on a GStreamer streaming thread. Pulls the sample, maps the buffer
    read-only, and prints one status line per frame (rewritten in place with
    a carriage return). Increments the module-level ``framecount``.
    """
    sample = appsink.emit("pull-sample")
    if sample:
        caps = sample.get_caps()
        gst_buffer = sample.get_buffer()
        # Fix 1: map() was inside the try, so a failed/raising map left
        # buffer_map unbound when the finally clause called unmap().
        (ret, buffer_map) = gst_buffer.map(Gst.MapFlags.READ)
        # Fix 2: the original ignored `ret`; reading buffer_map.data after
        # a failed map is undefined.
        if not ret:
            return Gst.FlowReturn.ERROR
        try:
            video_info = GstVideo.VideoInfo()
            video_info.from_caps(caps)
            # Bytes per sample of the first component (e.g. 1 for 8-bit).
            stride = video_info.finfo.bits / 8
            # Byte offset of the frame's center: half a row plus half the frame.
            pixel_offset = int(video_info.width / 2 * stride +
                               video_info.width * video_info.height / 2 * stride)
            # This is only one byte of the pixel. For formats like BGRx the
            # full pixel is pixel_offset => B, +1 => G, +2 => R, +3 => x.
            pixel_data = buffer_map.data[pixel_offset]
            timestamp = gst_buffer.pts
            global framecount
            output_str = "Captured frame {}, Pixel Value={} Timestamp={}".format(
                framecount, pixel_data, timestamp)
            print(output_str, end="\r")  # print with \r to rewrite line
            framecount += 1
        finally:
            gst_buffer.unmap(buffer_map)
    return Gst.FlowReturn.OK
def __init__(self, engine, save_frames=False, print_stats=False):
    """Set up inference queues, GL placeholders, people-count stats, the GC
    thread, and (best-effort) the GPIO control UI."""
    self.engine = engine
    self.save_frames = save_frames
    self.print_stats = print_stats

    # Work queues plus the lock guarding trash collection.
    self.inf_q = queue.SimpleQueue()
    self.trash = queue.SimpleQueue()
    self.trash_lock = threading.RLock()

    self.vinfo = GstVideo.VideoInfo()

    # GL resources are allocated later; None / 0 mean "not yet created".
    self.glcontext = None
    self.pool = None
    self.fbo = None
    self.default_shader = None
    self.hm_shader = None
    self.hm_tex_id = 0  # Instantaneous heatmap
    self.agg_hm_tex_id = 0  # Agglomerated heatmap (people density)
    self.vao_id = 0
    self.positions_buffer = 0
    self.texcoords_buffer = 0
    self.vbo_indices_buffer = 0

    self.frames = 0
    self.reset_display_toggles()

    # Rolling timing windows for statistics reporting.
    self.inf_times = collections.deque(maxlen=100)
    self.agg_times = collections.deque(maxlen=100)

    # Start the background garbage-collection loop.
    self.running = True
    self.gc_thread = threading.Thread(target=self.gc_loop)
    self.gc_thread.start()

    # Person count
    self.people_count_last_n = collections.deque(maxlen=60)
    self.people_count_log = collections.deque(maxlen=360)
    # This will hold the time-averaged people densities.
    self.heatmap_sum = None

    # If we have GPIO then init GPIO buttons; keyboard control otherwise.
    try:
        import gpio
        self.ui = gpio.UI_EdgeTpuDevBoard()
        print("GPIO detected!")
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the intended best-effort fallback.
        print("Unable to load GPIO - Control modes using keyboard.")
        self.ui = None
def __init__(self):
    """Initialize default camera properties and base-class (live source) state."""
    super(PySpinSrc, self).__init__()

    # Initialize properties before Base Class initialization
    self.info = GstVideo.VideoInfo()

    # Exposure / gain controls.
    self.auto_exposure: bool = self.DEFAULT_AUTO_EXPOSURE
    self.auto_gain: bool = self.DEFAULT_AUTO_GAIN
    self.exposure_time: float = self.DEFAULT_EXPOSURE_TIME
    self.gain: float = self.DEFAULT_GAIN

    # White balance.
    self.auto_wb: bool = self.DEFAULT_AUTO_WB
    self.wb_blue: float = self.DEFAULT_WB_BLUE
    self.wb_red: float = self.DEFAULT_WB_RED

    # Sensor geometry: binning, ROI offset, centering.
    self.h_binning: int = self.DEFAULT_H_BINNING
    self.v_binning: int = self.DEFAULT_V_BINNING
    self.offset_x: int = self.DEFAULT_OFFSET_X
    self.offset_y: int = self.DEFAULT_OFFSET_Y
    self.center_x: int = self.DEFAULT_CENTER_X
    self.center_y: int = self.DEFAULT_CENTER_Y

    # Device selection and configuration.
    self.num_cam_buffers: int = self.DEFAULT_NUM_BUFFERS
    self.serial: str = self.DEFAULT_SERIAL_NUMBER
    self.user_set: str = self.DEFAULT_USER_SET

    # Camera capabilities (negotiated later).
    self.camera_caps = None

    # Image Capture Device
    self.image_acquirer: ImageAcquirer = None

    # Buffer timing
    self.timestamp_offset: int = 0
    self.previous_timestamp: int = 0

    # Base class properties: live source producing timestamped buffers.
    self.set_live(True)
    self.set_format(Gst.Format.TIME)
def overlay_caps_changed(self, overlay, caps):
    """Remember the overlay's newly negotiated caps as a parsed VideoInfo."""
    # Replace any previously cached info with one parsed from the new caps.
    self.overlay_caps = GstVideo.VideoInfo()
    self.overlay_caps.from_caps(caps)