def __assetLoadedCb(self, proxy, res, asset, transcoder):
    try:
        GES.Asset.request_finish(res)
    except GLib.Error as e:
        if transcoder:
            self.emit("error-preparing-asset", asset, proxy, e)
            del transcoder
        else:
            self.__createTranscoder(asset)
        return

    if not transcoder:
        if not self.__assetsMatch(asset, proxy):
            return self.__createTranscoder(asset)
    else:
        transcoder.props.pipeline.props.video_filter.finalize(proxy)
        transcoder.props.pipeline.props.audio_filter.finalize(proxy)
        del transcoder

    if asset.get_info().get_duration() != proxy.get_info().get_duration():
        self.error(
            "Asset %s (duration=%s) and created proxy %s (duration=%s) do not"
            " have the same duration; this should *never* happen, please file"
            " a bug with the media files." % (
                asset.get_id(),
                Gst.TIME_ARGS(asset.get_info().get_duration()),
                proxy.get_id(),
                Gst.TIME_ARGS(proxy.get_info().get_duration())))

    self.emit("proxy-ready", asset, proxy)
    self.__emitProgress(proxy, 100)

def handle_message(data, msg):
    if msg.type == Gst.MessageType.ERROR:
        err, debug_info = msg.parse_error()
        print("Error received from element %s: %s" % (msg.src.get_name(), err), file=sys.stderr)
        print("Debugging information: %s" % debug_info, file=sys.stderr)
        data.terminate = True
    elif msg.type == Gst.MessageType.EOS:
        print("End-Of-Stream reached.")
        data.terminate = True
    elif msg.type == Gst.MessageType.DURATION_CHANGED:
        # The duration has changed, mark the current one as invalid
        data.duration = Gst.CLOCK_TIME_NONE
    elif msg.type == Gst.MessageType.STATE_CHANGED:
        if msg.src == data.playbin:
            old_state, new_state, pending_state = msg.parse_state_changed()
            print("Pipeline state changed from %s to %s." % (old_state.value_nick, new_state.value_nick))
            # Remember whether we are in the PLAYING state or not
            data.playing = (new_state == Gst.State.PLAYING)
            if data.playing:
                # We just moved to PLAYING. Check if seeking is possible
                query = Gst.Query.new_seeking(Gst.Format.TIME)
                if data.playbin.query(query):
                    (_, data.seek_enabled, start, end) = query.parse_seeking()
                    if data.seek_enabled:
                        print("Seeking is ENABLED from %s to %s" % (Gst.TIME_ARGS(start), Gst.TIME_ARGS(end)))
                    else:
                        print("Seeking is DISABLED for this stream.")
                else:
                    print("Seeking query failed.", file=sys.stderr)
    else:
        print("Unexpected message received.", file=sys.stderr)

def do_transform_ip(self, buf: Gst.Buffer) -> Gst.FlowReturn:
    policy, prio = threadsched.get_curschedparam()
    if policy != threadsched.SCHED_RR:
        ret = threadsched.set_curschedparam(threadsched.SCHED_RR, 10)
        if ret == 0:
            print("gstreamer: moved to RT scheduler")

    ts = self.base_time + buf.pts
    dt = ts - self.last_buf_ts
    self.last_buf_ts = ts
    print("buf :", Gst.TIME_ARGS(ts), "dt:", Gst.TIME_ARGS(dt))
    return Gst.FlowReturn.OK

def gpio_event(self, channel):
    policy, prio = threadsched.get_curschedparam()
    if policy != threadsched.SCHED_RR:
        ret = threadsched.set_curschedparam(threadsched.SCHED_RR, 10)
        if ret == 0:
            print("gpio: moved to RT scheduler")

    clock = self.get_clock()
    # The clock will only be present after the stream has started,
    # but GPIO events can come in at any time.
    if clock:
        ts = clock.get_time()
        dt = ts - self.last_gpio_ts
        self.last_gpio_ts = ts
        print("gpio:", Gst.TIME_ARGS(ts), "dt:", Gst.TIME_ARGS(dt))

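gpio_event() has the single-argument callback signature used by RPi.GPIO edge detection. A minimal registration sketch, assuming RPi.GPIO, a rising edge on an illustrative BCM pin, and an already-constructed object named element that provides gpio_event:

import RPi.GPIO as GPIO

GPIO_PIN = 17  # illustrative BCM pin number

GPIO.setmode(GPIO.BCM)
GPIO.setup(GPIO_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# RPi.GPIO calls the handler with the channel number as its only argument,
# matching gpio_event(self, channel) once bound to the element instance.
GPIO.add_event_detect(GPIO_PIN, GPIO.RISING, callback=element.gpio_event)
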
def position_changed_cb(pipeline, position, scenario, action, wanted_position):
    if pipeline._busy_async:
        return

    if pipeline._next_seek:
        return

    print(str(wanted_position), str(position))
    if wanted_position != position:
        scenario.report_simple(
            GLib.quark_from_string("scenario::execution-error"),
            "Position after seek (%s) does not match the wanted position (%s)" % (
                Gst.TIME_ARGS(position), Gst.TIME_ARGS(wanted_position)))

    pipeline.disconnect_by_func(position_changed_cb)
    action.set_done()

def do_transform_ip(self, buffer):
    if self.pts_offset is None:
        self.pts_offset = self.first_pts - buffer.pts
    buffer.pts += self.pts_offset
    Gst.debug("do_transform_ip: timestamp: %s" % (Gst.TIME_ARGS(buffer.pts)))
    return Gst.FlowReturn.OK

def do_transform_ip(self, buffer_out):
    Gst.info("timestamp(buffer):%s" % (Gst.TIME_ARGS(buffer_out.pts)))

    # Get frame dimensions
    config = buffer_out.pool.get_config()
    caps = config['caps']
    struct = caps.get_structure(0)
    (ok, width) = struct.get_int('width')
    if not ok:
        raise RuntimeError("Failed to get width")
    (ok, height) = struct.get_int('height')
    if not ok:
        raise RuntimeError("Failed to get height")

    mo = buffer_out.mini_object
    saved_refcount = mo.refcount
    mo.refcount = 1

    # for GObject instances, hash() returns the pointer to the C struct
    pbuffer = hash(buffer_out)
    mapping = _GstMapInfo()
    success = _libgst.gst_buffer_map(pbuffer, mapping, Gst.MapFlags.WRITE)
    if not success:
        raise RuntimeError("Could not map buffer")

    ctypes_region = ctypes.cast(mapping.data, ctypes.POINTER(ctypes.c_byte * mapping.size))
    raw_ptr = ctypes_region.contents

    # create a ctypes array from the raw pointer
    ptr = (ctypes.c_byte * mapping.size).from_address(ctypes.addressof(raw_ptr))

    # cast the array to uint32 to work with RGBA/BGRx data
    # FIXME: hardcode caps to only allow 4-byte modes?
    ptr = ctypes.cast(ptr, ctypes.POINTER(ctypes.c_uint32))
    np_arr = np.ctypeslib.as_array(ptr, shape=(height, width))

    # This is fast: a single call into numpy, probably vectorized in C.
    # If the image is BGRx, this erases the red channel.
    np_arr &= 0xff00ffff

    # invert colors for the top quarter of the rows
    for i in range(height // 4):
        np_arr[i] ^= 0x00ffffff

    # This is SLOW: multiple calls into numpy for each element.
    # However, if performance is not a concern (for example, when
    # processing video offline), you can add sync=false to the last
    # pipeline element (the sink) to avoid the
    # "gst_base_sink_is_too_late" error.
    # for i in range(height):
    #     for j in range(width):
    #         np_arr[i][j] //= 2

    _libgst.gst_buffer_unmap(pbuffer, mapping)
    mo.refcount = saved_refcount
    return Gst.FlowReturn.OK

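This transform (and the detector variant further below) relies on two module-level helpers that are not shown: _GstMapInfo, a ctypes mirror of the C GstMapInfo struct, and _libgst, a handle to the GStreamer core library. A minimal sketch of how they are typically defined, assuming GStreamer 1.x on Linux (the library file name and field layout follow the public gst/gstmemory.h definition and are stated here as an assumption):

import ctypes

# Assumption: GStreamer 1.x core library on Linux; adjust the name for
# other platforms (e.g. a .dylib on macOS or a .dll on Windows).
_libgst = ctypes.CDLL("libgstreamer-1.0.so.0")


class _GstMapInfo(ctypes.Structure):
    # Mirrors the public GstMapInfo layout from gst/gstmemory.h
    _fields_ = [
        ("memory", ctypes.c_void_p),           # GstMemory *memory
        ("flags", ctypes.c_int),               # GstMapFlags flags
        ("data", ctypes.c_void_p),             # guint8 *data
        ("size", ctypes.c_size_t),             # gsize size
        ("maxsize", ctypes.c_size_t),          # gsize maxsize
        ("user_data", ctypes.c_void_p * 4),    # gpointer user_data[4]
        ("_gst_reserved", ctypes.c_void_p * 4),
    ]


_GST_MAP_INFO_POINTER = ctypes.POINTER(_GstMapInfo)

_libgst.gst_buffer_map.argtypes = [ctypes.c_void_p, _GST_MAP_INFO_POINTER, ctypes.c_int]
_libgst.gst_buffer_map.restype = ctypes.c_bool
_libgst.gst_buffer_unmap.argtypes = [ctypes.c_void_p, _GST_MAP_INFO_POINTER]
_libgst.gst_buffer_unmap.restype = None

The refcount juggling around the map call exists because gst_buffer_map with WRITE flags requires a writable (refcount 1) buffer; PyGObject's extra references are hidden by temporarily forcing the refcount to 1 and restoring it after unmapping.
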
def stop(loop, pipeline):
    _, position = pipeline.query_position(Gst.Format.TIME)
    print("Position: %s\r" % Gst.TIME_ARGS(position))

    if position > 10 * Gst.SECOND:
        loop.quit()
        print("Stopping after 10 seconds")
        return False

    return True

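stop() follows the GLib timeout-source convention: return True to stay scheduled, False to remove the source. A minimal sketch of how it might be driven, assuming a playbin pipeline and a GLib.MainLoop (the URI is an illustrative placeholder):

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GLib

Gst.init(None)

pipeline = Gst.ElementFactory.make("playbin", None)
pipeline.set_property("uri", "file:///path/to/media.webm")  # illustrative URI
pipeline.set_state(Gst.State.PLAYING)

loop = GLib.MainLoop()
# Poll the position twice per second; once stop() returns False the
# timeout source is removed (the loop has already been quit by then).
GLib.timeout_add(500, stop, loop, pipeline)
loop.run()
pipeline.set_state(Gst.State.NULL)
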
def do_transform_ip(self, buffer_out):
    Gst.info("timestamp(buffer):%s" % (Gst.TIME_ARGS(buffer_out.pts)))

    if self.__mxnet_detector__ is None:
        self.__mxnet_detector__ = self.initializeDetector()

    # Get frame dimensions
    config = buffer_out.pool.get_config()
    caps = config['caps']
    struct = caps.get_structure(0)
    (ok, width) = struct.get_int('width')
    if not ok:
        raise RuntimeError("Failed to get width")
    (ok, height) = struct.get_int('height')
    if not ok:
        raise RuntimeError("Failed to get height")

    mo = buffer_out.mini_object
    saved_refcount = mo.refcount
    mo.refcount = 1

    # for GObject instances, hash() returns the pointer to the C struct
    pbuffer = hash(buffer_out)
    mapping = _GstMapInfo()
    success = _libgst.gst_buffer_map(pbuffer, mapping, Gst.MapFlags.WRITE)
    if not success:
        raise RuntimeError("Could not map buffer")

    ctypes_region = ctypes.cast(
        mapping.data, ctypes.POINTER(ctypes.c_byte * mapping.size))
    raw_ptr = ctypes_region.contents

    # create a ctypes array from the raw pointer
    ptr = (ctypes.c_byte * mapping.size).from_address(
        ctypes.addressof(raw_ptr))

    # view the buffer as uint8 so it maps onto a (height, width, 4)
    # RGBA/BGRx array
    # FIXME: hardcode caps to only allow 4-byte modes?
    ptr = ctypes.cast(ptr, ctypes.POINTER(ctypes.c_uint8))
    np_arr = np.ctypeslib.as_array(ptr, shape=(height, width, 4))

    # cv_img is currently unused; the detector operates on np_arr directly
    cv_img = cv2.imdecode(np_arr, cv2.IMREAD_UNCHANGED)
    # self.__mxnet_detector__.process(cv_img, np_arr)
    self.__mxnet_detector__.process(np_arr, np_arr)

    if False:
        cv2.rectangle(np_arr, (10, 10), (100, 100), (0, 255, 0), 4)

    _libgst.gst_buffer_unmap(pbuffer, mapping)
    mo.refcount = saved_refcount
    return Gst.FlowReturn.OK

def timeout(loop, pipeline, w, idx):
    _, position = pipeline.query_position(Gst.Format.TIME)
    print("Position[{}]: {}\r".format(
        pipeline.get_property("name"), Gst.TIME_ARGS(position)))
    w.update_slider(idx, position)

    if 5 * Gst.SECOND < position < 6 * Gst.SECOND:
        gen_pipe_dot(pipeline, pipeline.get_property("name"))

    return True

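gen_pipe_dot() is not shown above; a plausible minimal sketch using GStreamer's built-in GraphViz dump, assuming the GST_DEBUG_DUMP_DOT_DIR environment variable points at a writable directory:

import os

def gen_pipe_dot(pipeline, name):
    # Writes $GST_DEBUG_DUMP_DOT_DIR/<name>.dot describing the current
    # pipeline topology; render it with e.g. "dot -Tpng name.dot -o name.png".
    if not os.environ.get("GST_DEBUG_DUMP_DOT_DIR"):
        return
    Gst.debug_bin_to_dot_file(pipeline, Gst.DebugGraphDetails.ALL, name)
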
def chainfunc(self, pad: Gst.Pad, parent, buffer: Gst.Buffer) -> Gst.FlowReturn:
    """
    :param parent: GstPluginPy
    """
    # DO SOMETHING
    info_str = "{}: int-prop: {}, float-prop: {} ".format(
        Gst.TIME_ARGS(buffer.pts), self.int_prop, self.float_prop)
    info_str += "bool-prop: {}, str-prop: {}, pyobject-prop: {}".format(
        self.bool_prop, self.str_prop, self.pyobject_prop)
    Gst.info(info_str)
    # *****************

    return self.srcpad.push(buffer)

def __update_tooltip(self, event):
    """Sets or clears the tooltip showing info about the hovered line."""
    markup = None
    if event:
        if not event.xdata:
            return
        xdata = max(self.__line_xs[0], min(event.xdata, self.__line_xs[-1]))
        res, value = self.__source.control_source_get_value(xdata)
        assert res
        pmin = self.__paramspec.minimum
        pmax = self.__paramspec.maximum
        value = value * (pmax - pmin) + pmin
        # Translators: This is a tooltip for a clip's keyframe curve,
        # showing what the keyframe curve affects, the timestamp at
        # the mouse cursor location, and the value at that timestamp.
        markup = _("Property: %s\nTimestamp: %s\nValue: %s") % (
            self.__propertyName, Gst.TIME_ARGS(xdata),
            "{:.3f}".format(value))
    self.set_tooltip_markup(markup)

def main():
    Gst.init(None)
    data = CustomData()

    # Create the elements
    data.playbin = Gst.ElementFactory.make("playbin", "playbin")
    if not data.playbin:
        print("Not all elements could be created.", file=sys.stderr)
        exit(-1)

    # Set the URI to play
    data.playbin.set_property(
        "uri",
        "https://www.freedesktop.org/software/gstreamer-sdk/data/media/sintel_trailer-480p.webm")

    # Start playing
    ret = data.playbin.set_state(Gst.State.PLAYING)
    if ret == Gst.StateChangeReturn.FAILURE:
        print("Unable to set the pipeline to the playing state.", file=sys.stderr)
        exit(-1)

    # Listen to the bus
    bus = data.playbin.get_bus()
    while not data.terminate:
        message = bus.timed_pop_filtered(
            100 * Gst.MSECOND,
            Gst.MessageType.STATE_CHANGED | Gst.MessageType.ERROR
            | Gst.MessageType.EOS | Gst.MessageType.DURATION_CHANGED)

        # Parse message
        if message:
            handle_message(data, message)
        else:
            if data.playing:
                fmt = Gst.Format.TIME
                current = -1

                # Query the current position of the stream
                _, current = data.playbin.query_position(fmt)
                if not current:
                    print("Could not query current position", file=sys.stderr)

                # If we didn't know it yet, query the stream duration
                if data.duration == Gst.CLOCK_TIME_NONE:
                    _, data.duration = data.playbin.query_duration(fmt)
                    if not data.duration:
                        print("Could not query current duration", file=sys.stderr)

                print("Position %s / %s" % (Gst.TIME_ARGS(current), Gst.TIME_ARGS(data.duration)))
                sys.stdout.flush()

                # If seeking is enabled, we have not done it yet, and the time is right, seek
                if data.seek_enabled and not data.seek_done and current > 10 * Gst.SECOND:
                    print("\nReached 10s, performing seek...")
                    data.playbin.seek_simple(
                        Gst.Format.TIME,
                        Gst.SeekFlags.FLUSH | Gst.SeekFlags.KEY_UNIT,
                        30 * Gst.SECOND)
                    data.seek_done = True

    # Free resources
    data.playbin.set_state(Gst.State.NULL)

def __asset_loaded_cb(self, proxy, res, asset, transcoder):
    try:
        GES.Asset.request_finish(res)
    except GLib.Error as e:
        if transcoder:
            self.emit("error-preparing-asset", asset, proxy, e)
            del transcoder
        else:
            self.__create_transcoder(asset)
        return

    shadow = transcoder and self._is_shadow_transcoder(transcoder)

    if not transcoder:
        if not self.__assets_match(asset, proxy):
            self.__create_transcoder(asset)
            return
    else:
        if transcoder.props.pipeline.props.video_filter:
            transcoder.props.pipeline.props.video_filter.finalize()
        if transcoder.props.pipeline.props.audio_filter:
            transcoder.props.pipeline.props.audio_filter.finalize()
        del transcoder

    asset_duration = asset_get_duration(asset)
    proxy_duration = asset_get_duration(proxy)
    if asset_duration != proxy_duration:
        duration = min(asset_duration, proxy_duration)
        self.info(
            "Resetting %s duration from %s to %s as"
            " new proxy has a different duration",
            asset.props.id, Gst.TIME_ARGS(asset_duration), Gst.TIME_ARGS(duration))
        asset.set_uint64(ASSET_DURATION_META, duration)
        proxy.set_uint64(ASSET_DURATION_META, duration)

        target_uri = self.get_target_uri(asset)
        for clip in self.app.project_manager.current_project.ges_timeline.iter_clips():
            if not isinstance(clip, GES.UriClip):
                continue
            if self.get_target_uri(clip.props.uri) == target_uri:
                if clip.props.in_point + clip.props.duration > duration:
                    new_duration = duration - clip.props.in_point
                    if new_duration > 0:
                        self.warning(
                            "%s resetting duration to %s as"
                            " new proxy has a shorter duration",
                            clip, Gst.TIME_ARGS(new_duration))
                        clip.set_duration(new_duration)
                    else:
                        new_inpoint = new_duration - clip.props.in_point
                        self.error(
                            "%s resetting duration to %s"
                            " and inpoint to %s as the proxy"
                            " is shorter",
                            clip, Gst.TIME_ARGS(new_duration), Gst.TIME_ARGS(new_inpoint))
                        clip.set_inpoint(new_inpoint)
                        clip.set_duration(duration - new_inpoint)
                clip.set_max_duration(duration)

    if shadow:
        self.app.project_manager.current_project.finalize_proxy(proxy)
    else:
        self.emit("proxy-ready", asset, proxy)
        self.__emit_progress(proxy, 100)

def editContainer(scenario, action):
    timeline = get_pipeline(scenario).props.timeline
    container = timeline.get_element(action.structure["container-name"])

    if container is None:
        for layer in timeline.get_layers():
            for clip in layer.get_clips():
                Gst.info("Existing clip: %s" % clip.get_name())

        scenario.report_simple(
            GLib.quark_from_string("scenario::execution-error"),
            "Could not find container: %s" % action.structure["container-name"])
        return 1

    res, position = GstValidate.action_get_clocktime(scenario, action, "position")
    layer_prio = action.structure["new-layer-priority"]

    if res is False:
        return 0

    edge = get_edge(action.structure)
    container_ui = container.ui

    setEditingMode(timeline, scenario, action)

    y = 21 - container_ui.translate_coordinates(timeline.ui, 0, 0)[1]

    if container.get_layer().get_priority() != layer_prio and layer_prio != -1:
        try:
            layer = timeline.get_layers()[layer_prio]
            Gst.info("Y is: %s Realized? %s Priority: %s layer prio: %s"
                     % (layer.ui.get_allocation().y,
                        container_ui.get_realized(),
                        container.get_layer().get_priority(),
                        layer_prio))
            y = layer.ui.get_allocation().y - container_ui.translate_coordinates(timeline.ui, 0, 0)[1]
            if y < 0:
                y += 21
            elif y > 0:
                y -= 21
        except IndexError:
            if layer_prio == -1:
                y = -5
            else:
                layer = timeline.get_layers()[-1]
                alloc = layer.ui.get_allocation()
                y = alloc.y + alloc.height + 10 - container_ui.translate_coordinates(timeline.ui, 0, 0)[1]

    if not hasattr(scenario, "last_edge"):
        scenario.last_edge = edge

    if not hasattr(scenario, "dragging") or scenario.dragging is False \
            or scenario.last_edge != edge:
        event_widget = container.ui
        if isinstance(container, GES.SourceClip):
            if edge == GES.Edge.EDGE_START:
                event_widget = container.ui.leftHandle
            elif edge == GES.Edge.EDGE_END:
                event_widget = container.ui.rightHandle

        scenario.dragging = True
        event = Event(Gdk.EventType.BUTTON_PRESS, button=1, y=y)
        with mock.patch.object(Gtk, "get_event_widget") as get_event_widget:
            get_event_widget.return_value = event_widget
            timeline.ui._button_press_event_cb(event_widget, event)

    event = Event(Gdk.EventType.MOTION_NOTIFY, button=1,
                  x=Zoomable.nsToPixelAccurate(position) - container_ui.translate_coordinates(
                      timeline.ui.layout.layers_vbox, 0, 0)[0],
                  y=y, state=Gdk.ModifierType.BUTTON1_MASK)
    with mock.patch.object(Gtk, "get_event_widget") as get_event_widget:
        get_event_widget.return_value = container.ui
        timeline.ui._motion_notify_event_cb(None, event)

    GstValidate.print_action(action,
                             "Editing %s to %s in %s mode, edge: %s "
                             "with new layer prio: %d\n" % (action.structure["container-name"],
                                                            Gst.TIME_ARGS(position),
                                                            scenario.last_mode,
                                                            edge, layer_prio))

    _releaseButtonIfNeeded(scenario, action, timeline, container, edge,
                           layer_prio, position, y)
    scenario.last_edge = edge

    return 1

def snapping_started_cb(timeline, element1, element2, dist, self):
    Gst.error("Here %s %s" % (
        Gst.TIME_ARGS(element1.props.start + element1.props.duration),
        Gst.TIME_ARGS(element2.props.start)))
    not_called.append("No snapping should happen")

def testOneSecond(self):
    self.assertEqual(Gst.TIME_ARGS(Gst.SECOND), '0:00:01.000000000')

def testClockTimeNone(self):
    self.assertEqual(Gst.TIME_ARGS(Gst.CLOCK_TIME_NONE), 'CLOCK_TIME_NONE')

def do_transform_ip(self, buffer):
    Gst.info("timestamp(buffer):%s" % (Gst.TIME_ARGS(buffer.pts)))
    return Gst.FlowReturn.OK

def do_render(self, buffer):
    Gst.info("timestamp(buffer):%s" % (Gst.TIME_ARGS(buffer.pts)))
    return Gst.FlowReturn.OK

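The do_transform_ip and do_render overrides in the last two snippets live inside Python element subclasses whose surrounding class definitions are not shown. A minimal sketch of the usual boilerplate for the in-place transform case, assuming the gst-python plugin loader (element and class names are illustrative):

import gi
gi.require_version("Gst", "1.0")
gi.require_version("GstBase", "1.0")
from gi.repository import Gst, GObject, GstBase

Gst.init(None)


class TimestampLogger(GstBase.BaseTransform):
    # long name, classification, description, author -- all illustrative
    __gstmetadata__ = ("timestamplogger", "Filter",
                       "Logs buffer timestamps in-place", "example")
    __gsttemplates__ = (
        Gst.PadTemplate.new("sink", Gst.PadDirection.SINK,
                            Gst.PadPresence.ALWAYS, Gst.Caps.new_any()),
        Gst.PadTemplate.new("src", Gst.PadDirection.SRC,
                            Gst.PadPresence.ALWAYS, Gst.Caps.new_any()),
    )

    def do_transform_ip(self, buffer):
        Gst.info("timestamp(buffer):%s" % (Gst.TIME_ARGS(buffer.pts)))
        return Gst.FlowReturn.OK


GObject.type_register(TimestampLogger)
# Picked up by the gst-python plugin loader when this file is placed in a
# python/ directory on GST_PLUGIN_PATH.
__gstelementfactory__ = ("timestamplogger", Gst.Rank.NONE, TimestampLogger)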