def _setupClock(self, pipeline):
    """Pin *pipeline* to the system realtime clock with an explicitly
    chosen base time, which is remembered in ``self.basetime``.
    """
    sys_clock = gst.system_clock_obtain()
    sys_clock.set_property('clock-type', 'realtime')
    # Any value works as the base time, as long as we know what it is.
    self.basetime = sys_clock.get_time()
    # Make the pipeline use the system clock rather than electing one.
    pipeline.use_clock(sys_clock)
    # Turn off the default base-time distribution...
    pipeline.set_new_stream_time(gst.CLOCK_TIME_NONE)
    # ...and distribute the base time we picked ourselves.
    self.debug("Setting basetime of %d", self.basetime)
    pipeline.set_base_time(self.basetime)
def record(output_file="/dev/stdout", udp_h264_port=None,
           video_device=None, audio_device=None):
    """
    Record from uvch264 device.

    Builds a gst-launch pipeline string (matroskamux -> filesink, with a
    ts_src timestamp element, optional H.264 camera source, optional UDP
    RTP tee branch and optional ALSA/AAC audio branch), launches it on the
    system realtime clock and runs a GLib main loop until EOS.

    :param output_file: filesink location for the matroska stream.
    :param udp_h264_port: if set, also send RTP-payloaded H.264 to
        127.0.0.1 on this port (only meaningful with a video_device).
    :param video_device: V4L2 device path for a uvch264 camera; when
        None a videotestsrc is used instead.
    :param audio_device: ALSA device name; when None no audio is recorded.

    TODO: /dev/stdout may not be the most portable way for outputting
        to stdout
    TODO: There are some overlapping timestamps on playback on many
        players, which is a bit annoying. It may be because the ts_src
        is hooked to the decoded stuff, which probably also increases
        the latency/jitter of the timestamps? Shouldn't be too hard
        to fix.
    """
    # Muxer and sink; every branch below links into "mux".
    # (The original code first assigned pipe_str = "" and immediately
    # overwrote it -- the dead assignment has been dropped.)
    pipe_str = \
        'matroskamux name=mux ! queue ! ' \
        'filesink location="%(output_file)s" ' \
        % {'output_file': output_file}
    pipe_str += 'ts_src name=ts_src '
    pipe_str += 'ts_src.text_src0 ! text/plain ! queue ! mux. '

    if not video_device:
        # No camera: feed ts_src and the muxer from test sources.
        pipe_str += "videotestsrc name=video_src ! ts_src.sink "
        pipe_str += "videotestsrc ! mux. "
    else:
        from trusas0.utils import sh
        # Disable autofocus so the focus distance stays fixed during capture.
        sh("v4l2-ctl -d %s -c focus_auto=0" % video_device)
        sh("v4l2-ctl -d %s -c focus_absolute=0" % video_device)
        pipe_str += \
            ' uvch264_src device=%(video_device)s auto-start=true name=video_src ' \
            'fixed-framerate=true initial-bitrate=50000000 profile=baseline ' \
            'video_src.vidsrc ! video/x-h264,width=1280,height=720,framerate=30/1 ! ts_src.sink ' \
            'ts_src.src ! h264parse ! tee name=vidtee ' \
            'vidtee.src0 ! queue ! mux. ' \
            % {'video_device': video_device}

        # Gstreamer doesn't have a nice way to create a proper
        # SDP/RTP-stream, so let's just dump out the raw video.
        if udp_h264_port:
            pipe_str += 'vidtee.src1 ! queue ! rtph264pay ! udpsink sync=false host=127.0.0.1 port=%i ' % int(udp_h264_port)

    if audio_device:
        pipe_str += ' alsasrc device="%s" ! queue ! voaacenc ! mux.' % audio_device

    log.info("Launching pipeline %s" % pipe_str)
    pipeline = gst.parse_launch(pipe_str)

    # Make sure we have an EPOCH clock.
    clock = gst.system_clock_obtain()
    clock.set_property("clock-type", 0)  # Set to gst.CLOCK_TYPE_REALTIME
    pipeline.use_clock(clock)

    mainloop = gobject.MainLoop()

    # Only warnings and errors are logged; everything else is dropped
    # because gstreamer spams like crazy.
    log_level_map = {
        gst.MESSAGE_WARNING: log.warning,
        gst.MESSAGE_ERROR: log.error,
    }

    def on_message(bus, message):
        # Route bus messages to a log level; unknown types are silenced.
        log_func = log_level_map.get(message.type, lambda obj: None)
        log_func(message)

    def shutdown():
        # pipeline.send_event(gst.event_new_eos()) should be enough, but
        # because the gstreamer EOS stuff seems to be FUBAR, force the
        # EOS to all sink pads individually.
        # TODO: THIS DOESN'T SEEM TO ALWAYS PROVIDE A CLEAN SHUTDOWN
        for element in pipeline.recurse():
            for pad in element.pads():
                if pad.get_property("direction") != gst.PAD_SINK:
                    continue
                pad.send_event(gst.event_new_eos())

    def on_error(bus, error):
        # The error itself is already logged by on_message; just wind down.
        shutdown()

    def on_eos(bus, eos):
        mainloop.quit()

    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", on_message)
    bus.connect("message::error", on_error)
    bus.connect("message::eos", on_eos)

    # Translate process signals into a graceful EOS-driven shutdown.
    signal.signal(signal.SIGTERM, lambda *args: shutdown())
    signal.signal(signal.SIGINT, lambda *args: shutdown())

    gobject.threads_init()
    pipeline.set_state(gst.STATE_PLAYING)
    mainloop.run()
    pipeline.set_state(gst.STATE_NULL)