def start(self):
    """Build the pipeline if needed, wire up bus callbacks and start playing.

    Blocks the calling thread for up to 20 seconds waiting for the
    pipeline to preroll (ASYNC_DONE) or fail (ERROR); on timeout the
    Twisted reactor is stopped.
    """
    if self.pipeline is None:
        self._build_pipeline()
    bus = self.pipeline.get_bus()
    # Async bus messages are delivered via the main loop's signal watch.
    bus.add_signal_watch()
    bus.connect('message::eos', self._bus_message_eos_cb)
    bus.connect('message::error', self._bus_message_error_cb)
    # Sync messages are emitted directly from the streaming thread;
    # element-specific ones are routed to _bus_sync_message_element_cb.
    bus.enable_sync_message_emission()
    bus.connect('sync-message::element', self._bus_sync_message_element_cb)
    self.pipeline.set_state(gst.STATE_PLAYING)
    # Blocking wait: either the pipeline prerolls, errors out, or we
    # time out after 20 seconds.
    msg = bus.timed_pop_filtered(
        20 * gst.SECOND, gst.MESSAGE_ERROR | gst.MESSAGE_ASYNC_DONE)
    if msg is None:
        log.error("timeout: pipeline failed to preroll")
        reactor.stop()
def start(self):
    """Ensure the pipeline exists, hook up its bus and set it playing.

    Waits (blocking, up to 20 seconds) for preroll or error; stops the
    reactor when the wait times out.
    """
    if self.pipeline is None:
        self._build_pipeline()

    message_bus = self.pipeline.get_bus()
    message_bus.add_signal_watch()
    for signal_name, callback in (
            ('message::eos', self._bus_message_eos_cb),
            ('message::error', self._bus_message_error_cb)):
        message_bus.connect(signal_name, callback)
    message_bus.enable_sync_message_emission()
    message_bus.connect('sync-message::element',
                        self._bus_sync_message_element_cb)

    self.pipeline.set_state(gst.STATE_PLAYING)
    preroll_msg = message_bus.timed_pop_filtered(
        20 * gst.SECOND, gst.MESSAGE_ERROR | gst.MESSAGE_ASYNC_DONE)
    if preroll_msg is not None:
        return
    log.error("timeout: pipeline failed to preroll")
    reactor.stop()
def clientConnectionFailed(self, _connector, reason):
    """Log the failed connection attempt to self.url and stop the reactor."""
    error_text = reason.getErrorMessage()
    log.error('connecting to %r failed: %s', self.url, error_text)
    reactor.stop()
def connectionFailed(self, reason):
    """Log that the application-level connection could not be established."""
    error_text = reason.getErrorMessage()
    log.error("app couldn't connect: %s", error_text)
def _bus_message_error_cb(self, bus, message):
    """Handle a GStreamer error message: log it and shut the reactor down."""
    err, debug_info = message.parse_error()
    log.error("%s -- %s", err.message, debug_info)
    reactor.stop()
def on_data(self, ts, type_, data):
    """Dispatch one incoming FLV-tagged message to its per-type stream.

    Inspects the first byte(s) of the payload to separate codec headers
    (AVC/AAC sequence headers) from regular media data, and forwards the
    payload to the matching track stream with keyframe/interframe flags.

    :param ts:    message timestamp
    :param type_: message type (chunks.MSG_VIDEO or chunks.MSG_AUDIO;
                  anything else is logged and dropped)
    :param data:  payload buffer; must support peek() — presumably a
                  vecbuf-style object (see vb() below)
    """
    stream = self._tracks.get(type_, None)
    if not stream:
        d = self._make_stream(ts, type_)
        if not d:
            return
        # FIXME: there should really be a queue between nstream and sg
        # instead, for the moment, we'll just re-try the on_data()
        d.addCallback(lambda _: self.on_data(ts, type_, data))
        return

    flags = 0
    if type_ == chunks.MSG_VIDEO:
        # renamed from "bytes" to avoid shadowing the builtin
        size = len(data)
        frame_type, codec_id, h264_type = None, None, None
        if size > 1:
            ft_codec, h264_type = _s_double_uchar.unpack(data.peek(2))
            frame_type, codec_id = ft_codec >> 4, ft_codec & 0x0f
        elif size > 0:
            ft_codec, = _s_uchar.unpack(data.peek(1))
            frame_type, codec_id = ft_codec >> 4, ft_codec & 0x0f
        # frame_type 1 + codec_id 7 (H.264) + AVC packet type 0 is the
        # AVC sequence header: route it to the headers path, not data.
        if frame_type == 1 and codec_id == 7 and h264_type == 0:
            stream.write_headers(data)
            return
        elif frame_type is not None:
            if frame_type == 1:
                flags = FF_KEYFRAME
            else:
                # FIXME: this is not necessarily correct
                flags = FF_INTERFRAME
    elif type_ == chunks.MSG_AUDIO:
        size = len(data)
        codec_id, aac_type = None, None
        if size > 1:
            ft_codec, aac_type = _s_double_uchar.unpack(data.peek(2))
            codec_id = ft_codec >> 4
        elif size > 0:
            ft_codec, = _s_uchar.unpack(data.peek(1))
            codec_id = ft_codec >> 4
        # codec_id 10 (AAC) + packet type 0 is the AAC sequence header.
        if codec_id == 10 and aac_type == 0:
            self._audio_headers += 1
            stream.write_headers(data)
            return
        elif codec_id != 10 and self._audio_headers == 0:
            # Flash, doesn't use real headers for those formats,
            # but let's mark that there is an audio track with an
            # empty packet (Flash doesn't seem to mind those)
            # early on
            # NOTE(review): codec_id is None here when the payload is
            # empty, so peek(1) may fail on a zero-length buffer — TODO
            # confirm whether empty audio messages can reach this point.
            self._audio_headers += 1
            stream.write_headers(vb(data.peek(1)))
        if codec_id is not None:
            flags = FF_KEYFRAME  # audio usually is all keyframes
    else:
        log.error('Unsupported data type: %r', type_)
        return
    stream.write(ts, flags, data)