Example #1
0
def _addRecdXmlAttrs(el, recd, forMeshTransmit):
    """Serialize a Recd's metadata onto XML element *el* as attributes.

    Args:
        el: XML element supporting setAttribute(name, value).
        recd: recorded-media object whose fields are written out.
        forMeshTransmit: when True, omit host-local data (datastore id,
            audio cover image) that is meaningless to mesh peers.
    """
    el.setAttribute('type', str(recd.type))

    # Audio recordings may carry a cover image; skip it for mesh
    # transmission to keep the payload small.
    if (recd.type == constants.TYPE_AUDIO) and (not forMeshTransmit):
        aiPixbuf = recd.getAudioImagePixbuf()
        if aiPixbuf:
            aiPixbufString = str(utils.getStringEncodedFromPixbuf(aiPixbuf))
            el.setAttribute('audioImage', aiPixbufString)

    # The datastore id only has meaning on the machine that owns it.
    if (recd.datastoreId is not None) and (not forMeshTransmit):
        el.setAttribute('datastoreId', str(recd.datastoreId))

    el.setAttribute('title', recd.title)
    el.setAttribute('time', str(recd.time))
    el.setAttribute('photographer', recd.recorderName)
    el.setAttribute('recorderHash', str(recd.recorderHash))
    el.setAttribute('colorStroke', str(recd.colorStroke))
    el.setAttribute('colorFill', str(recd.colorFill))
    el.setAttribute('buddy', str(recd.buddy))
    el.setAttribute('mediaMd5', str(recd.mediaMd5))
    el.setAttribute('thumbMd5', str(recd.thumbMd5))
    el.setAttribute('mediaBytes', str(recd.mediaBytes))

    if recd.thumbBytes:
        el.setAttribute('thumbBytes', str(recd.thumbBytes))

    # FIXME: can this be removed, or at least autodetected? has not been
    # changed for ages, should not be relevant
    el.setAttribute('version', '54')

    pixbuf = recd.getThumbPixbuf()
    if pixbuf:
        thumb64 = str(utils.getStringEncodedFromPixbuf(pixbuf))
        el.setAttribute('base64Thumb', thumb64)
Example #2
0
def _addRecdXmlAttrs(el, recd, forMeshTransmit):
    """Serialize a Recd's metadata onto XML element *el* as attributes.

    Args:
        el: XML element supporting setAttribute(name, value).
        recd: recorded-media object whose fields are written out.
        forMeshTransmit: when True, omit host-local data (datastore id,
            audio cover image) that is meaningless to mesh peers.
    """
    el.setAttribute("type", str(recd.type))

    # Audio recordings may carry a cover image; skip it for mesh
    # transmission to keep the payload small.
    if (recd.type == constants.TYPE_AUDIO) and (not forMeshTransmit):
        aiPixbuf = recd.getAudioImagePixbuf()
        if aiPixbuf:
            aiPixbufString = str(utils.getStringEncodedFromPixbuf(aiPixbuf))
            el.setAttribute("audioImage", aiPixbufString)

    # The datastore id only has meaning on the machine that owns it.
    if (recd.datastoreId is not None) and (not forMeshTransmit):
        el.setAttribute("datastoreId", str(recd.datastoreId))

    el.setAttribute("title", recd.title)
    el.setAttribute("time", str(recd.time))
    el.setAttribute("photographer", recd.recorderName)
    el.setAttribute("recorderHash", str(recd.recorderHash))
    el.setAttribute("colorStroke", str(recd.colorStroke))
    el.setAttribute("colorFill", str(recd.colorFill))
    el.setAttribute("buddy", str(recd.buddy))
    el.setAttribute("mediaMd5", str(recd.mediaMd5))
    el.setAttribute("thumbMd5", str(recd.thumbMd5))
    el.setAttribute("mediaBytes", str(recd.mediaBytes))

    if recd.thumbBytes:
        el.setAttribute("thumbBytes", str(recd.thumbBytes))

    # FIXME: can this be removed, or at least autodetected? has not been
    # changed for ages, should not be relevant
    el.setAttribute("version", "54")

    pixbuf = recd.getThumbPixbuf()
    if pixbuf:
        thumb64 = str(utils.getStringEncodedFromPixbuf(pixbuf))
        el.setAttribute("base64Thumb", thumb64)
def _addRecdXmlAttrs(el, recd, forMeshTransmit):
    """Serialize a Recd's metadata onto XML element *el* as attributes.

    Args:
        el: XML element supporting setAttribute(name, value).
        recd: recorded-media object whose fields are written out.
        forMeshTransmit: when True, omit host-local data (datastore id,
            audio cover image) that is meaningless to mesh peers.
    """
    el.setAttribute('type', str(recd.type))

    # Audio recordings may carry a cover image; skip it for mesh
    # transmission to keep the payload small.
    if (recd.type == constants.TYPE_AUDIO) and (not forMeshTransmit):
        aiPixbuf = recd.getAudioImagePixbuf()
        if aiPixbuf:
            aiPixbufString = str(utils.getStringEncodedFromPixbuf(aiPixbuf))
            el.setAttribute('audioImage', aiPixbufString)

    # The datastore id only has meaning on the machine that owns it.
    if (recd.datastoreId is not None) and (not forMeshTransmit):
        el.setAttribute('datastoreId', str(recd.datastoreId))

    el.setAttribute('title', recd.title)
    el.setAttribute('time', str(recd.time))
    el.setAttribute('photographer', recd.recorderName)
    el.setAttribute('recorderHash', str(recd.recorderHash))
    el.setAttribute('colorStroke', str(recd.colorStroke))
    el.setAttribute('colorFill', str(recd.colorFill))
    el.setAttribute('buddy', str(recd.buddy))
    el.setAttribute('mediaMd5', str(recd.mediaMd5))
    el.setAttribute('thumbMd5', str(recd.thumbMd5))
    el.setAttribute('mediaBytes', str(recd.mediaBytes))

    if recd.thumbBytes:
        el.setAttribute('thumbBytes', str(recd.thumbBytes))

    # FIXME: can this be removed, or at least autodetected? has not been
    # changed for ages, should not be relevant
    el.setAttribute('version', '54')

    pixbuf = recd.getThumbPixbuf()
    if pixbuf:
        thumb64 = str(utils.getStringEncodedFromPixbuf(pixbuf))
        el.setAttribute('base64Thumb', thumb64)
Example #4
0
    def _stop_recording_audio(self):
        """Detach the recorder bin and transcode the captured WAV to Ogg.

        Launches a filesrc -> wavparse -> audioconvert -> vorbisenc ->
        oggmux -> filesink pipeline that writes output.ogg, tagging the
        stream (including optional cover art) along the way.
        """
        self._pipeline.remove(self._audiobin)

        wav_path = os.path.join(Instance.instancePath, "output.wav")
        # Nothing (or an empty file) was captured: bail out early.
        if not os.path.exists(wav_path) or os.path.getsize(wav_path) <= 0:
            # FIXME: inform model of failure?
            return

        if self._audio_pixbuf:
            self.model.still_ready(self._audio_pixbuf)

        pipeline_desc = ('filesrc location=' + wav_path +
                         ' name=audioFilesrc !'
                         ' wavparse name=audioWavparse !'
                         ' audioconvert name=audioAudioconvert !'
                         ' vorbisenc name=audioVorbisenc !'
                         ' oggmux name=audioOggmux !'
                         ' filesink name=audioFilesink')
        transcoder = gst.parse_launch(pipeline_desc)

        taglist = self._get_tags(constants.TYPE_AUDIO)
        if self._audio_pixbuf:
            cover_b64 = utils.getStringEncodedFromPixbuf(self._audio_pixbuf)
            taglist[gst.TAG_EXTENDED_COMMENT] = "coverart=" + cover_b64

        encoder = transcoder.get_by_name('audioVorbisenc')
        encoder.merge_tags(taglist, gst.TAG_MERGE_REPLACE_ALL)

        sink = transcoder.get_by_name('audioFilesink')
        ogg_path = os.path.join(Instance.instancePath, "output.ogg")
        sink.set_property("location", ogg_path)

        bus = transcoder.get_bus()
        bus.add_signal_watch()
        self._audio_transcode_handler = bus.connect(
            'message', self._onMuxedAudioMessageCb, transcoder)
        self._transcode_id = gobject.timeout_add(
            200, self._transcodeUpdateCb, transcoder)
        transcoder.set_state(gst.STATE_PLAYING)
Example #5
0
    def record_audio(self):
        """Snapshot a cover photo, stop the live view, and start an
        audio-only pipeline recording Ogg Vorbis to disk."""
        logger.debug('record_audio')

        # grab the current frame to use as cover art
        self._audio_pixbuf = self._pixbuf.copy()
        self.model.still_ready(self._audio_pixbuf)

        # tear down the live preview pipeline
        self._pipeline.set_state(Gst.State.NULL)  # synchronous
        self._pixbuf = None

        # build the capture/encode pipeline
        ogg = os.path.join(Instance.instancePath, "output.ogg")
        desc = ('autoaudiosrc name=src '
                '! audioconvert '
                '! queue max-size-time=30000000000 '
                'max-size-bytes=0 max-size-buffers=0 '
                '! vorbisenc name=vorbis ! oggmux '
                '! filesink location=%s' % ogg)
        self._audio = Gst.parse_launch(desc)

        # tag the stream, embedding the cover photo when available
        taglist = self._get_tags(constants.TYPE_AUDIO)
        if self._audio_pixbuf:
            cover_b64 = utils.getStringEncodedFromPixbuf(self._audio_pixbuf)
            taglist.add_value(Gst.TagMergeMode.APPEND,
                              Gst.TAG_EXTENDED_COMMENT,
                              "coverart=" + cover_b64)

        encoder = self._audio.get_by_name('vorbis')
        encoder.merge_tags(taglist, Gst.TagMergeMode.REPLACE_ALL)

        # watch the bus so end-of-stream finishes the recording
        bus = self._audio.get_bus()
        bus.add_signal_watch()

        def on_message_cb(bus, msg, ogg):
            if msg.type == Gst.MessageType.EOS:
                logger.debug('record_audio.on_message_cb Gst.MessageType.EOS')
                # finish on the main loop, outside the streaming thread
                GLib.idle_add(self._stop_recording_audio, ogg)
                return

            return self._on_message_cb(bus, msg)

        bus.connect('message', on_message_cb, ogg)

        # start audio pipeline recording
        self._audio.set_state(Gst.State.PLAYING)  # asynchronous
Example #6
0
    def record_audio(self):
        """Capture a still as cover art, halt the live view, then record
        audio through vorbisenc/oggmux into output.ogg."""
        logger.debug('record_audio')

        # keep a frame from the live view to serve as cover art
        self._audio_pixbuf = self._pixbuf.copy()
        self.model.still_ready(self._audio_pixbuf)

        # shut the live preview down before reusing the audio source
        self._pipeline.set_state(Gst.State.NULL)  # synchronous
        self._pixbuf = None

        # assemble the record-and-encode pipeline
        ogg = os.path.join(Instance.instancePath, "output.ogg")
        launch = ('autoaudiosrc name=src '
                  '! audioconvert '
                  '! queue max-size-time=30000000000 '
                  'max-size-bytes=0 max-size-buffers=0 '
                  '! vorbisenc name=vorbis ! oggmux '
                  '! filesink location={}'.format(ogg))
        self._audio = Gst.parse_launch(launch)

        # attach metadata tags, including the cover image if we got one
        taglist = self._get_tags(constants.TYPE_AUDIO)
        if self._audio_pixbuf:
            encoded = utils.getStringEncodedFromPixbuf(self._audio_pixbuf)
            taglist.add_value(Gst.TagMergeMode.APPEND,
                              Gst.TAG_EXTENDED_COMMENT,
                              "coverart=" + encoded)

        vorbis_elem = self._audio.get_by_name('vorbis')
        vorbis_elem.merge_tags(taglist, Gst.TagMergeMode.REPLACE_ALL)

        # listen for end of stream on the pipeline bus
        bus = self._audio.get_bus()
        bus.add_signal_watch()

        def on_message_cb(bus, msg, ogg):
            if msg.type == Gst.MessageType.EOS:
                logger.debug('record_audio.on_message_cb Gst.MessageType.EOS')
                # defer cleanup to the main loop
                GLib.idle_add(self._stop_recording_audio, ogg)
                return

            return self._on_message_cb(bus, msg)

        bus.connect('message', on_message_cb, ogg)

        # kick the pipeline into the recording state
        self._audio.set_state(Gst.State.PLAYING)  # asynchronous
Example #7
0
    def record_video(self, quality):
        """Stop the live view and start recording camera video + audio.

        Keeps a still frame as cover art, then launches a tee'd pipeline:
        one branch feeds a rate-limited on-screen preview, the other
        encodes Theora video muxed with Vorbis audio into output.ogv.

        Args:
            quality: 0 selects low encoder quality, 1 selects high;
                any other value leaves the encoder defaults untouched.
        """
        logger.debug('record_video')
        if not self._cameras:
            # no capture device available; nothing to record
            return

        # take a photograph
        self._video_pixbuf = self._pixbuf.copy()

        # stop the live view
        self._pipeline.set_state(Gst.State.NULL)  # synchronous
        self._pixbuf = None

        # make a pipeline to record video and audio to file
        args = {}
        args['ogv'] = os.path.join(Instance.instancePath, "output.ogv")
        args['src'] = 'v4l2src device={0}'.format(self._camera)
        if self._camera == 'test':
            args['src'] = 'videotestsrc'

        # tee splits the camera stream: a 2 fps preview branch going to
        # autovideosink, and a 10 fps branch encoded by theoraenc; audio
        # is captured in parallel and both are muxed into one Ogg file.
        cmd = '{src} name=vsrc ' \
            '! video/x-raw,width=640,height=480 ' \
            '! tee name=tee ' \
            'tee.! videorate max-rate=2 ! videoconvert ' \
            '! queue leaky=2 ' \
            '! autovideosink sync=false ' \
            'tee.! videorate max-rate=10 ! videoconvert ' \
            '! queue max-size-time=30000000000 ' \
            'max-size-bytes=0 max-size-buffers=0 ' \
            '! theoraenc name=theora ' \
            '! mux. ' \
            'autoaudiosrc name=asrc ' \
            '! audiorate ' \
            '! audioconvert ' \
            '! queue max-size-time=30000000000 ' \
            'max-size-bytes=0 max-size-buffers=0 ' \
            '! vorbisenc name=vorbis ! mux. ' \
            'oggmux name=mux ! filesink location={ogv}'
        self._video = Gst.parse_launch(cmd.format(**args))

        # attach useful tags
        taglist = self._get_tags(constants.TYPE_VIDEO)
        if self._video_pixbuf:
            pixbuf_b64 = utils.getStringEncodedFromPixbuf(self._video_pixbuf)
            taglist.add_value(Gst.TagMergeMode.APPEND,
                              Gst.TAG_EXTENDED_COMMENT,
                              "coverart=" + pixbuf_b64)

        theora = self._video.get_by_name('theora')
        vorbis = self._video.get_by_name('vorbis')
        # NOTE(review): tags are merged only into the vorbis element here;
        # confirm the theora stream intentionally carries no tags
        vorbis.merge_tags(taglist, Gst.TagMergeMode.REPLACE_ALL)

        # set quality
        if quality == 0:
            theora.props.quality = 24
            vorbis.props.quality = 0.2
        if quality == 1:
            theora.props.quality = 52
            vorbis.props.quality = 0.4

        # catch end of stream
        bus = self._video.get_bus()
        bus.add_signal_watch()

        def on_message_cb(bus, msg, ogv):
            if msg.type == Gst.MessageType.EOS:
                logger.debug('record_video.on_message_cb Gst.MessageType.EOS')
                # finish the recording from the main loop, not the
                # streaming thread delivering this message
                GLib.idle_add(self._stop_recording_video, ogv)
                return

            return self._on_message_cb(bus, msg)

        bus.connect('message', on_message_cb, args['ogv'])

        # route the preview sink's window into the activity's widget
        self._catch_window(bus, self.activity.set_glive_sink)

        # start video pipeline recording
        self._video.set_state(Gst.State.PLAYING)  # asynchronous
Example #8
0
    def record_video(self, quality):
        """Stop the live view and start recording camera video + audio.

        Keeps a still frame as cover art, then launches a tee'd pipeline:
        one branch feeds a rate-limited on-screen preview, the other
        encodes Theora video muxed with Vorbis audio into output.ogv.

        Args:
            quality: 0 selects low encoder quality, 1 selects high;
                any other value leaves the encoder defaults untouched.
        """
        logger.debug('record_video')
        if not self._cameras:
            # no capture device available; nothing to record
            return

        # take a photograph
        self._video_pixbuf = self._pixbuf.copy()

        # stop the live view
        self._pipeline.set_state(Gst.State.NULL)  # synchronous
        self._pixbuf = None

        # make a pipeline to record video and audio to file
        args = {}
        args['ogv'] = os.path.join(Instance.instancePath, "output.ogv")
        args['src'] = 'v4l2src device={0}'.format(self._camera)
        if self._camera == 'test':
            args['src'] = 'videotestsrc'

        # tee splits the camera stream: a 2 fps preview branch going to
        # autovideosink, and a 10 fps branch encoded by theoraenc; audio
        # is captured in parallel and both are muxed into one Ogg file.
        cmd = '{src} name=vsrc ' \
            '! video/x-raw,width=640,height=480 ' \
            '! tee name=tee ' \
            'tee.! videorate max-rate=2 ! videoconvert ' \
            '! queue leaky=2 ' \
            '! autovideosink sync=false ' \
            'tee.! videorate max-rate=10 ! videoconvert ' \
            '! queue max-size-time=30000000000 ' \
            'max-size-bytes=0 max-size-buffers=0 ' \
            '! theoraenc name=theora ' \
            '! mux. ' \
            'autoaudiosrc name=asrc ' \
            '! audiorate ' \
            '! audioconvert ' \
            '! queue max-size-time=30000000000 ' \
            'max-size-bytes=0 max-size-buffers=0 ' \
            '! vorbisenc name=vorbis ! mux. ' \
            'oggmux name=mux ! filesink location={ogv}'
        self._video = Gst.parse_launch(cmd.format(**args))

        # attach useful tags
        taglist = self._get_tags(constants.TYPE_VIDEO)
        if self._video_pixbuf:
            pixbuf_b64 = utils.getStringEncodedFromPixbuf(self._video_pixbuf)
            taglist.add_value(Gst.TagMergeMode.APPEND,
                              Gst.TAG_EXTENDED_COMMENT,
                              "coverart=" + pixbuf_b64)

        theora = self._video.get_by_name('theora')
        vorbis = self._video.get_by_name('vorbis')
        # NOTE(review): tags are merged only into the vorbis element here;
        # confirm the theora stream intentionally carries no tags
        vorbis.merge_tags(taglist, Gst.TagMergeMode.REPLACE_ALL)

        # set quality
        if quality == 0:
            theora.props.quality = 24
            vorbis.props.quality = 0.2
        if quality == 1:
            theora.props.quality = 52
            vorbis.props.quality = 0.4

        # catch end of stream
        bus = self._video.get_bus()
        bus.add_signal_watch()

        def on_message_cb(bus, msg, ogv):
            if msg.type == Gst.MessageType.EOS:
                logger.debug('record_video.on_message_cb Gst.MessageType.EOS')
                # finish the recording from the main loop, not the
                # streaming thread delivering this message
                GLib.idle_add(self._stop_recording_video, ogv)
                return

            return self._on_message_cb(bus, msg)

        bus.connect('message', on_message_cb, args['ogv'])

        # route the preview sink's window into the activity's widget
        self._catch_window(bus, self.activity.set_glive_sink)

        # start video pipeline recording
        self._video.set_state(Gst.State.PLAYING)  # asynchronous