Esempio n. 1
0
 def __init__(self, **unused_kw):
     """Set up default export settings (PAL video, CD-quality audio, Ogg).

     Extra keyword arguments are accepted and ignored.
     """
     Loggable.__init__(self)
     # Video defaults: 720x576 (PAL) at 25 fps with square pixels.
     self.videowidth = 720
     self.videoheight = 576
     self.videorate = gst.Fraction(25, 1)
     self.videopar = gst.Fraction(1, 1)
     # Audio defaults: stereo, 44.1 kHz, 16-bit.
     self.audiochannels = 2
     self.audiorate = 44100
     self.audiodepth = 16
     # Default encoder/muxer element names (Ogg/Theora/Vorbis).
     self.vencoder = "theoraenc"
     self.aencoder = "vorbisenc"
     self.muxer = "oggmux"
     # Per-element settings caches, filled in later by settings dialogs.
     self.containersettings = {}
     self.acodecsettings = {}
     self.vcodecsettings = {}
     self.muxers = available_muxers()
     self.vencoders = available_video_encoders()
     self.aencoders = available_audio_encoders()
     # presumably filters the muxer list down to those usable with the
     # available encoders — confirm against available_combinations().
     self.muxers = available_combinations(self.muxers, self.vencoders,
                                          self.aencoders)
    def updateSettings(self):
        """Update and return the ExportSettings configured in the widget.

        Reads the current values of the video, audio and encoder widgets
        into self.settings; cached per-element settings are kept only when
        the corresponding factory selection did not change.
        """
        # Video Settings
        width = self.videowidthspin.get_value()
        height = self.videoheightspin.get_value()
        rate = gst.Fraction(
            *self.video_rates[self.videoratecbox.get_active()][1:])
        self.settings.setVideoProperties(width, height, rate)

        # Audio Settings
        nbchanns = self.audiochanncbox.get_active() + 1
        rate = self.audio_rates[self.audioratecbox.get_active()][1]
        depth = self.audio_depths[self.audiodepthcbox.get_active()][1]
        self.settings.setAudioProperties(nbchanns, rate, depth)

        # Encoders
        muxer = self.settings.muxers[
            self.muxercombobox.get_active()].get_name()
        # Pre-initialize so the fall-through warning branches below no
        # longer crash with an UnboundLocalError at setEncoders().
        vencoder = None
        aencoder = None
        vidx = self.vcodeccbox.get_active()
        if vidx < len(self.validvencoders):
            vencoder = self.validvencoders[vidx].get_name()
        elif vidx == len(self.validvencoders):
            vencoder = None
        else:
            self.warning("we don't want any video stream")
        aidx = self.acodeccbox.get_active()
        if aidx < len(self.validaencoders):
            aencoder = self.validaencoders[aidx].get_name()
        elif aidx == len(self.validaencoders):
            aencoder = None
        else:
            self.warning("we don't want any audio stream")
        self.settings.setEncoders(muxer, vencoder, aencoder)

        # encoder/muxer settings
        # only store cached values if no different factory was chosen.
        if muxer == self.muxer:
            self.settings.containersettings = self.containersettings
        else:
            self.settings.containersettings = {}
        if aencoder == self.aencoder:
            self.settings.acodecsettings = self.acodecsettings
        else:
            self.settings.acodecsettings = {}
        if vencoder == self.vencoder:
            self.settings.vcodecsettings = self.vcodecsettings
        else:
            self.settings.vcodecsettings = {}

        self.debug("Returning %s", self.settings)

        return self.settings
Esempio n. 3
0
 def get_pipeline_string(self, props):
     """Read component properties into attributes and build the pipeline.

     Delegates the actual pipeline construction to
     self.get_pipeline_template().
     """
     self.is_square = props.get('is-square', False)
     self.width = props.get('width', None)
     self.height = props.get('height', None)
     self.add_borders = props.get('add-borders', True)
     self.deintMode = props.get('deinterlace-mode', 'auto')
     self.deintMethod = props.get('deinterlace-method', 'ffmpeg')
     self.kuinterval = props.get('keyunits-interval', 0) * gst.MSECOND
     self.volume_level = props.get('volume', 1)
     fr = props.get('framerate', None)
     # Conditional expression instead of the fragile `fr and ... or None`
     # idiom, which misbehaves whenever the middle operand is falsy.
     self.framerate = gst.Fraction(fr[0], fr[1]) if fr else None
     self._parse_aditional_properties(props)
     return self.get_pipeline_template(props)
Esempio n. 4
0
    def _onValueChangedCb(self, widget, dynamic, prop):
        """Apply a changed effect-property value to the current element.

        Wraps the property set in an undoable "Effect property change"
        action and caches the value so identical updates are skipped.
        """
        value = dynamic.getWidgetValue()

        #FIXME Workaround in order to make aspectratiocrop working
        if isinstance(value, gst.Fraction):
            value = gst.Fraction(int(value.num), int(value.denom))

        # Only record an undoable action when the value actually changed.
        if value != self._current_element_values.get(prop.name):
            self.action_log.begin("Effect property change")
            self._current_effect_setting_ui.element.set_property(
                prop.name, value)
            self.action_log.commit()
            self._current_element_values[prop.name] = value
Esempio n. 5
0
 def _parseText(self, text):
     """Parse *text* into a gst.Fraction using self.fraction_regex.

     Supports an "M" suffix (NTSC-style rate * 1000/1001) and an explicit
     denominator in the suffix group.
     """
     groups = self.fraction_regex.match(text).groups()
     numerator = float(groups[0]) if groups[0] else 1.0
     denominator = 1.0
     suffix = groups[2]
     if suffix:
         if suffix == "M":
             # "M" notation: e.g. "30M" means 30000/1001.
             numerator *= 1000
             denominator = 1001
         elif suffix[1:]:
             denominator = float(suffix[1:])
     return gst.Fraction(numerator, denominator)
Esempio n. 6
0
    def __init__(self):
        """Build the videoscale ! capsfilter ! videobox chain with ghost pads."""
        gst.Bin.__init__(self)
        self.videoscale = gst.element_factory_make("videoscale",
                                                   "smart-videoscale")
        # set the scaling method to bilinear (cleaner)
        # FIXME : we should figure out if better methods are available in the
        # future, or ask the user which method he wants to use
        # FIXME : Instead of having the set_caps() method,
        # use proper caps negotiation
        self.videoscale.props.method = 1
        self.videobox = gst.element_factory_make("videobox", "smart-videobox")
        self.capsfilter = gst.element_factory_make("capsfilter",
                                                   "smart-capsfilter")
        self.add(self.videoscale, self.capsfilter, self.videobox)
        gst.element_link_many(self.videoscale, self.capsfilter, self.videobox)

        # Ghost pads expose both ends of the internal chain on the bin.
        self._sinkPad = gst.GhostPad("sink", self.videoscale.get_pad("sink"))
        self._sinkPad.set_active(True)
        self._srcPad = gst.GhostPad("src", self.videobox.get_pad("src"))
        self._srcPad.set_active(True)

        self.add_pad(self._sinkPad)
        self.add_pad(self._srcPad)

        # Intercept caps negotiation on the sink side.
        self._sinkPad.set_setcaps_function(self._sinkSetCaps)

        # input/output values
        # -1 / None mean "not negotiated yet"; par = pixel aspect ratio,
        # dar = display aspect ratio.
        self.capsin = None
        self.widthin = -1
        self.heightin = -1
        self.parin = gst.Fraction(1, 1)
        self.darin = gst.Fraction(1, 1)
        self.capsout = None
        self.widthout = -1
        self.heightout = -1
        self.parout = gst.Fraction(1, 1)
        self.darout = gst.Fraction(1, 1)
Esempio n. 7
0
	def create_pipeline(self):
		"""Build src ! capsfilter ! videorate ! encoder ! identity ! fakesink.

		Reads raw frames from self.location (or generates them with
		videotestsrc when no location is set) and returns the assembled
		gst.Pipeline.
		"""
		p = gst.Pipeline()

		width, height = self.framesize.split("x")
		width, height = int(width), int(height)
		if self.location:
			src = gst.element_factory_make("filesrc")
			src.props.location = self.location
			if self.format == "I420":
				bpp = 1.5
			elif self.format == "UYVY":
				bpp = 2
			else:
				# Fail fast: previously an unknown format crashed later
				# with a NameError on `bpp`.
				raise ValueError("unsupported raw format: %s" % self.format)
			# One blocksize read corresponds to exactly one raw frame.
			src.props.blocksize = int(width * height * bpp)
		else:
			src = gst.element_factory_make("videotestsrc")
		src.props.num_buffers = self.num_buffers

		# Bitrate sequence is stored for later use (presumably consumed by
		# the handoff callback — confirm).
		self.bitrates = self.bitrateseq.split(',')
		bitrate = self.bitrate
		enc = gst.element_factory_make(self.element, "encoder")
		enc.props.bitrate = bitrate
		enc.props.max_bitrate = self.max_bitrate

		# Optional encoder tuning; only set when explicitly configured.
		if self.mode is not None:
			enc.props.mode = self.mode

		if self.intra_refresh is not None:
			enc.props.intra_refresh = self.intra_refresh

		# Raw caps pinning format, size and framerate of the encoder input.
		s = gst.Structure("video/x-raw-yuv")
		s["format"] = gst.Fourcc(self.format)
		s["width"] = width
		s["height"] = height
		s["framerate"] = gst.Fraction(self.framerate, 1)

		capf_raw = gst.element_factory_make("capsfilter")
		capf_raw.props.caps = gst.Caps(s)

		ident = gst.element_factory_make("identity")
		videorate = gst.element_factory_make("videorate")
		sink = gst.element_factory_make("fakesink")

		p.add(src, capf_raw, videorate, enc, ident, sink)
		gst.element_link_many(src, capf_raw, videorate, enc, ident, sink)

		# Identity handoff lets us observe every buffer passing through.
		ident.connect("handoff", self.handoff)
		ident.set_property("signal-handoffs", True)

		return p
 def _reset(self):
     """Reset all negotiation and output state to initial values."""
     self.debug("resetting ourselves")
     # NOTE(review): reassigned to the 25/1 default a few lines below;
     # the None here is redundant but kept as-is.
     self._outputrate = None
     self._srccaps = None
     # number of outputted buffers
     self._offset = 0
     # Fresh TIME-format segment for the next stream.
     self._segment = gst.Segment()
     self._segment.init(gst.FORMAT_TIME)
     self._needsegment = True
     self._bufferduration = 0
     self._outputrate = gst.Fraction(25, 1)
     # this is the buffer we store and repeatedly output
     self._buffer = None
     # this will be set by our task
     self.last_return = gst.FLOW_OK
Esempio n. 9
0
    def get_pipeline_string(self, properties):
        """Build a videotestsrc-based pipeline string from *properties*.

        Honors optional format, width/height, framerate, pixel-aspect-ratio
        and overlay-timestamps properties; always emits a live source.
        """
        capsString = properties.get('format', 'video/x-raw-yuv')

        if capsString == 'video/x-raw-yuv':
            capsString = '%s,format=(fourcc)I420' % capsString

        # Filtered caps
        struct = gst.structure_from_string(capsString)
        for dimension in ('width', 'height'):
            if dimension in properties:
                struct[dimension] = properties[dimension]

        if 'framerate' in properties:
            num, denom = properties['framerate'][0], properties['framerate'][1]
            struct['framerate'] = gst.Fraction(num, denom)

        # Default to square pixels, then honor an explicit override.
        struct['pixel-aspect-ratio'] = gst.Fraction(1, 1)
        if 'pixel-aspect-ratio' in properties:
            ratio = properties['pixel-aspect-ratio']
            struct['pixel-aspect-ratio'] = gst.Fraction(ratio[0], ratio[1])

        # If RGB, set something ffmpegcolorspace can convert.
        if capsString == 'video/x-raw-rgb':
            struct['red_mask'] = 0xff00
        caps = gst.Caps(struct)

        is_live = 'is-live=true'

        overlayTimestamps = properties.get('overlay-timestamps', False)
        overlay = " timeoverlay ! " if overlayTimestamps else ""

        source_part = "videotestsrc %s name=source ! " % is_live
        identity_part = "identity name=identity silent=TRUE ! %s" % caps
        return source_part + overlay + identity_part
Esempio n. 10
0
    def __init__(self, framerate=gst.Fraction(25, 1)):
        """Build a videorate ! capsfilter bin enforcing *framerate*.

        NOTE(review): the default Fraction is created once at definition
        time; harmless as long as it is treated as immutable.
        """
        gst.Bin.__init__(self)
        self._framerate = framerate

        self._videorate = gst.element_factory_make("videorate")
        self._capsfilter = gst.element_factory_make("capsfilter")
        self.add(self._videorate, self._capsfilter)

        self._videorate.link(self._capsfilter)

        # Create source and sink pads
        self._sinkPad = gst.GhostPad('sink', self._videorate.get_pad('sink'))
        self._srcPad = gst.GhostPad('src', self._capsfilter.get_pad('src'))
        self.add_pad(self._sinkPad)
        self.add_pad(self._srcPad)

        # presumably configures the capsfilter to force the framerate —
        # see _setFramerate.
        self._setFramerate(framerate)
Esempio n. 11
0
    def testDurationCheckImage(self):
        """A still image (framerate 0/1) must leave the duration unset.

        Simulates a decoded PNG pad and checks that the discoverer keeps
        current_duration at gst.CLOCK_TIME_NONE without reporting errors.
        """
        self.discoverer.current_duration = gst.CLOCK_TIME_NONE
        pngdec = gst.element_factory_make('pngdec')
        self.discoverer.pipeline.add(pngdec)
        pad = pngdec.get_pad('src')
        # Fix concrete caps on the pad: 320x240 with framerate 0/1, the
        # conventional representation of a still image.
        caps = gst.Caps(pad.get_caps()[0])
        caps[0]['width'] = 320
        caps[0]['height'] = 240
        caps[0]['framerate'] = gst.Fraction(0, 1)
        pad.set_caps(caps)
        self.discoverer._newDecodedPadCb(None, pad, False)
        self.discoverer.addUri('illbepopped')
        self.discoverer._finishAnalysis("foo")

        self.failUnlessEqual(self.error, None)
        self.failUnlessEqual(self.discoverer.current_duration,
                             gst.CLOCK_TIME_NONE)
Esempio n. 12
0
    def __init__(self, instance, ui_manager):
        """Create the timeline widget.

        instance: the application instance this timeline belongs to.
        ui_manager: the gtk UIManager used to build the timeline UI.
        """
        gtk.Table.__init__(self, rows=2, columns=1, homogeneous=False)
        Loggable.__init__(self)
        Zoomable.__init__(self)
        self.log("Creating Timeline")

        self._updateZoom = True
        self.project = None
        self.ui_manager = ui_manager
        self.app = instance
        # presumably drag-and-drop state — confirm against drag handlers.
        self._temp_objects = None
        self._factories = None
        self._finish_drag = False
        self._position = 0
        self._state = gst.STATE_NULL
        self._createUI()
        self._prev_duration = 0
        self.rate = gst.Fraction(1, 1)
    def _sink_setcaps(self, unused_pad, caps):
        """Negotiate output caps for the incoming *caps*.

        Strips any incoming framerate, intersects with downstream caps,
        and accepts the first candidate downstream agrees to, defaulting
        its framerate to 25/1 when unset. Returns True on success.
        """
        self.debug("caps %s" % caps.to_string())
        downcaps = self.srcpad.peer_get_caps().copy()
        self.debug("downcaps %s" % downcaps.to_string())

        # methodology
        # 1. We override any incoming framerate
        ccaps = gst.Caps(caps)
        for struct in ccaps:
            if struct.has_key("framerate"):
                try:
                    del struct["framerate"]
                except Exception:
                    # Narrowed from a bare `except:` so KeyboardInterrupt
                    # and SystemExit are no longer swallowed.
                    self.warning("Couldn't remove 'framerate' from %s" %
                                 struct.to_string())

        # 2. we do the intersection of our incoming stripped caps
        #    and the downstream caps
        intersect = ccaps.intersect(downcaps)
        if intersect.is_empty():
            self.warning("no negotiation possible !")
            return False

        # 3. for each candidate in the intersection, we try to set that
        #    candidate downstream
        for candidate in intersect:
            self.debug("Trying %s" % candidate.to_string())
            if self.srcpad.peer_accept_caps(candidate):
                self.debug("accepted ! %s" % candidate.to_string())
                # 4. When we have an accepted caps downstream, we store the negotiated
                #    framerate and return
                if not candidate.has_key("framerate") or \
                        not isinstance(candidate["framerate"], gst.Fraction):
                    candidate["framerate"] = gst.Fraction(25, 1)
                self._outputrate = candidate["framerate"]
                self._bufferduration = gst.SECOND * self._outputrate.denom / self._outputrate.num
                self._srccaps = candidate.copy()
                res = self.srcpad.set_caps(self._srccaps)
                return res

        # 5. If we can't find an accepted candidate, we return False
        return False
    def create_pipeline(self):
        """Build videotestsrc ! videoscale ! capsfilter ! encoder !
        identity ! fakesink and return the assembled gst.Pipeline."""
        p = gst.Pipeline()

        width, height = self.framesize.split("x")
        width, height = int(width), int(height)

        src = gst.element_factory_make("videotestsrc")
        src.props.num_buffers = self.num_buffers
        bitrate = self.bitrate
        scaler = gst.element_factory_make("videoscale")

        enc = gst.element_factory_make(self.element, "encoder")

        # Optional encoder tuning; only set when explicitly configured.
        if self.mode is not None:
            enc.props.mode = self.mode

        if self.intra_refresh is not None:
            enc.props.intra_refresh = self.intra_refresh

        enc.props.bitrate = bitrate
        ident = gst.element_factory_make("identity")

        sink = gst.element_factory_make("fakesink")

        # Raw caps pinning format, size and framerate of the encoder input.
        s = gst.Structure("video/x-raw-yuv")
        s["format"] = gst.Fourcc(self.format)
        s["width"] = width
        s["height"] = height
        s["framerate"] = gst.Fraction(self.framerate, 1)

        caps = gst.element_factory_make("capsfilter", "capsf")
        caps.props.caps = gst.Caps(s)

        p.add(src, scaler, caps, enc, ident, sink)
        gst.element_link_many(src, scaler, caps, enc, ident, sink)

        # Identity handoff lets us observe every buffer passing through.
        ident.connect("handoff", self.handoff)
        ident.set_property("signal-handoffs", True)
        return p
Esempio n. 15
0
    def testParAndDar(self):
        """VideoStream must derive par/dar from caps, with sane defaults."""
        # Explicit pixel-aspect-ratio: dar = par * width / height.
        caps = gst.Caps('video/x-raw-int, width=320, height=240, '
                'pixel-aspect-ratio=2/1')
        stream = VideoStream(caps)
        self.failUnlessEqual(stream.par, gst.Fraction(2, 1))
        self.failUnlessEqual(stream.dar, gst.Fraction(640, 240))

        # No par in caps: defaults to square pixels (1/1).
        caps = gst.Caps('video/x-raw-int, width=320, height=240')
        stream = VideoStream(caps)
        self.failUnlessEqual(stream.par, gst.Fraction(1, 1))
        self.failUnlessEqual(stream.dar, gst.Fraction(320, 240))

        # no width and height, default to 4/3
        caps = gst.Caps('video/x-raw-int')
        stream = VideoStream(caps)
        self.failUnlessEqual(stream.par, gst.Fraction(1, 1))
        self.failUnlessEqual(stream.dar, gst.Fraction(4, 3))
Esempio n. 16
0
    def get_pipeline_string(self, properties):
        """Return an oggdemux pipeline string playing self.filelocation.

        Video is scaled and rate-adjusted to the configured size and
        framerate; audio is converted to 16-bit signed integer samples.
        """
        # setup the properties
        self.bus = None
        self.videowidth = properties.get('width', 240)
        # Default height keeps 720:576 (PAL) proportions of the width.
        self.videoheight = properties.get(
            'height', int(576 * self.videowidth/720.))
        self.videoframerate = properties.get('framerate', (25, 2))
        self.filelocation = properties.get('location')

        vstruct = gst.structure_from_string(
            "video/x-raw-yuv,width=%(width)d,height=%(height)d" %
            dict(width=self.videowidth, height=self.videoheight))
        vstruct['framerate'] = gst.Fraction(self.videoframerate[0],
                                            self.videoframerate[1])

        vcaps = gst.Caps(vstruct)

        self.run_discoverer()

        # NOTE(review): the location is interpolated unquoted; a path with
        # spaces or special characters would break the parse — confirm.
        template = (
            'filesrc location=%(location)s'
            '       ! oggdemux name=demux'
            '    demux. ! queue ! theoradec name=theoradec'
            '       ! identity name=videolive single-segment=true silent=true'
            '       ! videorate name=videorate'
            '       ! videoscale'
            '       ! %(vcaps)s'
            '       ! identity name=vident sync=true silent=true'
            '       ! @feeder:video@'
            '    demux. ! queue ! vorbisdec name=vorbisdec'
            '       ! identity name=audiolive single-segment=true silent=true'
            '       ! audioconvert'
            '       ! audio/x-raw-int,width=16,depth=16,signed=(boolean)true'
            '       ! identity name=aident sync=true silent=true'
            '       ! @feeder:audio@'
            % dict(location=self.filelocation, vcaps=vcaps))

        return template
Esempio n. 17
0
    def create_pipeline(self):
        """Build src ! capsfilter ! encoder ! filesink and return it.

        Encodes raw frames from self.location (or generated frames from
        videotestsrc when no location is set) into self.tmp_filename.
        """
        p = gst.Pipeline()

        width, height = self.framesize.split("x")
        width, height = int(width), int(height)

        if self.location:
            src = gst.element_factory_make("filesrc")
            src.props.location = self.location
            if self.format == "I420":
                bpp = 1.5
            elif self.format == "UYVY":
                bpp = 2
            else:
                # Fail fast: previously an unknown format crashed later
                # with a NameError on `bpp`.
                raise ValueError("unsupported raw format: %s" % self.format)
            # One blocksize read corresponds to exactly one raw frame.
            src.props.blocksize = int(width * height * bpp)
        else:
            src = gst.element_factory_make("videotestsrc")
            src.props.num_buffers = self.num_buffers

        enc = gst.element_factory_make(self.element)
        enc.props.bitrate = self.bitrate

        sink = gst.element_factory_make("filesink")
        sink.props.location = self.tmp_filename

        # Raw caps pinning format, size and framerate of the encoder input.
        s = gst.Structure("video/x-raw-yuv")
        s["format"] = gst.Fourcc(self.format)
        s["width"] = width
        s["height"] = height
        s["framerate"] = gst.Fraction(self.framerate, 1)

        capf = gst.element_factory_make("capsfilter")
        capf.props.caps = gst.Caps(s)
        p.add(src, capf, enc, sink)

        if not gst.element_link_many(src, capf, enc, sink):
            print " pipeline creation error !!"

        return p
Esempio n. 18
0
    def get_pipeline_string(self, props):
        """Return a dv1394src pipeline string feeding video, audio and dv.

        Reads the component properties into attributes; the height
        defaults to PAL proportions when neither is-square nor height is
        given.
        """
        if props.get('scaled-width', None) is not None:
            self.warnDeprecatedProperties(['scaled-width'])

        self.is_square = props.get('is-square', False)
        self.width = props.get('width', 0)
        self.height = props.get('height', 0)
        decoder = props.get('decoder', 'dvdec')
        if not self.is_square and not self.height:
            self.height = int(576 * self.width/720.) # assuming PAL
        self.add_borders = props.get('add-borders', True)
        guid = "guid=%s" % props.get('guid', 0)
        self.deintMode = props.get('deinterlace-mode', 'auto')
        self.deintMethod = props.get('deinterlace-method', 'ffmpeg')

        fr = props.get('framerate', None)
        if fr is not None:
            self.framerate = gst.Fraction(fr[0], fr[1])
        else:
            self.framerate = None

        # FIXME: might be nice to factor out dv1394src ! dvdec so we can
        # replace it with videotestsrc of the same size and PAR, so we can
        # unittest the pipeline
        # need a queue in case tcpserversink blocks somehow
        template = ('dv1394src %s'
                    '    ! tee name=t'
                    '    ! queue leaky=2 max-size-time=1000000000'
                    '    ! dvdemux name=demux'
                    '  demux. ! queue ! %s name=decoder'
                    '    ! @feeder:video@'
                    '  demux. ! queue ! audio/x-raw-int '
                    '    ! volume name=setvolume'
                    '    ! level name=volumelevel message=true '
                    '    ! @feeder:audio@'
                    '    t. ! queue ! @feeder:dv@' % (guid, decoder))

        return template
Esempio n. 19
0
    def __init__(self, framerate=gst.Fraction(25, 1)):
        """Build a videorate ! capsfilter bin enforcing *framerate*.

        NOTE(review): the default Fraction is created once at definition
        time; harmless as long as it is treated as immutable.
        """
        gst.Bin.__init__(self)
        self._framerate = framerate

        self._videorate = gst.element_factory_make("videorate")
        self._capsfilter = gst.element_factory_make("capsfilter")
        self.add(self._videorate, self._capsfilter)

        self._videorate.link(self._capsfilter)

        # Set properties
        # Probe before setting — presumably not every videorate version
        # exposes skip-to-first.
        if gstreamer.element_has_property(self._videorate, 'skip-to-first'):
            self._videorate.set_property('skip-to-first', True)

        # Create source and sink pads
        self._sinkPad = gst.GhostPad('sink', self._videorate.get_pad('sink'))
        self._srcPad = gst.GhostPad('src', self._capsfilter.get_pad('src'))
        self.add_pad(self._sinkPad)
        self.add_pad(self._srcPad)

        # Intercept events arriving on the sink pad.
        self._sinkPad.set_event_function(self.eventfunc)

        self._setFramerate(framerate)
Esempio n. 20
0
    def _add_video_effects(self):
        """Register the videorate and videoscale effects on this component."""
        # Add the effects to the component but don't plug them until we have a
        # valid video pad
        props = self.config['properties']
        is_square = props.get('is-square', False)
        add_borders = props.get('add-borders', False)
        width = props.get('width', None)
        height = props.get('height', None)
        fr = props.get('framerate', (25, 2))
        framerate = gst.Fraction(fr[0], fr[1])

        # Videorate effect forces the configured output framerate.
        self.vr = videorate.Videorate('videorate', None, self.pipeline,
                                      framerate)
        self.addEffect(self.vr)
        #self.vr.effectBin.set_state(gst.STATE_PLAYING)
        self.debug("Videorate added")

        # Videoscale effect handles resizing / aspect adjustments.
        self.videoscaler = videoscale.Videoscale('videoscale', self, None,
                                                 self.pipeline, width, height,
                                                 is_square, add_borders)
        self.addEffect(self.videoscaler)
        #self.videoscaler.effectBin.set_state(gst.STATE_PLAYING)
        self.debug("Videoscaler  added")
Esempio n. 21
0
        if self.samples <= self.window.windowSize:
            return True  # Ignore these, our sliding window isn't very smart

        self.data[0].append(processing_time * float(self.framerate) * 100.0)
        self.data[1].append(self.window.average * float(self.framerate) *
                            100.0)
        self.data[2].append(self.window.max * float(self.framerate) * 100.0)
        print "This frame: %.2f: %.2f%%. Average: %.2f%%. Peak: %.2f%%" % (
            processing_time, processing_time * float(self.framerate) * 100.0,
            self.window.average * float(self.framerate) * 100.0,
            self.window.max * float(self.framerate) * 100.0)
        return True


if len(sys.argv) == 2:
    # Benchmark the input file over a grid of framerates and sizes;
    # (None, None) presumably means "keep the source's native value".
    framerates = [(30, 1), (25, 1), (25, 2), (None, None)]
    sizes = [(800, 600), (400, 300), (None, None)]  # Other useful sizes here
    for framerate in framerates:
        for size in sizes:
            if framerate[1]:
                fr = gst.Fraction(framerate[0], framerate[1])
            else:
                fr = None
            # NOTE(review): `infile` is computed but never used.
            infile = sys.argv[1]
            outfileTemplate = sys.argv[1] + ".%dx%d@%.2f.png"
            bench = TheoraBench(sys.argv[1], outfileTemplate, size[0], size[1],
                                fr)
            bench.run()
else:
    print "Usage: %s filename.ogg" % sys.argv[0]
Esempio n. 22
0
    def __init__(self, filename, max_interleave=1.0, timeout=3000):
        """
        filename: str; absolute path of the file to be discovered.
        max_interleave: int or float; the maximum frame interleave in seconds.
            The value must be greater than the input file frame interleave
            or the discoverer may not find out all input file's streams.
            The default value is 1 second and you shouldn't have to change it,
            changing it mean larger discovering time and bigger memory usage.
        timeout: int; duration in ms for the discovery to complete.
        """
        gobject.GObject.__init__(self)

        self.mimetype = None

        # Per-stream caps collected during discovery.
        self.audiocaps = {}
        self.videocaps = {}

        self.videowidth = 0
        self.videoheight = 0
        self.videorate = gst.Fraction(0, 1)

        self.audiofloat = False
        self.audiorate = 0
        self.audiodepth = 0
        self.audiowidth = 0
        self.audiochannels = 0

        # Stream lengths — presumably in gst time units (ns); confirm.
        self.audiolength = 0L
        self.videolength = 0L
        self.audiobitrate = None
        self.videobitrate = None
        self.segmentlength = None

        self.is_video = False
        self.is_audio = False

        # Streams that are neither audio nor video.
        self.otherstreams = []

        self.finished = False
        self.tags = {}
        self._success = False
        self._nomorepads = False

        self._max_interleave = max_interleave

        # Bail out early for nonexistent files; discovery is then
        # immediately marked finished.
        if not os.path.isfile(filename):
            self.debug("File '%s' does not exist, finished" % filename)
            self.finished = True
            return

        # the initial elements of the pipeline
        self.src = gst.element_factory_make("filesrc")
        self.src.set_property("location", filename)
        self.src.set_property("blocksize", 1000000)
        self.dbin = gst.element_factory_make("decodebin2")
        self.add(self.src, self.dbin)
        self.src.link(self.dbin)
        self.typefind = self.dbin.get_by_name("typefind")

        # callbacks
        self.typefind.connect("have-type", self._have_type_cb)
        self.dbin.connect("new-decoded-pad", self._new_decoded_pad_cb)
        self.dbin.connect("no-more-pads", self._no_more_pads_cb)
        self.dbin.connect("unknown-type", self._unknown_type_cb)
Esempio n. 23
0
 def ts2frame(self, ts):
     """Converts timestamp to a frame number"""
     # ts / gst.SECOND gives seconds as an exact Fraction; multiplying by
     # the framerate (self.fr) and rounding yields the frame index.
     return long(round(gst.Fraction(ts, gst.SECOND) * self.fr))
Esempio n. 24
0
 def getDAR(self):
     """Return the display aspect ratio: (width / height) * pixel aspect ratio."""
     return gst.Fraction(self.videowidth, self.videoheight) * self.videopar
Esempio n. 25
0
    def valueChanged(unused_widget, widget, target):
        # Mirror the widget's current value into the target text entry.
        target.set_text(str(widget.getWidgetValue()))

    # (widget class, initial value, extra constructor args) triples used
    # to build one demo row per widget type.
    widgets = (
        (PathWidget, "file:///home/", ()),
        (TextWidget, "banana", ()),
        # NOTE(review): this pattern has a nested quantifier and may
        # backtrack catastrophically on long non-matching input.
        (TextWidget, "words only", ("^([a-zA-Z]+\s*)+$", )),
        (TextWidget, "numbers only", ("^\d+$", ("12", "14"))),
        (NumericWidget, 42, (100, 1)),
        (ToggleWidget, True, ()),
        (ChoiceWidget, "banana", ((("banana", "banana"), ("apple", "apple"),
                                   ("pear", "pear")), )),
        (ColorWidget, 0x336699FF, (int, )),
        (FontWidget, "Sans 9", ()),
        (FractionWidget, "30M", (gst.FractionRange(gst.Fraction(1, 1),
                                                   gst.Fraction(30000,
                                                                1001)), )),
        (FractionWidget, gst.Fraction(25000, 1001), (
            gst.FractionRange(gst.Fraction(1, 1), gst.Fraction(30000, 1001)),
            (
                "25:1",
                gst.Fraction(30, 1),
                "30M",
            ),
        )),
    )

    W = gtk.Window()
    v = gtk.VBox()
    t = gtk.Table()
Esempio n. 26
0
def gst_buffer_from_ndarray(ndarray):
    '''
	Wrap a gst buffer object around the given ndarray object.
	The input must be a 2-D record array.  dtype should define the fields:
	 "red", "green", "blue" and optionally "alpha".  The type of the 
	fields must be uint8.
	'''
    width = ndarray.shape[1]
    height = ndarray.shape[0]
    print 'WIDTHHEIGHT', width, height

    dtype = ndarray.dtype

    ## must at least have 'red','green','blue' fields to be RGB image
    required_fields = ('red', 'green', 'blue')
    hasrgb = reduce(operator.and_,
                    map(dtype.fields.__contains__, required_fields))
    if not hasrgb:
        raise ValueError('array must have at least fields: %s' %
                         (required_fields, ))

    ## fields must all be uint8 (dtype.char == 'B')
    hasbytes = reduce(operator.and_,
                      [desc[0].char == 'B' for desc in dtype.fields.values()])
    if not hasbytes:
        raise ValueError('all fields in array records must be uint8')

    mimetype = 'video/x-raw-rgb'
    buffer_endianness = 'big'
    mask_type = '>i4'

    nfields = len(dtype.fields)
    channels = filter(dtype.fields.__contains__,
                      ('red', 'green', 'blue', 'alpha'))

    ## only going to deal with 8 bit channels
    bytespp = len(channels)
    bpp = 8 * bytespp
    rgb_depth = 24
    if buffer_endianness == 'big':
        offset = 4 - bytespp
    else:
        offset = 0

    # create channel masks for Caps
    masks = {}
    for channel in channels:
        mask = numpy.zeros(4, dtype=numpy.uint8)
        pos = dtype.fields[channel][1]
        mask[pos + offset] = 255
        mask = mask.view(mask_type)
        mask = int(mask[0])
        masks[channel + '_mask'] = mask

    ## make caps based on type of array
    if buffer_endianness == 'big':
        endianness = 4321
    elif buffer_endianness == 'little':
        endianness = 1234
    #caps = gst.caps_new_simple(mimetype, width=width, height=height, framerate=gst.Fraction(0), bpp=bpp, depth=rgb_depth, endianness=endianness, **masks)
    caps = gst.Caps(mimetype)
    cap = caps[0]
    cap['width'] = 182
    cap['height'] = 126
    cap['framerate'] = gst.Fraction(0)
    cap['bpp'] = bpp
    cap['depth'] = rgb_depth
    cap['endianness'] = endianness
    for key, value in masks.items():
        cap[key] = value

    print 'NEW BUFFER CAPS'
    print_caps(caps)
    print ''

    # make buffer and attach array data and new caps
    buf = gst.Buffer(ndarray.data)
    buf.set_caps(caps)

    return buf
Esempio n. 27
0
 def getWidgetValue(self):
     """Return the last valid text parsed as a gst.Fraction, or 1/1."""
     if self.last_valid:
         return self._parseText(self.last_valid)
     return gst.Fraction(1, 1)
Esempio n. 28
0
from pitivi.ui.common import\
    model,\
    frame_rates,\
    audio_rates,\
    audio_depths,\
    audio_channels,\
    get_combo_value,\
    set_combo_value

from pitivi.ui.preset import AudioPresetManager, DuplicatePresetNameException,\
    VideoPresetManager

# FIXME: are we sure the following tables correct?

# Common SD pixel aspect ratios, keyed by a translated display label.
pixel_aspect_ratios = model((str, object), (
    (_("Square"), gst.Fraction(1, 1)),
    (_("480p"), gst.Fraction(10, 11)),
    (_("480i"), gst.Fraction(8, 9)),
    (_("480p Wide"), gst.Fraction(40, 33)),
    (_("480i Wide"), gst.Fraction(32, 27)),
    (_("576p"), gst.Fraction(12, 11)),
    (_("576i"), gst.Fraction(16, 15)),
    (_("576p Wide"), gst.Fraction(16, 11)),
    (_("576i Wide"), gst.Fraction(64, 45)),
))

display_aspect_ratios = model((str, object), (
    (_("Standard (4:3)"), gst.Fraction(4, 3)),
    (_("DV (15:11)"), gst.Fraction(15, 11)),
    (_("DV Widescreen (16:9)"), gst.Fraction(16, 9)),
    (_("Cinema (1.37)"), gst.Fraction(11, 8)),
Esempio n. 29
0
    def setWidgetValue(self, value):
        """Set the (width, height) pair on the two dimension sub-widgets.

        value: a (width, height) sequence.
        """
        # The original also computed gst.Fraction(width, height) into an
        # unused local `dar`; that dead computation has been removed.
        width, height = value

        self.dwidthWidget.setWidgetValue(width)
        self.dheightWidget.setWidgetValue(height)
Esempio n. 30
0
 def getSAR(self):
     """Return the storage aspect ratio (width/height) from the spinbuttons."""
     width = int(self.width_spinbutton.get_value())
     height = int(self.height_spinbutton.get_value())
     return gst.Fraction(width, height)