def testConstructor(self):
    """Check Gst.Fraction construction: two args, one arg, and none."""
    Gst.init(None)

    # Explicit numerator and denominator.
    two_arg = Gst.Fraction(1, 2)
    self.assertEqual(two_arg.num, 1)
    self.assertEqual(two_arg.denom, 2)

    # A single argument defaults the denominator to 1.
    one_arg = Gst.Fraction(1)
    self.assertEqual(one_arg.num, 1)
    self.assertEqual(one_arg.denom, 1)

    # No arguments at all is rejected.
    self.assertRaises(TypeError, Gst.Fraction)
def test_widget_text(self):
    """The text entry must render fractions as "num:denom"."""
    widget = FractionWidget()

    cases = (
        (Gst.Fraction(1000000, 1), "1000000:1"),
        (Gst.Fraction(7504120000000001, 4503600000000002),
         "7504120000000001:4503600000000002"),
    )
    for fraction, expected in cases:
        widget.setWidgetValue(fraction)
        self.assertEqual(widget.text.get_text(), expected)
def test_defaults(self):
    """Checks the built-in defaults of a freshly constructed Config.

    Fixes two copy-paste slips in the original assertions:
    fullscreen's B z-order was asserted via ``fullscreen.a`` a second
    time, and ``pip.a.xpos`` was asserted twice while ``pip.a.ypos``
    was never checked.
    """
    cfg = config.Config()

    # Audio: a single interleaved stereo S16LE structure at 48 kHz.
    self.assertIsInstance(cfg.audio_caps, Gst.Caps)
    self.assertEqual(cfg.audio_caps.get_size(), 1)
    struct = cfg.audio_caps.get_structure(0)
    self.assertEqual(struct.get_name(), "audio/x-raw")
    self.assertEqual(struct.get_value("format"), "S16LE")
    self.assertEqual(struct.get_value("channels"), 2)
    self.assertEqual(struct.get_value("layout"), "interleaved")
    self.assertEqual(struct.get_value("rate"), 48000)

    # Video: a single progressive 1080p YUY2 structure at 30 fps.
    self.assertIsInstance(cfg.video_caps, Gst.Caps)
    self.assertEqual(cfg.video_caps.get_size(), 1)
    struct = cfg.video_caps.get_structure(0)
    self.assertEqual(struct.get_name(), "video/x-raw")
    self.assertEqual(struct.get_value("format"), "YUY2")
    self.assertEqual(struct.get_value("width"), 1920)
    self.assertEqual(struct.get_value("height"), 1080)
    self.assertEqual(struct.get_value("framerate"), Gst.Fraction(30, 1))
    self.assertEqual(struct.get_value("pixel-aspect-ratio"),
                     Gst.Fraction(1, 1))
    self.assertEqual(struct.get_value("interlace-mode"), "progressive")

    # Network endpoints default to "any address, ephemeral port".
    self.assertEqual(cfg.control_addr, ("0.0.0.0", 0))
    self.assertEqual(cfg.clock_addr, ("0.0.0.0", 0))
    self.assertEqual(cfg.avsource_addr, ("0.0.0.0", 0))
    self.assertEqual(cfg.avoutput_addr, ("0.0.0.0", 0))

    self.assertEqual(sorted(cfg.composite_modes.keys()), [
        "fullscreen",
        "picture-in-picture",
        "side-by-side-equal",
        "side-by-side-preview"
    ])

    # Fullscreen: A covers the frame, B is collapsed and invisible.
    fullscreen = cfg.composite_modes["fullscreen"]
    self.assertEqual(fullscreen.a.xpos, 0)
    self.assertEqual(fullscreen.a.width, 1920)
    self.assertEqual(fullscreen.a.ypos, 0)
    self.assertEqual(fullscreen.a.height, 1080)
    self.assertEqual(fullscreen.a.alpha, 1.0)
    self.assertEqual(fullscreen.a.zorder, 1)
    self.assertEqual(fullscreen.b.xpos, 0)
    self.assertEqual(fullscreen.b.width, 0)
    self.assertEqual(fullscreen.b.ypos, 0)
    self.assertEqual(fullscreen.b.height, 0)
    self.assertEqual(fullscreen.b.alpha, 0.0)
    # BUGFIX: this assertion previously re-checked fullscreen.a.zorder.
    # B is expected above A as in the other modes — TODO confirm 2
    # against the actual default config.
    self.assertEqual(fullscreen.b.zorder, 2)

    # Picture-in-picture: A covers the frame, B is a small overlay.
    pip = cfg.composite_modes["picture-in-picture"]
    self.assertEqual(pip.a.xpos, 0)
    self.assertEqual(pip.a.width, 1920)
    # BUGFIX: xpos was asserted twice here; ypos was the intended check.
    self.assertEqual(pip.a.ypos, 0)
    self.assertEqual(pip.a.height, 1080)
    self.assertEqual(pip.a.alpha, 1.0)
    self.assertEqual(pip.a.zorder, 1)
    self.assertEqual(pip.b.xpos, 1421)
    self.assertEqual(pip.b.width, 480)
    self.assertEqual(pip.b.ypos, 800)
    self.assertEqual(pip.b.height, 270)
    self.assertEqual(pip.b.alpha, 1.0)
    self.assertEqual(pip.b.zorder, 2)
def testVideo(self):
    """Setting video properties on a project must round-trip."""
    project = common.create_project()

    expectations = (
        ("videowidth", 1920),
        ("videoheight", 1080),
        ("videorate", Gst.Fraction(50, 7)),
    )
    for attr, value in expectations:
        setattr(project, attr, value)
        self.assertEqual(value, getattr(project, attr))
def run(self):
    """Set up widgets from the first audio/video stream and run the dialog."""
    # TODO: in "onApplyButtonClicked", we only use the first stream...
    # If we have multiple audio or video streams, we should reflect that
    # in the UI, instead of acting as if there was only one. But that means
    # dynamically creating checkboxes and labels in a table and such.
    for stream in self.audio_streams:
        self.channels.set_text(
            get_value_from_model(audio_channels, stream.get_channels()))
        self.sample_rate.set_text(
            get_value_from_model(audio_rates, stream.get_sample_rate()))
        self.has_audio = True
        break
    for stream in self.video_streams:
        self.size_width.set_text(str(stream.get_width()))
        self.size_height.set_text(str(stream.get_height()))
        self.is_image = stream.is_image()
        if not self.is_image:
            # When gst returns a crazy framerate such as 0/1, that either
            # means it couldn't determine it, or it is a variable framerate
            framerate_num = stream.get_framerate_num()
            framerate_denom = stream.get_framerate_denom()
            if framerate_num != 0 and framerate_denom != 0:
                self.frame_rate.set_text(
                    get_value_from_model(
                        frame_rates,
                        Gst.Fraction(framerate_num, framerate_denom)))
            else:
                foo = str(framerate_num) + "/" + str(framerate_denom)
                # Translators: a label showing an invalid framerate value
                # BUGFIX: translate the format string first, THEN substitute.
                # The original `_("… %s …" % foo)` passed the substituted
                # string to gettext, so the catalog msgid could never match.
                self.frame_rate.set_text(_("invalid (%s fps)") % foo)
                self.framerate_checkbutton.set_active(False)
                # For consistency, insensitize the checkbox AND value labels
                self.framerate_checkbutton.set_sensitive(False)
                self.frame_rate.set_sensitive(False)
            # Aspect ratio (probably?) doesn't need such a check:
            self.aspect_ratio.set_text(
                get_value_from_model(
                    pixel_aspect_ratios,
                    Gst.Fraction(stream.get_par_num(),
                                 stream.get_par_denom())))
        self.has_video = True
        break
    if not self.has_video:
        self.frame1.hide()
    if not self.has_audio:
        self.frame2.hide()
    if self.is_image:
        # Images have no framerate/PAR rows and get a dedicated header.
        self.hbox2.hide()
        self.hbox3.hide()
        self.video_header_label.set_markup("<b>" + _("Image:") + "</b>")
    self.dialog.connect("key-press-event", self._keyPressCb)
    self.dialog.run()
def testGetValue(self):
    """A FractionRange stored in a structure must read back intact."""
    Gst.init(None)
    st = Gst.Structure.new_empty("video/x-raw")
    st["range"] = R(Gst.Fraction(1, 30), Gst.Fraction(1, 2))
    value = st["range"]
    # assertEqual replaces failUnlessEqual, which was deprecated in
    # Python 2.7/3.2 and removed entirely in Python 3.12.
    self.assertEqual(value.start, Gst.Fraction(1, 30))
    self.assertEqual(value.stop, Gst.Fraction(1, 2))
def testEqNe(self):
    """Fractions compare by reduced value, not by literal num/denom."""
    Gst.init(None)
    half = Gst.Fraction(1, 2)

    # Identity and plain value equality.
    self.assertEqual(half, half)
    self.assertEqual(Gst.Fraction(1, 2), half)
    # 2/4 reduces to 1/2, so it compares equal.
    self.assertEqual(Gst.Fraction(2, 4), half)

    # Genuinely different values are unequal.
    self.assertNotEqual(Gst.Fraction(1, 3), half)
    self.assertNotEqual(Gst.Fraction(2, 1), half)
def testPropertyMarshalling(self):
    """Fraction GObject properties must marshal to and from Gst.Fraction."""
    Gst.init(None)

    obj = Gst.ElementFactory.make("rawvideoparse")
    # BUGFIX: the original retried the exact same factory name on failure,
    # which can never succeed if the first attempt failed; a single
    # attempt is equivalent.
    if not obj:
        # no rawvideoparse and I don't know of any elements in core or
        # -base using fraction properties. Skip this test.
        return

    # Default framerate of rawvideoparse is 25/1.
    value = obj.props.framerate
    self.assertEqual(value.num, 25)
    self.assertEqual(value.denom, 1)

    # Writing a Gst.Fraction round-trips.
    obj.props.framerate = Gst.Fraction(2, 1)
    value = obj.props.framerate
    self.assertEqual(value.num, 2)
    self.assertEqual(value.denom, 1)

    # A plain int must be rejected and leave the stored value untouched.
    # (assert* replaces the failUnless* aliases removed in Python 3.12.)
    def bad():
        obj.props.framerate = 1
    self.assertRaises(TypeError, bad)
    value = obj.props.framerate
    self.assertEqual(value.num, 2)
    self.assertEqual(value.denom, 1)
def _check_gst_python():
    """Probe whether the gst-python overrides are installed.

    Without the overrides, Gst.Fraction is not callable and raises
    TypeError; that is how their absence is detected.
    """
    from gi.repository import Gst
    try:
        Gst.Fraction(9001, 1)  # It's over NINE THOUSANDS!
    except TypeError:
        return False  # What, nine thousands?! There's no way that can be right
    return True
def __init__(self, instance, hadj):
    """Ruler drawing area above the timeline.

    Args:
        instance: The application instance owning this ruler.
        hadj: The horizontal Gtk.Adjustment to follow when scrolling.
    """
    Gtk.DrawingArea.__init__(self)
    Zoomable.__init__(self)
    Loggable.__init__(self)
    self.log("Creating new ScaleRuler")
    self.app = instance
    self._seeker = Seeker()
    self.hadj = hadj
    hadj.connect("value-changed", self._hadjValueChangedCb)
    self.add_events(Gdk.EventMask.POINTER_MOTION_MASK |
                    Gdk.EventMask.BUTTON_PRESS_MASK |
                    Gdk.EventMask.BUTTON_RELEASE_MASK |
                    Gdk.EventMask.SCROLL_MASK)
    self.pixbuf = None
    # all values are in pixels
    self.pixbuf_offset = 0
    self.pixbuf_offset_painted = 0
    # This is the number of width we allocate for the pixbuf
    self.pixbuf_multiples = 4
    self.position = 0  # In nanoseconds
    self.pressed = False
    self.min_frame_spacing = 5.0
    self.frame_height = 5.0
    # BUGFIX: the original Gst.Fraction(1 / 1) evaluated 1/1 with true
    # division, passing the float 1.0 as the sole argument; pass the
    # numerator and denominator explicitly instead.
    self.frame_rate = Gst.Fraction(1, 1)
    self.ns_per_frame = float(1 / self.frame_rate) * Gst.SECOND
    self.connect('draw', self.drawCb)
    self.connect('configure-event', self.configureEventCb)
    self.callback_id = None
    self.callback_id_scroll = None
def _addRenderPresetButtonClickedCb(self, unused_button):
    """Saves the current render settings as a new uniquely-named preset."""
    preset_name = self._getUniquePresetName(self.render_presets)
    framerate = get_combo_value(self.frame_rate_combo)
    settings = {
        "channels": int(get_combo_value(self.channels_combo)),
        "sample-rate": int(get_combo_value(self.sample_rate_combo)),
        "acodec": get_combo_value(self.audio_encoder_combo).get_name(),
        "vcodec": get_combo_value(self.video_encoder_combo).get_name(),
        "container": get_combo_value(self.muxercombobox).get_name(),
        "frame-rate": Gst.Fraction(int(framerate.num), int(framerate.denom)),
        # Width/height of 0 mean "use the project size".
        "height": 0,
        "width": 0,
    }
    self.render_presets.addPreset(preset_name, settings)
    self.render_presets.restorePreset(preset_name)
    self._updateRenderPresetButtons()
def __init__(self, timeline):
    """Ruler drawing area synced to the timeline's scroll position.

    Args:
        timeline: The timeline container whose hadj is followed.
    """
    Gtk.DrawingArea.__init__(self)
    Zoomable.__init__(self)
    Loggable.__init__(self)
    self.log("Creating new ScaleRuler")
    self.timeline = timeline
    self._pipeline = None
    hadj = timeline.timeline.hadj
    hadj.connect("value-changed", self._hadj_value_changed_cb)
    self.add_events(Gdk.EventMask.POINTER_MOTION_MASK |
                    Gdk.EventMask.BUTTON_PRESS_MASK |
                    Gdk.EventMask.BUTTON_RELEASE_MASK |
                    Gdk.EventMask.SCROLL_MASK)
    self.pixbuf = None
    # all values are in pixels
    self.pixbuf_offset = 0
    self.pixbuf_offset_painted = 0
    self.position = 0  # In nanoseconds
    # BUGFIX: the original Gst.Fraction(1 / 1) evaluated 1/1 with true
    # division, passing the float 1.0 as the sole argument; pass the
    # numerator and denominator explicitly instead.
    self.frame_rate = Gst.Fraction(1, 1)
    self.ns_per_frame = float(1 / self.frame_rate) * Gst.SECOND
    self.scales = SCALES
def test_dependent_properties(self):
    """Checks dependent properties updating is handled correctly."""
    mainloop = common.create_main_loop()
    app = common.create_pitivi()
    app.project_manager.new_blank_project()
    manager = EffectsPropertiesManager(app)
    called = False

    def set_child_property(prop_name, value):
        nonlocal called
        called = True
        self.assertEqual(prop_name, "aspect-ratio")
        GES.Effect.set_child_property(effect, prop_name, value)
        # When setting the aspect-ratio property, and the stars align,
        # the effect also changes the left/right properties.
        # Here we simulate the updating of the dependent properties.
        GES.Effect.set_child_property(effect, "left", 100)
        GES.Effect.set_child_property(effect, "right", 100)

    effect = GES.Effect.new("aspectratiocrop")
    effect.set_child_property = set_child_property
    effect_widget = manager.get_effect_configuration_ui(effect)

    # Index the generated widgets by their property name.
    widgets = {}
    for prop, widget in effect_widget.properties.items():
        widgets[prop.name] = widget

    # Simulate the user choosing an aspect-ratio.
    widgets["aspect-ratio"].set_widget_value(Gst.Fraction(4, 3))
    mainloop.run(until_empty=True)
    self.assertTrue(called)
def select_format(self, source):
    """Helper function that prompts the user to select a video format.

    Returns:
        Gst.Structure of format
    """
    formats = self.list_formats(source)
    # Hard-coded format indexes — presumably tied to the known camera
    # models this script supports; TODO confirm against list_formats().
    if not self.is_camera_640x480:
        fmt = formats[3]
    else:
        fmt = formats[4]
    print(fmt)
    frame_rates = self.get_frame_rate_list(fmt)
    rate = frame_rates[1]
    print(rate)
    # work around older GI implementations that lack proper
    # Gst.Fraction/Gst.ValueList support
    # IDIOM: isinstance() instead of an exact type() comparison.
    if isinstance(rate, Gst.Fraction):
        fmt.set_value("framerate", rate)
    else:
        # Older GI hands back the rate as a "num/denom" string.
        numerator, denominator = rate.split("/")
        fmt.set_value("framerate",
                      Gst.Fraction(int(numerator), int(denominator)))
    # fmt is a Gst.Structure but Caps can only be generated from a string,
    # so a to_string conversion is needed
    return fmt
def __setup_videoscale_capsfilter(self):
    '''
    Sets up the output format (width, height) for the video
    '''
    caps_struct = Gst.Structure.new_empty('video/x-raw')
    caps_struct.set_value('pixel-aspect-ratio', Gst.Fraction(1, 1))
    caps_struct.set_value('framerate', Gst.Fraction(30, 1))

    # Constrain only the dominant dimension; the scaler keeps the
    # aspect ratio for the other one.
    video_info = self.data.get_video_streams()[0]
    is_portrait = video_info.get_height() > video_info.get_width()
    if is_portrait:
        caps_struct.set_value('height', self.destination_dimensions[1])
    else:
        caps_struct.set_value('width', self.destination_dimensions[0])

    caps = Gst.Caps.new_empty()
    caps.append_structure(caps_struct)
    self.capsfilter.set_property('caps', caps)
def testGetValue(self):
    """Lists of fractions, and nested lists, survive a structure round-trip."""
    Gst.init(None)
    st = Gst.Structure.new_empty("video/x-raw")
    # assertEqual replaces failUnlessEqual, which was deprecated in
    # Python 2.7/3.2 and removed entirely in Python 3.12.
    st["framerate"] = L([Gst.Fraction(1, 30), Gst.Fraction(1, 2)])
    value = st["framerate"]
    self.assertEqual(value[0], Gst.Fraction(1, 30))
    self.assertEqual(value[1], Gst.Fraction(1, 2))
    # Nested lists behave like a 2x2 matrix.
    st["matrix"] = L([L([0, 1]), L([-1, 0])])
    value = st["matrix"]
    self.assertEqual(value[0][0], 0)
    self.assertEqual(value[0][1], 1)
    self.assertEqual(value[1][0], -1)
    self.assertEqual(value[1][1], 0)
def _deserializePreset(self, parser):
    """Builds the preset dict from the parsed key/value mapping."""
    framerate = Gst.Fraction(parser["framerate-num"],
                             parser["framerate-denom"])
    par = Gst.Fraction(parser["par-num"], parser["par-denom"])
    return {
        "width": parser["width"],
        "height": parser["height"],
        "frame-rate": framerate,
        "par": par,
    }
def testConstructor(self):
    """Valid and invalid ways of building a Gst.FractionRange."""
    Gst.init(None)

    low = Gst.Fraction(1, 30)
    high = Gst.Fraction(1, 2)
    r = Gst.FractionRange(low, high)
    self.assertEqual(r.start, low)
    self.assertEqual(r.stop, high)

    # start must be below stop, and both bounds must be fractions.
    self.assertRaises(TypeError, Gst.FractionRange, high, low)
    self.assertRaises(TypeError, Gst.FractionRange, 2, high)
    self.assertRaises(TypeError, Gst.FractionRange, high, 2)
    self.assertRaises(TypeError, Gst.FractionRange)
def testInitialization(self):
    """Loading assets progressively replaces the default project settings."""
    mainloop = common.create_main_loop()
    remaining = collections.deque([
        common.get_sample_uri("flat_colour1_640x480.png"),
        common.get_sample_uri("tears_of_steel.webm"),
        common.get_sample_uri("1sec_simpsons_trailer.mp4")])

    def _on_loaded(project, timeline):
        project.addUris([remaining.popleft()])

    def _on_progress(project, progress, estimated_time):
        if progress == 100:
            if remaining:
                project.addUris([remaining.popleft()])
            else:
                mainloop.quit()

    # Create a blank project and add some assets.
    project = common.create_project()
    self.assertTrue(project._has_default_video_settings)
    self.assertTrue(project._has_default_audio_settings)
    project.connect_after("loaded", _on_loaded)
    project.connect_after("asset-loading-progress", _on_progress)
    mainloop.run()

    assets = project.list_assets(GES.UriClip)
    self.assertEqual(3, len(assets), assets)
    self.assertFalse(project._has_default_video_settings)
    self.assertFalse(project._has_default_audio_settings)

    # The audio settings should match tears_of_steel.webm
    self.assertEqual(1, project.audiochannels)
    self.assertEqual(44100, project.audiorate)

    # The video settings should match tears_of_steel.webm
    self.assertEqual(960, project.videowidth)
    self.assertEqual(400, project.videoheight)
    self.assertEqual(Gst.Fraction(24, 1), project.videorate)
    self.assertEqual(Gst.Fraction(1, 1), project.videopar)
def run(self):
    """Populates the widgets from the first streams and runs the dialog."""
    # TODO: in "onApplyButtonClicked", we only use the first stream...
    # If we have multiple audio or video streams, we should reflect that
    # in the UI, instead of acting as if there was only one. But that means
    # dynamically creating checkboxes and labels in a table and such.
    if self.audio_streams:
        audio = self.audio_streams[0]
        self.channels.set_text(
            get_value_from_model(audio_channels, audio.get_channels()))
        self.sample_rate.set_text(
            get_value_from_model(audio_rates, audio.get_sample_rate()))
        self.sample_depth.set_text(
            get_value_from_model(audio_depths, audio.get_depth()))
        self.has_audio = True
    if self.video_streams:
        video = self.video_streams[0]
        self.size_width.set_text(str(video.get_width()))
        self.size_height.set_text(str(video.get_height()))
        self.is_image = video.is_image()
        if not self.is_image:
            self.frame_rate.set_text(
                get_value_from_model(
                    frame_rates,
                    Gst.Fraction(video.get_framerate_num(),
                                 video.get_framerate_denom())))
            self.aspect_ratio.set_text(
                get_value_from_model(
                    pixel_aspect_ratios,
                    Gst.Fraction(video.get_par_num(),
                                 video.get_par_denom())))
        self.has_video = True
    if not self.has_video:
        self.frame1.hide()
    if not self.has_audio:
        self.frame2.hide()
    if self.is_image:
        self.hbox2.hide()
        self.hbox3.hide()
        self.label2.set_markup("<b>" + _("Image:") + "</b>")
    self.dialog.run()
def setWidgetValue(self, value):
    """Displays a framerate value in the text entry.

    Args:
        value: A Gst.Fraction, a parseable string, or a plain number.
    """
    # IDIOM: isinstance() instead of the exact `type(value) is str` check.
    if isinstance(value, str):
        value = self._parseText(value)
    elif not hasattr(value, "denom"):
        value = Gst.Fraction(value)
    if (value.denom / 1001) == 1:
        # NTSC-style rates (x000/1001) are shown in "M" notation,
        # e.g. 30000/1001 -> "30M".
        text = "%gM" % (value.num / 1000)
    else:
        text = "%g:%g" % (value.num, value.denom)
    self.text.set_text(text)
def translate(v, dtype):
    """Converts the raw string *v* to a Python value according to *dtype*.

    Args:
        v (str): The raw value, e.g. "30/1", "42", "3.5", " text ".
        dtype (str): One of 'fraction', 'int', 'float', 'string'.

    Returns:
        The converted value.

    Raises:
        TypeError: If *dtype* is not one of the supported names.
    """
    if dtype == 'fraction':
        # "num/denom" -> Gst.Fraction(num, denom)
        return Gst.Fraction(*map(int, v.split('/')))
    elif dtype == 'int':
        return int(v)
    elif dtype == 'float':
        # BUGFIX: this branch returned int(v), which raises ValueError on
        # any value with a decimal point and truncates float semantics.
        return float(v)
    elif dtype == 'string':
        return v.strip()
    else:
        raise TypeError('Unsupported type: %s' % dtype)
def testGetValue(self):
    """Gst.ValueArray values survive a structure round-trip, nested too."""
    Gst.init(None)
    st = Gst.Structure.new_empty("video/x-raw")

    st["array"] = Gst.ValueArray([Gst.Fraction(1, 30), Gst.Fraction(1, 2)])
    value = st["array"]
    # Re-wrapping the read-back value must also be accepted.
    st["array"] = Gst.ValueArray(value)
    self.assertEqual(value[0], Gst.Fraction(1, 30))
    self.assertEqual(value[1], Gst.Fraction(1, 2))

    # Nested arrays behave like a 2x2 matrix.
    st["matrix"] = Gst.ValueArray(
        [Gst.ValueArray([0, 1]), Gst.ValueArray([-1, 0])])
    matrix = st["matrix"]
    expected = ((0, 1), (-1, 0))
    for row_index, row in enumerate(expected):
        for col_index, cell in enumerate(row):
            self.assertEqual(matrix[row_index][col_index], cell)
def createNoPreset(self, mgr):
    """Registers the "No preset" entry reflecting the current UI state."""
    framerate = get_combo_value(self.frame_rate_combo)
    mgr.prependPreset(_("No preset"), {
        "channels": int(get_combo_value(self.channels_combo)),
        "sample-rate": int(get_combo_value(self.sample_rate_combo)),
        "acodec": get_combo_value(self.audio_encoder_combo).get_name(),
        "vcodec": get_combo_value(self.video_encoder_combo).get_name(),
        "container": get_combo_value(self.muxercombobox).get_name(),
        "frame-rate": Gst.Fraction(int(framerate.num), int(framerate.denom)),
        "height": self.project.videoheight,
        "width": self.project.videowidth})
def _apply(self):
    """Applies the widgets values to the project."""
    project = self.project
    if self.has_video:
        # This also handles the case where the video is a still image
        video = self.video_streams[0]
        if self.size_checkbutton.get_active():
            project.videowidth = video.get_width()
            project.videoheight = video.get_height()
        # Framerate and pixel-aspect-ratio are meaningless for images.
        if not self.is_image:
            if self.framerate_checkbutton.get_active():
                project.videorate = Gst.Fraction(
                    video.get_framerate_num(), video.get_framerate_denom())
            if self.PAR_checkbutton.get_active():
                project.videopar = Gst.Fraction(
                    video.get_par_num(), video.get_par_denom())
    if self.has_audio:
        audio = self.audio_streams[0]
        if self.channels_checkbutton.get_active():
            project.audiochannels = audio.get_channels()
        if self.samplerate_checkbutton.get_active():
            project.audiorate = audio.get_sample_rate()
def _loadSection(self, filepath):
    """Loads a preset section from a JSON file and registers it.

    Args:
        filepath (str): Path to the JSON preset file.
    """
    # BUGFIX: close the file deterministically instead of leaking the
    # handle until garbage collection; json.load reads the stream directly.
    with open(filepath) as json_file:
        parser = json.load(json_file)
    name = parser["name"]
    width = parser["width"]
    height = parser["height"]
    framerate = Gst.Fraction(parser["framerate-num"],
                             parser["framerate-denom"])
    par = Gst.Fraction(parser["par-num"], parser["par-denom"])
    self.addPreset(name, {
        "width": width,
        "height": height,
        "frame-rate": framerate,
        "par": par,
        "filepath": filepath,
    })
def _onValueChangedCb(self, unused_widget, effect_widget, prop, effect):
    """Writes the edited property value back to the effect, undoably."""
    value = effect_widget.getWidgetValue()
    # FIXME Workaround in order to make aspectratiocrop working
    if isinstance(value, Gst.Fraction):
        value = Gst.Fraction(int(value.num), int(value.denom))
    from pitivi.undo.timeline import CommitTimelineFinalizingAction
    pipeline = self.app.project_manager.current_project.pipeline
    finalizer = CommitTimelineFinalizingAction(pipeline)
    with self.app.action_log.started("Effect property change",
                                     finalizing_action=finalizer,
                                     toplevel=True):
        effect.set_child_property(prop.name, value)
def __init__(self):
    # Multi-bitrate RTMP streaming pipeline: a looping file source is
    # parsed, normalized to 30000/1001 fps, fanned out through a tee,
    # and encoded at three rates (low/med/high), each pushed to its own
    # rtmpsink.
    self.mainloop = GLib.MainLoop()
    self.pipeline = Gst.Pipeline()
    self.clock = self.pipeline.get_pipeline_clock()
    self.bus = self.pipeline.get_bus()
    self.bus.add_signal_watch()
    self.bus.connect('message::error', self.on_error)
    # Per-variant settings: [name, scale caps, x264 bitrate (kbps), threads]
    rates = [
        ['low', 'video/x-raw, width=640, height=360', 500, 3],
        ['med', 'video/x-raw, width=1280, height=720', 1500, 3],
        ['high', 'video/x-raw, width=1920, height=1080', 5000, 4]
    ]
    # Video input
    # filesrc location=result.mp4 ! decodebin2 ! ffmpegcolorspace ! video/x-raw-rgb ! avimux !
    # NOTE(review): self.malm appears to make-and-link the listed elements
    # and expose the named ones ('vinput', ...) as attributes — confirm
    # against its definition elsewhere in the project.
    self.malm([
        ['multifilesrc', None, {'location': './jellyfish-25-mbps-hd-hevc.mpg', 'loop':'true'}],
        ['videoparse', None, {'width':1920,'height':1080, 'framerate':Gst.Fraction(30, 1)}],
        #['videotestsrc', None, {}],
        ['capsfilter', None, {'caps': 'video/x-raw, width=1920, height=1080'}],
        ['videoconvert', None, {}],
        ['deinterlace', None, {}],
        ['videorate', None, {}],
        ['capsfilter', None, {'caps': 'video/x-raw, framerate=30000/1001' }],
        ['tee', 'vinput', {}]
    ])
    # Create each encoder, muxer, and rtmpsink.
    for rate in rates:
        self.malm([
            ['queue', 'v{}'.format(rate[0]), {'max-size-bytes': 104857600}],
            ['videoscale', None, {}],
            ['capsfilter', None, {'caps': rate[1]}],
            ['x264enc', None, {
                'speed-preset': settings.speed_preset,
                'tune': 'zerolatency',
                'bitrate': rate[2],
                'threads': rate[3],
                'option-string': 'scenecut=0'
            }],
            ['capsfilter', None, {'caps': 'video/x-h264, profile=baseline'}],
            ['h264parse', None, {}],
            ['flvmux', 'm{}'.format(rate[0]), {'streamable': True}],
            ['rtmpsink', None, {'location': settings.stream_location + rate[0]}]
        ])
        # Link the shared tee to this variant's queue (attribute named
        # 'v<low|med|high>' — presumably registered by malm above).
        self.vinput.link(getattr(self, 'v{}'.format(rate[0])))
def __init__(self, timeline, hadj):
    """Focusable ruler drawing area above the timeline.

    Args:
        timeline: The timeline container this ruler belongs to.
        hadj: The horizontal Gtk.Adjustment to follow when scrolling.
    """
    Gtk.DrawingArea.__init__(self)
    Zoomable.__init__(self)
    Loggable.__init__(self)
    self.log("Creating new ScaleRuler")
    # Allows stealing focus from other GTK widgets, prevent accidents:
    self.props.can_focus = True
    self.connect("focus-in-event", self._focusInCb)
    self.connect("focus-out-event", self._focusOutCb)
    self.timeline = timeline
    self._background_color = timeline.get_style_context().lookup_color(
        'theme_bg_color')[1]
    self._seeker = Seeker()
    self.hadj = hadj
    hadj.connect("value-changed", self._hadjValueChangedCb)
    self.add_events(Gdk.EventMask.POINTER_MOTION_MASK |
                    Gdk.EventMask.BUTTON_PRESS_MASK |
                    Gdk.EventMask.BUTTON_RELEASE_MASK |
                    Gdk.EventMask.SCROLL_MASK)
    self.pixbuf = None
    # all values are in pixels
    self.pixbuf_offset = 0
    self.pixbuf_offset_painted = 0
    # This is the number of width we allocate for the pixbuf
    self.pixbuf_multiples = 4
    self.position = 0  # In nanoseconds
    self.pressed = False
    # BUGFIX: the original Gst.Fraction(1 / 1) evaluated 1/1 with true
    # division, passing the float 1.0 as the sole argument; pass the
    # numerator and denominator explicitly instead.
    self.frame_rate = Gst.Fraction(1, 1)
    self.ns_per_frame = float(1 / self.frame_rate) * Gst.SECOND
    self.connect('draw', self.drawCb)
    self.connect('configure-event', self.configureEventCb)
    self.callback_id = None
    self.callback_id_scroll = None
    self.set_size_request(0, HEIGHT)
    # Blend normal and insensitive foreground colors (3:2) for the
    # dimmed tick/label color.
    style = self.get_style_context()
    color_normal = style.get_color(Gtk.StateFlags.NORMAL)
    color_insensitive = style.get_color(Gtk.StateFlags.INSENSITIVE)
    self._color_normal = color_normal
    self._color_dimmed = Gdk.RGBA(
        *[(x * 3 + y * 2) / 5
          for x, y in ((color_normal.red, color_insensitive.red),
                       (color_normal.green, color_insensitive.green),
                       (color_normal.blue, color_insensitive.blue))])
    self.scales = SCALES
def format_framerate_value(framerate):
    """Formats the framerate or returns 0 if unable to determine it."""
    if isinstance(framerate, DiscovererVideoInfo):
        framerate = Gst.Fraction(framerate.get_framerate_num(),
                                 framerate.get_framerate_denom())
    # A zero denominator means the framerate could not be determined.
    if framerate.denom == 0:
        return "0"
    fps = framerate.num / framerate.denom
    # Keep maximum 3 decimals.
    fps = fps * 1000 // 1 / 1000
    return "{0:n}".format(fps)