Example #1
    def run(self):
        """Set up widgets and run the dialog"""
        # TODO: in "onApplyButtonClicked", we only use the first stream...
        # If we have multiple audio or video streams, we should reflect that
        # in the UI, instead of acting as if there was only one. But that means
        # dynamically creating checkboxes and labels in a table and such.
        for stream in self.audio_streams:
            self.channels.set_text(
                get_value_from_model(audio_channels, stream.get_channels()))
            self.sample_rate.set_text(
                get_value_from_model(audio_rates, stream.get_sample_rate()))
            self.has_audio = True
            break

        for stream in self.video_streams:
            self.size_width.set_text(str(stream.get_width()))
            self.size_height.set_text(str(stream.get_height()))
            self.is_image = stream.is_image()
            if not self.is_image:
                # When gst returns a crazy framerate such as 0/1, that either
                # means it couldn't determine it, or it is a variable framerate
                framerate_num = stream.get_framerate_num()
                framerate_denom = stream.get_framerate_denom()
                if framerate_num != 0 and framerate_denom != 0:
                    self.frame_rate.set_text(
                        get_value_from_model(
                            frame_rates,
                            Gst.Fraction(framerate_num, framerate_denom)))
                else:
                    foo = str(framerate_num) + "/" + str(framerate_denom)
                    # Translators: a label showing an invalid framerate value
                    self.frame_rate.set_text(_("invalid (%s fps)") % foo)
                    self.framerate_checkbutton.set_active(False)
                    # For consistency, insensitize the checkbox AND value labels
                    self.framerate_checkbutton.set_sensitive(False)
                    self.frame_rate.set_sensitive(False)

                # Aspect ratio (probably?) doesn't need such a check:
                self.aspect_ratio.set_text(
                    get_value_from_model(
                        pixel_aspect_ratios,
                        Gst.Fraction(stream.get_par_num(),
                                     stream.get_par_denom())))

            self.has_video = True
            break

        if not self.has_video:
            self.frame1.hide()
        if not self.has_audio:
            self.frame2.hide()
        if self.is_image:
            self.hbox2.hide()
            self.hbox3.hide()
            self.video_header_label.set_markup("<b>" + _("Image:") + "</b>")

        self.dialog.connect("key-press-event", self._keyPressCb)
        self.dialog.run()
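The get_value_from_model() calls above translate a raw GStreamer value (channel count, sample rate, framerate fraction) into the human-readable label stored in a lookup model. The helper itself is defined elsewhere in the module; a minimal sketch, assuming each model row is a (label, value) pair:

def get_value_from_model(model, value):
    # Hypothetical helper; the real implementation may differ.
    # Each row is assumed to hold (display label, raw value),
    # e.g. ("44.1 kHz", 44100) or ("Stereo", 2).
    for row in model:
        if row[1] == value:
            return str(row[0])
    # No preset label matched; fall back to the plain string form.
    return str(value)
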
Example #2
    def run(self):
        """Set up widgets and run the dialog"""
        # TODO: in "onApplyButtonClicked", we only use the first stream...
        # If we have multiple audio or video streams, we should reflect that
        # in the UI, instead of acting as if there was only one. But that means
        # dynamically creating checkboxes and labels in a table and such.
        for stream in self.audio_streams:
            self.channels.set_text(
                get_value_from_model(audio_channels, stream.get_channels()))
            self.sample_rate.set_text(
                get_value_from_model(audio_rates, stream.get_sample_rate()))
            self.has_audio = True
            break

        for stream in self.video_streams:
            self.size_width.set_text(str(stream.get_width()))
            self.size_height.set_text(str(stream.get_height()))
            self.is_image = stream.is_image()
            if not self.is_image:
                # When gst returns a crazy framerate such as 0/1, that either
                # means it couldn't determine it, or it is a variable framerate
                framerate_num = stream.get_framerate_num()
                framerate_denom = stream.get_framerate_denom()
                if framerate_num != 0 and framerate_denom != 0:
                    self.frame_rate.set_text(
                        get_value_from_model(frame_rates,
                            Gst.Fraction(framerate_num, framerate_denom)
                        ))
                else:
                    foo = str(framerate_num) + "/" + str(framerate_denom)
                    # Translators: a label showing an invalid framerate value
                    self.frame_rate.set_text(_("invalid (%s fps)") % foo)
                    self.framerate_checkbutton.set_active(False)
                    # For consistency, insensitize the checkbox AND value labels
                    self.framerate_checkbutton.set_sensitive(False)
                    self.frame_rate.set_sensitive(False)

                # Aspect ratio (probably?) doesn't need such a check:
                self.aspect_ratio.set_text(
                    get_value_from_model(pixel_aspect_ratios, Gst.Fraction(
                        stream.get_par_num(),
                        stream.get_par_denom())))

            self.has_video = True
            break

        if not self.has_video:
            self.frame1.hide()
        if not self.has_audio:
            self.frame2.hide()
        if self.is_image:
            self.hbox2.hide()
            self.hbox3.hide()
            self.video_header_label.set_markup("<b>" + _("Image:") + "</b>")

        self.dialog.connect("key-press-event", self._keyPressCb)
        self.dialog.run()
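GStreamer reports framerates as exact fractions, and a fraction with a zero term (typically 0/1) conventionally means the rate is unknown or variable, which is what the branch above guards against. A pure-Python sketch of the same check, detached from the widgets:

def describe_framerate(num, denom):
    # 0/1 (or any zero term) means GStreamer could not determine the
    # framerate, or the stream has a variable framerate.
    if num == 0 or denom == 0:
        return "invalid (%d/%d fps)" % (num, denom)
    return "%.3f fps" % (float(num) / denom)

describe_framerate(30000, 1001)  # NTSC video: "29.970 fps"
describe_framerate(0, 1)         # unknown/variable: "invalid (0/1 fps)"
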
Example #3
    def run(self):
        """Set up widgets and run the dialog"""
        # TODO: in "onApplyButtonClicked", we only use the first stream...
        # If we have multiple audio or video streams, we should reflect that
        # in the UI, instead of acting as if there was only one. But that means
        # dynamically creating checkboxes and labels in a table and such.
        for stream in self.audio_streams:
            self.channels.set_text(
                get_value_from_model(audio_channels, stream.get_channels()))
            self.sample_rate.set_text(
                get_value_from_model(audio_rates, stream.get_sample_rate()))
            self.sample_depth.set_text(
                get_value_from_model(audio_depths, stream.get_depth()))
            self.has_audio = True
            break

        for stream in self.video_streams:
            self.size_width.set_text(str(stream.get_width()))
            self.size_height.set_text(str(stream.get_height()))
            self.is_image = stream.is_image()
            if not self.is_image:
                self.frame_rate.set_text(
                    get_value_from_model(
                        frame_rates,
                        Gst.Fraction(stream.get_framerate_num(),
                                     stream.get_framerate_denom())))
                self.aspect_ratio.set_text(
                    get_value_from_model(
                        pixel_aspect_ratios,
                        Gst.Fraction(stream.get_par_num(),
                                     stream.get_par_denom())))
            self.has_video = True
            break

        if not self.has_video:
            self.frame1.hide()
        if not self.has_audio:
            self.frame2.hide()
        if self.is_image:
            self.hbox2.hide()
            self.hbox3.hide()
            self.label2.set_markup("<b>" + _("Image:") + "</b>")
        self.dialog.run()
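The names audio_channels, audio_rates, audio_depths, frame_rates and pixel_aspect_ratios refer to lookup models defined elsewhere in the module. A hedged sketch of how an integer-valued model of this kind could be built with GTK, assuming two columns of (label, value):

import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk

def build_model(pairs):
    # Hypothetical builder; column 0 is the display label, column 1 the raw value.
    store = Gtk.ListStore(str, int)
    for label, value in pairs:
        store.append([label, value])
    return store

audio_rates = build_model([
    ("22.05 kHz", 22050),
    ("44.1 kHz", 44100),
    ("48 kHz", 48000),
])
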
Example #4
    def run(self):
        """Set up widgets and run the dialog"""
        # TODO: in "onApplyButtonClicked", we only use the first stream...
        # If we have multiple audio or video streams, we should reflect that
        # in the UI, instead of acting as if there was only one. But that means
        # dynamically creating checkboxes and labels in a table and such.
        for stream in self.audio_streams:
            self.channels.set_text(
                get_value_from_model(audio_channels, stream.get_channels()))
            self.sample_rate.set_text(
                get_value_from_model(audio_rates, stream.get_sample_rate()))
            self.has_audio = True
            break

        for stream in self.video_streams:
            self.size_width.set_text(str(stream.get_width()))
            self.size_height.set_text(str(stream.get_height()))
            self.is_image = stream.is_image()
            if not self.is_image:
                self.frame_rate.set_text(
                    get_value_from_model(frame_rates, Gst.Fraction(
                        stream.get_framerate_num(),
                        stream.get_framerate_denom())))
                self.aspect_ratio.set_text(
                    get_value_from_model(pixel_aspect_ratios, Gst.Fraction(
                        stream.get_par_num(),
                        stream.get_par_denom())))
            self.has_video = True
            break

        if not self.has_video:
            self.frame1.hide()
        if not self.has_audio:
            self.frame2.hide()
        if self.is_image:
            self.hbox2.hide()
            self.hbox3.hide()
            self.label2.set_markup("<b>" + _("Image:") + "</b>")

        self.dialog.connect("key-press-event", self._keyPressCb)
        self.dialog.run()
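
The last two lines attach a key-press handler before running the dialog. The handler is not part of this snippet; a plausible sketch, assuming it simply dismisses the dialog when Escape is pressed:

from gi.repository import Gdk

def _keyPressCb(self, widget, event):
    # Hypothetical handler on the dialog class; the real callback may do more.
    if event.keyval == Gdk.KEY_Escape:
        widget.hide()   # dismiss the dialog window
        return True     # stop further propagation of the event
    return False        # let other handlers process the event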