def test_columns(self):
    """Check the store's column count and per-column GType registrations."""
    self.assertEqual(
        self.store.get_n_columns(),
        len(self.store._class_type.Meta.get_column_properties()))
    # Each named column must carry the expected GType.
    expected = (
        (self.store.c_name, "gchararray"),
        (self.store.c_number, "gdouble"),
        (self.store.c_test, "PyObject"),
    )
    for column, type_name in expected:
        self.assertEqual(self.store.get_column_type(column),
                         gobject.type_from_name(type_name))
def create_collist_from_node(self, node, storelist):
    """Build a list-store row from an XML *node*.

    For each column ``i`` the value is read from attribute ``c<i>`` and
    converted according to the column's type in *storelist*.
    """
    collist = []
    for i in range(len(storelist)):
        # NOTE(review): the column index is appended before each value,
        # so the result interleaves indices with values ([0, v0, 1, v1,
        # ...]).  Confirm downstream consumers really expect this.
        collist.append(i)
        value = node.getAttribute('c' + str(i))
        if storelist[i] == gobject.TYPE_BOOLEAN:
            # Only the literal string 'true' counts as True.
            if value == 'true':
                collist.append(True)
            else:
                collist.append(False)
        elif storelist[i] == gobject.TYPE_UINT:
            # Missing/empty attribute defaults to 0.
            if value:
                collist.append(int(value))
            else:
                collist.append(0)
        elif storelist[i] == gobject.TYPE_STRING:
            # Missing/empty attribute defaults to ''.
            if value:
                collist.append(value)
            else:
                collist.append('')
        elif storelist[i] == gobject.type_from_name('GdkPixbuf'):  #@UndefinedVariable
            # Strings are looked up in the pixbuf map; anything else is
            # assumed to already be a pixbuf-like object.
            if type(value) == types.StringType or type(value) == types.UnicodeType:
                pixbuf = self.get_pixbuf_map(value)
            else:
                pixbuf = value
            collist.append(pixbuf)
        elif storelist[i][:5] == 'i18n.':
            # 'i18n.<n>' columns translate the value of column <n>.
            collist.append(_(node.getAttribute('c' + storelist[i][5:])))
        # NOTE(review): a column whose type matches none of the branches
        # appends no value at all for this row — TODO confirm intended.
    return collist
def get_type(self, x):
    """Classify a GParamSpec GType into a coarse category name.

    Returns one of "float", "integer", "string", "enum", "boolean",
    or "unknown" when *x* matches none of the known GParam types.
    """
    categories = (
        ("float", ("GParamFloat", "GParamDouble")),
        ("integer", ("GParamInt", "GParamInt64", "GParamUInt",
                     "GParamUInt64", "GParamLong", "GParamULong")),
        ("string", ("GParamString", "GParamChar", "GParamUChar")),
        ("enum", ("GParamEnum",)),
        ("boolean", ("GParamBoolean",)),
    )
    for label, type_names in categories:
        for type_name in type_names:
            if gobject.type_is_a(x, gobject.type_from_name(type_name)):
                return label
    return "unknown"
def setup_node_from_iter(self, node, model, iter, storelist):
    """Serialize one model row into ``c<i>`` attributes of *node*.

    Pixbuf columns are written via the reverse pixbuf map; unicode-typed
    pseudo-columns (e.g. i18n markers) are skipped; everything else is
    stringified.
    """
    for index, column_type in enumerate(storelist):
        attr_name = 'c' + str(index)
        if column_type == gobject.type_from_name('GdkPixbuf'):  #@UndefinedVariable
            node.setAttribute(
                attr_name,
                self.pixbuf_revmap[id(model.get_value(iter, index))])
        elif type(column_type) is not types.UnicodeType:
            # XXX use pixbuf_revmap to revert it
            node.setAttribute(attr_name, str(model.get_value(iter, index)))
def _override_property_internal(self, storage, full_name, klass):
    """Register *klass* as the handler for the property ``Type::prop``.

    *full_name* must be of the form ``"TypeName::prop-name"``.  Raises
    TypeError if *klass* is not a PropType subclass, if the property
    already exists in *storage* (use override_simple then), or if its
    custom_editor is not a PropertyCustomEditor subclass.
    """
    if not issubclass(klass, PropType):
        raise TypeError("klass needs to be a subclass of PropType")
    type_name, prop_name = full_name.split('::')
    pspec = storage.get_pspec(type_name, prop_name)
    # If we can't find the property in the GType, it's a custom type
    if not storage.has_property(type_name, prop_name):
        storage.add_custom_type(klass, type_name, prop_name, pspec)
    else:
        raise TypeError("%s exists, use override_simple" % full_name)
    # Note, that at this point we need to setup the overriden class
    # since we're not going to call the type constructor, so
    # this needs to be in sync with PropMeta.new
    klass.name = prop_name
    klass.owner_name = type_name
    klass.owner_type = gobject.type_from_name(type_name)
    if not issubclass(klass.custom_editor, PropertyCustomEditor):
        raise TypeError(
            "custom_editor %r for property class %r needs to be a "
            "subclass of PropertyCustomEditor " % (klass.custom_editor, klass))
def get_by_name_closest(self, type_name):
    """Return widget_adaptor for type_name or closest ancestor"""
    gtype = gobject.type_from_name(type_name)
    while True:
        adapter = self._widget_adaptors.get(gobject.type_name(gtype))
        if adapter is not None:
            return adapter
        # Walk one step up the GType inheritance chain and retry.
        # NOTE(review): there is no explicit stop at the hierarchy root —
        # presumably an adaptor for a fundamental ancestor always exists,
        # or gobject.type_parent errors out; confirm.
        gtype = gobject.type_parent(gtype)
def OnDynamicPad(self, uridecodebin, src_pad):
    """Link a newly exposed uridecodebin pad into the encodebin.

    Routing depends on the configured output: audio-only (no container),
    container without video ("novid"), or full container output; after
    linking, application tags are merged into any tag-setter element.
    """
    origin = src_pad.get_caps()
    if (self.container == False):
        # Audio-only output: link straight to encodebin's static audio pad.
        a = origin.to_string()
        if a.startswith("audio/"):
            sinkpad = self.encodebin.get_static_pad("audio_0")
            src_pad.link(sinkpad)
    else:
        if self.videocaps == "novid":
            # Container output with video removed: request and link
            # only audio pads.
            c = origin.to_string()
            if c.startswith("audio/"):
                sinkpad = self.encodebin.emit("request-pad", origin)
                d = sinkpad.get_caps().to_string()
                if d.startswith("audio/"):
                    src_pad.link(sinkpad)
        else:
            # Checking if its a subtitle pad which we can't deal with
            # currently.
            # Making sure that when we remove video from a file we don't
            # bother with the video pad.
            c = origin.to_string()
            if not c.startswith("text/"):
                if not (c.startswith("video/") and (self.videocaps == False)):
                    # print "creating sinkpad"
                    sinkpad = self.encodebin.emit("request-pad", origin)
                if c.startswith("audio/"):
                    src_pad.link(sinkpad)
                elif ((c.startswith("video/") or c.startswith("image/"))
                      and (self.videocaps != False)):
                    if self.videopasstoggle == False:
                        # Transcoding path: run the video through the
                        # deinterlacer/flipper chain before the encoder.
                        src_pad.link(self.deinterlacer.get_static_pad("sink"))
                        self.videoflipper.get_static_pad("src").link(sinkpad)
                    else:
                        # Passthrough path: link the source pad directly.
                        srccaps = src_pad.get_caps()
                        srcstring = srccaps.to_string()
                        #print "source pad is " + str(srcstring)
                        sinkcaps = sinkpad.get_caps()
                        sinkstring = sinkcaps.to_string()
                        #print "sinkpad is " + str(sinkstring)
                        src_pad.link(sinkpad)
    # Grab element from encodebin which supports tagsetter interface and set app name
    # to Transmageddon
    GstTagSetterType = gobject.type_from_name("GstTagSetter")
    tag_setting_element = self.encodebin.get_by_interface(GstTagSetterType)
    if tag_setting_element != None:
        taglist = gst.TagList()
        taglist[gst.TAG_ENCODER] = "Transmageddon encoder"
        # this should probably be set to
        # string combining audio+video encoder
        # implementations
        taglist[gst.TAG_APPLICATION_NAME] = "Transmageddon transcoder"
        tag_setting_element.merge_tags(taglist, gst.TAG_MERGE_APPEND)
def xgc_get_gobject_type(type_name):
    """Translate an XML type keyword into the matching GObject type.

    Known keywords are 'boolean', 'uint', 'string' and 'pixmap'; any
    other value is returned unchanged (the caller may pass a type
    through directly).

    Fix: the parameter was named ``str``, shadowing the builtin; renamed
    to ``type_name`` (call sites are positional, so this is compatible).
    """
    if type_name == 'boolean':
        return gobject.TYPE_BOOLEAN
    elif type_name == 'uint':
        return gobject.TYPE_UINT
    elif type_name == 'string':
        return gobject.TYPE_STRING
    elif type_name == 'pixmap':
        return gobject.type_from_name('GdkPixbuf')  #@UndefinedVariable
    else:
        return type_name
def is_interface(self, full_class_name):
    """Return True if *full_class_name* resolves to a GType interface.

    Any failure while resolving the type (unknown/unregistered type)
    yields False.

    Fix: replaced the bare ``except:`` — which also swallowed
    KeyboardInterrupt and SystemExit — with ``except Exception:``.
    """
    class_name, class_type, class_pak = \
        self.describe_full_class_name(full_class_name)
    try:
        type_name = gobject.type_name(class_type)
        type_info = gobject.type_from_name(type_name)
    except Exception:
        # Could not resolve the GType: treat as "not an interface".
        return False
    return type_info.is_interface()
def OnDynamicPad(self, uridecodebin, src_pad):
    """Hook a new uridecodebin pad into the encodebin.

    Branches on output mode — audio-only (no container), container
    minus video ("novid"), or full container — then merges application
    tags into any tag-setter element found in the encodebin.
    """
    origin = src_pad.get_caps()
    if (self.container == False):
        # Audio-only: use encodebin's static audio pad.
        a = origin.to_string()
        if a.startswith("audio/"):
            sinkpad = self.encodebin.get_static_pad("audio_0")
            src_pad.link(sinkpad)
    else:
        if self.videocaps == "novid":
            # Container without video: only audio pads are requested.
            c = origin.to_string()
            if c.startswith("audio/"):
                sinkpad = self.encodebin.emit("request-pad", origin)
                d = sinkpad.get_caps().to_string()
                if d.startswith("audio/"):
                    src_pad.link(sinkpad)
        else:
            # Checking if its a subtitle pad which we can't deal with
            # currently.
            # Making sure that when we remove video from a file we don't
            # bother with the video pad.
            c = origin.to_string()
            if not c.startswith("text/"):
                if not (c.startswith("video/") and (self.videocaps == False)):
                    # print "creating sinkpad"
                    sinkpad = self.encodebin.emit("request-pad", origin)
                if c.startswith("audio/"):
                    src_pad.link(sinkpad)
                elif ((c.startswith("video/") or c.startswith("image/"))
                      and (self.videocaps != False)):
                    if self.videopasstoggle == False:
                        # Transcode: route through deinterlacer/flipper.
                        src_pad.link(self.deinterlacer.get_static_pad("sink"))
                        self.videoflipper.get_static_pad("src").link(sinkpad)
                    else:
                        # Passthrough: link the source pad directly.
                        srccaps = src_pad.get_caps()
                        srcstring = srccaps.to_string()
                        #print "source pad is " + str(srcstring)
                        sinkcaps = sinkpad.get_caps()
                        sinkstring = sinkcaps.to_string()
                        #print "sinkpad is " + str(sinkstring)
                        src_pad.link(sinkpad)
    # Grab element from encodebin which supports tagsetter interface and set app name
    # to Transmageddon
    GstTagSetterType = gobject.type_from_name("GstTagSetter")
    tag_setting_element = self.encodebin.get_by_interface(GstTagSetterType)
    if tag_setting_element != None:
        taglist = gst.TagList()
        taglist[gst.TAG_ENCODER] = "Transmageddon encoder"
        # this should probably be set to
        # string combining audio+video encoder
        # implementations
        taglist[gst.TAG_APPLICATION_NAME] = "Transmageddon transcoder"
        tag_setting_element.merge_tags(taglist, gst.TAG_MERGE_APPEND)
def _parse_property_enum(self, pspec, prop):
    """Resolve an identifier-valued property into its enum member.

    Accepts either a bare nick ("center") resolved against the pspec's
    value type, or a qualified "EnumName.nick" resolved by GType name.
    Raises Exception when the property is not an identifier or the nick
    matches no member.
    """
    if prop.kind != TYPE_IDENTIFIER:
        raise Exception("Invalid enum property value: %r" % (
            prop.value, ))
    nick = prop.value
    if '.' in nick:
        enum_name, nick = nick.split(".", 1)
        enum_type = GObject.type_from_name(enum_name)
    else:
        enum_type = pspec.value_type
    for member in enum_type.pytype.__enum_values__.values():
        if member.value_nick == nick:
            return member
    raise Exception(nick)
def testConstruct(self):
    """Build widgets from generated glade XML and verify their GTypes."""
    # (class name, widget id) pairs; '+'-separated names are the
    # old-style dotted-path registrations.
    objs = [("kiwi+ui+widgets+list+List", "w1"),
            ("kiwi+ui+widgets+combobox+ComboBox", "w3"),
            ("kiwi+ui+widgets+combobox+ComboBoxEntry", "w5")
            ]
    if HAVE_2_8:
        # Newer registered names are only available on gtk 2.8+.
        objs.extend([("ObjectList", "w2"),
                     ("ProxyComboBox", "w4"),
                     ("ProxyComboBoxEntry", "w6")
                     ])
    s = ''
    for obj, name in objs:
        s += '<widget class="%s" id="%s"/>\n' % (obj, name)
    ob = disabledeprecationcall(ObjectBuilder, buffer=glade(s))
    for obj, name in objs:
        widget = ob.get_widget(name)
        self.failUnless(isinstance(widget, gtk.Widget))
        gtype = gobject.type_from_name(obj)
        self.failUnless(gobject.type_is_a(gtype, gtk.Widget))
        # NOTE(review): passes the widget *instance* as the second
        # argument — presumably type_is_a accepts instances; confirm.
        self.failUnless(gobject.type_is_a(gtype, widget))
class Preview(gst.Bin):
    """Video-preview bin: colorspace -> videoscale -> autovideosink.

    Re-emits "prepare-xwindow-id" (with the preview sink element) when
    the watched Sltv object's bus reports that the sink needs a window.
    """

    __gsignals__ = {
        "prepare-xwindow-id": (
            gobject.SIGNAL_RUN_LAST,
            gobject.TYPE_NONE,
            (gobject.type_from_name("GstElement"),)
        )
    }

    def __init__(self, sltv):
        gobject.GObject.__init__(self)
        # Listen for the re-broadcast bus messages from the controller.
        sltv.connect("sync-message", self.on_sync_message)
        self.sink = gst.element_factory_make("autovideosink", "sink")
        self.add(self.sink)
        self.colorspace = gst.element_factory_make(
            "ffmpegcolorspace", "colorspace"
        )
        self.add(self.colorspace)
        self.videoscale = gst.element_factory_make("videoscale")
        self.add(self.videoscale)
        gst.element_link_many(self.colorspace, self.videoscale, self.sink)
        # Expose the colorspace's sink pad as this bin's sink.
        sink_pad = gst.GhostPad(
            "sink_ghost_pad", self.colorspace.sink_pads().next()
        )
        self.add_pad(sink_pad)

    def on_sync_message(self, sltv, bus, message):
        """Forward prepare-xwindow-id for our own sink, tweaking it first."""
        if message.structure is None:
            return
        message_name = message.structure.get_name()
        if message_name == "prepare-xwindow-id":
            previewsink = message.src
            # Only react to the sink that lives inside this bin.
            if previewsink.get_parent() == self.sink:
                previewsink.set_property("sync", False)
                previewsink.set_property("force-aspect-ratio", True)
                self.emit("prepare-xwindow-id", previewsink)
def __getitem__(self, name):
    """Look up *name* in the local type registry, falling back to the
    global GObject type registry when it is not cached locally."""
    try:
        return self.types[name]
    except KeyError:
        return gobject.type_from_name(name)
# Insert a separator only if menu already had children if add_separator and len(menu.get_children()): sep = gtk.SeparatorMenuItem() sep.set_visible(True) menu_items.append(sep) menu.prepend(sep) # Do this reversed because we are prepending for action in reversed(actions): action.set_accel_group(uimanager.get_accel_group()) menu_item = action.create_menu_item() # Toolmenus doesn't use the trailing '...' menu pattern menu_item.set_label(menu_item.get_label().replace('...', '')) menu_items.append(menu_item) if position is not None: menu.insert(menu_item, position) else: menu.prepend(menu_item) return menu_items gobject.type_register(ToolMenuAction) # FIXME: This is at least present in PyGTK 2.22 MenuToolButton = getattr(gtk, 'MenuToolButton', None) if MenuToolButton is None: MenuToolButton = gobject.type_from_name('GtkMenuToolButton').pytype ToolMenuAction.set_tool_item_type(MenuToolButton)
import logging import gc import sys import ctypes import gobject logging.basicConfig(level = logging.DEBUG) # .15 is 7.25+ with the new vips8 API libvips = ctypes.CDLL('libvips.so.15') libvips.vips_init(sys.argv[0]) # should be able to find vipsimage, hopefully print gobject.type_from_name('VipsImage') _VipsImage = gobject.type_from_name('VipsImage') class VipsImage(_VipsImage): def __new__(cls): gobject.type_register(cls) return gobject.GObject.__new__(cls) def __init__(self, filename = None, mode = None): logging.debug('vipsimage: init') if filename != None: self.props.filename = filename if mode != None:
class Sltv(gobject.GObject):
    """Streaming controller: builds and drives the GStreamer pipeline.

    Mixes the configured sources (with optional picture-in-picture),
    applies watermark, videobalance, effects and a text overlay, then
    fans the stream out to every configured output bin.  Progress and
    errors are reported through GObject signals.
    """

    __gsignals__ = {
        "stopped": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
        "playing": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
        "preplay": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
        # Carries a human-readable error message.
        "error": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
                  (gobject.TYPE_STRING,)),
        # Re-broadcast of the pipeline bus' sync messages (bus, message).
        "sync-message": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
                         (gobject.type_from_name("GstBus"),
                          gobject.type_from_name("GstMessage"))),
        "pipeline-ready": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ())
    }

    def __init__(self):
        gobject.GObject.__init__(self)
        self.player = None              # gst.Pipeline, created in play()
        self.preview_enabled = False
        self.preview = None
        self.thumbnails = {}            # source name -> Preview bin
        # Registries of configurable media components.
        self.outputs = medialist.MediaList("Outputs", REGISTRY_OUTPUT)
        self.outputs.load()
        self.sources = medialist.MediaList("Sources", REGISTRY_INPUT)
        self.sources.load()
        self.audioconvs = medialist.MediaList("AudioConverters",
                                              REGISTRY_AUDIO)
        self.audioconvs.load()
        self.encoders = medialist.MediaList("Encoders", REGISTRY_ENCODING)
        self.encoders.load()
        self.videoconverters = medialist.MediaList(
            "VideoConverters", REGISTRY_VIDEO_CONVERTER
        )
        self.videoconverters.load()
        self.audio = Audio()
        self.effect_enabled = False
        self.effect = {}                # media type -> effect element
        self.effect_name = {MEDIA_VIDEO: "identity",
                            MEDIA_AUDIO: "identity"}
        self.video_source = None
        self.pip_source = None
        self.pip_position = None
        self.audio_source = None
        self.overlay_text = None
        self.overlay_font = "Sans Bold 14"
        self.valign = "baseline"
        self.halign = "center"
        self.volume = None
        self.volume_value = None
        self.pending_state = None       # target state of an async change
        self.watermark_location = None
        self.watermark_resize = False
        self.watermark_size = 1.0
        self.watermark_alpha = None
        self.watermark_selected = 0
        self.videobalance_contrast = None
        self.videobalance_brightness = None
        self.videobalance_hue = None
        self.videobalance_saturation = None
        self.input_type = 0             # bitmask of MEDIA_AUDIO/MEDIA_VIDEO
        self.output_bins = None         # output name -> OutputBin

    # --- Simple setters: record the value, and when the pipeline is
    # live, push it onto the corresponding element immediately. ---

    def set_halign(self, halign):
        self.halign = halign
        if self.playing():
            self.overlay.set_property("halign", halign)

    def set_valign(self, valign):
        self.valign = valign
        if self.playing():
            self.overlay.set_property("valign", valign)

    def set_overlay_font(self, overlay_font):
        self.overlay_font = overlay_font
        if self.playing():
            self.overlay.set_property("font-desc", overlay_font)

    def set_metadata(self, taglist):
        # Stored tags are applied to the encoder's tag setter in play().
        self.taglist = taglist

    def get_thumbnail(self, name):
        # Raises KeyError if *name* has no thumbnail preview.
        return self.thumbnails[name]

    def set_overlay_text(self, overlay_text):
        self.overlay_text = overlay_text
        if self.playing():
            self.overlay.set_property("text", overlay_text)

    def set_watermark_location(self, location):
        self.watermark_location = location
        if self.playing():
            self.watermark.set_property("location", location)

    def set_watermark_resize(self, enabled):
        self.watermark_resize = enabled

    def set_watermark_size(self, size):
        # Fraction of the video size; applied in _set_watermark().
        self.watermark_size = size

    def set_watermark_alpha(self, alpha):
        self.watermark_alpha = alpha
        if self.playing():
            self.watermark.set_property("image-alpha", alpha)

    def _set_watermark(self, video_width, video_height):
        """Apply the stored watermark settings to the overlay element."""
        if self.watermark_location:
            self.watermark.set_property("location", self.watermark_location)
        if self.watermark_alpha:
            self.watermark.set_property("image-alpha", self.watermark_alpha)
        if self.watermark_resize:
            # Scale the watermark relative to the output video size.
            wm_width = self.watermark_size * video_width
            wm_height = self.watermark_size * video_height
            self.watermark.set_property("image-width", wm_width)
            self.watermark.set_property("image-height", wm_height)

    def set_videobalance_contrast(self, value):
        self.videobalance_contrast = value
        if self.playing():
            self.videobalance.set_property("contrast", value)

    def set_videobalance_brightness(self, value):
        self.videobalance_brightness = value
        if self.playing():
            self.videobalance.set_property("brightness", value)

    def set_videobalance_hue(self, value):
        self.videobalance_hue = value
        if self.playing():
            self.videobalance.set_property("hue", value)

    def set_videobalance_saturation(self, value):
        self.videobalance_saturation = value
        if self.playing():
            self.videobalance.set_property("saturation", value)

    def set_effect_name(self, effect_type, effect_name):
        # "none" is normalized to the pass-through "identity" element.
        if effect_name == "none":
            effect_name = "identity"
        self.effect_name[effect_type] = effect_name

    def stop_output(self, name):
        """Stop one output bin; return True if it existed."""
        if not self.output_bins is None and self.output_bins.has_key(name):
            self.output_bins[name].stop()
            return True
        else:
            return False

    def play(self):
        """Assemble the full pipeline from the registries and start it.

        Emits "preplay" first, "pipeline-ready" once built, then either
        "playing" (sync state change) or defers it to the bus handler
        (async).  Emits "error"/"stopped" and returns early when no
        audio source is selected but an audio-capable source exists.
        """
        self.emit("preplay")
        self.player = gst.Pipeline("player")
        self.queue_video = gst.element_factory_make("queue", "queue_video")
        self.player.add(self.queue_video)
        self.input_type = 0
        # Source selection
        self.source_pads = {}
        self.audio_pads = {}
        self.pip_pads = {}
        self.output_bins = {}
        type = 0
        source_number = 0
        pip_number = 0
        self.pip = PictureInPicture()
        self.player.add(self.pip)
        for row in self.sources.get_store():
            (name, source) = row
            element = source.create()
            self.player.add(element)
            if element.does_audio():
                if not self.input_type & MEDIA_AUDIO:
                    # The pipeline has audio sources, and this is the first
                    # audio source we add
                    if self.audio_source is None:
                        self.emit("error",
                                  "You need to select an audio source")
                        self.emit("stopped")
                        return
                    self.input_type |= MEDIA_AUDIO
                    self.input_selector = gst.element_factory_make(
                        "input-selector", "audio-selector"
                    )
                    self.player.add(self.input_selector)
                audiobin = audioinputbin.AudioInputBin(source)
                self.player.add(audiobin)
                element.audio_pad.link(audiobin.get_static_pad("sink"))
                self.audio_pads[name] = \
                    self.input_selector.get_request_pad("sink%d")
                audiobin.src_pad.link(self.audio_pads[name])
            if element.does_video():
                self.input_type |= MEDIA_VIDEO
                self.source_pads[name] = source_number
                source_number = source_number + 1
                # Thumbnail preview
                tee = gst.element_factory_make("tee", None)
                self.player.add(tee)
                element.video_pad.link(tee.sink_pads().next())
                thumbnail_queue = gst.element_factory_make("queue", None)
                self.player.add(thumbnail_queue)
                self.thumbnails[name] = Preview(self)
                self.player.add(self.thumbnails[name])
                thumbnail_err = gst.element_link_many(
                    tee, thumbnail_queue, self.thumbnails[name]
                )
                if thumbnail_err == False:
                    self.emit("error", "Error conecting thumbnail preview.")
                # Picture in Picture
                self.pip_pads[name] = pip_number
                pip_number = pip_number + 1
                main_queue = gst.element_factory_make("queue", None)
                self.player.add(main_queue)
                pip_queue = gst.element_factory_make("queue", None)
                self.player.add(pip_queue)
                tee.link(main_queue)
                tee.link(pip_queue)
                main_queue.src_pads().next().link(self.pip.get_request_pad_A())
                pip_queue.src_pads().next().link(self.pip.get_request_pad_B())
            # Accumulate the media-type mask of the selected sources;
            # passed later to the encoder factory.
            if name == self.video_source:
                type |= element.get_type()
            if name == self.audio_source:
                type |= element.get_type()
        # Video post-processing chain: pip -> watermark -> colorspace ->
        # videobalance -> queue_video.
        self.watermark = gst.element_factory_make(
            "cairoimageoverlay", "cairoimageoverlay"
        )
        self.player.add(self.watermark)
        self.colorspace = gst.element_factory_make(
            "ffmpegcolorspace", "colorspace-imageoverlay-videobalance"
        )
        self.player.add(self.colorspace)
        self.videobalance = gst.element_factory_make(
            "videobalance", "videobalance"
        )
        self.player.add(self.videobalance)
        if self.videobalance_contrast:
            self.videobalance.set_property(
                "contrast", self.videobalance_contrast
            )
        if self.videobalance_brightness:
            self.videobalance.set_property(
                "brightness", self.videobalance_brightness
            )
        if self.videobalance_hue:
            self.videobalance.set_property("hue", self.videobalance_hue)
        if self.videobalance_saturation:
            self.videobalance.set_property(
                "saturation", self.videobalance_saturation
            )
        gst.element_link_many(
            self.pip, self.watermark, self.colorspace,
            self.videobalance, self.queue_video
        )
        self._switch_source()
        self._switch_pip()
        if self.pip_position:
            self.pip.set_property("position", self.pip_position)
        self.effect[MEDIA_VIDEO] = effect.video_effect.VideoEffect(
            self.effect_name[MEDIA_VIDEO]
        )
        self.player.add(self.effect[MEDIA_VIDEO])
        self.overlay = gst.element_factory_make("textoverlay", "overlay")
        self.overlay.set_property("font-desc", self.overlay_font)
        self.overlay.set_property("halign", self.halign)
        self.overlay.set_property("valign", self.valign)
        self.player.add(self.overlay)
        gst.element_link_many(
            self.queue_video, self.effect[MEDIA_VIDEO], self.overlay
        )
        self.preview_tee = multeequeue.MulTeeQueue()
        self.player.add(self.preview_tee)
        self.overlay.link(self.preview_tee)
        if self.input_type & MEDIA_AUDIO:
            # Audio chain: selector -> volume -> effect -> convert -> tee.
            self.convert = gst.element_factory_make("audioconvert", "convert")
            self.player.add(self.convert)
            self.effect[MEDIA_AUDIO] = effect.audio_effect.AudioEffect(
                self.effect_name[MEDIA_AUDIO]
            )
            self.player.add(self.effect[MEDIA_AUDIO])
            self.audio_tee = gst.element_factory_make("tee", "audio_tee")
            self.player.add(self.audio_tee)
            self.volume = volume.Volume()
            self.player.add(self.volume)
            gst.element_link_many(
                self.input_selector, self.volume, self.effect[MEDIA_AUDIO],
                self.convert, self.audio_tee
            )
            self.input_selector.set_property(
                "active-pad", self.audio_pads[self.audio_source]
            )
        # One converter+encoder+tee chain per encoder config; outputs
        # sharing an encoder hang off the same tee.
        added_encoders = {}
        pip_width = 0
        pip_height = 0
        for row in self.outputs.get_store():
            (name, output) = row
            output_bin = outputbin.OutputBin(output)
            self.output_bins[name] = output_bin
            self.player.add(output_bin)
            encoder_name = output.get_config()["parent"]
            encoder_item = self.encoders.get_item(encoder_name)
            if encoder_item is None:
                self.emit("error", "Please, add an encoder.")
                break
            if added_encoders.has_key(encoder_name):
                tee = added_encoders[encoder_name]
                tee.link(output_bin)
            else:
                tee = gst.element_factory_make("tee", None)
                self.player.add(tee)
                converter_item = encoder_item.parent
                converter = converter_item.create()
                # Track the largest configured frame size for the mixer.
                if converter_item.config["width"] > pip_width:
                    pip_width = converter_item.config["width"]
                if converter_item.config["height"] > pip_height:
                    pip_height = converter_item.config["height"]
                self.player.add(converter)
                encoder = encoder_item.factory.create(type)
                if encoder.vorbisenc:
                    # Vorbis encoders get the stored metadata tags.
                    self.metadata = metadata.Metadata(encoder.vorbisenc)
                    self.metadata.set_tags(self.taglist)
                encoder.config(encoder_item.config)
                self.player.add(encoder)
                added_encoders[encoder_name] = tee
                self.preview_tee.get_src_pad().link(
                    converter.sink_pads().next()
                )
                gst.element_link_many(converter, encoder, tee, output_bin)
                if self.input_type & MEDIA_AUDIO:
                    audio_queue = gst.element_factory_make("queue", None)
                    self.player.add(audio_queue)
                    gst.element_link_many(
                        self.audio_tee, audio_queue, encoder
                    )
        self.preview = Preview(self)
        self.player.add(self.preview)
        self.preview_tee.get_src_pad().link(self.preview.sink_pads().next())
        if pip_width == 0:
            # No encoder supplied a size; fall back to 320x240.
            pip_width = 320
            pip_height = 240
        self.pip.set_property("width", int(pip_width))
        self.pip.set_property("height", int(pip_height))
        self.video_width = int(pip_width)
        self.video_height = int(pip_height)
        self._set_watermark(self.video_width, self.video_height)
        self.overlay.set_property("text", self.overlay_text)
        if self.volume_value is not None:
            self.volume.set_property("volume", self.volume_value)
        self.emit("pipeline-ready")
        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)
        cr = self.player.set_state(gst.STATE_PLAYING)
        if cr == gst.STATE_CHANGE_SUCCESS:
            self.emit("playing")
        elif cr == gst.STATE_CHANGE_ASYNC:
            # "playing" will be emitted from on_message (ASYNC_DONE).
            self.pending_state = gst.STATE_PLAYING

    def stop(self):
        """Tear the pipeline down; emit "stopped" now or on ASYNC_DONE."""
        cr = self.player.set_state(gst.STATE_NULL)
        if cr == gst.STATE_CHANGE_SUCCESS:
            self.emit("stopped")
        elif cr == gst.STATE_CHANGE_ASYNC:
            self.pending_state = gst.STATE_NULL

    def playing(self):
        """True when the pipeline exists and is currently PLAYING."""
        return self.player and \
            self.player.get_state()[1] == gst.STATE_PLAYING

    def _swap_effect(self, effect_type):
        """Replace the live effect element with one for the stored name."""
        if effect_type == MEDIA_VIDEO:
            new_effect = effect.video_effect.VideoEffect(
                self.effect_name[effect_type]
            )
            Swap.swap_element(
                self.player, self.queue_video, self.overlay,
                self.effect[effect_type], new_effect
            )
            self.effect[effect_type] = new_effect
        else:
            new_effect = effect.audio_effect.AudioEffect(
                self.effect_name[effect_type]
            )
            Swap.swap_element(
                self.player, self.volume, self.convert,
                self.effect[effect_type], new_effect
            )
            self.effect[effect_type] = new_effect

    def set_effects(self, state):
        self.effect_enabled = state
        # If state is disabled and pipeline is playing, disable effects now
        if not self.effect_enabled:
            if self.playing():
                self.change_effect("identity", MEDIA_VIDEO)
                self.change_effect("identity", MEDIA_AUDIO)

    def change_effect(self, effect_name, effect_type):
        # If that input doesn't exist, then there is no effect to change.
        if not self.input_type & effect_type:
            return
        if self.playing():
            self.set_effect_name(effect_type, effect_name)
            self._swap_effect(effect_type)

    def _switch_source(self):
        self.pip.set_property(
            "a-active", self.source_pads[self.video_source]
        )

    def set_video_source(self, source_name):
        self.video_source = source_name
        if self.playing():
            self._switch_source()

    def _switch_pip(self):
        # Enable PiP only when the selected source actually has a pad.
        if self.pip_source and self.pip_pads.has_key(self.pip_source):
            self.pip.set_property("enabled", True)
            self.pip.set_property(
                "b-active", self.pip_pads[self.pip_source]
            )
        else:
            self.pip.set_property("enabled", False)

    def set_pip_source(self, source_name):
        self.pip_source = source_name
        if self.playing():
            self._switch_pip()

    def set_pip_position(self, selected):
        self.pip_position = selected
        if self.playing():
            self.pip.set_property("position", selected)

    def set_audio_source(self, source_name):
        self.audio_source = source_name
        if self.playing():
            self.input_selector.set_property(
                "active-pad", self.audio_pads[source_name]
            )

    def set_preview(self, state):
        self.preview_enabled = state

    def get_preview(self):
        return self.preview

    def set_volume(self, value):
        self.volume_value = value
        if self.volume:
            self.volume.set_property("volume", value)

    def on_message(self, bus, message):
        """Pipeline bus handler: EOS, errors and async state completion."""
        t = message.type
        if t == gst.MESSAGE_EOS:
            cr = self.player.set_state(gst.STATE_NULL)
            if cr == gst.STATE_CHANGE_SUCCESS:
                self.emit("stopped")
            elif cr == gst.STATE_CHANGE_ASYNC:
                self.pending_state = gst.STATE_NULL
        elif t == gst.MESSAGE_ERROR:
            (gerror, debug) = message.parse_error()
            self.emit("error", gerror.message)
            print debug
            cr = self.player.set_state(gst.STATE_NULL)
            if cr == gst.STATE_CHANGE_SUCCESS:
                self.emit("stopped")
            elif cr == gst.STATE_CHANGE_ASYNC:
                self.pending_state = gst.STATE_NULL
        elif t == gst.MESSAGE_ASYNC_DONE:
            # An earlier set_state returned ASYNC; report the final state.
            if self.pending_state == gst.STATE_NULL:
                self.emit("stopped")
            elif self.pending_state == gst.STATE_PLAYING:
                self.emit("playing")
            self.pending_state = None

    def on_sync_message(self, bus, message):
        # Re-broadcast bus sync messages to listeners (e.g. Preview bins).
        self.emit("sync-message", bus, message)
#!/usr/bin/python import logging import gc import gobject import vipsobject import vipsimage logging.basicConfig(level=logging.DEBUG) # should be able to find vipsimage, hopefully print gobject.type_from_name('VipsImage') # test unref for i in range(1, 10): a = vipsimage.VipsImage('/home/john/pics/healthygirl.jpg') # should work a = vipsimage.VipsImage('/home/john/pics/healthygirl.jpg') print 'width =', a.width() print 'height =', a.height() print 'bands =', a.bands() print 'format = %d - %s' % (a.format(), vipsimage.VipsBandFormat.name(a.format())) print 'coding = %d - %s' % (a.coding(), vipsimage.VipsCoding.name(a.coding())) print 'interpretation = %d - %s' % ( a.interpretation(), vipsimage.VipsInterpretation.name(a.interpretation())) print 'xres =', a.xres() print 'yres =', a.yres()
menu = new_item.get_menu() menu_items = [] # Insert a separator only if menu already had children if len(menu.get_children()): sep = gtk.SeparatorMenuItem() sep.set_visible(True) menu_items.append(sep) menu.prepend(sep) # Do this reversed because we are prepending for action in reversed(actions): action.set_accel_group(uimanager.get_accel_group()) menu_item = action.create_menu_item() # Toolmenus doesn't use the trailing '...' menu pattern menu_item.set_label(menu_item.get_label().replace('...', '')) menu_items.append(menu_item) menu.prepend(menu_item) return menu_items gobject.type_register(ToolMenuAction) # FIXME: This is at least present in PyGTK 2.22 MenuToolButton = getattr(gtk, 'MenuToolButton', None) if MenuToolButton is None: MenuToolButton = gobject.type_from_name('GtkMenuToolButton').pytype ToolMenuAction.set_tool_item_type(MenuToolButton)
# -*- Mode: Python -*- import unittest import gobject import testhelper GUnknown = gobject.type_from_name("TestUnknown") Unknown = GUnknown.pytype class MyUnknown(Unknown, testhelper.Interface): some_property = gobject.property(type=str) def __init__(self): Unknown.__init__(self) self.called = False def do_iface_method(self): self.called = True Unknown.do_iface_method(self) gobject.type_register(MyUnknown) class MyObject(gobject.GObject, testhelper.Interface): some_property = gobject.property(type=str) def __init__(self): gobject.GObject.__init__(self)
class C(gobject.GObject):
    """GObject with a signal whose return type is the boxed GStrv type."""
    __gsignals__ = dict(
        my_boxed_signal=(gobject.SIGNAL_RUN_LAST,
                         # Return type resolved at class-creation time.
                         gobject.type_from_name('GStrv'),
                         ()))
# -*- Mode: Python -*- import unittest import gobject import testhelper GUnknown = gobject.type_from_name("TestUnknown") Unknown = GUnknown.pytype class MyUnknown(Unknown, testhelper.Interface): some_property = gobject.property(type=str) def __init__(self): Unknown.__init__(self) self.called = False def do_iface_method(self): self.called = True Unknown.do_iface_method(self) gobject.type_register(MyUnknown) class MyObject(gobject.GObject, testhelper.Interface): some_property = gobject.property(type=str) def __init__(self): gobject.GObject.__init__(self) self.called = False
def _construct_object(self, obj, parent=None):
    """Recursively build a GObject/Gtk widget tree from a parsed *obj*.

    Reuses an existing internal child exposed as ``parent.props.<name>``
    when present, otherwise instantiates ``obj.name`` by GType name.
    Returns the constructed (or reused) instance.
    """
    if parent is not None:
        # An internal child of *parent* with this name, if any.
        inst = getattr(parent.props, obj.name, None)
    else:
        inst = None
    if inst:
        obj_type = inst.__gtype__
    else:
        obj_type = GObject.type_from_name(obj.name)
    obj_id = None
    # Properties
    properties = {}
    delayed_properties = []
    for prop in obj.properties:
        name = prop.name
        if name == 'id':
            # 'id' is a pseudo-property naming the instance.
            obj_id = prop.value
            continue
        if name == 'child_type':
            # 'child_type' is a pseudo-property used when attaching.
            obj.child_type = prop.value
            continue
        if isinstance(prop.value, Object):
            # Object-valued properties are built recursively first.
            prop.value = self._construct_object(prop.value)
        if '.' in name:
            # Dotted names are resolved later as delayed properties.
            delayed_properties.append(prop)
        else:
            pspec = getattr(obj_type.pytype.props, name)
            try:
                properties[name] = self._parse_property(pspec, prop)
            except DelayedProperty:
                delayed_properties.append(prop)
    # NOTE(review): nesting of the registration/add_child statements
    # under this branch was inferred from statement order — confirm
    # against the original layout.
    if inst is None:
        inst = GObject.new(obj_type, **properties)
        if obj_id is None:
            # Fall back to a synthetic id so the object is addressable.
            obj_id = str(hash(obj))
        self._objects[obj_id] = inst
        if parent is not None and not obj.is_property:
            Gtk.Buildable.add_child(
                parent, self._fake_builder, inst, obj.child_type)
    else:
        # Existing internal child: apply the parsed properties directly.
        for name, value in properties.items():
            inst.set_property(name, value)
    # Signals
    for signal in obj.signals:
        inst.connect(signal.name, self.signals[signal.handler])
    # Children
    if isinstance(inst, Gtk.Container):
        for child in obj.children:
            properties = self._extract_child_properties(child)
            child_inst = self._construct_object(child, inst)
            child_pspecs = self._get_child_pspecs(inst)
            for prop in properties:
                pspec = child_pspecs[prop.name]
                value = self._parse_property(pspec, prop)
                inst.child_set_property(child_inst, prop.name, value)
    # Delayed_properties
    for prop in delayed_properties:
        self._delayed_properties.append((inst, prop))
    return inst
def test_columns(self):
    """Verify column count and the GType registered for each column."""
    column_count = self.store.get_n_columns()
    property_count = len(
        self.store._class_type.Meta.get_column_properties())
    self.assertEqual(column_count, property_count)
    for column, gtype_name in [
            (self.store.c_name, "gchararray"),
            (self.store.c_number, "gdouble"),
            (self.store.c_test, "PyObject")]:
        self.assertEqual(self.store.get_column_type(column),
                         gobject.type_from_name(gtype_name))