def Execute(self):
    """Set a static, centered motion rect on the picture.

    Computes the largest rect with the configured aspect ratio that fits
    inside the image, centers it, and assigns it as both start and target
    rect — i.e. the picture shows no motion.
    """
    try:
        width, height = PILBackend.GetImageSize(
            self.__picture.GetFilename())
    except Exception:
        # Image cannot be read/sized; leave the picture's rects untouched.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        return

    ratio = Aspect.ToFloat(self.__aspect)
    picRatio = width / float(height)
    if picRatio > ratio:
        # Image is wider than the target aspect: use full height,
        # crop the sides.
        scaledWidth = height * ratio
        scaledHeight = height
    else:
        # Image is taller than the target aspect: use full width,
        # crop top/bottom.
        scaledWidth = width
        scaledHeight = width / ratio

    # Round to integer pixels, centered inside the image.
    centerRect = (int(round((width - scaledWidth) / 2.0)),
                  int(round((height - scaledHeight) / 2.0)),
                  int(round(scaledWidth)),
                  int(round(scaledHeight)))

    self.__picture.SetStartRect(centerRect)
    self.__picture.SetTargetRect(centerRect)
def Execute(self):
    """Assign a random pan/zoom motion path to the picture.

    Portrait images get a vertical pan over the full image width;
    landscape images get a zoom between the full width and a 75%-width
    rect anchored at a random corner. Direction is randomized.
    """
    try:
        width, height = PILBackend.GetImageSize(
            self.__picture.GetFilename())
    except Exception:
        # Image cannot be read/sized; leave the picture's rects untouched.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        return

    if self.__picture.GetWidth() == -1:  # FIXME: stupid if
        # Picture size not yet known; store it now.
        self.__picture.SetWidth(width)
        self.__picture.SetHeight(height)

    ratio = Aspect.ToFloat(self.__aspect)
    if width < height:
        # portrait: pan vertically from top to bottom over full width
        startRect = (0, 0, width, width / ratio)
        targetRect = (0, height - (width / ratio), width, width / ratio)
    else:
        # landscape: zoom from full width down to 75% width,
        # anchored at one of the four corners chosen at random
        scaledWidth = width * 0.75
        startRect = (0, 0, width, width / ratio)
        d = random.randint(0, 3)
        if d == 0:
            # top-left corner
            targetRect = (0, 0, scaledWidth, scaledWidth / ratio)
        elif d == 1:
            # bottom-left corner
            targetRect = (0, height - (scaledWidth / ratio),
                          scaledWidth, scaledWidth / ratio)
        elif d == 2:
            # top-right corner
            targetRect = (width - scaledWidth, 0,
                          scaledWidth, scaledWidth / ratio)
        else:
            # d == 3: bottom-right corner (plain `else` so targetRect
            # is provably always bound)
            targetRect = (width - scaledWidth,
                          height - (scaledWidth / ratio),
                          scaledWidth, scaledWidth / ratio)

    # Coin flip: run the motion in either direction.
    if random.randint(0, 1):
        targetRect, startRect = startRect, targetRect

    self.__picture.SetStartRect(startRect)
    self.__picture.SetTargetRect(targetRect)
def __init__(self, parent, pic, aspect):
    """Dialog for editing a picture's motion start/end rects by hand.

    :param parent: parent wx window
    :param pic: picture object whose start/target rects are edited
    :param aspect: aspect identifier, converted via ``Aspect.ToFloat``
    """
    # Build the widgets first; the controls below (pnlHdr, stStartPos, ...)
    # exist only after this call.
    self._init_ctrls(parent)
    self.pnlHdr.SetTitle(_(u'Adjust motion positions directly'))
    self.pnlHdr.SetBitmap(wx.ArtProvider.GetBitmap('PFS_MOTION_MANUAL_32'))
    self.__pic = pic
    self.__ratio = Aspect.ToFloat(aspect)
    # NOTE(review): presumably a guard flag consulted by change handlers to
    # suppress reactions while values are set programmatically — confirm
    # against the handlers elsewhere in this class.
    self.__doOnChange = True
    # Snapshot the current rects; presumably restored if the dialog is
    # cancelled — verify against the dialog's close/cancel handling.
    self.__backupStart = self.__pic.GetStartRect()
    self.__backupEnd = self.__pic.GetTargetRect()
    # Render both position labels in bold.
    font = self.stStartPos.GetFont()
    font.SetWeight(wx.FONTWEIGHT_BOLD)
    self.stStartPos.SetFont(font)
    self.stEndPos.SetFont(font)
    self._InitValues()
    # Size before centering so the dialog is centered at its final size.
    self.SetInitialSize(self.GetEffectiveMinSize())
    self.CenterOnParent()
    self.SetFocus()
def SetAspect(self, aspect):
    """Apply a new aspect ratio and redraw.

    Converts *aspect* to a float ratio, re-clamps the current rect so it
    stays inside the image, then repaints the widget.
    """
    self.RATIO = Aspect.ToFloat(aspect)
    self.__KeepRectInImage()
    self.Refresh()
def Prepare(self):
    '''
    Build the gstreamer pipeline and all necessary objects and bindings.

    Assembles: appsrc (JPEG frames) -> jpegdec -> videoconvert
    [-> textoverlay] -> queue -> video encoder [-> parser] -> queue -> mux,
    plus optional audio (concat -> convert -> resample -> queue -> encoder
    [-> parser] -> queue -> mux) and optional subtitle muxing, ending in a
    filesink. Finally sets the pipeline to PLAYING.
    '''
    GObject.threads_init()
    # Event used to pace frame production; cleared at the end so producers
    # block until the pipeline signals readiness.
    self.ready = threading.Event()
    self.ready.set()
    self.active = True
    self.finished = False

    frameRate = self.GetProfile().GetFrameRate()
    # 1000ms / fps == x msec/frame — duration of a single image buffer
    # in Gst clock units.
    self.imgDuration = int(round(1000 * Gst.MSECOND / frameRate.AsFloat()))
    self._Log(logging.DEBUG, "set imgDuration=%s", self.imgDuration)
    self.pipeline = Gst.Pipeline()

    # Source caps: MPEG profiles additionally carry the pixel aspect ratio.
    if self.GetProfile().IsMPEGProfile():
        caps = Gst.caps_from_string(
            "image/jpeg,framerate={0},pixel-aspect-ratio={1}".format(
                frameRate.AsStr(), Aspect.AsStr(self._aspect)))
    else:
        caps = Gst.caps_from_string("image/jpeg,framerate={0}".format(
            frameRate.AsStr()))

    # appsrc fed by _GstNeedData; "block" makes pushes back-pressure aware.
    videoSrc = Gst.ElementFactory.make("appsrc")
    videoSrc.set_property("block", True)
    videoSrc.set_property("caps", caps)
    videoSrc.connect("need-data", self._GstNeedData)
    self.pipeline.add(videoSrc)

    queueVideo = Gst.ElementFactory.make("queue")
    self.pipeline.add(queueVideo)

    jpegDecoder = Gst.ElementFactory.make("jpegdec")
    self.pipeline.add(jpegDecoder)

    colorConverter = Gst.ElementFactory.make("videoconvert")
    self.pipeline.add(colorConverter)

    videoEnc = self._GetVideoEncoder()
    self.pipeline.add(videoEnc)

    # Subtitle handling: either rendered into the video (textoverlay) or
    # embedded as a separate stream in the muxer.
    muxSubtitle = False
    subtitleEnc = None
    if self._GetSubtitleFile():
        self.srtParse = SrtParser(
            self._GetSubtitleFile(),
            self.GetProfile().GetFrameRate().AsFloat())
        if self.GetTypedProperty(
                "Subtitle", str) == "render" and Gst.ElementFactory.find(
                    "textoverlay"):
            self.textoverlay = Gst.ElementFactory.make("textoverlay")
            self.textoverlay.set_property("text", "")
            self._SetupTextOverlay()
            self.pipeline.add(self.textoverlay)
        elif self.GetTypedProperty("Subtitle", str) == "embed":
            muxSubtitle = True
            subtitleEnc = self._GetSubtitleEncoder()  # pylint: disable=assignment-from-none

    # MPEG profiles require I420 raw video between convert and encode.
    vcaps = None
    if self.GetProfile().IsMPEGProfile():
        vcaps = Gst.caps_from_string("video/x-raw,format=I420")

    # link elements for video stream
    videoSrc.link(jpegDecoder)
    jpegDecoder.link(colorConverter)
    # NOTE(review): assumes self.textoverlay is pre-initialized (to None)
    # elsewhere when no subtitle is rendered — confirm in __init__.
    if self.textoverlay:
        colorConverter.link_filtered(self.textoverlay, vcaps)
        self.textoverlay.link(queueVideo)
    else:
        colorConverter.link_filtered(queueVideo, vcaps)
    queueVideo.link(videoEnc)

    # Optional audio branch: concat joins the audio files sequentially.
    audioEnc = None
    if self.GetAudioFiles():
        self.concat = Gst.ElementFactory.make("concat")
        self.pipeline.add(self.concat)
        # Probe buffers leaving concat (e.g. for progress/position
        # tracking in _GstProbeBuffer).
        srcpad = self.concat.get_static_pad("src")
        srcpad.add_probe(
            Gst.PadProbeType.
            BUFFER,  # | Gst.PadProbeType.EVENT_DOWNSTREAM,
            self._GstProbeBuffer)
        self._GstAddAudioFile(self.GetAudioFiles()[self.idxAudioFile])

        audioConv = Gst.ElementFactory.make("audioconvert")
        self.pipeline.add(audioConv)

        audiorate = Gst.ElementFactory.make("audioresample")
        self.pipeline.add(audiorate)

        audioQueue = Gst.ElementFactory.make("queue")
        self.pipeline.add(audioQueue)

        audioEnc = self._GetAudioEncoder()
        self.pipeline.add(audioEnc)

        self.concat.link(audioConv)
        audioConv.link(audiorate)
        audiorate.link(audioQueue)
        audioQueue.link(audioEnc)

    # MPEG streams need parsers before muxing; the parser replaces the
    # encoder variable so the generic linking below stays uniform.
    if self.GetProfile().IsMPEGProfile():
        vp = Gst.ElementFactory.make("mpegvideoparse")
        self.pipeline.add(vp)
        videoEnc.link(vp)
        videoEnc = vp
        if audioEnc:
            ap = Gst.ElementFactory.make("mpegaudioparse")
            self.pipeline.add(ap)
            audioEnc.link(ap)
            audioEnc = ap
    elif isinstance(self, Mp4X265AAC):
        # H.265 needs h265parse; config-interval=-1 inserts SPS/PPS with
        # every IDR frame.
        vp = Gst.ElementFactory.make("h265parse")
        vp.set_property("config-interval", -1)
        self.pipeline.add(vp)
        videoEnc.link(vp)
        videoEnc = vp

    mux = self._GetMux()
    self.pipeline.add(mux)

    videoQueue2 = Gst.ElementFactory.make("queue")
    self.pipeline.add(videoQueue2)
    videoEncCaps = self._GetVideoEncoderCaps()  # pylint: disable=assignment-from-none
    if videoEncCaps:
        videoEnc.link_filtered(videoQueue2, videoEncCaps)
    else:
        videoEnc.link(videoQueue2)
    videoQueue2.link(mux)

    if audioEnc:
        audioQueue2 = Gst.ElementFactory.make("queue")
        self.pipeline.add(audioQueue2)
        audioEnc.link(audioQueue2)
        audioQueue2.link(mux)

    # Embedded-subtitle branch: only possible if the muxer actually
    # offers a subtitle request pad.
    if muxSubtitle:
        subCaps = self._GetSubtitleEncoderCaps()
        subPad = None
        if subCaps:
            subPad = mux.get_request_pad("subtitle_%u")
        if subPad:
            # muxer has subtitle pad, so initialize subtitle processing
            self.usePangoSubtitle = subCaps.find("pango-markup") != -1
            subSrc = Gst.ElementFactory.make("appsrc")
            subCaps = Gst.caps_from_string(subCaps)
            subSrc.set_property("caps", subCaps)
            subSrc.set_property("format", Gst.Format.TIME)
            subSrc.connect("need-data", self._GstNeedSubtitleData)
            self.pipeline.add(subSrc)
            if subtitleEnc:
                self.pipeline.add(subtitleEnc)
                subSrc.link(subtitleEnc)
                srcPad = subtitleEnc.get_static_pad("src")
            else:
                srcPad = subSrc.get_static_pad("src")
            srcPad.link(subPad)
        else:
            self._Log(
                logging.WARNING,
                "Want to mux subtitle but container does not support it!")

    sink = Gst.ElementFactory.make("filesink")
    sink.set_property("location", self.GetOutputFile())
    self.pipeline.add(sink)
    mux.link(sink)

    # Watch bus messages (EOS, errors, state changes) via _GstOnMessage.
    bus = self.pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", self._GstOnMessage)

    self.pipeline.set_state(Gst.State.PLAYING)
    GMainLoop.EnsureRunning()
    # Block producers until the pipeline reports it wants data.
    self.ready.clear()