def add(self, canvas_or_context):
    if self.movie is None:
        # The first frame will be written to a temporary png file,
        # then opened as a movie file, then saved again as a movie.
        handle, self.tmpfname = mkstemp('.tiff')
        canvas_or_context.save(self.tmpfname)
        try:
            movie, err = QTMovie.movieWithFile_error_(self.tmpfname)
            movie.setAttribute_forKey_(NSNumber.numberWithBool_(True), QTMovieEditableAttribute)
            range = QTMakeTimeRange(QTMakeTime(0, 600), movie.duration())
            movie.scaleSegment_newDuration_(range, self._time)
            if err is not None:
                raise Exception(str(err))
            movie.writeToFile_withAttributes_(self.fname, {QTMovieFlatten: True})
            self.movie, err = QTMovie.movieWithFile_error_(self.fname)
            self.movie.setAttribute_forKey_(NSNumber.numberWithBool_(True), QTMovieEditableAttribute)
            if err is not None:
                raise Exception(str(err))
            self.imageTrack = self.movie.tracks()[0]
        finally:
            os.remove(self.tmpfname)
    else:
        try:
            canvas_or_context.save(self.tmpfname)
            img = NSImage.alloc().initByReferencingFile_(self.tmpfname)
            self.imageTrack.addImage_forDuration_withAttributes_(
                img, self._time, {QTAddImageCodecType: 'tiff'})
        finally:
            try:
                os.remove(self.tmpfname)
            except OSError:
                pass
    self.frame += 1
def add(self, canvas_or_context):
    if self.movie is None:
        # The first frame will be written to a temporary png file,
        # then opened as a movie file, then saved again as a movie.
        handle, self.tmpfname = mkstemp(".tiff")
        canvas_or_context.save(self.tmpfname)
        try:
            movie, err = QTMovie.movieWithFile_error_(self.tmpfname, None)
            movie.setAttribute_forKey_(NSNumber.numberWithBool_(True), QTMovieEditableAttribute)
            range = QTMakeTimeRange(QTMakeTime(0, 600), movie.duration())
            movie.scaleSegment_newDuration_(range, self._time)
            if err is not None:
                raise Exception(str(err))
            movie.writeToFile_withAttributes_(self.fname, {QTMovieFlatten: True})
            self.movie, err = QTMovie.movieWithFile_error_(self.fname, None)
            self.movie.setAttribute_forKey_(NSNumber.numberWithBool_(True), QTMovieEditableAttribute)
            if err is not None:
                raise Exception(str(err))
            self.imageTrack = self.movie.tracks()[0]
        finally:
            os.remove(self.tmpfname)
    else:
        try:
            canvas_or_context.save(self.tmpfname)
            img = NSImage.alloc().initByReferencingFile_(self.tmpfname)
            self.imageTrack.addImage_forDuration_withAttributes_(
                img, self._time, {QTAddImageCodecType: "tiff"})
        finally:
            try:
                os.remove(self.tmpfname)
            except OSError:
                pass
    self.frame += 1
def _value_to_nsobject(value, nstype):
    '''Convert a string with a type specifier to a native Objective-C NSObject (serializable).'''
    return {
        'string': lambda v: NSString.stringWithUTF8String_(v),
        'int': lambda v: NSNumber.numberWithInt_(v),
        'float': lambda v: NSNumber.numberWithFloat_(v),
        'bool': lambda v: True if v == 'true' else False,
        'data': lambda v: NSMutableData.alloc().initWithBase64EncodedString_options_(v, 0)  # base64-encoded string -> data object
    }[nstype](value)
def _valueToNSObject(value, nstype):
    '''Convert a string with a type specifier to a native Objective-C NSObject (serializable).'''
    return {
        'string': lambda v: NSString.stringWithUTF8String_(v),
        'int': lambda v: NSNumber.numberWithInt_(v),
        'float': lambda v: NSNumber.numberWithFloat_(v),
        'bool': lambda v: True if v == 'true' else False,
        'data': lambda v: NSData.dataWithBytes_length_(v, len(v))  # raw bytes -> NSData
    }[nstype](value)
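A minimal usage sketch for the two converter variants above (not from the original project; the example values and variable names are illustrative, and depending on the PyObjC version the numeric branches may require actual Python numbers rather than strings):

# Hypothetical calls; NSString/NSNumber come from the Foundation bridge.
name_obj = _valueToNSObject('Jane Appleseed', 'string')  # -> NSString instance
flag = _valueToNSObject('true', 'bool')                  # -> plain Python True
ratio = _valueToNSObject(0.75, 'float')                  # -> NSNumber (assumes a numeric value is accepted)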
def build(self):
    """ Build and configure the NSPredicateEditor using the configured criteria. """
    predicateSet = []

    # if nesting is supported we first add the compound type predicate
    if self._isNesting:
        basePred = NSPredicateEditorRowTemplate.alloc().initWithCompoundTypes_([2, 1])
        predicateSet.append(basePred)

    # now convert each criterion into a predicate template
    for criteria in self._criteria:
        lexp = [NSExpression.expressionForConstantValue_(criteria['displayName'])]
        ops = [NSNumber.numberWithUnsignedInt_(i) for i in criteria['operators']]
        predicate = NSPredicateEditorRowTemplate.alloc().initWithLeftExpressions_rightExpressionAttributeType_modifier_operators_options_(
            lexp, criteria['type'], NSAnyPredicateModifier, ops, 0
        )
        predicateSet.append(predicate)

    # set the final predicate templates
    self._editor.setRowTemplates_(predicateSet)
def realignLayer(self, thisLayer, shouldRealign=False, shouldReport=False, shouldVerbose=False):
    moveForward = NSPoint(1, 0)
    moveBackward = NSPoint(-1, 0)
    noModifier = NSNumber.numberWithUnsignedInteger_(0)
    layerCount = 0
    if thisLayer:
        for thisPath in thisLayer.paths:
            oldPathCoordinates = [n.position for n in thisPath.nodes]
            for thisNode in thisPath.nodes:
                if thisNode.type == GSOFFCURVE:
                    oldPosition = NSPoint(thisNode.position.x, thisNode.position.y)
                    selectedNode = NSMutableArray.arrayWithObject_(thisNode)
                    thisLayer.setSelection_(selectedNode)
                    self.Tool.moveSelectionLayer_shadowLayer_withPoint_withModifier_(
                        thisLayer, thisLayer, moveForward, noModifier)
                    self.Tool.moveSelectionLayer_shadowLayer_withPoint_withModifier_(
                        thisLayer, thisLayer, moveBackward, noModifier)
            for i, coordinate in enumerate(oldPathCoordinates):
                if thisPath.nodes[i].position != coordinate:
                    layerCount += 1
                    # put handle back if not desired by user:
                    if not shouldRealign:
                        thisPath.nodes[i].position = coordinate
        thisLayer.setSelection_(())
    if shouldVerbose:
        if layerCount:
            if shouldRealign:
                print(u" ⚠️ Realigned %i handle%s." % (layerCount, "" if layerCount == 1 else "s"))
            else:
                print(u" ❌ %i handle%s are unaligned." % (layerCount, "" if layerCount == 1 else "s"))
        else:
            print(u" ✅ All BCPs OK.")
    return layerCount
def get_file_urls_from_pasteboard(pasteboard, uti_type_filter=None):
    """Return the file NSURL objects in the pasteboard with an optional UTI type filter.

    :param NSPasteboard pasteboard: pasteboard
    :param uti_type_filter: a list of UTIs in string form
    :type uti_type_filter: list of Uniform Type Identifier strings
    :return: NSURL objects satisfying the UTI restriction (if any)
    :rtype: list of NSURL
    """
    options = NSMutableDictionary.dictionaryWithCapacity_(2)
    options.setObject_forKey_(NSNumber.numberWithBool_(True),
                              NSPasteboardURLReadingFileURLsOnlyKey)
    if uti_type_filter:
        options.setObject_forKey_(uti_type_filter,
                                  NSPasteboardURLReadingContentsConformToTypesKey)
    nsurls = pasteboard.readObjectsForClasses_options_([NSURL], options)
    return nsurls
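A hedged usage sketch for the pasteboard helper above, reading image file URLs copied to the general pasteboard (the UTI filter value is illustrative):

from AppKit import NSPasteboard

pb = NSPasteboard.generalPasteboard()
# readObjectsForClasses_options_ can return None when nothing matches, hence `or []`
image_urls = get_file_urls_from_pasteboard(pb, ['public.image']) or []
for url in image_urls:
    print(url.path())  # filesystem path of each pasted image file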
def valueForCountry_(self, code):
    for iso_code in self.countries.keys():
        if code == iso_code:
            return NSNumber.numberWithInt_(100)
    return NSNumber.numberWithInt_(0)
def _addIncomingSession(self, session, streams, is_update_proposal):
    view = self.getItemView()
    self.sessions[session] = view
    settings = SIPSimpleSettings()
    stream_type_list = list(set(stream.type for stream in streams))

    if len(self.sessions) == 1:
        if "screen-sharing" in stream_type_list:
            base_text = NSLocalizedString("Screen Sharing from %s", "Label")
        elif "video" in stream_type_list:
            base_text = NSLocalizedString("Video call from %s", "Label")
        elif "audio" in stream_type_list:
            base_text = NSLocalizedString("Audio call from %s", "Label")
        elif stream_type_list == ["file-transfer"]:
            base_text = NSLocalizedString("File transfer from %s", "Label")
        elif stream_type_list == ["chat"]:
            base_text = NSLocalizedString("Chat from %s", "Label")
        else:
            base_text = NSLocalizedString("Call from %s", "Label")
        title = base_text % format_identity_to_string(session.remote_identity, check_contact=True, format='compact')
        self.panel.setTitle_(title)
        if settings.sounds.enable_speech_synthesizer:
            self.speak_text = title
            self.startSpeechSynthesizerTimer()
    else:
        self.panel.setTitle_(NSLocalizedString("Multiple Incoming Calls", "Label"))

    NotificationCenter().add_observer(self, sender=session)

    subjectLabel = view.viewWithTag_(1)
    fromLabel = view.viewWithTag_(2)
    accountLabel = view.viewWithTag_(3)
    acceptButton = view.viewWithTag_(5)
    rejectButton = view.viewWithTag_(7)
    accepyOnlyButton = view.viewWithTag_(6)
    busyButton = view.viewWithTag_(8)
    callerIcon = view.viewWithTag_(99)
    chatIcon = view.viewWithTag_(31)
    audioIcon = view.viewWithTag_(32)
    fileIcon = view.viewWithTag_(33)
    screenIcon = view.viewWithTag_(34)
    videoIcon = view.viewWithTag_(35)

    stream_types = [s.type for s in streams]

    session_manager = SessionManager()
    have_audio_call = any(s for s in session_manager.sessions if s is not session and s.streams and 'audio' in (stream.type for stream in s.streams))
    if not have_audio_call:
        self.startSpeechRecognition()

    typeCount = 0
    if 'audio' in stream_types:
        frame = audioIcon.frame()
        typeCount += 1
        frame.origin.x = NSMaxX(view.frame()) - 10 - (NSWidth(frame) + 10) * typeCount
        audioIcon.setFrame_(frame)
        audioIcon.setHidden_(False)
        if not is_update_proposal:
            frame = view.frame()
            frame.size.height += 20  # give extra space for the counter label
            view.setFrame_(frame)
            if session.account.audio.auto_accept:
                have_audio_call = any(s for s in session_manager.sessions if s is not session and s.streams and 'audio' in (stream.type for stream in s.streams))
                if not have_audio_call:
                    self.enableAutoAnswer(view, session, session.account.audio.answer_delay)
            elif settings.answering_machine.enabled or (is_anonymous(session.remote_identity.uri) and session.account.pstn.anonymous_to_answering_machine):
                self.enableAnsweringMachine(view, session)

    if 'chat' in stream_types:
        frame = chatIcon.frame()
        typeCount += 1
        frame.origin.x = NSMaxX(view.frame()) - 10 - (NSWidth(frame) + 10) * typeCount
        chatIcon.setFrame_(frame)
        chatIcon.setHidden_(False)

    if 'screen-sharing' in stream_types:
        frame = screenIcon.frame()
        typeCount += 1
        frame.origin.x = NSMaxX(view.frame()) - 10 - (NSWidth(frame) + 10) * typeCount
        screenIcon.setFrame_(frame)
        screenIcon.setHidden_(False)

    if 'video' in stream_types:
        #have_video_call = any(s for s in session_manager.sessions if s is not session and s.streams and 'video' in (stream.type for stream in s.streams))
        #if not have_video_call:
        #    NSApp.delegate().contactsWindowController.showLocalVideoWindow()
        frame = videoIcon.frame()
        typeCount += 1
        frame.origin.x = NSMaxX(view.frame()) - 10 - (NSWidth(frame) + 10) * typeCount
        videoIcon.setFrame_(frame)
        videoIcon.setHidden_(False)

    is_file_transfer = False
    if 'file-transfer' in stream_types:
        is_file_transfer = True
        frame = fileIcon.frame()
        typeCount += 1
        frame.origin.x = NSMaxX(view.frame()) - 10 - (NSWidth(frame) + 10) * typeCount
        fileIcon.setFrame_(frame)
        fileIcon.setHidden_(False)
        if settings.file_transfer.auto_accept and NSApp.delegate().contactsWindowController.my_device_is_active:
            BlinkLogger().log_info(u"Auto answer enabled for file transfers from known contacts")
            self.enableAutoAnswer(view, session, random.uniform(10, 20))

    self.sessionsListView.addSubview_(view)
    frame = self.sessionsListView.frame()
    frame.origin.y = self.extraHeight - 14
    frame.size.height = self.sessionsListView.minimumHeight()
    self.sessionsListView.setFrame_(frame)
    height = frame.size.height + self.extraHeight
    size = NSMakeSize(NSWidth(self.panel.frame()), height)

    screenSize = NSScreen.mainScreen().frame().size
    if size.height > (screenSize.height * 2) / 3:
        size.height = (screenSize.height * 2) / 3

    frame = self.panel.frame()
    frame.size.height = size.height
    frame.size.height = NSHeight(self.panel.frameRectForContentRect_(frame))
    self.panel.setFrame_display_animate_(frame, True, True)
    self.sessionsListView.relayout()

    acceptButton.cell().setRepresentedObject_(NSNumber.numberWithInt_(0))
    rejectButton.cell().setRepresentedObject_(NSNumber.numberWithInt_(2))
    busyButton.cell().setRepresentedObject_(NSNumber.numberWithInt_(3))

    # no Busy or partial accept option for Stream Update Proposals
    busyButton.setHidden_(is_update_proposal or is_file_transfer)
    accepyOnlyButton.setHidden_(is_update_proposal)
    if is_file_transfer:
        busyButton.setAttributedTitle_("")

    if is_update_proposal:
        subject, only_button_title, only_button_object = self.format_subject_for_incoming_reinvite(session, streams)
        only_button_title = ""
    else:
        subject, only_button_title, only_button_object = self.format_subject_for_incoming_invite(session, streams)

    subjectLabel.setStringValue_(subject)
    accepyOnlyButton.cell().setRepresentedObject_(NSNumber.numberWithInt_(only_button_object))

    frame = subjectLabel.frame()
    frame.size.width = NSWidth(self.sessionsListView.frame()) - 80 - 40 * typeCount
    subjectLabel.setFrame_(frame)

    has_audio_streams = any(s for s in reduce(lambda a, b: a + b, [session.proposed_streams for session in self.sessions.keys()], []) if s.type == "audio")

    caller_contact = NSApp.delegate().contactsWindowController.getFirstContactMatchingURI(session.remote_identity.uri)

    if caller_contact:
        if caller_contact.icon:
            callerIcon.setImage_(caller_contact.icon)

        if not is_update_proposal and caller_contact.auto_answer and NSApp.delegate().contactsWindowController.my_device_is_active:
            if has_audio_streams:
                if not NSApp.delegate().contactsWindowController.has_audio:
                    BlinkLogger().log_info(u"Auto answer enabled for this contact")
                    video_requested = any(s for s in session.blink_supported_streams if s.type == "video")
                    if video_requested and not settings.video.enable_when_auto_answer:
                        blink_supported_streams = [s for s in session.blink_supported_streams if s.type != "video"]
                        session.blink_supported_streams = blink_supported_streams
                    self.enableAutoAnswer(view, session, session.account.audio.answer_delay)
            else:
                video_requested = any(s for s in session.blink_supported_streams if s.type == "video")
                if video_requested and not settings.video.enable_when_auto_answer:
                    blink_supported_streams = [s for s in session.blink_supported_streams if s.type != "video"]
                    session.blink_supported_streams = blink_supported_streams
                BlinkLogger().log_info(u"Auto answer enabled for this contact")
                self.enableAutoAnswer(view, session, session.account.audio.answer_delay)

    fromLabel.setStringValue_(u"%s" % format_identity_to_string(session.remote_identity, check_contact=True, format='full'))
    fromLabel.sizeToFit()

    if has_audio_streams:
        outdev = settings.audio.output_device
        indev = settings.audio.input_device
        if outdev == u"system_default":
            outdev = SIPManager()._app.engine.default_output_device
        if indev == u"system_default":
            indev = SIPManager()._app.engine.default_input_device

        outdev = outdev.strip() if outdev is not None else 'None'
        indev = indev.strip() if indev is not None else 'None'

        if outdev != indev:
            if indev.startswith('Built-in Mic') and outdev.startswith(u'Built-in Out'):
                self.deviceLabel.setStringValue_(NSLocalizedString("Using Built-in Microphone and Output", "Label"))
            else:
                self.deviceLabel.setStringValue_(NSLocalizedString("Using %s for output ", "Label") % outdev.strip() + NSLocalizedString(" and %s for input", "Label") % indev.strip())
        else:
            self.deviceLabel.setStringValue_(NSLocalizedString("Using audio device", "Label") + " " + outdev.strip())

        BlinkLogger().log_info(u"Using input/output audio devices: %s/%s" % (indev.strip(), outdev.strip()))

        self.deviceLabel.sizeToFit()
        self.deviceLabel.setHidden_(False)
    else:
        self.deviceLabel.setHidden_(True)

    acceptButton.setTitle_(NSLocalizedString("Accept", "Button title"))
    accepyOnlyButton.setTitle_(only_button_title or "")

    if False and sum(a.enabled for a in AccountManager().iter_accounts()) == 1:
        accountLabel.setHidden_(True)
    else:
        accountLabel.setHidden_(False)
        if isinstance(session.account, BonjourAccount):
            accountLabel.setStringValue_(NSLocalizedString("To Bonjour account", "Label"))
        else:
            to = format_identity_to_string(session.account)
            accountLabel.setStringValue_(NSLocalizedString("To %s", "Label") % to)
        accountLabel.sizeToFit()

    if len(self.sessions) == 1:
        self.acceptAllButton.setTitle_(NSLocalizedString("Accept", "Button title"))
        self.acceptAllButton.setHidden_(False)
        self.acceptButton.setTitle_(only_button_title or "")
        self.acceptButton.setHidden_(not only_button_title)
        self.rejectButton.setTitle_(NSLocalizedString("Reject", "Button title"))
        self.acceptAllButton.cell().setRepresentedObject_(NSNumber.numberWithInt_(0))
        self.rejectButton.cell().setRepresentedObject_(NSNumber.numberWithInt_(2))
        self.busyButton.cell().setRepresentedObject_(NSNumber.numberWithInt_(3))
        self.acceptButton.cell().setRepresentedObject_(NSNumber.numberWithInt_(only_button_object))
        self.answeringMachineButton.cell().setRepresentedObject_(NSNumber.numberWithInt_(4))
        self.conferenceButton.cell().setRepresentedObject_(NSNumber.numberWithInt_(5))
        self.busyButton.setHidden_(is_update_proposal or is_file_transfer)
        for i in (5, 6, 7, 8):
            view.viewWithTag_(i).setHidden_(True)
    else:
        self.acceptAllButton.setHidden_(False)
        self.acceptAllButton.setTitle_(NSLocalizedString("Accept All", "Button title"))
        self.acceptButton.setHidden_(True)
        self.busyButton.setHidden_(is_update_proposal or is_file_transfer)
        self.rejectButton.setTitle_(NSLocalizedString("Reject All", "Button title"))
        for v in self.sessions.values():
            for i in (5, 6, 7, 8):
                btn = v.viewWithTag_(i)
                btn.setHidden_(len(btn.attributedTitle()) == 0)

    if not has_audio_streams or is_update_proposal:
        self.answeringMachineButton.setHidden_(True)
    else:
        self.answeringMachineButton.setHidden_(not settings.answering_machine.show_in_alert_panel)

    if not self.isConferencing:
        self.conferenceButton.setHidden_(True)
    else:
        self.conferenceButton.setHidden_(False)
def realignLayer(self, thisLayer, shouldRealign=False, shouldReport=False, shouldVerbose=False):
    moveForward = NSPoint(1, 0)
    moveBackward = NSPoint(-1, 0)
    noModifier = NSNumber.numberWithUnsignedInteger_(0)
    layerCount = 0
    if thisLayer:
        for thisPath in thisLayer.paths:
            oldPathCoordinates = [n.position for n in thisPath.nodes]
            for i, thisNode in enumerate(thisPath.nodes):
                if thisNode.type == GSOFFCURVE:
                    # oldPosition = NSPoint(thisNode.position.x, thisNode.position.y)
                    oncurve = None
                    if thisNode.prevNode.type != GSOFFCURVE:
                        oncurve = thisNode.prevNode
                        opposingPoint = oncurve.prevNode
                    elif thisNode.nextNode.type != GSOFFCURVE:
                        oncurve = thisNode.nextNode
                        opposingPoint = oncurve.nextNode
                    handleStraight = (oncurve.x - thisNode.x) * (oncurve.y - thisNode.y) == 0.0
                    if oncurve and oncurve.smooth and not handleStraight:
                        # thisNode = angled handle, straighten it
                        thisPath.setSmooth_withCenterPoint_oppositePoint_(
                            thisNode,
                            oncurve.position,
                            opposingPoint.position,
                        )
                    elif oncurve and opposingPoint and oncurve.smooth and handleStraight and opposingPoint.type == GSOFFCURVE:
                        # thisNode = straight handle: align opposite handle
                        thisPath.setSmooth_withCenterPoint_oppositePoint_(
                            opposingPoint,
                            oncurve.position,
                            thisNode.position,
                        )
                    else:
                        selectedNode = NSMutableArray.arrayWithObject_(thisNode)
                        thisLayer.setSelection_(selectedNode)
                        self.Tool.moveSelectionLayer_shadowLayer_withPoint_withModifier_(
                            thisLayer, thisLayer, moveForward, noModifier)
                        self.Tool.moveSelectionLayer_shadowLayer_withPoint_withModifier_(
                            thisLayer, thisLayer, moveBackward, noModifier)
                    # TODO:
                    # recode with GSPath.setSmooth_withCenterNode_oppositeNode_()
            for i, coordinate in enumerate(oldPathCoordinates):
                if thisPath.nodes[i].position != coordinate:
                    layerCount += 1
                    # put handle back if not desired by user:
                    if not shouldRealign:
                        thisPath.nodes[i].position = coordinate
        thisLayer.setSelection_(())
    if shouldReport and shouldVerbose:
        if layerCount:
            if shouldRealign:
                print(u" ⚠️ Realigned %i handle%s." % (layerCount, "" if layerCount == 1 else "s"))
            else:
                print(u" ❌ %i handle%s are unaligned." % (layerCount, "" if layerCount == 1 else "s"))
        else:
            print(u" ✅ All BCPs OK.")
    return layerCount
def __setattr__(self, attr, value):
    # check to see if the attribute has been
    # deprecated. if so, warn the caller and
    # update the attribute and value.
    if attr in self._deprecatedAttributes:
        newAttr, newValue = ufoLib.convertFontInfoValueForAttributeFromVersion1ToVersion2(attr, value)
        note = "The %s attribute has been deprecated. Use the new %s attribute." % (attr, newAttr)
        warn(note, DeprecationWarning)
        attr = newAttr
        value = newValue

    _baseAttributes = ["_object", "changed", "selected", "getParent"]
    _renameAttributes = {
        "openTypeNameManufacturer": "manufacturer",
        "openTypeNameManufacturerURL": "manufacturerURL",
        "openTypeNameDesigner": "designer",
        "openTypeNameDesignerURL": "designerURL",
        # "openTypeNameLicense": "license",
        # "openTypeNameLicenseURL": "licenseURL",
        "fontName": "postscriptFontName",
        "vendorURL": "manufacturerURL",
        "uniqueID": "postscriptUniqueID",
        "otMacName": "openTypeNameCompatibleFullName"
    }
    _masterAttributes = [
        "postscriptUnderlinePosition",
        "postscriptUnderlineThickness",
        "openTypeOS2StrikeoutSize",
        "openTypeOS2StrikeoutPosition"
    ]

    # setting a known attribute
    if attr in _masterAttributes:
        if type(value) == type([]):
            value = NSMutableArray.arrayWithArray_(value)
        elif type(value) == type(1):
            value = NSNumber.numberWithInt_(value)
        elif type(value) == type(1.2):
            value = NSNumber.numberWithFloat_(value)
        if attr in _renameAttributes:
            attr = _renameAttributes[attr]
        self._object._font.fontMasterAtIndex_(self._object._masterIndex).setValue_forKey_(value, attr)
        return
    if attr not in _baseAttributes:
        try:
            if type(value) == type([]):
                value = NSMutableArray.arrayWithArray_(value)
            elif type(value) == type(1):
                value = NSNumber.numberWithInt_(value)
            elif type(value) == type(1.2):
                value = NSNumber.numberWithFloat_(value)
            if attr in _renameAttributes:
                attr = _renameAttributes[attr]
            self._object._font.setValue_forKey_(value, attr)
        except:
            raise AttributeError("Unknown attribute %s." % attr)
        return
    elif attr in self.__dict__ or attr in self._baseAttributes:
        super(BaseInfo, self).__setattr__(attr, value)
    else:
        raise AttributeError("Unknown attribute %s." % attr)
#!/usr/bin/python
import glob
import sys

import QTKit
from Foundation import NSNumber
from AppKit import NSImage

time = QTKit.QTMakeTime(20, 600)  # QT close to 29.98 fps
attrs = {
    QTKit.QTAddImageCodecType: "png",
    QTKit.QTAddImageCodecQuality: NSNumber.numberWithLong_(QTKit.codecHighQuality)
}


def create_movie(name):
    print "creating ", name
    movie, err = QTKit.QTMovie.alloc().initToWritableFile_error_(name, None)
    if movie is None:
        errmsg = "Could not create movie file: %s" % (name)
        raise IOError, errmsg
    return movie


def add_frame(movie, imagefile):
    print "adding frame ", imagefile
    img = NSImage.alloc().initWithContentsOfFile_(imagefile)
    movie.addImage_forDuration_withAttributes_(img, time, attrs)


movie = create_movie('/tmp/movie.mov')
for a in sys.argv[1:]:
    for f in glob.iglob(a):
        add_frame(movie, f)
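The script above adds frames to the movie opened with initToWritableFile_error_ but never explicitly saves the result. A possible follow-up, assuming the same QTKit save calls used in the other examples here apply (the flattened output path is made up):

# Persist the accumulated edits back to /tmp/movie.mov.
movie.updateMovieFile()
# Alternatively, write a flattened, self-contained copy:
# movie.writeToFile_withAttributes_("/tmp/movie_flat.mov", {QTKit.QTMovieFlatten: True})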
def show(self):
    BlinkLogger().log_debug('Show %s' % self)
    self.active = True
    if self.captureSession is None:
        # Find a video camera
        device = self.getDevice()

        if not device:
            return

        self.captureSession = AVCaptureSession.alloc().init()
        if self.captureSession.canSetSessionPreset_(AVCaptureSessionPresetHigh):
            self.captureSession.setSessionPreset_(AVCaptureSessionPresetHigh)

        NSWorkspace.sharedWorkspace().notificationCenter().addObserver_selector_name_object_(
            self, "computerDidWake:", NSWorkspaceDidWakeNotification, None)
        NSWorkspace.sharedWorkspace().notificationCenter().addObserver_selector_name_object_(
            self, "computerWillSleep:", NSWorkspaceWillSleepNotification, None)

        max_resolution = (0, 0)
        BlinkLogger().log_debug("%s camera provides %d formats" % (device.localizedName(), len(device.formats())))
        for desc in device.formats():
            m = self.resolution_re.match(repr(desc))
            if m:
                data = m.groupdict()
                width = int(data['width'])
                height = int(data['height'])
                BlinkLogger().log_debug("Supported resolution: %dx%d %.2f" % (width, height, width / float(height)))
                if width > max_resolution[0]:
                    max_resolution = (width, height)

        width, height = max_resolution
        if width == 0 or height == 0:
            width = 1280
            height = 720
            BlinkLogger().log_info("Error: %s camera does not provide any supported video format" % device.localizedName())
        else:
            if NSApp.delegate().contactsWindowController.sessionControllersManager.isMediaTypeSupported('video'):
                BlinkLogger().log_info("Opened %s camera at %0.fx%0.f resolution" % (SIPApplication.video_device.real_name, width, height))

        self.aspect_ratio = width / float(height) if width > height else height / float(width)

        self.captureDeviceInput = AVCaptureDeviceInput.alloc().initWithDevice_error_(device, None)
        if self.captureDeviceInput:
            try:
                self.captureSession.addInput_(self.captureDeviceInput[0])
            except ValueError as e:
                BlinkLogger().log_info('Failed to add camera input to capture session: %s' % str(e))
                return
        else:
            BlinkLogger().log_info('Failed to aquire input %s' % self)
            return

        self.setWantsLayer_(True)
        self.videoPreviewLayer = AVCaptureVideoPreviewLayer.alloc().initWithSession_(self.captureSession)
        self.layer().addSublayer_(self.videoPreviewLayer)
        self.videoPreviewLayer.setFrame_(self.layer().bounds())
        self.videoPreviewLayer.setAutoresizingMask_(kCALayerWidthSizable | kCALayerHeightSizable)
        self.videoPreviewLayer.setBackgroundColor_(CGColorGetConstantColor(kCGColorBlack))
        self.videoPreviewLayer.setVideoGravity_(AVLayerVideoGravityResizeAspectFill)
        self.videoPreviewLayer.setCornerRadius_(5.0)
        self.videoPreviewLayer.setMasksToBounds_(True)
        self.setMirroring()

        self.stillImageOutput = AVCaptureStillImageOutput.new()
        pixelFormat = NSNumber.numberWithInt_(kCVPixelFormatType_32BGRA)
        self.stillImageOutput.setOutputSettings_(
            NSDictionary.dictionaryWithObject_forKey_(pixelFormat, kCVPixelBufferPixelFormatTypeKey))
        self.captureSession.addOutput_(self.stillImageOutput)

    if self.captureSession and self.videoPreviewLayer:
        BlinkLogger().log_info('Start aquire local video %s' % self)
        self.videoPreviewLayer.setBackgroundColor_(NSColor.colorWithCalibratedRed_green_blue_alpha_(0, 0, 0, 0.4))
        self.captureSession.startRunning()
def valueForCountry_(self, code):
    for iso_code in list(self.countries.keys()):
        if code == iso_code:
            return NSNumber.numberWithInt_(100)
    return NSNumber.numberWithInt_(0)
#MenuTitle: Realign BCPs
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__ = """
Realigns handles (BCPs) in current layers of selected glyphs. Useful for
resetting out-of-sync handles, e.g., after a transform operation, after
interpolation or after switching to a different grid. Hold down Option to
process ALL layers of the glyph.
"""

from Foundation import NSPoint, NSEvent, NSNumber, NSMutableArray

optionKeyFlag = 524288
optionKeyPressed = NSEvent.modifierFlags() & optionKeyFlag == optionKeyFlag

thisFont = Glyphs.font
moveForward = NSPoint(1, 1)
moveBackward = NSPoint(-1, -1)
noModifier = NSNumber.numberWithUnsignedInteger_(0)
Tool = GlyphsPathPlugin.alloc().init()


def realignLayer(thisLayer):
    countOfHandlesOnLayer = 0
    for thisPath in thisLayer.paths:
        for thisNode in thisPath.nodes:
            if thisNode.type == GSOFFCURVE:
                countOfHandlesOnLayer += 1
                selectedNode = NSMutableArray.arrayWithObject_(thisNode)
                thisLayer.setSelection_(selectedNode)
                Tool.moveSelectionLayer_shadowLayer_withPoint_withModifier_(
                    thisLayer, thisLayer, moveForward, noModifier)
                Tool.moveSelectionLayer_shadowLayer_withPoint_withModifier_(
                    thisLayer, thisLayer, moveBackward, noModifier)