def combineAudio(self, filepath):
    '''
    Combine audio tracks into a single wav file. This by-passes the issues with
    Maya not playblasting multip audio tracks.

    :param filepath: filepath to store the combined audioTrack
    :raises ValueError: if fewer than 2 audio nodes are managed by this instance
    :raises IOError: if filepath is already loaded in Maya and is NOT a previous compile

    TODO: Deal with offset start and end data + silence
    '''
    # tracks whether every sound was successfully merged; flipped False on skips
    status = True
    if not len(self.audioNodes) > 1:
        raise ValueError(
            'We need more than 1 audio node in order to compile')

    # if the target path is already loaded into Maya we only continue when that
    # node was itself a previous compile - remove + delete it so we can overwrite
    for audio in cmds.ls(type='audio'):
        audioNode = AudioNode(audio)
        if audioNode.path == filepath:
            if audioNode.isCompiled:
                log.info('Deleting currently compiled Audio Track : %s' % audioNode.path)
                if audioNode in self.audioNodes:
                    self.audioNodes.remove(audioNode)
                audioNode.delete()
                break
            else:
                raise IOError(
                    'Combined Audio path is already imported into Maya')

    frmrange = self.getOverallRange()
    # tracks starting before frame 0 shift everything forward so the combined
    # wav still starts at time zero
    neg_adjustment = 0
    if frmrange[0] < 0:
        neg_adjustment = frmrange[0]

    # overall duration in milliseconds derived from the frame range at scene fps
    duration = ((frmrange[1] + abs(neg_adjustment)) / r9General.getCurrentFPS()) * 1000
    log.info('Audio BaseTrack duration = %f' % duration)
    # silent base track each sound gets overlaid onto
    baseTrack = audio_segment.AudioSegment.silent(duration)

    for audio in self.audioNodes:
        sound = audio_segment.AudioSegment.from_wav(audio.path)
        if sound.sample_width not in [1, 2, 4]:
            # Python's audioop lib only handles 8/16/32 bit samples - skip and flag
            log.warning(
                '24bit Audio is NOT supported in Python audioop lib! : "%s" == %i' % (audio.audioNode, sound.sample_width))
            status = False
            continue
        # offset each sound by the negative-range adjustment before overlaying
        insertFrame = (audio.startFrame + abs(neg_adjustment))
        log.info('inserting sound : %s at %f adjusted to %f' % \
                 (audio.audioNode, audio.startFrame, insertFrame))
        baseTrack = baseTrack.overlay(
            sound, position=(insertFrame / r9General.getCurrentFPS()) * 1000)

    baseTrack.export(filepath, format="wav")
    compiled = AudioNode.importAndActivate(filepath)
    # stamp the node so later runs can recognise it as a compiled track
    compiled.stampCompiled(self.mayaNodes)
    compiled.startFrame = neg_adjustment

    if not status:
        # NOTE(review): StandardError is Python 2 only - this module appears Py2-era
        raise StandardError(
            'combine completed with errors: see script Editor for details')
def poseSave(self, nodes, filepath=None, useFilter=True, storeThumbnail=True):
    '''
    Entry point for the generic PoseSave.

    :param nodes: nodes to store the data against OR the rootNode if the
        filter is active.
    :param filepath: posefile to save - if not given the pose is cached on
        this class instance.
    :param useFilter: use the filterSettings or not.
    :param storeThumbnail: also grab a thumbnail screenshot next to the pose file.
    '''
    # push args onto the instance so any poseHandler.py file has access to them
    self.filepath = filepath
    self.useFilter = useFilter
    if self.filepath:
        log.debug('PosePath given : %s' % filepath)

    self.buildInternalPoseData(nodes)

    if not self.filepath:
        # no path given - pose stays cached on this instance only
        return

    self._writePose(filepath)
    if storeThumbnail:
        # preserve the user's selection around the screenshot grab
        currentSelection = cmds.ls(sl=True, l=True)
        cmds.select(cl=True)
        r9General.thumbNailScreen(filepath,
                                  self.thumbnailRes[0],
                                  self.thumbnailRes[1])
        if currentSelection:
            cmds.select(currentSelection)
    log.info('Pose Saved Successfully to : %s' % filepath)
def poseSave(self, nodes, filepath=None, useFilter=True, storeThumbnail=True): ''' Entry point for the generic PoseSave. :param nodes: nodes to store the data against OR the rootNode if the filter is active. :param filepath: posefile to save - if not given the pose is cached on this class instance. :param useFilter: use the filterSettings or not. ''' #push args to object - means that any poseHandler.py file has access to them self.filepath = filepath self.useFilter = useFilter if self.filepath: log.debug('PosePath given : %s' % filepath) self.buildInternalPoseData(nodes) if self.filepath: self._writePose(filepath) if storeThumbnail: sel = cmds.ls(sl=True, l=True) cmds.select(cl=True) r9General.thumbNailScreen(filepath, self.thumbnailRes[0], self.thumbnailRes[1]) if sel: cmds.select(sel) log.info('Pose Saved Successfully to : %s' % filepath)
def combineAudio(self, filepath):
    '''
    Combine audio tracks into a single wav file. This by-passes the issues with
    Maya not playblasting multip audio tracks.

    :param filepath: filepath to store the combined audioTrack
    :raises ValueError: if fewer than 2 audio nodes are managed by this instance
    :raises IOError: if filepath is already loaded in Maya and is NOT a previous compile

    TODO: Deal with offset start and end data + silence
    '''
    status = True
    # nodes we skipped (missing file / unsupported bit-depth), kept for debugging
    failed = []
    if not len(self.audioNodes) > 1:
        raise ValueError('We need more than 1 audio node in order to compile')

    # if the target path is already loaded into Maya we only continue when that
    # node was itself a previous compile - remove + delete it so we can overwrite
    for audio in cmds.ls(type='audio'):
        audioNode = AudioNode(audio)
        if audioNode.path == filepath:
            if audioNode.isCompiled:
                log.info('Deleting currently compiled Audio Track : %s' % audioNode.path)
                if audioNode in self.audioNodes:
                    self.audioNodes.remove(audioNode)
                audioNode.delete()
                break
            else:
                raise IOError('Combined Audio path is already imported into Maya')

    frmrange = self.getOverallRange()
    # tracks starting before frame 0 shift everything forward so the combined
    # wav still starts at time zero
    neg_adjustment = 0
    if frmrange[0] < 0:
        neg_adjustment = frmrange[0]

    # overall duration in milliseconds derived from the frame range at scene fps
    duration = ((frmrange[1] + abs(neg_adjustment)) / r9General.getCurrentFPS()) * 1000
    log.info('Audio BaseTrack duration = %f' % duration)
    # silent base track each sound gets overlaid onto
    baseTrack = audio_segment.AudioSegment.silent(duration)

    for audio in self.audioNodes:
        if not os.path.exists(audio.path):
            log.warning('Audio file not found! : "%s" == %s' % (audio.audioNode, audio.path))
            status = False
            failed.append(audio)
            continue
        sound = audio_segment.AudioSegment.from_wav(audio.path)
        if sound.sample_width not in [1, 2, 4]:
            # FIX: this message was previously a string literal broken across
            # two physical lines (a syntax error); restored as one literal.
            # Python's audioop lib only handles 8/16/32 bit samples - skip and flag
            log.warning('24bit Audio is NOT supported in Python audioop lib! : "%s" == %i' % (audio.audioNode, sound.sample_width))
            status = False
            failed.append(audio)
            continue
        # offset each sound by the negative-range adjustment before overlaying
        insertFrame = (audio.startFrame + abs(neg_adjustment))
        log.info('inserting sound : %s at %f adjusted to %f' % \
                 (audio.audioNode, audio.startFrame, insertFrame))
        baseTrack = baseTrack.overlay(sound, position=(insertFrame / r9General.getCurrentFPS()) * 1000)

    baseTrack.export(filepath, format="wav")
    compiled = AudioNode(filepath=filepath)
    compiled.importAndActivate()
    # stamp the node so later runs can recognise it as a compiled track
    compiled.stampCompiled(self.mayaNodes)
    compiled.startFrame = neg_adjustment

    if not status:
        # NOTE(review): StandardError is Python 2 only - this module appears Py2-era
        raise StandardError('combine completed with errors: see script Editor for details')
def audioPathLoaded(filepath):
    '''
    Return any soundNodes in Maya whose filename attribute points at the
    given audio path. Returns an empty list if the path doesn't exist on disk.
    '''
    if not os.path.exists(filepath):
        return []
    return [soundNode for soundNode in cmds.ls(type='audio')
            if r9General.formatPath(cmds.getAttr('%s.filename' % soundNode)) == r9General.formatPath(filepath)]
def audioPathLoaded(filepath):
    '''
    return any soundNodes in Maya that point to the given audio path

    :param filepath: audio file path to match against each node's filename attr
    :return: list of matching audio node names (empty if path doesn't exist on disk)
    '''
    nodes = []
    if not os.path.exists(filepath):
        return nodes
    for audio in cmds.ls(type='audio'):
        # formatPath normalises both sides so the comparison is path-separator safe
        if r9General.formatPath(cmds.getAttr('%s.filename' % audio)) == r9General.formatPath(filepath):
            nodes.append(audio)
    return nodes
def timecode_to_milliseconds(timecode, smpte=True, framerate=None):
    '''
    from a properly formatted timecode return it in milliseconds

    >>> r9Audio.timecode_to_milliseconds('09:00:00:00')

    :param timecode: '09:00:00:20' as a string
    :param smpte: calculate the milliseconds based on HH:MM:SS:FF (frames as last block)
    :param framerate: only used if smpte=True, the framerate to use in the conversion,
        default (None) uses the current scenes framerate
    '''
    if not framerate:
        framerate = r9General.getCurrentFPS()

    blocks = timecode.split(':')
    if len(blocks) != 4:
        raise IOError('timecode should be in the format "09:00:00:00"')
    if smpte and int(blocks[3]) > framerate:
        raise IOError('timecode is badly formatted, frameblock is greater than given framerate')

    # hours + minutes + seconds as milliseconds
    total = int(blocks[0]) * 3600000 + int(blocks[1]) * 60000 + int(blocks[2]) * 1000
    if smpte:
        # last block is frames - convert via the framerate
        total += (int(blocks[3]) * 1000) / float(framerate)
    else:
        # last block is already milliseconds
        total += int(blocks[3])
    return total
def startTime(self):
    '''
    : PRO_PACK : Maya start time of the sound node in milliseconds
    '''
    if not self.isLoaded:
        # no sound node loaded - treat the start as time zero
        return 0
    return (self.startFrame / r9General.getCurrentFPS()) * 1000
def startTime(self):
    '''
    Start time of the sound node converted to milliseconds
    (startFrame at the current scene framerate); 0 if not loaded.
    '''
    if self.isLoaded:
        return (self.startFrame / r9General.getCurrentFPS()) * 1000
    return 0
def getLengthFromWav(self):
    '''
    Read the sound's length (in frames) directly from the wav file itself,
    bypassing the Maya handling, why? In maya.standalone the audio isn't
    loaded correctly and always is of length 1!
    '''
    with contextlib.closing(wave.open(self.path, 'r')) as wav:
        sampleCount = wav.getnframes()
        sampleRate = wav.getframerate()
    # duration in seconds, then scaled to Maya frames at the scene fps
    seconds = sampleCount / float(sampleRate)
    return seconds * r9General.getCurrentFPS()
def combineAudio(self, filepath):
    '''
    Combine audio tracks into a single wav file. This by-passes the issues with
    Maya not playblasting multip audio tracks.

    :param filepath: filepath to store the combined audioTrack
    :raises ValueError: if fewer than 2 audio nodes are managed by this instance
    :raises IOError: if filepath is already loaded in Maya and is NOT a previous compile

    TODO: Deal with offset start and end data + silence
    '''
    if not len(self.audioNodes) > 1:
        raise ValueError('We need more than 1 audio node in order to compile')

    # if the target path is already loaded into Maya we only continue when that
    # node was itself a previous compile - remove + delete it so we can overwrite
    for audio in cmds.ls(type='audio'):
        audioNode = AudioNode(audio)
        if audioNode.path == filepath:
            if audioNode.isCompiled:
                log.info('Deleting currently compiled Audio Track : %s' % audioNode.path)
                if audioNode in self.audioNodes:
                    self.audioNodes.remove(audioNode)
                audioNode.delete()
                break
            else:
                raise IOError('Combined Audio path is already imported into Maya')

    frmrange = self.getOverallRange()
    # tracks starting before frame 0 shift everything forward so the combined
    # wav still starts at time zero
    neg_adjustment = 0
    if frmrange[0] < 0:
        neg_adjustment = frmrange[0]

    # overall duration in milliseconds derived from the frame range at scene fps
    duration = ((frmrange[1] + abs(neg_adjustment)) / r9General.getCurrentFPS()) * 1000
    log.info('Audio BaseTrack duration = %f' % duration)
    # silent base track each sound gets overlaid onto
    baseTrack = audio_segment.AudioSegment.silent(duration)

    for audio in self.audioNodes:
        sound = audio_segment.AudioSegment.from_wav(audio.path)
        # offset each sound by the negative-range adjustment before overlaying
        insertFrame = (audio.startFrame + abs(neg_adjustment))
        log.info('inserting sound : %s at %f adjusted to %f' % \
                 (audio.audioNode, audio.startFrame, insertFrame))
        baseTrack = baseTrack.overlay(sound, position=(insertFrame / r9General.getCurrentFPS()) * 1000)

    baseTrack.export(filepath, format="wav")
    compiled = AudioNode.importAndActivate(filepath)
    # stamp the node so later runs can recognise it as a compiled track
    compiled.stampCompiled(self.mayaNodes)
    compiled.startFrame = neg_adjustment
def milliseconds_to_frame(milliseconds, framerate=None):
    '''
    convert milliseconds into frames

    :param milliseconds: time in milliseconds
    :param framerate: framerate used for the conversion,
        default (None) uses the current scenes framerate
    '''
    if not framerate:
        framerate = r9General.getCurrentFPS()
    seconds = float(milliseconds) / 1000
    return seconds * framerate
def frame_to_milliseconds(frame, framerate=None):
    '''
    from a given frame return that time in milliseconds
    relative to the given framerate

    :param frame: current frame in Maya
    :param framerate: the framerate to use in the conversion,
        default (None) uses the current scenes framerate
    '''
    if not framerate:
        framerate = r9General.getCurrentFPS()
    seconds = frame / float(framerate)
    return seconds * 1000
def milliseconds_to_Timecode(milliseconds, smpte=True, framerate=None):
    '''
    convert milliseconds into correctly formatted timecode

    :param milliseconds: time in milliseconds
    :param smpte: format the timecode HH:MM:SS:FF where FF is frames
    :param framerate: when using smpte this is the framerate used in the FF block
        default (None) uses the current scenes framerate

    .. note::
        * If smpte = False : the format will be HH:MM:SS:MSS = hours, minutes, seconds, milliseconds
        * If smpte = True : the format will be HH:MM:SS:FF = hours, minutes, seconds, frames
    '''
    def __zeropad(value):
        # pad single-digit blocks to 2 chars so the timecode lines up
        if value < 10:
            return '0%s' % value
        else:
            return value

    if not framerate:
        framerate = r9General.getCurrentFPS()

    # peel off each whole time block. FIX: use '>=' rather than '>' so exact
    # boundaries roll over correctly - previously 3600000ms produced
    # '00:60:00:24' instead of '01:00:00:00' (same off-by-one for minutes/seconds)
    if milliseconds >= 3600000:
        hours = int(math.floor(milliseconds / 3600000))
        milliseconds -= (hours * 3600000)
    else:
        hours = 0
    if milliseconds >= 60000:
        minutes = int(math.floor(milliseconds / 60000))
        milliseconds -= (minutes * 60000)
    else:
        minutes = 0
    if milliseconds >= 1000:
        seconds = int(math.floor(milliseconds / 1000))
        milliseconds -= (seconds * 1000)
    else:
        seconds = 0

    frame = int(math.floor(milliseconds))
    if smpte:
        # remaining milliseconds -> frames; ceil keeps frame-aligned inputs exact.
        # NOTE(review): a non frame-aligned remainder can still ceil up to the
        # framerate itself (an invalid FF block) - confirm whether callers care
        frame = int(math.ceil((float(frame) / 1000) * float(framerate)))

    return "{0}:{1}:{2}:{3}".format(__zeropad(hours),
                                    __zeropad(minutes),
                                    __zeropad(seconds),
                                    __zeropad(frame))
def endTime(self):
    '''
    End time of the sound node in milliseconds
    (endFrame at the current scene framerate).
    '''
    fps = r9General.getCurrentFPS()
    return (self.endFrame / fps) * 1000
def openAudioPath(self):
    '''
    Open the directory containing this node's audio file in the OS file browser.
    Does nothing if the path is unset or missing on disk.
    '''
    audioFile = self.path
    if audioFile and os.path.exists(audioFile):
        r9General.os_OpenFileDirectory(audioFile)
def endTime(self):
    '''
    : PRO_PACK : Maya end time of the sound node in milliseconds
    (endFrame converted at the current scene framerate)
    '''
    return (self.endFrame / r9General.getCurrentFPS()) * 1000