Code example #1
0
File: Red9_Audio.py  Project: GuidoPollini/MuTools
    def combineAudio(self, filepath):
        '''
        Combine all managed audio tracks into a single wav file. This
        by-passes the issue of Maya not playblasting multiple audio tracks.

        :param filepath: filepath to store the combined audio track to
        :raises ValueError: if fewer than two audio nodes are being managed
        :raises IOError: if filepath is already loaded in Maya as a
            non-compiled audio node
        :raises StandardError: if any track could not be mixed in
            (missing file or unsupported bit-depth)

        TODO: Deal with offset start and end data + silence
        '''
        status=True
        failed=[]  # AudioNodes that could not be mixed into the base track
        if not len(self.audioNodes)>1:
            raise ValueError('We need more than 1 audio node in order to compile')

        # If the target filepath is already loaded in Maya: delete a previous
        # compiled track so it can be rebuilt, or abort if the node at that
        # path is a normal (non-compiled) audio node.
        for audio in cmds.ls(type='audio'):
            audioNode=AudioNode(audio)
            if audioNode.path==filepath:
                if audioNode.isCompiled:
                    log.info('Deleting currently compiled Audio Track : %s' % audioNode.path)
                    if audioNode in self.audioNodes:
                        self.audioNodes.remove(audioNode)
                    audioNode.delete()
                    break
                else:
                    raise IOError('Combined Audio path is already imported into Maya')
        
        # If any track starts before frame 0, everything is shifted forward by
        # that amount so the compiled wav still lines up with the timeline.
        frmrange = self.getOverallRange()
        neg_adjustment=0
        if frmrange[0] < 0:
            neg_adjustment=frmrange[0]
            
        # total length of the silent base track, in milliseconds
        duration = ((frmrange[1] + abs(neg_adjustment)) / r9General.getCurrentFPS()) * 1000
        log.info('Audio BaseTrack duration = %f' % duration)
        baseTrack = audio_segment.AudioSegment.silent(duration)

        for audio in self.audioNodes:
            if not os.path.exists(audio.path):
                log.warning('Audio file not found!  : "%s" == %s' % (audio.audioNode, audio.path))
                status = False
                failed.append(audio)
                continue
            sound = audio_segment.AudioSegment.from_wav(audio.path)
            # audioop (used by pydub under the hood) only handles 8/16/32bit
            if sound.sample_width not in [1, 2, 4]:
                log.warning('24bit Audio is NOT supported in Python audioop lib!  : "%s" == %i' % (audio.audioNode, sound.sample_width))
                status = False
                failed.append(audio)
                continue
            insertFrame = (audio.startFrame + abs(neg_adjustment))
            log.info('inserting sound : %s at %f adjusted to %f' % \
                     (audio.audioNode, audio.startFrame, insertFrame))
            # overlay position is in milliseconds from the start of baseTrack
            baseTrack = baseTrack.overlay(sound, position=(insertFrame / r9General.getCurrentFPS()) * 1000)

        baseTrack.export(filepath, format="wav")
        # import the compiled wav back into Maya and stamp it so later
        # compiles can identify and replace it
        compiled=AudioNode(filepath=filepath)
        compiled.importAndActivate()
        compiled.stampCompiled(self.mayaNodes)
        compiled.startFrame=neg_adjustment
        
        # NOTE(review): StandardError is Python 2 only - under Python 3 this
        # would need to be Exception
        if not status:
            raise StandardError('combine completed with errors: see script Editor for details')
Code example #2
0
File: Red9_Audio.py  Project: jeanim/Red9_StudioPack
def timecode_to_milliseconds(timecode, smpte=True, framerate=None):
    '''
    Return a properly formatted timecode string as milliseconds.
    r9Audio.timecode_to_milliseconds('09:00:00:00')

    :param timecode: '09:00:00:20' as a string
    :param smpte: calculate the milliseconds based on HH:MM:SS:FF (frames as last block)
    :param framerate: only used if smpte=True, the framerate to use in the
        conversion, default (None) uses the current scenes framerate
    '''
    if not framerate:
        framerate = r9General.getCurrentFPS()

    blocks = timecode.split(':')
    if len(blocks) != 4:
        raise IOError('timecode should be in the format "09:00:00:00"')
    if smpte and int(blocks[3]) > framerate:
        raise IOError('timecode is badly formatted, frameblock is greater than given framerate')

    # hours / minutes / seconds blocks each contribute a fixed amount
    total = int(blocks[0]) * 3600000 + int(blocks[1]) * 60000 + int(blocks[2]) * 1000
    if smpte:
        # final block is frames: convert to ms at the given framerate
        total += (int(blocks[3]) * 1000) / float(framerate)
    else:
        # final block is already milliseconds
        total += int(blocks[3])
    return total
Code example #3
0
 def startTime(self):
     '''
     : PRO_PACK : Maya start time of the sound node, in milliseconds.
     Returns 0 when the node is not loaded.
     '''
     if not self.isLoaded:
         return 0
     start_ms = (self.startFrame / r9General.getCurrentFPS()) * 1000
     return start_ms
Code example #4
0
 def startTime(self):
     '''
     Start time of the sound node in milliseconds (0 if not loaded).
     '''
     if self.isLoaded:
         start_ms = (self.startFrame / r9General.getCurrentFPS()) * 1000
         return start_ms
     return 0
Code example #5
0
File: Red9_Audio.py  Project: jeanim/Red9_StudioPack
 def getLengthFromWav(self):
     '''
     Length of the wav in frames, read directly from the file header,
     bypassing Maya's own handling. Why? In maya.standalone the audio
     isn't loaded correctly and always reports a length of 1.
     '''
     with contextlib.closing(wave.open(self.path, 'r')) as wav_file:
         sample_count = wav_file.getnframes()
         sample_rate = wav_file.getframerate()
     seconds = sample_count / float(sample_rate)
     return seconds * r9General.getCurrentFPS()
Code example #6
0
File: Red9_Audio.py  Project: nicolasboselli/test
    def combineAudio(self, filepath):
        '''
        Combine all managed audio tracks into a single wav file. This
        by-passes the issue of Maya not playblasting multiple audio tracks.

        :param filepath: filepath to store the combined audio track to
        :raises ValueError: if fewer than two audio nodes are being managed
        :raises IOError: if filepath is already loaded in Maya as a
            non-compiled audio node

        TODO: Deal with offset start and end data + silence
        '''
        if not len(self.audioNodes)>1:
            raise ValueError('We need more than 1 audio node in order to compile')

        # If the target filepath is already loaded in Maya: delete a previous
        # compiled track so it can be rebuilt, or abort if the node at that
        # path is a normal (non-compiled) audio node.
        for audio in cmds.ls(type='audio'):
            audioNode=AudioNode(audio)
            if audioNode.path==filepath:
                if audioNode.isCompiled:
                    log.info('Deleting currently compiled Audio Track : %s' % audioNode.path)
                    if audioNode in self.audioNodes:
                        self.audioNodes.remove(audioNode)
                    audioNode.delete()
                    break
                else:
                    raise IOError('Combined Audio path is already imported into Maya')
            
        # If any track starts before frame 0, everything is shifted forward by
        # that amount so the compiled wav still lines up with the timeline.
        frmrange = self.getOverallRange()
        neg_adjustment=0
        if frmrange[0] < 0:
            neg_adjustment=frmrange[0]
            
        # total length of the silent base track, in milliseconds
        duration = ((frmrange[1] + abs(neg_adjustment)) / r9General.getCurrentFPS()) * 1000
        log.info('Audio BaseTrack duration = %f' % duration)
        baseTrack = audio_segment.AudioSegment.silent(duration)

        for audio in self.audioNodes:
            sound = audio_segment.AudioSegment.from_wav(audio.path)
            insertFrame = (audio.startFrame + abs(neg_adjustment))
            log.info('inserting sound : %s at %f adjusted to %f' % \
                     (audio.audioNode, audio.startFrame, insertFrame))
            # overlay position is in milliseconds from the start of baseTrack
            baseTrack = baseTrack.overlay(sound, position=(insertFrame / r9General.getCurrentFPS()) * 1000)

        baseTrack.export(filepath, format="wav")
        # NOTE(review): importAndActivate is called here as a class-level
        # factory returning the new node - this differs from the instance
        # usage in other versions of this method; confirm against AudioNode
        compiled=AudioNode.importAndActivate(filepath)
        compiled.stampCompiled(self.mayaNodes)
        compiled.startFrame=neg_adjustment
Code example #7
0
File: Red9_Audio.py  Project: jeanim/Red9_StudioPack
def milliseconds_to_frame(milliseconds, framerate=None):
    '''
    Convert a time in milliseconds into a (float) frame count.

    :param milliseconds: time in milliseconds
    :param framerate: framerate used in the conversion, default (None)
        uses the current scenes framerate
    '''
    fps = framerate if framerate else r9General.getCurrentFPS()
    seconds = float(milliseconds) / 1000
    return seconds * fps
Code example #8
0
File: Red9_Audio.py  Project: Bumpybox/Tapp
def frame_to_milliseconds(frame, framerate=None):
    '''
    Return the time in milliseconds for a given frame, relative to the
    given framerate.

    :param frame: current frame in Maya
    :param framerate: the framerate to use in the conversion, default (None)
        uses the current scenes framerate
    '''
    fps = framerate if framerate else r9General.getCurrentFPS()
    return (frame / float(fps)) * 1000
Code example #9
0
File: Red9_Audio.py  Project: jeanim/Red9_StudioPack
def milliseconds_to_Timecode(milliseconds, smpte=True, framerate=None):
    '''
    Convert a time in milliseconds into a correctly formatted timecode string.

    :param milliseconds: time in milliseconds
    :param smpte: format the timecode HH:MM:SS:FF where FF is frames
    :param framerate: when using smpte this is the framerate used in the FF
        block, default (None) uses the current scenes framerate

    .. note::
        * If smpte = False : the format will be HH:MM:SS:MSS = hours, minutes, seconds, milliseconds
        * If smpte = True  : the format will be HH:MM:SS:FF  = hours, minutes, seconds, frames
    '''
    def __zeropad(value):
        # pad single digits with a leading zero; values >= 10 are returned
        # unchanged (matches the original behaviour of mixing str and int)
        if value < 10:
            return '0%s' % value
        return value

    if not framerate:
        framerate = r9General.getCurrentFPS()

    # divmod carries exact multiples correctly - the original '>' comparisons
    # failed on boundaries, e.g. 60000ms produced "00:00:60:00" instead of
    # the expected "00:01:00:00"
    hours, milliseconds = divmod(milliseconds, 3600000)
    minutes, milliseconds = divmod(milliseconds, 60000)
    seconds, milliseconds = divmod(milliseconds, 1000)

    frame = int(math.floor(milliseconds))
    if smpte:
        # convert the millisecond remainder into a frame count
        frame = int(math.ceil((float(frame) / 1000) * float(framerate)))

    return "{0}:{1}:{2}:{3}".format(__zeropad(int(hours)),
                                    __zeropad(int(minutes)),
                                    __zeropad(int(seconds)),
                                    __zeropad(frame))
Code example #10
0
File: Red9_Audio.py  Project: jeanim/Red9_StudioPack
 def endTime(self):
     '''
     End time of the sound node, expressed in milliseconds.
     '''
     end_ms = (self.endFrame / r9General.getCurrentFPS()) * 1000
     return end_ms
Code example #11
0
 def endTime(self):
     '''
     : PRO_PACK : Maya end time of the sound node, in milliseconds.
     '''
     fps = r9General.getCurrentFPS()
     return (self.endFrame / fps) * 1000
Code example #12
0
 def endTime(self):
     '''
     Milliseconds at which this sound node ends.
     '''
     current_fps = r9General.getCurrentFPS()
     end_ms = (self.endFrame / current_fps) * 1000
     return end_ms
Code example #13
0
 def startTime(self):
     '''
     Milliseconds at which this sound node starts.
     '''
     fps = r9General.getCurrentFPS()
     return (self.startFrame / fps) * 1000
Code example #14
0
    def combineAudio(self, filepath):
        '''
        Combine all managed audio tracks into a single wav file. This
        by-passes the issue of Maya not playblasting multiple audio tracks.

        :param filepath: filepath to store the combined audio track to
        :raises ValueError: if fewer than two audio nodes are being managed
        :raises IOError: if filepath is already loaded in Maya as a
            non-compiled audio node
        :raises StandardError: if any track could not be mixed in
            (unsupported bit-depth)

        TODO: Deal with offset start and end data + silence
        '''
        status = True
        if not len(self.audioNodes) > 1:
            raise ValueError(
                'We need more than 1 audio node in order to compile')

        # If the target filepath is already loaded in Maya: delete a previous
        # compiled track so it can be rebuilt, or abort if the node at that
        # path is a normal (non-compiled) audio node.
        for audio in cmds.ls(type='audio'):
            audioNode = AudioNode(audio)
            if audioNode.path == filepath:
                if audioNode.isCompiled:
                    log.info('Deleting currently compiled Audio Track : %s' %
                             audioNode.path)
                    if audioNode in self.audioNodes:
                        self.audioNodes.remove(audioNode)
                    audioNode.delete()
                    break
                else:
                    raise IOError(
                        'Combined Audio path is already imported into Maya')

        # If any track starts before frame 0, everything is shifted forward by
        # that amount so the compiled wav still lines up with the timeline.
        frmrange = self.getOverallRange()
        neg_adjustment = 0
        if frmrange[0] < 0:
            neg_adjustment = frmrange[0]

        # total length of the silent base track, in milliseconds
        duration = ((frmrange[1] + abs(neg_adjustment)) /
                    r9General.getCurrentFPS()) * 1000
        log.info('Audio BaseTrack duration = %f' % duration)
        baseTrack = audio_segment.AudioSegment.silent(duration)

        for audio in self.audioNodes:
            sound = audio_segment.AudioSegment.from_wav(audio.path)
            # audioop (used by pydub under the hood) only handles 8/16/32bit
            if sound.sample_width not in [1, 2, 4]:
                log.warning(
                    '24bit Audio is NOT supported in Python audioop lib!  : "%s" == %i'
                    % (audio.audioNode, sound.sample_width))
                status = False
                continue
            insertFrame = (audio.startFrame + abs(neg_adjustment))
            log.info('inserting sound : %s at %f adjusted to %f' % \
                     (audio.audioNode, audio.startFrame, insertFrame))
            # overlay position is in milliseconds from the start of baseTrack
            baseTrack = baseTrack.overlay(
                sound,
                position=(insertFrame / r9General.getCurrentFPS()) * 1000)

        baseTrack.export(filepath, format="wav")
        # import the compiled wav back into Maya and stamp it so later
        # compiles can identify and replace it
        compiled = AudioNode(filepath=filepath)
        compiled.importAndActivate()
        compiled.stampCompiled(self.mayaNodes)
        compiled.startFrame = neg_adjustment

        # NOTE(review): StandardError is Python 2 only - under Python 3 this
        # would need to be Exception
        if not status:
            raise StandardError(
                'combine completed with errors: see script Editor for details')
Code example #15
0
 def endTime(self):
     '''
     : PRO_PACK : Maya end time of the sound node, in milliseconds.
     '''
     scene_fps = r9General.getCurrentFPS()
     return (self.endFrame / scene_fps) * 1000