Code Example #1
    def tagVideoFile (cls, videoFilePath, albumName, artistName,
                      albumArtFilePath, title, mediaType, year):
        """Adds some quicktime/MP4 tags to video file with
           <videoFilePath>"""

        Logging.trace(">>: %r", videoFilePath)

        ValidityChecker.isReadableFile(videoFilePath, "source video file")

        st = "== tagging %r" % videoFilePath
        OperatingSystem.showMessageOnConsole(st)

        tagToValueMap = {}
        tagToValueMap["album"]           = albumName
        tagToValueMap["albumArtist"]     = artistName
        tagToValueMap["artist"]          = artistName
        tagToValueMap["cover"]           = albumArtFilePath
        tagToValueMap["itunesMediaType"] = mediaType
        tagToValueMap["title"]           = title
        tagToValueMap["tvShowName"]      = albumName
        tagToValueMap["year"]            = year

        MP4TagManager.tagFile(videoFilePath, tagToValueMap)

        Logging.trace("<<")
Code Example #2
    def _makePdf(cls, processingPhase, targetFileNamePrefix, voiceNameList):
        """Processes lilypond file and generates extract or score PDF
           file."""

        Logging.trace(">>: targetFilePrefix = %r, voiceNameList=%r",
                      targetFileNamePrefix, voiceNameList)

        configData = cls._configData
        tempLilypondFilePath = configData.tempLilypondFilePath
        lilypondFile = LilypondFile(tempLilypondFilePath)
        lilypondFile.generate(
            configData.includeFilePath, configData.lilypondVersion,
            processingPhase, voiceNameList, configData.title,
            configData.songComposerText, configData.voiceNameToChordsMap,
            configData.voiceNameToLyricsMap,
            configData.voiceNameToScoreNameMap, configData.measureToTempoMap,
            configData.phaseAndVoiceNameToClefMap,
            configData.phaseAndVoiceNameToStaffListMap)
        cls._processLilypond(tempLilypondFilePath, targetFileNamePrefix)
        OperatingSystem.moveFile(targetFileNamePrefix + ".pdf",
                                 configData.targetDirectoryPath)
        OperatingSystem.removeFile(tempLilypondFilePath,
                                   configData.intermediateFilesAreKept)

        Logging.trace("<<")
Code Example #3
    def _compressAudio (self, audioFilePath, songTitle, targetFilePath):
        """Compresses audio file with <songTitle> in path with
          <audioFilePath> to AAC file at <targetFilePath>"""

        Logging.trace(">>: audioFile = %r, title = %r,"
                      + " targetFile = %r",
                      audioFilePath, songTitle, targetFilePath)

        cls = self.__class__

        OperatingSystem.showMessageOnConsole("== convert to AAC: "
                                             + songTitle)

        commandLine = iif(cls._aacCommandLine != "", cls._aacCommandLine,
                          ("%s -loglevel %s -aac_tns 0"
                           + " -i ${infile} -y ${outfile}")
                           % (cls._ffmpegCommand, _ffmpegLogLevel))
        variableMap = { "infile"  : audioFilePath,
                        "outfile" : targetFilePath }
        command = cls._replaceVariablesByValues(tokenize(commandLine),
                                                variableMap)

        OperatingSystem.executeCommand(command, True)

        Logging.trace("<<")
Code Example #4
    def generateRawAudio (self, midiFilePath, voiceName, shiftOffset):
        """Generates audio wave file for <voiceName> from midi file
           with <midiFilePath> in target directory; if several midi
           tracks match voice name, the resulting audio files are
           mixed; output is dry (no chorus, reverb and delay) and
           contains leading and trailing silent passages; if
           <shiftOffset> is greater than zero, the target file is
           shifted by that amount"""

        Logging.trace(">>: voice = %s, midiFile = %r, shiftOffset = %7.3f",
                      voiceName, midiFilePath, shiftOffset)

        cls = self.__class__
        tempMidiFilePath = "tempRender.mid"
        isShifted = (shiftOffset > 0)
        defaultTemplate = "%s/%s.wav"
        filePathTemplate = iif(isShifted, "%s/%s-raw.wav", defaultTemplate)
        audioFilePath = filePathTemplate % (self._audioDirectoryPath,
                                            voiceName)

        self._makeFilteredMidiFile(voiceName, midiFilePath, tempMidiFilePath)
        self._convertMidiToAudio(tempMidiFilePath, audioFilePath)

        if isShifted:
            targetFilePath = defaultTemplate % (self._audioDirectoryPath,
                                                voiceName)
            cls._shiftAudioFile(audioFilePath, targetFilePath, shiftOffset)
            OperatingSystem.removeFile(audioFilePath,
                                       cls._intermediateFilesAreKept)

        OperatingSystem.removeFile(tempMidiFilePath,
                                   cls._intermediateFilesAreKept)

        Logging.trace("<<")
Code Example #5
    def make (cls, pageDurationList):
        """Generates an MP4 video from durations in <pageDurationList>
           and generated PNG images."""

        Logging.trace(">>: %r", pageDurationList)

        # for each page an MP4 fragment file is generated and finally
        # concatenated into the target file
        concatSpecificationFile = \
                UTF8File(cls._concatSpecificationFileName, 'wt')

        for (i, pageDuration) in enumerate(pageDurationList):
            page = i + 1

            requiredNumberOfFrames = int(cls._frameRate * pageDuration) + 1
            pageFileName = cls._pageFileNameTemplate % page
            intermediateFileName = cls._intermediateFileNameTemplate % page

            # write file name to concatenation file
            normalizedFileName = intermediateFileName.replace("\\", "/")
            st = "file '%s'\n" % normalizedFileName
            concatSpecificationFile.write(st)

            # make silent video from single lilypond page
            command = ((cls._ffmpegCommand,
                       "-loglevel", cls._generatorLogLevel,
                       "-framerate", "1/" + str(requiredNumberOfFrames),
                       "-i", str(pageFileName),
                       "-vf", "scale=iw/%d:ih/%d" % (cls._scaleFactor,
                                                     cls._scaleFactor),
                       "-r", str(cls._frameRate),
                       "-t", "%02.2f" % pageDuration)
                       + iif(cls._ffmpegPresetName != "",
                             ("-fpre", cls._ffmpegPresetName),
                             ("-pix_fmt", "yuv420p",
                              "-profile:v", "baseline",
                              "-level", cls._defaultMp4BaselineLevel))
                       + ("-y", intermediateFileName))

            OperatingSystem.executeCommand(command, True)

        concatSpecificationFile.close()

        # concatenate silent video fragments into single file
        cls._pageCount = page
        command = (cls._ffmpegCommand,
                   "-safe", "0",
                   "-y",
                   "-loglevel", cls._generatorLogLevel,
                   "-f", "concat",
                   "-i", cls._concatSpecificationFileName,
                   "-codec", "copy",
                   cls.fileName)
        OperatingSystem.executeCommand(command, True)

        Logging.trace("<<")
Code Example #6
    def cleanUpConditionally (cls, filesAreKept):
        """Cleans up the subtitle file if it is the temporary one and
           <filesAreKept> is unset"""

        Logging.trace(">>: %r", filesAreKept)

        if cls.fileName == cls._tempFileName:
            OperatingSystem.removeFile(cls.fileName, filesAreKept)

        Logging.trace("<<")
Code Example #7
    def cleanup (self):
        """Deletes all intermediate files."""

        Logging.trace(">>")

        filesAreKept = self._intermediateFilesAreKept
        OperatingSystem.removeFile(self._postscriptFileName, filesAreKept)
        MP4Video.cleanUpConditionally(filesAreKept)
        SubtitleFile.cleanUpConditionally(filesAreKept)

        Logging.trace("<<")
Code Example #8
    def _processLilypond(cls, lilypondFilePath, targetFileNamePrefix):
        """Processes <lilypondFilePath> and stores result in file with
           <targetFileNamePrefix>."""

        Logging.trace(">>: lilyFile = %r, targetFileNamePrefix=%r",
                      lilypondFilePath, targetFileNamePrefix)

        OperatingSystem.showMessageOnConsole("== processing " +
                                             targetFileNamePrefix)
        command = (cls._lilypondCommand, "--output", targetFileNamePrefix,
                   lilypondFilePath)
        OperatingSystem.executeCommand(command, True)

        Logging.trace("<<")
Code Example #9
    def _processLilypondFile (self):
        """Generates postscript file and picture files from lilypond
           file."""

        Logging.trace(">>: %r", self._lilypondFileName)

        command = (self._lilypondCommand,
                   "-l", "WARNING",
                   "-dno-point-and-click",
                   "--ps",
                   "--png",
                   "--output=" + self._pictureFileStem,
                   self._lilypondFileName)
        OperatingSystem.executeCommand(command, True)

        Logging.trace("<<")
Code Example #10
    def copyOverrideFile (self, filePath, voiceName, shiftOffset):
        """Sets refined file from <filePath> for voice with
           <voiceName> and applies <shiftOffset>"""

        Logging.trace(">>: file = %r, voice = %r, offset = %7.3f",
                      filePath, voiceName, shiftOffset)

        cls = self.__class__
        message = "== overriding %r from file" % voiceName
        OperatingSystem.showMessageOnConsole(message)

        targetFilePath = (_processedAudioFileTemplate
                          % (self._audioDirectoryPath, voiceName))
        cls._shiftAudioFile(filePath, targetFilePath, shiftOffset)

        Logging.trace("<<")
Code Example #11
    def insertHardSubtitles (cls, sourceVideoFilePath, subtitleFilePath,
                             targetVideoFilePath, shiftOffset,
                             subtitleColor, subtitleFontSize,
                             ffmpegPresetName):
        """Inserts hard subtitles specified by an SRT file with
           <subtitleFilePath> into video given by
           <sourceVideoFilePath> resulting in video with
           <targetVideoFilePath>; <shiftOffset> tells the amount of
           empty time to be inserted before the video, <ffmpegPresetName>
           tells the ffmpeg preset used for the newly generated video,
           <subtitleColor> the RGB color of the subtitle,
           <subtitleFontSize> the size in pixels"""

        Logging.trace(">>: sourceVideo = %r, subtitleFile = %r,"
                      + " targetVideo = %r, subtitleFontSize = %d,"
                      + " subtitleColor = %d, ffmpegPreset = %r",
                      sourceVideoFilePath, subtitleFilePath,
                      targetVideoFilePath, subtitleFontSize,
                      subtitleColor, ffmpegPresetName)

        ValidityChecker.isReadableFile(sourceVideoFilePath,
                                       "source video file")

        st = "== hardcoding subtitles for %r" % sourceVideoFilePath
        OperatingSystem.showMessageOnConsole(st)

        subtitleOption = (("subtitles=%s:force_style='PrimaryColour=%d,"
                           + "FontSize=%d'")
                          % (subtitleFilePath, subtitleColor,
                             subtitleFontSize))

        command = ((cls._ffmpegCommand,
                    "-loglevel", "error",
                    "-itsoffset", str(shiftOffset),
                    "-i", sourceVideoFilePath,
                    "-vf", subtitleOption)
                   + iif(ffmpegPresetName != "",
                         ("-fpre", ffmpegPresetName),
                         ("-pix_fmt", "yuv420p",
                          "-profile:v", "baseline",
                          "-level", cls._defaultMp4BaselineLevel))
                   + ("-y", targetVideoFilePath))

        OperatingSystem.executeCommand(command, True)
        Logging.trace("<<")
Code Example #12
    def combine (cls, voiceNameList, trackDataList, sourceVideoFilePath,
                 targetVideoFilePath, subtitleFilePath):
        """Combines all final audio files (characterized by
           <trackDataList>) and the video given by
           <sourceVideoFilePath> into video in <targetVideoFilePath>;
           if <subtitleFilePath> is not empty, the given subtitle file
           is added as an additional track; <voiceNameList> gives the
           list of all voices"""

        Logging.trace(">>: voiceNameList = %r, trackDataList = %r,"
                      + " sourceVideo = %r, targetVideo = %r,"
                      + " subtitleFilePath = %r",
                      voiceNameList, trackDataList, sourceVideoFilePath,
                      targetVideoFilePath, subtitleFilePath)

        ValidityChecker.isReadableFile(sourceVideoFilePath,
                                       "source video file")

        st = "== combining audio and video for " + targetVideoFilePath
        OperatingSystem.showMessageOnConsole(st)

        audioTrackDataList = []

        for audioTrackData in trackDataList:
            _, _, _, audioFilePath, description, \
              languageCode, _, _, _ = audioTrackData
            element = (audioFilePath, languageCode, description)
            audioTrackDataList.append(element)

        if cls._mp4boxCommand != "":
            command = cls._combineWithMp4box(sourceVideoFilePath,
                                             audioTrackDataList,
                                             subtitleFilePath,
                                             targetVideoFilePath)
        else:
            command = cls._combineWithFfmpeg(sourceVideoFilePath,
                                             audioTrackDataList,
                                             subtitleFilePath,
                                             targetVideoFilePath)

        OperatingSystem.executeCommand(command, True)
        Logging.trace("<<")
Code Example #13
    def mixdown (self, configData):
        """Combines the processed audio files for all voices in
           <configData.voiceNameList> into several combination files and
           converts them to aac format; <configData> defines the voice
           volumes, the relative amplification level, the optional
           voices as well as the tags and suffixes for the final
           files"""

        Logging.trace(">>: configData = %r", configData)

        cls = self.__class__

        voiceProcessingList = \
            cls.constructSettingsForAudioTracks(configData)

        for v in voiceProcessingList:
            currentVoiceNameList, albumName, songTitle, \
              targetFilePath, _, languageCode, voiceNameToAudioLevelMap, \
              masteringEffectList, amplificationLevel = v
            waveIntermediateFilePath = ("%s/result_%s.wav"
                                        % (self._audioDirectoryPath, languageCode))
            OperatingSystem.showMessageOnConsole("== make mix file: %s"
                                                 % songTitle)

            if configData.parallelTrackFilePath != "":
                parallelTrackVolume = configData.parallelTrackVolume
                voiceNameToAudioLevelMap["parallel"] = parallelTrackVolume

            self._mixdownVoicesToWavFile(currentVoiceNameList,
                                         voiceNameToAudioLevelMap,
                                         configData.parallelTrackFilePath,
                                         masteringEffectList,
                                         amplificationLevel,
                                         waveIntermediateFilePath)
            self._compressAudio(waveIntermediateFilePath, songTitle,
                                targetFilePath)
            cls._tagAudio(targetFilePath, configData, songTitle, albumName)

            #OperatingSystem.removeFile(waveIntermediateFilePath,
            #                           cls._intermediateFilesAreKept)

        Logging.trace("<<")
Code Example #14
    def ensureFileExistence (cls, fileName, fileKind):
        """Checks whether file with <fileName> exists, otherwise gives
           error message about file kind mentioning file name."""

        Logging.trace(">>: name = %r, kind = %s", fileName, fileKind)

        errorTemplate = "%s file does not exist - %r"
        errorMessage = errorTemplate % (fileKind, fileName)
        cls.check(OperatingSystem.hasFile(fileName), errorMessage)

        Logging.trace("<<")
Code Example #15
    def _shiftAudioFileExternally (cls, commandLine, audioFilePath,
                                   shiftedFilePath, shiftOffset):
        """Shifts audio file in <audioFilePath> to shifted audio in
           <shiftedFilePath> with silence prefix of length
           <shiftOffset> using external command with <commandLine>"""

        Logging.trace(">>: commandLine = %r,"
                      + " infile = %r, outfile = %r,"
                      + " shiftOffset = %7.3f",
                      commandLine, audioFilePath,
                      shiftedFilePath, shiftOffset)

        variableMap = { "infile"   : audioFilePath,
                        "outfile"  : shiftedFilePath,
                        "duration" : "%7.3f" % shiftOffset }
        command = cls._replaceVariablesByValues(tokenize(commandLine),
                                                variableMap)
        OperatingSystem.executeCommand(command, True,
                                       stdout=OperatingSystem.nullDevice)

        Logging.trace("<<")
Code Example #16
    def _shiftAudioFile (cls, audioFilePath, shiftedFilePath, shiftOffset):
        """Shifts audio file in <audioFilePath> to shifted audio in
           <shiftedFilePath> with silence prefix of length
           <shiftOffset>"""

        Logging.trace(">>: infile = %r, outfile = %r,"
                      + " shiftOffset = %7.3f",
                      audioFilePath, shiftedFilePath, shiftOffset)

        OperatingSystem.showMessageOnConsole("== shifting %r by %7.3fs"
                                             % (shiftedFilePath, shiftOffset))

        if "paddingCommandLine" not in cls._audioProcessorMap:
            _WavFile.shiftAudio(audioFilePath, shiftedFilePath, shiftOffset)
        else:
            commandLine = cls._audioProcessorMap["paddingCommandLine"]
            cls._shiftAudioFileExternally(commandLine,
                                          audioFilePath, shiftedFilePath,
                                          shiftOffset)

        Logging.trace("<<")
Code Example #17
    def ensureProgramAvailability (cls, programName, programPath, option):
        """Checks whether program on <programPath> is available and otherwise
           gives error message and exits. <option> is the only
           command-line option for program."""

        Logging.trace(">>: '%s %s'", programName, option)

        cls.check(OperatingSystem.programIsAvailable(programPath, option),
                  ("cannot execute %s program - path %r'"
                   % (programName, programPath)))

        Logging.trace("<<")
Code Example #18
    def _convertMidiToAudio (self, voiceMidiFilePath, targetFilePath):
        """Converts voice data in midi file with <voiceMidiFilePath>
           to raw audio file with <targetFilePath>"""

        Logging.trace(">>: midiFile = %r, targetFile = %r",
                      voiceMidiFilePath, targetFilePath)

        cls = self.__class__

        # processing midi file via given command
        OperatingSystem.showMessageOnConsole("== convertMidiToWav "
                                             + targetFilePath)

        variableMap = { "infile"  : voiceMidiFilePath,
                        "outfile" : targetFilePath }
        command = \
            cls._replaceVariablesByValues(cls._midiToWavRenderingCommandList,
                                          variableMap)
        OperatingSystem.executeCommand(command, True,
                                       stdout=OperatingSystem.nullDevice)

        Logging.trace("<<")
Code Example #19
    def generateRefinedAudio (self, voiceName, soundVariant, reverbLevel):
        """Generates refined audio wave file for <voiceName> from raw
           audio file in target directory; <soundVariant> gives the
           kind of postprocessing ('COPY', 'STD', 'EXTREME', ...) and
           <reverbLevel> the percentage of reverb to be used for that
           voice"""

        Logging.trace(">>: voice = %s, variant = %s, reverb = %4.3f",
                      voiceName, soundVariant, reverbLevel)

        cls = self.__class__
        extendedSoundVariant = soundVariant.capitalize()
        isCopyVariant = (extendedSoundVariant == "Copy")

        if isCopyVariant:
            soundStyleName = "COPY"
        else:
            simpleVoiceName = humanReadableVoiceName(voiceName)
            capitalizedVoiceName = simpleVoiceName.capitalize()
            isSimpleKeyboard = (capitalizedVoiceName == "Keyboardsimple")
            capitalizedVoiceName = iif(isSimpleKeyboard, "Keyboard",
                                       capitalizedVoiceName)
            soundStyleName = \
                "soundStyle%s%s" % (capitalizedVoiceName,
                                    extendedSoundVariant)

        message = "== processing %s (%s)" % (voiceName, soundVariant)
        OperatingSystem.showMessageOnConsole(message)

        # prepare list of audio processing commands
        if isCopyVariant:
            audioProcessingEffects = ""
        elif soundStyleName in cls._soundStyleNameToEffectsMap:
            audioProcessingEffects = \
                cls._soundStyleNameToEffectsMap[soundStyleName]
        else:
            audioProcessingEffects = ""
            message = ("unknown variant %s replaced by copy default"
                       % soundVariant)
            Logging.trace("--: " + message)
            OperatingSystem.showMessageOnConsole(message)
            isCopyVariant = True

        if not isCopyVariant:
            # add reverb if applicable
            reverbLevel = adaptToRange(int(reverbLevel * 100.0), 0, 100)
            reverbEffect = iif2(reverbLevel == 0, "",
                                not cls._audioProcessorIsSox, "",
                                " reverb %d" % reverbLevel)

            if reverbLevel > 0 and not cls._audioProcessorIsSox:
                message = "reverberation skipped, please use explicit reverb"
                OperatingSystem.showMessageOnConsole(message)

            audioProcessingEffects += reverbEffect

        self._processAudioRefinement(voiceName, audioProcessingEffects)

        Logging.trace("<<")
Code Example #20
    def processMidi(cls):
        """Generates midi file from lilypond file."""

        Logging.trace(">>")

        configData = cls._configData
        intermediateFilesAreKept = configData.intermediateFilesAreKept
        tempLilypondFilePath = configData.tempLilypondFilePath
        lilypondFile = LilypondFile(tempLilypondFilePath)
        lilypondFile.generate(
            configData.includeFilePath, configData.lilypondVersion, "midi",
            configData.midiVoiceNameList, configData.title,
            configData.songComposerText, configData.voiceNameToChordsMap,
            configData.voiceNameToLyricsMap,
            configData.voiceNameToScoreNameMap, configData.measureToTempoMap,
            configData.phaseAndVoiceNameToClefMap,
            configData.phaseAndVoiceNameToStaffListMap)

        tempMidiFileNamePrefix = (configData.intermediateFileDirectoryPath +
                                  cls._pathSeparator +
                                  configData.fileNamePrefix + "-temp")
        tempMidiFileName = tempMidiFileNamePrefix + ".mid"
        targetMidiFileName = (cls._midiFileNameTemplate %
                              configData.fileNamePrefix)

        cls._processLilypond(tempLilypondFilePath, tempMidiFileNamePrefix)

        # postprocess MIDI file
        OperatingSystem.showMessageOnConsole("== adapting MIDI into " +
                                             targetMidiFileName)
        trackToSettingsMap = cls._calculateTrackToSettingsMap()

        midiTransformer = MidiTransformer(tempMidiFileName,
                                          intermediateFilesAreKept)
        midiTransformer.addMissingTrackNames()
        midiTransformer.humanizeTracks(
            configData.countInMeasureCount,
            configData.measureToHumanizationStyleNameMap)
        midiTransformer.positionInstruments(trackToSettingsMap)
        midiTransformer.addProcessingDateToTracks(trackToSettingsMap.keys())
        midiTransformer.save(targetMidiFileName)

        OperatingSystem.moveFile(targetMidiFileName,
                                 configData.targetDirectoryPath)
        OperatingSystem.removeFile(tempMidiFileName, intermediateFilesAreKept)
        OperatingSystem.removeFile(tempLilypondFilePath,
                                   intermediateFilesAreKept)

        Logging.trace("<<")
Code Example #21
    def _tagAudio (cls, audioFilePath, configData, songTitle, albumName):
        """Tags M4A audio file with <songTitle> at <audioFilePath>
           with tags specified by <configData>, <songTitle> and
           <albumName>"""

        Logging.trace(">>: audioFile = %r, configData = %r,"
                      + " title = %r, album = %r",
                      audioFilePath, configData, songTitle, albumName)

        artistName = configData.artistName

        tagToValueMap = {}
        tagToValueMap["album"]       = albumName
        tagToValueMap["albumArtist"] = artistName
        tagToValueMap["artist"]      = artistName
        tagToValueMap["cover"]       = configData.albumArtFilePath
        tagToValueMap["title"]       = songTitle
        tagToValueMap["track"]       = configData.trackNumber
        tagToValueMap["year"]        = configData.songYear

        OperatingSystem.showMessageOnConsole("== tagging AAC: " + songTitle)
        MP4TagManager.tagFile(audioFilePath, tagToValueMap)

        Logging.trace("<<")
Code Example #22
    def cleanUpConditionally (cls, filesAreKept):
        """Deletes all intermediate files when <filesAreKept> is unset"""

        Logging.trace(">>: %r", filesAreKept)

        for page in range(1, cls._pageCount + 1):
            Logging.trace("--: %d", page)
            fileName = cls._intermediateFileNameTemplate % page
            OperatingSystem.removeFile(fileName, filesAreKept)
            fileName = cls._pageFileNameTemplate % page
            OperatingSystem.removeFile(fileName, filesAreKept)

        OperatingSystem.removeFile(cls._concatSpecificationFileName,
                                   filesAreKept)

        if cls.fileName and cls.fileName == cls._tempFileName:
            OperatingSystem.removeFile(cls.fileName, filesAreKept)

        Logging.trace("<<")
Code Example #23
    def mixdown (cls, sourceFilePathList, volumeFactorList,
                 amplificationLevel, targetFilePath):
        """Mixes WAV audio files given in <sourceFilePathList> to target WAV
           file with <targetFilePath> with volumes given by
           <volumeFactorList> with loudness amplification given by
           <amplificationLevel> via Python modules only"""

        Logging.trace(">>: sourceFiles = %r, volumeFactors = %r,"
                      + " level = %4.3f, targetFile = %r",
                      sourceFilePathList, volumeFactorList,
                      amplificationLevel, targetFilePath)

        OperatingSystem.showMessageOnConsole("  MIX", False)
        sourceFileList = [cls(name, "r") for name in sourceFilePathList]
        sourceFileCount = len(sourceFileList)
        channelCount, sampleSize, frameRate, _ = \
            sourceFileList[0].getParameters()
        frameCount = max([file.frameCount() for file in sourceFileList])
        resultSampleList = (frameCount * channelCount) * [ 0 ]

        for i in range(sourceFileCount):
            OperatingSystem.showMessageOnConsole(" %d" % (i + 1), False)
            sourceFile   = sourceFileList[i]
            volumeFactor = volumeFactorList[i]
            cls._mix(resultSampleList, sourceFile, volumeFactor)

        maximumVolume = cls.maximumVolume(resultSampleList)
        maxValue = cls.maximumSampleValue
        amplificationFactor = pow(2.0, amplificationLevel / 6.0206)
        scalingFactor = (1.0 if maximumVolume < (maxValue // 10)
                         else (maxValue * amplificationFactor) / maximumVolume)
        Logging.trace("--: maxVolume = %d, factor = %4.3f",
                      maximumVolume, scalingFactor)
        OperatingSystem.showMessageOnConsole(" S")
        cls.scale(resultSampleList, scalingFactor)

        for file in sourceFileList:
            file.close()

        targetFile = cls(targetFilePath, "w")
        targetFrameCount = len(resultSampleList) // channelCount
        targetFile.setParameters(channelCount, sampleSize,
                                 frameRate, targetFrameCount)
        targetFile.writeSamples(resultSampleList)
        targetFile.close()

        Logging.trace("<<")
Code Example #24
    def _mixdownToWavFileExternally (self, sourceFilePathList,
                                     volumeFactorList, masteringEffectList,
                                     amplificationLevel, targetFilePath):
        """Mixes WAV audio files given in <sourceFilePathList> to target WAV
           file with <targetFilePath> with volumes given by
           <volumeFactorList> with loudness amplification given by
           <amplificationLevel> using external command;
           <masteringEffectList> gives the refinement effects for this
           track (if any) to be applied after mixdown"""

        Logging.trace(">>: sourceFiles = %r, volumeFactors = %r,"
                      + " masteringEffects = %r, level = %4.3f,"
                      + " targetFile = %r",
                      sourceFilePathList, volumeFactorList,
                      masteringEffectList, amplificationLevel,
                      targetFilePath)

        cls = self.__class__

        # some shorthands
        audioProcMap     = cls._audioProcessorMap
        replaceVariables = cls._replaceVariablesByValues

        # check whether mastering is done after mixdown
        masteringPassIsRequired = (amplificationLevel != 0
                                   or masteringEffectList != "")
        intermediateFilePath = self._audioDirectoryPath + "/result-mix.wav"
        intermediateFilePath = iif(masteringPassIsRequired,
                                   intermediateFilePath, targetFilePath)

        Logging.trace("--: masteringPass = %r, intermediateFile = %r",
                      masteringPassIsRequired, intermediateFilePath)

        # do the mixdown of the audio sources
        commandRegexp = re.compile(r"([^\[]*)\[([^\]]+)\](.*)")
        mixingCommandLine = audioProcMap["mixingCommandLine"]
        match = commandRegexp.search(mixingCommandLine)

        if match is None:
            Logging.trace("--: bad command line format for mix - %r",
                          mixingCommandLine)
        else:
            commandPrefix        = tokenize(match.group(1))
            commandRepeatingPart = tokenize(match.group(2))
            commandSuffix        = tokenize(match.group(3))

            elementCount = len(sourceFilePathList)
            command = []

            for i in range(elementCount):
                volumeFactor = volumeFactorList[i]
                filePath     = sourceFilePathList[i]
                variableMap  = { "factor" : volumeFactor,
                                 "infile" : filePath }
                part = replaceVariables(commandRepeatingPart, variableMap)
                command.extend(part)

            Logging.trace("--: repeating part = %r", command)

            commandList = commandPrefix + command + commandSuffix
            variableMap = { "outfile" : intermediateFilePath }
            command = replaceVariables(commandList, variableMap)
            OperatingSystem.executeCommand(command, True,
                                           stdout=OperatingSystem.nullDevice)

        if masteringPassIsRequired:
            # do mastering and amplification
            amplificationEffect = audioProcMap["amplificationEffect"]
            amplificationEffectTokenList = tokenize(amplificationEffect)
            variableMap  = { "amplificationLevel" : amplificationLevel }
            nmEffectPartList = replaceVariables(amplificationEffectTokenList,
                                                variableMap)
            effectList = (tokenize(masteringEffectList)
                          + nmEffectPartList)

            refinementCommandLine = audioProcMap["refinementCommandLine"]
            refinementCommandList = tokenize(refinementCommandLine)
            variableMap = { "infile"  : intermediateFilePath,
                            "outfile" : targetFilePath,
                            "effects" : effectList }
            command = replaceVariables(refinementCommandList, variableMap)
            OperatingSystem.executeCommand(command, True)
            OperatingSystem.removeFile(intermediateFilePath,
                                       cls._intermediateFilesAreKept)

        Logging.trace("<<")
Code Example #25
    def _processAudioRefinement (self, voiceName, audioProcessingEffects):
        """Handles audio processing given by <audioProcessingEffects>"""

        Logging.trace(">>: voice = %s, effects = %r",
                      voiceName, audioProcessingEffects)

        cls = self.__class__

        debugFileCount = 0
        separator = cls._audioProcessorMap["chainSeparator"]
        chainCommandList = splitAndStrip(audioProcessingEffects, separator)
        chainCommandCount = len(chainCommandList)
        commandList = tokenize(cls._audioProcessorMap["refinementCommandLine"])

        for chainIndex, chainProcessingEffects in enumerate(chainCommandList):
            Logging.trace("--: chain[%d] = %r",
                          chainIndex, chainProcessingEffects)
            chainPosition = iif3(chainCommandCount == 1, "SINGLE",
                                 chainIndex == 0, "FIRST",
                                 chainIndex == chainCommandCount - 1, "LAST",
                                 "OTHER")
            chainPartCommandList = splitAndStrip(chainProcessingEffects,
                                                 "tee ")
            partCount = len(chainPartCommandList)

            for partIndex, partProcessingEffects \
                in enumerate(chainPartCommandList):

                partPosition = iif3(partCount == 1, "SINGLE",
                                    partIndex == 0, "FIRST",
                                    partIndex == partCount - 1, "LAST",
                                    "OTHER")
                partCommandTokenList = tokenize(partProcessingEffects)
                sourceList, currentTarget, debugFileCount = \
                    self._extractEffectListSrcAndTgt(voiceName, chainPosition,
                                                     partPosition,
                                                     debugFileCount,
                                                     partCommandTokenList)

                if (len(partCommandTokenList) == 0
                    or partCommandTokenList[0] != "mix"):
                    currentSource = sourceList[0]
                    variableMap = { "infile"   : currentSource,
                                    "outfile"  : currentTarget,
                                    "effects"  : partCommandTokenList }
                    command = cls._replaceVariablesByValues(commandList,
                                                            variableMap)
                    OperatingSystem.executeCommand(command, True)
                else:
                    volumeList = partCommandTokenList[1:]

                    if len(volumeList) < len(sourceList):
                        Logging.trace("--: bad argument pairing for mix")
                    else:
                        for i in range(len(sourceList)):
                            volume = volumeList[i]
                            valueName = "value %d in mix" % (i+1)
                            ValidityChecker.isNumberString(volume,
                                                           valueName, True)
                            volumeList[i] = float(volume)

                        self._mixdownToWavFile(sourceList, volumeList,
                                               "", 0.0, currentTarget)

        Logging.trace("<<")
Code Example #26
    def processFinalVideo(cls):
        """Generates final videos from silent video, audio tracks and
           subtitle files."""

        Logging.trace(">>")

        configData = cls._configData
        tempSubtitleFilePath = "tempSubtitle.srt"
        tempMp4FilePath = "tempVideoWithSubtitles.mp4"

        # --- shift subtitles ---
        subtitleFilePath = "%s/%s" % (configData.targetDirectoryPath,
                                      (subtitleFileNameTemplate %
                                       configData.fileNamePrefix))
        VideoAudioCombiner.shiftSubtitleFile(subtitleFilePath,
                                             tempSubtitleFilePath,
                                             configData.shiftOffset)

        for videoFileKind in configData.videoFileKindMap.values():
            silentMp4FilePath = (
                ("%s/" + silentVideoFileNameTemplate) %
                (configData.targetDirectoryPath, configData.fileNamePrefix,
                 videoFileKind.fileNameSuffix))
            videoTargetName = videoFileKind.target

            if videoTargetName not in configData.videoTargetMap:
                Logging.trace("--: unknown video target %s for file kind %s",
                              videoTargetName, videoFileKind.name)
            else:
                videoTarget = configData.videoTargetMap[videoTargetName]

                if not videoTarget.subtitlesAreHardcoded:
                    videoFilePath = silentMp4FilePath
                    effectiveSubtitleFilePath = tempSubtitleFilePath
                else:
                    videoFilePath = tempMp4FilePath
                    effectiveSubtitleFilePath = ""
                    VideoAudioCombiner.insertHardSubtitles( \
                                            silentMp4FilePath,
                                            tempSubtitleFilePath,
                                            videoFilePath,
                                            configData.shiftOffset,
                                            videoTarget.subtitleColor,
                                            videoTarget.subtitleFontSize,
                                            videoTarget.ffmpegPresetName)

                targetDirectoryPath = videoFileKind.directoryPath
                ValidityChecker.isDirectory(targetDirectoryPath,
                                            "video target directory")
                targetVideoFilePath = (
                    "%s/%s%s-%s.mp4" %
                    (targetDirectoryPath, configData.targetFileNamePrefix,
                     configData.fileNamePrefix, videoTarget.name))
                trackDataList = \
                   AudioTrackManager.constructSettingsForAudioTracks(configData)

                VideoAudioCombiner.combine(videoFileKind.voiceNameList,
                                           trackDataList, videoFilePath,
                                           targetVideoFilePath,
                                           effectiveSubtitleFilePath)

                mediaType = "TV Show"
                VideoAudioCombiner.tagVideoFile(targetVideoFilePath,
                                                configData.albumName,
                                                configData.artistName,
                                                configData.albumArtFilePath,
                                                configData.title, mediaType,
                                                configData.songYear)

        intermediateFilesAreKept = configData.intermediateFilesAreKept
        OperatingSystem.removeFile(tempSubtitleFilePath,
                                   intermediateFilesAreKept)
        OperatingSystem.removeFile(tempMp4FilePath, intermediateFilesAreKept)

        Logging.trace("<<")
Code Example #27
    def processSilentVideo(cls):
        """Generates video without audio from lilypond file."""

        Logging.trace(">>")

        mmPerInch = 25.4
        configData = cls._configData
        intermediateFilesAreKept = configData.intermediateFilesAreKept
        intermediateFileDirectoryPath = \
            configData.intermediateFileDirectoryPath
        targetDirectoryPath = configData.targetDirectoryPath
        targetSubtitleFileName = (
            targetDirectoryPath + cls._pathSeparator +
            (subtitleFileNameTemplate % configData.fileNamePrefix))
        tempLilypondFilePath = configData.tempLilypondFilePath

        for videoFileKind in configData.videoFileKindMap.values():
            message = ("== generating silent video for %s" %
                       videoFileKind.name)
            OperatingSystem.showMessageOnConsole(message)
            videoTargetName = videoFileKind.target

            if videoTargetName not in configData.videoTargetMap:
                Logging.trace("--: unknown video target %s for file kind %s",
                              videoTargetName, videoFileKind.name)
            else:
                videoTarget = configData.videoTargetMap[videoTargetName]
                effectiveVideoResolution = (videoTarget.resolution *
                                            videoTarget.scalingFactor)
                factor = mmPerInch / videoTarget.resolution
                videoWidth = videoTarget.width * factor
                videoHeight = videoTarget.height * factor
                videoLineWidth = videoWidth - 2 * videoTarget.leftRightMargin
                lilypondFile = LilypondFile(tempLilypondFilePath)
                lilypondFile.setVideoParameters(videoTarget.name,
                                                effectiveVideoResolution,
                                                videoTarget.systemSize,
                                                videoTarget.topBottomMargin,
                                                videoWidth, videoHeight,
                                                videoLineWidth)
                lilypondFile.generate(
                    configData.includeFilePath, configData.lilypondVersion,
                    "video", videoFileKind.voiceNameList, configData.title,
                    configData.songComposerText,
                    configData.voiceNameToChordsMap,
                    configData.voiceNameToLyricsMap,
                    configData.voiceNameToScoreNameMap,
                    configData.measureToTempoMap,
                    configData.phaseAndVoiceNameToClefMap,
                    configData.phaseAndVoiceNameToStaffListMap)
                targetMp4FileName = (
                    targetDirectoryPath + cls._pathSeparator +
                    (silentVideoFileNameTemplate %
                     (configData.fileNamePrefix, videoFileKind.fileNameSuffix))
                )
                videoGenerator = \
                    LilypondPngVideoGenerator(tempLilypondFilePath,
                                              targetMp4FileName,
                                              targetSubtitleFileName,
                                              configData.measureToTempoMap,
                                              countInMeasures,
                                              videoTarget.frameRate,
                                              videoTarget.scalingFactor,
                                              videoTarget.ffmpegPresetName,
                                              intermediateFileDirectoryPath,
                                              intermediateFilesAreKept)
                videoGenerator.process()
                videoGenerator.cleanup()

                ##OperatingSystem.moveFile(targetMp4FileName,
                ##                         configData.targetDirectoryPath)
                ##OperatingSystem.moveFile(targetSubtitleFileName,
                ##                         configData.targetDirectoryPath)

        OperatingSystem.removeFile(tempLilypondFilePath,
                                   intermediateFilesAreKept)

        Logging.trace("<<")