def _compressAudio (self, audioFilePath, songTitle, targetFilePath):
    """Converts the audio file at <audioFilePath> for the song named
       <songTitle> into an AAC file written to <targetFilePath>."""

    Logging.trace(">>: audioFile = %r, title = %r,"
                  + " targetFile = %r",
                  audioFilePath, songTitle, targetFilePath)

    cls = self.__class__

    OperatingSystem.showMessageOnConsole("== convert to AAC: " + songTitle)

    # when no explicit AAC command line is configured, fall back to a
    # plain ffmpeg invocation with TNS disabled
    defaultCommandLine = (("%s -loglevel %s -aac_tns 0"
                           + " -i ${infile} -y ${outfile}")
                          % (cls._ffmpegCommand, _ffmpegLogLevel))
    effectiveCommandLine = iif(cls._aacCommandLine != "",
                               cls._aacCommandLine, defaultCommandLine)

    # substitute the concrete file names for the template variables
    templateVariableMap = { "infile"  : audioFilePath,
                            "outfile" : targetFilePath }
    command = cls._replaceVariablesByValues(tokenize(effectiveCommandLine),
                                            templateVariableMap)

    OperatingSystem.executeCommand(command, True)

    Logging.trace("<<")
# Example no. 2 (scraping artifact, was "Ejemplo n.º 2" / "0")
    def tagVideoFile (cls, videoFilePath, albumName, artistName,
                      albumArtFilePath, title, mediaType, year):
        """Adds some quicktime/MP4 tags to video file with
           <videoFilePath>"""

        Logging.trace(">>: %r", videoFilePath)

        ValidityChecker.isReadableFile(videoFilePath, "source video file")

        OperatingSystem.showMessageOnConsole("== tagging %r" % videoFilePath)

        # map from tag name onto value to be written into the video file
        tagToValueMap = { "album"           : albumName,
                          "albumArtist"     : artistName,
                          "artist"          : artistName,
                          "cover"           : albumArtFilePath,
                          "itunesMediaType" : mediaType,
                          "title"           : title,
                          "tvShowName"      : albumName,
                          "year"            : year }

        MP4TagManager.tagFile(videoFilePath, tagToValueMap)

        Logging.trace("<<")
# Example no. 3 (scraping artifact, was "Ejemplo n.º 3" / "0")
    def _processLilypond(cls, lilypondFilePath, targetFileNamePrefix):
        """Runs lilypond on <lilypondFilePath> and stores the result in a
           file named with <targetFileNamePrefix>."""

        Logging.trace(">>: lilyFile = %r, targetFileNamePrefix=%r",
                      lilypondFilePath, targetFileNamePrefix)

        OperatingSystem.showMessageOnConsole("== processing "
                                             + targetFileNamePrefix)

        # lilypond decides the output file extension itself; only the
        # prefix is given here
        lilypondInvocation = (cls._lilypondCommand,
                              "--output", targetFileNamePrefix,
                              lilypondFilePath)
        OperatingSystem.executeCommand(lilypondInvocation, True)

        Logging.trace("<<")
# Example no. 4 (scraping artifact, was "Ejemplo n.º 4" / "0")
    def processMidi(cls):
        """Generates midi file from lilypond file.

           Renders a temporary lilypond file in "midi" mode, postprocesses
           the resulting midi file (track names, humanization, instrument
           positioning, processing date) and moves the final midi file to
           the configured target directory."""

        Logging.trace(">>")

        configData = cls._configData
        intermediateFilesAreKept = configData.intermediateFilesAreKept
        tempLilypondFilePath = configData.tempLilypondFilePath
        # write a lilypond file in "midi" mode covering all configured
        # midi voices
        lilypondFile = LilypondFile(tempLilypondFilePath)
        lilypondFile.generate(
            configData.includeFilePath, configData.lilypondVersion, "midi",
            configData.midiVoiceNameList, configData.title,
            configData.songComposerText, configData.voiceNameToChordsMap,
            configData.voiceNameToLyricsMap,
            configData.voiceNameToScoreNameMap, configData.measureToTempoMap,
            configData.phaseAndVoiceNameToClefMap,
            configData.phaseAndVoiceNameToStaffListMap)

        # lilypond renders into a "-temp" midi file in the intermediate
        # directory; the final name comes from the midi file name template
        tempMidiFileNamePrefix = (configData.intermediateFileDirectoryPath +
                                  cls._pathSeparator +
                                  configData.fileNamePrefix + "-temp")
        tempMidiFileName = tempMidiFileNamePrefix + ".mid"
        targetMidiFileName = (cls._midiFileNameTemplate %
                              configData.fileNamePrefix)

        cls._processLilypond(tempLilypondFilePath, tempMidiFileNamePrefix)

        # postprocess MIDI file
        OperatingSystem.showMessageOnConsole("== adapting MIDI into " +
                                             targetMidiFileName)
        trackToSettingsMap = cls._calculateTrackToSettingsMap()

        # add missing track names, humanize, position instruments and
        # stamp the processing date onto all tracks before saving
        midiTransformer = MidiTransformer(tempMidiFileName,
                                          intermediateFilesAreKept)
        midiTransformer.addMissingTrackNames()
        midiTransformer.humanizeTracks(
            configData.countInMeasureCount,
            configData.measureToHumanizationStyleNameMap)
        midiTransformer.positionInstruments(trackToSettingsMap)
        midiTransformer.addProcessingDateToTracks(trackToSettingsMap.keys())
        midiTransformer.save(targetMidiFileName)

        # move the result to the target directory and remove temporary
        # files (removal is skipped when intermediate files are kept)
        OperatingSystem.moveFile(targetMidiFileName,
                                 configData.targetDirectoryPath)
        OperatingSystem.removeFile(tempMidiFileName, intermediateFilesAreKept)
        OperatingSystem.removeFile(tempLilypondFilePath,
                                   intermediateFilesAreKept)

        Logging.trace("<<")
    def copyOverrideFile (self, filePath, voiceName, shiftOffset):
        """Sets refined file from <filePath> for voice with
           <voiceName> and applies <shiftOffset>"""

        Logging.trace(">>: file = %r, voice = %r, offset = %7.3f",
                      filePath, voiceName, shiftOffset)

        cls = self.__class__
        OperatingSystem.showMessageOnConsole("== overriding %r from file"
                                             % voiceName)

        # the override replaces the file where the processed audio for
        # this voice would normally be written
        destinationFilePath = (_processedAudioFileTemplate
                               % (self._audioDirectoryPath, voiceName))
        cls._shiftAudioFile(filePath, destinationFilePath, shiftOffset)

        Logging.trace("<<")
# Example no. 6 (scraping artifact, was "Ejemplo n.º 6" / "0")
    def insertHardSubtitles (cls, sourceVideoFilePath, subtitleFilePath,
                             targetVideoFilePath, shiftOffset,
                             subtitleColor, subtitleFontSize,
                             ffmpegPresetName):
        """Inserts hard subtitles specified by an SRT file with
           <subtitleFilePath> into video given by
           <sourceVideoFilePath> resulting in video with
           <targetVideoFilePath>; <shiftOffset> tells the amount of
           empty time to be inserted before the video, <ffmpegPresetName>
           tells the ffmpeg preset used for the newly generated video,
           <subtitleColor> the RGB color of the subtitle,
           <subtitleFontSize> the size in pixels"""

        Logging.trace(">>: sourceVideo = %r, subtitleFile = %r,"
                      + " targetVideo = %r, subtitleFontSize = %d,"
                      + " subtitleColor = %d, ffmpegPreset = %r",
                      sourceVideoFilePath, subtitleFilePath,
                      targetVideoFilePath, subtitleFontSize,
                      subtitleColor, ffmpegPresetName)

        ValidityChecker.isReadableFile(sourceVideoFilePath,
                                       "source video file")

        OperatingSystem.showMessageOnConsole("== hardcoding subtitles for %r"
                                             % sourceVideoFilePath)

        # subtitle burn-in via ffmpeg's subtitles video filter with an
        # ASS style override for color and font size
        subtitleOption = (("subtitles=%s:force_style='PrimaryColour=%d,"
                           + "FontSize=%d'")
                          % (subtitleFilePath, subtitleColor,
                             subtitleFontSize))

        # encoder settings: either a configured ffmpeg preset file or
        # explicit baseline H.264 settings
        if ffmpegPresetName != "":
            encodingOptions = ("-fpre", ffmpegPresetName)
        else:
            encodingOptions = ("-pix_fmt", "yuv420p",
                               "-profile:v", "baseline",
                               "-level", cls._defaultMp4BaselineLevel)

        command = ((cls._ffmpegCommand,
                    "-loglevel", "error",
                    "-itsoffset", str(shiftOffset),
                    "-i", sourceVideoFilePath,
                    "-vf", subtitleOption)
                   + encodingOptions
                   + ("-y", targetVideoFilePath))

        OperatingSystem.executeCommand(command, True)
        Logging.trace("<<")
    def mixdown (self, configData):
        """Combines the processed audio files for all voices in
           <configData.voiceNameList> into several combination files and
           converts them to aac format; <configData> defines the voice
           volumes, the relative amplification level, the optional
           voices as well as the tags and suffices for the final
           files"""

        Logging.trace(">>: configData = %r", configData)

        cls = self.__class__

        audioTrackSettingsList = \
            cls.constructSettingsForAudioTracks(configData)

        for audioTrackSettings in audioTrackSettingsList:
            (currentVoiceNameList, albumName, songTitle, targetFilePath,
             _, languageCode, voiceNameToAudioLevelMap,
             masteringEffectList, amplificationLevel) = audioTrackSettings

            # mix into an intermediate WAV file before AAC compression
            waveIntermediateFilePath = ("%s/result_%s.wav"
                                        % (self._audioDirectoryPath,
                                           languageCode))
            OperatingSystem.showMessageOnConsole("== make mix file: %s"
                                                 % songTitle)

            # an optional parallel track is mixed in with its configured
            # volume
            if configData.parallelTrackFilePath != "":
                voiceNameToAudioLevelMap["parallel"] = \
                    configData.parallelTrackVolume

            self._mixdownVoicesToWavFile(currentVoiceNameList,
                                         voiceNameToAudioLevelMap,
                                         configData.parallelTrackFilePath,
                                         masteringEffectList,
                                         amplificationLevel,
                                         waveIntermediateFilePath)
            self._compressAudio(waveIntermediateFilePath, songTitle,
                                targetFilePath)
            cls._tagAudio(targetFilePath, configData, songTitle, albumName)

            #OperatingSystem.removeFile(waveIntermediateFilePath,
            #                           cls._intermediateFilesAreKept)

        Logging.trace("<<")
# Example no. 8 (scraping artifact, was "Ejemplo n.º 8" / "0")
    def combine (cls, voiceNameList, trackDataList, sourceVideoFilePath,
                 targetVideoFilePath, subtitleFilePath):
        """Combines all final audio files (characterized by
           <trackDataList>) and the video given by
           <sourceVideoFilePath> into video in <targetVideoFilePath>;
           if <subtitleFilePath> is not empty, the given subtitle file
           is added as an additional track; <voiceNameList> gives the
           list of all voices"""

        Logging.trace(">>: voiceNameList = %r, trackDataList = %r,"
                      + " sourceVideo = %r, targetVideo = %r,"
                      + " subtitleFilePath = %r",
                      voiceNameList, trackDataList, sourceVideoFilePath,
                      targetVideoFilePath, subtitleFilePath)

        ValidityChecker.isReadableFile(sourceVideoFilePath,
                                       "source video file")

        st = "== combining audio and video for " + targetVideoFilePath
        OperatingSystem.showMessageOnConsole(st)

        # reduce each nine-element track data record to the triple
        # needed for muxing: (audio file path, language code, description)
        audioTrackDataList = []

        for audioTrackData in trackDataList:
            _, _, _, audioFilePath, description,\
              languageCode, _, _, _ = audioTrackData
            element = (audioFilePath, languageCode, description)
            audioTrackDataList.append(element)

        # mp4box is preferred for muxing when configured, otherwise
        # ffmpeg is used
        if cls._mp4boxCommand != "":
            command = cls._combineWithMp4box(sourceVideoFilePath,
                                             audioTrackDataList,
                                             subtitleFilePath,
                                             targetVideoFilePath)
        else:
            command = cls._combineWithFfmpeg(sourceVideoFilePath,
                                             audioTrackDataList,
                                             subtitleFilePath,
                                             targetVideoFilePath)

        OperatingSystem.executeCommand(command, True)
        Logging.trace("<<")
    def _shiftAudioFile (cls, audioFilePath, shiftedFilePath, shiftOffset):
        """Shifts audio file in <audioFilePath> to shifted audio in
           <shiftedFilePath> with silence prefix of length
           <shiftOffset>"""

        Logging.trace(">>: infile = %r, outfile = %r,"
                      + " shiftOffset = %7.3f",
                      audioFilePath, shiftedFilePath, shiftOffset)

        OperatingSystem.showMessageOnConsole("== shifting %r by %7.3fs"
                                             % (shiftedFilePath, shiftOffset))

        # an external padding command takes precedence over the internal
        # WAV-based shifting
        if "paddingCommandLine" in cls._audioProcessorMap:
            paddingCommandLine = cls._audioProcessorMap["paddingCommandLine"]
            cls._shiftAudioFileExternally(paddingCommandLine,
                                          audioFilePath, shiftedFilePath,
                                          shiftOffset)
        else:
            _WavFile.shiftAudio(audioFilePath, shiftedFilePath, shiftOffset)

        Logging.trace("<<")
    def _convertMidiToAudio (self, voiceMidiFilePath, targetFilePath):
        """Converts voice data in midi file with <voiceMidiFilePath>
           to raw audio file with <targetFilePath>"""

        Logging.trace(">>: midiFile = %r, targetFile = %r",
                      voiceMidiFilePath, targetFilePath)

        cls = self.__class__

        # processing midi file via given command
        OperatingSystem.showMessageOnConsole("== convertMidiToWav "
                                             + targetFilePath)

        # the configured rendering command has its template variables
        # replaced by the concrete file names
        templateVariableMap = { "infile"  : voiceMidiFilePath,
                                "outfile" : targetFilePath }
        renderingCommand = cls._replaceVariablesByValues(
                               cls._midiToWavRenderingCommandList,
                               templateVariableMap)
        # the renderer's console output is discarded
        OperatingSystem.executeCommand(renderingCommand, True,
                                       stdout=OperatingSystem.nullDevice)

        Logging.trace("<<")
    def generateRefinedAudio (self, voiceName, soundVariant, reverbLevel):
        """Generates refined audio wave file for <voiceName> from raw
           audio file in target directory; <soundVariant> gives the
           kind of postprocessing ('COPY', 'STD', 'EXTREME', ...) and
           <reverbLevel> the percentage of reverb to be used for that
           voice"""

        Logging.trace(">>: voice = %s, variant = %s, reverb = %4.3f",
                      voiceName, soundVariant, reverbLevel)

        cls = self.__class__
        variantName = soundVariant.capitalize()
        isCopyVariant = (variantName == "Copy")

        if isCopyVariant:
            soundStyleName = "COPY"
        else:
            # derive the sound style key from the voice name; a simple
            # keyboard voice is mapped onto the plain keyboard style
            styleVoiceName = humanReadableVoiceName(voiceName).capitalize()

            if styleVoiceName == "Keyboardsimple":
                styleVoiceName = "Keyboard"

            soundStyleName = ("soundStyle%s%s"
                              % (styleVoiceName, variantName))

        OperatingSystem.showMessageOnConsole("== processing %s (%s)"
                                             % (voiceName, soundVariant))

        # look up the audio processing effects chain; an unknown sound
        # style degrades to a plain copy
        if isCopyVariant:
            audioProcessingEffects = ""
        elif soundStyleName in cls._soundStyleNameToEffectsMap:
            audioProcessingEffects = \
                cls._soundStyleNameToEffectsMap[soundStyleName]
        else:
            audioProcessingEffects = ""
            message = ("unknown variant %s replaced by copy default"
                       % soundVariant)
            Logging.trace("--: " + message)
            OperatingSystem.showMessageOnConsole(message)
            isCopyVariant = True

        if not isCopyVariant:
            # add reverb if applicable; only the sox audio processor
            # supports an implicit reverb effect
            percentReverbLevel = adaptToRange(int(reverbLevel * 100.0),
                                              0, 100)

            if percentReverbLevel > 0 and cls._audioProcessorIsSox:
                audioProcessingEffects += " reverb %d" % percentReverbLevel
            elif percentReverbLevel > 0:
                message = "reverberation skipped, please use explicit reverb"
                OperatingSystem.showMessageOnConsole(message)

        self._processAudioRefinement(voiceName, audioProcessingEffects)

        Logging.trace("<<")
    def _tagAudio (cls, audioFilePath, configData, songTitle, albumName):
        """Tags M4A audio file with <songTitle> at <audioFilePath>
           with tags specified by <configData>, <songTitle> and
           <albumName>"""

        Logging.trace(">>: audioFile = %r, configData = %r,"
                      + " title = %r, album = %r",
                      audioFilePath, configData, songTitle, albumName)

        artistName = configData.artistName

        # map from tag name onto tag value to be stored in the file
        tagToValueMap = { "album"       : albumName,
                          "albumArtist" : artistName,
                          "artist"      : artistName,
                          "cover"       : configData.albumArtFilePath,
                          "title"       : songTitle,
                          "track"       : configData.trackNumber,
                          "year"        : configData.songYear }

        OperatingSystem.showMessageOnConsole("== tagging AAC: " + songTitle)
        MP4TagManager.tagFile(audioFilePath, tagToValueMap)

        Logging.trace("<<")
    def mixdown (cls, sourceFilePathList, volumeFactorList,
                 amplificationLevel, targetFilePath):
        """Mixes WAV audio files given in <sourceFilePathList> to target WAV
           file with <targetFilePath> with volumes given by
           <volumeFactorList> with loudness amplification given by
           <amplificationLevel> via Python modules only"""

        Logging.trace(">>: sourceFiles = %r, volumeFactors = %r,"
                      + " level = %4.3f, targetFile = %r",
                      sourceFilePathList, volumeFactorList,
                      amplificationLevel, targetFilePath)

        OperatingSystem.showMessageOnConsole("  MIX", False)
        sourceFileList = [cls(name, "r") for name in sourceFilePathList]
        sourceFileCount = len(sourceFileList)

        # all sources are assumed to share the parameters of the first
        # file -- TODO confirm with callers
        channelCount, sampleSize, frameRate, _ = \
            sourceFileList[0].getParameters()
        # result buffer is sized for the longest source file
        frameCount = max(sourceFile.frameCount()
                         for sourceFile in sourceFileList)
        resultSampleList = (frameCount * channelCount) * [ 0 ]

        # accumulate each source file scaled by its volume factor
        for i in range(sourceFileCount):
            OperatingSystem.showMessageOnConsole(" %d" % (i + 1), False)
            sourceFile   = sourceFileList[i]
            volumeFactor = volumeFactorList[i]
            cls._mix(resultSampleList, sourceFile, volumeFactor)

        maximumVolume = cls.maximumVolume(resultSampleList)
        maxValue = cls.maximumSampleValue
        # 2^(level/6.0206) == 10^(level/20), i.e. <amplificationLevel>
        # is interpreted in decibels
        amplificationFactor = pow(2.0, amplificationLevel / 6.0206)
        # very quiet mixes (below ~10% of full scale) are left unscaled;
        # this also avoids a division by a zero maximum volume
        scalingFactor = (1.0 if maximumVolume < (maxValue // 10)
                         else (maxValue * amplificationFactor) / maximumVolume)
        Logging.trace("--: maxVolume = %d, factor = %4.3f",
                      maximumVolume, scalingFactor)
        OperatingSystem.showMessageOnConsole(" S")
        cls.scale(resultSampleList, scalingFactor)

        for sourceFile in sourceFileList:
            sourceFile.close()

        targetFile = cls(targetFilePath, "w")
        targetFrameCount = len(resultSampleList) // channelCount
        targetFile.setParameters(channelCount, sampleSize,
                                 frameRate, targetFrameCount)
        targetFile.writeSamples(resultSampleList)
        targetFile.close()

        Logging.trace("<<")
# Example no. 14 (scraping artifact, was "Ejemplo n.º 14" / "0")
    def processSilentVideo(cls):
        """Generates video without audio from lilypond file.

           For every configured video file kind the lilypond file is
           rendered in "video" mode with the geometry of the associated
           video target and converted into an MP4 plus an SRT subtitle
           file via LilypondPngVideoGenerator."""

        Logging.trace(">>")

        # conversion factor from resolution (dots per inch) to millimetres
        mmPerInch = 25.4
        configData = cls._configData
        intermediateFilesAreKept = configData.intermediateFilesAreKept
        intermediateFileDirectoryPath = \
            configData.intermediateFileDirectoryPath
        targetDirectoryPath = configData.targetDirectoryPath
        # NOTE(review): subtitleFileNameTemplate is presumably a
        # module-level template -- not visible in this chunk
        targetSubtitleFileName = (
            targetDirectoryPath + cls._pathSeparator +
            (subtitleFileNameTemplate % configData.fileNamePrefix))
        tempLilypondFilePath = configData.tempLilypondFilePath

        for _, videoFileKind in configData.videoFileKindMap.items():
            message = ("== generating silent video for %s" %
                       videoFileKind.name)
            OperatingSystem.showMessageOnConsole(message)
            videoTargetName = videoFileKind.target

            # a file kind referring to an unknown video target is skipped
            # with a log entry only
            if videoTargetName not in configData.videoTargetMap:
                Logging.trace("--: unknown video target %s for file kind %s",
                              videoTargetName, videoFileKind.name)
            else:
                videoTarget = configData.videoTargetMap[videoTargetName]
                effectiveVideoResolution = (videoTarget.resolution *
                                            videoTarget.scalingFactor)
                # convert target dimensions from pixels to millimetres
                factor = mmPerInch / videoTarget.resolution
                videoWidth = videoTarget.width * factor
                videoHeight = videoTarget.height * factor
                videoLineWidth = videoWidth - 2 * videoTarget.leftRightMargin
                lilypondFile = LilypondFile(tempLilypondFilePath)
                lilypondFile.setVideoParameters(videoTarget.name,
                                                effectiveVideoResolution,
                                                videoTarget.systemSize,
                                                videoTarget.topBottomMargin,
                                                videoWidth, videoHeight,
                                                videoLineWidth)
                lilypondFile.generate(
                    configData.includeFilePath, configData.lilypondVersion,
                    "video", videoFileKind.voiceNameList, configData.title,
                    configData.songComposerText,
                    configData.voiceNameToChordsMap,
                    configData.voiceNameToLyricsMap,
                    configData.voiceNameToScoreNameMap,
                    configData.measureToTempoMap,
                    configData.phaseAndVoiceNameToClefMap,
                    configData.phaseAndVoiceNameToStaffListMap)
                targetMp4FileName = (
                    targetDirectoryPath + cls._pathSeparator +
                    (silentVideoFileNameTemplate %
                     (configData.fileNamePrefix, videoFileKind.fileNameSuffix))
                )
                # NOTE(review): "countInMeasures" is not defined in this
                # method or visible scope; possibly a module global or a
                # bug (compare configData.countInMeasureCount used in
                # processMidi) -- confirm
                videoGenerator = \
                    LilypondPngVideoGenerator(tempLilypondFilePath,
                                              targetMp4FileName,
                                              targetSubtitleFileName,
                                              configData.measureToTempoMap,
                                              countInMeasures,
                                              videoTarget.frameRate,
                                              videoTarget.scalingFactor,
                                              videoTarget.ffmpegPresetName,
                                              intermediateFileDirectoryPath,
                                              intermediateFilesAreKept)
                videoGenerator.process()
                videoGenerator.cleanup()

                ##OperatingSystem.moveFile(targetMp4FileName,
                ##                         configData.targetDirectoryPath)
                ##OperatingSystem.moveFile(targetSubtitleFileName,
                ##                         configData.targetDirectoryPath)

        OperatingSystem.removeFile(tempLilypondFilePath,
                                   intermediateFilesAreKept)

        Logging.trace("<<")