def _writeMidiHeader(self):
    """Converts head line in <self._lineList> at <self._position> to
       midi stream and appends to <self._byteList>; returns the track
       count read from the header line"""

    Logging.trace(">>: %d - %s", self._position, self._peekLine())

    cls = self.__class__
    headerFieldList = self._getLine().split(" ")
    Assertion.check(len(headerFieldList) == 4, "bad MIDI header format")

    # the MIDI header chunk has a fixed payload length of six bytes
    midiHeaderLength = 6
    fileFormat, trackCount, timeDivision = map(int, headerFieldList[1:4])

    self._writeStringBytes(cls._fileHead)
    self._writeIntBytes(midiHeaderLength, 4)
    self._writeIntBytes(fileFormat, 2)
    self._writeIntBytes(trackCount, 2)
    self._writeIntBytes(timeDivision, 2)

    Logging.trace("<<")
    return trackCount
def pageDurationList (cls, pageToMeasureMap, measureToDurationMap):
    """Calculates page duration list based on mapping of pages to
       measures <pageToMeasureMap> and the mapping of measures to
       durations <measureToDurationMap>"""

    Logging.trace(">>: pToM = %r, mToD = %r",
                  pageToMeasureMap, measureToDurationMap)

    result = []
    previousMeasure = min(measureToDurationMap.keys())

    for page in sorted(pageToMeasureMap.keys()):
        if page > 1:
            currentMeasure = pageToMeasureMap[page]
            # the duration of the previous page is the sum of the
            # durations of measures <previousMeasure> up to (but
            # excluding) <currentMeasure>
            pageDuration = sum(measureToDurationMap[measureNumber]
                               for measureNumber
                               in range(previousMeasure, currentMeasure))
            result.append(pageDuration)
            previousMeasure = currentMeasure

    Logging.trace("<<: %r", result)
    return result
def _readMidiTrack(self):
    """Converts track in midi stream <self._byteList> at
       <self._position> and appends text representation of all events
       to <self._lineList>"""

    Logging.trace(">>: %d", self._position)

    cls = self.__class__
    chunkName = self._readStringBytes(4)
    # the chunk length is consumed from the stream, but not needed for
    # the event-by-event conversion below
    chunkLength = self._readIntBytes(4)
    Assertion.check(chunkName == cls._trackHead,
                    "track header chunk expected")
    self._appendToLineList(cls._trackHead)

    self._currentTime = 0
    trackEndIsReached = False

    while not trackEndIsReached:
        trackEndIsReached = self._readMidiEvent()

    self._appendToLineList(cls._trackEndMarker)
    Logging.trace("<<")
def generateRawAudio (self, midiFilePath, voiceName, shiftOffset):
    """Generates audio wave file for <voiceName> from midi file with
       <midiFilePath> in target directory; if several midi tracks
       match voice name, the resulting audio files are mixed; output
       is dry (no chorus, reverb and delay) and contains leading and
       trailing silent passages; if <shiftOffset> is greater than
       zero, the target file is shifted by that amount"""

    Logging.trace(">>: voice = %s, midiFile = %r, shiftOffset = %7.3f",
                  voiceName, midiFilePath, shiftOffset)

    cls = self.__class__
    tempMidiFilePath = "tempRender.mid"
    isShifted = (shiftOffset > 0)
    defaultTemplate = "%s/%s.wav"
    # a shifted rendering first goes into a "-raw" file
    nameTemplate = iif(isShifted, "%s/%s-raw.wav", defaultTemplate)
    audioFilePath = nameTemplate % (self._audioDirectoryPath, voiceName)

    self._makeFilteredMidiFile(voiceName, midiFilePath, tempMidiFilePath)
    self._convertMidiToAudio(tempMidiFilePath, audioFilePath)

    if isShifted:
        targetFilePath = defaultTemplate % (self._audioDirectoryPath,
                                            voiceName)
        cls._shiftAudioFile(audioFilePath, targetFilePath, shiftOffset)
        OperatingSystem.removeFile(audioFilePath,
                                   cls._intermediateFilesAreKept)

    OperatingSystem.removeFile(tempMidiFilePath,
                               cls._intermediateFilesAreKept)
    Logging.trace("<<")
def _appendToByteList(self, intList):
    """Appends integer list <intList> to internal byte list and traces
       operation"""

    Logging.trace("--: %d -> %s",
                  len(self._byteList), intListToHex(intList))
    self._byteList.extend(intList)
def _initializeOtherModuleData (self):
    """Initializes other data in different classes from current
       object."""

    Logging.trace(">>: %r", self)

    # command paths
    MP4Video._ffmpegCommand = self._ffmpegCommand

    # names or paths of intermediate files
    MP4Video._concatSpecificationFileName = \
        self._makePath("temp-concat.txt")
    MP4Video._intermediateFileNameTemplate = \
        self._makePath("temp%d.mp4")
    MP4Video._pageFileNameTemplate = (self._pictureFileStem
                                      + "-page%d.png")

    # technical parameters
    MP4Video._frameRate = self._frameRate
    MP4Video._scaleFactor = self._scaleFactor
    MP4Video._ffmpegPresetName = self._ffmpegPresetName
    MP4Video._generatorLogLevel = _ffmpegLogLevel

    # target file parameters
    SubtitleFile.setName(self._targetSubtitleFileName)
    MP4Video.setName(self._targetMp4FileName)

    Logging.trace("<<")
def _checkParameters (self):
    """Checks whether data given is plausible for subsequent
       processing."""

    Logging.trace(">>: %r", self)

    # executables
    Assertion.ensureProgramAvailability("lilypond",
                                        self._lilypondCommand, "-v")

    # input files
    Assertion.ensureFileExistence(self._lilypondFileName, "lilypond")

    # numeric parameters
    ValidityChecker.isNumberString(self._countInMeasures,
                                   "count-in measures",
                                   floatIsAllowed=True)
    ValidityChecker.isNumberString(self._frameRate, "frame rate",
                                   floatIsAllowed=True, rangeKind=">0")
    Assertion.check(len(self._measureToTempoMap) > 0,
                    "at least one tempo must be specified")

    # conversion to float only happens after validation above
    self._countInMeasures = float(self._countInMeasures)
    self._frameRate = float(self._frameRate)

    MP4Video.checkParameters()
    Logging.trace("<<: parameters okay")
def _combineWithMp4box (cls, sourceVideoFilePath, audioTrackDataList,
                        subtitleFilePath, targetVideoFilePath):
    """Combines video in <sourceVideoFilePath> and audio tracks
       specified by <audioTrackDataList> to new file in
       <targetVideoFilePath>; if <subtitleFilePath> is not empty, a
       subtitle is added; returns the constructed mp4box command as a
       list of tokens"""

    Logging.trace(">>: sourceVideo = %r, targetVideo = %r,"
                  + " audioTracks = %r, subtitleFile = %r",
                  sourceVideoFilePath, audioTrackDataList,
                  subtitleFilePath, targetVideoFilePath)

    command = [ cls._mp4boxCommand,
                "-isma", "-ipod", "-strict-error",
                sourceVideoFilePath ]

    for (audioFilePath, language, description) in audioTrackDataList:
        # each audio track goes into alternate group 2 with its
        # language code and a human readable name
        option = ("%s#audio:group=2:lang=%s:name=\"%s\""
                  % (audioFilePath, language, description))
        command.extend([ "-add", option ])

    # BUGFIX: original used the string-ordering test
    # 'subtitleFilePath > ""'; use an explicit emptiness check instead
    if subtitleFilePath != "":
        command.extend([ "-add", subtitleFilePath + "#handler=sbtl" ])

    command.extend([ "-out", targetVideoFilePath ])

    Logging.trace("<<: %r", command)
    return command
def shiftAudio (cls, audioFilePath, shiftedFilePath, shiftOffset):
    """Shifts audio file in <audioFilePath> to shifted audio in
       <shiftedFilePath> with silence prefix of length <shiftOffset>
       using internal python modules only"""

    Logging.trace(">>: infile = %r, outfile = %r,"
                  + " shiftOffset = %7.3f",
                  audioFilePath, shiftedFilePath, shiftOffset)

    sourceFile = cls.open(audioFilePath, "r")
    targetFile = cls.open(shiftedFilePath, "w")
    channelCount, sampleSize, frameRate, frameCount = \
        sourceFile.getParameters()

    # the target is longer by the silence prefix
    silenceFrameCount = round(frameRate * shiftOffset)
    targetFile.setParameters(channelCount, sampleSize, frameRate,
                             frameCount + silenceFrameCount)

    # write the silence prefix (one zero sample per channel and frame)
    targetFile.writeSamplesRaw([ 0 ] * (silenceFrameCount
                                        * channelCount))

    # transfer the original samples unchanged
    targetFile.writeSamplesRaw(sourceFile.readAllSamplesRaw())

    sourceFile.close()
    targetFile.close()

    Logging.trace("<<")
def frameCount (self):
    """Returns frame count of audio file"""

    Logging.trace(">>: %r", self)
    frameCount = self._file.getnframes()
    Logging.trace("<<: %r", frameCount)
    return frameCount
def _print(self, relativeIndentationLevel, st, isBuffered=True):
    """writes <st> to current lilypond file <self>; if <isBuffered> is
       set, the line is not directly written, but buffered"""

    cls = self.__class__

    # a negative level change takes effect before this line is emitted
    if relativeIndentationLevel < 0:
        cls._indentationLevel += relativeIndentationLevel

    if not isBuffered:
        indentation = ""
        state = cls._ProcessingState_beforeInclusion
        template = "--: /%d/ <=%r"
    else:
        indentation = indentationPerLevel * cls._indentationLevel
        state = self._processingState
        template = "--: /%d/ =>%r"

    Logging.trace(template, state, st.strip("\n"))
    self._processedTextBuffer[state].append(indentation + st)

    # a positive level change takes effect after this line is emitted
    if relativeIndentationLevel > 0:
        cls._indentationLevel += relativeIndentationLevel
def definedMacroNameSet(cls, includeFileName):
    """returns set of all defined macros in include file with
       <includeFileName>; does a very simple analysis and assumes that
       a definition line consists of the name and an equals sign"""

    Logging.trace(">>: %r", includeFileName)

    includeFile = UTF8File(includeFileName, "rt")
    lineList = includeFile.readlines()
    includeFile.close()

    # a macro definition is a name followed by an equals sign at the
    # beginning of a line
    definitionRegExp = re.compile(r" *([a-zA-Z]+) *=")
    result = set()

    for line in lineList:
        matchResult = definitionRegExp.match(line)

        if matchResult is not None:
            result.add(matchResult.group(1))

    Logging.trace("<<: %r", result)
    return result
def _writeVoice(self, voiceName):
    """puts out the score part for <voiceName>"""

    Logging.trace(">>: %s", voiceName)

    cls = self.__class__
    voiceStaffList = \
        cls._getPVEntry(self._phaseAndVoiceNameToStaffListMap,
                        self._phase, voiceName, ["Staff"])
    staffCount = len(voiceStaffList)

    if not self._isMidiScore:
        self._writeChords(voiceName)

    # staff name extensions depend on the number of staffs per voice
    extensionList = iif2(staffCount == 1, [""],
                         staffCount == 2, ["Top", "Bottom"],
                         ["Top", "Middle", "Bottom"])
    isMultiStaffVoice = (staffCount > 1)

    if isMultiStaffVoice:
        # several staffs are wrapped into a grand staff
        voiceStaff = "GrandStaff"
        self._printLine(+1, "\\new %s <<" % voiceStaff)
        self._writeVoiceStaffInstrumentSettings(voiceName, voiceStaff)

    for i, voiceStaff in enumerate(voiceStaffList):
        self._writeSingleVoiceStaff(voiceName, extensionList[i],
                                    voiceStaff, staffCount == 1)

    if isMultiStaffVoice:
        self._printLine(-1, ">>")

    Logging.trace("<<")
def _writeChords(self, voiceName):
    """writes chords for voice with <voiceName> (if applicable)"""

    Logging.trace(">>: %s", voiceName)

    cls = self.__class__
    target = self._phase

    if (voiceName in self._voiceNameToChordsMap
        and target in self._voiceNameToChordsMap[voiceName]):
        # capitalize the first letter of the lilypond voice name
        lilypondVoiceName = cls._lilypondVoiceName(voiceName)
        lilypondVoiceName = (lilypondVoiceName[0].upper()
                             + lilypondVoiceName[1:])
        chordsName = "chords" + lilypondVoiceName
        chordsMacroName = chordsName + target.capitalize()
        alternativeMacroNameList = [ chordsName,
                                     "chords" + target.capitalize(),
                                     "allChords" ]
        self._ensureMacroAvailability(chordsMacroName,
                                      alternativeMacroNameList)

        st = ("\\new ChordNames {"
              + iif(self._isExtractScore,
                    " \\compressFullBarRests", "")
              + " \\" + chordsMacroName + " }")
        self._printLine(0, st)

    Logging.trace("<<")
def _writeNonPdfHeader(self):
    """writes the header of a lilypond file based on <self> targetting
       for a MIDI file or a video"""

    Logging.trace(">>: %r", self)

    # default count-in definition (two measures, four beats)
    self._printLine(0, "countIn = { R1*2\\mf }")
    self._printLine(
        0, ("drumsCountIn = \\drummode"
            + " { ss2\\mf ss | ss4 ss ss ss | }"))

    # tempo track: a skip plus a tempo setting for each tempo change
    self._printLine(+1, "tempoTrack = {")
    previousMeasure = 0

    for measure in sorted(self._songMeasureToTempoMap.keys()):
        if measure == 1:
            self._printLine(0, "\\initialTempo")
        else:
            tempo = self._songMeasureToTempoMap[measure][0]
            skipCount = measure - previousMeasure
            self._printLine(0, "\\skip 1*%d" % skipCount)
            self._printLine(0, "%%%d\n" % measure)
            self._printLine(0, "\\tempo 4 =%d" % tempo)

        previousMeasure = measure

    self._printLine(-1, "}")
    Logging.trace("<<")
def tagVideoFile (cls, videoFilePath, albumName, artistName,
                  albumArtFilePath, title, mediaType, year):
    """Adds some quicktime/MP4 tags to video file with
       <videoFilePath>"""

    Logging.trace(">>: %r", videoFilePath)

    ValidityChecker.isReadableFile(videoFilePath, "source video file")
    st = "== tagging %r" % videoFilePath
    OperatingSystem.showMessageOnConsole(st)

    tagToValueMap = {
        "album"           : albumName,
        "albumArtist"     : artistName,
        "artist"          : artistName,
        "cover"           : albumArtFilePath,
        "itunesMediaType" : mediaType,
        "title"           : title,
        "tvShowName"      : albumName,
        "year"            : year
    }

    MP4TagManager.tagFile(videoFilePath, tagToValueMap)

    Logging.trace("<<")
def _replaceVariablesByValues (cls, stringList, variableMap):
    """Replaces all occurrences of variables in <stringList> by values
       given by <variableMap>"""

    Logging.trace(">>: list = %r, map = %r", stringList, variableMap)

    variableRegexp = re.compile(r"\$\{([a-zA-Z]+)\}")
    result = []

    for st in stringList:
        st = str(st)
        match = variableRegexp.match(st)

        if match is None:
            # a plain token is kept as is
            result.append(st)
        else:
            variableName = match.group(1)
            # an unknown variable falls back onto the token itself
            replacement = variableMap.get(variableName, st)

            if isString(replacement) or isinstance(replacement, Number):
                result.append(str(replacement))
            else:
                # a list-like replacement is spliced into the result
                result.extend(replacement)

    Logging.trace("<<: %r", result)
    return result
def _compressAudio (self, audioFilePath, songTitle, targetFilePath):
    """Compresses audio file with <songTitle> in path with
       <audioFilePath> to AAC file at <targetFilePath>"""

    Logging.trace(">>: audioFile = %r, title = %r,"
                  + " targetFile = %r",
                  audioFilePath, songTitle, targetFilePath)

    cls = self.__class__
    OperatingSystem.showMessageOnConsole("== convert to AAC: "
                                         + songTitle)

    # a configured aac command line takes precedence over the ffmpeg
    # fallback command
    defaultCommandLine = (("%s -loglevel %s -aac_tns 0"
                           + " -i ${infile} -y ${outfile}")
                          % (cls._ffmpegCommand, _ffmpegLogLevel))
    commandLine = iif(cls._aacCommandLine != "", cls._aacCommandLine,
                      defaultCommandLine)
    variableMap = { "infile"  : audioFilePath,
                    "outfile" : targetFilePath }
    command = cls._replaceVariablesByValues(tokenize(commandLine),
                                            variableMap)

    OperatingSystem.executeCommand(command, True)
    Logging.trace("<<")
def initialize (cls, aacCommandLine, audioProcessorMap, ffmpegCommand,
                midiToWavCommandLine, soundStyleNameToEffectsMap,
                intermediateFilesAreKept):
    """Sets some global processing data like e.g. the command paths."""

    Logging.trace(">>: aac = %r, audioProcessor = %r,"
                  + " ffmpeg = %r, midiToWavCommand = %r,"
                  + " soundStyleNameToEffectsMap = %r,"
                  + " debugging = %r",
                  aacCommandLine, audioProcessorMap, ffmpegCommand,
                  midiToWavCommandLine, soundStyleNameToEffectsMap,
                  intermediateFilesAreKept)

    cls._aacCommandLine = aacCommandLine
    cls._audioProcessorMap = audioProcessorMap
    cls._intermediateFilesAreKept = intermediateFilesAreKept
    cls._ffmpegCommand = ffmpegCommand
    cls._midiToWavRenderingCommandList = tokenize(midiToWavCommandLine)
    cls._soundStyleNameToEffectsMap = soundStyleNameToEffectsMap

    # find out whether the refinement command is based on sox
    refinementCommandList = \
        tokenize(cls._audioProcessorMap["refinementCommandLine"])
    commandName = refinementCommandList[0].lower()
    cls._audioProcessorIsSox = commandName.endswith(("sox", "sox.exe"))

    Logging.trace("<<")
def _makePdf(cls, processingPhase, targetFileNamePrefix, voiceNameList):
    """Processes lilypond file and generates extract or score PDF
       file."""

    Logging.trace(">>: targetFilePrefix = %r, voiceNameList=%r",
                  targetFileNamePrefix, voiceNameList)

    configData = cls._configData
    tempLilypondFilePath = configData.tempLilypondFilePath

    # generate a lilypond file for the given phase and voices
    lilypondFile = LilypondFile(tempLilypondFilePath)
    lilypondFile.generate(configData.includeFilePath,
                          configData.lilypondVersion, processingPhase,
                          voiceNameList, configData.title,
                          configData.songComposerText,
                          configData.voiceNameToChordsMap,
                          configData.voiceNameToLyricsMap,
                          configData.voiceNameToScoreNameMap,
                          configData.measureToTempoMap,
                          configData.phaseAndVoiceNameToClefMap,
                          configData.phaseAndVoiceNameToStaffListMap)

    cls._processLilypond(tempLilypondFilePath, targetFileNamePrefix)

    # move the resulting pdf into the target directory and discard the
    # temporary lilypond file
    OperatingSystem.moveFile(targetFileNamePrefix + ".pdf",
                             configData.targetDirectoryPath)
    OperatingSystem.removeFile(tempLilypondFilePath,
                               configData.intermediateFilesAreKept)

    Logging.trace("<<")
def _adjustValueForSpecialTags(cls, tagName, value):
    """Adjusts <value> for given <tagName> and returns adjusted
       value"""

    Logging.trace(">>: tagName = %r, value = %r", tagName, value)

    if tagName in cls._tagNameAndValueToNewValueMap:
        # map value to one character encoding
        result = cls._tagNameAndValueToNewValueMap[tagName][value]
    elif tagName == "track":
        # track is encoded as a (number, total) pair
        result = [(int(value), 999)]
    elif tagName == "cover":
        coverFileName = value
        isPngFile = coverFileName.lower().endswith('.png')
        imageFormat = iif(isPngFile,
                          MP4Cover.FORMAT_PNG, MP4Cover.FORMAT_JPEG)
        # BUGFIX: read the raw image data and close the file
        # immediately; the original closed only after MP4Cover
        # construction, leaking the handle when that raised
        coverFile = UTF8File(coverFileName, "rb")
        try:
            coverData = coverFile.read()
        finally:
            coverFile.close()
        result = [MP4Cover(coverData, imageFormat)]
    else:
        result = str(value)

    # the cover data is abbreviated in the trace to keep logs readable
    resultRepresentation = iif(tagName != "cover", result,
                               str(result)[:100] + "...")
    Logging.trace("<<: %r (%s)", resultRepresentation, type(result))
    return result
def checkAndSetFromMap(self, attributeNameToValueMap):
    """Checks validity of variables in <attributeNameToValueMap> and
       assigns them to current audio track descriptor"""

    cls = self.__class__

    # provide defaults for the optional attributes
    _setToDefault(attributeNameToValueMap, "amplificationLevel", 0.0)
    _setToDefault(attributeNameToValueMap, "description", "")
    _setToDefault(attributeNameToValueMap, "masteringEffectList", "")

    # validate and transfer the attribute values onto <self>
    name = attributeNameToValueMap["name"]
    AttributeManager.checkForTypesAndCompleteness(
        name, "audio track", attributeNameToValueMap,
        cls._attributeNameToKindMap)
    AttributeManager.setAttributesFromMap(self, attributeNameToValueMap)

    # convert string-valued attributes to their real types
    self.audioGroupList = convertStringToList(self.audioGroupList, "/")
    self.amplificationLevel = float(self.amplificationLevel)
    Logging.trace("--: vntalm = %r", self.voiceNameToAudioLevelMap)
    self.voiceNameToAudioLevelMap = \
        { voiceName : float(level)
          for (voiceName, level)
          in self.voiceNameToAudioLevelMap.items() }
def measureToDurationMap (cls, measureToTempoMap, countInMeasures,
                          lastMeasureNumber):
    """Calculates mapping from measure number to duration based on
       tempo track in <measureToTempoMap> and the number of
       <countInMeasures>."""

    Logging.trace(">>: measureToTempoMap = %r, countInMeasures = %d,"
                  + " lastMeasureNumber = %d",
                  measureToTempoMap, countInMeasures, lastMeasureNumber)

    firstMeasureNumber = 1
    Assertion.check(firstMeasureNumber in measureToTempoMap,
                    "tempo track must contain setting for first measure")

    (tempo, measureLength) = measureToTempoMap[firstMeasureNumber]
    duration = cls.measureDuration(tempo, measureLength)
    # the count-in prolongs the very first measure
    firstMeasureOffset = duration * countInMeasures
    result = {}

    for measureNumber in range(firstMeasureNumber,
                               lastMeasureNumber + 1):
        # a tempo change updates the duration for this and all
        # following measures
        if measureNumber in measureToTempoMap:
            (tempo, measureLength) = measureToTempoMap[measureNumber]
            duration = cls.measureDuration(tempo, measureLength)

        isFirstMeasure = (measureNumber == firstMeasureNumber)
        result[measureNumber] = (duration
                                 + iif(isFirstMeasure,
                                       firstMeasureOffset, 0))

    Logging.trace("<<: %r", result)
    return result
def initialize (cls, ffmpegCommand, lilypondCommand):
    """Sets module-specific configuration variables"""

    Logging.trace(">>: ffmpeg = %r, lilypond = %r",
                  ffmpegCommand, lilypondCommand)
    # the command names are stored in module scope for use by the
    # module-level helper routines
    globals()['_ffmpegCommand'] = ffmpegCommand
    globals()['_lilypondCommand'] = lilypondCommand
    Logging.trace("<<")
def setName (cls, name):
    """Sets name of postscript file."""

    Logging.trace(">>: %r", name)
    # the file must already exist when its name is registered
    Assertion.ensureFileExistence(name, "postscript")
    cls._fileName = name
    Logging.trace("<<")
def __init__ (self, audioDirectoryPath):
    """Initializes generator with target directory of all audio files
       to be stored in <audioDirectoryPath>"""

    Logging.trace(">>: audioDirectoryPath = %r", audioDirectoryPath)
    self._audioDirectoryPath = audioDirectoryPath
    Logging.trace("<<")
def make (cls, pageDurationList):
    """Generate an MP4 video from durations in <pageDurationList> and
       generated PNG images."""

    Logging.trace(">>: %r", pageDurationList)

    # for each page an MP4 fragment file is generated and finally
    # concatenated into the target file
    concatSpecificationFile = \
        UTF8File(cls._concatSpecificationFileName, 'wt')

    for (i, pageDuration) in enumerate(pageDurationList):
        page = i + 1
        requiredNumberOfFrames = int(cls._frameRate * pageDuration) + 1
        pageFileName = cls._pageFileNameTemplate % page
        intermediateFileName = cls._intermediateFileNameTemplate % page

        # write file name to concatenation file
        normalizedFileName = intermediateFileName.replace("\\", "/")
        st = "file '%s'\n" % normalizedFileName
        concatSpecificationFile.write(st)

        # make silent video from single lilypond page
        command = ((cls._ffmpegCommand,
                    "-loglevel", cls._generatorLogLevel,
                    "-framerate", "1/" + str(requiredNumberOfFrames),
                    "-i", str(pageFileName),
                    "-vf", "scale=iw/%d:ih/%d" % (cls._scaleFactor,
                                                  cls._scaleFactor),
                    "-r", str(cls._frameRate),
                    "-t", "%02.2f" % pageDuration)
                   + iif(cls._ffmpegPresetName != "",
                         ("-fpre", cls._ffmpegPresetName),
                         ("-pix_fmt", "yuv420p",
                          "-profile:v", "baseline",
                          "-level", cls._defaultMp4BaselineLevel))
                   + ("-y", intermediateFileName))
        OperatingSystem.executeCommand(command, True)

    concatSpecificationFile.close()

    # concatenate silent video fragments into single file
    # BUGFIX: the page count is derived from the list length; the
    # original read the loop variable <page> after the loop, raising a
    # NameError for an empty <pageDurationList>
    cls._pageCount = len(pageDurationList)
    command = (cls._ffmpegCommand,
               "-safe", "0",
               "-y",
               "-loglevel", cls._generatorLogLevel,
               "-f", "concat",
               "-i", cls._concatSpecificationFileName,
               "-codec", "copy", cls.fileName)
    OperatingSystem.executeCommand(command, True)

    Logging.trace("<<")
def _readMidiEvent(self):
    """Reads event in midi stream <self._byteList> at <self._position>
       and updates <self._lineList> accordingly; returns whether the
       track end meta event has been reached"""

    Logging.trace(">>: %d", self._position)

    cls = self.__class__
    # each event starts with a variable-length delta time that
    # advances the running time counter
    deltaTime = self._readVariableBytes()
    self._currentTime += int(deltaTime)
    eventByte = self._readIntBytes(1)
    eventKind = self._midiEventKind(eventByte)
    Assertion.check(eventKind is not None,
                    "bad MIDI format: expected event byte")
    # length is either a number of data bytes or a marker:
    # "R" for meta events, "V" for variable-length sysex events
    eventLength = cls._eventKindToLengthMap[eventKind]

    if eventLength == "R":
        # meta event
        eventKind, st = self._readMetaEvent()
    elif eventLength == "V":
        # sysex event: a length prefix followed by that many data
        # bytes, rendered as space-separated decimal values
        eventLength = self._readVariableBytes()
        st = ""
        for i in range(eventLength):
            value = self._readIntBytes(1)
            st = st + iif(i > 0, " ", "") + "%d" % value
    else:
        # channel message: low nibble of the event byte is the
        # (zero-based) channel
        channel = eventByte % 16 + 1
        valueA = self._readIntBytes(1)
        st = "ch=%d" % channel
        # only two-byte messages carry a second data byte
        if eventLength == 2:
            valueB = self._readIntBytes(1)
        if eventByte < 0xB0:
            # note off / note on: note number and velocity
            st += " n=%d v=%d" % (valueA, valueB)
        elif eventByte < 0xC0:
            # control change: controller number and value
            st += " c=%d v=%d" % (valueA, valueB)
        elif eventByte < 0xD0:
            # program change: program number only
            st += " p=%d" % valueA
        elif eventByte < 0xE0:
            # channel pressure: single value
            st += " v=%d" % valueA
        else:
            # pitch bend: 14-bit value from two 7-bit data bytes
            st += " v=%d" % (valueA + valueB * 128)

    # prefix the detail text with a blank unless it is empty
    st = iif(st == "", st, " " + st)
    currentLine = "%d %s%s" % (self._currentTime, eventKind, st)
    self._appendToLineList(currentLine)
    isTrackEnd = (eventKind == "Meta TrkEnd")

    Logging.trace("<<")
    return isTrackEnd
def initialize (cls, ffmpegCommand, mp4boxCommand):
    """Sets the internal command names"""

    Logging.trace(">>: ffmpegCommand = %r, mp4boxCommand = %r",
                  ffmpegCommand, mp4boxCommand)
    cls._ffmpegCommand = ffmpegCommand
    cls._mp4boxCommand = mp4boxCommand
    Logging.trace("<<")
def writeSamplesRaw (self, rawSampleList):
    """Writes all frames in a raw <rawSampleList> to <self>"""

    Logging.trace(">>: %r", self)
    # pack all samples as little-endian 16-bit signed integers
    packFormat = "<%uh" % len(rawSampleList)
    rawData = struct.pack(packFormat, *rawSampleList)
    self._file.writeframesraw(rawData)
    Logging.trace("<<")