def postProcessData(self):
    '''
    Replace client.data with a list that only includes each key once.

    Counts how many data points share each distinct combination of values
    on the counted axes and stores that count on this axis, so every key
    combination appears exactly once in the rewritten client.data.

    Returns the new, sorted client.data list, or an empty list if there
    is no client.
    '''
    client = self.client
    if client is None:
        return []
    from operator import itemgetter

    countAxes = self.countAxes
    if not common.isIterable(countAxes):
        countAxes = (countAxes,)
    # map axis names (e.g. 'x', 'y') to positions within each data tuple
    axesIndices = tuple(self.axisDataMap[axisName] for axisName in countAxes)
    thisIndex = self.axisDataMap[self.axisName]

    selector = itemgetter(*axesIndices)
    relevantData = [selector(innerTuple) for innerTuple in client.data]
    counter = collections.Counter(relevantData)

    newClientData = []
    for counterKey in counter:
        innerList = [None] * (len(axesIndices) + 1)
        if len(axesIndices) > 1:
            # counterKey is a tuple ordered like axesIndices, so pair each
            # extracted value with its destination index explicitly.
            # (The previous code indexed counterKey by the destination
            # index itself, which was only correct when
            # axesIndices == (0, 1, ...).)
            for i, dependentIndex in enumerate(axesIndices):
                innerList[dependentIndex] = counterKey[i]
        else:
            # single axesIndices means the counterKey will not be a tuple:
            innerList[axesIndices[0]] = counterKey
        innerList[thisIndex] = counter[counterKey]
        newClientData.append(innerList)

    client.data = sorted(newClientData)
    return client.data
def names(self, values):
    '''
    Setter: replace all stored names with Text objects built from *values*.

    Raises MetadataException if *values* is not iterable.
    '''
    if not common.isIterable(values):
        raise exceptions21.MetadataException(
            '.names must be a list -- do you mean .name instead?')
    # discard the previous names entirely and rebuild from scratch
    self._names = [Text(value) for value in values]
def cacheMetadata(corpusNames=('local',), verbose=True):
    '''
    Rebuild the metadata cache.
    '''
    if not common.isIterable(corpusNames):
        corpusNames = [corpusNames]
    # drop each in-memory bundle so it will be rebuilt from disk
    for corpusName in corpusNames:
        corpora.Corpus._metadataBundles[corpusName] = None
    metadata.caching.cacheMetadata(corpusNames, verbose=verbose)
def cacheMetadata(corpusNames=('local',), verbose=True):
    '''
    Rebuild the metadata cache.
    '''
    if not common.isIterable(corpusNames):
        corpusNames = [corpusNames]
    for corpusName in corpusNames:
        # clear the cached bundle so the rebuild starts fresh
        corpora.Corpus._metadataBundles[corpusName] = None
    metadata.cacheMetadata(corpusNames, verbose=verbose)
def cacheMetadata(corpusNames=('local',), verbose=True):
    '''
    Rebuild the metadata cache.
    '''
    if not common.isIterable(corpusNames):
        corpusNames = [corpusNames]
    # todo -- create cache names for local corpora
    for corpusName in corpusNames:
        # invalidate the manager's cached bundle before rebuilding
        manager._metadataBundles[corpusName] = None
    metadata.caching.cacheMetadata(corpusNames, verbose=verbose)
def cacheMetadata(corpusNames=('local',), verbose=True):
    '''
    Rebuild the metadata cache.
    '''
    names = [corpusNames] if not common.isIterable(corpusNames) else corpusNames
    for name in names:
        # todo -- create cache names for local corpora
        manager._metadataBundles[name] = None
    metadata.caching.cacheMetadata(names, verbose=verbose)
def getWork(workName, movementNumber=None, fileExtensions=None):
    '''
    this parse method is called from `corpus.parse()` and does nothing
    differently from it.

    Searches all corpora for a file that matches the name and returns
    it parsed.
    '''
    if workName in (None, ''):
        raise CorpusException('a work name must be provided as an argument')
    if not common.isListLike(fileExtensions):
        fileExtensions = [fileExtensions]

    workNameJoined = workName
    if common.isIterable(workName):
        workNameJoined = os.path.sep.join(workName)

    # an .xml request may actually live on disk as a compressed .mxl file
    mxlWorkName = workName
    addXMLWarning = False
    if workNameJoined.endswith(".xml"):
        mxlWorkName = os.path.splitext(workNameJoined)[0] + ".mxl"
        addXMLWarning = True

    filePaths = None
    for corpusObject in iterateCorpora():
        matches = corpusObject.getWorkList(workName, movementNumber,
                                           fileExtensions)
        if not matches and addXMLWarning:
            matches = corpusObject.getWorkList(mxlWorkName, movementNumber,
                                               fileExtensions)
        if matches:
            filePaths = matches
            break

    if filePaths is None:
        warningMessage = 'Could not find a'
        if addXMLWarning:
            warningMessage += 'n xml or mxl'
        warningMessage += ' work that met this criterion: {0};'.format(
            workName)
        warningMessage += ' if you are searching for a file on disk, '
        warningMessage += 'use "converter" instead of "corpus".'
        raise CorpusException(warningMessage)
    if len(filePaths) == 1:
        return filePaths[0]
    return filePaths
def notes(self, newNotes):
    '''
    Sets notes to an iterable of Note or Unpitched objects
    '''
    if not common.isIterable(newNotes):
        raise TypeError('notes must be set with an iterable')
    for candidate in newNotes:
        if not isinstance(candidate, (note.Unpitched, note.Note)):
            raise TypeError(
                'every element of notes must be a note.Note or note.Unpitched object'
            )
    # all elements validated; replace the current contents wholesale
    self._notes.clear()
    self.add(newNotes, runSort=False)
def getWork(workName, movementNumber=None, fileExtensions=None):
    '''
    this parse method is called from `corpus.parse()` and does nothing
    differently from it.

    Searches all corpora for a file that matches the name and returns
    it parsed.
    '''
    if workName in (None, ''):
        raise CorpusException('a work name must be provided as an argument')
    if not common.isListLike(fileExtensions):
        fileExtensions = [fileExtensions]

    workNameJoined = workName
    if common.isIterable(workName):
        workNameJoined = os.path.sep.join(workName)

    mxlWorkName = workName
    addXMLWarning = False
    if workNameJoined.endswith(".xml"):
        # might be compressed MXL file
        mxlWorkName = os.path.splitext(workNameJoined)[0] + ".mxl"
        addXMLWarning = True

    for corpusObject in iterateCorpora():
        foundWorks = corpusObject.getWorkList(workName, movementNumber,
                                              fileExtensions)
        if not foundWorks and addXMLWarning:
            foundWorks = corpusObject.getWorkList(mxlWorkName, movementNumber,
                                                  fileExtensions)
        if foundWorks:
            # first corpus with any match wins
            if len(foundWorks) == 1:
                return foundWorks[0]
            return foundWorks

    # nothing matched in any corpus
    warningMessage = 'Could not find a'
    if addXMLWarning:
        warningMessage += 'n xml or mxl'
    warningMessage += ' work that met this criterion: {0};'.format(workName)
    warningMessage += ' if you are searching for a file on disk, '
    warningMessage += 'use "converter" instead of "corpus".'
    raise CorpusException(warningMessage)
def __init__(self, pitches=None):
    '''
    Store an iterable of pitches for transposition analysis.

    Raises TranspositionException when input is missing, not iterable,
    or empty.
    '''
    if pitches is None:
        raise TranspositionException('Must have some input')
    if not common.isIterable(pitches):
        raise TranspositionException('Must be a list or tuple')
    if not pitches:
        raise TranspositionException(
            'Must have at least one element in list')
    self.pitches = pitches
    # results computed lazily by other methods
    self.allTranspositions = None
    self.allNormalOrders = None
    self.distinctNormalOrders = None
def postProcessData(self):
    '''
    Replace client.data with a list that only includes each key once.

    Counts duplicate key combinations on the counted axes, merges their
    format dictionaries, and stores the count on this axis so each key
    combination appears exactly once.

    Returns the new, sorted client.data, or [] if there is no client.
    '''
    client = self.client
    if client is None:
        return []
    from operator import itemgetter

    countAxes = self.countAxes
    if not common.isIterable(countAxes):
        countAxes = (countAxes,)
    # map axis names (e.g. 'x', 'y') to positions within each data tuple
    axesIndices = tuple(self.axisDataMap[axisName] for axisName in countAxes)
    thisIndex = self.axisDataMap[self.axisName]

    selector = itemgetter(*axesIndices)
    relevantData = [selector(innerTuple) for innerTuple in client.data]

    # all the format dicts will soon be smooshed, so get all the data from it:
    tupleFormatDict = {}
    for dataPoint in client.data:
        dataIndex = selector(dataPoint)
        formatDict = dataPoint[-1]
        if not isinstance(formatDict, dict):
            continue
        if dataIndex in tupleFormatDict:
            # already saw one: merge into the stored dict
            tupleFormatDict[dataIndex].update(formatDict)
        else:
            tupleFormatDict[dataIndex] = formatDict

    counter = collections.Counter(relevantData)
    newClientData = []
    for counterKey in counter:
        innerList = [None] * (len(axesIndices) + 1)
        if len(axesIndices) > 1:
            # counterKey is ordered like axesIndices, so pair positions
            # explicitly; indexing counterKey by the destination index
            # (the previous behavior) was only correct for axes (0, 1, ...).
            for i, dependentIndex in enumerate(axesIndices):
                innerList[dependentIndex] = counterKey[i]
        else:
            # single axesIndices means the counterKey will not be a tuple:
            innerList[axesIndices[0]] = counterKey
        innerList[thisIndex] = counter[counterKey]
        formatDict = tupleFormatDict.get(counterKey, {})
        newClientData.append(tuple(innerList) + (formatDict,))

    client.data = sorted(newClientData)
    return client.data
def postProcessData(self):
    '''
    Replace client.data with a list that only includes each key once.

    For every distinct combination of values on the counted axes, emits a
    single data tuple carrying the occurrence count on this axis plus the
    merged format dictionaries of all contributing points.

    Returns the new, sorted client.data, or [] if there is no client.
    '''
    client = self.client
    if client is None:
        return []
    from operator import itemgetter

    countAxes = self.countAxes
    if not common.isIterable(countAxes):
        countAxes = (countAxes,)
    # positions within each data tuple corresponding to the counted axes
    axesIndices = tuple(self.axisDataMap[axisName] for axisName in countAxes)
    thisIndex = self.axisDataMap[self.axisName]

    selector = itemgetter(*axesIndices)
    relevantData = [selector(innerTuple) for innerTuple in client.data]

    # all the format dicts will soon be smooshed, so get all the data from it:
    tupleFormatDict = {}
    for dataPoint in client.data:
        dataIndex = selector(dataPoint)
        formatDict = dataPoint[-1]
        if not isinstance(formatDict, dict):
            continue
        if dataIndex in tupleFormatDict:
            # already saw one: merge the later dict into the earlier one
            tupleFormatDict[dataIndex].update(formatDict)
        else:
            tupleFormatDict[dataIndex] = formatDict

    counter = collections.Counter(relevantData)
    newClientData = []
    for counterKey in counter:
        innerList = [None] * (len(axesIndices) + 1)
        if len(axesIndices) > 1:
            # FIX: counterKey's elements are ordered like axesIndices, so
            # enumerate to pair value i with destination axesIndices[i];
            # the previous counterKey[dependentIndex] indexing only worked
            # when axesIndices happened to be (0, 1, ...).
            for i, dependentIndex in enumerate(axesIndices):
                innerList[dependentIndex] = counterKey[i]
        else:
            # single axesIndices means the counterKey will not be a tuple:
            innerList[axesIndices[0]] = counterKey
        innerList[thisIndex] = counter[counterKey]
        formatDict = tupleFormatDict.get(counterKey, {})
        newClientData.append(tuple(innerList) + (formatDict,))

    client.data = sorted(newClientData)
    return client.data
def __init__(self, srcStream, filterList=None, restoreActiveSites=True,
             activeInformation=None):
    '''
    Iterator state over a Stream's elements, optionally filtered.
    '''
    # sort first so that the captured element snapshot is in order
    if srcStream.isSorted is False and srcStream.autoSort:
        srcStream.sort()
    self.srcStream = srcStream
    self.index = 0

    # use .elements instead of ._elements/etc. so that it is sorted...
    self.srcStreamElements = srcStream.elements
    self.streamLength = len(self.srcStreamElements)
    self.elementsLength = len(self.srcStream._elements)
    self.sectionIndex = -1
    self.iterSection = '_elements'

    self.cleanupOnStop = False
    self.restoreActiveSites = restoreActiveSites
    self.overrideDerivation = None

    # normalize filterList into a mutable list
    if filterList is None:
        filterList = []
    elif not common.isIterable(filterList):
        filterList = [filterList]
    elif isinstance(filterList, (tuple, set)):
        filterList = list(filterList)  # mutable....
    # self.filters is a list of expressions that return True or False
    # for an element for whether it should be yielded.
    self.filters = filterList

    self._len = None
    self._matchingElements = None

    # keep track of where we are in the parse.
    # esp important for recursive streams...
    if activeInformation is not None:
        self.activeInformation = activeInformation
    else:
        self.activeInformation = {}
        self.updateActiveInformation()
def cacheMetadata(corpusNames=None, useMultiprocessing=True, verbose=False):
    '''
    Cache metadata from corpora in `corpusNames` as local cache files.

    Call as ``metadata.cacheMetadata()``
    '''
    from music21.corpus import manager
    localCorporaNames = manager.listLocalCorporaNames(skipNone=True)

    if corpusNames is None:
        # default: every named local corpus plus 'local' and 'core'
        corpusNames = localCorporaNames[:] + ['local', 'core']  # + 'virtual']
    if not common.isIterable(corpusNames):
        corpusNames = (corpusNames,)

    timer = common.Timer()
    timer.start()

    # store list of file paths that caused an error
    failingFilePaths = []

    # the core cache is based on local files stored in music21
    # (no-longer-existent virtual is on-line)
    for corpusName in corpusNames:
        corpusObject = manager.fromName(corpusName)
        failingFilePaths += corpusObject.cacheMetadata(
            useMultiprocessing, verbose, timer)

    def _report(message):
        # verbose mode surfaces messages as warnings; otherwise debug only
        if verbose is True:
            environLocal.warn(message)
        else:
            environLocal.printDebug(message)

    _report(f'cache: final writing time: {timer} seconds')
    for failingFilePath in failingFilePaths:
        _report(f'path failed to parse: {failingFilePath}')
def cacheMetadata(corpusNames=None, useMultiprocessing=True, verbose=False):
    '''
    Cache metadata from corpora in `corpusNames` as local cache files.

    Call as ``metadata.cacheMetadata()``
    '''
    from music21.corpus import manager
    localCorporaNames = manager.listLocalCorporaNames(skipNone=True)
    if corpusNames is None:
        corpusNames = localCorporaNames[:] + ['local', 'core']  # + 'virtual']
    if not common.isIterable(corpusNames):
        corpusNames = (corpusNames,)

    timer = common.Timer()
    timer.start()

    # collect every file path whose parse failed, across all corpora
    failingFilePaths = []
    # the core cache is based on local files stored in music21
    # (no-longer-existent virtual is on-line)
    for corpusName in corpusNames:
        corpusObject = manager.fromName(corpusName)
        failingFilePaths += corpusObject.cacheMetadata(
            useMultiprocessing, verbose, timer)

    # verbose mode surfaces messages as warnings; otherwise debug only
    reporter = environLocal.warn if verbose is True else environLocal.printDebug
    reporter('cache: final writing time: {0} seconds'.format(timer))
    for failingFilePath in failingFilePaths:
        reporter('path failed to parse: {0}'.format(failingFilePath))
def plotStream(streamObj, graphFormat=None, xValue=None, yValue=None,
               zValue=None, **keywords):
    '''
    Given a stream and any keyword configuration arguments, create and
    display a plot.  (Plots require matplotlib to be installed.)

    `graphFormat` names the type of Graph (e.g. scatter, histogram,
    colorGrid); `xValue`/`yValue`/`zValue` name what is graphed on each
    axis (e.g. quarterLength, pitch, pitchClass).  See the
    :class:`~music21.graph.plot.PlotStream` subclasses (HistogramPitchSpace,
    ScatterPitchClassOffset, HorizontalBarPitchSpaceOffset, WindowedKey,
    Dolan, etc.) for the available plots.

    Returns the plot object created (the last one, when several match),
    or None if no plot was made.
    '''
    plotsToMake = findPlot.getPlotsToMake(graphFormat, xValue, yValue, zValue)
    # environLocal.printDebug(['plotClassName found', plotsToMake])
    madePlot = None
    for plotInfo in plotsToMake:
        # each entry is either a bare plot class or a (class, axis-dict) pair
        if common.isIterable(plotInfo):
            plotClassName, plotDict = plotInfo
        else:
            plotClassName, plotDict = plotInfo, None
        madePlot = plotClassName(streamObj, **keywords)
        if plotDict:
            # install the requested axis classes as axisX, axisY, ...
            for axisName, axisClass in plotDict.items():
                setattr(madePlot, 'axis' + axisName.upper(),
                        axisClass(madePlot, axisName))
        madePlot.run()
    if madePlot:
        return madePlot  # just first one...
def fontFamily(self, newFamily):
    '''
    Setter: store a list of font-family names; a non-iterable (i.e. a
    comma-separated string) is split and whitespace-trimmed first.
    '''
    if not common.isIterable(newFamily):
        # split a comma-separated string into trimmed family names
        newFamily = [familyName.strip() for familyName in newFamily.split(',')]
    self._fontFamily = newFamily
def search(self, query=None, field=None, fileExtensions=None, **kwargs):
    r'''
    Perform search, on all stored metadata, permit regular expression
    matching.

    `query` is matched against `field`; alternatively keyword arguments
    such as ``composer='bach'`` may be given (each remaining keyword is
    applied by a recursive search over the intermediate result).

    `fileExtensions`, if given, restricts results to matching file
    suffixes; an extension ending in ``xml`` also admits ``.mxl``/``.mx``
    files.

    Returns a new MetadataBundle of the matching entries, sorted by
    source path.  Raises MetadataBundleException when no query, field,
    or keyword constraints are supplied.
    '''
    if fileExtensions is not None and not common.isIterable(fileExtensions):
        fileExtensions = [fileExtensions]

    newMetadataBundle = MetadataBundle()
    if query is None and field is None:
        if not kwargs:
            raise MetadataBundleException('Query cannot be empty')
        # consume one keyword pair as the field/query for this pass
        field, query = kwargs.popitem()

    for key, metadataEntry in self._metadataEntries.items():
        # ignore stub entries
        if metadataEntry.metadata is None:
            continue
        sp = metadataEntry.sourcePath
        if not isinstance(sp, pathlib.Path):
            sp = pathlib.Path(sp)
        if not metadataEntry.search(query, field)[0]:
            continue
        include = False
        if fileExtensions is not None:
            for fileExtension in fileExtensions:
                # normalize bare extensions to dotted form
                if fileExtension and fileExtension[0] != '.':
                    fileExtension = '.' + fileExtension
                if sp.suffix == fileExtension:
                    include = True
                    break
                elif (fileExtension.endswith('xml')
                        and sp.suffix in ('.mxl', '.mx')):
                    include = True
                    break
        else:
            include = True
        if include and key not in newMetadataBundle._metadataEntries:
            newMetadataBundle._metadataEntries[key] = metadataEntry

    newMetadataBundle._metadataEntries = OrderedDict(
        sorted(newMetadataBundle._metadataEntries.items(),
               key=lambda mde: mde[1].sourcePath))
    if kwargs:
        # apply remaining keyword constraints by recursive intersection
        return newMetadataBundle.search(**kwargs)
    return newMetadataBundle
def makeRests(s, refStreamOrTimeRange=None, fillGaps=False,
              timeRangeFromBarDuration=False, inPlace=True):
    '''
    Given a Stream with an offset not equal to zero, fill with one Rest
    preceding this offset.  Can be called on any Stream, a Measure alone,
    or a Measure that contains Voices.

    If `refStreamOrTimeRange` is provided as a Stream, its min and max
    offsets define the time range to fill; if it is a list, the list is
    taken as (minimum, maximum) offsets.

    If `fillGaps` is True, rests are also created in any time regions
    that have no active elements.

    If `timeRangeFromBarDuration` is True and the calling Stream is a
    Measure with a TimeSignature, the time range comes from the
    .barDuration property.

    If `inPlace` is True the Stream itself is modified and nothing is
    returned; otherwise a modified deepcopy is returned.

    OMIT_FROM_DOCS
    TODO: default inPlace = False
    '''
    from music21 import stream
    returnObj = s if inPlace else copy.deepcopy(s)

    # determine the target low/high offsets the rests must reach.
    # NOTE(review): when refStreamOrTimeRange is neither None, a Stream,
    # nor iterable, the targets stay unbound and a NameError follows --
    # preserved from the original; confirm whether that input is possible.
    if refStreamOrTimeRange is None:  # use local
        oLowTarget = 0
        if timeRangeFromBarDuration and returnObj.isMeasure:
            # NOTE: this will raise an exception if no meter can be found
            oHighTarget = returnObj.barDuration.quarterLength
        else:
            oHighTarget = returnObj.highestTime
    elif isinstance(refStreamOrTimeRange, stream.Stream):
        oLowTarget = refStreamOrTimeRange.lowestOffset
        oHighTarget = refStreamOrTimeRange.highestTime
    # treat as a list
    elif common.isIterable(refStreamOrTimeRange):
        oLowTarget = min(refStreamOrTimeRange)
        oHighTarget = max(refStreamOrTimeRange)

    # pad each voice separately when voices are present
    if returnObj.hasVoices():
        bundle = returnObj.voices
    else:
        bundle = [returnObj]

    for v in bundle:
        v.elementsChanged()  # required to get correct offset times
        oLow = v.lowestOffset
        oHigh = v.highestTime

        # create rest from start to end
        qLen = oLow - oLowTarget
        if qLen > 0:
            r = note.Rest()
            r.duration.quarterLength = qLen
            # place at oLowTarget to reach to oLow
            v._insertCore(oLowTarget, r)

        # create rest from end to highest
        qLen = oHighTarget - oHigh
        if qLen > 0:
            r = note.Rest()
            r.duration.quarterLength = qLen
            # place at oHigh to reach to oHighTarget
            v._insertCore(oHigh, r)

        v.elementsChanged()  # must update otherwise might add double r
        if fillGaps:
            gapStream = v.findGaps()
            if gapStream is not None:
                for e in gapStream:
                    r = note.Rest()
                    r.duration.quarterLength = e.duration.quarterLength
                    v._insertCore(e.offset, r)
        v.elementsChanged()
        # NOTE: this sorting has been found to be necessary, as otherwise
        # the resulting Stream is not sorted and does not get sorted in
        # preparing musicxml output
        if v.autoSort:
            v.sort()

    if inPlace is not True:
        return returnObj
def listOfTimespanTreesByClass(inputStream, currentParentage=None,
                               initialOffset=0, flatten=False,
                               classLists=None):
    r'''
    Recurses through `inputStream`, and constructs TimespanTrees for each
    encountered substream and ElementTimespans for each encountered
    non-stream element.

    `classLists` should be a sequence of valid inputs for
    `isClassOrSubclass()`.  One TimespanTree will be constructed for each
    element in `classLists`, in a single optimized pass through the
    `inputStream`.  This is used internally by `streamToTimespanTree`.

    Returns the list of TimespanTrees (a single all-collecting tree when
    `classLists` is empty or None).
    '''
    if currentParentage is None:
        currentParentage = (inputStream,)
        # fix non-tuple classLists -- first call only...
        if classLists:
            for i, cl in enumerate(classLists):
                if not common.isIterable(cl):
                    classLists[i] = (cl,)

    lastParentage = currentParentage[-1]

    if classLists is None or len(classLists) == 0:
        outputCollections = [trees.TimespanTree(source=lastParentage)]
        classLists = []
    else:
        outputCollections = [trees.TimespanTree(source=lastParentage)
                             for _ in classLists]

    # do this to avoid munging activeSites
    inputStreamElements = inputStream._elements[:] + inputStream._endElements
    for element in inputStreamElements:
        offset = element.getOffsetBySite(lastParentage) + initialOffset
        wasStream = False

        if (element.isStream
                and not element.isSpanner
                and not element.isVariant):
            # recurse into the substream and merge its trees into ours
            localParentage = currentParentage + (element,)
            containedTimespanTrees = listOfTimespanTreesByClass(
                element,
                currentParentage=localParentage,
                initialOffset=offset,
                flatten=flatten,
                classLists=classLists,
            )
            for outputTSC, subTSC in zip(outputCollections,
                                         containedTimespanTrees):
                if flatten is not False:  # True or semiFlat
                    outputTSC.insert(subTSC[:])
                else:
                    outputTSC.insert(subTSC)
            wasStream = True

        if not wasStream or flatten == 'semiFlat':
            parentOffset = initialOffset
            parentEndTime = (initialOffset
                             + lastParentage.duration.quarterLength)
            endTime = offset + element.duration.quarterLength

            for classBasedTSC, classList in zip(outputCollections,
                                                classLists):
                if classList and not element.isClassOrSubclass(classList):
                    continue
                elementTimespan = spans.ElementTimespan(
                    element=element,
                    parentage=tuple(reversed(currentParentage)),
                    parentOffset=parentOffset,
                    parentEndTime=parentEndTime,
                    offset=offset,
                    endTime=endTime,
                )
                classBasedTSC.insert(elementTimespan)

    return outputCollections
def getRealized(self, useDynamicContext=True, useVelocity=True,
                useArticulations=True, baseLevel=0.5, clip=True):
    '''
    Get a realized unit-interval scalar for this Volume, to be applied to
    whatever dynamic range the output medium provides.

    `baseLevel` is a middle value between 0 and 1 that all scalars
    modify; it also becomes the default for unspecified dynamics.  When
    scalars (between 0 and 1) are applied, their values are doubled, so
    that mid-values (around .5, which become 1) make no change.

    `useDynamicContext` may be True (perform a context search for a
    Dynamic), False, or a Dynamic object to use directly.
    `useArticulations` may be True (use articulations found on the
    client), False, or a list of articulations to apply instead.
    `useVelocity` applies the stored velocity; `self.velocityIsRelative`
    determines whether that velocity is scaled by the dynamic context or
    taken as an absolute level.

    If `clip` is True the result is limited to the range 0..1.
    '''
    # velocityIsRelative might be best set at import. e.g., from MIDI,
    # velocityIsRelative is False, but in other applications, it may not be
    val = baseLevel
    dm = None  # no dynamic mark

    # velocity is checked first; the range between 0 and 1 is doubled,
    # to 0 to 2. a velocityScalar of .7 thus scales the base value of
    # .5 by 1.4 to become .7
    if useVelocity:
        if self._velocity is not None:
            if not self.velocityIsRelative:
                # an absolute velocity fully determines the output,
                # independent of anything else
                val = self.velocityScalar
            else:
                val = val * (self.velocityScalar * 2.0)
        else:
            # this value provides a good default velocity, as .5 is low;
            # this is not a scalar application but a shift. target: 0.70866
            val += 0.20866

    # only change the val from here if velocity is relative
    if self.velocityIsRelative:
        if useDynamicContext is not False:
            if (hasattr(useDynamicContext, 'classes')
                    and 'Dynamic' in useDynamicContext.classes):
                dm = useDynamicContext  # it is a dynamic
            elif self.client is not None:
                dm = self.getDynamicContext()  # dm may be None
            else:
                environLocal.printDebug([
                    'getRealized():',
                    'useDynamicContext is True but no dynamic supplied or found in context'
                ])
        if dm is not None:
            # double the scalar (so range is between 0 and 2) and scale
            # the current val (around the base)
            val = val * (dm.volumeScalar * 2.0)

    # useArticulations can be a list of 1 or more articulation objects
    # as well as True/False
    if useArticulations is not False:
        am = None
        if common.isIterable(useArticulations):
            am = useArticulations
        elif (hasattr(useArticulations, 'classes')
                and 'Articulation' in useArticulations.classes):
            am = [useArticulations]  # place in a list
        elif self.client is not None:
            am = self.client.articulations
        if am is not None:
            for a in am:
                # add in volume shift for all articulations
                val += a.volumeShift

    if clip:  # limit between 0 and 1
        if val > 1:
            val = 1.0
        elif val < 0:
            val = 0.0

    # always update cached result each time this is called
    self._cachedRealized = val
    return val
def parseInputToPrimitive(self, inpVal):
    '''
    Determines what format a given input is in and returns a value in that
    format.

    First checks if it is the name of a variable defined in the
    parsedDataDict or the name of an allowable function. In either of these
    cases, it will return the actual value of the data or the actual
    function.

    Next, it will check if the string is an int, float, boolean, or none,
    returning the appropriate value. If it is a quoted string then it will
    remove the quotes on the ends and return it as a string. If it has
    square braces indicating a list, the inner elements will be parsed
    using this same function recursively. (Note that recursive lists like
    [1, 2, [3, 4]] are not yet supported.)

    If the input corresponds to none of these types, it is returned as an
    escaped string.

    >>> agenda = alpha.webapps.Agenda()
    >>> agenda.addData("a",2)
    >>> agenda.addData("b",[1, 2, 3],"list")

    >>> processor = alpha.webapps.CommandProcessor(agenda)
    >>> processor.parseInputToPrimitive("a")
    2
    >>> processor.parseInputToPrimitive("b")
    [1, 2, 3]
    >>> processor.parseInputToPrimitive("1.0")
    1.0
    >>> processor.parseInputToPrimitive("2")
    2
    >>> processor.parseInputToPrimitive("True")
    True
    >>> processor.parseInputToPrimitive("False")
    False
    >>> processor.parseInputToPrimitive("None") == None
    True
    >>> processor.parseInputToPrimitive("'hi'")
    'hi'
    >>> processor.parseInputToPrimitive("'Madam I\'m Adam'")
    "Madam I'm Adam"
    >>> processor.parseInputToPrimitive("[1, 2, 3]")
    [1, 2, 3]
    >>> processor.parseInputToPrimitive("[1, 'hi', 3.0, True, a, justAStr]")
    [1, 'hi', 3.0, True, 2, 'justAStr']
    '''
    returnVal = None

    # numbers and real (non-string) iterables pass straight through
    if common.isNum(inpVal):
        return inpVal
    if common.isIterable(inpVal):
        return [self.parseInputToPrimitive(element) for element in inpVal]

    if not isinstance(inpVal, str):
        self.recordError("Unknown type for parseInputToPrimitive " + str(inpVal))
        # BUG FIX: previously execution fell through and crashed on
        # strVal.strip() below; return the value unchanged instead.
        return inpVal

    strVal = inpVal.strip()  # removes whitespace on ends
    if not strVal:
        # BUG FIX: an empty string used to raise IndexError on the
        # strVal[0] quote/bracket checks below.
        return strVal

    if strVal in self.parsedDataDict:
        # Used to specify data via variable name
        returnVal = self.parsedDataDict[strVal]
    elif strVal in availableFunctions:
        # Used to specify a function via its name
        returnVal = strVal
    else:
        try:
            returnVal = int(strVal)
        except ValueError:
            try:
                returnVal = float(strVal)
            except ValueError:
                if strVal == "True":
                    returnVal = True
                elif strVal == "None":
                    returnVal = None
                elif strVal == "False":
                    returnVal = False
                elif strVal[0] == '"' and strVal[-1] == '"':
                    # Double Quoted String
                    returnVal = strVal[1:-1]  # remove quotes
                elif strVal[0] == "'" and strVal[-1] == "'":
                    # Single Quoted String
                    returnVal = strVal[1:-1]  # remove quotes
                elif strVal[0] == "[" and strVal[-1] == "]":
                    # List: remove [] and split by commas, parsing each
                    # element recursively
                    listElements = strVal[1:-1].split(",")
                    returnVal = [self.parseInputToPrimitive(element)
                                 for element in listElements]
                else:
                    # unknown token: escape it so it is safe to echo in HTML
                    returnVal = cgiescape(str(strVal))
    return returnVal
def music21ModWSGIFeatureApplication(environ, start_response):
    '''
    Music21 webapp to demonstrate processing of a zip file containing scores.
    Will be moved and integrated into __init__.py upon developing a
    standardized URL format as application that can perform variety of
    commands on user-uploaded files.

    Returns a one-element list containing the response body, per WSGI
    (PEP 3333); `start_response` is always called before returning.
    '''
    status = '200 OK'

    pathInfo = environ['PATH_INFO']
    # Contents of path after mount point of wsgi app but before question mark

    if pathInfo == '/uploadForm':
        output = getUploadForm()
        response_headers = [('Content-type', 'text/html'),
                            ('Content-Length', str(len(output)))]
        start_response(status, response_headers)
        return [output]

    formFields = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)

    # Check if form data is present. If not found, display error
    try:
        unused_subUploadFormFile = formFields['subUploadForm']
    except KeyError:  # BUG FIX: was a bare except; only a missing field is expected
        html = """
<html >
<body style='font-family:calibri' bgcolor='#EEE' onLoad="toggleExtractors('m21')">
<table border=0 width='100%'>
<tr><td align='center'>
<table border=0 width='500px' cellpadding='10px' style='background-color:#FFF'>
<tr><td align='left'>
<h1>Error:</h1>
<p>Form information not found</p>
<p><a href='/music21/featureapp/uploadForm'>Try Again</a></p>
</td></tr></table>
</td></tr></table>
</body></html>
"""
        response_headers = [('Content-type', 'text/html'),
                            ('Content-Length', str(len(html)))]
        start_response(status, response_headers)
        return [html]

    # Get file from POST
    uploadedFile = formFields['fileupload'].file
    filename = formFields['fileupload'].filename
    uploadType = formFields['fileupload'].type

    # Check if filename is empty - display no file chosen error
    if filename == "":
        html = """
<html >
<body style='font-family:calibri' bgcolor='#EEE' onLoad="toggleExtractors('m21')">
<table border=0 width='100%'>
<tr><td align='center'>
<table border=0 width='500px' cellpadding='10px' style='background-color:#FFF'>
<tr><td align='left'>
<h1>Music 21 Feature Extraction:</h1>
<p><b>Error:</b> No file selected</p>
<p><a href='/music21/featureapp/uploadForm'>Try Again</a></p>
</td></tr></table>
</td></tr></table>
</body></html>
"""
        response_headers = [('Content-type', 'text/html'),
                            ('Content-Length', str(len(html)))]
        start_response(status, response_headers)
        return [html]

    # Check if uploadType is zip - display wrong-format error
    if uploadType != "application/zip":
        html = """
<html >
<body style='font-family:calibri' bgcolor='#EEE' onLoad="toggleExtractors('m21')">
<table border=0 width='100%'>
<tr><td align='center'>
<table border=0 width='500px' cellpadding='10px' style='background-color:#FFF'>
<tr><td align='left'>
<h1>Music 21 Feature Extraction:</h1>
<p><b>Error:</b> File not in .zip format</p>
<p><a href='/music21/featureapp/uploadForm'>Try Again</a></p>
</td></tr></table>
</td></tr></table>
</body></html>
"""
        response_headers = [('Content-type', 'text/html'),
                            ('Content-Length', str(len(html)))]
        start_response(status, response_headers)
        return [html]

    # Setup Feature Extractors and Data Set
    ds = features.DataSet(classLabel='Class')
    featureIDList = list()

    # Check if features have been selected. Else display error
    try:
        unused_featureFile = formFields['features']
    except KeyError:  # BUG FIX: was a bare except
        html = """
<html ><body>
<h1>Error:</h1>
<p>No extractors selected</p>
<p><a href='/music21/featureapp/uploadForm'>try again</a></p>
</body></html>
"""
        # BUG FIX: previously returned a bare string without calling
        # start_response, which is not a valid WSGI response.
        response_headers = [('Content-type', 'text/html'),
                            ('Content-Length', str(len(html)))]
        start_response(status, response_headers)
        return [html]

    if common.isIterable(formFields['features']):
        print(formFields['features'])  # NOTE(review): debug leftover
        for featureId in formFields['features']:
            featureIDList.append(str(featureId.value))
    else:
        featureIDList.append(formFields['features'].value)

    fes = features.extractorsById(featureIDList)
    ds.addFeatureExtractors(fes)

    # Create ZipFile Object
    zipf = zipfile.ZipFile(uploadedFile, 'r')

    # Loop Through Files
    for scoreFileInfo in zipf.infolist():
        filePath = scoreFileInfo.filename

        # Skip Directories
        if filePath.endswith('/'):
            continue
        scoreFile = zipf.open(filePath)

        # Use Music21's converter to parse file
        parsedFile = idAndParseFile(scoreFile, filePath)

        # If valid music21 format, add to data set
        if parsedFile is not None:
            # Split into directory structure and filename
            pathPartitioned = filePath.rpartition('/')
            directory = pathPartitioned[0]
            filename = pathPartitioned[2]
            if directory == "":
                directory = 'uncategorized'
            ds.addData(parsedFile, classValue=directory, id=filename)

    # Process data set
    ds.process()

    # Get output format from POST and set appropriate output:
    outputFormatID = formFields['outputformat'].value
    if outputFormatID == CSV_OUTPUT_ID:
        output = features.OutputCSV(ds).getString()
    elif outputFormatID == ORANGE_OUTPUT_ID:
        output = features.OutputTabOrange(ds).getString()
    elif outputFormatID == ARFF_OUTPUT_ID:
        output = features.OutputARFF(ds).getString()
    else:
        output = "invalid output format"

    response_headers = [('Content-type', 'text/plain'),
                        ('Content-Length', str(len(output)))]
    start_response(status, response_headers)
    return [output]
def listOfTreesByClass(inputStream,
                       currentParentage=None,
                       initialOffset=0.0,
                       flatten=False,
                       classLists=None,
                       useTimespans=False):
    r'''
    Recurses through `inputStream`, and constructs TimespanTrees for each
    encountered substream and PitchedTimespan for each encountered
    non-stream element.

    `classLists` should be a sequence of valid inputs for
    `isClassOrSubclass()`. One TimespanTree will be constructed for each
    element in `classLists`, in a single optimized pass through the
    `inputStream`.

    This is used internally by `streamToTimespanTree`.

    >>> score = tree.makeExampleScore()

    Get everything in the score

    >>> treeList = tree.fromStream.listOfTreesByClass(score, useTimespans=True)
    >>> treeList
    [<TimespanTree {2} (-inf to inf) <music21.stream.Score ...>>]
    >>> tl0 = treeList[0]
    >>> for t in tl0:
    ...     print(t)
    <TimespanTree {4} (-inf to inf) <music21.stream.Part ...>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 1 offset=0.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 2 offset=2.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 3 offset=4.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 4 offset=6.0>>
    <TimespanTree {4} (-inf to inf) <music21.stream.Part ...>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 1 offset=0.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 2 offset=2.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 3 offset=4.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 4 offset=6.0>>

    Now filter the Notes and the Clefs & TimeSignatures of the score
    (flattened) into a list of two TimespanTrees

    >>> classLists = ['Note', ('Clef', 'TimeSignature')]
    >>> treeList = tree.fromStream.listOfTreesByClass(score, useTimespans=True,
    ...             classLists=classLists, flatten=True)
    >>> treeList
    [<TimespanTree {12} (0.0 to 8.0) <music21.stream.Score ...>>,
     <TimespanTree {4} (0.0 to 0.0) <music21.stream.Score ...>>]
    '''
    if currentParentage is None:
        currentParentage = (inputStream,)
    # normalize non-tuple classList entries -- relevant on the first call.
    # BUG FIX: the old code assigned into classLists in place, which
    # mutated the caller's list and raised TypeError when a tuple was
    # passed; build a new list instead.
    if classLists:
        classLists = [cl if common.isIterable(cl) else (cl,)
                      for cl in classLists]

    lastParentage = currentParentage[-1]

    if useTimespans:
        treeClass = timespanTree.TimespanTree
    else:
        treeClass = trees.OffsetTree

    if classLists is None or not classLists:
        outputTrees = [treeClass(source=lastParentage)]
        classLists = []
    else:
        outputTrees = [treeClass(source=lastParentage) for _ in classLists]

    # copy the element lists to avoid munging activeSites
    inputStreamElements = inputStream._elements[:] + inputStream._endElements

    for element in inputStreamElements:
        offset = lastParentage.elementOffset(element) + initialOffset
        wasStream = False

        if element.isStream:
            # recurse into the substream, collecting one sub-tree per
            # requested class list
            localParentage = currentParentage + (element,)
            containedTrees = listOfTreesByClass(element,
                                                currentParentage=localParentage,
                                                initialOffset=offset,
                                                flatten=flatten,
                                                classLists=classLists,
                                                useTimespans=useTimespans)
            for outputTree, subTree in zip(outputTrees, containedTrees):
                if flatten is not False:  # True or 'semiFlat'
                    outputTree.insert(subTree[:])
                else:
                    outputTree.insert(subTree.lowestPosition(), subTree)
            wasStream = True

        if not wasStream or flatten == 'semiFlat':
            parentOffset = initialOffset
            parentEndTime = initialOffset + lastParentage.duration.quarterLength
            endTime = offset + element.duration.quarterLength

            for classBasedTree, classList in zip(outputTrees, classLists):
                if classList and not element.isClassOrSubclass(classList):
                    continue
                if useTimespans:
                    # Key objects have pitches but are not pitched events
                    if (hasattr(element, 'pitches')
                            and 'music21.key.Key' not in element.classSet):
                        spanClass = spans.PitchedTimespan
                    else:
                        spanClass = spans.ElementTimespan
                    elementTimespan = spanClass(
                        element=element,
                        parentage=tuple(reversed(currentParentage)),
                        parentOffset=parentOffset,
                        parentEndTime=parentEndTime,
                        offset=offset,
                        endTime=endTime)
                    classBasedTree.insert(elementTimespan)
                else:
                    classBasedTree.insert(offset, element)

    return outputTrees
def listOfTimespanTreesByClass(
    inputStream,
    currentParentage=None,
    initialOffset=0,
    flatten=False,
    classLists=None,
):
    r'''
    Recurses through `inputStream`, and constructs TimespanTrees for each
    encountered substream and ElementTimespans for each encountered
    non-stream element.

    `classLists` should be a sequence of valid inputs for
    `isClassOrSubclass()`. One TimespanTree will be constructed for each
    element in `classLists`, in a single optimized pass through the
    `inputStream`.

    This is used internally by `streamToTimespanTree`.

    >>> score = timespans.makeExampleScore()

    Get everything in the score

    >>> trees = timespans.listOfTimespanTreesByClass(score)
    >>> trees
    [<TimespanTree {2} (-inf to inf) <music21.stream.Score ...>>]
    >>> for t in trees[0]:
    ...     print(t)
    <TimespanTree {4} (-inf to inf) <music21.stream.Part ...>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 1 offset=0.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 2 offset=2.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 3 offset=4.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 4 offset=6.0>>
    <TimespanTree {4} (-inf to inf) <music21.stream.Part ...>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 1 offset=0.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 2 offset=2.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 3 offset=4.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 4 offset=6.0>>

    Now filter the Notes and the Clefs & TimeSignatures of the score
    (flattened) into a list of two timespans

    >>> classLists = ['Note', ('Clef', 'TimeSignature')]
    >>> trees = timespans.listOfTimespanTreesByClass(score, classLists=classLists, flatten=True)
    >>> trees
    [<TimespanTree {12} (0.0 to 8.0) <music21.stream.Score ...>>,
     <TimespanTree {4} (0.0 to 0.0) <music21.stream.Score ...>>]
    '''
    if currentParentage is None:
        currentParentage = (inputStream,)
    # normalize non-tuple classList entries -- relevant on the first call.
    # BUG FIX: the old code assigned into classLists in place, which
    # mutated the caller's list and raised TypeError when a tuple was
    # passed; build a new list instead.
    if classLists:
        classLists = [cl if common.isIterable(cl) else (cl,)
                      for cl in classLists]

    lastParentage = currentParentage[-1]

    if classLists is None or not classLists:
        outputCollections = [trees.TimespanTree(source=lastParentage)]
        classLists = []
    else:
        outputCollections = [
            trees.TimespanTree(source=lastParentage) for _ in classLists
        ]

    # copy the element lists to avoid munging activeSites
    inputStreamElements = inputStream._elements[:] + inputStream._endElements

    for element in inputStreamElements:
        offset = element.getOffsetBySite(lastParentage) + initialOffset
        wasStream = False

        # Spanners and Variants are Streams but are not recursed into
        if element.isStream and \
                not element.isSpanner and \
                not element.isVariant:
            localParentage = currentParentage + (element,)
            containedTimespanTrees = listOfTimespanTreesByClass(
                element,
                currentParentage=localParentage,
                initialOffset=offset,
                flatten=flatten,
                classLists=classLists,
            )
            for outputTSC, subTSC in zip(outputCollections,
                                         containedTimespanTrees):
                if flatten is not False:  # True or 'semiFlat'
                    outputTSC.insert(subTSC[:])
                else:
                    outputTSC.insert(subTSC)
            wasStream = True

        if not wasStream or flatten == 'semiFlat':
            parentOffset = initialOffset
            parentEndTime = initialOffset + lastParentage.duration.quarterLength
            endTime = offset + element.duration.quarterLength

            for classBasedTSC, classList in zip(outputCollections, classLists):
                if classList and not element.isClassOrSubclass(classList):
                    continue
                elementTimespan = spans.ElementTimespan(
                    element=element,
                    parentage=tuple(reversed(currentParentage)),
                    parentOffset=parentOffset,
                    parentEndTime=parentEndTime,
                    offset=offset,
                    endTime=endTime,
                )
                classBasedTSC.insert(elementTimespan)

    return outputCollections
def listOfTreesByClass(inputStream,
                       currentParentage=None,
                       initialOffset=0.0,
                       flatten=False,
                       classLists=None,
                       useTimespans=False):
    r'''
    Recurses through `inputStream`, and constructs TimespanTrees for each
    encountered substream and PitchedTimespan for each encountered
    non-stream element.

    `classLists` should be a sequence of valid inputs for
    `isClassOrSubclass()`. One TimespanTree will be constructed for each
    element in `classLists`, in a single optimized pass through the
    `inputStream`.

    This is used internally by `streamToTimespanTree`.

    >>> score = tree.makeExampleScore()

    Get everything in the score

    >>> treeList = tree.fromStream.listOfTreesByClass(score, useTimespans=True)
    >>> treeList
    [<TimespanTree {2} (-inf to inf) <music21.stream.Score ...>>]
    >>> tl0 = treeList[0]
    >>> for t in tl0:
    ...     print(t)
    <TimespanTree {4} (-inf to inf) <music21.stream.Part ...>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 1 offset=0.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 2 offset=2.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 3 offset=4.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 4 offset=6.0>>
    <TimespanTree {4} (-inf to inf) <music21.stream.Part ...>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 1 offset=0.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 2 offset=2.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 3 offset=4.0>>
    <TimespanTree {0} (-inf to inf) <music21.stream.Measure 4 offset=6.0>>

    Now filter the Notes and the Clefs & TimeSignatures of the score
    (flattened) into a list of two TimespanTrees

    >>> classLists = ['Note', ('Clef', 'TimeSignature')]
    >>> treeList = tree.fromStream.listOfTreesByClass(score, useTimespans=True,
    ...             classLists=classLists, flatten=True)
    >>> treeList
    [<TimespanTree {12} (0.0 to 8.0) <music21.stream.Score ...>>,
     <TimespanTree {4} (0.0 to 0.0) <music21.stream.Score ...>>]
    '''
    if currentParentage is None:
        currentParentage = (inputStream,)
    # normalize non-tuple classList entries -- relevant on the first call.
    # BUG FIX: the old code assigned into classLists in place, which
    # mutated the caller's list and raised TypeError when a tuple was
    # passed; build a new list instead.
    if classLists:
        classLists = [cl if common.isIterable(cl) else (cl,)
                      for cl in classLists]

    lastParentage = currentParentage[-1]

    if useTimespans:
        treeClass = timespanTree.TimespanTree
    else:
        treeClass = trees.OffsetTree

    if classLists is None or not classLists:
        outputTrees = [treeClass(source=lastParentage)]
        classLists = []
    else:
        outputTrees = [treeClass(source=lastParentage) for _ in classLists]

    # copy the element lists to avoid munging activeSites
    inputStreamElements = inputStream._elements[:] + inputStream._endElements

    for element in inputStreamElements:
        offset = lastParentage.elementOffset(element) + initialOffset
        wasStream = False

        if element.isStream:
            # recurse into the substream, collecting one sub-tree per
            # requested class list
            localParentage = currentParentage + (element,)
            containedTrees = listOfTreesByClass(element,
                                                currentParentage=localParentage,
                                                initialOffset=offset,
                                                flatten=flatten,
                                                classLists=classLists,
                                                useTimespans=useTimespans)
            for outputTree, subTree in zip(outputTrees, containedTrees):
                if flatten is not False:  # True or 'semiFlat'
                    outputTree.insert(subTree[:])
                else:
                    outputTree.insert(subTree.lowestPosition(), subTree)
            wasStream = True

        if not wasStream or flatten == 'semiFlat':
            parentOffset = initialOffset
            parentEndTime = initialOffset + lastParentage.duration.quarterLength
            endTime = offset + element.duration.quarterLength

            for classBasedTree, classList in zip(outputTrees, classLists):
                if classList and not element.isClassOrSubclass(classList):
                    continue
                if useTimespans:
                    # Key objects have pitches but are not pitched events
                    if (hasattr(element, 'pitches')
                            and 'music21.key.Key' not in element.classSet):
                        spanClass = spans.PitchedTimespan
                    else:
                        spanClass = spans.ElementTimespan
                    elementTimespan = spanClass(
                        element=element,
                        parentage=tuple(reversed(currentParentage)),
                        parentOffset=parentOffset,
                        parentEndTime=parentEndTime,
                        offset=offset,
                        endTime=endTime)
                    classBasedTree.insert(elementTimespan)
                else:
                    classBasedTree.insert(offset, element)

    return outputTrees
def getWorkList(
    self,
    workName,
    movementNumber=None,
    fileExtensions=None,
):
    r'''
    Search the corpus and return a list of filenames of works, always in
    a list.

    If no matches are found, an empty list is returned.

    >>> from music21 import corpus
    >>> coreCorpus = corpus.corpora.CoreCorpus()

    # returns 1 even though there is a '.mus' file, which cannot be read...

    >>> len(coreCorpus.getWorkList('cpebach/h186'))
    1
    >>> len(coreCorpus.getWorkList('cpebach/h186', None, '.xml'))
    1

    >>> len(coreCorpus.getWorkList('schumann_clara/opus17', 3))
    1
    >>> len(coreCorpus.getWorkList('schumann_clara/opus17', 2))
    0

    Make sure that 'verdi' just gets the single Verdi piece and not the
    Monteverdi pieces:

    >>> len(coreCorpus.getWorkList('verdi'))
    1
    '''
    if not common.isListLike(fileExtensions):
        fileExtensions = [fileExtensions]
    paths = self.getPaths(fileExtensions)
    results = []

    workPath = pathlib.PurePath(workName)
    workPosix = workPath.as_posix().lower()
    # find all matches for the work name
    # TODO: this should match by path component, not just substring
    for path in paths:
        if workPosix in path.as_posix().lower():
            results.append(path)

    if results:
        # more than one matched...use more stringent criterion:
        # must have a slash before the name
        previousResults = results
        results = []
        for path in previousResults:
            if '/' + workPosix in path.as_posix().lower():
                results.append(path)
        if not results:
            results = previousResults

    movementResults = []
    if movementNumber is not None and results:
        # store one or more possible mappings of movement number
        movementStrList = []
        # see if this is a pair
        if common.isIterable(movementNumber):
            movementStrList.append(''.join(str(x) for x in movementNumber))
            movementStrList.append('-'.join(str(x) for x in movementNumber))
            movementStrList.append('movement' + '-'.join(
                str(x) for x in movementNumber))
            movementStrList.append('movement' + '-0'.join(
                str(x) for x in movementNumber))
        else:
            movementStrList += [
                '0{0}'.format(movementNumber),
                str(movementNumber),
                'movement{0}'.format(movementNumber),
            ]
        for filePath in sorted(results):
            filename = filePath.name
            if filePath.suffix:
                filenameWithoutExtension = filePath.stem
            else:
                filenameWithoutExtension = None
            searchPartialMatch = True
            if filenameWithoutExtension is not None:
                # look for direct (whole-stem) matches first
                for movementStr in movementStrList:
                    if filenameWithoutExtension.lower() == movementStr.lower():
                        movementResults.append(filePath)
                        searchPartialMatch = False
            # if we have one direct match, all other matches must
            # be direct. this will match multiple files with different
            # file extensions
            if movementResults:
                continue
            if searchPartialMatch:
                for movementStr in movementStrList:
                    # BUG FIX: compare case-insensitively on both sides;
                    # previously the raw filename was matched against a
                    # lowercased movement string, so mixed-case names
                    # never matched.
                    if filename.lower().startswith(movementStr.lower()):
                        movementResults.append(filePath)
        # (removed a dead `if not movementResults: pass` statement here)
    else:
        movementResults = results
    return sorted(set(movementResults))
def search(self, query=None, field=None, fileExtensions=None, **kwargs):
    r'''
    Perform search, on all stored metadata, permit regular expression
    matching.

    >>> workList = corpus.corpora.CoreCorpus().getWorkList('ciconia')
    >>> metadataBundle = metadata.bundles.MetadataBundle()
    >>> failedPaths = metadataBundle.addFromPaths(
    ...     workList,
    ...     parseUsingCorpus=False,
    ...     useMultiprocessing=False,
    ...     storeOnDisk=False, #_DOCS_HIDE
    ...     )
    >>> failedPaths
    []

    >>> searchResult = metadataBundle.search(
    ...     'cicon',
    ...     field='composer'
    ...     )
    >>> searchResult
    <music21.metadata.bundles.MetadataBundle {1 entry}>
    >>> len(searchResult)
    1
    >>> searchResult[0]
    <music21.metadata.bundles.MetadataEntry 'ciconia_quod_jactatur_xml'>
    >>> searchResult = metadataBundle.search(
    ...     'cicon',
    ...     field='composer',
    ...     fileExtensions=('.krn',),
    ...     )
    >>> len(searchResult)  # no files in this format
    0

    >>> searchResult = metadataBundle.search(
    ...     'cicon',
    ...     field='composer',
    ...     fileExtensions=('.xml'),
    ...     )
    >>> len(searchResult)
    1

    Searches can also use keyword args:

    >>> metadataBundle.search(composer='cicon')
    <music21.metadata.bundles.MetadataBundle {1 entry}>
    '''
    if fileExtensions is not None and not common.isIterable(fileExtensions):
        fileExtensions = [fileExtensions]
    if fileExtensions is not None:
        # PERF: normalize the leading dot once, not once per metadata
        # entry as before; behavior is unchanged.
        fileExtensions = ['.' + fe if fe and fe[0] != '.' else fe
                          for fe in fileExtensions]

    newMetadataBundle = MetadataBundle()
    if query is None and field is None:
        if not kwargs:
            raise MetadataBundleException('Query cannot be empty')
        # consume one keyword search term; remaining kwargs are applied
        # recursively at the end
        field, query = kwargs.popitem()

    for key in self._metadataEntries:
        metadataEntry = self._metadataEntries[key]
        # ignore stub entries
        if metadataEntry.metadata is None:
            continue
        sp = metadataEntry.sourcePath
        if not isinstance(sp, pathlib.Path):
            sp = pathlib.Path(sp)
        if metadataEntry.search(query, field)[0]:
            include = False
            if fileExtensions is not None:
                for fileExtension in fileExtensions:
                    if sp.suffix == fileExtension:
                        include = True
                        break
                    elif (fileExtension.endswith('xml')
                            and sp.suffix in ('.mxl', '.mx')):
                        # compressed MusicXML counts as an xml match
                        include = True
                        break
            else:
                include = True
            if include and key not in newMetadataBundle._metadataEntries:
                newMetadataBundle._metadataEntries[key] = metadataEntry

    newMetadataBundle._metadataEntries = OrderedDict(
        sorted(list(newMetadataBundle._metadataEntries.items()),
               key=lambda mde: mde[1].sourcePath))
    if kwargs:
        return newMetadataBundle.search(**kwargs)
    return newMetadataBundle
def _parseData(self):
    '''
    Parses data specified as strings in self.dataDict into objects in
    self.parsedDataDict.

    Each raw entry may carry a 'fmt' key naming its format; entries
    without a format are parsed via parseInputToPrimitive. Any malformed
    entry is reported through self.recordError and skipped.
    '''
    for (name, dataDictElement) in self.rawDataDict.items():
        # every element must carry a 'data' payload
        if 'data' not in dataDictElement:
            self.recordError("no data specified for data element "
                             + str(dataDictElement))
            continue

        dataStr = dataDictElement['data']

        if 'fmt' in dataDictElement:
            fmt = dataDictElement['fmt']

            # each name may only be defined once
            if name in self.parsedDataDict:
                self.recordError("duplicate definition for data named "
                                 + str(name) + " " + str(dataDictElement))
                continue
            if fmt not in availableDataFormats:
                self.recordError("invalid data format for data element "
                                 + str(dataDictElement))
                continue

            if fmt == 'string' or fmt == 'str':
                if dataStr.count("'") == 2:  # Single Quoted String
                    data = dataStr.replace("'", "")  # remove excess quotes
                elif dataStr.count("\"") == 2:  # Double Quoted String
                    data = dataStr.replace("\"", "")  # remove excess quotes
                else:
                    self.recordError(
                        "invalid string (not in quotes...) for data element "
                        + str(dataDictElement))
                    continue
            elif fmt == 'int':
                try:
                    data = int(dataStr)
                except ValueError:
                    self.recordError("invalid integer for data element "
                                     + str(dataDictElement))
                    continue
            elif fmt in ['bool', 'boolean']:
                if dataStr in ['true', 'True']:
                    data = True
                elif dataStr in ['false', 'False']:
                    data = False
                else:
                    self.recordError("invalid boolean for data element "
                                     + str(dataDictElement))
                    continue
            elif fmt == 'list':
                # in this case dataStr should actually be a list object,
                # not a string; string elements inside it are parsed
                # recursively.
                if not common.isIterable(dataStr):
                    self.recordError(
                        "list format must actually be a list structure "
                        + str(dataDictElement))
                    continue
                data = []
                for elementStr in dataStr:
                    if isinstance(elementStr, str):
                        dataElement = self.parseInputToPrimitive(elementStr)
                    else:
                        dataElement = elementStr
                    data.append(dataElement)
            elif fmt == 'file':
                data = dataStr
            else:
                # remaining formats are score data handed to the converter
                if fmt in ['xml', 'musicxml']:
                    # patch in the MusicXML DOCTYPE / XML declaration when
                    # the fragment lacks them, so the parser accepts it
                    if dataStr.find("<!DOCTYPE") == -1:
                        dataStr = (
                            '<!DOCTYPE score-partwise PUBLIC '
                            + '"-//Recordare//DTD MusicXML 1.1 Partwise//EN"'
                            + '"http://www.musicxml.org/dtds/partwise.dtd">'
                            + dataStr)
                    if dataStr.find("<?xml") == -1:
                        dataStr = """<?xml version="1.0" encoding="UTF-8"?>""" + dataStr
                try:
                    data = converter.parseData(dataStr)
                except converter.ConverterException as e:
                    self.recordError("Error parsing data variable " + name
                                     + ": " + str(e) + "\n\n" + dataStr, e)
                    continue
        else:  # No format specified: infer it from the string itself
            dataStr = str(dataStr)
            data = self.parseInputToPrimitive(dataStr)

        self.parsedDataDict[name] = data
def plotStream(streamObj, graphFormat=None, xValue=None, yValue=None, zValue=None, **keywords):
    '''
    Create and display one or more plots for `streamObj`, selected from the
    keyword configuration arguments given. Note: plots require matplotlib
    to be installed.

    Plot methods can be specified as additional arguments or by keyword.
    Two keyword arguments can be given: `format` and `values`. If
    positional arguments are given, the first is taken as `format` and the
    rest are collected as `values`. If `format` is the class name, that
    class is collected.

    Every :class:`~music21.graph.PlotStream` subclass defines one `format`
    string and a list of `values` strings: `format` names the kind of
    graph (e.g. scatter, histogram, colorGrid) while `values` names what
    is graphed (e.g. quarterLength, pitch, pitchClass). Given a `format`
    and one or more `values`, the plot with the matching profile is
    generated if one exists; otherwise the first Plot matching any of the
    specifiers is created.

    In the case of :class:`~music21.graph.PlotWindowedAnalysis` subclasses,
    the :class:`~music21.analysis.discrete.DiscreteAnalysis` subclass
    :attr:`~music21.analysis.discrete.DiscreteAnalysis.indentifiers` list
    is added to the Plot's `values` list.

    Available plots include the following:

    * :class:`~music21.graph.plot.HistogramPitchSpace`
    * :class:`~music21.graph.plot.HistogramPitchClass`
    * :class:`~music21.graph.plot.HistogramQuarterLength`
    * :class:`~music21.graph.plot.ScatterPitchSpaceQuarterLength`
    * :class:`~music21.graph.plot.ScatterPitchClassQuarterLength`
    * :class:`~music21.graph.plot.ScatterPitchClassOffset`
    * :class:`~music21.graph.plot.ScatterPitchSpaceDynamicSymbol`
    * :class:`~music21.graph.plot.HorizontalBarPitchSpaceOffset`
    * :class:`~music21.graph.plot.HorizontalBarPitchClassOffset`
    * :class:`~music21.graph.plot.ScatterWeightedPitchSpaceQuarterLength`
    * :class:`~music21.graph.plot.ScatterWeightedPitchClassQuarterLength`
    * :class:`~music21.graph.plot.ScatterWeightedPitchSpaceDynamicSymbol`
    * :class:`~music21.graph.plot.Plot3DBarsPitchSpaceQuarterLength`
    * :class:`~music21.graph.plot.WindowedKey`
    * :class:`~music21.graph.plot.WindowedAmbitus`
    * :class:`~music21.graph.plot.Dolan`

    >>> s = corpus.parse('bach/bwv324.xml') #_DOCS_HIDE
    >>> s.plot('histogram', 'pitch', doneAction=None) #_DOCS_HIDE
    >>> #_DOCS_SHOW s = corpus.parse('bach/bwv57.8')
    >>> #_DOCS_SHOW s.plot('histogram', 'pitch')

    .. image:: images/HistogramPitchSpace.*
        :width: 600

    >>> s = corpus.parse('bach/bwv324.xml') #_DOCS_HIDE
    >>> s.plot('pianoroll', doneAction=None) #_DOCS_HIDE
    >>> #_DOCS_SHOW s = corpus.parse('bach/bwv57.8')
    >>> #_DOCS_SHOW s.plot('pianoroll')

    .. image:: images/HorizontalBarPitchSpaceOffset.*
        :width: 600
    '''
    # resolve the user's specifiers into (plotClass, axisMapping) entries;
    # a bare class may also appear instead of a pair.
    entriesToMake = findPlot.getPlotsToMake(graphFormat, xValue, yValue, zValue)
    for entry in entriesToMake:
        if common.isIterable(entry):
            plotClass, axisMapping = entry
        else:
            plotClass, axisMapping = entry, None
        plotInstance = plotClass(streamObj, **keywords)
        if axisMapping:
            # install the requested axis objects (axisX, axisY, ...) on
            # the plot before running it
            for axisName, axisClass in axisMapping.items():
                setattr(plotInstance,
                        'axis' + axisName.upper(),
                        axisClass(plotInstance, axisName))
        plotInstance.run()
def cacheMetadata(corpusNames=('local', 'core', 'virtual'),
                  useMultiprocessing=True,
                  verbose=False):
    '''
    Cache metadata from corpora in `corpusNames` as local cache files:

    Call as ``metadata.cacheMetadata()``
    '''
    from music21 import corpus
    from music21 import metadata

    def _report(message):
        # Repeated four times in the old body: warn() when the caller
        # asked for verbose output, printDebug() otherwise.
        if verbose is True:
            environLocal.warn(message)
        else:
            environLocal.printDebug(message)

    if not common.isIterable(corpusNames):
        corpusNames = (corpusNames,)

    timer = common.Timer()
    timer.start()

    # store list of file paths that caused an error
    failingFilePaths = []

    # the core cache is based on local files stored in music21;
    # virtual is on-line
    for corpusName in corpusNames:
        if corpusName == 'core':
            metadataBundle = metadata.MetadataBundle.fromCoreCorpus()
            paths = corpus.getCorePaths()
            useCorpus = True
        elif corpusName == 'local':
            metadataBundle = metadata.MetadataBundle.fromLocalCorpus()
            paths = corpus.getLocalPaths()
            useCorpus = False
        elif corpusName == 'virtual':
            metadataBundle = metadata.MetadataBundle.fromVirtualCorpus()
            paths = corpus.getVirtualPaths()
            useCorpus = False
        else:
            message = 'invalid corpus name provided: {0!r}'.format(corpusName)
            raise MetadataCacheException(message)
        _report('metadata cache: starting processing of paths: {0}'.format(
            len(paths)))
        failingFilePaths += metadataBundle.addFromPaths(
            paths,
            useCorpus=useCorpus,
            useMultiprocessing=useMultiprocessing,
            verbose=verbose)
        _report('cache: writing time: {0} md items: {1}'.format(
            timer, len(metadataBundle)))
        # release the bundle before processing the next corpus
        del metadataBundle

    _report('cache: final writing time: {0} seconds'.format(timer))
    for failingFilePath in failingFilePaths:
        _report('path failed to parse: {0}'.format(failingFilePath))
def cacheMetadata(
    corpusNames=('local', 'core', 'virtual'),
    useMultiprocessing=True,
    verbose=False
):
    '''
    Cache metadata from corpora in `corpusNames` as local cache files:

    Call as ``metadata.cacheMetadata()``
    '''
    from music21 import corpus
    from music21 import metadata

    def _log(message):
        # Factored out: this verbose/printDebug branch appeared four
        # times in the old body.
        if verbose is True:
            environLocal.warn(message)
        else:
            environLocal.printDebug(message)

    if not common.isIterable(corpusNames):
        corpusNames = (corpusNames,)

    timer = common.Timer()
    timer.start()

    # store list of file paths that caused an error
    failingFilePaths = []

    # the core cache is based on local files stored in music21;
    # virtual is on-line
    for corpusName in corpusNames:
        if corpusName == 'core':
            metadataBundle = metadata.MetadataBundle.fromCoreCorpus()
            paths = corpus.getCorePaths()
            useCorpus = True
        elif corpusName == 'local':
            metadataBundle = metadata.MetadataBundle.fromLocalCorpus()
            paths = corpus.getLocalPaths()
            useCorpus = False
        elif corpusName == 'virtual':
            metadataBundle = metadata.MetadataBundle.fromVirtualCorpus()
            paths = corpus.getVirtualPaths()
            useCorpus = False
        else:
            message = 'invalid corpus name provided: {0!r}'.format(corpusName)
            raise MetadataCacheException(message)
        _log('metadata cache: starting processing of paths: {0}'.format(
            len(paths)))
        failingFilePaths += metadataBundle.addFromPaths(
            paths,
            useCorpus=useCorpus,
            useMultiprocessing=useMultiprocessing,
            verbose=verbose
        )
        _log('cache: writing time: {0} md items: {1}'.format(
            timer, len(metadataBundle)))
        # release the bundle before processing the next corpus
        del metadataBundle

    _log('cache: final writing time: {0} seconds'.format(timer))
    for failingFilePath in failingFilePaths:
        _log('path failed to parse: {0}'.format(failingFilePath))
def music21ModWSGIFeatureApplication(environ, start_response):
    '''
    Music21 webapp to demonstrate processing of a zip file containing scores.
    Will be moved and integrated into __init__.py upon developing a
    standardized URL format as application that can perform variety of
    commands on user-uploaded files.

    WSGI callable (PEP 3333): `environ` is the request environment dict and
    `start_response` begins the HTTP response. Always returns a list of
    body strings after calling `start_response`.
    '''
    status = '200 OK'

    pathInfo = environ['PATH_INFO']
    # Contents of path after mount point of wsgi app but before question mark

    if pathInfo == '/uploadForm':
        output = getUploadForm()
        response_headers = [('Content-type', 'text/html'),
                            ('Content-Length', str(len(output)))]
        start_response(status, response_headers)
        return [output]

    #command = pathInfo

    formFields = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)

    # Check if form data is present. If not found, display error
    try:
        unused_subUploadFormFile = formFields['subUploadForm']
    except KeyError:
        # FIX: was a bare except; FieldStorage raises KeyError for a
        # missing field, and a bare except would also hide real errors
        html = """
            <html >
            <body style='font-family:calibri' bgcolor='#EEE' onLoad="toggleExtractors('m21')">
            <table border=0 width='100%'>
            <tr><td align='center'>
            <table border=0 width='500px' cellpadding='10px' style='background-color:#FFF'>
            <tr><td align='left'>
            <h1>Error:</h1>
            <p>Form information not found</p>
            <p><a href='/music21/featureapp/uploadForm'>Try Again</a></p>
            </td></tr></table>
            </td></tr></table>
            </body></html>
            """
        response_headers = [('Content-type', 'text/html'),
                            ('Content-Length', str(len(html)))]
        start_response(status, response_headers)
        return [html]

    # Get file from POST
    uploadedFile = formFields['fileupload'].file
    filename = formFields['fileupload'].filename
    uploadType = formFields['fileupload'].type

    # Check if filename is empty - display no file chosen error
    if filename == "":
        html = """
            <html >
            <body style='font-family:calibri' bgcolor='#EEE' onLoad="toggleExtractors('m21')">
            <table border=0 width='100%'>
            <tr><td align='center'>
            <table border=0 width='500px' cellpadding='10px' style='background-color:#FFF'>
            <tr><td align='left'>
            <h1>Music 21 Feature Extraction:</h1>
            <p><b>Error:</b> No file selected</p>
            <p><a href='/music21/featureapp/uploadForm'>Try Again</a></p>
            </td></tr></table>
            </td></tr></table>
            </body></html>
            """
        response_headers = [('Content-type', 'text/html'),
                            ('Content-Length', str(len(html)))]
        start_response(status, response_headers)
        return [html]

    # Check if uploadType is zip - display no file chosen error
    if uploadType != "application/zip":
        html = """
            <html >
            <body style='font-family:calibri' bgcolor='#EEE' onLoad="toggleExtractors('m21')">
            <table border=0 width='100%'>
            <tr><td align='center'>
            <table border=0 width='500px' cellpadding='10px' style='background-color:#FFF'>
            <tr><td align='left'>
            <h1>Music 21 Feature Extraction:</h1>
            <p><b>Error:</b> File not in .zip format</p>
            <p><a href='/music21/featureapp/uploadForm'>Try Again</a></p>
            </td></tr></table>
            </td></tr></table>
            </body></html>
            """
        response_headers = [('Content-type', 'text/html'),
                            ('Content-Length', str(len(html)))]
        start_response(status, response_headers)
        return [html]

    # Setup Feature Extractors and Data Set
    ds = features.DataSet(classLabel='Class')

    featureIDList = list()

    # Check if features have been selected. Else display error
    try:
        unused_featureFile = formFields['features']
    except KeyError:
        # FIX: was a bare except
        html = """
            <html ><body>
            <h1>Error:</h1>
            <p>No extractors selected</p>
            <p><a href='/music21/featureapp/uploadForm'>try again</a></p>
            </body></html>
            """
        # FIX: previously returned the bare string without calling
        # start_response and without wrapping it in a list; WSGI (PEP 3333)
        # requires start_response before returning and an iterable body —
        # this now matches every other error branch above
        response_headers = [('Content-type', 'text/html'),
                            ('Content-Length', str(len(html)))]
        start_response(status, response_headers)
        return [html]

    if common.isIterable(formFields['features']):
        print(formFields['features'])
        for featureId in formFields['features']:
            featureIDList.append(str(featureId.value))
    else:
        featureIDList.append(formFields['features'].value)

    fes = features.extractorsById(featureIDList)
    ds.addFeatureExtractors(fes)

    # Create ZipFile Object
    zipf = zipfile.ZipFile(uploadedFile, 'r')

    # Loop Through Files
    for scoreFileInfo in zipf.infolist():
        filePath = scoreFileInfo.filename

        # Skip Directories
        if filePath.endswith('/'):
            continue
        scoreFile = zipf.open(filePath)

        # Use Music21's converter to parse file
        parsedFile = idAndParseFile(scoreFile, filePath)

        # If valid music21 format, add to data set
        if parsedFile is not None:
            # Split into directory structure and filname
            pathPartitioned = filePath.rpartition('/')
            directory = pathPartitioned[0]
            filename = pathPartitioned[2]
            if directory == "":
                directory = 'uncategorized'
            ds.addData(parsedFile, classValue=directory, id=filename)

    # Process data set
    ds.process()

    # Get output format from POST and set appropriate output:
    outputFormatID = formFields['outputformat'].value
    if outputFormatID == CSV_OUTPUT_ID:
        output = features.OutputCSV(ds).getString()
    elif outputFormatID == ORANGE_OUTPUT_ID:
        output = features.OutputTabOrange(ds).getString()
    elif outputFormatID == ARFF_OUTPUT_ID:
        output = features.OutputARFF(ds).getString()
    else:
        output = "invalid output format"

    response_headers = [('Content-type', 'text/plain'),
                        ('Content-Length', str(len(output)))]
    start_response(status, response_headers)
    return [output]
def getWorkList(
    self,
    workName,
    movementNumber=None,
    fileExtensions=None,
):
    r'''
    Search the corpus and return a list of filenames of works, always in a
    list.

    If no matches are found, an empty list is returned.

    >>> from music21 import corpus
    >>> coreCorpus = corpus.corpora.CoreCorpus()

    # returns 1 even though there is a '.mus' file, which cannot be read...

    >>> len(coreCorpus.getWorkList('cpebach/h186'))
    1
    >>> len(coreCorpus.getWorkList('cpebach/h186', None, '.xml'))
    1

    >>> len(coreCorpus.getWorkList('schumann_clara/opus17', 3))
    1
    >>> len(coreCorpus.getWorkList('schumann_clara/opus17', 2))
    0

    Make sure that 'verdi' just gets the single Verdi piece and not the
    Monteverdi pieces:

    >>> len(coreCorpus.getWorkList('verdi'))
    1
    '''
    if not common.isListLike(fileExtensions):
        fileExtensions = [fileExtensions]
    paths = self.getPaths(fileExtensions)
    results = []
    # permit workName to be a list of paths/branches
    if common.isIterable(workName):
        workName = os.path.sep.join(workName)
    # normalize URL-style slashes to this OS's separator for path matching
    workSlashes = workName.replace('/', os.path.sep)
    # find all matches for the work name
    # TODO: this should match by path component, not just
    # substring
    for path in paths:
        if workName.lower() in path.lower():
            results.append(path)
        elif workSlashes.lower() in path.lower():
            results.append(path)
    if results:
        # more than one matched...use more stringent criterion:
        # must have a slash before the name
        previousResults = results
        results = []
        longName = os.sep + workSlashes.lower()
        for path in previousResults:
            if longName in path.lower():
                results.append(path)
        if not results:
            # the stricter directory-boundary filter removed every match;
            # fall back to the plain substring matches
            results = previousResults
    movementResults = []
    if movementNumber is not None and results:
        # store one ore more possible mappings of movement number
        movementStrList = []
        # see if this is a pair (e.g. (4, 2) -> '42', '4-2', 'movement4-2',
        # 'movement4-02')
        if common.isIterable(movementNumber):
            movementStrList.append(
                ''.join(str(x) for x in movementNumber))
            movementStrList.append(
                '-'.join(str(x) for x in movementNumber))
            movementStrList.append('movement' +
                '-'.join(str(x) for x in movementNumber))
            movementStrList.append('movement' +
                '-0'.join(str(x) for x in movementNumber))
        else:
            # single number: try zero-padded, plain, and 'movementN' forms
            movementStrList += [
                '0{0}'.format(movementNumber),
                str(movementNumber),
                'movement{0}'.format(movementNumber),
            ]
        for filePath in sorted(results):
            filename = os.path.split(filePath)[1]
            if '.' in filename:
                filenameWithoutExtension = os.path.splitext(filename)[0]
            else:
                filenameWithoutExtension = None
            searchPartialMatch = True
            if filenameWithoutExtension is not None:
                # look for direct matches first
                for movementStr in movementStrList:
                    #if movementStr.lower() in filePath.lower():
                    if filenameWithoutExtension.lower() == movementStr.lower():
                        movementResults.append(filePath)
                        searchPartialMatch = False
            # if we have one direct match, all other matches must
            # be direct. this will match multiple files with different
            # file extensions
            if movementResults:
                continue
            if searchPartialMatch:
                # NOTE(review): filename is not lowercased here while
                # movementStr is — mixed-case filenames may be missed;
                # confirm whether that is intentional
                for movementStr in movementStrList:
                    if filename.startswith(movementStr.lower()):
                        movementResults.append(filePath)
        if not movementResults:
            # deliberate no-op: a movement number was requested but nothing
            # matched, so an empty list is returned below
            pass
    else:
        # no movement number requested (or no work matches): return the
        # work-name matches unfiltered
        movementResults = results
    return sorted(set(movementResults))
def getRealized(
    self,
    useDynamicContext=True,
    useVelocity=True,
    useArticulations=True,
    baseLevel=0.5,
    clip=True,
):
    '''
    Get a realized unit-interval scalar for this Volume. This scalar is to
    be applied to the dynamic range of whatever output is available,
    whatever that may be.

    The `baseLevel` value is a middle value between 0 and 1 that all
    scalars modify. This also becomes the default value for unspecified
    dynamics. When scalars (between 0 and 1) are used, their values are
    doubled, such that mid-values (around .5, which become 1) make no
    change.

    This can optionally take into account `dynamicContext`, `useVelocity`,
    and `useArticulation`.

    If `useDynamicContext` is True, a context search for a dynamic will be
    done, else dynamics are ignored. Alternatively, the useDynamicContext
    may supply a Dynamic object that will be used instead of a context
    search.

    If `useArticulations` is True and client is not None, any articulations
    found on that client will be used to adjust the volume. Alternatively,
    the `useArticulations` parameter may supply a list of articulations
    that will be used instead of that available on a client.

    The `velocityIsRelative` tag determines if the velocity value includes
    contextual values, such as dynamics and accents, or not.

    >>> s = stream.Stream()
    >>> s.repeatAppend(note.Note('d3', quarterLength=.5), 8)
    >>> s.insert([0, dynamics.Dynamic('p'),
    ...           1, dynamics.Dynamic('mp'),
    ...           2, dynamics.Dynamic('mf'),
    ...           3, dynamics.Dynamic('f')])

    >>> s.notes[0].volume.getRealized()
    0.496...

    >>> s.notes[1].volume.getRealized()
    0.496...

    >>> s.notes[2].volume.getRealized()
    0.63779...

    >>> s.notes[7].volume.getRealized()
    0.99212...

    velocity, if set, will be scaled by dynamics

    >>> s.notes[7].volume.velocity = 20
    >>> s.notes[7].volume.getRealized()
    0.22047...

    unless we set the velocity to not be relative...

    >>> s.notes[7].volume.velocityIsRelative = False
    >>> s.notes[7].volume.getRealized()
    0.1574803...
    '''
    # velocityIsRelative might be best set at import. e.g., from MIDI,
    # velocityIsRelative is False, but in other applications, it may not be
    val = baseLevel
    dm = None  # no dynamic mark
    # velocity is checked first; the range between 0 and 1 is doubled,
    # to 0 to 2. a velocityScalar of .7 thus scales the base value of
    # .5 by 1.4 to become .7
    if useVelocity:
        if self._velocityScalar is not None:
            if not self.velocityIsRelative:
                # if velocity is not relative,
                # it fully determines output independent of anything else
                val = self._velocityScalar
            else:
                val = val * (self._velocityScalar * 2.0)
        # this value provides a good default velocity, as .5 is low
        # this is not a scalar application but a shift
        else:  # target: 0.70866
            val += 0.20866
    # only change the val from here if velocity is relative
    if self.velocityIsRelative:
        if useDynamicContext is not False:
            if hasattr(useDynamicContext, 'classes') and 'Dynamic' in useDynamicContext.classes:
                dm = useDynamicContext  # it is a dynamic
            elif self.client is not None:
                dm = self.getDynamicContext()  # dm may be None
            else:
                environLocal.printDebug(['getRealized():',
                    'useDynamicContext is True but no dynamic supplied or found in context'])
            if dm is not None:
                # double scalar (so range is between 0 and 1) and scale
                # the current val (around the base)
                val = val * (dm.volumeScalar * 2.0)
    # useArticulations can be a list of 1 or more articulation objects
    # as well as True/False
    if useArticulations is not False:
        if common.isIterable(useArticulations):
            am = useArticulations
        elif (hasattr(useArticulations, 'classes') and
                'Articulation' in useArticulations.classes):
            am = [useArticulations]  # place in a list
        elif self.client is not None:
            am = self.client.articulations
        else:
            am = []
        for a in am:
            # add in volume shift for all articulations
            val += a.volumeShift
    if clip:  # limit between 0 and 1
        if val > 1:
            val = 1.0
        elif val < 0:
            val = 0.0
    # might need to rebalance range after scalings
    # always update cached result each time this is called
    self._cachedRealized = val
    return val
def cacheMetadata(corpusNames=None, useMultiprocessing=True, verbose=False):
    '''
    Cache metadata from corpora in `corpusNames` as local cache files.

    Call as ``metadata.cacheMetadata()``.

    If `corpusNames` is None, every named local corpus plus 'local' and
    'core' is cached. A single name may be passed instead of an iterable.
    An unknown corpus name raises MetadataCacheException. When `verbose` is
    True, progress messages are emitted as warnings so they reach the
    console; otherwise they go to the debug log.
    '''
    from music21.corpus import corpora
    from music21.corpus import manager

    def report(message):
        # single place for the verbose-vs-debug routing that was previously
        # copy-pasted at every message site
        if verbose is True:
            environLocal.warn(message)
        else:
            environLocal.printDebug(message)

    localCorporaNames = manager.listLocalCorporaNames(skipNone=True)

    if corpusNames is None:
        corpusNames = localCorporaNames[:] + ['local', 'core']  # + ['virtual']

    if not common.isIterable(corpusNames):
        corpusNames = (corpusNames,)

    timer = common.Timer()
    timer.start()

    # store list of file paths that caused an error
    failingFilePaths = []

    # the core cache is based on local files stored in music21;
    # virtual is on-line
    for corpusName in corpusNames:
        if corpusName == 'core':
            corporaObject = corpora.CoreCorpus()
        elif corpusName == 'local':
            corporaObject = corpora.LocalCorpus()
        # elif corpusName == 'virtual':
        #     corporaObject = corpora.VirtualCorpus()
        elif corpusName in localCorporaNames:
            corporaObject = corpora.LocalCorpus(corpusName)
        else:
            message = 'invalid corpus name provided: {0!r}'.format(corpusName)
            raise MetadataCacheException(message)

        metadataBundle = corporaObject.metadataBundle
        paths = corporaObject.getPaths()

        report('{} metadata cache: starting processing of paths: {}'.format(
            corpusName, len(paths)))

        failingFilePaths += metadataBundle.addFromPaths(
            paths,
            parseUsingCorpus=corporaObject.parseUsingCorpus,
            useMultiprocessing=useMultiprocessing,
            verbose=verbose)

        report('cache: writing time: {0} md items: {1}\n'.format(
            timer, len(metadataBundle)))
        report('cache: filename: {0}'.format(metadataBundle.filePath))
        # release the (potentially large) bundle before the next corpus
        del metadataBundle

    report('cache: final writing time: {0} seconds'.format(timer))
    for failingFilePath in failingFilePaths:
        report('path failed to parse: {0}'.format(failingFilePath))