def cachedRealizedStr(self):
    '''
    Convenience property for testing.

    >>> v = volume.Volume(velocity=128)
    >>> v.cachedRealizedStr
    '1.0'
    '''
    # round to two decimals before string conversion for stable output
    rounded = round(self.cachedRealized, 2)
    return str(rounded)
def cachedRealizedStr(self):
    '''
    Convenience property for testing: the cached realized volume,
    rounded to two decimal places and rendered as a string.

    >>> v = volume.Volume(velocity=128)
    >>> v.cachedRealizedStr
    '1.0'
    '''
    value = round(self.cachedRealized, 2)
    return str(value)
def velocityScalar(self, value):
    '''
    Setter: map a 0.0-1.0 scalar onto the MIDI velocity range (0-127)
    and store it in self._velocity.

    Values below 0 or above 1 are clamped to the range; non-numeric
    input raises VolumeException.
    '''
    if not common.isNum(value):
        raise VolumeException('value provided for velocityScalar must be a number, ' +
                              'not %s' % value)
    # clamp into [0, 1] before scaling to the 0-127 MIDI range
    clamped = min(max(value, 0), 1)
    self._velocity = round(clamped * 127)
def velocityScalar(self, value):
    '''
    Setter: store `value` (a scalar in [0, 1], clamped if outside) as a
    MIDI velocity in self._velocity.

    Raises VolumeException when `value` is not numeric.
    '''
    if not common.isNum(value):
        raise VolumeException(
            'value provided for velocityScalar must be a number, '
            + 'not %s' % value)
    # clamp out-of-range input, then scale onto 0-127
    scalar = 0 if value < 0 else 1 if value > 1 else value
    self._velocity = round(scalar * 127)
def getRealizedStr(self, useDynamicContext=True, useVelocity=True,
                   useArticulations=True, baseLevel=0.5, clip=True):
    '''
    Return the realized value as a rounded and formatted string value.
    Useful for testing.

    >>> v = volume.Volume(velocity=64)
    >>> v.getRealizedStr()
    '0.5'
    '''
    # delegate the actual realization, forwarding every option unchanged
    realized = self.getRealized(useDynamicContext=useDynamicContext,
                                useVelocity=useVelocity,
                                useArticulations=useArticulations,
                                baseLevel=baseLevel,
                                clip=clip)
    return str(round(realized, 2))
def getRealizedStr(self, useDynamicContext=True, useVelocity=True,
                   useArticulations=True, baseLevel=0.5, clip=True):
    '''
    Return the realized volume, rounded to two decimals, as a string.
    Useful for testing.

    >>> v = volume.Volume(velocity=64)
    >>> v.getRealizedStr()
    '0.5'
    '''
    val = self.getRealized(
        useDynamicContext=useDynamicContext,
        useVelocity=useVelocity,
        useArticulations=useArticulations,
        baseLevel=baseLevel,
        clip=clip,
    )
    rounded = round(val, 2)
    return str(rounded)
def reduceMeasureToNChords(self, measureObj, numChords=1, weightAlgorithm=None, trimBelow=0.25):
    '''
    Reduce the notes/chords in `measureObj` to at most `numChords`
    chords, keeping the ones scored highest by `weightAlgorithm`
    (via computeMeasureChordWeights) and absorbing the durations of the
    discarded elements into the kept ones.

    >>> s = analysis.reduceChords.testMeasureStream1()
    >>> cr = analysis.reduceChords.ChordReducer()

    Reduce to a maximum of 3 chords; though here we will only get one
    because the other chord is below the trimBelow threshold.

    >>> newS = cr.reduceMeasureToNChords(s, 3,
    ...    weightAlgorithm=cr.qlbsmpConsonance, trimBelow = 0.3)
    >>> newS.show('text')
    {0.0} <music21.meter.TimeSignature 4/4>
    {0.0} <music21.chord.Chord C4 E4 G4 C5>
    >>> newS.notes[0].quarterLength
    4.0
    '''
    from music21 import note
    # work on a flat stream of notes/chords
    if measureObj.isFlat is False:
        mObj = measureObj.flat.notes
    else:
        mObj = measureObj.notes

    chordWeights = self.computeMeasureChordWeights(mObj, weightAlgorithm)

    if numChords > len(chordWeights):
        numChords = len(chordWeights)

    # pitch-class tuples ordered by descending weight, best numChords kept
    maxNChords = sorted(chordWeights, key=chordWeights.get, reverse=True)[:numChords]
    if len(maxNChords) == 0:
        # nothing weighted: replace the measure contents with a rest
        r = note.Rest()
        r.quarterLength = mObj.duration.quarterLength
        for c in mObj:
            mObj.remove(c)
        mObj.insert(0, r)
        return mObj
    maxChordWeight = chordWeights[maxNChords[0]]

    # keep only candidates within trimBelow of the best weight; the list
    # is sorted, so the first failure ends the scan
    trimmedMaxChords = []
    for pcTuples in maxNChords:
        if chordWeights[pcTuples] >= maxChordWeight * trimBelow:
            trimmedMaxChords.append(pcTuples)
            #print chordWeights[pcTuples], maxChordWeight
        else:
            break

    # greedy pass: each kept chord absorbs the durations of the
    # following discarded elements
    currentGreedyChord = None
    currentGreedyChordPCs = None
    currentGreedyChordNewLength = 0.0
    for c in mObj:
        if c.isNote:
            # FIX: pitchClass is a single int, so tuple(c.pitch.pitchClass)
            # raised TypeError ('int' object is not iterable).  Wrap the
            # pitch class in a 1-tuple so it is comparable with the
            # pitch-class tuples built for chords below.
            p = (c.pitch.pitchClass,)
        else:
            p = tuple(set([x.pitchClass for x in c.pitches]))
        if p in trimmedMaxChords and p != currentGreedyChordPCs:
            # keep this chord
            if currentGreedyChord is None and c.offset != 0.0:
                currentGreedyChordNewLength = c.offset
                c.offset = 0.0
            elif currentGreedyChord is not None:
                currentGreedyChord.quarterLength = currentGreedyChordNewLength
                currentGreedyChordNewLength = 0.0
            currentGreedyChord = c
            for n in c:
                n.tie = None
                if n.pitch.accidental is not None:
                    n.pitch.accidental.displayStatus = None
            currentGreedyChordPCs = p
            currentGreedyChordNewLength += c.quarterLength
        else:
            # discard: fold this element's duration into the current chord
            currentGreedyChordNewLength += c.quarterLength
            # NOTE(review): removes from mObj while iterating it; music21
            # streams appear to tolerate this here -- confirm.
            mObj.remove(c)
    if currentGreedyChord is not None:
        currentGreedyChord.quarterLength = currentGreedyChordNewLength
        currentGreedyChordNewLength = 0.0

    # even chord lengths: snap small syncopated offsets back to the beat
    for i in range(1, len(mObj)):
        c = mObj[i]
        cOffsetCurrent = c.offset
        cOffsetSyncop = cOffsetCurrent - int(cOffsetCurrent)
        if round(cOffsetSyncop, 3) in [0.250, 0.125, 0.333, 0.063, 0.062]:
            lastC = mObj[i - 1]
            lastC.quarterLength -= cOffsetSyncop
            c.offset = int(cOffsetCurrent)
            c.quarterLength += cOffsetSyncop
    return mObj
def reduceThisMeasure(self, mI, measureIndex, maxChords, closedPosition, forceOctave):
    '''
    Reduce one measure-slice `mI` (presumably a multi-part slice for a
    single measure -- TODO confirm against caller) into a new Measure
    holding at most `maxChords` chords.

    Side effects: updates self._lastPitchedObject and self._lastTs so
    ties and time signatures carry across successive calls.
    '''
    m = stream.Measure()
    m.number = measureIndex
    # chordify collapses all parts into a single chordal stream
    mIchord = mI.chordify()
    newPart = self.reduceMeasureToNChords(
        mIchord, maxChords,
        weightAlgorithm=self.qlbsmpConsonance,
        trimBelow=0.3)
    #newPart.show('text')
    cLast = None      # copy of the previously inserted element
    cLastEnd = 0.0    # offset at which the previous element ends
    for cEl in newPart:
        cElCopy = copy.deepcopy(cEl)
        if 'Chord' in cEl.classes and closedPosition is not False:
            if forceOctave is not False:
                cElCopy.closedPosition(forceOctave=forceOctave, inPlace=True)
            else:
                cElCopy.closedPosition(inPlace=True)
            cElCopy.removeRedundantPitches(inPlace=True)
        newOffset = cEl.getOffsetBySite(newPart)

        # extend over gaps
        if cLast is not None:
            if round(newOffset - cLastEnd, 6) != 0.0:
                cLast.quarterLength += newOffset - cLastEnd
        cLast = cElCopy
        cLastEnd = newOffset + cElCopy.quarterLength
        m._insertCore(newOffset, cElCopy)

    # pad the final element to the end of the bar when a time signature
    # context is available.  NOTE(review): cLast is None if newPart was
    # empty -- presumably upstream guarantees at least one element; confirm.
    tsContext = mI.parts[0].getContextByClass('TimeSignature')
    if tsContext is not None:
        if round(tsContext.barDuration.quarterLength - cLastEnd, 6) != 0.0:
            cLast.quarterLength += tsContext.barDuration.quarterLength - cLastEnd
    m.elementsChanged()  # required after the _insertCore calls above

    # add ties
    if self._lastPitchedObject is not None:
        firstPitched = m[0]
        if self._lastPitchedObject.isNote and firstPitched.isNote:
            # same single pitch across the barline: start a tie
            if self._lastPitchedObject.pitch == firstPitched.pitch:
                self._lastPitchedObject.tie = tie.Tie("start")
        elif self._lastPitchedObject.isChord and firstPitched.isChord:
            # identical chords (same length, same pitches position by
            # position) across the barline: start a tie
            if len(self._lastPitchedObject) == len(firstPitched):
                allSame = True
                for pitchI in range(len(self._lastPitchedObject)):
                    if (self._lastPitchedObject.pitches[pitchI] != firstPitched.pitches[pitchI]):
                        allSame = False
                if allSame is True:
                    self._lastPitchedObject.tie = tie.Tie('start')
    self._lastPitchedObject = m[-1]

    # carry the time signature forward only when it changes
    sourceMeasureTs = mI.parts[0].getElementsByClass(
        'Measure')[0].timeSignature
    if sourceMeasureTs != self._lastTs:
        m.timeSignature = copy.deepcopy(sourceMeasureTs)
    self._lastTs = sourceMeasureTs
    return m
def process(self, minWindow=1, maxWindow=1, windowStepSize=1,
            windowType='overlap', includeTotalWindow=True):
    '''
    Main method for windowed analysis across one or more window sizes.

    Calls :meth:`~music21.analysis.WindowedAnalysis.analyze` for
    the number of different window sizes to be analyzed.

    The `minWindow` and `maxWindow` set the range of window sizes in
    quarter lengths. The `windowStepSize` parameter determines the
    increment between these window sizes, in quarter lengths.

    If `minWindow` or `maxWindow` is None, the largest window size
    available will be set.

    If `includeTotalWindow` is True, the largest window size will always
    be added.

    Returns a 3-tuple (solutionMatrix, colorMatrix, metaMatrix) with one
    entry per analyzed window size.

    >>> s = corpus.parse('bach/bwv324')
    >>> ksAnalyzer = analysis.discrete.KrumhanslSchmuckler()

    placing one part into analysis

    >>> sopr = s.parts[0]
    >>> wa = analysis.windowed.WindowedAnalysis(sopr, ksAnalyzer)
    >>> solutions, colors, meta = wa.process(1, 1, includeTotalWindow=False)
    >>> len(solutions) # we only have one series of windows
    1

    >>> solutions, colors, meta = wa.process(1, 2, includeTotalWindow=False)
    >>> len(solutions) # we have two series of windows
    2

    >>> solutions[1]
    [(<music21.pitch.Pitch B>, 'major', 0.6868...), (<music21.pitch.Pitch B>, 'minor', 0.8308...), (<music21.pitch.Pitch D>, 'major', 0.6868...), (<music21.pitch.Pitch B>, 'minor', 0.8308...),...]

    >>> colors[1]
    ['#ffb5ff', '#9b519b', '#ffd752', '#9b519b', ...]

    >>> meta
    [{'windowSize': 1}, {'windowSize': 2}]
    '''
    # None for either bound means "use the full stream length"
    if maxWindow is None:
        maxLength = len(self._windowedStream)
    else:
        maxLength = maxWindow

    if minWindow is None:
        minLength = len(self._windowedStream)
    else:
        minLength = minWindow

    # normalize windowType spellings; an unrecognized string passes
    # through unchanged -- TODO confirm that is intended
    if windowType is None:
        windowType = 'overlap'
    elif windowType.lower() in ['overlap']:
        windowType = 'overlap'
    elif windowType.lower() in ['nooverlap', 'nonoverlapping']:
        windowType = 'noOverlap'
    elif windowType.lower() in ['adjacentaverage']:
        windowType = 'adjacentAverage'

    # need to create storage for the output of each row, or the processing
    # of all windows of a single size across the entire Stream
    solutionMatrix = []
    colorMatrix = []
    # store meta data about each row as a dictionary
    metaMatrix = []

    if common.isNum(windowStepSize):
        # numeric step: arithmetic progression of window sizes
        windowSizes = list(range(minLength, maxLength + 1, windowStepSize))
    else:
        # string step: multiplicative growth of window sizes, stopping
        # once the size exceeds 75% of maxLength (presumably steps like
        # '2x' -- verify common.getNumFromStr semantics)
        num, junk = common.getNumFromStr(windowStepSize)
        windowSizes = []
        x = minLength
        while True:
            windowSizes.append(x)
            x = x * round(int(num))
            if x > (maxLength * 0.75):
                break

    if includeTotalWindow:
        totalWindow = len(self._windowedStream)
        if totalWindow not in windowSizes:
            windowSizes.append(totalWindow)

    for i in windowSizes:
        #environLocal.printDebug(['processing window:', i])
        # each of these results are lists, where len is based on
        soln, colorn = self.analyze(i, windowType=windowType)
        # store lists of results in a list of lists
        solutionMatrix.append(soln)
        colorMatrix.append(colorn)
        meta = {'windowSize': i}
        metaMatrix.append(meta)

    return solutionMatrix, colorMatrix, metaMatrix
def __repr__(self):
    '''Debug representation showing the realized value to two decimals.'''
    shown = round(self.realized, 2)
    return "<music21.volume.Volume realized=%s>" % shown
def process(self, minWindow=1, maxWindow=1, windowStepSize=1,
            windowType='overlap', includeTotalWindow=True):
    '''
    Main method for windowed analysis across one or more window sizes.

    Calls :meth:`~music21.analysis.WindowedAnalysis.analyze` once for
    each window size to be analyzed.

    The `minWindow` and `maxWindow` set the range of window sizes in
    quarter lengths. The `windowStepSize` parameter determines the
    increment between these window sizes, in quarter lengths.

    If `minWindow` or `maxWindow` is None, the largest window size
    available will be set.

    If `includeTotalWindow` is True, the largest window size will always
    be added.

    >>> s = corpus.parse('bach/bwv324')
    >>> ksAnalyzer = analysis.discrete.KrumhanslSchmuckler()

    placing one part into analysis

    >>> sopr = s.parts[0]
    >>> wa = analysis.windowed.WindowedAnalysis(sopr, ksAnalyzer)
    >>> solutions, colors, meta = wa.process(1, 1, includeTotalWindow=False)
    >>> len(solutions) # we only have one series of windows
    1

    >>> solutions, colors, meta = wa.process(1, 2, includeTotalWindow=False)
    >>> len(solutions) # we have two series of windows
    2

    >>> solutions[1]
    [(<music21.pitch.Pitch B>, 'major', 0.6868...), (<music21.pitch.Pitch B>, 'minor', 0.8308...), (<music21.pitch.Pitch D>, 'major', 0.6868...), (<music21.pitch.Pitch B>, 'minor', 0.8308...),...]

    >>> colors[1]
    ['#ffb5ff', '#9b519b', '#ffd752', '#9b519b', ...]

    >>> meta
    [{'windowSize': 1}, {'windowSize': 2}]
    '''
    # None for either bound means "use the full stream length"
    maxLength = maxWindow if maxWindow is not None else len(self._windowedStream)
    minLength = minWindow if minWindow is not None else len(self._windowedStream)

    # normalize the windowType spelling to one of the canonical forms;
    # unrecognized spellings pass through unchanged
    if windowType is None:
        windowType = 'overlap'
    else:
        normalized = windowType.lower()
        if normalized in ('overlap',):
            windowType = 'overlap'
        elif normalized in ('nooverlap', 'nonoverlapping'):
            windowType = 'noOverlap'
        elif normalized in ('adjacentaverage',):
            windowType = 'adjacentAverage'

    # build the series of window sizes to analyze
    if common.isNum(windowStepSize):
        # numeric step: arithmetic progression over [minLength, maxLength]
        windowSizes = list(range(minLength, maxLength + 1, windowStepSize))
    else:
        # string step: multiplicative growth, stopping once the size
        # exceeds 75% of maxLength
        factor, unused = common.getNumFromStr(windowStepSize)
        multiplier = round(int(factor))
        windowSizes = []
        size = minLength
        while True:
            windowSizes.append(size)
            size = size * multiplier
            if size > (maxLength * 0.75):
                break

    if includeTotalWindow:
        totalWindow = len(self._windowedStream)
        if totalWindow not in windowSizes:
            windowSizes.append(totalWindow)

    # one row of results (solutions, colors, metadata) per window size
    solutionMatrix = []
    colorMatrix = []
    metaMatrix = []
    for windowSize in windowSizes:
        soln, colorn = self.analyze(windowSize, windowType=windowType)
        solutionMatrix.append(soln)
        colorMatrix.append(colorn)
        metaMatrix.append({'windowSize': windowSize})
    return solutionMatrix, colorMatrix, metaMatrix
def reduceMeasureToNChords(self, measureObj, numChords=1, weightAlgorithm=None, trimBelow=0.25):
    '''
    Reduce the notes/chords in `measureObj` to at most `numChords`
    chords, keeping those scored highest by `weightAlgorithm` (via
    computeMeasureChordWeights); durations of discarded elements are
    absorbed into the kept chords.

    >>> s = analysis.reduceChords.testMeasureStream1()
    >>> cr = analysis.reduceChords.ChordReducer()

    Reduce to a maximum of 3 chords; though here we will only get one
    because the other chord is below the trimBelow threshold.

    >>> newS = cr.reduceMeasureToNChords(s, 3,
    ...    weightAlgorithm=cr.qlbsmpConsonance, trimBelow = 0.3)
    >>> newS.show('text')
    {0.0} <music21.meter.TimeSignature 4/4>
    {0.0} <music21.chord.Chord C4 E4 G4 C5>
    >>> newS.notes[0].quarterLength
    4.0
    '''
    from music21 import note
    # work on a flat note stream (this variant materializes a Stream)
    if measureObj.isFlat is False:
        mObj = measureObj.flat.notes.stream()
    else:
        mObj = measureObj.notes.stream()

    chordWeights = self.computeMeasureChordWeights(mObj, weightAlgorithm)

    if numChords > len(chordWeights):
        numChords = len(chordWeights)

    # pitch-class tuples ordered by descending weight, best numChords kept
    maxNChords = sorted(chordWeights, key=chordWeights.get, reverse=True)[:numChords]
    if not maxNChords:
        # nothing weighted: replace the measure contents with a rest
        r = note.Rest()
        r.quarterLength = mObj.duration.quarterLength
        for c in mObj:
            mObj.remove(c)
        mObj.insert(0, r)
        return mObj
    maxChordWeight = chordWeights[maxNChords[0]]

    # keep only candidates within trimBelow of the best weight; the list
    # is sorted, so the first failure ends the scan
    trimmedMaxChords = []
    for pcTuples in maxNChords:
        if chordWeights[pcTuples] >= maxChordWeight * trimBelow:
            trimmedMaxChords.append(pcTuples)
            #print chordWeights[pcTuples], maxChordWeight
        else:
            break

    # greedy pass: each kept chord absorbs the durations of the
    # following discarded elements
    currentGreedyChord = None
    currentGreedyChordPCs = None
    currentGreedyChordNewLength = 0.0
    for c in mObj:
        if c.isNote:
            # FIX: pitchClass is a single int, so tuple(c.pitch.pitchClass)
            # raised TypeError ('int' object is not iterable).  Wrap the
            # pitch class in a 1-tuple so it is comparable with the
            # pitch-class tuples built for chords below.
            p = (c.pitch.pitchClass,)
        else:
            p = tuple(set([x.pitchClass for x in c.pitches]))
        if p in trimmedMaxChords and p != currentGreedyChordPCs:
            # keep this chord
            if currentGreedyChord is None and c.offset != 0.0:
                currentGreedyChordNewLength = c.offset
                c.offset = 0.0
            elif currentGreedyChord is not None:
                currentGreedyChord.quarterLength = currentGreedyChordNewLength
                currentGreedyChordNewLength = 0.0
            currentGreedyChord = c
            for n in c:
                n.tie = None
                if n.pitch.accidental is not None:
                    n.pitch.accidental.displayStatus = None
            currentGreedyChordPCs = p
            currentGreedyChordNewLength += c.quarterLength
        else:
            # discard: fold this element's duration into the current chord
            currentGreedyChordNewLength += c.quarterLength
            # NOTE(review): removes from mObj while iterating it; music21
            # streams appear to tolerate this here -- confirm.
            mObj.remove(c)
    if currentGreedyChord is not None:
        currentGreedyChord.quarterLength = currentGreedyChordNewLength
        currentGreedyChordNewLength = 0.0

    # even chord lengths: snap small syncopated offsets back to the beat
    for i in range(1, len(mObj)):
        c = mObj[i]
        cOffsetCurrent = c.offset
        cOffsetSyncop = cOffsetCurrent - int(cOffsetCurrent)
        if round(cOffsetSyncop, 3) in [0.250, 0.125, 0.333, 0.063, 0.062]:
            lastC = mObj[i - 1]
            lastC.quarterLength -= cOffsetSyncop
            c.offset = int(cOffsetCurrent)
            c.quarterLength += cOffsetSyncop
    return mObj
def reduceThisMeasure(self, mI, measureIndex, maxChords, closedPosition, forceOctave):
    '''
    Build a reduced Measure (number `measureIndex`) from the
    measure-slice `mI`, keeping at most `maxChords` chords.

    Mutates self._lastPitchedObject and self._lastTs so that ties and
    time-signature changes connect consecutive reduced measures.
    '''
    m = stream.Measure()
    m.number = measureIndex
    # collapse all parts of the slice into one chordal stream first
    mIchord = mI.chordify()
    newPart = self.reduceMeasureToNChords(mIchord, maxChords,
                                          weightAlgorithm=self.qlbsmpConsonance,
                                          trimBelow=0.3)
    #newPart.show('text')
    cLast = None      # most recently inserted copy
    cLastEnd = 0.0    # end offset of that copy
    for cEl in newPart:
        cElCopy = copy.deepcopy(cEl)
        if 'Chord' in cEl.classes and closedPosition is not False:
            # optionally normalize chord voicing to closed position
            if forceOctave is not False:
                cElCopy.closedPosition(forceOctave=forceOctave, inPlace=True)
            else:
                cElCopy.closedPosition(inPlace=True)
            cElCopy.removeRedundantPitches(inPlace=True)
        newOffset = cEl.getOffsetBySite(newPart)

        # extend over gaps
        if cLast is not None:
            if round(newOffset - cLastEnd, 6) != 0.0:
                cLast.quarterLength += newOffset - cLastEnd
        cLast = cElCopy
        cLastEnd = newOffset + cElCopy.quarterLength
        m._insertCore(newOffset, cElCopy)

    # stretch the final element to fill the bar if a TimeSignature
    # context exists.  NOTE(review): assumes at least one element was
    # inserted (cLast not None) -- confirm upstream guarantee.
    tsContext = mI.parts[0].getContextByClass('TimeSignature')
    if tsContext is not None:
        if round(tsContext.barDuration.quarterLength - cLastEnd, 6) != 0.0:
            cLast.quarterLength += tsContext.barDuration.quarterLength - cLastEnd
    m.elementsChanged()  # finalize after _insertCore

    # add ties
    if self._lastPitchedObject is not None:
        firstPitched = m[0]
        if self._lastPitchedObject.isNote and firstPitched.isNote:
            # identical single pitch across the barline: tie it
            if self._lastPitchedObject.pitch == firstPitched.pitch:
                self._lastPitchedObject.tie = tie.Tie("start")
        elif self._lastPitchedObject.isChord and firstPitched.isChord:
            # identical chord (same size and pitches, position by
            # position) across the barline: tie it
            if len(self._lastPitchedObject) == len(firstPitched):
                allSame = True
                for pitchI in range(len(self._lastPitchedObject)):
                    if (self._lastPitchedObject.pitches[pitchI] != firstPitched.pitches[pitchI]):
                        allSame = False
                if allSame is True:
                    self._lastPitchedObject.tie = tie.Tie('start')
    self._lastPitchedObject = m[-1]

    # only restate the time signature when it differs from the last one
    sourceMeasureTs = mI.parts[0].getElementsByClass('Measure')[0].timeSignature
    if sourceMeasureTs != self._lastTs:
        m.timeSignature = copy.deepcopy(sourceMeasureTs)
    self._lastTs = sourceMeasureTs
    return m
def __repr__(self):
    '''Readable representation of this Volume via its realized value.'''
    realizedValue = round(self.realized, 2)
    return "<music21.volume.Volume realized=%s>" % realizedValue