def getOmrScore(self):
    '''
    Build and return a ScoreCorrector wrapping the OMR score.

    The parsed music21 score is cached on ``self.omrM21Score`` so the
    (expensive) parse happens at most once, but the ScoreCorrector
    itself is constructed fresh on every call — it is NOT stored.

    >>> omrPath = omr.correctors.K525omrShortPath
    >>> ground = omr.correctors.K525groundTruthShortPath
    >>> omrGTP = omr.evaluators.OmrGroundTruthPair(omr=omrPath, ground=ground)
    >>> ssOMR = omrGTP.getOmrScore()
    >>> ssOMR
    <music21.omr.correctors.ScoreCorrector object at 0x...>
    '''
    if self.debug is True:
        print('parsing OMR score')
    # Lazily parse the OMR file; reuse the cached score on later calls.
    if self.omrM21Score is None:
        self.omrM21Score = converter.parse(self.omrPath)
    return correctors.ScoreCorrector(self.omrM21Score)
def getGroundScore(self):
    '''
    Build and return a ScoreCorrector wrapping the ground-truth score.

    The parsed music21 score is cached on ``self.groundM21Score`` so the
    parse happens at most once; the ScoreCorrector is constructed fresh
    on every call.

    >>> omrPath = omr.correctors.K525omrShortPath
    >>> ground = omr.correctors.K525groundTruthShortPath
    >>> omrGTP = omr.evaluators.OmrGroundTruthPair(omr=omrPath, ground=ground)
    >>> ssGT = omrGTP.getGroundScore()
    >>> ssGT
    <music21.omr.correctors.ScoreCorrector object at 0x...>
    '''
    if self.debug is True:
        print('parsing Ground Truth score')
    # Lazily parse the ground-truth file; reuse the cached score afterwards.
    if self.groundM21Score is None:
        self.groundM21Score = converter.parse(self.groundPath)
    return correctors.ScoreCorrector(self.groundM21Score)
def autoCorrelationBestMeasure(inputScore):
    '''
    Essentially it's the ratio of amount of rhythmic similarity within a piece,
    which gives an upper bound on what the omr.corrector.prior measure should
    be able to achieve for the flagged measures.

    If a piece has low rhythmic similarity in general, then there's no way for
    a correct match to be found within the unflagged measures in the piece.

    Returns a tuple of the total number of NON-flagged measures and the total
    number of those measures that have a rhythmic match.

    Takes in a stream.Score.

    >>> c = converter.parse(omr.correctors.K525omrShortPath) # first 21 measures
    >>> totalUnflagged, totalUnflaggedWithMatches = omr.evaluators.autoCorrelationBestMeasure(c)
    >>> (totalUnflagged, totalUnflaggedWithMatches)
    (71, 64)
    >>> print( float(totalUnflaggedWithMatches) / totalUnflagged )
    0.901...

    Schoenberg has low autoCorrelation.

    >>> c = corpus.parse('schoenberg/opus19/movement6')
    >>> totalUnflagged, totalUnflaggedWithMatches = omr.evaluators.autoCorrelationBestMeasure(c)
    >>> (totalUnflagged, totalUnflaggedWithMatches)
    (18, 6)
    >>> print( float(totalUnflaggedWithMatches) / totalUnflagged )
    0.333...
    '''
    ss = correctors.ScoreCorrector(inputScore)
    # One rhythmic-hash string per measure, per part.
    allHashes = ss.getAllHashes()
    singleParts = ss.singleParts

    totalMeasures = 0
    totalMatches = 0

    for pNum, pHashArray in enumerate(allHashes):
        # Measures flagged as suspect by the OMR software are excluded entirely.
        incorrectIndices = singleParts[pNum].getIncorrectMeasureIndices()
        for i, mHash in enumerate(pHashArray):
            if i in incorrectIndices:
                continue
            totalMeasures += 1
            # Horizontal search: any OTHER measure in the same part with the
            # same rhythmic hash. any() short-circuits on the first match,
            # just like the original break-on-match loop.
            match = any(nHash == mHash
                        for j, nHash in enumerate(pHashArray)
                        if j != i)
            # Vertical search: the same measure index in any OTHER part.
            # NOTE(review): assumes all parts have at least i+1 measures,
            # as the original code did — an uneven part raises IndexError.
            if not match:
                match = any(otherHashArray[i] == mHash
                            for otherPNum, otherHashArray in enumerate(allHashes)
                            if otherPNum != pNum)
            if match:
                totalMatches += 1
    return (totalMeasures, totalMatches)