def test():
    """Build and display a 12-part demo score, one octave-shifted part per instrument."""
    from music21 import instrument as j
    score = stream.Score()
    voices = [
        j.Piccolo(), j.Xylophone(), j.Clarinet(), j.Oboe(),
        j.Violin(), j.ElectricGuitar(), j.Harp(), j.Piano(),
        j.Piano(), j.Piano(), j.Piano(), j.ElectricBass()
    ]
    octaves = [3, 2, 2, 2, 1, 1, 1, 2, 1, 0, -1, -2]
    for idx, (inst, octave) in enumerate(zip(voices, octaves)):
        # channel index 9 is skipped (presumably the percussion channel — confirm)
        inst.midiChannel = idx if idx < 9 else idx + 1
        part = addPart(instrument=inst)
        if octave != 0:
            part.transpose(12 * octave, inPlace=True)
        score.insert(0, part)
    score.show()
def indexed_seq_to_score(seq, index2note, note2index):
    """Convert a voice-major index sequence into a single-part Score.

    Slur symbols extend the duration of the previously started note.

    :param note2index: mapping from note name to integer index
    :param index2note: mapping from integer index to note name
    :param seq: voice major sequence of note indices
    :return: a music21 Score containing one Part
    """
    slur_index = note2index[SLUR_SYMBOL]
    score = stream.Score()
    voice_index = SOP_INDEX
    part = stream.Part(id='part' + str(voice_index))
    dur = 0
    # placeholder; never emitted because its accumulated duration stays 0
    f = note.Rest()
    for n in seq:
        if n == slur_index:
            # a slur prolongs the pending note by one subdivision
            dur += 1
        else:
            # flush the previously started note before beginning a new one
            if dur > 0:
                f.duration = duration.Duration(dur / SUBDIVISION)
                part.append(f)
            dur = 1
            f = standard_note(index2note[n])
    # flush the final pending note
    f.duration = duration.Duration(dur / SUBDIVISION)
    part.append(f)
    score.insert(part)
    return score
def museDataWorkToStreamScore(museDataWork, inputM21=None):
    '''Given an museDataWork object, build into a multi-part
    :class:`~music21.stream.Score` with metadata.

    This assumes that this MuseDataHandler defines a single work
    (with 1 or fewer reference numbers).

    if the optional parameter inputM21 is given a music21 Stream subclass,
    it will use that object as the outermost object. However, inner parts
    will always be made :class:`~music21.stream.Part` objects.
    '''
    from music21 import stream
    from music21 import metadata
    if inputM21 is None:  # identity check — was `== None`
        s = stream.Score()
    else:
        s = inputM21
    # each musedata part has complete metadata, so must get first
    mdpObjs = museDataWork.getParts()
    md = metadata.Metadata()
    s.insert(0, md)
    md.title = mdpObjs[0].getWorkTitle()
    md.movementNumber = mdpObjs[0].getMovementNumber()
    md.movementName = mdpObjs[0].getMovementTitle()
    # not obvious where composer is stored
    #md.composer = mdpObjs[0].getWorkNumber()
    #md.localeOfComposition = mdpObjs[0].getWorkNumber()
    md.number = mdpObjs[0].getWorkNumber()
    for mdPart in mdpObjs:
        musedataPartToStreamPart(mdPart, s)
    return s
def makeScoreWithPickup(self):
    """Make a short score with pick up and two voices."""
    score = stream.Score()
    for idx, pitch_name in enumerate(['C', 'A-']):
        voice = stream.Part()
        voice.id = 'part %d' % idx
        # Pickup measure: time signature, key, and a single quarter note.
        pickup = stream.Measure()
        pickup.append(meter.TimeSignature('4/4'))
        pickup.append(key.Key('c'))
        first_note = music21_note.Note(pitch_name)
        first_note.duration.quarterLength = 1
        pickup.append(first_note)
        voice.append(pickup)
        # Full measure: the note, its upward major second, then two repeats.
        full_measure = stream.Measure()
        full_measure.append(first_note)
        full_measure.append(first_note.transpose('M2'))
        full_measure.repeatAppend(first_note, 2)
        voice.append(full_measure)
        score.insert(0, voice)
    # Use score.show('text') to inspect all elements in indented text.
    return score
def merge_streams(
    *streams: stream.Stream,
    stream_class: Optional[Type[Union[stream.Voice, stream.Part, stream.Score]]] = None
) -> stream.Stream:
    """
    Creates a new stream by combining streams vertically.

    Args:
        *streams: Streams to merge.
        stream_class: Optional; The type of stream to convert to (Score, Part
          or Voice). By default, a generic Stream is returned.

    Returns:
        A stream of the requested class containing every input stream at offset 0.
    """
    if stream_class is stream.Score:
        post_stream = stream.Score()
    elif stream_class is stream.Part:
        post_stream = stream.Part()
    elif stream_class is stream.Voice:
        post_stream = stream.Voice()
    else:
        # None or any unrecognized class falls back to a generic Stream.
        # (Previously an unlisted non-None class raised UnboundLocalError
        # because the branches were independent `if` statements.)
        post_stream = stream.Stream()
    for stream_ in streams:
        post_stream.insert(0, stream_)
    return post_stream
def postProcess(output, n_tracks=4, n_bars=2, n_steps_per_bar=16):
    """Turn generator output into a Score with one Part per track.

    Consecutive equal pitches are merged into a single longer note, but a
    pending note is always flushed when the step index is a multiple of 4.
    """
    score = stream.Score()
    score.append(tempo.MetronomeMark(number=66))
    pitch_grid = binarise_output(output)
    rolled = np.vstack([
        pitch_grid[i].reshape([n_bars * n_steps_per_bar, n_tracks])
        for i in range(len(output))
    ])
    for track in range(n_tracks):
        column = rolled[:, track]
        part = stream.Part()
        held_pitch = int(column[0])
        held_len = 0
        for step, value in enumerate(column):
            value = int(value)
            if step > 0 and (value != held_pitch or step % 4 == 0):
                flushed = note.Note(held_pitch)
                flushed.duration = duration.Duration(held_len)
                part.append(flushed)
                held_len = 0
                held_pitch = value
            held_len = held_len + 0.25
        # emit the final pending note
        tail = note.Note(held_pitch)
        tail.duration = duration.Duration(held_len)
        part.append(tail)
        score.append(part)
    return score
def testBasicJ(self):
    """Round-trip a two-part, three-measure score through pickle freeze/thaw."""
    from music21 import stream, note, converter
    score = stream.Score()
    for pitch in ('C4', 'G4'):
        part = stream.Part()
        for _ in range(3):
            measure = stream.Measure()
            for _ in range(4):
                measure.append(note.Note(pitch))
            part.append(measure)
        score.insert(0, part)
    #score.show()
    frozen = converter.freezeStr(score, fmt='pickle')
    thawed = converter.thawStr(frozen)
    self.assertEqual(len(thawed.parts), 2)
    self.assertEqual(len(thawed.parts[0].getElementsByClass('Measure')), 3)
    self.assertEqual(len(thawed.parts[1].getElementsByClass('Measure')), 3)
    self.assertEqual(len(thawed.flat.notes), 24)
def combine_voices_harm(length: int, *voices, inst=None, time_sig='4/4'):
    """
    Combines the voices with music21 objects.

    :param length: common length of voices to consider
    :param voices: sequences of integers encoding notes
    :param inst: instruments
    :param time_sig: time signature
    :return stream of chords
    """
    # only the first positional argument is used; it holds the list of voices
    voice_list = voices[0]
    if inst is None:
        # alternate brass and piano by voice position
        inst = [instrument.BrassInstrument() if idx % 2 == 0 else instrument.Piano()
                for idx in range(len(voice_list))]
    score = stream.Score(timeSignature=time_sig)
    parts = []
    for voice_seq in voice_list:
        voice_part = stream.Part()
        for step in range(length):
            voice_part.append(translate(voice_seq[step], 1))
        parts.append(voice_part)
    # insert parts in reverse index order, pairing each with its instrument
    for idx in reversed(range(len(parts))):
        parts[idx].insert(0, inst[idx])
        score.insert(0, parts[idx])
    return score
def getPopulationScore(population: [individual.Individual]):
    """Render a population of individuals as a two-part Score:
    melody measures in part0, their accompanying chords in part1.
    """
    score = stream.Score(id='mainScore')
    melody_part = stream.Part(id='part0')
    chord_part = stream.Part(id='part1')
    for indiv in population:
        for m in indiv.measures:
            melody_measure = stream.Measure()
            chord_measure = stream.Measure()
            if m.chord is not None:
                chord_measure.append(chord.Chord(m.chord, quarterLength=4.0))
            filled = 0.0
            for entry in m.notes:
                # duration_value appears to be in whole-measure units; / 0.25
                # converts to quarterLengths
                ql = entry.duration.duration_value / 0.25
                if entry.pitch == 'REST':
                    elem = note.Rest()
                else:
                    elem = note.Note(entry.pitch)
                elem.duration = duration.Duration(quarterLength=ql)
                melody_measure.append(elem)
                filled += entry.duration.duration_value
            # Pad the last note if the measure is not filled
            # (NOTE(review): assumes the measure is non-empty — confirm)
            if filled < 1.0:
                melody_measure[len(melody_measure) - 1].duration.quarterLength += (1.0 - filled) / 0.25
            melody_part.append(melody_measure)
            chord_part.append(chord_measure)
    score.append(melody_part)
    score.append(chord_part)
    return score
def asOpus(self):
    '''
    returns all snippets as a :class:`~music21.stream.Opus` object

    >>> deduto = alpha.trecento.cadencebook.BallataSheet().workByTitle('deduto')
    >>> deduto.title
    'Deduto sey a quel'
    >>> dedutoScore = deduto.asOpus()
    >>> dedutoScore
    <music21.stream.Opus ...>
    >>> #_DOCS_SHOW dedutoScore.show('lily.png')
    '''
    o = stream.Opus()
    md = metadata.Metadata()
    o.insert(0, md)
    o.metadata.composer = self.composer
    o.metadata.title = self.title
    bs = self.snippets
    for thisSnippet in bs:
        # skip missing snippets and snippets with no voices at all
        if thisSnippet is None:
            continue
        if (thisSnippet.tenor is None
                and thisSnippet.cantus is None
                and thisSnippet.contratenor is None):
            continue
        # one empty Part per voice; filled from the snippet's cadence streams below
        s = stream.Score()
        for dummy in range(self.totalVoices):
            s.insert(0, stream.Part())
        for partNumber, snippetPart in enumerate(
                thisSnippet.getElementsByClass('TrecentoCadenceStream')):
            # attach the snippet name as a TextExpression below the last voice:
            # at the end of front-padded snippets, at the start otherwise
            if thisSnippet.snippetName != "" and partNumber == self.totalVoices - 1:
                textEx = expressions.TextExpression(thisSnippet.snippetName)
                textEx.positionVertical = 'below'
                if 'FrontPaddedSnippet' in thisSnippet.classes:
                    if snippetPart.hasMeasures():
                        snippetPart.getElementsByClass('Measure')[-1].insert(0, textEx)
                    else:
                        snippetPart.append(textEx)
                else:
                    if snippetPart.hasMeasures():
                        snippetPart.getElementsByClass('Measure')[0].insert(0, textEx)
                    else:
                        snippetPart.insert(0, textEx)
            # if currentTs is None or timeSig != currentTs:
            #     s.append(timeSig)
            #     currentTs = timeSig
            try:
                currentScorePart = s.parts[partNumber]
            except IndexError:
                continue  # error in coding
            # copy everything except time signatures into the score part
            for thisElement in snippetPart:
                if 'TimeSignature' in thisElement.classes:
                    continue
                currentScorePart.append(thisElement)
        o.insert(0, s)
    return o
def testJoinPartStaffsE(self):
    '''
    Measure numbers existing only in certain PartStaffs: don't collapse together
    '''
    from music21 import corpus
    from music21 import layout
    source = corpus.parse('schoenberg/opus19', 2)
    score = stream.Score()
    upper = stream.PartStaff()
    lower = stream.PartStaff()
    score.append(upper)
    score.append(lower)
    score.insert(0, layout.StaffGroup([upper, lower]))
    rh1 = source.parts[0].measure(1)  # RH
    lh2 = source.parts[1].measure(2)  # LH
    rh3 = source.parts[0].measure(3)  # RH
    upper.append(rh1)
    upper.append(rh3)
    lower.insert(rh1.offset, lh2)
    root = self.getET(score)
    first, second, third = root.findall('part/measure')
    self.assertEqual({staff.text for staff in first.findall('note/staff')}, {'1'})
    self.assertEqual({staff.text for staff in second.findall('note/staff')}, {'2'})
    self.assertEqual({staff.text for staff in third.findall('note/staff')}, {'1'})
def fbFeatureExtraction():
    """Extract a pitch-class distribution before and after ornamenting and
    realizing the figured bass of the ISMIR 2011 example."""
    exampleFB = converter.parse('ismir2011_fb_example1b.xml')
    fe1 = features.jSymbolic.\
        PitchClassDistributionFeature(exampleFB)
    print(fe1.extract().vector)
    # [0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.6666666666666666, 0.0, 0.0, 1.0, 0.0, 0.0]
    upperPart = exampleFB.parts[0]
    # ornament a note in the second container with a turn
    turnNote = upperPart[1][5]
    turnNote.expressions.append(expressions.Turn())
    turnRealized = expressions.realizeOrnaments(turnNote)
    # ornament a note in the third container with a mordent
    mordentNote = upperPart[2][2]
    mordentNote.expressions.append(expressions.Mordent())
    mordentRealized = expressions.realizeOrnaments(mordentNote)
    # splice the realized ornaments back in place of the originals
    upperPart[1].elements = [upperPart[1][4]]
    upperPart[1].append(turnRealized)
    upperPart[2].elements = [upperPart[2][0], upperPart[2][1]]
    upperPart[2].append(mordentRealized)
    fb1 = figuredBass.realizer.figuredBassFromStream(exampleFB.parts[1])
    #realization = fb1.realize()
    sol1 = fb1.generateRandomRealization()
    exampleFBOut = stream.Score()
    exampleFBOut.insert(0, exampleFB.parts[0])
    exampleFBOut.insert(0, sol1.parts[0])
    exampleFBOut.insert(0, sol1.parts[1])
    fe1.setData(exampleFBOut)
    print(fe1.extract().vector)
def generateAllRealizations(self):
    '''
    Generates all unique realizations as a :class:`~music21.stream.Score`.

    .. warning:: This method is unoptimized, and may take a prohibitive
        amount of time for a Realization which has more than 100 solutions.
    '''
    progressions = self.getAllPossibilityProgressions()
    if not progressions:
        raise FiguredBassLineException('Zero solutions')
    combined = stream.Score()
    # seed the score with the parts of the first realization
    firstRealization = self.generateRealizationFromPossibilityProgression(progressions[0])
    for voicePart in firstRealization:
        combined.append(voicePart)
    # append every later realization's measures onto the matching parts
    for progression in progressions[1:]:
        realization = self.generateRealizationFromPossibilityProgression(progression)
        for partIdx in range(len(realization)):
            for bar in realization[partIdx]:
                combined[partIdx].append(bar)
    return combined
def evaluate(self, chorale, subdivision):
    """Estimate a key-signature index for every subdivision step of *chorale*.

    Measures are rebuilt by hand so the analysis also works for streams
    parsed from MIDI files.
    """
    measured = stream.Score()
    for voice in chorale.parts:
        measured.append(voice.makeMeasures())
    analyzer = analysis.floatingKey.KeyAnalyzer(measured)
    analyzer.windowSize = self.window_size
    keys_per_measure = analyzer.run()
    offset_map = measured.parts.measureOffsetMap()
    num_steps = int(chorale.duration.quarterLength * subdivision)  # in 16th notes
    key_signatures = np.zeros((num_steps, ))
    bar = -1
    for step in range(num_steps):
        # advance to the next measure whenever the beat hits a measure offset
        if step / subdivision in offset_map:
            bar += 1
            # todo remove this trick: problem with the last measures...
            if bar == len(keys_per_measure):
                bar -= 1
        key_signatures[step] = self.get_index(keys_per_measure[bar].sharps)
    return np.array(key_signatures, dtype=np.int32)
def testJoinPartStaffsB(self):
    '''
    Gapful first PartStaff, ensure <backup> in second PartStaff correct
    '''
    from music21 import layout
    from music21 import note
    score = stream.Score()
    gappy = stream.PartStaff()
    gappy.insert(0, note.Note())
    # Gap between offsets 1 and 3
    gappy.insert(3, note.Note())
    solid = stream.PartStaff()
    solid.insert(0, note.Note())
    score.append(gappy)
    score.append(solid)
    score.insert(0, layout.StaffGroup([gappy, solid]))
    root = self.getET(score)
    notes = root.findall('.//note')
    forward = root.find('.//forward')
    backup = root.find('.//backup')
    # the backup must cover both notes plus the forward that fills the gap
    expected = (
        int(notes[0].find('duration').text)
        + int(forward.find('duration').text)
        + int(notes[1].find('duration').text)
    )
    self.assertEqual(int(backup.find('duration').text), expected)
def findRetrogradeVoices(show=True):
    '''
    the structure of the piece strongly suggests a retrograde solution
    (e.g., there is a cadence in m5 and five measures from the end and
    one at the exact center). This method tries all transpositions of
    one voice vs. the other and gives positive points to intervals of
    3, 4, 5, 6, and 8 (incl. tritones, since they might be fixed w/ other
    voices; 4th is included since there could be a 3rd or 5th below it).
    '''
    for transpose in [1, 2, -2, 3, -3, 4, -4]:
        for invert in [False, True]:
            qj1 = getQJ()
            qj2 = getQJ()
            if transpose != 1:
                transposeStreamDiatonic(qj2, transpose)
            if invert:
                qj2.invertDiatonic(qj2.flat.notesAndRests[0], inPlace=True)
            qj2 = reverse(qj2, makeNotation=False)
            qj = stream.Score()
            qj.insert(0, qj2.flat)
            qj.insert(0, qj1.flat)
            qjChords = qj.chordify()
            consScore = 0
            totIntervals = 0
            for n in qjChords.flat.notesAndRests:
                strength = getStrengthForNote(n)
                if n.isRest is True or len(n.pitches) < 2:
                    thisScore = strength
                else:
                    int1 = interval.Interval(n.pitches[0], n.pitches[1])
                    #print(int1.generic.simpleUndirected)
                    if int1.generic.simpleUndirected in [1, 3, 4, 5]:
                        thisScore = strength
                    elif int1.generic.simpleUndirected == 6:  # less good
                        thisScore = strength / 2.0
                    else:
                        thisScore = -2 * strength
                # weight by duration, capping long notes at a weight of 8
                if n.duration.quarterLength < 2:
                    thisScore = thisScore * n.duration.quarterLength
                else:
                    thisScore = thisScore * 8
                consScore += thisScore
                totIntervals += 1
                n.lyric = str(thisScore)
            finalScore = int(100 * (consScore + 0.0) / totIntervals)
            qj.insert(0, qjChords.flat)
            qj2.flat.notesAndRests[0].addLyric('Trans: ' + str(transpose))
            qj2.flat.notesAndRests[0].addLyric('Invert: ' + str(invert))
            qj1.flat.notesAndRests[0].addLyric('Score: ' + str(finalScore))
            if show:
                qj.show()
            else:
                if invert:
                    invStr = "Invert"
                else:
                    invStr = " "
                # was a Python 2 print statement; now a print() call
                print(str(transpose) + " " + invStr + " " + str(finalScore))
def systemScoreFromScore(self, scoreElement, scoreObj=None):
    '''
    returns an :class:`~music21.stream.Score` object from a <score> tag.

    The Score object is not a standard music21 Score object which contains
    parts, then measures, then voices, but instead contains systems which
    optionally contain voices, which contain parts.  No measures have yet
    been created.
    '''
    if scoreObj is None:
        scoreObj = stream.Score()
    # exactly one <systems> tag is expected under <score>
    systemsList = scoreElement.findall('systems')
    if not systemsList:
        raise CapellaImportException(
            "Cannot find a <systems> tag in the <score> object")
    elif len(systemsList) > 1:
        raise CapellaImportException(
            "Found more than one <systems> tag in the <score> object, what does this mean?")
    systemsElement = systemsList[0]
    systemList = systemsElement.findall('system')
    if not systemList:
        raise CapellaImportException(
            'Cannot find any <system> tags in the <systems> tag in the <score> object')
    for systemNumber, thisSystem in enumerate(systemList):
        systemObj = self.systemFromSystem(thisSystem)
        systemObj.systemNumber = systemNumber + 1  # 1 indexed, like musicians think
        # _appendCore skips per-append bookkeeping; the elementsChanged()
        # call below finalizes the stream after the batch of appends
        scoreObj._appendCore(systemObj)
    scoreObj.elementsChanged()
    return scoreObj
def makeExampleScore():
    r'''
    Makes example score for use in stream-to-timespan conversion docs.

    >>> score = timespans.makeExampleScore()
    >>> score.show('text')
    {0.0} <music21.stream.Part ...>
        {0.0} <music21.instrument.Instrument PartA: : >
        {0.0} <music21.stream.Measure 1 offset=0.0>
            {0.0} <music21.clef.BassClef>
            {0.0} <music21.meter.TimeSignature 2/4>
            {0.0} <music21.note.Note C>
            {1.0} <music21.note.Note D>
        {2.0} <music21.stream.Measure 2 offset=2.0>
            {0.0} <music21.note.Note E>
            {1.0} <music21.note.Note F>
        {4.0} <music21.stream.Measure 3 offset=4.0>
            {0.0} <music21.note.Note G>
            {1.0} <music21.note.Note A>
        {6.0} <music21.stream.Measure 4 offset=6.0>
            {0.0} <music21.note.Note B>
            {1.0} <music21.note.Note C>
            {2.0} <music21.bar.Barline style=final>
    {0.0} <music21.stream.Part ...>
        {0.0} <music21.instrument.Instrument PartB: : >
        {0.0} <music21.stream.Measure 1 offset=0.0>
            {0.0} <music21.clef.BassClef>
            {0.0} <music21.meter.TimeSignature 2/4>
            {0.0} <music21.note.Note C>
        {2.0} <music21.stream.Measure 2 offset=2.0>
            {0.0} <music21.note.Note G>
        {4.0} <music21.stream.Measure 3 offset=4.0>
            {0.0} <music21.note.Note E>
        {6.0} <music21.stream.Measure 4 offset=6.0>
            {0.0} <music21.note.Note D>
            {2.0} <music21.bar.Barline style=final>
    '''
    from music21 import converter
    from music21 import stream
    score = stream.Score()
    sources = (
        ('tinynotation: 2/4 C4 D E F G A B C', 'PartA'),
        ('tinynotation: 2/4 C2 G E D', 'PartB'),
    )
    for notation, partId in sources:
        parsed = converter.parse(notation)
        parsed.makeMeasures(inPlace=True)
        part = stream.Part()
        for element in parsed:
            part.append(element)
        inst = part.getInstrument()
        inst.partId = partId
        part.insert(0, inst)
        score.insert(0, part)
    return score
def __init__(self, time_sig, is_rand):
    """Build a three-part ('bass'/'mid'/'high') drum score.

    :param time_sig: time signature as a fraction string (e.g. '3/4');
        stored as ``int(Fraction(time_sig) * 4)``
    :param is_rand: randomness flag, presumably consumed by get_part —
        confirm against that method
    """
    self.time_sig = int(Fraction(time_sig) * 4)
    self.is_rand = is_rand
    self.score = stream.Score(id="Drums")
    # all three parts start at offset 0 so they sound together
    self.score.insert(0, self.get_part('bass'))
    self.score.insert(0, self.get_part('mid'))
    self.score.insert(0, self.get_part('high'))
    # NOTE(review): chordify() returns a new chordified stream and does not
    # modify self.score in place, so this result is discarded — confirm
    # whether it was meant to be kept or assigned.
    self.score.chordify()
def create_midi(prediction_output, n):
    """ convert the output from the prediction to notes and create a midi file from the notes

    Each prediction line is whitespace-separated:
    ``<instrument> <note>... <duration> <offset>``.
    The resulting file is written as ``<word>_<word>.mid`` with two random words.
    """
    # events grouped per instrument token
    instruments = {}
    for pattern in prediction_output:
        parts = pattern.split()
        _instrument, _notes, _duration, _offset = (parts[0], parts[1:-2],
                                                   parts[-2], parts[-1])
        if _instrument not in instruments:
            instruments[_instrument] = []
        # several pitches -> chord; one pitch -> note; none -> silently skipped
        if len(_notes) > 1:
            _chord = chord.Chord(_notes)
            _chord = set_duration(_chord, _duration, _offset)
            instruments[_instrument].append(_chord)
        elif len(_notes) == 1:
            _note = note.Note(_notes[0])
            _note = set_duration(_note, _duration, _offset)
            instruments[_instrument].append(_note)
    midi_stream = stream.Score()
    for instrument_key, events in instruments.items():
        try:
            current_instrument = instrument.instrumentFromMidiProgram(
                int(instrument_key))
        except Exception:  # was a bare except; keep the best-effort fallback
            print('%s: WARNING: invalid instrument!' % instrument_key)
            current_instrument = get_random_instrument()
            print('%s: INFO: selected random!' % current_instrument.instrumentName)
        current_part = stream.Part()
        # only one part is supported now
        current_part.append(meter.TimeSignature('3/4'))
        current_part.append(current_instrument)
        for event in events:
            current_part.append(event)
        midi_stream.append(current_part)
    if args.optimizer:
        for p in midi_stream.parts:
            p.makeMeasures(inPlace=True)
        midi_stream.makeNotation(inPlace=True)
    names = [s.lower() for s in sample(RandomWords().get_random_words(), 2)]
    midi_stream.write('midi', fp='%s_%s.mid' % tuple(names))
def notesAndDurationsToStream(notesList, durationList, scNotes=None,
                              removeRestsAtBeginning=True, qle=None):
    '''
    take a list of :class:`~music21.note.Note` objects or rests
    and an equally long list of how long
    each ones lasts in terms of samples and returns a
    Stream using the information from quarterLengthEstimation
    and quantizeDurations.

    returns a :class:`~music21.stream.Score` object, containing
    a metadata object and a single :class:`~music21.stream.Part` object, which in turn
    contains the notes, etc.  Does not run :meth:`~music21.stream.Stream.makeNotation`
    on the Score.

    >>> durationList = [20, 19, 10, 30, 6, 21]
    >>> n = note.Note
    >>> noteList = [n('C#4'), n('D5'), n('B4'), n('F#5'), n('C5'), note.Rest()]
    >>> s,lengthPart = audioSearch.notesAndDurationsToStream(noteList, durationList)
    >>> s.show('text')
    {0.0} <music21.metadata.Metadata object at ...>
    {0.0} <music21.stream.Part ...>
        {0.0} <music21.note.Note C#>
        {1.0} <music21.note.Note D>
        {2.0} <music21.note.Note B>
        {2.5} <music21.note.Note F#>
        {4.0} <music21.note.Note C>
        {4.25} <music21.note.Rest rest>
    '''
    # rounding lengths
    p2 = stream.Part()
    # If the score is available, the quarter estimation is better:
    # It could take into account the changes of tempo during the song, but it
    # would take more processing time
    if scNotes is not None:  # was `!= None`
        fe = features.native.MostCommonNoteQuarterLength(scNotes)
        mostCommon = fe.extract().vector[0]
        qle = quarterLengthEstimation(durationList, mostCommon)
    else:
        # this is for the transcriber (no reference score available)
        qle = quarterLengthEstimation(durationList)
    for i, sampleLength in enumerate(durationList):
        notesList[i].quarterLength = quantizeDuration(sampleLength / qle)
        # drop leading rests until the first sounding note appears
        if not (removeRestsAtBeginning and (notesList[i].name == "rest")):
            p2.append(notesList[i])
            removeRestsAtBeginning = False
    sc = stream.Score()
    sc.metadata = metadata.Metadata()
    sc.metadata.title = 'Automatic Music21 Transcription'
    sc.insert(0, p2)
    if scNotes is None:
        # case transcriber
        return sc, len(p2)
    else:
        # case follower
        return sc, qle
def testMeasurePadding(self):
    """paddingLeft + paddingRight filling the bar suppresses rests;
    shrinking paddingLeft re-introduces one."""
    score = stream.Score([converter.parse('tinyNotation: 4/4 c4')])
    first_measure = score[stream.Measure].first()
    first_measure.paddingLeft = 2.0
    first_measure.paddingRight = 1.0
    tree = self.getET(score)
    self.assertEqual(len(tree.findall('.//rest')), 0)
    score[stream.Measure].first().paddingLeft = 1.0
    tree = self.getET(score)
    self.assertEqual(len(tree.findall('.//rest')), 1)
def filterExtraMeasuresWithoutParts(self, omr):
    """Wrap the non-zero-length measures of *omr* in a fresh one-part Score."""
    kept = stream.Part()
    for bar in omr.getElementsByClass(stream.Measure):
        if bar.duration.quarterLength > 0:
            kept.append(bar)
    wrapper = stream.Score()
    wrapper.append(kept)
    return wrapper
def wrap_query_as_piece(query):
    """Parse a tinynotation *query* and wrap it in a one-part Score named 'query'."""
    query_part = stream.Part()
    query_part.partName = "query"
    for bar in converter.parse("tinynotation: %s" % query):
        query_part.append(bar)
    piece = stream.Score()
    piece.append(query_part)
    return piece
def get_score(self):
    """Assemble the bass line as a Score: one numbered bar per chord in self.progs."""
    line = stream.Part()
    for num_id, prog in enumerate(self.progs, start=1):
        line.append(self.bass_bar(chord=prog, num_id=num_id))
    score = stream.Score()
    # inserted without an explicit offset, matching the original call shape
    score.insert(instrument.Bass())
    score.insert(0, line)
    return score
def getSymbol(self, measure):
    """Run a single measure through the OMR alignment filter and return its symbol."""
    aligner = PipelineAlignment()
    wrapper = stream.Score()
    holder = stream.Part()
    # if measure.hasVoices():
    #     measure = measure.getElementsByClass(stream.Voice)[0]
    holder.append(measure)
    wrapper.append(holder)
    # filterOMR returns a pair; the symbol is the second element
    filtered = aligner.filterOMR(wrapper)
    return filtered[1]
def test_indexer_init_3(self):
    # The first six tests ensure __init__() accepts the six valid types of "score" argument.
    # 3: stream.Score, given a Score
    class ScoreIndexer(indexer.Indexer):
        required_score_type = 'stream.Score'
    given = stream.Score([stream.Part(), stream.Part()])
    made = ScoreIndexer(given)
    self.assertTrue(isinstance(made._score, stream.Score))
    self.assertSequenceEqual(given, made._score)
def test_indexer_init_2b(self):
    # The first six tests ensure __init__() accepts the six valid types of "score" argument.
    # 2b: stream.Part, given a Score (no parts in the Score)
    class PartIndexer(indexer.Indexer):
        required_score_type = 'stream.Part'
    given = stream.Score([])
    made = PartIndexer(given)
    self.assertTrue(isinstance(made._score, list))
    self.assertSequenceEqual(given, made._score)
def make_music(model_file):
    """Walk a Markov chain loaded from *model_file* (JSON) and show the
    resulting four-voice (SATB) score.

    Each chain state decodes (via make_tuple) to one (pitch, duration)
    pair per voice; at every step only the voice(s) currently furthest
    behind in accumulated time receive their next note, keeping the four
    parts aligned.
    """
    with open(model_file) as f:
        model = json.load(f)
    chain = Chain.from_json(model)
    score = stream.Score()
    soprano_part = stream.Part()
    soprano_part.insert(0, instrument.Soprano())
    alto_part = stream.Part()
    alto_part.insert(0, instrument.Alto())
    tenor_part = stream.Part()
    tenor_part.insert(0, instrument.Tenor())
    bass_part = stream.Part()
    bass_part.insert(0, instrument.Bass())
    # accumulated quarter-length per voice, tracked exactly with Decimal
    counter = {
        Voice.Soprano: Decimal(0.),
        Voice.Alto: Decimal(0.),
        Voice.Tenor: Decimal(0.),
        Voice.Bass: Decimal(0.),
    }
    # most recent (pitch, duration) pair per voice
    current_state = {
        Voice.Soprano: None,
        Voice.Alto: None,
        Voice.Tenor: None,
        Voice.Bass: None,
    }
    parts = {
        Voice.Soprano: soprano_part,
        Voice.Alto: alto_part,
        Voice.Tenor: tenor_part,
        Voice.Bass: bass_part,
    }
    for state in chain.walk():
        S, A, T, B = make_tuple(state)
        current_state[Voice.Soprano] = S
        current_state[Voice.Alto] = A
        current_state[Voice.Tenor] = T
        current_state[Voice.Bass] = B
        # only the voice(s) with the least accumulated time get a new event
        min_value = min(counter.values())
        min_voices = [k for k in counter if counter[k] == min_value]
        for voice in min_voices:
            pitch, d = current_state[voice]
            if pitch == 'rest':
                n = note.Rest(duration=duration.Duration(d))
            else:
                n = note.Note(pitch, duration=duration.Duration(d))
            parts[voice].append(n)
            counter[voice] += Decimal(d)
    # insert each part at its voice's canonical position
    for k, v in parts.items():
        score.insert(Voice.order(k), v)
    score.show()
def replaceMeasure(self, omr, mybar, mIndex):
    """Return a new one-part Score copying the measures of omr's first part,
    with the measure at *mIndex* replaced by the first measure of *mybar*.

    :param omr: score whose first part supplies the measures
    :param mybar: stream whose first Measure is the replacement
    :param mIndex: zero-based index of the measure to replace
    """
    result = stream.Score()
    part = stream.Part()
    replacement = mybar.getElementsByClass(stream.Measure)[0]
    # hoist the measure query out of the loop (was re-filtered and
    # re-indexed on every iteration)
    for i, bar in enumerate(omr.parts[0].getElementsByClass(stream.Measure)):
        part.append(replacement if i == mIndex else bar)
    result.append(part)
    return result