Example No. 1
    def __init__(self, path):
        self.path = path
        self.nfft = 2048
        self.overlap = 0.5
        self.hop_length = int(self.nfft * (1 - self.overlap))
        self.n_bins = 72
        self.mag_exp = 4
        self.pre_post_max = 6
        self.threshold = -71

        self.audio_sample, self.sr = self.load()
        self.cqt = self.compute_cqt()
        self.thresh_cqt = self.compute_thresholded_cqt(self.cqt)

        self.onsets = self.compute_onset(self.thresh_cqt)

        self.tempo, self.beats, self.mm = self.estimate_tempo()

        self.music_info = np.array([
            self.estimate_pitch_and_notes(i)
            for i in range(len(self.onsets[1]) - 1)
        ])
        self.note_info = list(self.music_info[:, 2])

        self.stream = Stream()
Example No. 2
def to_musicxml(sc_enc):
    "Converts Chord tuples (see chorales.prepare_poly) to musicXML"
    timestep = Duration(1. / FRAMES_PER_CROTCHET)
    musicxml_score = Stream()
    prev_chord = dict() # midi->(note instance from previous chord), used to determine tie type (start, continue, stop)
    for has_fermata, chord_notes in sc_enc:
        notes = []
        if len(chord_notes) == 0: # no notes => rest for this frame
            r = Rest()
            r.duration = timestep
            musicxml_score.append(r)
        else:
            for note_tuple in chord_notes:
                note = Note()
                if has_fermata:
                    note.expressions.append(expressions.Fermata())
                note.midi = note_tuple[0]
                if note_tuple[1]: # current note is tied
                    note.tie = Tie('stop')
                    if prev_chord and note.pitch.midi in prev_chord:
                        prev_note = prev_chord[note.pitch.midi]
                        if prev_note.tie is None:
                            prev_note.tie = Tie('start')
                        else:
                            prev_note.tie = Tie('continue')
                notes.append(note)
            prev_chord = { note.pitch.midi : note for note in notes }
            chord = Chord(notes=notes, duration=timestep)
            if has_fermata:
                chord.expressions.append(expressions.Fermata())
            musicxml_score.append(chord)
    return musicxml_score
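
The `sc_enc` argument above is a sequence of `(has_fermata, chord_notes)` pairs, where each entry of `chord_notes` is a `(midi_number, is_tied)` tuple. A minimal usage sketch, with the imports and the `FRAMES_PER_CROTCHET` value filled in as assumptions rather than taken from the original project:

from music21.duration import Duration
from music21.stream import Stream
from music21.note import Note, Rest
from music21.chord import Chord
from music21.tie import Tie
from music21 import expressions

FRAMES_PER_CROTCHET = 4  # assumed resolution: four frames per quarter note

sc_enc = [
    (False, [(60, False), (64, False)]),  # C4/E4 dyad, freshly attacked
    (False, [(60, True), (64, True)]),    # same dyad held over (tied)
    (False, []),                          # empty frame becomes a Rest
]
score = to_musicxml(sc_enc)
score.write('musicxml', fp='example.musicxml')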
Example No. 3
 def raiseLeadingTone(self, stream1, minorScale):
     '''Given a stream of notes and a minor scale object, returns a new
     stream that raises all the leading tones of the original stream. Also
     raises the sixth if applicable to avoid augmented intervals.'''
     notes2 = stream1.notes[:]
     stream2 = Stream(notes2)
     sixth = minorScale.pitchFromScaleDegree(6).name
     seventh = minorScale.pitchFromScaleDegree(7).name
     tonic = minorScale.getTonic().name
     for i in range(len(stream1.notes)-2):
         note1 = stream1.notes[i]
         note2 = stream1.notes[i+1]
         note3 = stream1.notes[i+2]
         if note1 is not None and note2 is not None and note3 is not None:
             if note1.name == sixth and note2.name == seventh and note3.name == tonic:
                 newNote1 = interval.transposeNote(note1, "A1")
                 newNote2 = interval.transposeNote(note2, "A1")
                 stream2.notes[i] = newNote1
                 stream2.notes[i+1] = newNote2
     for i in range(len(stream1.notes)-1):
         note1 = stream1.notes[i]
         note2 = stream1.notes[i+1]
         if note1 is not None and note2 is not None:
             if note1.name == seventh and note2.name == tonic:
                 newNote = interval.transposeNote(note1, "A1")
                 stream2.notes[i] = newNote
     return stream2
Example No. 4
 def generateFirstSpecies(self, stream1, minorScale):
     '''Given a stream (the cantus firmus) and the stream's key in the
     form of a MinorScale object, generates a stream of first species
     counterpoint that follows the rules of 21M.301.'''
     # DOES NOT YET CHECK FOR TOO MANY THIRDS/SIXTHS IN A ROW,
     # DOES NOT YET RAISE LEADING TONES, AND DOES NOT CHECK FOR NOODLING.
     stream2 = Stream([])
     firstNote = stream1.notes[0]
     choices = [interval.transposeNote(firstNote, "P1"),\
                interval.transposeNote(firstNote, "P5"),\
                interval.transposeNote(firstNote, "P8")]
     note1 = random.choice(choices)
     note1.duration = firstNote.duration
     stream2.append(note1)
     afterLeap = False
     for i in range(1, len(stream1.notes)):
         prevFirmus = stream1.notes[i-1]
         currFirmus = stream1.notes[i]
         prevNote = stream2.notes[i-1]
         choices = self.generateValidNotes(prevFirmus, currFirmus, prevNote, afterLeap, minorScale)
         if len(choices) == 0:
             raise ModalCounterpointException("Sorry, please try again")
         newNote = random.choice(choices)
         newNote.duration = currFirmus.duration
         stream2.append(newNote)
         int = interval.notesToInterval(prevNote, newNote)
         if int.generic.undirected > 3: afterLeap = True
         else: afterLeap = False
     return stream2
Example No. 5
def decode_score(encoding, num_measures, ts, image=False):
    score = Stream()
    score.timeSignature = TimeSignature(ts)
    steps_per_measure = len(encoding) / num_measures
    measure_ind = 0
    while measure_ind < num_measures:
        start_beat = int(measure_ind * steps_per_measure)
        end_beat = int((measure_ind + 1) * steps_per_measure)
        measure = Measure()
        for beat_ind in range(start_beat, end_beat):
            if image:
                played_pitches = np.nonzero(encoding[beat_ind])[0]
            else:
                played_pitches = np.nonzero(encoding[beat_ind])
            if len(played_pitches) == 0:
                measure.append(Rest(quarterLength=4.0 / GRANULARITY))
            else:
                played_notes = [
                    midi_to_note(int(pitch + MIN_PITCH))
                    for pitch in played_pitches
                ]
                chord = Chord(played_notes, quarterLength=4.0 / GRANULARITY)
                measure.append(chord)
        score.append(measure)
        measure_ind += 1
    return score
Example No. 6
 def getChordSequence(self):
     s = Stream()
     for part in self.getParts():  # type: Part
         for elt in part.recurse().getElementsByClass(
                 ChordSymbol):  # type: ChordSymbol
             s.insert(elt.getOffsetInHierarchy(part), copy(elt))
     return s
Example No. 7
def createScalePart():
    c = QuarterNote(); c.step = "C"
    d = QuarterNote(); d.step = "D"
    # etc
    b = QuarterNote(); b.step = "B"
    
    s1 = Stream()
    s1.append([c, d, b])
    print(s1.lily)
    lS1 = LilyString("{" + s1.lily + "}")
    lS1.showPNG()
Example No. 8
def song_notes(score):
    # For some reason Stream([n for n in score.flat.notes]) accumulates
    # notes in the wrong order, so we append them explicitly.

    stream = Stream()
    for n in score.flat.notes.stripTies():
        if n.isChord:
            stream.append(n[-1])
        else:
            stream.append(n)
    return stream
Example No. 9
    def create_note_stream(self, notes_sequence):
        """
        Creates a music21.stream.Stream object to which notes are added.

        :param notes_sequence: sequence of notes to add in a stream.
        :return: a Stream of Note objects.
        """
        notes_arr = self.get_notes_from_sequence(notes_sequence)
        stream = Stream()
        for note in notes_arr:
            stream.append(note)
        return stream
Example No. 11
def generate_notes_in_batch(note_params_df,
                            output_dir,
                            audio_format='flac',
                            sample_rate=44100):
    """
    Generates a batch of single note samples from the given table of parameters.

    `note_params_df` - a Pandas Dataframe with columns:
    `midi_number, midi_instrument, volume, duration, tempo`. Their meaning is the same as in generate_single_note.
    `output_dir` - output directory for the MIDI files

    All samples go into one combined MIDI file, and the synthesized audio is then split into one audio file per sample in `output_dir` (see the audio index CSV).
    """
    os.makedirs(output_dir, exist_ok=True)

    fs = FluidSynth(sample_rate=sample_rate)

    stream = Stream()

    for i, row in note_params_df.iterrows():
        stream.append(MetronomeMark(number=row['tempo']))
        stream.append(make_instrument(int(row['midi_instrument'])))
        duration = row['duration']
        stream.append(
            chord_with_volume(
                Chord([
                    Note(midi=int(row['midi_number']),
                         duration=Duration(duration))
                ]), row['volume']))
        stream.append(Rest(duration=Duration(2 * duration)))

    midi_file = '{0}/all_samples.midi'.format(output_dir)
    audio_file_stereo = '{0}/all_samples_stereo.{1}'.format(
        output_dir, audio_format)
    audio_file = '{0}/all_samples.{1}'.format(output_dir, audio_format)
    audio_index_file = '{0}/all_samples_index.csv'.format(output_dir)

    # TODO: We currently assume some fixed duration and tempo (1.0, 120)!!!
    # The parts should be split according to an index.
    audio_index = make_audio_index(note_params_df, 3.0, 0.5, sample_rate)
    audio_index.to_csv(audio_index_file)

    write_midi(stream, midi_file)

    fs.midi_to_audio(midi_file, audio_file_stereo)

    convert_to_mono(audio_file_stereo, audio_file)
    os.remove(audio_file_stereo)

    x, sample_rate = sf.read(audio_file)

    parts = split_audio_to_parts(x, sample_rate, audio_index)
    store_parts_to_files(parts, sample_rate, output_dir, audio_format)
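
A hedged sketch of how `generate_notes_in_batch` might be called. The parameter values are illustrative, and the call assumes the project's helpers used above (`make_instrument`, `chord_with_volume`, `make_audio_index`, `write_midi`, ...) plus a working FluidSynth installation:

import pandas as pd

note_params_df = pd.DataFrame({
    'midi_number':     [60, 64, 67],     # C4, E4, G4
    'midi_instrument': [0, 0, 30],       # piano, piano, overdriven guitar
    'volume':          [1.0, 0.8, 0.6],
    'duration':        [1.0, 1.0, 1.0],  # quarter-note lengths
    'tempo':           [120, 120, 120],
})
generate_notes_in_batch(note_params_df, 'output/samples', audio_format='flac')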
Example No. 12
 def _getStream(self):
     if self.spineCollection is None:
         raise HumdrumException("parse lines first!")
     elif self.spineCollection.spines is None:
         raise HumdrumException("really? not a single spine in your data? um, not my problem!")
     elif self.spineCollection.spines[0].music21Objects is None:
         raise HumdrumException("okay, you got at least one spine, but it aint got nothing in it; have you thought of taking up kindergarten teaching?")
     else:
         masterStream = Stream()
         for thisSpine in self.spineCollection:
             thisSpine.music21Objects.id = "spine_" + str(thisSpine.id)
             masterStream.insert(thisSpine.music21Objects)
         return masterStream
Example No. 13
def append_stream(original_stream: stream.Stream, *streams: stream.Stream):
    """

    Appends all elements of one or more streams at the end of a stream.

    Args:
        original_stream: The stream to append to.
        *streams: Any number of streams to be appended to the original stream.
    """
    for stream_ in streams:
        h_offset = original_stream.highestTime
        for element in stream_.elements:
            original_stream.insert(element.offset + h_offset, element)
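
A small sketch of the offset-shifting behaviour, using plain music21 objects and no project-specific assumptions:

from music21.note import Note
from music21.stream import Stream

a = Stream([Note('C4'), Note('E4')])  # quarter notes at offsets 0.0 and 1.0
b = Stream([Note('G4')])              # offset 0.0 within its own stream
append_stream(a, b)                   # G4 is inserted at offset 2.0 (= a.highestTime)
# a now spans offsets 0.0 through 3.0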
Example No. 14
def compose_repository_song(repo_data):
    vprint('Composing a song using the data from your Git Repository...')
    song = Stream()

    scale = MajorScale('%s4' % random.choice('ABCDEFG'))
    print('Using Scale: %s' % scale)
    clips, phrases = phrasify(repo_data, scale)

    for sha in repo_data:
        for clip in phrases[hash(sha)]:
            for note in clips[clip]:
                song.append(note)

    return song
Example No. 15
    def _realizeM21Sequence(self, chords):
        s = Stream()

        offset = 0

        # step through the template and add notes to stream
        for chord in chords:
            duration = chord.getDuration()
            for pitch in chord.getPitchSet():
                n = Note(pitch)
                n.duration.quarterLength = duration
                s.insert(offset, n)
            offset += duration
        return s
Example No. 16
    def _realizeM21Sequence(self, notes):
        s = Stream()

        offset = 0

        # step through the backbone notes and add notes to stream
        for note in notes:
            duration = 1
            pitch = note.getPitch()
            n = m21Note.Note(pitch)
            n.duration.quarterLength = duration
            s.insert(offset, n)
            offset += duration
        return s
Example No. 17
def melodic_arch(score: stream.Stream, phrase_length: int):
    total_phrases = 0  #total number of phrases
    sum_pitch_height = [
        0 for i in range(phrase_length)
    ]  #sum of heights for each note position, measured in semitones above middle C
    phrase = []  #phrase is empty at beginning of piece
    for n in score.recurse().notesAndRests:
        if isinstance(n, chord.Chord):
            n = max(n)
        if n.tie and (n.tie.type == 'stop' or n.tie.type
                      == 'continue'):  #do not count a tied note more than once
            continue
        if n.isRest or (len(n.expressions) != 0
                        and 'fermata' == n.expressions[0].name):
            if len(phrase) == phrase_length:
                total_phrases += 1
                for i in range(phrase_length):
                    sum_pitch_height[
                        i] += phrase[i].pitch.ps - 60  #60 is middle C
            phrase = []
        else:
            phrase.append(n)
    #if reached end of score, check the last phrase is of desired length
    if len(phrase) == phrase_length:
        total_phrases += 1
        for i in range(phrase_length):
            sum_pitch_height[i] += phrase[i].pitch.ps - 60

    if total_phrases == 0:
        return None
    return [height / total_phrases for height in sum_pitch_height]
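
A hedged sketch of calling `melodic_arch` on a Bach chorale from music21's built-in corpus (the chorale chosen is arbitrary; phrase boundaries are detected via rests and fermatas, as in the code above):

from music21 import corpus

chorale = corpus.parse('bach/bwv66.6')
arch = melodic_arch(chorale.parts[0], phrase_length=8)
print(arch)  # mean semitones above middle C at each of 8 note positions, or None if no such phrase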
Example No. 18
def avg_phrase_length(score: stream.Stream):
    total_phrases = 0  #total number of phrases
    length = 0  #phrase is empty at beginning of piece
    phrase_lengths = []  #all the phrase lengths
    for n in score.recurse().notesAndRests:
        if isinstance(n, chord.Chord):
            n = max(n)
        if n.tie and (n.tie.type == 'stop' or n.tie.type
                      == 'continue'):  #do not count a tied note more than once
            continue
        if n.isRest or (len(n.expressions) != 0
                        and 'fermata' == n.expressions[0].name):
            phrase_lengths.append(length)
            total_phrases += 1
            length = 0
        else:
            length += 1
    #reached end
    if length != 0:
        phrase_lengths.append(length)
        total_phrases += 1

    if total_phrases == 0:
        return None
    return sum(phrase_lengths) / total_phrases
Example No. 19
def main():
    parser = get_cmd_line_parser(description=__doc__)
    ParserArguments.filename(parser)
    ParserArguments.tempo(parser)
    ParserArguments.framerate(parser)
    ParserArguments.set_defaults(parser)
    ParserArguments.best(parser)
    args = parser.parse_args()
    defaults.framerate = args.framerate

    song = Stream()

    roots = 'ABCDEFG'
    scales = [scale.MajorScale, scale.MinorScale,
              scale.WholeToneScale, scale.ChromaticScale]

    print('Choosing a random scale from Major, Minor, Whole Tone, Chromatic.')
    rscale = random.choice(scales)(Pitch(random.choice(roots)))
    print('Using: %s' % rscale.name)

    print('Generating a score...')
    random_note_count = 50
    random_note_speeds = [0.5, 1]
    print('%s Random 1/8th and 1/4th notes in rapid succession...' % random_note_count)
    for i in range(random_note_count):
        note = Note(random.choice(rscale.pitches))
        note.duration.quarterLength = random.choice(random_note_speeds)
        song.append(note)

    scale_practice_count = 4
    print('Do the scale up and down a few times... maybe %s' %
          scale_practice_count)
    rev = rscale.pitches[:]
    rev.reverse()
    updown_scale = rscale.pitches[:]
    updown_scale.extend(rev[1:-1])
    print('updown scale: %s' % updown_scale)
    for count, pitch in enumerate(cycle(updown_scale)):
        print(' note %s, %s' % (count, pitch))
        song.append(Note(pitch))
        if count >= scale_practice_count * len(updown_scale):
            break

    print('Composition finished:')
    song.show('txt')

    if args.best:
        print('Audifying the song to file "{}"...'.format(args.filename))
        wave = audify_to_file(song, args.tempo, args.filename, verbose=True)
    else:
        wave = audify_basic(song, args.tempo, verbose=True)
        print('Writing Song to file "{}"...'.format(args.filename))
        with wav_file_context(args.filename) as fout:
            fout.write_frames(wave.frames)

    return 0
Example No. 20
def value_list_to_midi(value_list):
    length = value_list.get(LENGTH)
    num_of_parts = value_list.get(NUM_OF_PARTS)
    parts = []
    for _ in range(num_of_parts):
        parts.append(get_part(length, value_list))

    stream = Stream(parts)
    return stream
Example No. 21
def playSound(n, speedfactor):
    if has_simpleaudio:
        soundof([n], n.duration / speedfactor)
    else:
        try:
            s = Stream()
            if n.isChord: s.append(n.chord21)
            else: s.append(n.note21)
            sp = StreamPlayer(s)
            sp.play()
            # if n.isChord:
            #     s.append(n)
            # else:
            #     nn = Note(n.nameWithOctave)
            #     s.append(nn)
            # sp = StreamPlayer(s)
            # sp.play()
        except:
            print('Unable to play sounds, add -z option')
        return
Example No. 22
 def xtestColorCapuaFicta(self):
     from music21.note import Note
     from music21.stream import Stream
     (n11,n12,n13,n14) = (Note(), Note(), Note(), Note())
     (n21,n22,n23,n24) = (Note(), Note(), Note(), Note())
     n11.duration.type = "quarter"
     n11.name = "D"
     n12.duration.type = "quarter"
     n12.name = "E"
     n13.duration.type = "quarter"
     n13.name = "F"
     n14.duration.type = "quarter"
     n14.name = "G"
 
     n21.name = "C"
     n21.duration.type = "quarter"
     n22.name = "C"
     n22.duration.type = "quarter"
     n23.name = "B"
     n23.octave = 3
     n23.duration.type = "quarter"
     n24.name = "C"
     n24.duration.type = "quarter"
 
     stream1 = Stream()
     stream1.append([n11, n12, n13, n14])
     stream2 = Stream()
     stream2.append([n21, n22, n23, n24])
 
 
     ### Need twoStreamComparer to Work
     evaluateWithoutFicta(stream1, stream2)
     assert n13.editorial.harmonicInterval.name == "d5", n13.editorial.harmonicInterval.name
Example No. 23
def colorCapuaFictaTest():
    (n11, n12, n13, n14) = (Note(), Note(), Note(), Note())
    (n21, n22, n23, n24) = (Note(), Note(), Note(), Note())
    n11.duration.type = "quarter"
    n11.name = "D"
    n12.duration.type = "quarter"
    n12.name = "E"
    n13.duration.type = "quarter"
    n13.name = "F"
    n14.duration.type = "quarter"
    n14.name = "G"

    n21.name = "C"
    n21.duration.type = "quarter"
    n22.name = "C"
    n22.duration.type = "quarter"
    n23.name = "B"
    n23.octave = 3
    n23.duration.type = "quarter"
    n24.name = "C"
    n24.duration.type = "quarter"

    stream1 = Stream()
    stream1.append([n11, n12, n13, n14])
    stream2 = Stream()
    stream2.append([n21, n22, n23, n24])

    ### Need twoStreamComparer to Work
    capua.evaluateWithoutFicta(stream1, stream2)
    assert n13.editorial.harmonicInterval.name == "d5", n13.editorial.harmonicInterval.name
    capua.evaluateCapuaTwoStreams(stream1, stream2)

    capua.colorCapuaFicta(stream1, stream2, "both")
    assert n13.editorial.harmonicInterval.name == "P5", n13.editorial.harmonicInterval.name

    assert n11.editorial.color == "yellow"
    assert n12.editorial.color == "yellow"
    assert n13.editorial.color == "green"
    assert n14.editorial.color == "yellow"

    assert n11.editorial.harmonicInterval.name == "M2"
    assert n21.editorial.harmonicInterval.name == "M2"

    assert n13.editorial.harmonicInterval.name == "P5"
    assert n13.editorial.misc["noFictaHarmony"] == "perfect cons"
    assert n13.editorial.misc["capua2FictaHarmony"] == "perfect cons"
    assert n13.editorial.misc["capua2FictaInterval"].name == "P5"
    assert n13.editorial.color == "green"
    assert stream1.lily.strip(
    ) == r'''\clef "treble" \color "yellow" d'4 \color "yellow" e'4 \ficta \color "green" fis'!4 \color "yellow" g'4'''
Example No. 24
def to_musicxml(sc_enc):
    "Converts Chord tuples (see chorales.prepare_poly) to musicXML"
    timestep = Duration(1. / FRAMES_PER_CROTCHET)
    musicxml_score = Stream()
    prev_chord = dict(
    )  # midi->(note instance from previous chord), used to determine tie type (start, continue, stop)
    for has_fermata, chord_notes in sc_enc:
        notes = []
        if len(chord_notes) == 0:  # no notes => rest for this frame
            r = Rest()
            r.duration = timestep
            musicxml_score.append(r)
        else:
            for note_tuple in chord_notes:
                note = Note()
                if has_fermata:
                    note.expressions.append(expressions.Fermata())
                note.midi = note_tuple[0]
                if note_tuple[1]:  # current note is tied
                    note.tie = Tie('stop')
                    if prev_chord and note.pitch.midi in prev_chord:
                        prev_note = prev_chord[note.pitch.midi]
                        if prev_note.tie is None:
                            prev_note.tie = Tie('start')
                        else:
                            prev_note.tie = Tie('continue')
                notes.append(note)
            prev_chord = {note.pitch.midi: note for note in notes}
            chord = Chord(notes=notes, duration=timestep)
            if has_fermata:
                chord.expressions.append(expressions.Fermata())
            musicxml_score.append(chord)
    return musicxml_score
Example No. 25
def show_sequence(chord_sequence):
    stream = Stream()

    chord_names = [chord.standard_name for chord in chord_sequence]

    print(chord_names)
    chord_sequence = [chord_sequence[0],
                      *chord_sequence]  # to solve a music21 problem

    for extended_chord in chord_sequence:
        chord = Chord(notes=extended_chord.components, type='whole')
        stream.append(chord)

    stream.show()
    stream.show('midi')
Example No. 26
def testVerify():
    s1 = Stream(converter.parse("tinyNotation: d1 a g f e d f e d'"))
    s2 = Stream(converter.parse("tinyNotation: d'1 c' b- a g f a c'# d'"))
    biggerStream = Stream()
    biggerStream.append(stream.Part(s1))
    biggerStream.append(stream.Part(s2))
    #biggerStream.show()

    verifyCounterpointVerbose(s1, s2)
Example No. 27
def nonharmonic_notes(score: stream.Stream):
    key_sig = score.analyze('key')
    certainty = key_sig.tonalCertainty()
    notes_within_key = [p.name for p in key_sig.pitches]
    for pitch in key_sig.pitches:
        notes_within_key.extend(
            [p.name for p in pitch.getAllCommonEnharmonics()])
    total_notes = 0  #total number of notes
    num_nonharmonic = 0  #total number of nonharmonic notes
    for n in score.recurse().notes:
        if isinstance(n, chord.Chord):
            n = max(n)
        if n.tie and (n.tie.type == 'stop' or n.tie.type
                      == 'continue'):  #do not count a tied note more than once
            continue
        else:
            if n.pitch.name not in notes_within_key:
                num_nonharmonic += 1
            total_notes += 1

    if total_notes == 0:
        return None

    return (certainty, 1 - num_nonharmonic / total_notes)
Example No. 28
    def __init__(self, id, eventList = None):
        self.id = id
        if eventList is None:
            eventList = []
        for event in eventList:
            event.spineId = id
        
        self.eventList = eventList
        self.music21Objects = Stream()
        self.beginningPosition = 0
        self.endingPosition = 0
        self.upstream = []
        self.downstream = []

        self._spineCollection = None
        self._spineType = None
Example No. 29
    def testColorCapuaFicta(self):
        from music21.note import Note
        from music21.stream import Stream

        (n11, n12, n13, n14) = (Note('D'), Note('E'), Note('F'), Note('G'))
        (n21, n22, n23, n24) = (Note('C'), Note('C'), Note('B3'), Note('C'))

        stream1 = Stream()
        stream1.append([n11, n12, n13, n14])
        stream2 = Stream()
        stream2.append([n21, n22, n23, n24])

        # Need twoStreamComparer to Work
        evaluateWithoutFicta(stream1, stream2)
        assert n13.editorial.harmonicInterval.name == 'd5', n13.editorial.harmonicInterval.name
        evaluateCapuaTwoStreams(stream1, stream2)

        colorCapuaFicta(stream1, stream2, 'both')
        assert n13.editorial.harmonicInterval.name == 'P5', n13.editorial.harmonicInterval.name
Example No. 30
def test():
    from music21.stream import Stream

    n1 = music21.note.Note()
    n1.name = "E"
    n1.duration.type = "half"

    n3 = music21.note.Note()
    n3.name = "D"
    n3.duration.type = "half"

    n2 = music21.note.Note()
    n2.name = "C#"
    n2.octave = 5
    n2.duration.type = "half"

    n4 = n3.clone()
    n4.octave = 5

    st1 = Stream()
    st2 = Stream()
    st1.append([n1, n3])
    st2.append([n2, n4])

    staff1 = LilyStaff()
    staff1.appendElement(st1)
    staff2 = LilyStaff()
    staff2.appendElement(st2)
    vs1 = LilyVoiceSection(staff2, staff1)
    vs1.prependTimeSignature("2/2")
    isStaff2 = vs1.firstContents("staff")
    assert isStaff2 is staff2, "first staff in Voice Section should be staff2"

    s1 = LilyScore(vs1, LilyLayout(), LilyMidi())
    lf1 = LilyFile(s1)
    isStaff2 = lf1.firstContents("staff")
    assert isStaff2 is staff2, "first staff in File should be staff2"

    print(lf1)
    if lf1:
        lf1.showPNGandPlayMIDI()
    print(lf1.midiFilename)
Example No. 31
def colorCapuaFictaTest():
    (n11,n12,n13,n14) = (Note(), Note(), Note(), Note())
    (n21,n22,n23,n24) = (Note(), Note(), Note(), Note())
    n11.duration.type = "quarter"
    n11.name = "D"
    n12.duration.type = "quarter"
    n12.name = "E"
    n13.duration.type = "quarter"
    n13.name = "F"
    n14.duration.type = "quarter"
    n14.name = "G"

    n21.name = "C"
    n21.duration.type = "quarter"
    n22.name = "C"
    n22.duration.type = "quarter"
    n23.name = "B"
    n23.octave = 3
    n23.duration.type = "quarter"
    n24.name = "C"
    n24.duration.type = "quarter"

    stream1 = Stream()
    stream1.append([n11, n12, n13, n14])
    stream2 = Stream()
    stream2.append([n21, n22, n23, n24])


    ### Need twoStreamComparer to Work
    capua.evaluateWithoutFicta(stream1, stream2)
    assert n13.editorial.harmonicInterval.name == "d5", n13.editorial.harmonicInterval.name
    capua.evaluateCapuaTwoStreams(stream1, stream2)

    capua.colorCapuaFicta(stream1, stream2, "both")
    assert n13.editorial.harmonicInterval.name == "P5", n13.editorial.harmonicInterval.name

    assert n11.editorial.color == "yellow"
    assert n12.editorial.color == "yellow"
    assert n13.editorial.color == "green"
    assert n14.editorial.color == "yellow"

    assert n11.editorial.harmonicInterval.name == "M2"
    assert n21.editorial.harmonicInterval.name == "M2"

    assert n13.editorial.harmonicInterval.name == "P5"
    assert n13.editorial.misc["noFictaHarmony"] == "perfect cons"
    assert n13.editorial.misc["capua2FictaHarmony"] == "perfect cons"
    assert n13.editorial.misc["capua2FictaInterval"].name == "P5"
    assert n13.editorial.color == "green"
    assert stream1.lily.strip() == r'''\clef "treble" \color "yellow" d'4 \color "yellow" e'4 \ficta \color "green" fis'!4 \color "yellow" g'4'''
Example No. 32
def get_note_lengths(score: stream.Stream):
    note_lengths = dict()
    note_count = 0.0
    for n in score.recurse().notesAndRests:
        if isinstance(n, chord.Chord):
            n = max(n)
        if n.quarterLength not in note_lengths:
            note_lengths[n.quarterLength] = 1
        else:
            note_lengths[n.quarterLength] += 1
        note_count += 1.0
    i = 0
    vector = []
    while i < 4:
        i += 0.25
        if i in note_lengths:
            vector.append(note_lengths[i] / note_count)
        else:
            vector.append(0)

    return vector
Example No. 33
 def testColorCapuaFicta(self):
     from music21.note import Note
     from music21.stream import Stream
     (n11,n12,n13,n14) = (Note(), Note(), Note(), Note())
     (n21,n22,n23,n24) = (Note(), Note(), Note(), Note())
     n11.duration.type = "quarter"
     n11.name = "D"
     n12.duration.type = "quarter"
     n12.name = "E"
     n13.duration.type = "quarter"
     n13.name = "F"
     n14.duration.type = "quarter"
     n14.name = "G"
 
     n21.name = "C"
     n21.duration.type = "quarter"
     n22.name = "C"
     n22.duration.type = "quarter"
     n23.name = "B"
     n23.octave = 3
     n23.duration.type = "quarter"
     n24.name = "C"
     n24.duration.type = "quarter"
 
     stream1 = Stream()
     stream1.append([n11, n12, n13, n14])
     stream2 = Stream()
     stream2.append([n21, n22, n23, n24])
 
 
     ### Need twoStreamComparer to Work
     evaluateWithoutFicta(stream1, stream2)
     assert n13.editorial.harmonicInterval.name == "d5", n13.editorial.harmonicInterval.name
     evaluateCapuaTwoStreams(stream1, stream2)
 
     colorCapuaFicta(stream1, stream2, "both")
     assert n13.editorial.harmonicInterval.name == "P5", n13.editorial.harmonicInterval.name
Example No. 34
def generate_single_note(midi_number,
                         midi_instrument=0,
                         volume=1.0,
                         duration=1.0,
                         tempo=120):
    """
    Generates a stream containing a single note with given parameters.
    midi_number - MIDI note number, 0 to 127
    midi_instrument - MIDI instrument number, 0 to 127
    duration - floating point number (in quarter note lengths)
    volume - 0.0 to 1.0
    tempo - number of quarter notes per minute (eg. 120)

    Note that there's a quarter note rest at the beginning and at the end.
    """
    return Stream([
        MetronomeMark(number=tempo),
        make_instrument(int(midi_instrument)),
        chord_with_volume(
            Chord([Note(midi=int(midi_number), duration=Duration(duration))]),
            volume)
    ])
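
A brief usage sketch. It assumes `make_instrument` and `chord_with_volume` from the same project are importable, and the MIDI export line is just one plausible way to consume the returned stream:

s = generate_single_note(midi_number=60, midi_instrument=0,
                         volume=0.8, duration=2.0, tempo=90)
s.write('midi', fp='single_note.mid')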
Example No. 35
def test():
    from music21.stream import Stream
    
    n1 = music21.note.Note()
    n1.name = "E"
    n1.duration.type = "half"
    
    n3 = music21.note.Note()
    n3.name = "D"
    n3.duration.type = "half"
    
    n2 = music21.note.Note()
    n2.name = "C#"
    n2.octave = 5
    n2.duration.type = "half"
    
    n4 = n3.clone()
    n4.octave = 5

    st1 = Stream()
    st2 = Stream()
    st1.append([n1, n3])
    st2.append([n2, n4])

    staff1 = LilyStaff()
    staff1.appendElement(st1)
    staff2 = LilyStaff()
    staff2.appendElement(st2)
    vs1 = LilyVoiceSection(staff2, staff1)
    vs1.prependTimeSignature("2/2")
    isStaff2 = vs1.firstContents("staff")
    assert isStaff2 is staff2, "first staff in Voice Section should be staff2"
    
    s1 = LilyScore(vs1, LilyLayout(), LilyMidi() )
    lf1 = LilyFile(s1)
    isStaff2 = lf1.firstContents("staff")
    assert isStaff2 is staff2, "first staff in File should be staff2"

    print(lf1)
    if lf1:
        lf1.showPNGandPlayMIDI()
    print(lf1.midiFilename)
Example No. 36
    def testColorCapuaFicta(self):
        from music21.note import Note
        from music21.stream import Stream

        (n11, n12, n13, n14) = (Note('D'), Note('E'), Note('F'), Note('G'))
        (n21, n22, n23, n24) = (Note('C'), Note('C'), Note('B3'), Note('C'))

        stream1 = Stream()
        stream1.append([n11, n12, n13, n14])
        stream2 = Stream()
        stream2.append([n21, n22, n23, n24])


        ### Need twoStreamComparer to Work
        evaluateWithoutFicta(stream1, stream2)
        assert n13.editorial.harmonicInterval.name == "d5", n13.editorial.harmonicInterval.name
        evaluateCapuaTwoStreams(stream1, stream2)

        colorCapuaFicta(stream1, stream2, "both")
        assert n13.editorial.harmonicInterval.name == "P5", n13.editorial.harmonicInterval.name
Example No. 37
def test():
    stream = Stream()

    n1 = Note("C4", duration=Duration(1.5))
    n2 = Note("D4", duration=Duration(0.5))
    n3 = Note("E4")
    n4 = Note("F4")
    n5 = Note("G4")
    n6 = Note("A4")

    n7 = Note("C4")
    n8 = Note("D4").getGrace()
    n9 = Note("E4").getGrace()
    n10 = Note("F4")
    n11 = Note("G4")
    n12 = Note("A4", duration=Duration(0.5))
    n13 = Note("A4", duration=Duration(0.5))

    gliss1 = Glissando([n2, n3])
    gliss2 = Glissando([n5, n6])
    gliss3 = Glissando([n6, n7])
    gliss4 = Glissando([n8, n9])

    slur1 = Slur([n2, n3])
    slur2 = Slur([n6, n7])
    slur3 = Slur([n9, n10])

    stream.append([n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13])
    stream.insert(0, gliss1)
    stream.insert(0, gliss2)
    stream.insert(0, gliss3)
    stream.insert(0, gliss4)
    stream.insert(0, slur1)
    stream.insert(0, slur2)
    stream.insert(0, slur3)

    return stream
Example No. 38
 def playsound(self, n):
     s = Stream() 
     if n.isChord: s.append(n.chord21)
     else: s.append(n.note21)
     sp = StreamPlayer(s)
     sp.play()
Example No. 39
def get_meter(score: stream.Stream):
    first_meter = score.recurse().getElementsByClass(meter.TimeSignature)
    if not first_meter:
        return (0, 0)
    else:
        return (first_meter[0].numerator, first_meter[0].denominator)
Example No. 40
def build_midi(harmony, melody):
    chords_dict = get_chord_dicts()[1]

    song = []
    for i, eighth in enumerate(melody):
        # eighth = multi_hot_to_pianoroll(piano_roll[:midi_range]) # now make_music returns pianorolls already
        # chord = one_hot_to_index(piano_roll[-chord_classes:]) # TODO add chord to midi
        # print(f'EIGHTH: {eighth}') # DEBUG

        song_notes = []
        for note_ in eighth:
            note_name = NOTES[note_%12]
            note_octave = start_octave + note_//12 # starting from C2
            song_notes.append(note_name + str(note_octave))

        song_chords = []
        full_chord = chords_dict[harmony[i]]
        if full_chord != '<unk>':
            for chord_ in full_chord:
                chord_name = NOTES[chord_%12]
                song_chords.append(chord_name + str(start_octave-1))

        song.append(("REST" if len(song_notes) == 0 else song_notes, "REST" if len(song_chords) == 0 else song_chords))

    notes_score = Score()
    notes_score.append(instrument.Piano())
    chords_score = Score()
    chords_score.append(instrument.KeyboardInstrument())
    bass_score = Score()
    bass_score.append(instrument.ElectricBass())

    current_note_length = 0
    current_chord_length = 0

    for i, _ in enumerate(song):

        current_note_length += 0.5
        current_chord_length += 0.5

        # print(f'NOTE: {song[i][0]}\t\t\t- CHORD: {song[i][1]}')

        if i < len(song)-1:
            # note
            if song[i][0] != song[i+1][0]:
                if song[i][0] == "REST":
                    notes_score.append(note.Rest(duration=Duration(current_note_length)))
                else:
                    notes_score.append(chord.Chord([note.Note(nameWithOctave=note_name) for note_name in song[i][0]], duration=Duration(current_note_length)))
                current_note_length = 0

            # chord
            if song[i][1] != song[i+1][1] or current_chord_length == 4:
                if song[i][1] == "REST":
                    chords_score.append(note.Rest(duration=Duration(current_chord_length)))

                    bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                    bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                    bass_score.append(note.Rest(duration=Duration(current_chord_length/2)))
                else:
                    chords_score.append(chord.Chord([note.Note(nameWithOctave=chord_name) for chord_name in song[i][1]], duration=Duration(current_chord_length)))

                    bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                    bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                    bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/2)))
                current_chord_length = 0
        else:
            # note
            if song[i][0] == "REST":
                notes_score.append(note.Rest(duration=Duration(current_note_length)))
            else:
                notes_score.append(chord.Chord([note.Note(nameWithOctave=note_name) for note_name in song[i][0]], duration=Duration(current_note_length)))

            # chord
            if song[i][1] == "REST":
                chords_score.append(note.Rest(duration=Duration(current_chord_length)))

                bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                bass_score.append(note.Rest(duration=Duration(current_chord_length/2)))
            else:
                chords_score.append(chord.Chord([note.Note(nameWithOctave=chord_name) for chord_name in song[i][1]], duration=Duration(current_chord_length)))

                bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/2)))

    song_stream = Stream()
    song_stream.insert(0, notes_score)
    song_stream.insert(0, chords_score)
    song_stream.insert(0, bass_score)

    if not os.path.exists('melodies'):
        os.makedirs('melodies')
    dt = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    song_stream.write('midi', fp=f'melodies/generated_{dt}.mid')
Example No. 41
def read_vmf_string(vmf_string):
    """
    Reads VMF data from a string to a Score Stream.

    :param vmf_string: The contents of the VMF file as a string.
    :return: A music21 score instance containing the music in the VMF file.
    """

    parts_converted = {}

    vmf = json.loads(vmf_string)

    # create a score
    score = Score()

    # Get the initial data
    number_of_parts = vmf['header']['number_of_parts']
    number_of_voices = vmf['header']['number_of_voices']
    smallest_note = float(Fraction(vmf['header']['tick_value']))

    # create the parts and first measure.
    for voice_number in range(number_of_parts):
        part = Part()
        voice = Voice()

        part.append(voice)

        score.append(part)

    # get the body of the vmf
    body = vmf['body']

    part_number = 0

    # We do this because we want to do each part at a time.
    for voice_number in range(number_of_voices):
        # Get all ticks for a given part.
        part = [tick[voice_number] for tick in body]

        current_element = None
        current_voice = None

        # iterate over each tick
        for tick in part:

            if current_voice is None:
                # Get the parent part if it exists.
                try:
                    current_part = parts_converted[tick[-1]]

                    # add a new voice and write to it.
                    voice = Voice()

                    initial_key_signature = KeySignature(vmf['header']['key_signature']['0.0'])
                    initial_time_signature = TimeSignature(vmf['header']['time_signature']['0.0'])

                    voice.append(initial_key_signature)
                    voice.append(initial_time_signature)

                    current_part.append(voice)

                except KeyError:
                    # Add it to our dictionary otherwise.
                    current_part = score.parts[part_number]
                    part_number += 1

                    parts_converted[tick[-1]] = current_part

                # Get the last voice.
                current_voice = current_part.voices[-1]

            if tick[0] == 1:
                if current_element is not None:
                    # check for precision and adjust
                    rounded = round(current_element.quarterLength)
                    if abs(current_element.quarterLength - rounded) < PRECISION:
                        current_element.quarterLength = rounded

                    # append to the part
                    current_voice.append(current_element)

                # Find how many notes to write. This will always be an int.
                number_of_notes = int(find_number_of_notes_in_tick(tick))

                if number_of_notes == 1:
                    # create a new note
                    current_element = Note(Pitch(pitchClass=tick[3], octave=tick[4]))
                else:
                    pitches = []

                    # create the pitches.
                    # From the beginning to the end of the pitch section of the tick.
                    for i in range(FIRST_PITCH_INDEX, FIRST_PITCH_INDEX + 2 * number_of_notes, 2):
                        pitch = Pitch(pitchClass=tick[i], octave=tick[i + 1])
                        pitches.append(pitch)

                    # create a new chord with these pitches.
                    current_element = Chord(pitches)


                # set the velocity of the note.
                current_element.volume.velocity = DynamicConverter.vmf_to_velocity(tick[DYNAMIC_BIT])
                # set the articulation
                if tick[ARTICULATION_BIT] != 0:
                    current_element.articulations.append(
                        ArticulationConverter.vmf_to_articulation(tick[ARTICULATION_BIT]))

                # set the value for this tick.
                current_element.quarterLength = smallest_note
            elif tick[0] == 2:
                # extend previous note
                current_element.quarterLength += smallest_note

            elif tick[0] == 0 and (isinstance(current_element, note.Note) or current_element is None):
                if current_element is not None:
                    # check for precision and adjust
                    rounded = round(current_element.quarterLength)
                    if abs(current_element.quarterLength - rounded) < PRECISION:
                        current_element.quarterLength = rounded

                    # append to the part
                    current_voice.append(current_element)

                # create new rest
                current_element = Rest()

                # Set the value for this tick.
                current_element.quarterLength = smallest_note

            elif tick[0] == 0 and isinstance(current_element, note.Rest):
                # extend previous rest.
                current_element.quarterLength += smallest_note

        # Append the last element in progress.
        if current_element is not None:
            # check for precision and adjust
            rounded = round(current_element.quarterLength)
            if abs(current_element.quarterLength - rounded) < PRECISION:
                current_element.quarterLength = rounded

            # append to the part
            current_voice.append(current_element)

    # create the stream for time signature changes
    time_signature_stream = Stream()

    for offset, time_signature_str in sorted(vmf['header']['time_signature'].items()):
        time_signature = TimeSignature(time_signature_str)
        time_signature_stream.append(time_signature)
        time_signature_stream[-1].offset = float(offset)

    # finish up the file.
    for part in score.parts:
        for voice in part.voices:
            voice.makeMeasures(inPlace=True, meterStream=time_signature_stream)

        for offset, t in sorted(vmf['header']['tempo'].items()):
            mm = tempo.MetronomeMark(number=t, referent=note.Note(type='quarter'))
            voice.insert(offset, mm)

        for offset, ks in sorted(vmf['header']['key_signature'].items()):
            voice.insert(offset, KeySignature(ks))

    return score
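
A hedged sketch of driving `read_vmf_string` from a file on disk; the filename is illustrative, and the module-level constants it relies on (`PRECISION`, `FIRST_PITCH_INDEX`, `DYNAMIC_BIT`, `ARTICULATION_BIT`) are assumed to be defined alongside it:

with open('piece.vmf', 'r', encoding='utf-8') as f:
    score = read_vmf_string(f.read())
score.show('text')  # text view of the converted parts, voices, and measures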
Example No. 42
synth_audio_converted = np.array([sample * 32767 for sample in synth_audio],
                                 dtype=np.int16)

# write to wav file
file = wave.open("output/" + filename + "_sine.wav", "wb")
file.setnchannels(1)
file.setsampwidth(2)  # 2 bytes = 16 bit
file.setframerate(fs)
file.writeframes(synth_audio_converted)
file.close()

# Get music21 notes
note_info = list(music_info[:, 1])

# Create music21 stream
s = Stream()
s.append(mm)
electricguitar = instrument.fromString('electric guitar')
electricguitar.midiChannel = 0
electricguitar.midiProgram = 30  #Set program to Overdriven Guitar
s.append(electricguitar)
s.insert(0, metadata.Metadata())
for note in note_info:
    s.append(note)

# Analyse music21 stream to get song Key
key = s.analyze('key')
print("Key: " + key.name)
# Insert Key to Stream
s.insert(0, key)
Example No. 43
def test():
    stream = Stream()

    n1 = Note('C4', duration=Duration(1.5))
    n2 = Note('D4', duration=Duration(0.5))
    n3 = Note('E4')
    n4 = Note('F4')
    n5 = Note('G4')
    n6 = Note('A4')

    n7 = Note('C4')
    n8 = Note('D4').getGrace()
    n9 = Note('E4').getGrace()
    n10 = Note('F4')
    n11 = Note('G4')
    n12 = Note('A4', duration=Duration(0.5))
    n13 = Note('A4', duration=Duration(0.5))

    gliss1 = Glissando([n2, n3])
    gliss2 = Glissando([n5, n6])
    gliss3 = Glissando([n6, n7])
    gliss4 = Glissando([n8, n9])

    slur1 = Slur([n2, n3])
    slur2 = Slur([n6, n7])
    slur3 = Slur([n9, n10])

    stream.append([n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13])
    stream.insert(0, gliss1)
    stream.insert(0, gliss2)
    stream.insert(0, gliss3)
    stream.insert(0, gliss4)
    stream.insert(0, slur1)
    stream.insert(0, slur2)
    stream.insert(0, slur3)

    return stream
Example No. 44
class HumdrumSpine(object):
    '''
    A HumdrumSpine is a collection of events arranged vertically that have a
    connection to each other.
    Each HumdrumSpine MUST have an id (numeric or string) attached to it.

    spine1 = HumdrumSpine(5, [SpineEvent1, SpineEvent2])
    spine1.beginningPosition = 5
    spine1.endingPosition = 6
    spine1.upstream = [3]
    spine1.downstream = [7,8]
    spine1.spineCollection = weakref.ref(SpineCollection1)
           # we keep weak references to the spineCollection so that we 
           # don't have circular references

    print(spine1.spineType)  
           # searches the EventList or upstreamSpines to figure 
           # out the spineType

    '''
    def __init__(self, id, eventList = None):
        self.id = id
        if eventList is None:
            eventList = []
        for event in eventList:
            event.spineId = id
        
        self.eventList = eventList
        self.music21Objects = Stream()
        self.beginningPosition = 0
        self.endingPosition = 0
        self.upstream = []
        self.downstream = []

        self._spineCollection = None
        self._spineType = None

    def __repr__(self):
        return str(self.id) + repr(self.upstream) + repr(self.downstream)

    def append(self, event):
        self.eventList.append(event)

    def __iter__(self):
        '''Resets the counter to 0 so that iteration is correct'''
        self.iterIndex = 0
        return self

    def next(self):
        '''Returns the current event and increments the iteration index.'''
        if self.iterIndex == len(self.eventList):
            raise StopIteration
        thisEvent = self.eventList[self.iterIndex]
        self.iterIndex += 1
        return thisEvent

    def _getSpineCollection(self):
        return common.unwrapWeakref(self._spineCollection)

    def _setSpineCollection(self, sc = None):
        self._spineCollection = sc
    
    spineCollection = property(_getSpineCollection, _setSpineCollection)

    def upstreamSpines(self):
        '''
        Returns the HumdrumSpine(s) that are upstream (if the spineCollection is set)
        '''
        if self.upstream:
            sc1 = self.spineCollection
            if sc1:
                spineReturn = []
                for upstreamId in self.upstream:
                    spineReturn.append(sc1.getSpineById(upstreamId))
                return spineReturn
            else:
                return []
        else:
            return []

    def downstreamSpines(self):
        '''
        Returns the HumdrumSpine(s) that are downstream (if the 
        spineCollection is set)
        '''
        if self.downstream:
            sc1 = self.spineCollection
            if sc1:
                spineReturn = []
                for downstreamId in self.downstream:
                    spineReturn.append(sc1.getSpineById(downstreamId))
                return spineReturn
            else:
                return []
        else:
            return []

    def _getLocalSpineType(self):
        if self._spineType is not None:
            return self._spineType
        else:
            for thisEvent in self.eventList:
                m1 = re.match(r"\*\*(.*)", thisEvent.contents)
                if m1:
                    self._spineType = m1.group(1)
                    return self._spineType
            return None
    
    def _getUpstreamSpineType(self):
        pS = self.upstreamSpines()
        if pS:
            ## leftFirst, DepthFirst search
            for thisPS in pS:
                psSpineType = thisPS.spineType
                if psSpineType is not None:
                    return psSpineType
            return None
        else:
            return None
            

    def _getSpineType(self):
        if self._spineType is not None:
            return self._spineType
        else:
            st = self._getLocalSpineType()
            if st is not None:
                self._spineType = st
                return st
            else:
                st = self._getUpstreamSpineType()
                if st is not None:
                    self._spineType = st
                    return st
                else:
                    raise HumdrumException("Could not determine spineType " +
                                           "for spine with id " + str(self.id))
    
    def _setSpineType(self, newSpineType = None):
        self._spineType = newSpineType
    
    spineType = property(_getSpineType, _setSpineType)

    def parse(self):
        '''
        Dummy method that pushes all these objects to music21Objects
        even though they probably are not.
        '''
        for event in self.eventList:
            eventC = str(event.contents)
            if eventC == ".":
                pass
            else:
                self.music21Objects.append(event)
Example No. 45
def get_key(score: stream.Stream):
    first_key = score.recurse().getElementsByClass(key.KeySignature)
    if not first_key:
        return 0
    else:
        return first_key[0].sharps