Example #1
 def xtestColorCapuaFicta(self):
     from music21.note import Note
     from music21.stream import Stream
     (n11,n12,n13,n14) = (Note(), Note(), Note(), Note())
     (n21,n22,n23,n24) = (Note(), Note(), Note(), Note())
     n11.duration.type = "quarter"
     n11.name = "D"
     n12.duration.type = "quarter"
     n12.name = "E"
     n13.duration.type = "quarter"
     n13.name = "F"
     n14.duration.type = "quarter"
     n14.name = "G"
 
     n21.name = "C"
     n21.duration.type = "quarter"
     n22.name = "C"
     n22.duration.type = "quarter"
     n23.name = "B"
     n23.octave = 3
     n23.duration.type = "quarter"
     n24.name = "C"
     n24.duration.type = "quarter"
 
     stream1 = Stream()
     stream1.append([n11, n12, n13, n14])
     stream2 = Stream()
     stream2.append([n21, n22, n23, n24])
 
 
     ### Need twoStreamComparer to Work
     evaluateWithoutFicta(stream1, stream2)
     assert n13.editorial.harmonicInterval.name == "d5", n13.editorial.harmonicInterval.name
Example #2
 def generateFirstSpecies(self, stream1, minorScale):
     '''Given a stream (the cantus firmus) and the stream's key in the
     form of a MinorScale object, generates a stream of first species
     counterpoint that follows the rules of 21M.301.'''
     # DOES NOT YET CHECK FOR TOO MANY THIRDS/SIXTHS IN A ROW,
     # DOES NOT YET RAISE LEADING TONES, AND DOES NOT CHECK FOR NOODLING.
     stream2 = Stream([])
     firstNote = stream1.notes[0]
     choices = [interval.transposeNote(firstNote, "P1"),
                interval.transposeNote(firstNote, "P5"),
                interval.transposeNote(firstNote, "P8")]
     note1 = random.choice(choices)
     note1.duration = firstNote.duration
     stream2.append(note1)
     afterLeap = False
     for i in range(1, len(stream1.notes)):
         prevFirmus = stream1.notes[i-1]
         currFirmus = stream1.notes[i]
         prevNote = stream2.notes[i-1]
         choices = self.generateValidNotes(prevFirmus, currFirmus, prevNote, afterLeap, minorScale)
         if len(choices) == 0:
             raise ModalCounterpointException("Sorry, please try again")
         newNote = random.choice(choices)
         newNote.duration = currFirmus.duration
         stream2.append(newNote)
         harmonicInterval = interval.notesToInterval(prevNote, newNote)
         afterLeap = harmonicInterval.generic.undirected > 3
     return stream2
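# A quick, runnable sketch of the two music21 interval helpers this function
# leans on (interval.transposeNote and interval.notesToInterval are real
# music21 APIs); the P5 transposition and the D4 cantus note are illustrative:
from music21 import interval
from music21.note import Note

cf = Note("D4")
fifthAbove = interval.transposeNote(cf, "P5")     # a new Note, A4
leap = interval.notesToInterval(cf, fifthAbove)   # an Interval object
print(fifthAbove.nameWithOctave)                  # A4
print(leap.generic.undirected)                    # 5 -> counts as a leap above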
Example #3
def to_musicxml(sc_enc):
    "Converts Chord tuples (see chorales.prepare_poly) to musicXML"
    timestep = Duration(1. / FRAMES_PER_CROTCHET)
    musicxml_score = Stream()
    prev_chord = {}  # midi -> note instance from previous chord; used to determine tie type (start, continue, stop)
    for has_fermata, chord_notes in sc_enc:
        notes = []
        if len(chord_notes) == 0:  # no notes => rest for this frame
            r = Rest()
            r.duration = timestep
            musicxml_score.append(r)
        else:
            for note_tuple in chord_notes:
                note = Note()
                if has_fermata:
                    note.expressions.append(expressions.Fermata())
                note.midi = note_tuple[0]
                if note_tuple[1]:  # current note is tied
                    note.tie = Tie('stop')
                    if prev_chord and note.pitch.midi in prev_chord:
                        prev_note = prev_chord[note.pitch.midi]
                        if prev_note.tie is None:
                            prev_note.tie = Tie('start')
                        else:
                            prev_note.tie = Tie('continue')
                notes.append(note)
            prev_chord = {note.pitch.midi: note for note in notes}
            chord = Chord(notes=notes, duration=timestep)
            if has_fermata:
                chord.expressions.append(expressions.Fermata())
            musicxml_score.append(chord)
    return musicxml_score
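# Usage sketch with hypothetical data: each frame of sc_enc is a tuple
# (has_fermata, [(midi_pitch, is_tied), ...]), and FRAMES_PER_CROTCHET is
# assumed to be defined by the surrounding module (e.g. 4 frames per quarter):
sc_enc = [
    (False, [(60, False), (64, False)]),  # C/E chord attack
    (False, [(60, True), (64, True)]),    # same chord held -> ties get added
    (False, []),                          # empty frame -> rest
    (True, [(62, False)]),                # D with a fermata
]
score = to_musicxml(sc_enc)
score.write('musicxml', 'frames.xml')     # hypothetical output path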
Example #4
def decode_score(encoding, num_measures, ts, image=False):
    score = Stream()
    score.timeSignature = TimeSignature(ts)
    steps_per_measure = len(encoding) / num_measures
    measure_ind = 0
    while measure_ind < num_measures:
        start_beat = int(measure_ind * steps_per_measure)
        end_beat = int((measure_ind + 1) * steps_per_measure)
        measure = Measure()
        for beat_ind in range(start_beat, end_beat):
            if image:
                played_pitches = np.nonzero(encoding[beat_ind])[0]
            else:
                played_pitches = np.nonzero(encoding[beat_ind])
            if len(played_pitches) == 0:
                measure.append(Rest(quarterLength=4.0 / GRANULARITY))
            else:
                played_notes = [
                    midi_to_note(int(pitch + MIN_PITCH))
                    for pitch in played_pitches
                ]
                chord = Chord(played_notes, quarterLength=4.0 / GRANULARITY)
                measure.append(chord)
        score.append(measure)
        measure_ind += 1
    return score
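# Usage sketch with hypothetical constants: assumes GRANULARITY steps per
# quarter note, MIN_PITCH as the lowest encoded MIDI pitch, and midi_to_note(m)
# returning something music21's Chord accepts (e.g. a pitch name):
import numpy as np

encoding = np.zeros((32, 48))  # 32 time steps x 48-pitch piano roll
encoding[0:16, 12] = 1         # hold one pitch through the first measure
score = decode_score(encoding, num_measures=2, ts='4/4', image=True)
score.show('text')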
Example #5
def testVerify():
    s1 = Stream(converter.parse("tinyNotation: d1 a g f e d f e d'"))
    s2 = Stream(converter.parse("tinyNotation: d'1 c' b- a g f a c'# d'"))
    biggerStream = Stream()
    biggerStream.append(stream.Part(s1))
    biggerStream.append(stream.Part(s2))
    #biggerStream.show()

    verifyCounterpointVerbose(s1, s2)
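# The tinyNotation strings above go through a real music21 entry point; a
# quick way to inspect one of the lines on its own:
from music21 import converter

s = converter.parse("tinyNotation: d1 a g f e d f e d'")
s.show('text')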
Example #6
def main():
    parser = get_cmd_line_parser(description=__doc__)
    ParserArguments.filename(parser)
    ParserArguments.tempo(parser)
    ParserArguments.framerate(parser)
    ParserArguments.set_defaults(parser)
    ParserArguments.best(parser)
    args = parser.parse_args()
    defaults.framerate = args.framerate

    song = Stream()

    roots = 'ABCDEFG'
    scales = [scale.MajorScale, scale.MinorScale,
              scale.WholeToneScale, scale.ChromaticScale]

    print('Choosing a random scale from Major, Minor, Whole Tone, Chromatic.')
    rscale = random.choice(scales)(Pitch(random.choice(roots)))
    print('Using: %s' % rscale.name)

    print('Generating a score...')
    random_note_count = 50
    random_note_speeds = [0.5, 1]
    print('%d random 1/8th and 1/4th notes in rapid succession...' % random_note_count)
    for i in range(random_note_count):
        note = Note(random.choice(rscale.pitches))
        note.duration.quarterLength = random.choice(random_note_speeds)
        song.append(note)

    scale_practice_count = 4
    print('Do the scale up and down a few times... maybe %s' %
          scale_practice_count)
    rev = rscale.pitches[:]
    rev.reverse()
    updown_scale = rscale.pitches[:]
    updown_scale.extend(rev[1:-1])
    print('updown scale: %s' % updown_scale)
    for count, pitch in enumerate(cycle(updown_scale)):
        print(' note %s, %s' % (count, pitch))
        song.append(Note(pitch))
        if count >= scale_practice_count * len(updown_scale):
            break

    print('Composition finished:')
    song.show('txt')

    if args.best:
        print('Audifying the song to file "{}"...'.format(args.filename))
        wave = audify_to_file(song, args.tempo, args.filename, verbose=True)
    else:
        wave = audify_basic(song, args.tempo, verbose=True)
        print('Writing Song to file "{}"...'.format(args.filename))
        with wav_file_context(args.filename) as fout:
            fout.write_frames(wave.frames)

    return 0
Example #7
def createScalePart():
    c = QuarterNote(); c.step = "C"
    d = QuarterNote(); d.step = "D"
    # etc
    b = QuarterNote(); b.step = "B"
    
    s1 = Stream()
    s1.append([c, d, b])
    print(s1.lily)
    lS1 = LilyString("{" + s1.lily + "}")
    lS1.showPNG()
Example #8
def song_notes(score):
    # For some reason Stream([n for n in score.flat.notes]) accumulates
    # notes in the wrong order, so we append them explicitly.

    stream = Stream()
    for n in score.flat.notes.stripTies():
        if n.isChord:
            stream.append(n[-1])
        else:
            stream.append(n)
    return stream
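# Usage sketch with a score from the built-in music21 corpus (song_notes
# keeps a single tone from each chord, the last one in the chord's list);
# note this relies on the older API where score.flat.notes is a Stream:
from music21 import corpus

score = corpus.parse('bach/bwv66.6')
melody = song_notes(score)
melody.show('text')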
Example #9
    def create_note_stream(self, notes_sequence):
        """
        Creates a music21.stream.Stream object to which notes are added.

        :param notes_sequence: sequence of notes to add in a stream.
        :return: a Stream of Note objects.
        """
        notes_arr = self.get_notes_from_sequence(notes_sequence)
        stream = Stream()
        for note in notes_arr:
            stream.append(note)
        return stream
Example #10
def compose_repository_song(repo_data):
    vprint('Composing a song using the data from your Git Repository...')
    song = Stream()

    scale = MajorScale('%s4' % random.choice('ABCDEFG'))
    print('Using Scale: %s' % scale)
    clips, phrases = phrasify(repo_data, scale)

    for sha in repo_data:
        for clip in phrases[hash(sha)]:
            for note in clips[clip]:
                song.append(note)

    return song
Example #11
def colorCapuaFictaTest():
    (n11,n12,n13,n14) = (Note(), Note(), Note(), Note())
    (n21,n22,n23,n24) = (Note(), Note(), Note(), Note())
    n11.duration.type = "quarter"
    n11.name = "D"
    n12.duration.type = "quarter"
    n12.name = "E"
    n13.duration.type = "quarter"
    n13.name = "F"
    n14.duration.type = "quarter"
    n14.name = "G"

    n21.name = "C"
    n21.duration.type = "quarter"
    n22.name = "C"
    n22.duration.type = "quarter"
    n23.name = "B"
    n23.octave = 3
    n23.duration.type = "quarter"
    n24.name = "C"
    n24.duration.type = "quarter"

    stream1 = Stream()
    stream1.append([n11, n12, n13, n14])
    stream2 = Stream()
    stream2.append([n21, n22, n23, n24])


    ### Need twoStreamComparer to Work
    capua.evaluateWithoutFicta(stream1, stream2)
    assert n13.editorial.harmonicInterval.name == "d5", n13.editorial.harmonicInterval.name
    capua.evaluateCapuaTwoStreams(stream1, stream2)

    capua.colorCapuaFicta(stream1, stream2, "both")
    assert n13.editorial.harmonicInterval.name == "P5", n13.editorial.harmonicInterval.name

    assert n11.editorial.color == "yellow"
    assert n12.editorial.color == "yellow"
    assert n13.editorial.color == "green"
    assert n14.editorial.color == "yellow"

    assert n11.editorial.harmonicInterval.name == "M2"
    assert n21.editorial.harmonicInterval.name == "M2"

    assert n13.editorial.harmonicInterval.name == "P5"
    assert n13.editorial.misc["noFictaHarmony"] == "perfect cons"
    assert n13.editorial.misc["capua2FictaHarmony"] == "perfect cons"
    assert n13.editorial.misc["capua2FictaInterval"].name == "P5"
    assert n13.editorial.color == "green"
    assert stream1.lily.strip() == r'''\clef "treble" \color "yellow" d'4 \color "yellow" e'4 \ficta \color "green" fis'!4 \color "yellow" g'4'''
Example #12
def show_sequence(chord_sequence):
    stream = Stream()

    chord_names = [chord.standard_name for chord in chord_sequence]

    print(chord_names)
    chord_sequence = [chord_sequence[0],
                      *chord_sequence]  # to solve a music21 problem

    for extended_chord in chord_sequence:
        chord = Chord(notes=extended_chord.components, type='whole')
        stream.append(chord)

    stream.show()
    stream.show('midi')
Example #13
def generate_notes_in_batch(note_params_df,
                            output_dir,
                            audio_format='flac',
                            sample_rate=44100):
    """
    Generates a batch of single note samples from the given table of parameters.

    `note_params_df` - a Pandas Dataframe with columns:
    `midi_number, midi_instrument, volume, duration, tempo`. Their meaning is the same as in generate_single_note.
    `output_dir` - output directory for the MIDI files

    Each sample goes to a single MIDI file named by the numeric index. Also, each synthesized audio sample goes to a separate audio file.
    """
    os.makedirs(output_dir, exist_ok=True)

    fs = FluidSynth(sample_rate=sample_rate)

    stream = Stream()

    for i, row in note_params_df.iterrows():
        stream.append(MetronomeMark(number=row['tempo']))
        stream.append(make_instrument(int(row['midi_instrument'])))
        duration = row['duration']
        stream.append(
            chord_with_volume(
                Chord([
                    Note(midi=int(row['midi_number']),
                         duration=Duration(duration))
                ]), row['volume']))
        stream.append(Rest(duration=Duration(2 * duration)))

    midi_file = '{0}/all_samples.midi'.format(output_dir)
    audio_file_stereo = '{0}/all_samples_stereo.{1}'.format(
        output_dir, audio_format)
    audio_file = '{0}/all_samples.{1}'.format(output_dir, audio_format)
    audio_index_file = '{0}/all_samples_index.csv'.format(output_dir)

    # TODO: We currently assume some fixed duration and tempo (1.0, 120)!!!
    # The parts should be split according to an index.
    audio_index = make_audio_index(note_params_df, 3.0, 0.5, sample_rate)
    audio_index.to_csv(audio_index_file)

    write_midi(stream, midi_file)

    fs.midi_to_audio(midi_file, audio_file_stereo)

    convert_to_mono(audio_file_stereo, audio_file)
    os.remove(audio_file_stereo)

    x, sample_rate = sf.read(audio_file)

    parts = split_audio_to_parts(x, sample_rate, audio_index)
    store_parts_to_files(parts, sample_rate, output_dir, audio_format)
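# Hypothetical parameter table; column meanings follow the docstring
# (midi_number, midi_instrument, volume, duration, tempo), and the helpers
# used above (make_instrument, chord_with_volume, write_midi, ...) are
# assumed to come from this module:
import pandas as pd

note_params_df = pd.DataFrame({
    'midi_number': [60, 64, 67],
    'midi_instrument': [0, 0, 40],  # General MIDI: 0 = piano, 40 = violin
    'volume': [0.8, 0.6, 1.0],
    'duration': [1.0, 1.0, 1.0],
    'tempo': [120, 120, 120],
})
generate_notes_in_batch(note_params_df, 'output/notes', audio_format='flac')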
Example #14
    def testColorCapuaFicta(self):
        from music21.note import Note
        from music21.stream import Stream

        (n11, n12, n13, n14) = (Note('D'), Note('E'), Note('F'), Note('G'))
        (n21, n22, n23, n24) = (Note('C'), Note('C'), Note('B3'), Note('C'))

        stream1 = Stream()
        stream1.append([n11, n12, n13, n14])
        stream2 = Stream()
        stream2.append([n21, n22, n23, n24])

        # Need twoStreamComparer to Work
        evaluateWithoutFicta(stream1, stream2)
        assert n13.editorial.harmonicInterval.name == 'd5', n13.editorial.harmonicInterval.name
        evaluateCapuaTwoStreams(stream1, stream2)

        colorCapuaFicta(stream1, stream2, 'both')
        assert n13.editorial.harmonicInterval.name == 'P5', n13.editorial.harmonicInterval.name
Example #15
def test():
    from music21.stream import Stream
    
    n1 = music21.note.Note()
    n1.name = "E"
    n1.duration.type = "half"
    
    n3 = music21.note.Note()
    n3.name = "D"
    n3.duration.type = "half"
    
    n2 = music21.note.Note()
    n2.name = "C#"
    n2.octave = 5
    n2.duration.type = "half"
    
    n4 = n3.clone()
    n4.octave = 5

    st1 = Stream()
    st2 = Stream()
    st1.append([n1, n3])
    st2.append([n2, n4])

    staff1 = LilyStaff()
    staff1.appendElement(st1)
    staff2 = LilyStaff()
    staff2.appendElement(st2)
    vs1 = LilyVoiceSection(staff2, staff1)
    vs1.prependTimeSignature("2/2")
    isStaff2 = vs1.firstContents("staff")
    assert isStaff2 is staff2, "first staff in Voice Section should be staff2"
    
    s1 = LilyScore(vs1, LilyLayout(), LilyMidi() )
    lf1 = LilyFile(s1)
    isStaff2 = lf1.firstContents("staff")
    assert isStaff2 is staff2, "first staff in File should be staff2"

    print(lf1)
    if lf1:
        lf1.showPNGandPlayMIDI()
    print(lf1.midiFilename)
Example #16
def playSound(n, speedfactor):
    if has_simpleaudio:
        soundof([n], n.duration / speedfactor)
    else:
        try:
            s = Stream()
            if n.isChord:
                s.append(n.chord21)
            else:
                s.append(n.note21)
            sp = StreamPlayer(s)
            sp.play()
            # if n.isChord:
            #     s.append(n)
            # else:
            #     nn = Note(n.nameWithOctave)
            #     s.append(nn)
            # sp = StreamPlayer(s)
            # sp.play()
        except Exception:
            print('Unable to play sounds, add -z option')
        return
Example #17
def test():
    stream = Stream()

    n1 = Note("C4", duration=Duration(1.5))
    n2 = Note("D4", duration=Duration(0.5))
    n3 = Note("E4")
    n4 = Note("F4")
    n5 = Note("G4")
    n6 = Note("A4")

    n7 = Note("C4")
    n8 = Note("D4").getGrace()
    n9 = Note("E4").getGrace()
    n10 = Note("F4")
    n11 = Note("G4")
    n12 = Note("A4", duration=Duration(0.5))
    n13 = Note("A4", duration=Duration(0.5))

    gliss1 = Glissando([n2, n3])
    gliss2 = Glissando([n5, n6])
    gliss3 = Glissando([n6, n7])
    gliss4 = Glissando([n8, n9])

    slur1 = Slur([n2, n3])
    slur2 = Slur([n6, n7])
    slur3 = Slur([n9, n10])

    stream.append([n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13])
    stream.insert(0, gliss1)
    stream.insert(0, gliss2)
    stream.insert(0, gliss3)
    stream.insert(0, gliss4)
    stream.insert(0, slur1)
    stream.insert(0, slur2)
    stream.insert(0, slur3)

    return stream
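# The returned stream carries the Glissando and Slur spanners alongside the
# notes; getElementsByClass and getSpannedElements are real music21 APIs:
s = test()
for sp in s.getElementsByClass('Spanner'):
    print(type(sp).__name__,
          [n.nameWithOctave for n in sp.getSpannedElements()])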
Example #18
 def testColorCapuaFicta(self):
     from music21.note import Note
     from music21.stream import Stream
     (n11,n12,n13,n14) = (Note(), Note(), Note(), Note())
     (n21,n22,n23,n24) = (Note(), Note(), Note(), Note())
     n11.duration.type = "quarter"
     n11.name = "D"
     n12.duration.type = "quarter"
     n12.name = "E"
     n13.duration.type = "quarter"
     n13.name = "F"
     n14.duration.type = "quarter"
     n14.name = "G"
 
     n21.name = "C"
     n21.duration.type = "quarter"
     n22.name = "C"
     n22.duration.type = "quarter"
     n23.name = "B"
     n23.octave = 3
     n23.duration.type = "quarter"
     n24.name = "C"
     n24.duration.type = "quarter"
 
     stream1 = Stream()
     stream1.append([n11, n12, n13, n14])
     stream2 = Stream()
     stream2.append([n21, n22, n23, n24])
 
 
     ### Need twoStreamComparer to Work
     evaluateWithoutFicta(stream1, stream2)
     assert n13.editorial.harmonicInterval.name == "d5", n13.editorial.harmonicInterval.name
     evaluateCapuaTwoStreams(stream1, stream2)
 
     colorCapuaFicta(stream1, stream2, "both")
     assert n13.editorial.harmonicInterval.name == "P5", n13.editorial.harmonicInterval.name
Example #19
 def playsound(self, n):
     s = Stream() 
     if n.isChord:
         s.append(n.chord21)
     else:
         s.append(n.note21)
     sp = StreamPlayer(s)
     sp.play()
Example #20

# write to wav file
file = wave.open("output/" + filename + "_sine.wav", "wb")
file.setnchannels(1)
file.setsampwidth(2)  # 2 bytes = 16 bit
file.setframerate(fs)
file.writeframes(synth_audio_converted)
file.close()

# Get music21 notes
note_info = list(music_info[:, 1])

# Create music21 stream
s = Stream()
s.append(mm)
electricguitar = instrument.fromString('electric guitar')
electricguitar.midiChannel = 0
electricguitar.midiProgram = 30  #Set program to Overdriven Guitar
s.append(electricguitar)
s.insert(0, metadata.Metadata())
for note in note_info:
    s.append(note)

# Analyse music21 stream to get song Key
key = s.analyze('key')
print("Key: " + key.name)
# Insert Key to Stream
s.insert(0, key)

# Save MIDI to file
Example #21
def read_vmf_string(vmf_string):
    """
    Reads VMF data from a string to a Score Stream.

    :param vmf_string: The contents of the VMF file as a string.
    :return: A music21 score instance containing the music in the VMF file.
    """

    parts_converted = {}

    vmf = json.loads(vmf_string)

    # create a score
    score = Score()

    # Get the initial data
    number_of_parts = vmf['header']['number_of_parts']
    number_of_voices = vmf['header']['number_of_voices']
    smallest_note = float(Fraction(vmf['header']['tick_value']))

    # create the parts and first measure.
    for _ in range(number_of_parts):
        part = Part()
        voice = Voice()

        part.append(voice)

        score.append(part)

    # get the body of the vmf
    body = vmf['body']

    part_number = 0

    # We do this because we want to do each part at a time.
    for voice_number in range(number_of_voices):
        # Get all ticks for a given part.
        part = [tick[voice_number] for tick in body]

        current_element = None
        current_voice = None

        # iterate over each tick
        for tick in part:

            if current_voice is None:
                # Get the parent part if it exists.
                try:
                    current_part = parts_converted[tick[-1]]

                    # add a new voice and write to it.
                    voice = Voice()

                    initial_key_signature = KeySignature(vmf['header']['key_signature']['0.0'])
                    initial_time_signature = TimeSignature(vmf['header']['time_signature']['0.0'])

                    voice.append(initial_key_signature)
                    voice.append(initial_time_signature)

                    current_part.append(voice)

                except KeyError:
                    # Add it to our dictionary otherwise.
                    current_part = score.parts[part_number]
                    part_number += 1

                    parts_converted[tick[-1]] = current_part

                # Get the last voice.
                current_voice = current_part.voices[-1]

            if tick[0] == 1:
                if current_element is not None:
                    # check for precision and adjust
                    rounded = round(current_element.quarterLength)
                    if abs(current_element.quarterLength - rounded) < PRECISION:
                        current_element.quarterLength = rounded

                    # append to the part
                    current_voice.append(current_element)

                # Find how many notes to write. This will always be an int.
                number_of_notes = int(find_number_of_notes_in_tick(tick))

                if number_of_notes == 1:
                    # create a new note
                    current_element = Note(Pitch(pitchClass=tick[3], octave=tick[4]))
                else:
                    pitches = []

                    # create the pitches.
                    # From the beginning to the end of the pitch section of the tick.
                    for i in range(FIRST_PITCH_INDEX, FIRST_PITCH_INDEX + 2 * number_of_notes, 2):
                        pitch = Pitch(pitchClass=tick[i], octave=tick[i + 1])
                        pitches.append(pitch)

                    # create a new chord with these pitches.
                    current_element = Chord(pitches)


                # set the velocity of the note.
                current_element.volume.velocity = DynamicConverter.vmf_to_velocity(tick[DYNAMIC_BIT])
                # set the articulation
                if tick[ARTICULATION_BIT] != 0:
                    current_element.articulations.append(
                        ArticulationConverter.vmf_to_articulation(tick[ARTICULATION_BIT]))

                # set the value for this tick.
                current_element.quarterLength = smallest_note
            elif tick[0] == 2:
                # extend previous note
                current_element.quarterLength += smallest_note

            elif tick[0] == 0 and (isinstance(current_element, note.Note) or current_element is None):
                if current_element is not None:
                    # check for precision and adjust
                    rounded = round(current_element.quarterLength)
                    if abs(current_element.quarterLength - rounded) < PRECISION:
                        current_element.quarterLength = rounded

                    # append to the part
                    current_voice.append(current_element)

                # create new rest
                current_element = Rest()

                # Set the value for this tick.
                current_element.quarterLength = smallest_note

            elif tick[0] == 0 and isinstance(current_element, note.Rest):
                # extend previous rest.
                current_element.quarterLength += smallest_note

        # Append the last element in progress.
        if current_element is not None:
            # check for precision and adjust
            rounded = round(current_element.quarterLength)
            if abs(current_element.quarterLength - rounded) < PRECISION:
                current_element.quarterLength = rounded

            # append to the part
            current_voice.append(current_element)

    # create the stream for time signature changes
    time_signature_stream = Stream()

    for offset, time_signature_str in sorted(vmf['header']['time_signature'].items()):
        time_signature = TimeSignature(time_signature_str)
        time_signature_stream.append(time_signature)
        time_signature_stream[-1].offset = float(offset)

    # finish up the file.
    for part in score.parts:
        for voice in part.voices:
            voice.makeMeasures(inPlace=True, meterStream=time_signature_stream)

        for offset, t in sorted(vmf['header']['tempo'].items()):
            mm = tempo.MetronomeMark(number=t, referent=note.Note(type='quarter'))
            voice.insert(float(offset), mm)

        for offset, ks in sorted(vmf['header']['key_signature'].items()):
            voice.insert(float(offset), KeySignature(ks))

    return score
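# A minimal VMF document inferred from the header fields this parser reads;
# the per-tick body layout (flags, pitch class, octave, dynamics,
# articulation, part id) is only partly visible in this excerpt, so treat
# this as illustrative rather than as the VMF spec:
import json

vmf = {
    'header': {
        'number_of_parts': 1,
        'number_of_voices': 1,
        'tick_value': '1/4',             # smallest note as a fraction string
        'time_signature': {'0.0': '4/4'},
        'key_signature': {'0.0': 0},     # 0 sharps/flats
        'tempo': {'0.0': 120},
    },
    'body': [],                          # one entry per tick, per the loop above
}
score = read_vmf_string(json.dumps(vmf))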
Example #22
class Transcriptor:
    def __init__(self, path):
        self.path = path
        self.nfft = 2048
        self.overlap = 0.5
        self.hop_length = int(self.nfft * (1 - self.overlap))
        self.n_bins = 72
        self.mag_exp = 4
        self.pre_post_max = 6
        self.threshold = -71

        self.audio_sample, self.sr = self.load()
        self.cqt = self.compute_cqt()
        self.thresh_cqt = self.compute_thresholded_cqt(self.cqt)

        self.onsets = self.compute_onset(self.thresh_cqt)

        self.tempo, self.beats, self.mm = self.estimate_tempo()

        self.music_info = np.array([
            self.estimate_pitch_and_notes(i)
            for i in range(len(self.onsets[1]) - 1)
        ])
        self.note_info = list(self.music_info[:, 2])

        self.stream = Stream()

    def load(self):
        x, sr = librosa.load(self.path, sr=None, mono=True)
        print("x Shape =", x.shape)
        print("Sample rate =", sr)
        print("Audio Length in seconds = {} [s]" .format(x.shape[0] / sr))
        return x, sr

    def compute_cqt(self):
        c = librosa.cqt(self.audio_sample, sr=self.sr, hop_length=self.hop_length,
                        fmin=None, n_bins=self.n_bins, res_type='fft')
        c_mag = librosa.magphase(c)[0] ** self.mag_exp
        cdb = librosa.amplitude_to_db(c_mag, ref=np.max)
        return cdb

    def compute_thresholded_cqt(self, cqt):
        new_cqt = np.copy(cqt)
        new_cqt[new_cqt < self.threshold] = -120
        return new_cqt

    def compute_onset_env(self, cqt):
        return librosa.onset.onset_strength(S=cqt, sr=self.sr, aggregate=np.mean,
                                            hop_length=self.hop_length)

    def compute_onset(self, cqt):
        onset_env = self.compute_onset_env(cqt)
        onset_frames = librosa.onset.onset_detect(onset_envelope=onset_env,
                                                  sr=self.sr, units='frames',
                                                  hop_length=self.hop_length,
                                                  pre_max=self.pre_post_max,
                                                  post_max=self.pre_post_max,
                                                  backtrack=False)

        onset_boundaries = np.concatenate([[0], onset_frames, [cqt.shape[1]]])
        onset_times = librosa.frames_to_time(onset_boundaries, sr=self.sr,
                                             hop_length=self.hop_length)

        return [onset_times, onset_boundaries, onset_env]

    def display_cqt_tuning(self):
        plt.figure()
        librosa.display.specshow(self.thresh_cqt, sr=self.sr, hop_length=self.hop_length,
                                 x_axis='time', y_axis='cqt_note', cmap='coolwarm')
        plt.ylim([librosa.note_to_hz('B2'), librosa.note_to_hz('B5')])
        plt.vlines(self.onsets[0], 0, self.sr / 2, color='k', alpha=0.8)
        plt.title("CQT")
        plt.colorbar()
        plt.show()

    def estimate_tempo(self):
        tempo, beats = librosa.beat.beat_track(y=None, sr=self.sr,
                                               onset_envelope=self.onsets[2],
                                               hop_length=self.hop_length,
                                               start_bpm=120.0,
                                               tightness=100.0,
                                               trim=True,
                                               units='frames')
        tempo = int(2 * round(tempo / 2))
        mm = MetronomeMark(referent='quarter', number=tempo)
        return tempo, beats, mm

    def generate_note(self, f0_info, n_duration, round_to_sixteenth=True):
        f0 = f0_info[0]
        a = remap(f0_info[1], self.cqt.min(), self.cqt.max(), 0, 1)
        duration = librosa.frames_to_time(n_duration, sr=self.sr, hop_length=self.hop_length)
        note_duration = 0.02 * np.around(duration / 0.02)  # Round to 2 decimal places for music21 compatibility
        midi_duration = second_to_quarter(duration, self.tempo)
        midi_velocity = int(round(remap(f0_info[1], self.cqt.min(), self.cqt.max(), 80, 120)))
        if round_to_sixteenth:
            midi_duration = round(midi_duration * 16) / 16
        try:
            if f0 is None:
                midi_note = None
                note_info = Rest(type=self.mm.secondsToDuration(note_duration).type)
                f0 = 0
            else:
                midi_note = round(librosa.hz_to_midi(f0))
                note = Note(librosa.midi_to_note(midi_note), type=self.mm.secondsToDuration(note_duration).type)
                note.volume.velocity = midi_velocity
                note_info = [note]
        except DurationException:
            if f0 is None:
                midi_note = None
                note_info = Rest(type='32nd')
                f0 = 0
            else:
                midi_note = round(librosa.hz_to_midi(f0))
                note = Note(librosa.midi_to_note(midi_note),
                            type='eighth')
                note.volume.velocity = midi_velocity
                note_info = [note]

        midi_info = [midi_note, midi_duration, midi_velocity]
        n = np.arange(librosa.frames_to_samples(n_duration, hop_length=self.hop_length))
        sine_wave = a * np.sin(2 * np.pi * f0 * n / float(self.sr))
        return [sine_wave, midi_info, note_info]

    def estimate_pitch(self, segment, threshold):
        freqs = librosa.cqt_frequencies(n_bins=self.n_bins, fmin=librosa.note_to_hz('C1'),
                                        bins_per_octave=12)
        if segment.max() < threshold:
            return [None, np.mean((np.amax(segment, axis=0)))]
        else:
            f0 = int(np.mean((np.argmax(segment, axis=0))))
        return [freqs[f0], np.mean((np.amax(segment, axis=0)))]

    def estimate_pitch_and_notes(self, i):
        n0 = self.onsets[1][i]
        n1 = self.onsets[1][i + 1]
        f0_info = self.estimate_pitch(np.mean(self.cqt[:, n0:n1], axis=1), threshold=self.threshold)
        return self.generate_note(f0_info, n1 - n0)

    def transcript(self):
        self.stream.append(self.mm)
        piano = instrument.fromString('grand piano')
        piano.midiChannel = 0
        piano.midiProgram = 1
        self.stream.append(piano)
        self.stream.insert(0, metadata.Metadata())
        self.stream.metadata.title = self.path.split('/')[-1]
        for note in self.note_info:
            self.stream.append(note)
        key = self.stream.analyze('key')
        print(key.name)
        # Insert Key to Stream
        self.stream.insert(0, key)

        # self.stream.show('text')

    def show_stream(self):
        self.stream.show()

    def convert_stream_to_midi(self):
        midi_file = midi.translate.streamToMidiFile(self.stream)
        midi_file.open('midi_scale.mid', 'wb')
        midi_file.write()
        midi_file.close()

        midi_file = midi.translate.streamToMidiFile(self.stream)
        filename = filedialog.asksaveasfile(initialdir="/", title="Save Midi File",
                                            filetypes=[('midi files', '*.mid *.midi')])
        midi_file.open(filename.name, 'wb')
        midi_file.write()
        midi_file.close()
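# Typical driver for the class above; 'input.wav' is a hypothetical path,
# and the save dialog at the end comes from tkinter.filedialog:
transcriptor = Transcriptor('input.wav')
transcriptor.display_cqt_tuning()       # optional visual check of the CQT
transcriptor.transcript()               # build the music21 stream
transcriptor.convert_stream_to_midi()   # write midi_scale.mid, then prompt for a path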
Example #23
    def melody_and_chords_streams(self) -> Tuple[Stream, Stream]:
        """
        The chord stream contains realized chords, chord symbols, and rests for N.C. (no chord).

        :return:
        """
        melody = Stream()
        chord_dict = defaultdict(list)
        measure_duration = None
        for measure_idx, measure in enumerate(
                self.ls.recurse().getElementsByClass(Measure)):
            if measure_duration is None:
                measure_duration = measure.duration.quarterLength
            else:
                if measure_duration != measure.duration.quarterLength:
                    raise WrongBarDurationError()
            mel_measure = measure.cloneEmpty()
            if measure_idx == 0:
                anacrusis = measure.barDuration.quarterLength - measure.duration.quarterLength
                if anacrusis:
                    mel_measure.append(Rest(duration=Duration(anacrusis)))
            for elt in measure:
                if elt.isClassOrSubclass((ChordSymbol, )):
                    chord_dict[measure_idx].append(elt)
                else:
                    mel_measure.append(deepcopy(elt))
            melody.append(mel_measure)
        chords = deepcopy(melody)
        clef = None
        for _clef in chords.recurse().getElementsByClass(Clef):
            clef = _clef
            break
        if clef:
            clef.activeSite.insert(0, BassClef())
            clef.activeSite.remove(clef)
        last_chord_symbol = None
        for measure_idx, measure in enumerate(
                chords.getElementsByClass(Measure)):
            original_measure_duration = measure.duration.quarterLength
            measure.removeByClass([Rest, Note])
            if chord_dict[measure_idx]:
                beats = [floor(ch.beat) for ch in chord_dict[measure_idx]] \
                        + [1 + original_measure_duration]
                durations = [(beats[i + 1] - beats[i])
                             for i in range(len(beats) - 1)]
                if beats[0] > 1:
                    if last_chord_symbol is None:
                        measure.insert(0,
                                       Rest(duration=Duration(beats[0] - 1)))
                    else:
                        _cs = deepcopy(last_chord_symbol)
                        _cs.duration = Duration(beats[0] - 1)
                        measure.insert(0, _cs)
                for chord_symbol_idx, chord_symbol in enumerate(
                        chord_dict[measure_idx]):
                    chord_symbol.duration = Duration(
                        durations[chord_symbol_idx])
                    measure.insert(beats[chord_symbol_idx] - 1, chord_symbol)
                    last_chord_symbol = chord_symbol
            else:
                if last_chord_symbol is None:
                    measure.insert(
                        0, Rest(duration=Duration(original_measure_duration)))
                else:
                    _cs = deepcopy(last_chord_symbol)
                    _cs.duration = Duration(original_measure_duration)
                    measure.insert(0, _cs)
        return melody, chords
Example #24
def get_midi_stream_1():
    part1 = [Rest(), Rest(), Note('E-'), Rest()]
    part2 = [Rest(), Rest(), Note('A-'), Rest()]
    part3 = [Note('B-'), Rest(), Note('E-'), Rest()]
    part4 = [Note('B-'), Rest(), Note('A-'), Rest()]
    part5 = [Note('B-'), Rest(), Rest(), Rest()]
    part6 = [Note('G'), Rest(), Note('C'), Rest()]
    part7 = [Note('D'), Rest(), Note('E-'), Rest()]
    stream_instance = Stream()
    for part in [part1, part2, part3, part2, part3, part2, part4,
                 part5, part1, part6, part7, part6, part7]:
        stream_instance.append(deepcopy(part))
    return stream_instance
Example #25
class HumdrumSpine(object):
    '''
    A HumdrumSpine is a collection of events arranged vertically that have a
    connection to each other.
    Each HumdrumSpine MUST have an id (numeric or string) attached to it.

    spine1 = HumdrumSpine(5, [SpineEvent1, SpineEvent2])
    spine1.beginningPosition = 5
    spine1.endingPosition = 6
    spine1.upstream = [3]
    spine1.downstream = [7,8]
    spine1.spineCollection = weakref.ref(SpineCollection1)
           # we keep weak references to the spineCollection so that we 
           # don't have circular references

    print(spine1.spineType)  
           # searches the EventList or upstreamSpines to figure 
           # out the spineType

    '''
    def __init__(self, id, eventList = None):
        self.id = id
        if eventList is None:
            eventList = []
        for event in eventList:
            event.spineId = id
        
        self.eventList = eventList
        self.music21Objects = Stream()
        self.beginningPosition = 0
        self.endingPosition = 0
        self.upstream = []
        self.downstream = []

        self._spineCollection = None
        self._spineType = None

    def __repr__(self):
        return str(self.id) + repr(self.upstream) + repr(self.downstream)

    def append(self, event):
        self.eventList.append(event)

    def __iter__(self):
        '''Resets the counter to 0 so that iteration is correct'''
        self.iterIndex = 0
        return self

    def __next__(self):
        '''Returns the current event and increments the iteration index.'''
        if self.iterIndex == len(self.eventList):
            raise StopIteration
        thisEvent = self.eventList[self.iterIndex]
        self.iterIndex += 1
        return thisEvent

    def _getSpineCollection(self):
        return common.unwrapWeakref(self._spineCollection)

    def _setSpineCollection(self, sc = None):
        self._spineCollection = sc
    
    spineCollection = property(_getSpineCollection, _setSpineCollection)

    def upstreamSpines(self):
        '''
        Returns the HumdrumSpine(s) that are upstream (if the spineCollection is set)
        '''
        if self.upstream:
            sc1 = self.spineCollection
            if sc1:
                spineReturn = []
                for upstreamId in self.upstream:
                    spineReturn.append(sc1.getSpineById(upstreamId))
                return spineReturn
            else:
                return []
        else:
            return []

    def downstreamSpines(self):
        '''
        Returns the HumdrumSpine(s) that are downstream (if the 
        spineCollection is set)
        '''
        if self.downstream:
            sc1 = self.spineCollection
            if sc1:
                spineReturn = []
                for downstreamId in self.downstream:
                    spineReturn.append(sc1.getSpineById(downstreamId))
                return spineReturn
            else:
                return []
        else:
            return []

    def _getLocalSpineType(self):
        if self._spineType is not None:
            return self._spineType
        else:
            for thisEvent in self.eventList:
                m1 = re.match(r"\*\*(.*)", thisEvent.contents)
                if m1:
                    self._spineType = m1.group(1)
                    return self._spineType
            return None
    
    def _getUpstreamSpineType(self):
        pS = self.upstreamSpines()
        if pS:
            ## leftFirst, DepthFirst search
            for thisPS in pS:
                psSpineType = thisPS.spineType
                if psSpineType is not None:
                    return psSpineType
            return None
        else:
            return None
            

    def _getSpineType(self):
        if self._spineType is not None:
            return self._spineType
        else:
            st = self._getLocalSpineType()
            if st is not None:
                self._spineType = st
                return st
            else:
                st = self._getUpstreamSpineType()
                if st is not None:
                    self._spineType = st
                    return st
                else:
                    raise HumdrumException("Could not determine spineType " +
                                           "for spine with id " + str(self.id))
    
    def _setSpineType(self, newSpineType = None):
        self._spineType = newSpineType
    
    spineType = property(_getSpineType, _setSpineType)

    def parse(self):
        '''
        Dummy method that pushes all these objects to music21Objects
        even though they probably are not music21 objects.
        '''
        for event in self.eventList:
            eventC = str(event.contents)
            if eventC == ".":
                pass
            else:
                self.music21Objects.append(event)
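# Minimal illustration of the spine API above, using a stand-in event type
# (real code would use SpineEvent); the '**kern' token sets the spine type:
class FakeEvent:
    def __init__(self, contents):
        self.contents = contents
        self.spineId = None

spine = HumdrumSpine(1, [FakeEvent('**kern'), FakeEvent('4c'), FakeEvent('.')])
for event in spine:
    print(event.contents)   # **kern, then 4c, then .
print(spine.spineType)      # 'kern', recovered from the '**kern' token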