def getChordSequence(self):
    s = Stream()
    for part in self.getParts():  # type: Part
        for elt in part.recurse().getElementsByClass(ChordSymbol):  # type: ChordSymbol
            s.insert(elt.getOffsetInHierarchy(part), copy(elt))
    return s
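
The same idea works outside a class: walk each part of a parsed score, collect its ChordSymbol objects, and re-insert copies at their offsets within the part. A minimal standalone sketch (the score path and function name are placeholders):

from copy import copy
from music21 import converter, harmony, stream

def chord_sequence(score_path):
    """Collect every ChordSymbol of a score into one flat Stream."""
    score = converter.parse(score_path)  # placeholder path, e.g. a MusicXML file
    s = stream.Stream()
    for part in score.parts:
        for cs in part.recurse().getElementsByClass(harmony.ChordSymbol):
            # getOffsetInHierarchy gives the offset relative to the enclosing part
            s.insert(cs.getOffsetInHierarchy(part), copy(cs))
    return s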
Example No. 2
def _getStream(self):
    if self.spineCollection is None:
        raise HumdrumException("parse lines first!")
    elif self.spineCollection.spines is None:
        raise HumdrumException("really? not a single spine in your data? um, not my problem!")
    elif self.spineCollection.spines[0].music21Objects is None:
        raise HumdrumException("okay, you got at least one spine, but it aint got nothing in it; have you thought of taking up kindergarten teaching?")
    else:
        masterStream = Stream()
        for thisSpine in self.spineCollection:
            thisSpine.music21Objects.id = "spine_" + str(thisSpine.id)
            masterStream.insert(thisSpine.music21Objects)
        return masterStream
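
Stripped of the Humdrum-specific checks, the pattern here is simply: give each sub-stream an id and insert it into a master Stream. A small sketch with plain music21 objects:

from music21 import note, stream

master = stream.Stream()
for i, pitches in enumerate([['C4', 'E4', 'G4'], ['A3', 'C4', 'E4']]):
    sub = stream.Part()
    sub.id = 'spine_' + str(i)      # label the sub-stream, as the spine code does
    for p in pitches:
        sub.append(note.Note(p))
    master.insert(0, sub)           # every sub-stream starts at offset 0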
Example No. 3
def append_stream(original_stream: stream.Stream, *streams: stream.Stream):
    """

    Appends all elements of one or more streams at the end of a stream.

    Args:
        original_stream: The stream to append to.
        *streams: Any number of streams to be appended to the original stream.
    """
    for stream_ in streams:
        h_offset = original_stream.highestTime
        for element in stream_.elements:
            original_stream.insert(element.offset + h_offset, element)
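
A hypothetical call, assuming the snippet's from music21 import stream import: the elements of each extra stream land after the current end (highestTime) of the first stream.

from music21 import note

s1 = stream.Stream()
s1.append([note.Note('C4'), note.Note('E4')])   # two quarter notes at offsets 0.0 and 1.0
s2 = stream.Stream()
s2.append(note.Note('G4'))
append_stream(s1, s2)                            # G4 is inserted at offset 2.0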
Example No. 4
    def _realizeM21Sequence(self, chords):
        s = Stream()

        offset = 0

        # step through the template and add notes to stream
        for chord in chords:
            duration = chord.getDuration()
            for pitch in chord.getPitchSet():
                n = Note(pitch)
                n.duration.quarterLength = duration
                s.insert(offset, n)
            offset += duration
        return s
Example No. 5
    def _realizeM21Sequence(self, notes):
        s = Stream()

        offset = 0

        # step through the backbone notes and add notes to stream
        for note in notes:
            duration = 1
            pitch = note.getPitch()
            n = m21Note.Note(pitch)
            n.duration.quarterLength = duration
            s.insert(offset, n)
            offset += duration
        return s
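
Both _realizeM21Sequence variants follow the same pattern: keep a running offset, insert every pitch of the current event at that offset, then advance by the event's duration. A standalone sketch with plain (pitch_names, quarter_length) tuples in place of the project-specific chord and note objects:

from music21 import note, stream

def realize(events):
    """events: iterable of (pitch_names, quarter_length), e.g. [(['C4', 'E4', 'G4'], 2.0), (['F4'], 1.0)]."""
    s = stream.Stream()
    offset = 0.0
    for pitches, dur in events:
        for p in pitches:
            n = note.Note(p)
            n.duration.quarterLength = dur
            s.insert(offset, n)   # all pitches of one event share the same offset
        offset += dur
    return s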
Example No. 6
def test():
    stream = Stream()

    n1 = Note('C4', duration=Duration(1.5))
    n2 = Note('D4', duration=Duration(0.5))
    n3 = Note('E4')
    n4 = Note('F4')
    n5 = Note('G4')
    n6 = Note('A4')

    n7 = Note('C4')
    n8 = Note('D4').getGrace()
    n9 = Note('E4').getGrace()
    n10 = Note('F4')
    n11 = Note('G4')
    n12 = Note('A4', duration=Duration(0.5))
    n13 = Note('A4', duration=Duration(0.5))

    gliss1 = Glissando([n2, n3])
    gliss2 = Glissando([n5, n6])
    gliss3 = Glissando([n6, n7])
    gliss4 = Glissando([n8, n9])

    slur1 = Slur([n2, n3])
    slur2 = Slur([n6, n7])
    slur3 = Slur([n9, n10])

    stream.append([n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13])
    stream.insert(0, gliss1)
    stream.insert(0, gliss2)
    stream.insert(0, gliss3)
    stream.insert(0, gliss4)
    stream.insert(0, slur1)
    stream.insert(0, slur2)
    stream.insert(0, slur3)

    return stream
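
A possible follow-up once test() returns: the resulting Stream can be inspected or exported using its own methods.

s = test()
s.show('text')                         # list offsets, notes, and spanners as text
# s.write('midi', fp='glissandi.mid')  # or export the result to a MIDI file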
Example No. 8
def build_midi(harmony, melody):
    chords_dict = get_chord_dicts()[1]

    song = []
    for i, eighth in enumerate(melody):
        # eighth = multi_hot_to_pianoroll(piano_roll[:midi_range]) # now make_music returns pianorolls already
        # chord = one_hot_to_index(piano_roll[-chord_classes:]) # TODO add chord to midi
        # print(f'EIGHTH: {eighth}') # DEBUG

        song_notes = []
        for note_ in eighth:
            note_name = NOTES[note_%12]
            note_octave = start_octave + note_//12 # starting from C2
            song_notes.append(note_name + str(note_octave))

        song_chords = []
        full_chord = chords_dict[harmony[i]]
        if full_chord != '<unk>':
            for chord_ in full_chord:
                chord_name = NOTES[chord_%12]
                song_chords.append(chord_name + str(start_octave-1))

        song.append(("REST" if len(song_notes) == 0 else song_notes, "REST" if len(song_chords) == 0 else song_chords))

    notes_score = Score()
    notes_score.append(instrument.Piano())
    chords_score = Score()
    chords_score.append(instrument.KeyboardInstrument())
    bass_score = Score()
    bass_score.append(instrument.ElectricBass())

    current_note_length = 0
    current_chord_length = 0

    for i, _ in enumerate(song):

        current_note_length += 0.5
        current_chord_length += 0.5

        # print(f'NOTE: {song[i][0]}\t\t\t- CHORD: {song[i][1]}')

        if i < len(song)-1:
            # note
            if song[i][0] != song[i+1][0]:
                if song[i][0] == "REST":
                    notes_score.append(note.Rest(duration=Duration(current_note_length)))
                else:
                    notes_score.append(chord.Chord([note.Note(nameWithOctave=note_name) for note_name in song[i][0]], duration=Duration(current_note_length)))
                current_note_length = 0

            # chord
            if song[i][1] != song[i+1][1] or current_chord_length == 4:
                if song[i][1] == "REST":
                    chords_score.append(note.Rest(duration=Duration(current_chord_length)))

                    bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                    bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                    bass_score.append(note.Rest(duration=Duration(current_chord_length/2)))
                else:
                    chords_score.append(chord.Chord([note.Note(nameWithOctave=chord_name) for chord_name in song[i][1]], duration=Duration(current_chord_length)))

                    bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                    bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                    bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/2)))
                current_chord_length = 0
        else:
            # note
            if song[i][0] == "REST":
                notes_score.append(note.Rest(duration=Duration(current_note_length)))
            else:
                notes_score.append(chord.Chord([note.Note(nameWithOctave=note_name) for note_name in song[i][0]], duration=Duration(current_note_length)))

            # chord
            if song[i][1] == "REST":
                chords_score.append(note.Rest(duration=Duration(current_chord_length)))

                bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                bass_score.append(note.Rest(duration=Duration(current_chord_length/2)))
            else:
                chords_score.append(chord.Chord([note.Note(nameWithOctave=chord_name) for chord_name in song[i][1]], duration=Duration(current_chord_length)))

                bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/2)))

    song_stream = Stream()
    song_stream.insert(0, notes_score)
    song_stream.insert(0, chords_score)
    song_stream.insert(0, bass_score)

    if not os.path.exists('melodies'):
        os.makedirs('melodies')
    dt = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    song_stream.write('midi', fp=f'melodies/generated_{dt}.mid')
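
build_midi relies on module-level names that are not shown in this snippet (NOTES, start_octave, get_chord_dicts, plus the music21 and standard-library imports). The core music21 idiom it uses is appending Chord and Rest objects with explicit Durations, roughly:

from music21 import chord, note, stream
from music21.duration import Duration

part = stream.Part()
part.append(chord.Chord(['C3', 'E3', 'G3'], duration=Duration(2.0)))  # half-note chord
part.append(note.Rest(duration=Duration(0.5)))                        # eighth rest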
Example No. 9
# Write the synthesized audio to a 16-bit mono WAV file
file = wave.open("output/" + filename + "_sine.wav", "wb")
file.setnchannels(1)
file.setsampwidth(2)  # 2 bytes = 16 bit
file.setframerate(fs)
file.writeframes(synth_audio_converted)
file.close()

# Get music21 notes
note_info = list(music_info[:, 1])

# Create music21 stream
s = Stream()
s.append(mm)
electricguitar = instrument.fromString('electric guitar')
electricguitar.midiChannel = 0
electricguitar.midiProgram = 30  # Set program to Overdriven Guitar
s.append(electricguitar)
s.insert(0, metadata.Metadata())
for note in note_info:
    s.append(note)

# Analyse music21 stream to get song Key
key = s.analyze('key')
print("Key: " + key.name)
# Insert Key to Stream
s.insert(0, key)

# Save MIDI to file
s.write('midi', "output/" + filename + "_music21.mid")
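
This snippet assumes variables produced earlier in the script (synth_audio_converted, fs, filename, mm, music_info). Note that analyze('key') returns a music21 Key object, so more than its name is available, e.g.:

key = s.analyze('key')
print(key.tonic.name, key.mode)     # tonic and mode separately
print(key.correlationCoefficient)   # confidence of the Krumhansl-Schmuckler estimate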
Example No. 10
if args.melody == 'little_happiness':
    melody = converter.parse(A_LITTLE_HAPPINESS)
elif args.melody == 'jj_lin':
    melody = converter.parse(JJ_LIN_MELODY)
else:
    print('Unrecognized melody: should be jj_lin or little_happiness')
    sys.exit(1)

if args.series not in ('major', 'minor'):
    print('Unrecognized series: should be major or minor')
    sys.exit(1)

melody.insert(0, MetronomeMark(number=95))

# Pick algorithm
if args.algorithm == 'basic':
    chord_search.run(chords, melody, args.series)
elif args.algorithm == 'hmm':
    viterbi.run(chords, melody, args.series)
else:
    print('Unrecognized algorithm: should be basic or hmm')
    sys.exit(1)

# Combine two parts
song = Stream()
song.insert(0, melody)
song.insert(0, chords)

# song.show('midi')
song.show()
Example No. 11
class Transcriptor:
    def __init__(self, path):
        self.path = path
        self.nfft = 2048
        self.overlap = 0.5
        self.hop_length = int(self.nfft * (1 - self.overlap))
        self.n_bins = 72
        self.mag_exp = 4
        self.pre_post_max = 6
        self.threshold = -71

        self.audio_sample, self.sr = self.load()
        self.cqt = self.compute_cqt()
        self.thresh_cqt = self.compute_thresholded_cqt(self.cqt)

        self.onsets = self.compute_onset(self.thresh_cqt)

        self.tempo, self.beats, self.mm = self.estimate_tempo()

        self.music_info = np.array([
            self.estimate_pitch_and_notes(i)
            for i in range(len(self.onsets[1]) - 1)
        ])
        self.note_info = list(self.music_info[:, 2])

        self.stream = Stream()

    def load(self):
        x, sr = librosa.load(self.path, sr=None, mono=True)
        print("x Shape =", x.shape)
        print("Sample rate =", sr)
        print("Audio Length in seconds = {} [s]" .format(x.shape[0] / sr))
        return x, sr

    def compute_cqt(self):
        c = librosa.cqt(self.audio_sample, sr=self.sr, hop_length=self.hop_length,
                        fmin=None, n_bins=self.n_bins, res_type='fft')
        c_mag = librosa.magphase(c)[0] ** self.mag_exp
        cdb = librosa.amplitude_to_db(c_mag, ref=np.max)
        return cdb

    def compute_thresholded_cqt(self, cqt):
        new_cqt = np.copy(cqt)
        new_cqt[new_cqt < self.threshold] = -120
        return new_cqt

    def compute_onset_env(self, cqt):
        return librosa.onset.onset_strength(S=cqt, sr=self.sr, aggregate=np.mean,
                                            hop_length=self.hop_length)

    def compute_onset(self, cqt):
        onset_env = self.compute_onset_env(cqt)
        onset_frames = librosa.onset.onset_detect(onset_envelope=onset_env,
                                                  sr=self.sr, units='frames',
                                                  hop_length=self.hop_length,
                                                  pre_max=self.pre_post_max,
                                                  post_max=self.pre_post_max,
                                                  backtrack=False)

        onset_boundaries = np.concatenate([[0], onset_frames, [cqt.shape[1]]])
        onset_times = librosa.frames_to_time(onset_boundaries, sr=self.sr,
                                             hop_length=self.hop_length)

        return [onset_times, onset_boundaries, onset_env]

    def display_cqt_tuning(self):
        plt.figure()
        librosa.display.specshow(self.thresh_cqt, sr=self.sr, hop_length=self.hop_length,
                                 x_axis='time', y_axis='cqt_note', cmap='coolwarm')
        plt.ylim([librosa.note_to_hz('B2'), librosa.note_to_hz('B5')])
        plt.vlines(self.onsets[0], 0, self.sr / 2, color='k', alpha=0.8)
        plt.title("CQT")
        plt.colorbar()
        plt.show()

    def estimate_tempo(self):
        tempo, beats = librosa.beat.beat_track(y=None, sr=self.sr,
                                               onset_envelope=self.onsets[2],
                                               hop_length=self.hop_length,
                                               start_bpm=120.0,
                                               tightness=100.0,
                                               trim=True,
                                               units='frames')
        tempo = int(2 * round(tempo / 2))
        mm = MetronomeMark(referent='quarter', number=tempo)
        return tempo, beats, mm

    def generate_note(self, f0_info, n_duration, round_to_sixteenth=True):
        f0 = f0_info[0]
        a = remap(f0_info[1], self.cqt.min(), self.cqt.max(), 0, 1)
        duration = librosa.frames_to_time(n_duration, sr=self.sr, hop_length=self.hop_length)
        note_duration = 0.02 * np.around(duration / 0.02)  # round to the nearest 0.02 s for music21 compatibility
        midi_duration = second_to_quarter(duration, self.tempo)
        midi_velocity = int(round(remap(f0_info[1], self.cqt.min(), self.cqt.max(), 80, 120)))
        if round_to_sixteenth:
            midi_duration = round(midi_duration * 16) / 16
        try:
            if f0 is None:
                midi_note = None
                note_info = Rest(type=self.mm.secondsToDuration(note_duration).type)
                f0 = 0
            else:
                midi_note = round(librosa.hz_to_midi(f0))
                note = Note(librosa.midi_to_note(midi_note), type=self.mm.secondsToDuration(note_duration).type)
                note.volume.velocity = midi_velocity
                note_info = [note]
        except DurationException:
            if f0 is None:
                midi_note = None
                note_info = Rest(type='32nd')
                f0 = 0
            else:
                midi_note = round(librosa.hz_to_midi(f0))
                note = Note(librosa.midi_to_note(midi_note),
                            type='eighth')
                note.volume.velocity = midi_velocity
                note_info = [note]

        midi_info = [midi_note, midi_duration, midi_velocity]
        n = np.arange(librosa.frames_to_samples(n_duration, hop_length=self.hop_length))
        sine_wave = a * np.sin(2 * np.pi * f0 * n / float(self.sr))
        return [sine_wave, midi_info, note_info]

    def estimate_pitch(self, segment, threshold):
        freqs = librosa.cqt_frequencies(n_bins=self.n_bins, fmin=librosa.note_to_hz('C1'),
                                        bins_per_octave=12)
        if segment.max() < threshold:
            return [None, np.mean((np.amax(segment, axis=0)))]
        else:
            f0 = int(np.mean((np.argmax(segment, axis=0))))
        return [freqs[f0], np.mean((np.amax(segment, axis=0)))]

    def estimate_pitch_and_notes(self, i):
        n0 = self.onsets[1][i]
        n1 = self.onsets[1][i + 1]
        f0_info = self.estimate_pitch(np.mean(self.cqt[:, n0:n1], axis=1), threshold=self.threshold)
        return self.generate_note(f0_info, n1 - n0)

    def transcript(self):
        self.stream.append(self.mm)
        piano = instrument.fromString('grand piano')
        piano.midiChannel = 0
        piano.midiProgram = 1
        self.stream.append(piano)
        self.stream.insert(0, metadata.Metadata())
        self.stream.metadata.title = self.path.split('/')[-1]
        for note in self.note_info:
            self.stream.append(note)
        key = self.stream.analyze('key')
        print(key.name)
        # Insert Key to Stream
        self.stream.insert(0, key)

        # self.stream.show('text')

    def show_stream(self):
        self.stream.show()

    def convert_stream_to_midi(self):
        midi_file = midi.translate.streamToMidiFile(self.stream)
        midi_file.open('midi_scale.mid', 'wb')
        midi_file.write()
        midi_file.close()

        midi_file = midi.translate.streamToMidiFile(self.stream)
        filename = filedialog.asksaveasfile(initialdir="/", title="Save Midi File",
                                            filetypes=[("midi files", ("*.mid", "*.midi"))])
        midi_file.open(filename.name, 'wb')
        midi_file.write()
        midi_file.close()
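
The class assumes the usual imports (librosa, numpy as np, matplotlib.pyplot as plt, music21's Stream, Note, Rest, MetronomeMark, DurationException, instrument, metadata, midi, and tkinter.filedialog) plus two helpers not shown here (remap and second_to_quarter). A hypothetical driver, with a placeholder audio path:

if __name__ == '__main__':
    transcriptor = Transcriptor('recordings/take1.wav')
    transcriptor.display_cqt_tuning()       # optional: plot the thresholded CQT and onsets
    transcriptor.transcript()               # build the music21 stream and print the estimated key
    transcriptor.convert_stream_to_midi()   # write midi_scale.mid, then prompt for a save location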